1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) 2 3 /* 4 * Common eBPF ELF object loading operations. 5 * 6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org> 7 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com> 8 * Copyright (C) 2015 Huawei Inc. 9 * Copyright (C) 2017 Nicira, Inc. 10 * Copyright (C) 2019 Isovalent, Inc. 11 */ 12 13 #ifndef _GNU_SOURCE 14 #define _GNU_SOURCE 15 #endif 16 #include <stdlib.h> 17 #include <stdio.h> 18 #include <stdarg.h> 19 #include <libgen.h> 20 #include <inttypes.h> 21 #include <string.h> 22 #include <unistd.h> 23 #include <fcntl.h> 24 #include <errno.h> 25 #include <asm/unistd.h> 26 #include <linux/err.h> 27 #include <linux/kernel.h> 28 #include <linux/bpf.h> 29 #include <linux/btf.h> 30 #include <linux/filter.h> 31 #include <linux/list.h> 32 #include <linux/limits.h> 33 #include <linux/perf_event.h> 34 #include <linux/ring_buffer.h> 35 #include <sys/stat.h> 36 #include <sys/types.h> 37 #include <sys/vfs.h> 38 #include <tools/libc_compat.h> 39 #include <libelf.h> 40 #include <gelf.h> 41 42 #include "libbpf.h" 43 #include "bpf.h" 44 #include "btf.h" 45 #include "str_error.h" 46 #include "libbpf_util.h" 47 48 #ifndef EM_BPF 49 #define EM_BPF 247 50 #endif 51 52 #ifndef BPF_FS_MAGIC 53 #define BPF_FS_MAGIC 0xcafe4a11 54 #endif 55 56 /* vsprintf() in __base_pr() uses nonliteral format string. It may break 57 * compilation if user enables corresponding warning. Disable it explicitly. 
 */
#pragma GCC diagnostic ignored "-Wformat-nonliteral"

#define __printf(a, b)	__attribute__((format(printf, a, b)))

/* Default print callback: drop debug-level messages, forward the rest
 * to stderr.
 */
static int __base_pr(enum libbpf_print_level level, const char *format,
		     va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0;

	return vfprintf(stderr, format, args);
}

/* Current print callback; all libbpf diagnostics funnel through it. */
static libbpf_print_fn_t __libbpf_pr = __base_pr;

/* Override the print callback; passing NULL silences libbpf output. */
void libbpf_set_print(libbpf_print_fn_t fn)
{
	__libbpf_pr = fn;
}

__printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
	va_list args;

	if (!__libbpf_pr)
		return;

	va_start(args, format);
	__libbpf_pr(level, format, args);
	va_end(args);
}

#define STRERR_BUFSIZE  128

/* Evaluate "action", stash its result into "err", jump to "out" on
 * failure.
 */
#define CHECK_ERR(action, err, out) do {	\
	err = action;			\
	if (err)			\
		goto out;		\
} while(0)


/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

/* Close fd only when valid (>= 0) and reset it to -1; evaluates to the
 * close() result (0 when nothing was open).
 */
#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

/* Cached kernel feature probes, shared by all programs of an object. */
struct bpf_capabilities {
	/* v4.14: kernel support for program & map names. */
	__u32 name:1;
};

/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use.
	 */
	int idx;
	char *name;
	int prog_ifindex;
	char *section_name;
	/* section_name with / replaced by _; makes recursive pinning
	 * in bpf_object__pin_programs easier
	 */
	char *pin_name;
	struct bpf_insn *insns;
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;

	/* One descriptor per relocation record of this program. */
	struct reloc_desc {
		enum {
			RELO_LD64,
			RELO_CALL,
			RELO_DATA,
		} type;
		int insn_idx;
		union {
			int map_idx;	/* RELO_LD64 / RELO_DATA */
			int text_off;	/* RELO_CALL */
		};
	} *reloc_desc;
	int nr_reloc;
	int log_level;

	/* One fd per loaded instance; nr == -1 means "never loaded". */
	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
	int btf_fd;
	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	struct bpf_capabilities *caps;

	void *line_info;
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
};

/* How libbpf sees a map: user-defined (from the "maps" section) or an
 * internal map backing one of the global data sections.
 */
enum libbpf_map_type {
	LIBBPF_MAP_UNSPEC,
	LIBBPF_MAP_DATA,
	LIBBPF_MAP_BSS,
	LIBBPF_MAP_RODATA,
};

static const char * const libbpf_type_to_btf_name[] = {
	[LIBBPF_MAP_DATA]	= ".data",
	[LIBBPF_MAP_BSS]	= ".bss",
	[LIBBPF_MAP_RODATA]	= ".rodata",
};

struct bpf_map {
	int fd;
	char *name;
	size_t offset;		/* symbol offset within "maps" section */
	int map_ifindex;
	int inner_map_fd;
	struct bpf_map_def def;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
	enum libbpf_map_type libbpf_type;
};

/* Private copies of the data/rodata section contents, used to seed the
 * corresponding internal maps.
 */
struct bpf_secdata {
	void *rodata;
	void *data;
};

static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char name[BPF_OBJ_NAME_LEN];
	char license[64];
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;
	struct bpf_secdata sections;

	bool loaded;
	bool has_pseudo_calls;

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		void *obj_buf;
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;
		Elf_Data *data;
		Elf_Data *rodata;
		Elf_Data *bss;
		size_t strtabidx;
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;
		int nr_reloc;
		/* section indices; -1 when the section is absent */
		int maps_shndx;
		int text_shndx;
		int data_shndx;
		int rodata_shndx;
		int bss_shndx;
	} efile;
	/*
	 * All loaded bpf_object is linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	struct btf *btf;
	struct btf_ext *btf_ext;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	struct bpf_capabilities caps;

	char path[];	/* flexible array, allocated with the object */
};
#define obj_elf_valid(o)	((o)->efile.elf)

/* Release kernel-side resources of a program (instance fds, BTF fd,
 * func/line info buffers); safe on a never-loaded program.
 */
void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

	/*
	 * If the object is opened but the program was never loaded,
	 * it is possible that prog->instances.nr == -1.
290 */ 291 if (prog->instances.nr > 0) { 292 for (i = 0; i < prog->instances.nr; i++) 293 zclose(prog->instances.fds[i]); 294 } else if (prog->instances.nr != -1) { 295 pr_warning("Internal error: instances.nr is %d\n", 296 prog->instances.nr); 297 } 298 299 prog->instances.nr = -1; 300 zfree(&prog->instances.fds); 301 302 zclose(prog->btf_fd); 303 zfree(&prog->func_info); 304 zfree(&prog->line_info); 305 } 306 307 static void bpf_program__exit(struct bpf_program *prog) 308 { 309 if (!prog) 310 return; 311 312 if (prog->clear_priv) 313 prog->clear_priv(prog, prog->priv); 314 315 prog->priv = NULL; 316 prog->clear_priv = NULL; 317 318 bpf_program__unload(prog); 319 zfree(&prog->name); 320 zfree(&prog->section_name); 321 zfree(&prog->pin_name); 322 zfree(&prog->insns); 323 zfree(&prog->reloc_desc); 324 325 prog->nr_reloc = 0; 326 prog->insns_cnt = 0; 327 prog->idx = -1; 328 } 329 330 static char *__bpf_program__pin_name(struct bpf_program *prog) 331 { 332 char *name, *p; 333 334 name = p = strdup(prog->section_name); 335 while ((p = strchr(p, '/'))) 336 *p = '_'; 337 338 return name; 339 } 340 341 static int 342 bpf_program__init(void *data, size_t size, char *section_name, int idx, 343 struct bpf_program *prog) 344 { 345 if (size < sizeof(struct bpf_insn)) { 346 pr_warning("corrupted section '%s'\n", section_name); 347 return -EINVAL; 348 } 349 350 memset(prog, 0, sizeof(*prog)); 351 352 prog->section_name = strdup(section_name); 353 if (!prog->section_name) { 354 pr_warning("failed to alloc name for prog under section(%d) %s\n", 355 idx, section_name); 356 goto errout; 357 } 358 359 prog->pin_name = __bpf_program__pin_name(prog); 360 if (!prog->pin_name) { 361 pr_warning("failed to alloc pin name for prog under section(%d) %s\n", 362 idx, section_name); 363 goto errout; 364 } 365 366 prog->insns = malloc(size); 367 if (!prog->insns) { 368 pr_warning("failed to alloc insns for prog under section %s\n", 369 section_name); 370 goto errout; 371 } 372 prog->insns_cnt = 
		size / sizeof(struct bpf_insn);
	memcpy(prog->insns, data,
	       prog->insns_cnt * sizeof(struct bpf_insn));
	prog->idx = idx;
	prog->instances.fds = NULL;
	prog->instances.nr = -1;
	prog->type = BPF_PROG_TYPE_UNSPEC;
	prog->btf_fd = -1;

	return 0;
errout:
	bpf_program__exit(prog);
	return -ENOMEM;
}

/* Parse an executable section into a new bpf_program and append it to
 * obj->programs, growing the array by one.
 */
static int
bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
			char *section_name, int idx)
{
	struct bpf_program prog, *progs;
	int nr_progs, err;

	err = bpf_program__init(data, size, section_name, idx, &prog);
	if (err)
		return err;

	prog.caps = &obj->caps;
	progs = obj->programs;
	nr_progs = obj->nr_programs;

	progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
	if (!progs) {
		/*
		 * In this case the original obj->programs
		 * is still valid, so don't need special treat for
		 * bpf_close_object().
		 */
		pr_warning("failed to alloc a new program under section '%s'\n",
			   section_name);
		bpf_program__exit(&prog);
		return -ENOMEM;
	}

	pr_debug("found program %s\n", prog.section_name);
	obj->programs = progs;
	obj->nr_programs = nr_progs + 1;
	prog.obj = obj;
	progs[nr_progs] = prog;
	return 0;
}

/* Resolve each program's name from the GLOBAL symbol defined in its
 * section; programs in .text fall back to the literal name ".text".
 */
static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];

		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warning("failed to get sym name string for prog %s\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		if (!name && prog->idx == obj->efile.text_shndx)
			name = ".text";

		if (!name) {
			pr_warning("failed to find sym for prog %s\n",
				   prog->section_name);
			return -EINVAL;
		}

		prog->name = strdup(name);
		if (!prog->name) {
			pr_warning("failed to allocate memory for prog sym %s\n",
				   name);
			return -ENOMEM;
		}
	}

	return 0;
}

/* Allocate and minimally initialize a bpf_object; "path" is stored in
 * the trailing flexible array, the object name is path's basename with
 * any extension stripped.
 */
static struct bpf_object *bpf_object__new(const char *path,
					  void *obj_buf,
					  size_t obj_buf_sz)
{
	struct bpf_object *obj;
	char *end;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warning("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	/* Using basename() GNU version which doesn't modify arg. */
	strncpy(obj->name, basename((void *)path),
		sizeof(obj->name) - 1);
	end = strchr(obj->name, '.');
	if (end)
		*end = 0;

	obj->efile.fd = -1;
	/*
	 * Caller of this function should also calls
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to user. If not, we should duplicate the buffer to
	 * avoid user freeing them before elf finish.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;
	obj->efile.data_shndx = -1;
	obj->efile.rodata_shndx = -1;
	obj->efile.bss_shndx = -1;

	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}

/* Drop all ELF parsing state; idempotent. Does not free obj_buf, which
 * remains owned by the caller of bpf_object__open_buffer().
 */
static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	obj->efile.symbols = NULL;
	obj->efile.data = NULL;
	obj->efile.rodata = NULL;
	obj->efile.bss = NULL;

	zfree(&obj->efile.reloc);
	obj->efile.nr_reloc = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}

/* Open the ELF handle from either the in-memory buffer or obj->path,
 * then sanity-check that it is a relocatable eBPF object.
 */
static int bpf_object__elf_init(struct bpf_object *obj)
{
	int err = 0;
	GElf_Ehdr *ep;

	if (obj_elf_valid(obj)) {
		pr_warning("elf init: internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/*
		 * obj_buf should have been validated by
		 * bpf_object__open_buffer().
553 */ 554 obj->efile.elf = elf_memory(obj->efile.obj_buf, 555 obj->efile.obj_buf_sz); 556 } else { 557 obj->efile.fd = open(obj->path, O_RDONLY); 558 if (obj->efile.fd < 0) { 559 char errmsg[STRERR_BUFSIZE]; 560 char *cp = libbpf_strerror_r(errno, errmsg, 561 sizeof(errmsg)); 562 563 pr_warning("failed to open %s: %s\n", obj->path, cp); 564 return -errno; 565 } 566 567 obj->efile.elf = elf_begin(obj->efile.fd, 568 LIBBPF_ELF_C_READ_MMAP, 569 NULL); 570 } 571 572 if (!obj->efile.elf) { 573 pr_warning("failed to open %s as ELF file\n", 574 obj->path); 575 err = -LIBBPF_ERRNO__LIBELF; 576 goto errout; 577 } 578 579 if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) { 580 pr_warning("failed to get EHDR from %s\n", 581 obj->path); 582 err = -LIBBPF_ERRNO__FORMAT; 583 goto errout; 584 } 585 ep = &obj->efile.ehdr; 586 587 /* Old LLVM set e_machine to EM_NONE */ 588 if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) { 589 pr_warning("%s is not an eBPF object file\n", 590 obj->path); 591 err = -LIBBPF_ERRNO__FORMAT; 592 goto errout; 593 } 594 595 return 0; 596 errout: 597 bpf_object__elf_finish(obj); 598 return err; 599 } 600 601 static int 602 bpf_object__check_endianness(struct bpf_object *obj) 603 { 604 static unsigned int const endian = 1; 605 606 switch (obj->efile.ehdr.e_ident[EI_DATA]) { 607 case ELFDATA2LSB: 608 /* We are big endian, BPF obj is little endian. */ 609 if (*(unsigned char const *)&endian != 1) 610 goto mismatch; 611 break; 612 613 case ELFDATA2MSB: 614 /* We are little endian, BPF obj is big endian. 
*/ 615 if (*(unsigned char const *)&endian != 0) 616 goto mismatch; 617 break; 618 default: 619 return -LIBBPF_ERRNO__ENDIAN; 620 } 621 622 return 0; 623 624 mismatch: 625 pr_warning("Error: endianness mismatch.\n"); 626 return -LIBBPF_ERRNO__ENDIAN; 627 } 628 629 static int 630 bpf_object__init_license(struct bpf_object *obj, 631 void *data, size_t size) 632 { 633 memcpy(obj->license, data, 634 min(size, sizeof(obj->license) - 1)); 635 pr_debug("license of %s is %s\n", obj->path, obj->license); 636 return 0; 637 } 638 639 static int 640 bpf_object__init_kversion(struct bpf_object *obj, 641 void *data, size_t size) 642 { 643 __u32 kver; 644 645 if (size != sizeof(kver)) { 646 pr_warning("invalid kver section in %s\n", obj->path); 647 return -LIBBPF_ERRNO__FORMAT; 648 } 649 memcpy(&kver, data, sizeof(kver)); 650 obj->kern_version = kver; 651 pr_debug("kernel version of %s is %x\n", obj->path, 652 obj->kern_version); 653 return 0; 654 } 655 656 static int compare_bpf_map(const void *_a, const void *_b) 657 { 658 const struct bpf_map *a = _a; 659 const struct bpf_map *b = _b; 660 661 return a->offset - b->offset; 662 } 663 664 static bool bpf_map_type__is_map_in_map(enum bpf_map_type type) 665 { 666 if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS || 667 type == BPF_MAP_TYPE_HASH_OF_MAPS) 668 return true; 669 return false; 670 } 671 672 static int bpf_object_search_section_size(const struct bpf_object *obj, 673 const char *name, size_t *d_size) 674 { 675 const GElf_Ehdr *ep = &obj->efile.ehdr; 676 Elf *elf = obj->efile.elf; 677 Elf_Scn *scn = NULL; 678 int idx = 0; 679 680 while ((scn = elf_nextscn(elf, scn)) != NULL) { 681 const char *sec_name; 682 Elf_Data *data; 683 GElf_Shdr sh; 684 685 idx++; 686 if (gelf_getshdr(scn, &sh) != &sh) { 687 pr_warning("failed to get section(%d) header from %s\n", 688 idx, obj->path); 689 return -EIO; 690 } 691 692 sec_name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name); 693 if (!sec_name) { 694 pr_warning("failed to get section(%d) name from 
%s\n", 695 idx, obj->path); 696 return -EIO; 697 } 698 699 if (strcmp(name, sec_name)) 700 continue; 701 702 data = elf_getdata(scn, 0); 703 if (!data) { 704 pr_warning("failed to get section(%d) data from %s(%s)\n", 705 idx, name, obj->path); 706 return -EIO; 707 } 708 709 *d_size = data->d_size; 710 return 0; 711 } 712 713 return -ENOENT; 714 } 715 716 int bpf_object__section_size(const struct bpf_object *obj, const char *name, 717 __u32 *size) 718 { 719 int ret = -ENOENT; 720 size_t d_size; 721 722 *size = 0; 723 if (!name) { 724 return -EINVAL; 725 } else if (!strcmp(name, ".data")) { 726 if (obj->efile.data) 727 *size = obj->efile.data->d_size; 728 } else if (!strcmp(name, ".bss")) { 729 if (obj->efile.bss) 730 *size = obj->efile.bss->d_size; 731 } else if (!strcmp(name, ".rodata")) { 732 if (obj->efile.rodata) 733 *size = obj->efile.rodata->d_size; 734 } else { 735 ret = bpf_object_search_section_size(obj, name, &d_size); 736 if (!ret) 737 *size = d_size; 738 } 739 740 return *size ? 
	       0 : ret;
}

/* Public helper: find global variable "name" in the symbol table and
 * report its st_value (offset within its containing section).
 */
int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
				__u32 *off)
{
	Elf_Data *symbols = obj->efile.symbols;
	const char *sname;
	size_t si;

	if (!name || !off)
		return -EINVAL;

	for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, si, &sym))
			continue;
		/* only global data objects qualify as variables */
		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
		    GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
			continue;

		sname = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
				   sym.st_name);
		if (!sname) {
			pr_warning("failed to get sym name string for var %s\n",
				   name);
			return -EIO;
		}
		if (strcmp(name, sname) == 0) {
			*off = sym.st_value;
			return 0;
		}
	}

	return -ENOENT;
}

/* True when the object has any map source: a "maps" section or any
 * global data section that will become an internal map.
 */
static bool bpf_object__has_maps(const struct bpf_object *obj)
{
	return obj->efile.maps_shndx >= 0 ||
	       obj->efile.data_shndx >= 0 ||
	       obj->efile.rodata_shndx >= 0 ||
	       obj->efile.bss_shndx >= 0;
}

/* Set up one internal (.data/.bss/.rodata) single-entry array map; when
 * data_buff is non-NULL, the section contents are duplicated into it so
 * the map can be populated at load time.
 */
static int
bpf_object__init_internal_map(struct bpf_object *obj, struct bpf_map *map,
			      enum libbpf_map_type type, Elf_Data *data,
			      void **data_buff)
{
	struct bpf_map_def *def = &map->def;
	char map_name[BPF_OBJ_NAME_LEN];

	map->libbpf_type = type;
	/* ~0 offset makes internal maps sort after all user maps */
	map->offset = ~(typeof(map->offset))0;
	snprintf(map_name, sizeof(map_name), "%.8s%.7s", obj->name,
		 libbpf_type_to_btf_name[type]);
	map->name = strdup(map_name);
	if (!map->name) {
		pr_warning("failed to alloc map name\n");
		return -ENOMEM;
	}

	def->type = BPF_MAP_TYPE_ARRAY;
	def->key_size = sizeof(int);
	def->value_size = data->d_size;
	def->max_entries = 1;
	def->map_flags = type == LIBBPF_MAP_RODATA ?
			 BPF_F_RDONLY_PROG : 0;
	if (data_buff) {
		*data_buff = malloc(data->d_size);
		if (!*data_buff) {
			zfree(&map->name);
			pr_warning("failed to alloc map content buffer\n");
			return -ENOMEM;
		}
		memcpy(*data_buff, data->d_buf, data->d_size);
	}

	pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
	return 0;
}

/* Build obj->maps from the "maps" section symbols plus one internal map
 * per global data section. Returns 0 or a negative error.
 */
static int
bpf_object__init_maps(struct bpf_object *obj, int flags)
{
	int i, map_idx, map_def_sz = 0, nr_syms, nr_maps = 0, nr_maps_glob = 0;
	bool strict = !(flags & MAPS_RELAX_COMPAT);
	Elf_Data *symbols = obj->efile.symbols;
	Elf_Data *data = NULL;
	int ret = 0;

	if (!symbols)
		return -EINVAL;
	nr_syms = symbols->d_size / sizeof(GElf_Sym);

	/* data stays NULL when the object has no "maps" section */
	if (obj->efile.maps_shndx >= 0) {
		Elf_Scn *scn = elf_getscn(obj->efile.elf,
					  obj->efile.maps_shndx);

		if (scn)
			data = elf_getdata(scn, NULL);
		if (!scn || !data) {
			pr_warning("failed to get Elf_Data from map section %d\n",
				   obj->efile.maps_shndx);
			return -EINVAL;
		}
	}

	/*
	 * Count number of maps. Each map has a name.
	 * Array of maps is not supported: only the first element is
	 * considered.
	 *
	 * TODO: Detect array of map and report error.
	 */
	if (obj->efile.data_shndx >= 0)
		nr_maps_glob++;
	if (obj->efile.rodata_shndx >= 0)
		nr_maps_glob++;
	if (obj->efile.bss_shndx >= 0)
		nr_maps_glob++;
	for (i = 0; data && i < nr_syms; i++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;
		nr_maps++;
	}

	/* Alloc obj->maps and fill nr_maps.
*/ 874 pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path, 875 nr_maps, data->d_size); 876 if (!nr_maps && !nr_maps_glob) 877 return 0; 878 879 /* Assume equally sized map definitions */ 880 if (data) { 881 map_def_sz = data->d_size / nr_maps; 882 if (!data->d_size || (data->d_size % nr_maps) != 0) { 883 pr_warning("unable to determine map definition size " 884 "section %s, %d maps in %zd bytes\n", 885 obj->path, nr_maps, data->d_size); 886 return -EINVAL; 887 } 888 } 889 890 nr_maps += nr_maps_glob; 891 obj->maps = calloc(nr_maps, sizeof(obj->maps[0])); 892 if (!obj->maps) { 893 pr_warning("alloc maps for object failed\n"); 894 return -ENOMEM; 895 } 896 obj->nr_maps = nr_maps; 897 898 for (i = 0; i < nr_maps; i++) { 899 /* 900 * fill all fd with -1 so won't close incorrect 901 * fd (fd=0 is stdin) when failure (zclose won't close 902 * negative fd)). 903 */ 904 obj->maps[i].fd = -1; 905 obj->maps[i].inner_map_fd = -1; 906 } 907 908 /* 909 * Fill obj->maps using data in "maps" section. 
910 */ 911 for (i = 0, map_idx = 0; data && i < nr_syms; i++) { 912 GElf_Sym sym; 913 const char *map_name; 914 struct bpf_map_def *def; 915 916 if (!gelf_getsym(symbols, i, &sym)) 917 continue; 918 if (sym.st_shndx != obj->efile.maps_shndx) 919 continue; 920 921 map_name = elf_strptr(obj->efile.elf, 922 obj->efile.strtabidx, 923 sym.st_name); 924 925 obj->maps[map_idx].libbpf_type = LIBBPF_MAP_UNSPEC; 926 obj->maps[map_idx].offset = sym.st_value; 927 if (sym.st_value + map_def_sz > data->d_size) { 928 pr_warning("corrupted maps section in %s: last map \"%s\" too small\n", 929 obj->path, map_name); 930 return -EINVAL; 931 } 932 933 obj->maps[map_idx].name = strdup(map_name); 934 if (!obj->maps[map_idx].name) { 935 pr_warning("failed to alloc map name\n"); 936 return -ENOMEM; 937 } 938 pr_debug("map %d is \"%s\"\n", map_idx, 939 obj->maps[map_idx].name); 940 def = (struct bpf_map_def *)(data->d_buf + sym.st_value); 941 /* 942 * If the definition of the map in the object file fits in 943 * bpf_map_def, copy it. Any extra fields in our version 944 * of bpf_map_def will default to zero as a result of the 945 * calloc above. 946 */ 947 if (map_def_sz <= sizeof(struct bpf_map_def)) { 948 memcpy(&obj->maps[map_idx].def, def, map_def_sz); 949 } else { 950 /* 951 * Here the map structure being read is bigger than what 952 * we expect, truncate if the excess bits are all zero. 953 * If they are not zero, reject this map as 954 * incompatible. 955 */ 956 char *b; 957 for (b = ((char *)def) + sizeof(struct bpf_map_def); 958 b < ((char *)def) + map_def_sz; b++) { 959 if (*b != 0) { 960 pr_warning("maps section in %s: \"%s\" " 961 "has unrecognized, non-zero " 962 "options\n", 963 obj->path, map_name); 964 if (strict) 965 return -EINVAL; 966 } 967 } 968 memcpy(&obj->maps[map_idx].def, def, 969 sizeof(struct bpf_map_def)); 970 } 971 map_idx++; 972 } 973 974 /* 975 * Populate rest of obj->maps with libbpf internal maps. 
	 */
	if (obj->efile.data_shndx >= 0)
		ret = bpf_object__init_internal_map(obj, &obj->maps[map_idx++],
						    LIBBPF_MAP_DATA,
						    obj->efile.data,
						    &obj->sections.data);
	if (!ret && obj->efile.rodata_shndx >= 0)
		ret = bpf_object__init_internal_map(obj, &obj->maps[map_idx++],
						    LIBBPF_MAP_RODATA,
						    obj->efile.rodata,
						    &obj->sections.rodata);
	if (!ret && obj->efile.bss_shndx >= 0)
		ret = bpf_object__init_internal_map(obj, &obj->maps[map_idx++],
						    LIBBPF_MAP_BSS,
						    obj->efile.bss, NULL);
	/* keep maps ordered by section offset; internal maps (offset
	 * ~0) sort last
	 */
	if (!ret)
		qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]),
		      compare_bpf_map);
	return ret;
}

/* True when section idx carries executable instructions. */
static bool section_have_execinstr(struct bpf_object *obj, int idx)
{
	Elf_Scn *scn;
	GElf_Shdr sh;

	scn = elf_getscn(obj->efile.elf, idx);
	if (!scn)
		return false;

	if (gelf_getshdr(scn, &sh) != &sh)
		return false;

	if (sh.sh_flags & SHF_EXECINSTR)
		return true;

	return false;
}

/* Walk all ELF sections once and dispatch on each: license/version,
 * "maps", BTF sections, symbol table, program (exec) sections, global
 * data sections and relocation sections.
 */
static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
{
	Elf *elf = obj->efile.elf;
	GElf_Ehdr *ep = &obj->efile.ehdr;
	Elf_Data *btf_ext_data = NULL;
	Elf_Data *btf_data = NULL;
	Elf_Scn *scn = NULL;
	int idx = 0, err = 0;

	/* Elf is corrupted/truncated, avoid calling elf_strptr.
	 */
	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
		pr_warning("failed to get e_shstrndx from %s\n",
			   obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		char *name;
		GElf_Shdr sh;
		Elf_Data *data;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warning("failed to get section(%d) header from %s\n",
				   idx, obj->path);
			err = -LIBBPF_ERRNO__FORMAT;
			goto out;
		}

		name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
		if (!name) {
			pr_warning("failed to get section(%d) name from %s\n",
				   idx, obj->path);
			err = -LIBBPF_ERRNO__FORMAT;
			goto out;
		}

		data = elf_getdata(scn, 0);
		if (!data) {
			pr_warning("failed to get section(%d) data from %s(%s)\n",
				   idx, name, obj->path);
			err = -LIBBPF_ERRNO__FORMAT;
			goto out;
		}
		pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
			 idx, name, (unsigned long)data->d_size,
			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
			 (int)sh.sh_type);

		if (strcmp(name, "license") == 0) {
			err = bpf_object__init_license(obj,
						       data->d_buf,
						       data->d_size);
		} else if (strcmp(name, "version") == 0) {
			err = bpf_object__init_kversion(obj,
							data->d_buf,
							data->d_size);
		} else if (strcmp(name, "maps") == 0) {
			/* contents parsed later by bpf_object__init_maps() */
			obj->efile.maps_shndx = idx;
		} else if (strcmp(name, BTF_ELF_SEC) == 0) {
			btf_data = data;
		} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
			btf_ext_data = data;
		} else if (sh.sh_type == SHT_SYMTAB) {
			if (obj->efile.symbols) {
				pr_warning("bpf: multiple SYMTAB in %s\n",
					   obj->path);
				err = -LIBBPF_ERRNO__FORMAT;
			} else {
				obj->efile.symbols = data;
				obj->efile.strtabidx = sh.sh_link;
			}
		} else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
			if (sh.sh_flags & SHF_EXECINSTR) {
				if (strcmp(name, ".text") ==
0) 1090 obj->efile.text_shndx = idx; 1091 err = bpf_object__add_program(obj, data->d_buf, 1092 data->d_size, name, idx); 1093 if (err) { 1094 char errmsg[STRERR_BUFSIZE]; 1095 char *cp = libbpf_strerror_r(-err, errmsg, 1096 sizeof(errmsg)); 1097 1098 pr_warning("failed to alloc program %s (%s): %s", 1099 name, obj->path, cp); 1100 } 1101 } else if (strcmp(name, ".data") == 0) { 1102 obj->efile.data = data; 1103 obj->efile.data_shndx = idx; 1104 } else if (strcmp(name, ".rodata") == 0) { 1105 obj->efile.rodata = data; 1106 obj->efile.rodata_shndx = idx; 1107 } else { 1108 pr_debug("skip section(%d) %s\n", idx, name); 1109 } 1110 } else if (sh.sh_type == SHT_REL) { 1111 void *reloc = obj->efile.reloc; 1112 int nr_reloc = obj->efile.nr_reloc + 1; 1113 int sec = sh.sh_info; /* points to other section */ 1114 1115 /* Only do relo for section with exec instructions */ 1116 if (!section_have_execinstr(obj, sec)) { 1117 pr_debug("skip relo %s(%d) for section(%d)\n", 1118 name, idx, sec); 1119 continue; 1120 } 1121 1122 reloc = reallocarray(reloc, nr_reloc, 1123 sizeof(*obj->efile.reloc)); 1124 if (!reloc) { 1125 pr_warning("realloc failed\n"); 1126 err = -ENOMEM; 1127 } else { 1128 int n = nr_reloc - 1; 1129 1130 obj->efile.reloc = reloc; 1131 obj->efile.nr_reloc = nr_reloc; 1132 1133 obj->efile.reloc[n].shdr = sh; 1134 obj->efile.reloc[n].data = data; 1135 } 1136 } else if (sh.sh_type == SHT_NOBITS && strcmp(name, ".bss") == 0) { 1137 obj->efile.bss = data; 1138 obj->efile.bss_shndx = idx; 1139 } else { 1140 pr_debug("skip section(%d) %s\n", idx, name); 1141 } 1142 if (err) 1143 goto out; 1144 } 1145 1146 if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) { 1147 pr_warning("Corrupted ELF file: index of strtab invalid\n"); 1148 return LIBBPF_ERRNO__FORMAT; 1149 } 1150 if (btf_data) { 1151 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size); 1152 if (IS_ERR(obj->btf)) { 1153 pr_warning("Error loading ELF section %s: %ld. 
Ignored and continue.\n", 1154 BTF_ELF_SEC, PTR_ERR(obj->btf)); 1155 obj->btf = NULL; 1156 } else { 1157 err = btf__finalize_data(obj, obj->btf); 1158 if (!err) 1159 err = btf__load(obj->btf); 1160 if (err) { 1161 pr_warning("Error finalizing and loading %s into kernel: %d. Ignored and continue.\n", 1162 BTF_ELF_SEC, err); 1163 btf__free(obj->btf); 1164 obj->btf = NULL; 1165 err = 0; 1166 } 1167 } 1168 } 1169 if (btf_ext_data) { 1170 if (!obj->btf) { 1171 pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n", 1172 BTF_EXT_ELF_SEC, BTF_ELF_SEC); 1173 } else { 1174 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, 1175 btf_ext_data->d_size); 1176 if (IS_ERR(obj->btf_ext)) { 1177 pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n", 1178 BTF_EXT_ELF_SEC, 1179 PTR_ERR(obj->btf_ext)); 1180 obj->btf_ext = NULL; 1181 } 1182 } 1183 } 1184 if (bpf_object__has_maps(obj)) { 1185 err = bpf_object__init_maps(obj, flags); 1186 if (err) 1187 goto out; 1188 } 1189 err = bpf_object__init_prog_names(obj); 1190 out: 1191 return err; 1192 } 1193 1194 static struct bpf_program * 1195 bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx) 1196 { 1197 struct bpf_program *prog; 1198 size_t i; 1199 1200 for (i = 0; i < obj->nr_programs; i++) { 1201 prog = &obj->programs[i]; 1202 if (prog->idx == idx) 1203 return prog; 1204 } 1205 return NULL; 1206 } 1207 1208 struct bpf_program * 1209 bpf_object__find_program_by_title(struct bpf_object *obj, const char *title) 1210 { 1211 struct bpf_program *pos; 1212 1213 bpf_object__for_each_program(pos, obj) { 1214 if (pos->section_name && !strcmp(pos->section_name, title)) 1215 return pos; 1216 } 1217 return NULL; 1218 } 1219 1220 static bool bpf_object__shndx_is_data(const struct bpf_object *obj, 1221 int shndx) 1222 { 1223 return shndx == obj->efile.data_shndx || 1224 shndx == obj->efile.bss_shndx || 1225 shndx == obj->efile.rodata_shndx; 1226 } 1227 1228 static bool 
bpf_object__shndx_is_maps(const struct bpf_object *obj,
			  int shndx)
{
	return shndx == obj->efile.maps_shndx;
}

/* Relocations may only reference .text, the "maps" section or one of
 * the global data sections.
 */
static bool bpf_object__relo_in_known_section(const struct bpf_object *obj,
					      int shndx)
{
	return shndx == obj->efile.text_shndx ||
	       bpf_object__shndx_is_maps(obj, shndx) ||
	       bpf_object__shndx_is_data(obj, shndx);
}

/* Map a global data section index to its internal map type;
 * LIBBPF_MAP_UNSPEC for anything else.
 */
static enum libbpf_map_type
bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
{
	if (shndx == obj->efile.data_shndx)
		return LIBBPF_MAP_DATA;
	else if (shndx == obj->efile.bss_shndx)
		return LIBBPF_MAP_BSS;
	else if (shndx == obj->efile.rodata_shndx)
		return LIBBPF_MAP_RODATA;
	else
		return LIBBPF_MAP_UNSPEC;
}

/* Parse one relocation section into prog->reloc_desc[], classifying
 * each record (map load, global data access or bpf-to-bpf call).
 */
static int
bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
			   Elf_Data *data, struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_map *maps = obj->maps;
	size_t nr_maps = obj->nr_maps;
	int i, nrels;

	pr_debug("collecting relocating info for: '%s'\n",
		 prog->section_name);
	nrels = shdr->sh_size / shdr->sh_entsize;

	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
	if (!prog->reloc_desc) {
		pr_warning("failed to alloc memory in relocation\n");
		return -ENOMEM;
	}
	prog->nr_reloc = nrels;

	for (i = 0; i < nrels; i++) {
		GElf_Sym sym;
		GElf_Rel rel;
		unsigned int insn_idx;
		unsigned int shdr_idx;
		struct bpf_insn *insns = prog->insns;
		enum libbpf_map_type type;
		const char *name;
		size_t map_idx;

		if (!gelf_getrel(data, i, &rel)) {
			pr_warning("relocation: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (!gelf_getsym(symbols,
				 GELF_R_SYM(rel.r_info),
				 &sym)) {
			pr_warning("relocation: symbol %"PRIx64" not found\n",
				   GELF_R_SYM(rel.r_info));
return -LIBBPF_ERRNO__FORMAT; 1296 } 1297 1298 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, 1299 sym.st_name) ? : "<?>"; 1300 1301 pr_debug("relo for %lld value %lld name %d (\'%s\')\n", 1302 (long long) (rel.r_info >> 32), 1303 (long long) sym.st_value, sym.st_name, name); 1304 1305 shdr_idx = sym.st_shndx; 1306 if (!bpf_object__relo_in_known_section(obj, shdr_idx)) { 1307 pr_warning("Program '%s' contains unrecognized relo data pointing to section %u\n", 1308 prog->section_name, shdr_idx); 1309 return -LIBBPF_ERRNO__RELOC; 1310 } 1311 1312 insn_idx = rel.r_offset / sizeof(struct bpf_insn); 1313 pr_debug("relocation: insn_idx=%u\n", insn_idx); 1314 1315 if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) { 1316 if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) { 1317 pr_warning("incorrect bpf_call opcode\n"); 1318 return -LIBBPF_ERRNO__RELOC; 1319 } 1320 prog->reloc_desc[i].type = RELO_CALL; 1321 prog->reloc_desc[i].insn_idx = insn_idx; 1322 prog->reloc_desc[i].text_off = sym.st_value; 1323 obj->has_pseudo_calls = true; 1324 continue; 1325 } 1326 1327 if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) { 1328 pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n", 1329 insn_idx, insns[insn_idx].code); 1330 return -LIBBPF_ERRNO__RELOC; 1331 } 1332 1333 if (bpf_object__shndx_is_maps(obj, shdr_idx) || 1334 bpf_object__shndx_is_data(obj, shdr_idx)) { 1335 type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx); 1336 if (type != LIBBPF_MAP_UNSPEC && 1337 GELF_ST_BIND(sym.st_info) == STB_GLOBAL) { 1338 pr_warning("bpf: relocation: not yet supported relo for non-static global \'%s\' variable found in insns[%d].code 0x%x\n", 1339 name, insn_idx, insns[insn_idx].code); 1340 return -LIBBPF_ERRNO__RELOC; 1341 } 1342 1343 for (map_idx = 0; map_idx < nr_maps; map_idx++) { 1344 if (maps[map_idx].libbpf_type != type) 1345 continue; 1346 if (type != LIBBPF_MAP_UNSPEC || 1347 (type == LIBBPF_MAP_UNSPEC && 1348 maps[map_idx].offset == sym.st_value)) { 
1349 pr_debug("relocation: find map %zd (%s) for insn %u\n", 1350 map_idx, maps[map_idx].name, insn_idx); 1351 break; 1352 } 1353 } 1354 1355 if (map_idx >= nr_maps) { 1356 pr_warning("bpf relocation: map_idx %d large than %d\n", 1357 (int)map_idx, (int)nr_maps - 1); 1358 return -LIBBPF_ERRNO__RELOC; 1359 } 1360 1361 prog->reloc_desc[i].type = type != LIBBPF_MAP_UNSPEC ? 1362 RELO_DATA : RELO_LD64; 1363 prog->reloc_desc[i].insn_idx = insn_idx; 1364 prog->reloc_desc[i].map_idx = map_idx; 1365 } 1366 } 1367 return 0; 1368 } 1369 1370 static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf) 1371 { 1372 struct bpf_map_def *def = &map->def; 1373 __u32 key_type_id = 0, value_type_id = 0; 1374 int ret; 1375 1376 if (!bpf_map__is_internal(map)) { 1377 ret = btf__get_map_kv_tids(btf, map->name, def->key_size, 1378 def->value_size, &key_type_id, 1379 &value_type_id); 1380 } else { 1381 /* 1382 * LLVM annotates global data differently in BTF, that is, 1383 * only as '.data', '.bss' or '.rodata'. 1384 */ 1385 ret = btf__find_by_name(btf, 1386 libbpf_type_to_btf_name[map->libbpf_type]); 1387 } 1388 if (ret < 0) 1389 return ret; 1390 1391 map->btf_key_type_id = key_type_id; 1392 map->btf_value_type_id = bpf_map__is_internal(map) ? 
1393 ret : value_type_id; 1394 return 0; 1395 } 1396 1397 int bpf_map__reuse_fd(struct bpf_map *map, int fd) 1398 { 1399 struct bpf_map_info info = {}; 1400 __u32 len = sizeof(info); 1401 int new_fd, err; 1402 char *new_name; 1403 1404 err = bpf_obj_get_info_by_fd(fd, &info, &len); 1405 if (err) 1406 return err; 1407 1408 new_name = strdup(info.name); 1409 if (!new_name) 1410 return -errno; 1411 1412 new_fd = open("/", O_RDONLY | O_CLOEXEC); 1413 if (new_fd < 0) 1414 goto err_free_new_name; 1415 1416 new_fd = dup3(fd, new_fd, O_CLOEXEC); 1417 if (new_fd < 0) 1418 goto err_close_new_fd; 1419 1420 err = zclose(map->fd); 1421 if (err) 1422 goto err_close_new_fd; 1423 free(map->name); 1424 1425 map->fd = new_fd; 1426 map->name = new_name; 1427 map->def.type = info.type; 1428 map->def.key_size = info.key_size; 1429 map->def.value_size = info.value_size; 1430 map->def.max_entries = info.max_entries; 1431 map->def.map_flags = info.map_flags; 1432 map->btf_key_type_id = info.btf_key_type_id; 1433 map->btf_value_type_id = info.btf_value_type_id; 1434 1435 return 0; 1436 1437 err_close_new_fd: 1438 close(new_fd); 1439 err_free_new_name: 1440 free(new_name); 1441 return -errno; 1442 } 1443 1444 int bpf_map__resize(struct bpf_map *map, __u32 max_entries) 1445 { 1446 if (!map || !max_entries) 1447 return -EINVAL; 1448 1449 /* If map already created, its attributes can't be changed. 
	 */
	if (map->fd >= 0)
		return -EBUSY;

	map->def.max_entries = max_entries;

	return 0;
}

/* Probe whether the running kernel accepts prog/map names (v4.14+)
 * by loading a trivial "r0 = 0; exit" program with and without a name.
 * Result is recorded in obj->caps.name.
 */
static int
bpf_object__probe_name(struct bpf_object *obj)
{
	struct bpf_load_program_attr attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int ret;

	/* make sure basic loading works */

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = insns;
	attr.insns_cnt = ARRAY_SIZE(insns);
	attr.license = "GPL";

	ret = bpf_load_program_xattr(&attr, NULL, 0);
	if (ret < 0) {
		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
		pr_warning("Error in %s():%s(%d). Couldn't load basic 'r0 = 0' BPF program.\n",
			   __func__, cp, errno);
		return -errno;
	}
	close(ret);

	/* now try the same program, but with the name */

	attr.name = "test";
	ret = bpf_load_program_xattr(&attr, NULL, 0);
	if (ret >= 0) {
		obj->caps.name = 1;
		close(ret);
	}

	return 0;
}

/* Run all kernel-capability probes (currently just the name probe). */
static int
bpf_object__probe_caps(struct bpf_object *obj)
{
	return bpf_object__probe_name(obj);
}

/* Copy the ELF data/rodata payload into an internal (global-data) map
 * at key 0, and freeze .rodata so userspace can no longer modify it.
 * A failed freeze is only warned about, not treated as fatal.
 */
static int
bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	int err, zero = 0;
	__u8 *data;

	/* Nothing to do here since kernel already zero-initializes .bss map. */
	if (map->libbpf_type == LIBBPF_MAP_BSS)
		return 0;

	data = map->libbpf_type == LIBBPF_MAP_DATA ?
	       obj->sections.data : obj->sections.rodata;

	err = bpf_map_update_elem(map->fd, &zero, data, 0);
	/* Freeze .rodata map as read-only from syscall side. */
	if (!err && map->libbpf_type == LIBBPF_MAP_RODATA) {
		err = bpf_map_freeze(map->fd);
		if (err) {
			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
			pr_warning("Error freezing map(%s) as read-only: %s\n",
				   map->name, cp);
			err = 0;
		}
	}
	return err;
}

/* Create every map of @obj in the kernel (skipping maps whose fd was
 * preset via bpf_map__reuse_fd), attaching BTF ids when available and
 * retrying without BTF if the kernel rejects them.  On any failure all
 * maps created so far are closed.
 */
static int
bpf_object__create_maps(struct bpf_object *obj)
{
	struct bpf_create_map_attr create_attr = {};
	unsigned int i;
	int err;

	for (i = 0; i < obj->nr_maps; i++) {
		struct bpf_map *map = &obj->maps[i];
		struct bpf_map_def *def = &map->def;
		char *cp, errmsg[STRERR_BUFSIZE];
		int *pfd = &map->fd;

		if (map->fd >= 0) {
			pr_debug("skip map create (preset) %s: fd=%d\n",
				 map->name, map->fd);
			continue;
		}

		if (obj->caps.name)
			create_attr.name = map->name;
		create_attr.map_ifindex = map->map_ifindex;
		create_attr.map_type = def->type;
		create_attr.map_flags = def->map_flags;
		create_attr.key_size = def->key_size;
		create_attr.value_size = def->value_size;
		create_attr.max_entries = def->max_entries;
		create_attr.btf_fd = 0;
		create_attr.btf_key_type_id = 0;
		create_attr.btf_value_type_id = 0;
		if (bpf_map_type__is_map_in_map(def->type) &&
		    map->inner_map_fd >= 0)
			create_attr.inner_map_fd = map->inner_map_fd;

		if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) {
			create_attr.btf_fd = btf__fd(obj->btf);
			create_attr.btf_key_type_id = map->btf_key_type_id;
			create_attr.btf_value_type_id = map->btf_value_type_id;
		}

		*pfd = bpf_create_map_xattr(&create_attr);
		if (*pfd < 0 && create_attr.btf_key_type_id) {
			/* Older kernels may reject BTF-annotated maps;
			 * retry once with all BTF info stripped.
			 */
			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
			pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
				   map->name, cp, errno);
			create_attr.btf_fd = 0;
			create_attr.btf_key_type_id = 0;
			create_attr.btf_value_type_id = 0;
			map->btf_key_type_id = 0;
			map->btf_value_type_id = 0;
			*pfd = bpf_create_map_xattr(&create_attr);
		}

		if (*pfd < 0) {
			size_t j;

			err = *pfd;
err_out:
			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
			pr_warning("failed to create map (name: '%s'): %s\n",
				   map->name, cp);
			/* unwind: close everything created before map i */
			for (j = 0; j < i; j++)
				zclose(obj->maps[j].fd);
			return err;
		}

		if (bpf_map__is_internal(map)) {
			err = bpf_object__populate_internal_map(obj, map);
			if (err < 0) {
				zclose(*pfd);
				goto err_out;
			}
		}

		pr_debug("create map %s: fd=%d\n", map->name, *pfd);
	}

	return 0;
}

/* Decide how to react to a btf_ext relocation error: -ENOENT on the
 * very first lookup is tolerated (BTF ext info is dropped with a
 * warning), anything else is propagated.
 */
static int
check_btf_ext_reloc_err(struct bpf_program *prog, int err,
			void *btf_prog_info, const char *info_name)
{
	if (err != -ENOENT) {
		pr_warning("Error in loading %s for sec %s.\n",
			   info_name, prog->section_name);
		return err;
	}

	/* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */

	if (btf_prog_info) {
		/*
		 * Some info has already been found but has problem
		 * in the last btf_ext reloc. Must have to error
		 * out.
		 */
		pr_warning("Error in relocating %s for sec %s.\n",
			   info_name, prog->section_name);
		return err;
	}

	/*
	 * Have problem loading the very first info. Ignore
	 * the rest.
	 */
	pr_warning("Cannot find %s for main program sec %s. Ignore all %s.\n",
		   info_name, prog->section_name, info_name);
	return 0;
}

/* Pull func_info/line_info for @section_name out of .BTF.ext into
 * @prog, offset by @insn_offset.  insn_offset == 0 means the main
 * program; sub-programs require main's func_info to be loaded first.
 */
static int
bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
			  const char *section_name, __u32 insn_offset)
{
	int err;

	if (!insn_offset || prog->func_info) {
		/*
		 * !insn_offset => main program
		 *
		 * For sub prog, the main program's func_info has to
		 * be loaded first (i.e. prog->func_info != NULL)
		 */
		err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext,
					       section_name, insn_offset,
					       &prog->func_info,
					       &prog->func_info_cnt);
		if (err)
			return check_btf_ext_reloc_err(prog, err,
						       prog->func_info,
						       "bpf_func_info");

		prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext);
	}

	if (!insn_offset || prog->line_info) {
		err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext,
					       section_name, insn_offset,
					       &prog->line_info,
					       &prog->line_info_cnt);
		if (err)
			return check_btf_ext_reloc_err(prog, err,
						       prog->line_info,
						       "bpf_line_info");

		prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
	}

	if (!insn_offset)
		prog->btf_fd = btf__fd(obj->btf);

	return 0;
}

/* Resolve one RELO_CALL relocation: on first use, append the whole
 * .text section to @prog's instructions, then fix up the call insn's
 * immediate to the callee's offset within the appended text.
 */
static int
bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
			struct reloc_desc *relo)
{
	struct bpf_insn *insn, *new_insn;
	struct bpf_program *text;
	size_t new_cnt;
	int err;

	if (relo->type != RELO_CALL)
		return -LIBBPF_ERRNO__RELOC;

	if (prog->idx == obj->efile.text_shndx) {
		pr_warning("relo in .text insn %d into off %d\n",
			   relo->insn_idx, relo->text_off);
		return -LIBBPF_ERRNO__RELOC;
	}

	if (prog->main_prog_cnt == 0) {
		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
		if (!text) {
			pr_warning("no .text section found yet relo into text exist\n");
			return -LIBBPF_ERRNO__RELOC;
		}
		new_cnt = prog->insns_cnt + text->insns_cnt;
		new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
		if (!new_insn) {
			pr_warning("oom in prog realloc\n");
			return -ENOMEM;
		}

		if (obj->btf_ext) {
			/* pull in BTF ext info for the appended .text,
			 * shifted to its new insn offset in this prog
			 */
			err = bpf_program_reloc_btf_ext(prog, obj,
							text->section_name,
							prog->insns_cnt);
			if (err)
				return err;
		}

		memcpy(new_insn + prog->insns_cnt, text->insns,
		       text->insns_cnt * sizeof(*insn));
		prog->insns = new_insn;
		prog->main_prog_cnt = prog->insns_cnt;
		prog->insns_cnt = new_cnt;
		pr_debug("added %zd insn from %s to prog %s\n",
			 text->insns_cnt, text->section_name,
			 prog->section_name);
	}
	/* Rewrite the call's immediate to be relative to the callee's
	 * position inside the combined (prog + .text) insn array.
	 */
	insn = &prog->insns[relo->insn_idx];
	insn->imm += prog->main_prog_cnt - relo->insn_idx;
	return 0;
}

/* Apply all recorded relocations of @prog: patch ld_imm64 insns with
 * map fds (RELO_LD64) or map fd + in-map offset (RELO_DATA), resolve
 * bpf-to-bpf calls (RELO_CALL), and attach BTF ext info if present.
 * reloc_desc is freed afterwards; relocation happens only once.
 */
static int
bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
{
	int i, err;

	if (!prog)
		return 0;

	if (obj->btf_ext) {
		err = bpf_program_reloc_btf_ext(prog, obj,
						prog->section_name, 0);
		if (err)
			return err;
	}

	if (!prog->reloc_desc)
		return 0;

	for (i = 0; i < prog->nr_reloc; i++) {
		if (prog->reloc_desc[i].type == RELO_LD64 ||
		    prog->reloc_desc[i].type == RELO_DATA) {
			bool relo_data = prog->reloc_desc[i].type == RELO_DATA;
			struct bpf_insn *insns = prog->insns;
			int insn_idx, map_idx;

			insn_idx = prog->reloc_desc[i].insn_idx;
			map_idx = prog->reloc_desc[i].map_idx;

			/* ld_imm64 spans two insn slots, hence +1 */
			if (insn_idx + 1 >= (int)prog->insns_cnt) {
				pr_warning("relocation out of range: '%s'\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__RELOC;
			}

			if (!relo_data) {
				insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
			} else {
				/* data relo: second slot carries the
				 * offset within the map value
				 */
				insns[insn_idx].src_reg = BPF_PSEUDO_MAP_VALUE;
				insns[insn_idx + 1].imm = insns[insn_idx].imm;
			}
			insns[insn_idx].imm = obj->maps[map_idx].fd;
		} else if (prog->reloc_desc[i].type == RELO_CALL) {
			err = bpf_program__reloc_text(prog, obj,
						      &prog->reloc_desc[i]);
			if (err)
				return err;
		}
	}

	zfree(&prog->reloc_desc);
	prog->nr_reloc = 0;
	return 0;
}


/* Relocate every program of @obj; first failure aborts. */
static int
bpf_object__relocate(struct bpf_object *obj)
{
	struct bpf_program *prog;
	size_t i;
	int err;

	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];

		err = bpf_program__relocate(prog, obj);
		if (err) {
			pr_warning("failed to relocate '%s'\n",
				   prog->section_name);
			return err;
		}
	}
	return 0;
}

/* Walk all SHT_REL sections gathered during ELF parsing and collect
 * the relocations into their owning programs (resolved via sh_info).
 * Requires the ELF handle to still be open.
 */
static int bpf_object__collect_reloc(struct bpf_object *obj)
{
	int i, err;

	if (!obj_elf_valid(obj)) {
		pr_warning("Internal error: elf object is closed\n");
		return -LIBBPF_ERRNO__INTERNAL;
	}

	for (i = 0; i < obj->efile.nr_reloc; i++) {
		GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
		Elf_Data *data = obj->efile.reloc[i].data;
		int idx = shdr->sh_info;
		struct bpf_program *prog;

		if (shdr->sh_type != SHT_REL) {
			pr_warning("internal error at %d\n", __LINE__);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog = bpf_object__find_prog_by_idx(obj, idx);
		if (!prog) {
			pr_warning("relocation failed: no section(%d)\n", idx);
			return -LIBBPF_ERRNO__RELOC;
		}

		err = bpf_program__collect_reloc(prog,
						 shdr, data,
						 obj);
		if (err)
			return err;
	}
	return 0;
}

/* Load one instance of a program into the kernel via BPF_PROG_LOAD,
 * growing the verifier log buffer on ENOSPC and translating common
 * failure modes into specific libbpf error codes.  *pfd receives the
 * program fd on success.
 */
static int
load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
	     char *license, __u32 kern_version, int *pfd)
{
	struct bpf_load_program_attr load_attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	int log_buf_size = BPF_LOG_BUF_SIZE;
	char *log_buf;
	int ret;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = prog->type;
	load_attr.expected_attach_type = prog->expected_attach_type;
	if (prog->caps->name)
		load_attr.name = prog->name;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;
	load_attr.prog_ifindex = prog->prog_ifindex;
	/* btf_fd < 0 means no BTF was loaded for this object */
	load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0;
	load_attr.func_info = prog->func_info;
	load_attr.func_info_rec_size = prog->func_info_rec_size;
	load_attr.func_info_cnt = prog->func_info_cnt;
	load_attr.line_info = prog->line_info;
	load_attr.line_info_rec_size = prog->line_info_rec_size;
	load_attr.line_info_cnt = prog->line_info_cnt;
	load_attr.log_level = prog->log_level;
	if (!load_attr.insns || !load_attr.insns_cnt)
		return -EINVAL;

retry_load:
	log_buf = malloc(log_buf_size);
	if (!log_buf)
		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

	ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size);

	if (ret >= 0) {
		if (load_attr.log_level)
			pr_debug("verifier log:\n%s", log_buf);
		*pfd = ret;
		ret = 0;
		goto out;
	}

	/* Verifier log overflow: double the buffer and try again. */
	if (errno == ENOSPC) {
		log_buf_size <<= 1;
		free(log_buf);
		goto retry_load;
	}
	ret = -LIBBPF_ERRNO__LOAD;
	cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
	pr_warning("load bpf program failed: %s\n", cp);

	if (log_buf && log_buf[0] != '\0') {
		/* non-empty log => verifier rejected the program */
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
		pr_warning("Program too large (%zu insns), at most %d insns\n",
			   load_attr.insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else {
		/* Wrong program type? Re-try as kprobe to diagnose. */
		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
			int fd;

			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
			load_attr.expected_attach_type = 0;
			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
			if (fd >= 0) {
				close(fd);
				ret = -LIBBPF_ERRNO__PROGTYPE;
				goto out;
			}
		}

		if (log_buf)
			ret = -LIBBPF_ERRNO__KVER;
	}

out:
	free(log_buf);
	return ret;
}

/* Load @prog into the kernel.  Without a preprocessor there is a
 * single instance; with one, each instance is generated by the
 * preprocessor callback and loaded separately.  The raw insns are
 * freed afterwards, so a program can be loaded only once.
 */
int
bpf_program__load(struct bpf_program *prog,
		  char *license, __u32 kern_version)
{
	int err = 0, fd, i;

	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		/* lazily set up the default single-instance fd slot */
		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog, prog->insns, prog->insns_cnt,
				   license, kern_version, &fd);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		memset(&result, 0, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			/* preprocessor asked to skip this instance */
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i]
				= -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog, result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd);

		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	/* insns are consumed by loading; drop them either way */
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}

/* The .text "program" is only a container for subprograms when
 * bpf-to-bpf calls are in use; it is not loaded by itself.
 */
static bool bpf_program__is_function_storage(struct bpf_program *prog,
					     struct bpf_object *obj)
{
	return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
}

/* Load every real program of @obj (skipping pure function storage). */
static int
bpf_object__load_progs(struct bpf_object *obj)
{
	size_t i;
	int err;

	for (i = 0; i < obj->nr_programs; i++) {
		if (bpf_program__is_function_storage(&obj->programs[i], obj))
			continue;
		err = bpf_program__load(&obj->programs[i],
					obj->license,
					obj->kern_version);
		if (err)
			return err;
	}
	return 0;
}

/* Whether a program type requires a kernel version to be present in
 * the object (historically only kprobes; unknown/default types are
 * treated conservatively as requiring it).
 */
static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
{
	switch (type) {
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_LIRC_MODE2:
	case BPF_PROG_TYPE_SK_REUSEPORT:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
		return false;
	case BPF_PROG_TYPE_KPROBE:
	default:
		return true;
	}
}

/* Reject objects that need a kernel version but don't carry one. */
static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
{
	if (needs_kver && obj->kern_version == 0) {
		pr_warning("%s doesn't provide kernel version\n",
			   obj->path);
		return -LIBBPF_ERRNO__KVERSION;
	}
	return 0;
}

/* Core open path: parse the ELF (from file @path or memory buffer),
 * collect programs/maps/relocations, validate, and release the ELF
 * handle.  Returns the new object or an ERR_PTR on failure.
 */
static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
		   bool needs_kver, int flags)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);

	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}

/* Open an object file described by @attr; returns NULL on bad args
 * (legacy behavior), otherwise object pointer or ERR_PTR.
 */
struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
					    int flags)
{
	/* param validation */
	if (!attr->file)
		return NULL;

	pr_debug("loading %s\n", attr->file);

	return __bpf_object__open(attr->file, NULL, 0,
				  bpf_prog_type__needs_kver(attr->prog_type),
				  flags);
}

struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
{
	return __bpf_object__open_xattr(attr, 0);
}

/* Convenience wrapper: open @path with unspecified program type. */
struct bpf_object *bpf_object__open(const char *path)
{
	struct bpf_object_open_attr attr = {
		.file = path,
		.prog_type = BPF_PROG_TYPE_UNSPEC,
	};

	return bpf_object__open_xattr(&attr);
}

/* Open an object from an in-memory ELF image.  A synthetic name is
 * derived from the buffer address/size when none is given.
 */
struct bpf_object *bpf_object__open_buffer(void *obj_buf,
					   size_t obj_buf_sz,
					   const char *name)
{
	char tmp_name[64];

	/* param validation */
	if (!obj_buf || obj_buf_sz <= 0)
		return NULL;

	if (!name) {
		snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
			 (unsigned long)obj_buf,
			 (unsigned long)obj_buf_sz);
		tmp_name[sizeof(tmp_name) - 1] = '\0';
		name = tmp_name;
	}
	pr_debug("loading object '%s' from buffer\n",
		 name);

	/* NOTE(review): the last argument is the 'flags' parameter yet is
	 * passed 'true' (== 1) here — presumably intentional to relax map
	 * compat checks for buffers, but worth confirming upstream.
	 */
	return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true);
}

/* Close all map fds and unload all programs of @obj in the kernel. */
int bpf_object__unload(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return -EINVAL;

	for (i = 0; i < obj->nr_maps; i++)
		zclose(obj->maps[i].fd);

	for (i = 0; i < obj->nr_programs; i++)
		bpf_program__unload(&obj->programs[i]);

	return 0;
}

/* Load @obj into the kernel: probe capabilities, create maps, apply
 * relocations, then load programs.  On failure everything loaded so
 * far is unloaded.  Loading twice is rejected.
 */
int bpf_object__load(struct bpf_object *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warning("object should not be loaded twice\n");
		return -EINVAL;
	}

	obj->loaded = true;

	CHECK_ERR(bpf_object__probe_caps(obj), err, out);
	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj), err, out);
	CHECK_ERR(bpf_object__load_progs(obj), err, out);

	return 0;
out:
	bpf_object__unload(obj);
	pr_warning("failed to load object '%s'\n", obj->path);
	return err;
}

/* Validate a pin path: its parent directory must exist and live on a
 * BPF filesystem (bpffs), since pinning only works there.
 */
static int check_path(const char *path)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	struct statfs st_fs;
	char *dname, *dir;
	int err = 0;

	if (path == NULL)
		return -EINVAL;

	/* dirname() may modify its argument, so work on a copy */
	dname = strdup(path);
	if (dname == NULL)
		return -ENOMEM;

	dir = dirname(dname);
	if (statfs(dir, &st_fs)) {
		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
		pr_warning("failed to statfs %s: %s\n", dir, cp);
		err = -errno;
	}
	free(dname);

	if (!err && st_fs.f_type != BPF_FS_MAGIC) {
		pr_warning("specified path %s is not on BPF FS\n", path);
		err = -EINVAL;
	}

	return err;
}

/* Pin one instance of @prog at @path (must be on bpffs). */
int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
			      int instance)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	int err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warning("invalid program pointer\n");
		return -EINVAL;
	}

	if (instance < 0 || instance >= prog->instances.nr) {
		pr_warning("invalid prog instance %d of prog %s (max %d)\n",
			   instance, prog->section_name, prog->instances.nr);
		return -EINVAL;
	}

	if (bpf_obj_pin(prog->instances.fds[instance], path)) {
		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
		pr_warning("failed to pin program: %s\n", cp);
		return -errno;
	}
	pr_debug("pinned program '%s'\n", path);

	return 0;
}

/* Remove the pin of one instance of @prog at @path. */
int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
				int instance)
{
	int err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warning("invalid program pointer\n");
		return -EINVAL;
	}

	if (instance < 0 || instance >= prog->instances.nr) {
		pr_warning("invalid prog instance %d of prog %s (max %d)\n",
			   instance, prog->section_name, prog->instances.nr);
		return -EINVAL;
	}

	err = unlink(path);
	if (err != 0)
		return -errno;
	pr_debug("unpinned program '%s'\n", path);

	return 0;
}

/* mkdir @path (0700); an already-existing directory is not an error. */
static int make_dir(const char *path)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	int err = 0;

	if (mkdir(path, 0700) && errno != EEXIST)
		err =
			-errno;

	if (err) {
		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
		pr_warning("failed to mkdir %s: %s\n", path, cp);
	}
	return err;
}

/* Pin @prog at @path.  A single-instance program is pinned directly;
 * multi-instance programs get a directory with one entry per instance.
 * On failure, instances pinned so far are unpinned and the directory
 * removed (best effort).
 */
int bpf_program__pin(struct bpf_program *prog, const char *path)
{
	int i, err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warning("invalid program pointer\n");
		return -EINVAL;
	}

	if (prog->instances.nr <= 0) {
		pr_warning("no instances of prog %s to pin\n",
			   prog->section_name);
		return -EINVAL;
	}

	if (prog->instances.nr == 1) {
		/* don't create subdirs when pinning single instance */
		return bpf_program__pin_instance(prog, path, 0);
	}

	err = make_dir(path);
	if (err)
		return err;

	for (i = 0; i < prog->instances.nr; i++) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin;
		}

		err = bpf_program__pin_instance(prog, buf, i);
		if (err)
			goto err_unpin;
	}

	return 0;

err_unpin:
	/* unwind: unpin instances [0, i) that succeeded */
	for (i = i - 1; i >= 0; i--) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin_instance(prog, buf, i);
	}

	rmdir(path);

	return err;
}

/* Reverse of bpf_program__pin(): unpin all instances and remove the
 * per-instance directory when one was created.
 */
int bpf_program__unpin(struct bpf_program *prog, const char *path)
{
	int i, err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warning("invalid program pointer\n");
		return -EINVAL;
	}

	if (prog->instances.nr <= 0) {
		pr_warning("no instances of prog %s to pin\n",
			   prog->section_name);
		return -EINVAL;
	}

	if (prog->instances.nr == 1) {
		/* don't create subdirs when pinning single instance */
		return bpf_program__unpin_instance(prog, path, 0);
	}

	for (i = 0; i < prog->instances.nr; i++) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0)
			return -EINVAL;
		else if (len >= PATH_MAX)
			return -ENAMETOOLONG;

		err = bpf_program__unpin_instance(prog, buf, i);
		if (err)
			return err;
	}

	err = rmdir(path);
	if (err)
		return -errno;

	return 0;
}

/* Pin @map's fd at @path (must be on bpffs). */
int bpf_map__pin(struct bpf_map *map, const char *path)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	int err;

	err = check_path(path);
	if (err)
		return err;

	if (map == NULL) {
		pr_warning("invalid map pointer\n");
		return -EINVAL;
	}

	if (bpf_obj_pin(map->fd, path)) {
		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
		pr_warning("failed to pin map: %s\n", cp);
		return -errno;
	}

	pr_debug("pinned map '%s'\n", path);

	return 0;
}

/* Remove @map's pin at @path. */
int bpf_map__unpin(struct bpf_map *map, const char *path)
{
	int err;

	err = check_path(path);
	if (err)
		return err;

	if (map == NULL) {
		pr_warning("invalid map pointer\n");
		return -EINVAL;
	}

	err = unlink(path);
	if (err != 0)
		return -errno;
	pr_debug("unpinned map '%s'\n", path);

	return 0;
}

/* Pin every map of a loaded object under directory @path, one entry
 * per map name; on failure, unwind the pins created so far.
 */
int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
{
	struct bpf_map *map;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warning("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	err = make_dir(path);
	if (err)
		return err;

	bpf_object__for_each_map(map, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf,
PATH_MAX, "%s/%s", path, 2493 bpf_map__name(map)); 2494 if (len < 0) { 2495 err = -EINVAL; 2496 goto err_unpin_maps; 2497 } else if (len >= PATH_MAX) { 2498 err = -ENAMETOOLONG; 2499 goto err_unpin_maps; 2500 } 2501 2502 err = bpf_map__pin(map, buf); 2503 if (err) 2504 goto err_unpin_maps; 2505 } 2506 2507 return 0; 2508 2509 err_unpin_maps: 2510 while ((map = bpf_map__prev(map, obj))) { 2511 char buf[PATH_MAX]; 2512 int len; 2513 2514 len = snprintf(buf, PATH_MAX, "%s/%s", path, 2515 bpf_map__name(map)); 2516 if (len < 0) 2517 continue; 2518 else if (len >= PATH_MAX) 2519 continue; 2520 2521 bpf_map__unpin(map, buf); 2522 } 2523 2524 return err; 2525 } 2526 2527 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path) 2528 { 2529 struct bpf_map *map; 2530 int err; 2531 2532 if (!obj) 2533 return -ENOENT; 2534 2535 bpf_object__for_each_map(map, obj) { 2536 char buf[PATH_MAX]; 2537 int len; 2538 2539 len = snprintf(buf, PATH_MAX, "%s/%s", path, 2540 bpf_map__name(map)); 2541 if (len < 0) 2542 return -EINVAL; 2543 else if (len >= PATH_MAX) 2544 return -ENAMETOOLONG; 2545 2546 err = bpf_map__unpin(map, buf); 2547 if (err) 2548 return err; 2549 } 2550 2551 return 0; 2552 } 2553 2554 int bpf_object__pin_programs(struct bpf_object *obj, const char *path) 2555 { 2556 struct bpf_program *prog; 2557 int err; 2558 2559 if (!obj) 2560 return -ENOENT; 2561 2562 if (!obj->loaded) { 2563 pr_warning("object not yet loaded; load it first\n"); 2564 return -ENOENT; 2565 } 2566 2567 err = make_dir(path); 2568 if (err) 2569 return err; 2570 2571 bpf_object__for_each_program(prog, obj) { 2572 char buf[PATH_MAX]; 2573 int len; 2574 2575 len = snprintf(buf, PATH_MAX, "%s/%s", path, 2576 prog->pin_name); 2577 if (len < 0) { 2578 err = -EINVAL; 2579 goto err_unpin_programs; 2580 } else if (len >= PATH_MAX) { 2581 err = -ENAMETOOLONG; 2582 goto err_unpin_programs; 2583 } 2584 2585 err = bpf_program__pin(prog, buf); 2586 if (err) 2587 goto err_unpin_programs; 2588 } 2589 2590 
	return 0;

err_unpin_programs:
	/* Undo pins of the programs that preceded the one that failed. */
	while ((prog = bpf_program__prev(prog, obj))) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin(prog, buf);
	}

	return err;
}

/* Unpin all programs of @obj pinned under directory @path. */
int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
{
	struct bpf_program *prog;
	int err;

	if (!obj)
		return -ENOENT;

	bpf_object__for_each_program(prog, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0)
			return -EINVAL;
		else if (len >= PATH_MAX)
			return -ENAMETOOLONG;

		err = bpf_program__unpin(prog, buf);
		if (err)
			return err;
	}

	return 0;
}

/* Pin all maps and then all programs of @obj under @path; if program
 * pinning fails, the already-pinned maps are unpinned again.
 */
int bpf_object__pin(struct bpf_object *obj, const char *path)
{
	int err;

	err = bpf_object__pin_maps(obj, path);
	if (err)
		return err;

	err = bpf_object__pin_programs(obj, path);
	if (err) {
		bpf_object__unpin_maps(obj, path);
		return err;
	}

	return 0;
}

/* Tear down @obj: run the user's clear_priv callback, release ELF and
 * kernel resources, free BTF data, maps and programs, and finally the
 * object itself. Safe to call with NULL.
 */
void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return;

	if (obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	bpf_object__elf_finish(obj);
	bpf_object__unload(obj);
	btf__free(obj->btf);
	btf_ext__free(obj->btf_ext);

	for (i = 0; i < obj->nr_maps; i++) {
		zfree(&obj->maps[i].name);
		if (obj->maps[i].clear_priv)
			obj->maps[i].clear_priv(&obj->maps[i],
						obj->maps[i].priv);
		obj->maps[i].priv = NULL;
		obj->maps[i].clear_priv = NULL;
	}

	zfree(&obj->sections.rodata);
	zfree(&obj->sections.data);
	zfree(&obj->maps);
	obj->nr_maps = 0;

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i < obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	/* Drop @obj from the global bpf_objects_list before freeing. */
	list_del(&obj->list);
	free(obj);
}

/* Iterate the global list of open objects; pass NULL to get the first
 * one. Returns NULL once the end of the list is reached.
 */
struct bpf_object *
bpf_object__next(struct bpf_object *prev)
{
	struct bpf_object *next;

	if (!prev)
		next = list_first_entry(&bpf_objects_list,
					struct bpf_object,
					list);
	else
		next = list_next_entry(prev, list);

	/* Empty list is noticed here so don't need checking on entry. */
	if (&next->list == &bpf_objects_list)
		return NULL;

	return next;
}

const char *bpf_object__name(struct bpf_object *obj)
{
	return obj ? obj->path : ERR_PTR(-EINVAL);
}

unsigned int bpf_object__kversion(struct bpf_object *obj)
{
	return obj ? obj->kern_version : 0;
}

struct btf *bpf_object__btf(struct bpf_object *obj)
{
	return obj ? obj->btf : NULL;
}

int bpf_object__btf_fd(const struct bpf_object *obj)
{
	return obj->btf ? btf__fd(obj->btf) : -1;
}

/* Attach caller-private data to @obj; a previous priv is released via
 * its clear_priv callback first.
 */
int bpf_object__set_priv(struct bpf_object *obj, void *priv,
			 bpf_object_clear_priv_t clear_priv)
{
	if (obj->priv && obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	obj->priv = priv;
	obj->clear_priv = clear_priv;
	return 0;
}

void *bpf_object__priv(struct bpf_object *obj)
{
	return obj ? obj->priv : ERR_PTR(-EINVAL);
}

/* Step from program @p to its neighbor inside @obj's programs array
 * (forward or backward); NULL @p starts from the respective end.
 * Returns NULL when iteration runs off either end of the array.
 */
static struct bpf_program *
__bpf_program__iter(struct bpf_program *p, struct bpf_object *obj, bool forward)
{
	size_t nr_programs = obj->nr_programs;
	ssize_t idx;

	if (!nr_programs)
		return NULL;

	if (!p)
		/* Iter from the beginning */
		return forward ? &obj->programs[0] :
				 &obj->programs[nr_programs - 1];

	if (p->obj != obj) {
		pr_warning("error: program handler doesn't match object\n");
		return NULL;
	}

	idx = (p - obj->programs) + (forward ? 1 : -1);
	if (idx >= obj->nr_programs || idx < 0)
		return NULL;
	return &obj->programs[idx];
}

/* Public forward iterator over @obj's programs; skips function-storage
 * (sub-program) entries.
 */
struct bpf_program *
bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
{
	struct bpf_program *prog = prev;

	do {
		prog = __bpf_program__iter(prog, obj, true);
	} while (prog && bpf_program__is_function_storage(prog, obj));

	return prog;
}

/* Public backward iterator over @obj's programs; skips function-storage
 * (sub-program) entries.
 */
struct bpf_program *
bpf_program__prev(struct bpf_program *next, struct bpf_object *obj)
{
	struct bpf_program *prog = next;

	do {
		prog = __bpf_program__iter(prog, obj, false);
	} while (prog && bpf_program__is_function_storage(prog, obj));

	return prog;
}

/* Attach caller-private data to @prog (previous priv released first). */
int bpf_program__set_priv(struct bpf_program *prog, void *priv,
			  bpf_program_clear_priv_t clear_priv)
{
	if (prog->priv && prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = priv;
	prog->clear_priv = clear_priv;
	return 0;
}

void *bpf_program__priv(struct bpf_program *prog)
{
	return prog ? prog->priv : ERR_PTR(-EINVAL);
}

void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
{
	prog->prog_ifindex = ifindex;
}

/* Return @prog's ELF section name. With @needs_copy the caller gets a
 * strdup()ed string it owns and must free; otherwise the returned
 * pointer stays owned by @prog.
 */
const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
{
	const char *title;

	title = prog->section_name;
	if (needs_copy) {
		title = strdup(title);
		if (!title) {
			pr_warning("failed to strdup program title\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	return title;
}

int bpf_program__fd(struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}

/* Install pre-processor @prep that will expand @prog into
 * @nr_instances kernel programs at load time. Must be called before
 * the program is loaded.
 */
int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
			  bpf_program_prep_t prep)
{
	int *instances_fds;

	if (nr_instances <= 0 || !prep)
		return -EINVAL;

	if (prog->instances.nr > 0 || prog->instances.fds) {
		pr_warning("Can't set pre-processor after loading\n");
		return -EINVAL;
	}

	instances_fds = malloc(sizeof(int) * nr_instances);
	if (!instances_fds) {
		pr_warning("alloc memory failed for fds\n");
		return -ENOMEM;
	}

	/* fill all fd with -1 */
	memset(instances_fds, -1, sizeof(int) * nr_instances);

	prog->instances.nr = nr_instances;
	prog->instances.fds = instances_fds;
	prog->preprocessor = prep;
	return 0;
}

/* Return the fd of the n-th loaded instance of @prog (0-based). */
int bpf_program__nth_fd(struct bpf_program *prog, int n)
{
	int fd;

	if (!prog)
		return -EINVAL;

	if (n >= prog->instances.nr || n < 0) {
		pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
			   n, prog->section_name, prog->instances.nr);
		return -EINVAL;
	}

	fd = prog->instances.fds[n];
	if (fd < 0) {
		pr_warning("%dth instance of program '%s' is invalid\n",
			   n, prog->section_name);
		return -ENOENT;
	}

	return fd;
}

void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
	prog->type = type;
}

static bool bpf_program__is_type(struct bpf_program *prog,
				 enum bpf_prog_type type)
{
	return prog ? (prog->type == type) : false;
}

/* Generate the bpf_program__set_<NAME>() / bpf_program__is_<NAME>()
 * accessor pair for a given program TYPE.
 */
#define BPF_PROG_TYPE_FNS(NAME, TYPE)			\
int bpf_program__set_##NAME(struct bpf_program *prog)	\
{							\
	if (!prog)					\
		return -EINVAL;				\
	bpf_program__set_type(prog, TYPE);		\
	return 0;					\
}							\
							\
bool bpf_program__is_##NAME(struct bpf_program *prog)	\
{							\
	return bpf_program__is_type(prog, TYPE);	\
}							\

BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);

void bpf_program__set_expected_attach_type(struct bpf_program *prog,
					   enum bpf_attach_type type)
{
	prog->expected_attach_type = type;
}

#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \
	{ string, sizeof(string) - 1, ptype, eatype, is_attachable, atype }

/* Programs that can NOT be attached. */
#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0)

/* Programs that can be attached. */
#define BPF_APROG_SEC(string, ptype, atype) \
	BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype)

/* Programs that must specify expected attach type at load time. */
#define BPF_EAPROG_SEC(string, ptype, eatype) \
	BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype)

/* Programs that can be attached but attach type can't be identified by section
 * name. Kept for backward compatibility.
 */
#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)

/* Table mapping ELF section-name prefixes to program type, expected
 * attach type and (for attachable types) attach type.
 */
static const struct {
	const char *sec;
	size_t len;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	int is_attachable;
	enum bpf_attach_type attach_type;
} section_names[] = {
	BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
	BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
	BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
	BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT),
	BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT),
	BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
	BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
	BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
	BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
	BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
	BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
	BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB,
		      BPF_CGROUP_INET_INGRESS),
	BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB,
		      BPF_CGROUP_INET_EGRESS),
	BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
	BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK,
		      BPF_CGROUP_INET_SOCK_CREATE),
	BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK,
		       BPF_CGROUP_INET4_POST_BIND),
	BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK,
		       BPF_CGROUP_INET6_POST_BIND),
	BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE,
		      BPF_CGROUP_DEVICE),
	BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS,
		      BPF_CGROUP_SOCK_OPS),
	BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB,
		      BPF_SK_SKB_STREAM_PARSER),
	BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB,
		      BPF_SK_SKB_STREAM_VERDICT),
	BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB),
	BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG,
		      BPF_SK_MSG_VERDICT),
	BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2,
		      BPF_LIRC_MODE2),
	BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR,
		      BPF_FLOW_DISSECTOR),
	BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
		       BPF_CGROUP_INET4_BIND),
	BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
		       BPF_CGROUP_INET6_BIND),
	BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
		       BPF_CGROUP_INET4_CONNECT),
	BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
		       BPF_CGROUP_INET6_CONNECT),
	BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
		       BPF_CGROUP_UDP4_SENDMSG),
	BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
		       BPF_CGROUP_UDP6_SENDMSG),
	BPF_EAPROG_SEC("cgroup/sysctl", BPF_PROG_TYPE_CGROUP_SYSCTL,
		       BPF_CGROUP_SYSCTL),
};

#undef BPF_PROG_SEC_IMPL
#undef BPF_PROG_SEC
#undef BPF_APROG_SEC
#undef BPF_EAPROG_SEC
#undef BPF_APROG_COMPAT

#define MAX_TYPE_NAME_SIZE 32

/* Build a malloc()ed, space-separated list of known section names; with
 * @attach_type only attachable entries are included. Caller must free
 * the result. Returns NULL on allocation failure or if the names would
 * not fit the (generously sized) buffer.
 */
static char *libbpf_get_type_names(bool attach_type)
{
	int i, len = ARRAY_SIZE(section_names) * MAX_TYPE_NAME_SIZE;
	char *buf;

	buf = malloc(len);
	if (!buf)
		return NULL;

	buf[0] = '\0';
	/* Forge string buf with all available names */
	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
		if (attach_type && !section_names[i].is_attachable)
			continue;

		if (strlen(buf) + strlen(section_names[i].sec) + 2 > len) {
			free(buf);
			return NULL;
		}
		strcat(buf, " ");
		strcat(buf, section_names[i].sec);
	}

	return buf;
}

/* Derive program type and expected attach type from ELF section @name
 * via the section_names table (prefix match). Returns 0 on success,
 * -EINVAL if @name is NULL or matches no known prefix.
 */
int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
			     enum bpf_attach_type *expected_attach_type)
{
	char *type_names;
	int i;

	if (!name)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
		if (strncmp(name, section_names[i].sec, section_names[i].len))
			continue;
		*prog_type = section_names[i].prog_type;
		*expected_attach_type = section_names[i].expected_attach_type;
		return 0;
	}
	pr_warning("failed to guess program type based on ELF section name '%s'\n", name);
	type_names = libbpf_get_type_names(false);
	if (type_names != NULL) {
		pr_info("supported section(type) names are:%s\n", type_names);
		free(type_names);
	}

	return -EINVAL;
}

/* Derive the attach type from ELF section @name (prefix match against
 * section_names). Returns -EINVAL if @name is NULL, unknown, or names
 * a non-attachable program type.
 */
int libbpf_attach_type_by_name(const char *name,
			       enum bpf_attach_type *attach_type)
{
	char *type_names;
	int i;

	if (!name)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
		if (strncmp(name, section_names[i].sec, section_names[i].len))
			continue;
		if (!section_names[i].is_attachable)
			return -EINVAL;
		*attach_type = section_names[i].attach_type;
		return 0;
	}
	pr_warning("failed to guess attach type based on ELF section name '%s'\n", name);
	type_names = libbpf_get_type_names(true);
	if (type_names != NULL) {
		pr_info("attachable section(type) names are:%s\n", type_names);
		free(type_names);
	}

	return -EINVAL;
}

/* Convenience wrapper: guess @prog's types from its own section name. */
static int
bpf_program__identify_section(struct bpf_program *prog,
			      enum bpf_prog_type *prog_type,
			      enum bpf_attach_type *expected_attach_type)
{
	return libbpf_prog_type_by_name(prog->section_name, prog_type,
					expected_attach_type);
}

int bpf_map__fd(struct bpf_map *map)
{
	return map ? map->fd : -EINVAL;
}

const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
{
	return map ? &map->def : ERR_PTR(-EINVAL);
}

const char *bpf_map__name(struct bpf_map *map)
{
	return map ? map->name : NULL;
}

__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
{
	return map ? map->btf_key_type_id : 0;
}

__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
{
	return map ? map->btf_value_type_id : 0;
}

/* Attach caller-private data to @map (previous priv released first via
 * its clear_priv callback).
 */
int bpf_map__set_priv(struct bpf_map *map, void *priv,
		      bpf_map_clear_priv_t clear_priv)
{
	if (!map)
		return -EINVAL;

	if (map->priv) {
		if (map->clear_priv)
			map->clear_priv(map, map->priv);
	}

	map->priv = priv;
	map->clear_priv = clear_priv;
	return 0;
}

void *bpf_map__priv(struct bpf_map *map)
{
	return map ? map->priv : ERR_PTR(-EINVAL);
}

/* Perf-event arrays are never offloaded to a device. */
bool bpf_map__is_offload_neutral(struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

/* Internal maps back global data sections rather than user map defs. */
bool bpf_map__is_internal(struct bpf_map *map)
{
	return map->libbpf_type != LIBBPF_MAP_UNSPEC;
}

void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
{
	map->map_ifindex = ifindex;
}

/* Set the inner-map fd for a map-in-map; allowed only once and only
 * for map-in-map types.
 */
int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
{
	if (!bpf_map_type__is_map_in_map(map->def.type)) {
		pr_warning("error: unsupported map type\n");
		return -EINVAL;
	}
	if (map->inner_map_fd != -1) {
		pr_warning("error: inner_map_fd already specified\n");
		return -EINVAL;
	}
	map->inner_map_fd = fd;
	return 0;
}

/* Step from map @m by @i positions within @obj's maps array; returns
 * NULL if @m is outside the array or the step leaves its bounds.
 */
static struct bpf_map *
__bpf_map__iter(struct bpf_map *m, struct bpf_object *obj, int i)
{
	ssize_t idx;
	struct bpf_map *s, *e;

	if (!obj || !obj->maps)
		return NULL;

	s = obj->maps;
	e = obj->maps + obj->nr_maps;

	if ((m < s) || (m >= e)) {
		pr_warning("error in %s: map handler doesn't belong to object\n",
			   __func__);
		return NULL;
	}

	idx = (m - obj->maps) + i;
	if (idx >= obj->nr_maps || idx < 0)
		return
NULL; 3204 return &obj->maps[idx]; 3205 } 3206 3207 struct bpf_map * 3208 bpf_map__next(struct bpf_map *prev, struct bpf_object *obj) 3209 { 3210 if (prev == NULL) 3211 return obj->maps; 3212 3213 return __bpf_map__iter(prev, obj, 1); 3214 } 3215 3216 struct bpf_map * 3217 bpf_map__prev(struct bpf_map *next, struct bpf_object *obj) 3218 { 3219 if (next == NULL) { 3220 if (!obj->nr_maps) 3221 return NULL; 3222 return obj->maps + obj->nr_maps - 1; 3223 } 3224 3225 return __bpf_map__iter(next, obj, -1); 3226 } 3227 3228 struct bpf_map * 3229 bpf_object__find_map_by_name(struct bpf_object *obj, const char *name) 3230 { 3231 struct bpf_map *pos; 3232 3233 bpf_object__for_each_map(pos, obj) { 3234 if (pos->name && !strcmp(pos->name, name)) 3235 return pos; 3236 } 3237 return NULL; 3238 } 3239 3240 int 3241 bpf_object__find_map_fd_by_name(struct bpf_object *obj, const char *name) 3242 { 3243 return bpf_map__fd(bpf_object__find_map_by_name(obj, name)); 3244 } 3245 3246 struct bpf_map * 3247 bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset) 3248 { 3249 int i; 3250 3251 for (i = 0; i < obj->nr_maps; i++) { 3252 if (obj->maps[i].offset == offset) 3253 return &obj->maps[i]; 3254 } 3255 return ERR_PTR(-ENOENT); 3256 } 3257 3258 long libbpf_get_error(const void *ptr) 3259 { 3260 if (IS_ERR(ptr)) 3261 return PTR_ERR(ptr); 3262 return 0; 3263 } 3264 3265 int bpf_prog_load(const char *file, enum bpf_prog_type type, 3266 struct bpf_object **pobj, int *prog_fd) 3267 { 3268 struct bpf_prog_load_attr attr; 3269 3270 memset(&attr, 0, sizeof(struct bpf_prog_load_attr)); 3271 attr.file = file; 3272 attr.prog_type = type; 3273 attr.expected_attach_type = 0; 3274 3275 return bpf_prog_load_xattr(&attr, pobj, prog_fd); 3276 } 3277 3278 int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, 3279 struct bpf_object **pobj, int *prog_fd) 3280 { 3281 struct bpf_object_open_attr open_attr = { 3282 .file = attr->file, 3283 .prog_type = attr->prog_type, 3284 }; 3285 struct 
bpf_program *prog, *first_prog = NULL; 3286 enum bpf_attach_type expected_attach_type; 3287 enum bpf_prog_type prog_type; 3288 struct bpf_object *obj; 3289 struct bpf_map *map; 3290 int err; 3291 3292 if (!attr) 3293 return -EINVAL; 3294 if (!attr->file) 3295 return -EINVAL; 3296 3297 obj = bpf_object__open_xattr(&open_attr); 3298 if (IS_ERR_OR_NULL(obj)) 3299 return -ENOENT; 3300 3301 bpf_object__for_each_program(prog, obj) { 3302 /* 3303 * If type is not specified, try to guess it based on 3304 * section name. 3305 */ 3306 prog_type = attr->prog_type; 3307 prog->prog_ifindex = attr->ifindex; 3308 expected_attach_type = attr->expected_attach_type; 3309 if (prog_type == BPF_PROG_TYPE_UNSPEC) { 3310 err = bpf_program__identify_section(prog, &prog_type, 3311 &expected_attach_type); 3312 if (err < 0) { 3313 bpf_object__close(obj); 3314 return -EINVAL; 3315 } 3316 } 3317 3318 bpf_program__set_type(prog, prog_type); 3319 bpf_program__set_expected_attach_type(prog, 3320 expected_attach_type); 3321 3322 prog->log_level = attr->log_level; 3323 if (!first_prog) 3324 first_prog = prog; 3325 } 3326 3327 bpf_object__for_each_map(map, obj) { 3328 if (!bpf_map__is_offload_neutral(map)) 3329 map->map_ifindex = attr->ifindex; 3330 } 3331 3332 if (!first_prog) { 3333 pr_warning("object file doesn't contain bpf program\n"); 3334 bpf_object__close(obj); 3335 return -ENOENT; 3336 } 3337 3338 err = bpf_object__load(obj); 3339 if (err) { 3340 bpf_object__close(obj); 3341 return -EINVAL; 3342 } 3343 3344 *pobj = obj; 3345 *prog_fd = bpf_program__fd(first_prog); 3346 return 0; 3347 } 3348 3349 enum bpf_perf_event_ret 3350 bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size, 3351 void **copy_mem, size_t *copy_size, 3352 bpf_perf_event_print_t fn, void *private_data) 3353 { 3354 struct perf_event_mmap_page *header = mmap_mem; 3355 __u64 data_head = ring_buffer_read_head(header); 3356 __u64 data_tail = header->data_tail; 3357 void *base = ((__u8 *)header) + 
		page_size;
	int ret = LIBBPF_PERF_EVENT_CONT;
	struct perf_event_header *ehdr;
	size_t ehdr_size;

	while (data_head != data_tail) {
		ehdr = base + (data_tail & (mmap_size - 1));
		ehdr_size = ehdr->size;

		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
			/* Record wraps past the end of the ring: stitch
			 * the two pieces together in *copy_mem.
			 */
			void *copy_start = ehdr;
			size_t len_first = base + mmap_size - copy_start;
			size_t len_secnd = ehdr_size - len_first;

			if (*copy_size < ehdr_size) {
				free(*copy_mem);
				*copy_mem = malloc(ehdr_size);
				if (!*copy_mem) {
					*copy_size = 0;
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*copy_size = ehdr_size;
			}

			memcpy(*copy_mem, copy_start, len_first);
			memcpy(*copy_mem + len_first, base, len_secnd);
			ehdr = *copy_mem;
		}

		ret = fn(ehdr, private_data);
		data_tail += ehdr_size;
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;
	}

	/* Publish the new consumer position back to the kernel. */
	ring_buffer_write_tail(header, data_tail);
	return ret;
}

/* Describes one variable-length array inside struct bpf_prog_info:
 * where its pointer, element count and record size live.
 */
struct bpf_prog_info_array_desc {
	int array_offset;	/* e.g. offset of jited_prog_insns */
	int count_offset;	/* e.g. offset of jited_prog_len */
	int size_offset;	/* > 0: offset of rec size,
				 * < 0: fix size of -size_offset
				 */
};

static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
	[BPF_PROG_INFO_JITED_INSNS] = {
		offsetof(struct bpf_prog_info, jited_prog_insns),
		offsetof(struct bpf_prog_info, jited_prog_len),
		-1,
	},
	[BPF_PROG_INFO_XLATED_INSNS] = {
		offsetof(struct bpf_prog_info, xlated_prog_insns),
		offsetof(struct bpf_prog_info, xlated_prog_len),
		-1,
	},
	[BPF_PROG_INFO_MAP_IDS] = {
		offsetof(struct bpf_prog_info, map_ids),
		offsetof(struct bpf_prog_info, nr_map_ids),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_JITED_KSYMS] = {
		offsetof(struct bpf_prog_info, jited_ksyms),
		offsetof(struct bpf_prog_info, nr_jited_ksyms),
		-(int)sizeof(__u64),
	},
	[BPF_PROG_INFO_JITED_FUNC_LENS] = {
		offsetof(struct bpf_prog_info, jited_func_lens),
		offsetof(struct bpf_prog_info, nr_jited_func_lens),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_FUNC_INFO] = {
		offsetof(struct bpf_prog_info, func_info),
		offsetof(struct bpf_prog_info, nr_func_info),
		offsetof(struct bpf_prog_info, func_info_rec_size),
	},
	[BPF_PROG_INFO_LINE_INFO] = {
		offsetof(struct bpf_prog_info, line_info),
		offsetof(struct bpf_prog_info, nr_line_info),
		offsetof(struct bpf_prog_info, line_info_rec_size),
	},
	[BPF_PROG_INFO_JITED_LINE_INFO] = {
		offsetof(struct bpf_prog_info, jited_line_info),
		offsetof(struct bpf_prog_info, nr_jited_line_info),
		offsetof(struct bpf_prog_info, jited_line_info_rec_size),
	},
	[BPF_PROG_INFO_PROG_TAGS] = {
		offsetof(struct bpf_prog_info, prog_tags),
		offsetof(struct bpf_prog_info, nr_prog_tags),
		-(int)sizeof(__u8) * BPF_TAG_SIZE,
	},

};

/* Read the __u32 member of @info at byte @offset; a negative @offset
 * encodes a fixed record size and simply yields -offset.
 */
static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, int offset)
{
	__u32 *array = (__u32 *)info;

	if (offset >= 0)
		return array[offset / sizeof(__u32)];
	return -(int)offset;
}

/* Same as bpf_prog_info_read_offset_u32() but for __u64 members. */
static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info, int offset)
{
	__u64 *array = (__u64 *)info;

	if (offset >= 0)
		return array[offset / sizeof(__u64)];
	return -(int)offset;
}

/* Write @val to the __u32 member of @info at byte @offset; no-op for
 * negative (fixed-size) offsets.
 */
static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
					 __u32 val)
{
	__u32 *array = (__u32 *)info;

	if (offset >= 0)
		array[offset / sizeof(__u32)] = val;
}

/* Write @val to the __u64 member of @info at byte @offset; no-op for
 * negative (fixed-size) offsets.
 */
static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
					 __u64 val)
{
	__u64 *array = (__u64 *)info;

	if (offset >= 0)
		array[offset / sizeof(__u64)] = val;
}

/* Fetch bpf_prog_info for @fd together with the variable-length arrays
 * selected by the @arrays bitmask, laid out in one contiguous
 * malloc()ed buffer (caller frees). Array pointers inside ->info point
 * into ->data; bits for arrays the running kernel doesn't support are
 * silently cleared. Returns ERR_PTR() on failure.
 */
struct bpf_prog_info_linear *
bpf_program__get_prog_info_linear(int fd, __u64 arrays)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	__u32 data_len = 0;
	int i, err;
	void *ptr;

	if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
		return ERR_PTR(-EINVAL);

	/* step 1: get array dimensions */
	err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s", strerror(errno));
		return ERR_PTR(-EFAULT);
	}

	/* step 2: calculate total size of all arrays */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		bool include_array = (arrays & (1UL << i)) > 0;
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		desc = bpf_prog_info_array_desc + i;

		/* kernel is too old to support this field */
		if (info_len < desc->array_offset + sizeof(__u32) ||
		    info_len < desc->count_offset + sizeof(__u32) ||
		    (desc->size_offset > 0 && info_len < desc->size_offset))
			include_array = false;

		if (!include_array) {
			arrays &= ~(1UL << i);	/* clear the bit */
			continue;
		}

		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);

		data_len += count * size;
	}

	/* step 3: allocate continuous memory */
	data_len = roundup(data_len, sizeof(__u64));
	info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
	if (!info_linear)
		return ERR_PTR(-ENOMEM);

	/* step 4: fill data to info_linear->info */
	info_linear->arrays = arrays;
	memset(&info_linear->info, 0, sizeof(info));
	ptr = info_linear->data;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		if ((arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->count_offset, count);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->size_offset, size);
		/* point the kernel at the matching slice of ->data */
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset,
					     ptr_to_u64(ptr));
		ptr += count * size;
	}

	/* step 5: call syscall again to get required arrays */
	err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s", strerror(errno));
		free(info_linear);
		return ERR_PTR(-EFAULT);
	}

	/* step 6: verify the data */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 v1, v2;

		if ((arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->count_offset);
		if (v1 != v2)
			pr_warning("%s: mismatch in element count\n", __func__);

		v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->size_offset);
		if (v1 != v2)
			pr_warning("%s: mismatch in rec size\n", __func__);
	}

	/* step 7: update info_len and data_len */
	info_linear->info_len = sizeof(struct bpf_prog_info);
	info_linear->data_len = data_len;

	return info_linear;
}

/* Convert the in-buffer array pointers in @info_linear->info into
 * offsets relative to ->data, making the blob position-independent
 * (e.g. so it can be copied or serialized).
 */
void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
{
	int i;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u64 addr, offs;

		if ((info_linear->arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		addr = bpf_prog_info_read_offset_u64(&info_linear->info,
						     desc->array_offset);
		offs = addr - ptr_to_u64(info_linear->data);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset, offs);
	}
}

/* Inverse of bpf_program__bpil_addr_to_offs(): turn ->data-relative
 * offsets back into absolute pointers.
 */
void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
{
	int i;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u64 addr, offs;

		if ((info_linear->arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		offs = bpf_prog_info_read_offset_u64(&info_linear->info,
						     desc->array_offset);
		addr = offs + ptr_to_u64(info_linear->data);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset, addr);
	}
}