/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 */

#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <asm/unistd.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/list.h>
#include <libelf.h>
#include <gelf.h>

#include "libbpf.h"
#include "bpf.h"

#define __printf(a, b)	__attribute__((format(printf, a, b)))

__printf(1, 2)
static int __base_pr(const char *format, ...)
{
	va_list args;
	int err;

	va_start(args, format);
	err = vfprintf(stderr, format, args);
	va_end(args);
	return err;
}

static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_debug;

#define __pr(func, fmt, ...)	\
do {				\
	if ((func))		\
		(func)("libbpf: " fmt, ##__VA_ARGS__); \
} while (0)

#define pr_warning(fmt, ...)	__pr(__pr_warning, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(__pr_info, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(__pr_debug, fmt, ##__VA_ARGS__)

void libbpf_set_print(libbpf_print_fn_t warn,
		      libbpf_print_fn_t info,
		      libbpf_print_fn_t debug)
{
	__pr_warning = warn;
	__pr_info = info;
	__pr_debug = debug;
}

#define STRERR_BUFSIZE	128

#define ERRNO_OFFSET(e)		((e) - __LIBBPF_ERRNO__START)
#define ERRCODE_OFFSET(c)	ERRNO_OFFSET(LIBBPF_ERRNO__##c)
#define NR_ERRNO	(__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)

static const char *libbpf_strerror_table[NR_ERRNO] = {
	[ERRCODE_OFFSET(LIBELF)]	= "Something wrong in libelf",
	[ERRCODE_OFFSET(FORMAT)]	= "BPF object format invalid",
	[ERRCODE_OFFSET(KVERSION)]	= "'version' section incorrect or lost",
	[ERRCODE_OFFSET(ENDIAN)]	= "Endian mismatch",
	[ERRCODE_OFFSET(INTERNAL)]	= "Internal error in libbpf",
	[ERRCODE_OFFSET(RELOC)]		= "Relocation failed",
	[ERRCODE_OFFSET(VERIFY)]	= "Kernel verifier blocks program loading",
	[ERRCODE_OFFSET(PROG2BIG)]	= "Program too big",
	[ERRCODE_OFFSET(KVER)]		= "Incorrect kernel version",
};
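
/*
 * Turn an error code returned by libbpf into a human readable string.
 * Values below __LIBBPF_ERRNO__START are treated as plain errno codes
 * and handed to strerror_r(); values in the libbpf range are looked up
 * in libbpf_strerror_table above.  Illustrative use:
 *
 *	char errmsg[STRERR_BUFSIZE];
 *
 *	libbpf_strerror(err, errmsg, sizeof(errmsg));
 */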
int libbpf_strerror(int err, char *buf, size_t size)
{
	if (!buf || !size)
		return -1;

	err = err > 0 ? err : -err;

	if (err < __LIBBPF_ERRNO__START) {
		int ret;

		ret = strerror_r(err, buf, size);
		buf[size - 1] = '\0';
		return ret;
	}

	if (err < __LIBBPF_ERRNO__END) {
		const char *msg;

		msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
		snprintf(buf, size, "%s", msg);
		buf[size - 1] = '\0';
		return 0;
	}

	snprintf(buf, size, "Unknown libbpf error %d", err);
	buf[size - 1] = '\0';
	return -1;
}

#define CHECK_ERR(action, err, out) do {	\
	err = action;				\
	if (err)				\
		goto out;			\
} while (0)

/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif

/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	char *section_name;
	struct bpf_insn *insns;
	size_t insns_cnt;

	struct {
		int insn_idx;
		int map_idx;
	} *reloc_desc;
	int nr_reloc;

	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;
	void *priv;
	bpf_program_clear_priv_t clear_priv;
};

struct bpf_map {
	int fd;
	char *name;
	struct bpf_map_def def;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
};

static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char license[64];
	u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;

	bool loaded;

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		void *obj_buf;
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;
		size_t strtabidx;
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;
		int nr_reloc;
	} efile;
	/*
	 * All loaded bpf_object are linked in a list, which is
	 * hidden from the caller. bpf_objects__<func> handlers deal
	 * with all objects.
	 */
	struct list_head list;
	char path[];
};
#define obj_elf_valid(o)	((o)->efile.elf)
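
/*
 * Close every per-instance fd of a program.  A program that was
 * opened but never loaded keeps instances.nr == -1, which is treated
 * as "nothing to close".
 */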
static void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

	/*
	 * If the object is opened but the program was never loaded,
	 * it is possible that prog->instances.nr == -1.
	 */
	if (prog->instances.nr > 0) {
		for (i = 0; i < prog->instances.nr; i++)
			zclose(prog->instances.fds[i]);
	} else if (prog->instances.nr != -1) {
		pr_warning("Internal error: instances.nr is %d\n",
			   prog->instances.nr);
	}

	prog->instances.nr = -1;
	zfree(&prog->instances.fds);
}

static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	if (prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = NULL;
	prog->clear_priv = NULL;

	bpf_program__unload(prog);
	zfree(&prog->section_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->idx = -1;
}

static int
bpf_program__init(void *data, size_t size, char *name, int idx,
		  struct bpf_program *prog)
{
	if (size < sizeof(struct bpf_insn)) {
		pr_warning("corrupted section '%s'\n", name);
		return -EINVAL;
	}

	bzero(prog, sizeof(*prog));

	prog->section_name = strdup(name);
	if (!prog->section_name) {
		pr_warning("failed to alloc name for prog %s\n",
			   name);
		goto errout;
	}

	prog->insns = malloc(size);
	if (!prog->insns) {
		pr_warning("failed to alloc insns for %s\n", name);
		goto errout;
	}
	prog->insns_cnt = size / sizeof(struct bpf_insn);
	memcpy(prog->insns, data,
	       prog->insns_cnt * sizeof(struct bpf_insn));
	prog->idx = idx;
	prog->instances.fds = NULL;
	prog->instances.nr = -1;

	return 0;
errout:
	bpf_program__exit(prog);
	return -ENOMEM;
}

static int
bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
			char *name, int idx)
{
	struct bpf_program prog, *progs;
	int nr_progs, err;

	err = bpf_program__init(data, size, name, idx, &prog);
	if (err)
		return err;

	progs = obj->programs;
	nr_progs = obj->nr_programs;

	progs = realloc(progs, sizeof(progs[0]) * (nr_progs + 1));
	if (!progs) {
		/*
		 * In this case the original obj->programs
		 * is still valid, so it doesn't need special
		 * treatment in bpf_close_object().
		 */
		pr_warning("failed to alloc a new program '%s'\n",
			   name);
		bpf_program__exit(&prog);
		return -ENOMEM;
	}

	pr_debug("found program %s\n", prog.section_name);
	obj->programs = progs;
	obj->nr_programs = nr_progs + 1;
	prog.obj = obj;
	progs[nr_progs] = prog;
	return 0;
}
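
/*
 * Allocate a new bpf_object for @path (the trailing flexible array
 * member holds the path string) and link it into the global
 * bpf_objects_list so that bpf_object__next() can iterate over it.
 */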
static struct bpf_object *bpf_object__new(const char *path,
					  void *obj_buf,
					  size_t obj_buf_sz)
{
	struct bpf_object *obj;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warning("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	obj->efile.fd = -1;

	/*
	 * The caller of this function should also call
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to the user. If not, we should duplicate the buffer
	 * to avoid the user freeing it before elf finish.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;

	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	obj->efile.symbols = NULL;

	zfree(&obj->efile.reloc);
	obj->efile.nr_reloc = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}

static int bpf_object__elf_init(struct bpf_object *obj)
{
	int err = 0;
	GElf_Ehdr *ep;

	if (obj_elf_valid(obj)) {
		pr_warning("elf init: internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/*
		 * obj_buf should have been validated by
		 * bpf_object__open_buffer().
		 */
		obj->efile.elf = elf_memory(obj->efile.obj_buf,
					    obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY);
		if (obj->efile.fd < 0) {
			pr_warning("failed to open %s: %s\n", obj->path,
				   strerror(errno));
			return -errno;
		}

		obj->efile.elf = elf_begin(obj->efile.fd,
					   LIBBPF_ELF_C_READ_MMAP,
					   NULL);
	}

	if (!obj->efile.elf) {
		pr_warning("failed to open %s as ELF file\n",
			   obj->path);
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
		pr_warning("failed to get EHDR from %s\n",
			   obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}
	ep = &obj->efile.ehdr;

	if ((ep->e_type != ET_REL) || (ep->e_machine != 0)) {
		pr_warning("%s is not an eBPF object file\n",
			   obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}
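
/*
 * BPF object files carry no cross-endian support, so the ELF data
 * encoding must match the host.  The local 'endian' word is used to
 * detect which byte order this library was built for.
 */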
static int
bpf_object__check_endianness(struct bpf_object *obj)
{
	static unsigned int const endian = 1;

	switch (obj->efile.ehdr.e_ident[EI_DATA]) {
	case ELFDATA2LSB:
		/* We are big endian, BPF obj is little endian. */
		if (*(unsigned char const *)&endian != 1)
			goto mismatch;
		break;

	case ELFDATA2MSB:
		/* We are little endian, BPF obj is big endian. */
		if (*(unsigned char const *)&endian != 0)
			goto mismatch;
		break;
	default:
		return -LIBBPF_ERRNO__ENDIAN;
	}

	return 0;

mismatch:
	pr_warning("Error: endianness mismatch.\n");
	return -LIBBPF_ERRNO__ENDIAN;
}

static int
bpf_object__init_license(struct bpf_object *obj,
			 void *data, size_t size)
{
	memcpy(obj->license, data,
	       min(size, sizeof(obj->license) - 1));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}

static int
bpf_object__init_kversion(struct bpf_object *obj,
			  void *data, size_t size)
{
	u32 kver;

	if (size != sizeof(kver)) {
		pr_warning("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path,
		 obj->kern_version);
	return 0;
}

static int
bpf_object__init_maps(struct bpf_object *obj, void *data,
		      size_t size)
{
	size_t nr_maps;
	int i;

	nr_maps = size / sizeof(struct bpf_map_def);
	if (!data || !nr_maps) {
		pr_debug("%s doesn't need map definition\n",
			 obj->path);
		return 0;
	}

	pr_debug("maps in %s: %zd bytes\n", obj->path, size);

	obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
	if (!obj->maps) {
		pr_warning("alloc maps for object failed\n");
		return -ENOMEM;
	}
	obj->nr_maps = nr_maps;

	for (i = 0; i < nr_maps; i++) {
		struct bpf_map_def *def = &obj->maps[i].def;

		/*
		 * Fill all fds with -1 so we won't close an incorrect
		 * fd (fd 0 is stdin) on failure (zclose won't close
		 * a negative fd).
		 */
		obj->maps[i].fd = -1;

		/* Save map definition into obj->maps */
		*def = ((struct bpf_map_def *)data)[i];
	}
	return 0;
}

static int
bpf_object__init_maps_name(struct bpf_object *obj, int maps_shndx)
{
	int i;
	Elf_Data *symbols = obj->efile.symbols;

	if (!symbols || maps_shndx < 0)
		return -EINVAL;

	for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		GElf_Sym sym;
		size_t map_idx;
		const char *map_name;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != maps_shndx)
			continue;

		map_name = elf_strptr(obj->efile.elf,
				      obj->efile.strtabidx,
				      sym.st_name);
		map_idx = sym.st_value / sizeof(struct bpf_map_def);
		if (map_idx >= obj->nr_maps) {
			pr_warning("index of map \"%s\" is buggy: %zu >= %zu\n",
				   map_name, map_idx, obj->nr_maps);
			continue;
		}
		obj->maps[map_idx].name = strdup(map_name);
		if (!obj->maps[map_idx].name) {
			pr_warning("failed to alloc map name\n");
			return -ENOMEM;
		}
		pr_debug("map %zu is \"%s\"\n", map_idx,
			 obj->maps[map_idx].name);
	}
	return 0;
}
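
/*
 * Walk every section of the ELF object once and dispatch on the
 * section name/type: "license", "version" and "maps" feed the object
 * metadata, SYMTAB is remembered for relocation, executable PROGBITS
 * sections become bpf_programs, and REL sections are stashed for
 * bpf_object__collect_reloc().
 */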
static int bpf_object__elf_collect(struct bpf_object *obj)
{
	Elf *elf = obj->efile.elf;
	GElf_Ehdr *ep = &obj->efile.ehdr;
	Elf_Scn *scn = NULL;
	int idx = 0, err = 0, maps_shndx = -1;

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
		pr_warning("failed to get e_shstrndx from %s\n",
			   obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		char *name;
		GElf_Shdr sh;
		Elf_Data *data;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warning("failed to get section header from %s\n",
				   obj->path);
			err = -LIBBPF_ERRNO__FORMAT;
			goto out;
		}

		name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
		if (!name) {
			pr_warning("failed to get section name from %s\n",
				   obj->path);
			err = -LIBBPF_ERRNO__FORMAT;
			goto out;
		}

		data = elf_getdata(scn, 0);
		if (!data) {
			pr_warning("failed to get section data from %s(%s)\n",
				   name, obj->path);
			err = -LIBBPF_ERRNO__FORMAT;
			goto out;
		}
		pr_debug("section %s, size %ld, link %d, flags %lx, type=%d\n",
			 name, (unsigned long)data->d_size,
			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
			 (int)sh.sh_type);

		if (strcmp(name, "license") == 0)
			err = bpf_object__init_license(obj,
						       data->d_buf,
						       data->d_size);
		else if (strcmp(name, "version") == 0)
			err = bpf_object__init_kversion(obj,
							data->d_buf,
							data->d_size);
		else if (strcmp(name, "maps") == 0) {
			err = bpf_object__init_maps(obj, data->d_buf,
						    data->d_size);
			maps_shndx = idx;
		} else if (sh.sh_type == SHT_SYMTAB) {
			if (obj->efile.symbols) {
				pr_warning("bpf: multiple SYMTAB in %s\n",
					   obj->path);
				err = -LIBBPF_ERRNO__FORMAT;
			} else {
				obj->efile.symbols = data;
				obj->efile.strtabidx = sh.sh_link;
			}
		} else if ((sh.sh_type == SHT_PROGBITS) &&
			   (sh.sh_flags & SHF_EXECINSTR) &&
			   (data->d_size > 0)) {
			err = bpf_object__add_program(obj, data->d_buf,
						      data->d_size, name, idx);
			if (err) {
				char errmsg[STRERR_BUFSIZE];

				strerror_r(-err, errmsg, sizeof(errmsg));
				pr_warning("failed to alloc program %s (%s): %s\n",
					   name, obj->path, errmsg);
			}
		} else if (sh.sh_type == SHT_REL) {
			void *reloc = obj->efile.reloc;
			int nr_reloc = obj->efile.nr_reloc + 1;

			reloc = realloc(reloc,
					sizeof(*obj->efile.reloc) * nr_reloc);
			if (!reloc) {
				pr_warning("realloc failed\n");
				err = -ENOMEM;
			} else {
				int n = nr_reloc - 1;

				obj->efile.reloc = reloc;
				obj->efile.nr_reloc = nr_reloc;

				obj->efile.reloc[n].shdr = sh;
				obj->efile.reloc[n].data = data;
			}
		}
		if (err)
			goto out;
	}

	if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
		pr_warning("Corrupted ELF file: index of strtab invalid\n");
		return -LIBBPF_ERRNO__FORMAT;
	}
	if (maps_shndx >= 0)
		err = bpf_object__init_maps_name(obj, maps_shndx);
out:
	return err;
}

static struct bpf_program *
bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
{
	struct bpf_program *prog;
	size_t i;

	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];
		if (prog->idx == idx)
			return prog;
	}
	return NULL;
}
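
/*
 * Record every relocation entry of one SHT_REL section as an
 * (insn_idx, map_idx) pair.  Only BPF_LD_IMM64 instructions may be
 * relocated: their immediate is patched with a map fd later, in
 * bpf_program__relocate().
 */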
static int
bpf_program__collect_reloc(struct bpf_program *prog,
			   size_t nr_maps, GElf_Shdr *shdr,
			   Elf_Data *data, Elf_Data *symbols)
{
	int i, nrels;

	pr_debug("collecting relocating info for: '%s'\n",
		 prog->section_name);
	nrels = shdr->sh_size / shdr->sh_entsize;

	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
	if (!prog->reloc_desc) {
		pr_warning("failed to alloc memory in relocation\n");
		return -ENOMEM;
	}
	prog->nr_reloc = nrels;

	for (i = 0; i < nrels; i++) {
		GElf_Sym sym;
		GElf_Rel rel;
		unsigned int insn_idx;
		struct bpf_insn *insns = prog->insns;
		size_t map_idx;

		if (!gelf_getrel(data, i, &rel)) {
			pr_warning("relocation: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
		pr_debug("relocation: insn_idx=%u\n", insn_idx);

		if (!gelf_getsym(symbols,
				 GELF_R_SYM(rel.r_info),
				 &sym)) {
			pr_warning("relocation: symbol %"PRIx64" not found\n",
				   GELF_R_SYM(rel.r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
				   insn_idx, insns[insn_idx].code);
			return -LIBBPF_ERRNO__RELOC;
		}

		map_idx = sym.st_value / sizeof(struct bpf_map_def);
		if (map_idx >= nr_maps) {
			pr_warning("bpf relocation: map_idx %d larger than %d\n",
				   (int)map_idx, (int)nr_maps - 1);
			return -LIBBPF_ERRNO__RELOC;
		}

		prog->reloc_desc[i].insn_idx = insn_idx;
		prog->reloc_desc[i].map_idx = map_idx;
	}
	return 0;
}

static int
bpf_object__create_maps(struct bpf_object *obj)
{
	unsigned int i;

	for (i = 0; i < obj->nr_maps; i++) {
		struct bpf_map_def *def = &obj->maps[i].def;
		int *pfd = &obj->maps[i].fd;

		*pfd = bpf_create_map(def->type,
				      def->key_size,
				      def->value_size,
				      def->max_entries);
		if (*pfd < 0) {
			size_t j;
			int err = *pfd;

			pr_warning("failed to create map: %s\n",
				   strerror(errno));
			for (j = 0; j < i; j++)
				zclose(obj->maps[j].fd);
			return err;
		}
		pr_debug("create map: fd=%d\n", *pfd);
	}

	return 0;
}

static int
bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
{
	int i;

	if (!prog || !prog->reloc_desc)
		return 0;

	for (i = 0; i < prog->nr_reloc; i++) {
		int insn_idx, map_idx;
		struct bpf_insn *insns = prog->insns;

		insn_idx = prog->reloc_desc[i].insn_idx;
		map_idx = prog->reloc_desc[i].map_idx;

		if (insn_idx >= (int)prog->insns_cnt) {
			pr_warning("relocation out of range: '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__RELOC;
		}
		insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
		insns[insn_idx].imm = obj->maps[map_idx].fd;
	}

	zfree(&prog->reloc_desc);
	prog->nr_reloc = 0;
	return 0;
}

static int
bpf_object__relocate(struct bpf_object *obj)
{
	struct bpf_program *prog;
	size_t i;
	int err;

	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];

		err = bpf_program__relocate(prog, obj);
		if (err) {
			pr_warning("failed to relocate '%s'\n",
				   prog->section_name);
			return err;
		}
	}
	return 0;
}

static int bpf_object__collect_reloc(struct bpf_object *obj)
{
	int i, err;

	if (!obj_elf_valid(obj)) {
		pr_warning("Internal error: elf object is closed\n");
		return -LIBBPF_ERRNO__INTERNAL;
	}

	for (i = 0; i < obj->efile.nr_reloc; i++) {
		GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
		Elf_Data *data = obj->efile.reloc[i].data;
		int idx = shdr->sh_info;
		struct bpf_program *prog;
		size_t nr_maps = obj->nr_maps;

		if (shdr->sh_type != SHT_REL) {
			pr_warning("internal error at %d\n", __LINE__);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog = bpf_object__find_prog_by_idx(obj, idx);
		if (!prog) {
			pr_warning("relocation failed: no section %d\n",
				   idx);
			return -LIBBPF_ERRNO__RELOC;
		}

		err = bpf_program__collect_reloc(prog, nr_maps,
						 shdr, data,
						 obj->efile.symbols);
		if (err)
			return err;
	}
	return 0;
}
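
/*
 * Load a single instruction buffer into the kernel via
 * bpf_load_program() and distinguish the common failure modes: a
 * non-empty verifier log maps to LIBBPF_ERRNO__VERIFY, an oversized
 * program to LIBBPF_ERRNO__PROG2BIG, and an empty log most likely
 * means a kernel version mismatch (LIBBPF_ERRNO__KVER).
 */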
static int
load_program(struct bpf_insn *insns, int insns_cnt,
	     char *license, u32 kern_version, int *pfd)
{
	int ret;
	char *log_buf;

	if (!insns || !insns_cnt)
		return -EINVAL;

	log_buf = malloc(BPF_LOG_BUF_SIZE);
	if (!log_buf)
		pr_warning("failed to alloc log buffer for bpf loader, continuing without log\n");

	ret = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
			       insns_cnt, license, kern_version,
			       log_buf, BPF_LOG_BUF_SIZE);

	if (ret >= 0) {
		*pfd = ret;
		ret = 0;
		goto out;
	}

	ret = -LIBBPF_ERRNO__LOAD;
	pr_warning("load bpf program failed: %s\n", strerror(errno));

	if (log_buf && log_buf[0] != '\0') {
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else {
		if (insns_cnt >= BPF_MAXINSNS) {
			pr_warning("Program too large (%d insns), at most %d insns\n",
				   insns_cnt, BPF_MAXINSNS);
			ret = -LIBBPF_ERRNO__PROG2BIG;
		} else if (log_buf) {
			pr_warning("log buffer is empty\n");
			ret = -LIBBPF_ERRNO__KVER;
		}
	}

out:
	free(log_buf);
	return ret;
}
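
/*
 * Load every instance of a program.  Without a preprocessor a program
 * has exactly one instance; with one (see bpf_program__set_prep()),
 * each instance is generated from the original instructions by the
 * preprocessor callback, which may also skip an instance by returning
 * no instructions for it.
 */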
static int
bpf_program__load(struct bpf_program *prog,
		  char *license, u32 kern_version)
{
	int err = 0, fd, i;

	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog->insns, prog->insns_cnt,
				   license, kern_version, &fd);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		bzero(&result, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd);

		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}

static int
bpf_object__load_progs(struct bpf_object *obj)
{
	size_t i;
	int err;

	for (i = 0; i < obj->nr_programs; i++) {
		err = bpf_program__load(&obj->programs[i],
					obj->license,
					obj->kern_version);
		if (err)
			return err;
	}
	return 0;
}

static int bpf_object__validate(struct bpf_object *obj)
{
	if (obj->kern_version == 0) {
		pr_warning("%s doesn't provide kernel version\n",
			   obj->path);
		return -LIBBPF_ERRNO__KVERSION;
	}
	return 0;
}

static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj), err, out);

	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}

struct bpf_object *bpf_object__open(const char *path)
{
	/* param validation */
	if (!path)
		return NULL;

	pr_debug("loading %s\n", path);

	return __bpf_object__open(path, NULL, 0);
}

struct bpf_object *bpf_object__open_buffer(void *obj_buf,
					   size_t obj_buf_sz,
					   const char *name)
{
	char tmp_name[64];

	/* param validation */
	if (!obj_buf || obj_buf_sz <= 0)
		return NULL;

	if (!name) {
		snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
			 (unsigned long)obj_buf,
			 (unsigned long)obj_buf_sz);
		tmp_name[sizeof(tmp_name) - 1] = '\0';
		name = tmp_name;
	}
	pr_debug("loading object '%s' from buffer\n",
		 name);

	return __bpf_object__open(name, obj_buf, obj_buf_sz);
}

int bpf_object__unload(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return -EINVAL;

	for (i = 0; i < obj->nr_maps; i++)
		zclose(obj->maps[i].fd);

	for (i = 0; i < obj->nr_programs; i++)
		bpf_program__unload(&obj->programs[i]);

	return 0;
}
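
/*
 * Load maps and programs of an opened object into the kernel:
 * create the maps, patch map fds into the programs (relocation),
 * then load each program.  Illustrative usage, with error checking
 * of the open result elided:
 *
 *	struct bpf_object *obj = bpf_object__open("prog.o");
 *	struct bpf_program *prog = bpf_program__next(NULL, obj);
 *
 *	if (!bpf_object__load(obj))
 *		attach(bpf_program__fd(prog));
 *	bpf_object__close(obj);
 *
 * where attach() stands for whatever consumes the program fd.
 */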
int bpf_object__load(struct bpf_object *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warning("object should not be loaded twice\n");
		return -EINVAL;
	}

	obj->loaded = true;

	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj), err, out);
	CHECK_ERR(bpf_object__load_progs(obj), err, out);

	return 0;
out:
	bpf_object__unload(obj);
	pr_warning("failed to load object '%s'\n", obj->path);
	return err;
}

void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return;

	bpf_object__elf_finish(obj);
	bpf_object__unload(obj);

	for (i = 0; i < obj->nr_maps; i++) {
		zfree(&obj->maps[i].name);
		if (obj->maps[i].clear_priv)
			obj->maps[i].clear_priv(&obj->maps[i],
						obj->maps[i].priv);
		obj->maps[i].priv = NULL;
		obj->maps[i].clear_priv = NULL;
	}
	zfree(&obj->maps);
	obj->nr_maps = 0;

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i < obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	list_del(&obj->list);
	free(obj);
}

struct bpf_object *
bpf_object__next(struct bpf_object *prev)
{
	struct bpf_object *next;

	if (!prev)
		next = list_first_entry(&bpf_objects_list,
					struct bpf_object,
					list);
	else
		next = list_next_entry(prev, list);

	/* An empty list is detected here, so no check is needed on entry. */
	if (&next->list == &bpf_objects_list)
		return NULL;

	return next;
}

const char *
bpf_object__get_name(struct bpf_object *obj)
{
	if (!obj)
		return ERR_PTR(-EINVAL);
	return obj->path;
}

unsigned int
bpf_object__get_kversion(struct bpf_object *obj)
{
	if (!obj)
		return 0;
	return obj->kern_version;
}

struct bpf_program *
bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
{
	size_t idx;

	if (!obj->programs)
		return NULL;
	/* First handler */
	if (prev == NULL)
		return &obj->programs[0];

	if (prev->obj != obj) {
		pr_warning("error: program handler doesn't match object\n");
		return NULL;
	}

	idx = (prev - obj->programs) + 1;
	if (idx >= obj->nr_programs)
		return NULL;
	return &obj->programs[idx];
}

int bpf_program__set_private(struct bpf_program *prog,
			     void *priv,
			     bpf_program_clear_priv_t clear_priv)
{
	if (prog->priv && prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = priv;
	prog->clear_priv = clear_priv;
	return 0;
}

int bpf_program__get_private(struct bpf_program *prog, void **ppriv)
{
	*ppriv = prog->priv;
	return 0;
}

const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
{
	const char *title;

	title = prog->section_name;
	if (needs_copy) {
		title = strdup(title);
		if (!title) {
			pr_warning("failed to strdup program title\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	return title;
}

int bpf_program__fd(struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}
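
/*
 * Install a preprocessor that will be asked for @nr_instances rewritten
 * copies of the program at load time (see bpf_program__load()).  This
 * must be done before bpf_object__load(); once instance fds have been
 * allocated the preprocessor can no longer be changed.
 */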
int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
			  bpf_program_prep_t prep)
{
	int *instances_fds;

	if (nr_instances <= 0 || !prep)
		return -EINVAL;

	if (prog->instances.nr > 0 || prog->instances.fds) {
		pr_warning("Can't set pre-processor after loading\n");
		return -EINVAL;
	}

	instances_fds = malloc(sizeof(int) * nr_instances);
	if (!instances_fds) {
		pr_warning("alloc memory failed for fds\n");
		return -ENOMEM;
	}

	/* fill all fds with -1 */
	memset(instances_fds, -1, sizeof(int) * nr_instances);

	prog->instances.nr = nr_instances;
	prog->instances.fds = instances_fds;
	prog->preprocessor = prep;
	return 0;
}

int bpf_program__nth_fd(struct bpf_program *prog, int n)
{
	int fd;

	if (n >= prog->instances.nr || n < 0) {
		pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
			   n, prog->section_name, prog->instances.nr);
		return -EINVAL;
	}

	fd = prog->instances.fds[n];
	if (fd < 0) {
		pr_warning("%dth instance of program '%s' is invalid\n",
			   n, prog->section_name);
		return -ENOENT;
	}

	return fd;
}

int bpf_map__get_fd(struct bpf_map *map)
{
	if (!map)
		return -EINVAL;

	return map->fd;
}

int bpf_map__get_def(struct bpf_map *map, struct bpf_map_def *pdef)
{
	if (!map || !pdef)
		return -EINVAL;

	*pdef = map->def;
	return 0;
}

const char *bpf_map__get_name(struct bpf_map *map)
{
	if (!map)
		return NULL;
	return map->name;
}

int bpf_map__set_private(struct bpf_map *map, void *priv,
			 bpf_map_clear_priv_t clear_priv)
{
	if (!map)
		return -EINVAL;

	if (map->priv) {
		if (map->clear_priv)
			map->clear_priv(map, map->priv);
	}

	map->priv = priv;
	map->clear_priv = clear_priv;
	return 0;
}

int bpf_map__get_private(struct bpf_map *map, void **ppriv)
{
	if (!map)
		return -EINVAL;

	if (ppriv)
		*ppriv = map->priv;
	return 0;
}

struct bpf_map *
bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
{
	size_t idx;
	struct bpf_map *s, *e;

	if (!obj || !obj->maps)
		return NULL;

	s = obj->maps;
	e = obj->maps + obj->nr_maps;

	if (prev == NULL)
		return s;

	if ((prev < s) || (prev >= e)) {
		pr_warning("error in %s: map handler doesn't belong to object\n",
			   __func__);
		return NULL;
	}

	idx = (prev - obj->maps) + 1;
	if (idx >= obj->nr_maps)
		return NULL;
	return &obj->maps[idx];
}

struct bpf_map *
bpf_object__get_map_by_name(struct bpf_object *obj, const char *name)
{
	struct bpf_map *pos;

	bpf_map__for_each(pos, obj) {
		if (pos->name && !strcmp(pos->name, name))
			return pos;
	}
	return NULL;
}