// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Facebook */

/* Implementation of the bpftool "gen" command: links BPF object files
 * ("gen object") and emits C skeleton headers ("gen skeleton"), including
 * the "light skeleton" variant used with -L/--use-loader.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/err.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <bpf/btf.h>

#include "json_writer.h"
#include "main.h"

#define MAX_OBJ_NAME_LEN 64

/* Turn an arbitrary string into a valid C identifier in place by replacing
 * every character that is not alphanumeric or '_' with '_'.
 */
static void sanitize_identifier(char *name)
{
	int i;

	for (i = 0; name[i]; i++)
		if (!isalnum(name[i]) && name[i] != '_')
			name[i] = '_';
}

/* Return true if @str starts with @prefix. */
static bool str_has_prefix(const char *str, const char *prefix)
{
	return strncmp(str, prefix, strlen(prefix)) == 0;
}

/* Return true if @str ends with @suffix. */
static bool str_has_suffix(const char *str, const char *suffix)
{
	size_t i, n1 = strlen(str), n2 = strlen(suffix);

	if (n1 < n2)
		return false;

	for (i = 0; i < n2; i++) {
		if (str[n1 - i - 1] != suffix[n2 - i - 1])
			return false;
	}

	return true;
}

/* Derive the skeleton's object name from @file: basename, minus a trailing
 * ".o" if present, sanitized into a C identifier. @name must be at least
 * MAX_OBJ_NAME_LEN bytes.
 */
static void get_obj_name(char *name, const char *file)
{
	/* Using basename() GNU version which doesn't modify arg. */
	strncpy(name, basename(file), MAX_OBJ_NAME_LEN - 1);
	name[MAX_OBJ_NAME_LEN - 1] = '\0';
	if (str_has_suffix(name, ".o"))
		name[strlen(name) - 2] = '\0';
	sanitize_identifier(name);
}

/* Build the "__<OBJ_NAME>_SKEL_H__" include-guard macro (upper-cased) into
 * @guard. Caller must size @guard for obj_name plus the "__/_SKEL_H__"
 * decoration (see header_guard[] in do_skeleton()).
 */
static void get_header_guard(char *guard, const char *obj_name)
{
	int i;

	sprintf(guard, "__%s_SKEL_H__", obj_name);
	for (i = 0; guard[i]; i++)
		guard[i] = toupper(guard[i]);
}

/* Derive a C identifier for @map into @buf. Non-internal maps use their own
 * name. Internal maps are matched against the known section suffixes; the
 * identifier is the suffix without its leading '.' (plus whatever follows),
 * sanitized. Returns false for internal maps with an unrecognized name.
 */
static bool get_map_ident(const struct bpf_map *map, char *buf, size_t buf_sz)
{
	static const char *sfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
	const char *name = bpf_map__name(map);
	int i, n;

	if (!bpf_map__is_internal(map)) {
		snprintf(buf, buf_sz, "%s", name);
		return true;
	}

	for (i = 0, n = ARRAY_SIZE(sfxs); i < n; i++) {
		const char *sfx = sfxs[i], *p;

		p = strstr(name, sfx);
		if (p) {
			/* p + 1 skips the leading '.' of the suffix */
			snprintf(buf, buf_sz, "%s", p + 1);
			sanitize_identifier(buf);
			return true;
		}
	}

	return false;
}

/* Derive a C identifier for a DATASEC named @sec_name into @buf, using the
 * same recognized prefixes as get_map_ident() so the two can be matched up.
 * Returns false if the section name has no recognized prefix.
 */
static bool get_datasec_ident(const char *sec_name, char *buf, size_t buf_sz)
{
	static const char *pfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
	int i, n;

	for (i = 0, n = ARRAY_SIZE(pfxs); i < n; i++) {
		const char *pfx = pfxs[i];

		if (str_has_prefix(sec_name, pfx)) {
			/* sec_name + 1 drops the leading '.' */
			snprintf(buf, buf_sz, "%s", sec_name + 1);
			sanitize_identifier(buf);
			return true;
		}
	}

	return false;
}

/* btf_dump printf callback: forward everything to stdout, where the whole
 * generated skeleton is written.
 */
static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args)
{
	vprintf(fmt, args);
}

/* Emit a "struct <obj>__<sec>" definition mirroring the memory layout of
 * one DATASEC: one field per non-static variable, with explicit __padN
 * filler fields so recorded BTF offsets are honored. Returns 0 on success
 * (including silently skipping unrecognized sections) or negative error.
 */
static int codegen_datasec_def(struct bpf_object *obj,
			       struct btf *btf,
			       struct btf_dump *d,
			       const struct btf_type *sec,
			       const char *obj_name)
{
	const char *sec_name = btf__name_by_offset(btf, sec->name_off);
	const struct btf_var_secinfo *sec_var = btf_var_secinfos(sec);
	int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec);
	char var_ident[256], sec_ident[256];
	bool strip_mods = false;

	if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
		return 0;

	/* .kconfig keeps const/volatile qualifiers; other sections drop them */
	if (strcmp(sec_name, ".kconfig") != 0)
		strip_mods = true;

	printf("	struct %s__%s {\n", obj_name, sec_ident);
	for (i = 0; i < vlen; i++, sec_var++) {
		const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
		const char *var_name = btf__name_by_offset(btf, var->name_off);
		DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
			.field_name = var_ident,
			.indent_level = 2,
			.strip_mods = strip_mods,
		);
		int need_off = sec_var->offset, align_off, align;
		__u32 var_type_id = var->type;

		/* static variables are not exposed through BPF skeleton */
		if (btf_var(var)->linkage == BTF_VAR_STATIC)
			continue;

		if (off > need_off) {
			p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n",
			      sec_name, i, need_off, off);
			return -EINVAL;
		}

		align = btf__align_of(btf, var->type);
		if (align <= 0) {
			p_err("Failed to determine alignment of variable '%s': %d",
			      var_name, align);
			return -EINVAL;
		}
		/* Assume 32-bit architectures when generating data section
		 * struct memory layout. Given bpftool can't know which target
		 * host architecture it's emitting skeleton for, we need to be
		 * conservative and assume 32-bit one to ensure enough padding
		 * bytes are generated for pointer and long types. This will
		 * still work correctly for 64-bit architectures, because in
		 * the worst case we'll generate unnecessary padding field,
		 * which on 64-bit architectures is not strictly necessary and
		 * would be handled by natural 8-byte alignment. But it still
		 * will be a correct memory layout, based on recorded offsets
		 * in BTF.
		 */
		if (align > 4)
			align = 4;

		align_off = (off + align - 1) / align * align;
		if (align_off != need_off) {
			printf("\t\tchar __pad%d[%d];\n",
			       pad_cnt, need_off - off);
			pad_cnt++;
		}

		/* sanitize variable name, e.g., for static vars inside
		 * a function, it's name is '<function name>.<variable name>',
		 * which we'll turn into a '<function name>_<variable name>'
		 */
		var_ident[0] = '\0';
		strncat(var_ident, var_name, sizeof(var_ident) - 1);
		sanitize_identifier(var_ident);

		printf("\t\t");
		err = btf_dump__emit_type_decl(d, var_type_id, &opts);
		if (err)
			return err;
		printf(";\n");

		off = sec_var->offset + sec_var->size;
	}
	printf("	} *%s;\n", sec_ident);
	return 0;
}

/* For every memory-mapped internal map, emit its data-section struct (via
 * codegen_datasec_def()) by matching the map to its DATASEC BTF type, or an
 * empty struct when no matching DATASEC exists. Returns 0 or negative error.
 */
static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	int n = btf__type_cnt(btf);
	struct btf_dump *d;
	struct bpf_map *map;
	const struct btf_type *sec;
	char sec_ident[256], map_ident[256];
	int i, err = 0;

	d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
	err = libbpf_get_error(d);
	if (err)
		return err;

	bpf_object__for_each_map(map, obj) {
		/* only generate definitions for memory-mapped internal maps */
		if (!bpf_map__is_internal(map))
			continue;
		if (!(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			continue;

		if (!get_map_ident(map, map_ident, sizeof(map_ident)))
			continue;

		/* find the DATASEC whose derived identifier matches the map */
		sec = NULL;
		for (i = 1; i < n; i++) {
			const struct btf_type *t = btf__type_by_id(btf, i);
			const char *name;

			if (!btf_is_datasec(t))
				continue;

			name = btf__str_by_offset(btf, t->name_off);
			if (!get_datasec_ident(name, sec_ident, sizeof(sec_ident)))
				continue;

			if (strcmp(sec_ident, map_ident) == 0) {
				sec = t;
				break;
			}
		}

		/* In some cases (e.g., sections like .rodata.cst16 containing
		 * compiler allocated string constants only) there will be
		 * special internal maps with no corresponding DATASEC BTF
		 * type. In such case, generate empty structs for each such
		 * map. It will still be memory-mapped and its contents
		 * accessible from user-space through BPF skeleton.
		 */
		if (!sec) {
			printf("	struct %s__%s {\n", obj_name, map_ident);
			printf("	} *%s;\n", map_ident);
		} else {
			err = codegen_datasec_def(obj, btf, d, sec, obj_name);
			if (err)
				goto out;
		}
	}


out:
	btf_dump__free(d);
	return err;
}

/* vprintf()-style emitter for skeleton code. The @template's first line
 * establishes a "baseline" indentation (a run of tabs ending in '\n');
 * every subsequent line must start with exactly that many tabs, which are
 * stripped, and trailing whitespace is trimmed. This lets templates be
 * written indented to match the surrounding C source. Malformed templates
 * (or OOM) terminate the process via exit(-1) — acceptable for a one-shot
 * CLI tool.
 */
static void codegen(const char *template, ...)
{
	const char *src, *end;
	int skip_tabs = 0, n;
	char *s, *dst;
	va_list args;
	char c;

	n = strlen(template);
	s = malloc(n + 1);
	if (!s)
		exit(-1);
	src = template;
	dst = s;

	/* find out "baseline" indentation to skip */
	while ((c = *src++)) {
		if (c == '\t') {
			skip_tabs++;
		} else if (c == '\n') {
			break;
		} else {
			p_err("unrecognized character at pos %td in template '%s': '%c'",
			      src - template - 1, template, c);
			free(s);
			exit(-1);
		}
	}

	while (*src) {
		/* skip baseline indentation tabs */
		for (n = skip_tabs; n > 0; n--, src++) {
			if (*src != '\t') {
				p_err("not enough tabs at pos %td in template '%s'",
				      src - template - 1, template);
				free(s);
				exit(-1);
			}
		}
		/* trim trailing whitespace */
		end = strchrnul(src, '\n');
		for (n = end - src; n > 0 && isspace(src[n - 1]); n--)
			;
		memcpy(dst, src, n);
		dst += n;
		if (*end)
			*dst++ = '\n';
		src = *end ? end + 1 : end;
	}
	*dst++ = '\0';

	/* print out using adjusted template */
	va_start(args, template);
	n = vprintf(s, args);
	va_end(args);

	free(s);
}

/* Print @data_sz bytes as C string-literal escapes ("\0" for NUL bytes,
 * "\xNN" otherwise), inserting a backslash-newline continuation whenever
 * the current output line would exceed 78 characters.
 */
static void print_hex(const char *data, int data_sz)
{
	int i, len;

	for (i = 0, len = 0; i < data_sz; i++) {
		int w = data[i] ? 4 : 2;	/* "\xNN" vs "\0" */

		len += w;
		if (len > 78) {
			printf("\\\n");
			len = w;
		}
		if (!data[i])
			printf("\\0");
		else
			printf("\\x%02x", (unsigned char)data[i]);
	}
}

/* Size of the mmap() region backing @map: value size rounded up to 8 bytes,
 * times max_entries, rounded up to the page size.
 */
static size_t bpf_map_mmap_sz(const struct bpf_map *map)
{
	long page_sz = sysconf(_SC_PAGE_SIZE);
	size_t map_sz;

	map_sz = (size_t)roundup(bpf_map__value_size(map), 8) * bpf_map__max_entries(map);
	map_sz = roundup(map_sz, page_sz);
	return map_sz;
}

/* Light-skeleton codegen: emit per-program <obj>__<prog>__attach() helpers
 * plus the aggregate <obj>__attach() and <obj>__detach() functions. Only
 * raw tracepoints and tracing programs get real auto-attach; other program
 * types get a no-op stub.
 */
static void codegen_attach_detach(struct bpf_object *obj, const char *obj_name)
{
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		const char *tp_name;

		codegen("\
			\n\
			\n\
			static inline int				    \n\
			%1$s__%2$s__attach(struct %1$s *skel)		    \n\
			{						    \n\
				int prog_fd = skel->progs.%2$s.prog_fd;	    \n\
			", obj_name, bpf_program__name(prog));

		switch (bpf_program__type(prog)) {
		case BPF_PROG_TYPE_RAW_TRACEPOINT:
			/* section name is "raw_tp/<name>"; use the part after '/' */
			tp_name = strchr(bpf_program__section_name(prog), '/') + 1;
			printf("\tint fd = skel_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name);
			break;
		case BPF_PROG_TYPE_TRACING:
			if (bpf_program__expected_attach_type(prog) == BPF_TRACE_ITER)
				printf("\tint fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);\n");
			else
				printf("\tint fd = skel_raw_tracepoint_open(NULL, prog_fd);\n");
			break;
		default:
			/* (void)prog_fd silences unused-variable warnings */
			printf("\tint fd = ((void)prog_fd, 0); /* auto-attach not supported */\n");
			break;
		}
		codegen("\
			\n\
			\n\
				if (fd > 0)				    \n\
					skel->links.%1$s_fd = fd;	    \n\
				return fd;				    \n\
			}						    \n\
			", bpf_program__name(prog));
	}

	codegen("\
		\n\
		\n\
		static inline int					    \n\
		%1$s__attach(struct %1$s *skel)				    \n\
		{							    \n\
			int ret = 0;					    \n\
								    	    \n\
		", obj_name);

	bpf_object__for_each_program(prog, obj) {
		/* keep the first failure's error code, but attach all programs */
		codegen("\
			\n\
				ret = ret < 0 ? ret : %1$s__%2$s__attach(skel);   \n\
			", obj_name, bpf_program__name(prog));
	}

	codegen("\
		\n\
			return ret < 0 ? ret : 0;			    \n\
		}							    \n\
								    	    \n\
		static inline void					    \n\
		%1$s__detach(struct %1$s *skel)				    \n\
		{							    \n\
		", obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				skel_closenz(skel->links.%1$s_fd);	    \n\
			", bpf_program__name(prog));
	}

	codegen("\
		\n\
		}							    \n\
		");
}

/* Light-skeleton codegen: emit <obj>__destroy(), which detaches links,
 * closes all program/map FDs, frees mmap'ed map data, and frees the
 * skeleton itself.
 */
static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
{
	struct bpf_program *prog;
	struct bpf_map *map;
	char ident[256];

	codegen("\
		\n\
		static void						    \n\
		%1$s__destroy(struct %1$s *skel)			    \n\
		{							    \n\
			if (!skel)					    \n\
				return;					    \n\
			%1$s__detach(skel);				    \n\
		",
		obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				skel_closenz(skel->progs.%1$s.prog_fd);	    \n\
			", bpf_program__name(prog));
	}

	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;
		if (bpf_map__is_internal(map) &&
		    (bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			printf("\tskel_free_map_data(skel->%1$s, skel->maps.%1$s.initial_value, %2$zd);\n",
			       ident, bpf_map_mmap_sz(map));
		codegen("\
			\n\
				skel_closenz(skel->maps.%1$s.map_fd);	    \n\
			", ident);
	}
	codegen("\
		\n\
			skel_free(skel);				    \n\
		}							    \n\
		",
		obj_name);
}

/* Light-skeleton codegen: load @obj through libbpf's loader-program
 * generator (bpf_object__gen_loader() + bpf_object__load() populate @opts
 * with loader instructions and data), then emit the __open/__load/
 * __open_and_load functions that replay that loader via
 * bpf_load_and_run(). Returns 0 or negative error.
 *
 * NOTE(review): some codegen() calls below pass obj_name to templates with
 * no matching conversion spec — harmless extra varargs, but worth tidying.
 */
static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard)
{
	DECLARE_LIBBPF_OPTS(gen_loader_opts, opts);
	struct bpf_map *map;
	char ident[256];
	int err = 0;

	err = bpf_object__gen_loader(obj, &opts);
	if (err)
		return err;

	err = bpf_object__load(obj);
	if (err) {
		p_err("failed to load object file");
		goto out;
	}
	/* If there was no error during load then gen_loader_opts
	 * are populated with the loader program.
	 */

	/* finish generating 'struct skel' */
	codegen("\
		\n\
		};							    \n\
		", obj_name);


	codegen_attach_detach(obj, obj_name);

	codegen_destroy(obj, obj_name);

	codegen("\
		\n\
		static inline struct %1$s *				    \n\
		%1$s__open(void)					    \n\
		{							    \n\
			struct %1$s *skel;				    \n\
								    	    \n\
			skel = skel_alloc(sizeof(*skel));		    \n\
			if (!skel)					    \n\
				goto cleanup;				    \n\
			skel->ctx.sz = (void *)&skel->links - (void *)skel; \n\
		",
		obj_name, opts.data_sz);
	bpf_object__for_each_map(map, obj) {
		const void *mmap_data = NULL;
		size_t mmap_size = 0;

		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		if (!bpf_map__is_internal(map) ||
		    !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			continue;

		/* embed the map's initial data as a hex string literal */
		codegen("\
			\n\
				skel->%1$s = skel_prep_map_data((void *)\"\\ \n\
			", ident);
		mmap_data = bpf_map__initial_value(map, &mmap_size);
		print_hex(mmap_data, mmap_size);
		codegen("\
			\n\
			\", %1$zd, %2$zd);				    \n\
				if (!skel->%3$s)			    \n\
					goto cleanup;			    \n\
				skel->maps.%3$s.initial_value = (__u64) (long) skel->%3$s;\n\
			", bpf_map_mmap_sz(map), mmap_size, ident);
	}
	codegen("\
		\n\
			return skel;					    \n\
		cleanup:						    \n\
			%1$s__destroy(skel);				    \n\
			return NULL;					    \n\
		}							    \n\
								    	    \n\
		static inline int					    \n\
		%1$s__load(struct %1$s *skel)				    \n\
		{							    \n\
			struct bpf_load_and_run_opts opts = {};		    \n\
			int err;					    \n\
								    	    \n\
			opts.ctx = (struct bpf_loader_ctx *)skel;	    \n\
			opts.data_sz = %2$d;				    \n\
			opts.data = (void *)\"\\			    \n\
		",
		obj_name, opts.data_sz);
	print_hex(opts.data, opts.data_sz);
	codegen("\
		\n\
		\";							    \n\
		");

	codegen("\
		\n\
			opts.insns_sz = %d;				    \n\
			opts.insns = (void *)\"\\			    \n\
		",
		opts.insns_sz);
	print_hex(opts.insns, opts.insns_sz);
	codegen("\
		\n\
		\";							    \n\
			err = bpf_load_and_run(&opts);			    \n\
			if (err < 0)					    \n\
				return err;				    \n\
		", obj_name);
	bpf_object__for_each_map(map, obj) {
		const char *mmap_flags;

		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		if (!bpf_map__is_internal(map) ||
		    !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			continue;

		if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG)
			mmap_flags = "PROT_READ";
		else
			mmap_flags = "PROT_READ | PROT_WRITE";

		codegen("\
			\n\
				skel->%1$s = skel_finalize_map_data(&skel->maps.%1$s.initial_value, \n\
								    %2$zd, %3$s, skel->maps.%1$s.map_fd);\n\
				if (!skel->%1$s)			    \n\
					return -ENOMEM;			    \n\
			",
			ident, bpf_map_mmap_sz(map), mmap_flags);
	}
	codegen("\
		\n\
			return 0;					    \n\
		}							    \n\
								    	    \n\
		static inline struct %1$s *				    \n\
		%1$s__open_and_load(void)				    \n\
		{							    \n\
			struct %1$s *skel;				    \n\
								    	    \n\
			skel = %1$s__open();				    \n\
			if (!skel)					    \n\
				return NULL;				    \n\
			if (%1$s__load(skel)) {				    \n\
				%1$s__destroy(skel);			    \n\
				return NULL;				    \n\
			}						    \n\
			return skel;					    \n\
		}							    \n\
		", obj_name);

	codegen("\
		\n\
		\n\
		#endif /* %s */						    \n\
		",
		header_guard);
	err = 0;
out:
	return err;
}

/* "gen skeleton FILE [name OBJ]": open the BPF object FILE and write a C
 * skeleton header to stdout. With -L/--use-loader the light-skeleton path
 * (gen_trace()) is taken instead of the regular libbpf skeleton API path.
 * Returns 0 on success, negative on error.
 */
static int do_skeleton(int argc, char **argv)
{
	char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
	size_t i, map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz;
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
	char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
	struct bpf_object *obj = NULL;
	const char *file;
	char ident[256];
	struct bpf_program *prog;
	int fd, err = -1;
	struct bpf_map *map;
	struct btf *btf;
	struct stat st;

	if (!REQ_ARGS(1)) {
		usage();
		return -1;
	}
	file = GET_ARG();

	/* parse optional "name OBJECT_NAME" keyword arguments */
	while (argc) {
		if (!REQ_ARGS(2))
			return -1;

		if (is_prefix(*argv, "name")) {
			NEXT_ARG();

			if (obj_name[0] != '\0') {
				p_err("object name already specified");
				return -1;
			}

			strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
			obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
		} else {
			p_err("unknown arg %s", *argv);
			return -1;
		}

		NEXT_ARG();
	}

	/* NOTE(review): unreachable — the while loop above only exits once
	 * argc == 0, so this check can never fire.
	 */
	if (argc) {
		p_err("extra unknown arguments");
		return -1;
	}

	if (stat(file, &st)) {
		p_err("failed to stat() %s: %s", file, strerror(errno));
		return -1;
	}
	file_sz = st.st_size;
	mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
	fd = open(file, O_RDONLY);
	if (fd < 0) {
		p_err("failed to open() %s: %s", file, strerror(errno));
		return -1;
	}
	obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
	if (obj_data == MAP_FAILED) {
		obj_data = NULL;
		p_err("failed to mmap() %s: %s", file, strerror(errno));
		goto out;
	}
	if (obj_name[0] == '\0')
		get_obj_name(obj_name, file);
	opts.object_name = obj_name;
	if (verifier_logs)
		/* log_level1 + log_level2 + stats, but not stable UAPI */
		opts.kernel_log_level = 1 + 2 + 4;
	obj = bpf_object__open_mem(obj_data, file_sz, &opts);
	err = libbpf_get_error(obj);
	if (err) {
		char err_buf[256];

		libbpf_strerror(err, err_buf, sizeof(err_buf));
		p_err("failed to open BPF object file: %s", err_buf);
		obj = NULL;
		goto out;
	}

	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident))) {
			p_err("ignoring unrecognized internal map '%s'...",
			      bpf_map__name(map));
			continue;
		}
		map_cnt++;
	}
	bpf_object__for_each_program(prog, obj) {
		prog_cnt++;
	}

	get_header_guard(header_guard, obj_name);
	if (use_loader) {
		codegen("\
		\n\
		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */   \n\
		/* THIS FILE IS AUTOGENERATED! */			    \n\
		#ifndef %2$s						    \n\
		#define %2$s						    \n\
								    	    \n\
		#include <bpf/skel_internal.h>				    \n\
								    	    \n\
		struct %1$s {						    \n\
			struct bpf_loader_ctx ctx;			    \n\
		",
		obj_name, header_guard
		);
	} else {
		codegen("\
		\n\
		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */   \n\
								    	    \n\
		/* THIS FILE IS AUTOGENERATED! */			    \n\
		#ifndef %2$s						    \n\
		#define %2$s						    \n\
								    	    \n\
		#include <errno.h>					    \n\
		#include <stdlib.h>					    \n\
		#include <bpf/libbpf.h>					    \n\
								    	    \n\
		struct %1$s {						    \n\
			struct bpf_object_skeleton *skeleton;		    \n\
			struct bpf_object *obj;				    \n\
		",
		obj_name, header_guard
		);
	}

	/* emit maps/progs/links sub-structs; field types differ between
	 * regular skeleton (libbpf pointers) and light skeleton (descriptors)
	 */
	if (map_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_map(map, obj) {
			if (!get_map_ident(map, ident, sizeof(ident)))
				continue;
			if (use_loader)
				printf("\t\tstruct bpf_map_desc %s;\n", ident);
			else
				printf("\t\tstruct bpf_map *%s;\n", ident);
		}
		printf("\t} maps;\n");
	}

	if (prog_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			if (use_loader)
				printf("\t\tstruct bpf_prog_desc %s;\n",
				       bpf_program__name(prog));
			else
				printf("\t\tstruct bpf_program *%s;\n",
				       bpf_program__name(prog));
		}
		printf("\t} progs;\n");
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			if (use_loader)
				printf("\t\tint %s_fd;\n",
				       bpf_program__name(prog));
			else
				printf("\t\tstruct bpf_link *%s;\n",
				       bpf_program__name(prog));
		}
		printf("\t} links;\n");
	}

	btf = bpf_object__btf(obj);
	if (btf) {
		err = codegen_datasecs(obj, obj_name);
		if (err)
			goto out;
	}
	if (use_loader) {
		err = gen_trace(obj, obj_name, header_guard);
		goto out;
	}

	/* regular skeleton: lifecycle wrappers around libbpf skeleton API */
	codegen("\
		\n\
		};							    \n\
								    	    \n\
		static void						    \n\
		%1$s__destroy(struct %1$s *obj)				    \n\
		{							    \n\
			if (!obj)					    \n\
				return;					    \n\
			if (obj->skeleton)				    \n\
				bpf_object__destroy_skeleton(obj->skeleton);\n\
			free(obj);					    \n\
		}							    \n\
								    	    \n\
		static inline int					    \n\
		%1$s__create_skeleton(struct %1$s *obj);		    \n\
								    	    \n\
		static inline struct %1$s *				    \n\
		%1$s__open_opts(const struct bpf_object_open_opts *opts)    \n\
		{							    \n\
			struct %1$s *obj;				    \n\
			int err;					    \n\
								    	    \n\
			obj = (struct %1$s *)calloc(1, sizeof(*obj));	    \n\
			if (!obj) {					    \n\
				errno = ENOMEM;				    \n\
				return NULL;				    \n\
			}						    \n\
								    	    \n\
			err = %1$s__create_skeleton(obj);		    \n\
			if (err)					    \n\
				goto err_out;				    \n\
								    	    \n\
			err = bpf_object__open_skeleton(obj->skeleton, opts);\n\
			if (err)					    \n\
				goto err_out;				    \n\
								    	    \n\
			return obj;					    \n\
		err_out:						    \n\
			%1$s__destroy(obj);				    \n\
			errno = -err;					    \n\
			return NULL;					    \n\
		}							    \n\
								    	    \n\
		static inline struct %1$s *				    \n\
		%1$s__open(void)					    \n\
		{							    \n\
			return %1$s__open_opts(NULL);			    \n\
		}							    \n\
								    	    \n\
		static inline int					    \n\
		%1$s__load(struct %1$s *obj)				    \n\
		{							    \n\
			return bpf_object__load_skeleton(obj->skeleton);    \n\
		}							    \n\
								    	    \n\
		static inline struct %1$s *				    \n\
		%1$s__open_and_load(void)				    \n\
		{							    \n\
			struct %1$s *obj;				    \n\
			int err;					    \n\
								    	    \n\
			obj = %1$s__open();				    \n\
			if (!obj)					    \n\
				return NULL;				    \n\
			err = %1$s__load(obj);				    \n\
			if (err) {					    \n\
				%1$s__destroy(obj);			    \n\
				errno = -err;				    \n\
				return NULL;				    \n\
			}						    \n\
			return obj;					    \n\
		}							    \n\
								    	    \n\
		static inline int					    \n\
		%1$s__attach(struct %1$s *obj)				    \n\
		{							    \n\
			return bpf_object__attach_skeleton(obj->skeleton);  \n\
		}							    \n\
								    	    \n\
		static inline void					    \n\
		%1$s__detach(struct %1$s *obj)				    \n\
		{							    \n\
			return bpf_object__detach_skeleton(obj->skeleton);  \n\
		}							    \n\
		",
		obj_name
	);

	codegen("\
		\n\
		\n\
		static inline const void *%1$s__elf_bytes(size_t *sz);	    \n\
								    	    \n\
		static inline int					    \n\
		%1$s__create_skeleton(struct %1$s *obj)			    \n\
		{							    \n\
			struct bpf_object_skeleton *s;			    \n\
								    	    \n\
			s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
			if (!s)						    \n\
				goto err;				    \n\
								    	    \n\
			s->sz = sizeof(*s);				    \n\
			s->name = \"%1$s\";				    \n\
			s->obj = &obj->obj;				    \n\
		",
		obj_name
	);
	if (map_cnt) {
		codegen("\
			\n\
								    	    \n\
				/* maps */				    \n\
				s->map_cnt = %zu;			    \n\
				s->map_skel_sz = sizeof(*s->maps);	    \n\
				s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
				if (!s->maps)				    \n\
					goto err;			    \n\
			",
			map_cnt
		);
		i = 0;
		bpf_object__for_each_map(map, obj) {
			if (!get_map_ident(map, ident, sizeof(ident)))
				continue;

			codegen("\
				\n\
								    	    \n\
					s->maps[%zu].name = \"%s\";	    \n\
					s->maps[%zu].map = &obj->maps.%s;   \n\
				",
				i, bpf_map__name(map), i, ident);
			/* memory-mapped internal maps */
			if (bpf_map__is_internal(map) &&
			    (bpf_map__map_flags(map) & BPF_F_MMAPABLE)) {
				printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
				       i, ident);
			}
			i++;
		}
	}
	if (prog_cnt) {
		codegen("\
			\n\
								    	    \n\
				/* programs */				    \n\
				s->prog_cnt = %zu;			    \n\
				s->prog_skel_sz = sizeof(*s->progs);	    \n\
				s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
				if (!s->progs)				    \n\
					goto err;			    \n\
			",
			prog_cnt
		);
		i = 0;
		bpf_object__for_each_program(prog, obj) {
			codegen("\
				\n\
								    	    \n\
					s->progs[%1$zu].name = \"%2$s\";    \n\
					s->progs[%1$zu].prog = &obj->progs.%2$s;\n\
					s->progs[%1$zu].link = &obj->links.%2$s;\n\
				",
				i, bpf_program__name(prog));
			i++;
		}
	}
	/* NOTE(review): file_sz is size_t but is consumed by %1$d below;
	 * works for ELF files < INT_MAX but should arguably be %1$zu.
	 */
	codegen("\
		\n\
								    	    \n\
			s->data = (void *)%2$s__elf_bytes(&s->data_sz);	    \n\
								    	    \n\
			obj->skeleton = s;				    \n\
			return 0;					    \n\
		err:							    \n\
			bpf_object__destroy_skeleton(s);		    \n\
			return -ENOMEM;					    \n\
		}							    \n\
								    	    \n\
		static inline const void *%2$s__elf_bytes(size_t *sz)	    \n\
		{							    \n\
			*sz = %1$d;					    \n\
			return (const void *)\"\\			    \n\
		"
	, file_sz, obj_name);

	/* embed contents of BPF object file */
	print_hex(obj_data, file_sz);

	codegen("\
		\n\
		\";							    \n\
		}							    \n\
								    	    \n\
		#endif /* %s */						    \n\
		",
		header_guard);
	err = 0;
out:
	bpf_object__close(obj);
	if (obj_data)
		munmap(obj_data, mmap_sz);
	close(fd);
	return err;
}

/* "gen object OUTPUT_FILE INPUT_FILE...": statically link one or more BPF
 * object files into OUTPUT_FILE using libbpf's linker. Returns 0 or
 * negative error.
 */
static int do_object(int argc, char **argv)
{
	struct bpf_linker *linker;
	const char *output_file, *file;
	int err = 0;

	if (!REQ_ARGS(2)) {
		usage();
		return -1;
	}

	output_file = GET_ARG();

	linker = bpf_linker__new(output_file, NULL);
	if (!linker) {
		p_err("failed to create BPF linker instance");
		return -1;
	}

	while (argc) {
		file = GET_ARG();

		err = bpf_linker__add_file(linker, file, NULL);
		if (err) {
			p_err("failed to link '%s': %s (%d)", file, strerror(err), err);
			goto out;
		}
	}

	err = bpf_linker__finalize(linker);
	if (err) {
		p_err("failed to finalize ELF file: %s (%d)", strerror(err), err);
		goto out;
	}

	err = 0;
out:
	bpf_linker__free(linker);
	return err;
}

/* Print usage for the "gen" command (or JSON null in JSON mode). */
static int do_help(int argc, char **argv)
{
	if (json_output) {
		jsonw_null(json_wtr);
		return 0;
	}

	fprintf(stderr,
		"Usage: %1$s %2$s object OUTPUT_FILE INPUT_FILE [INPUT_FILE...]\n"
		"       %1$s %2$s skeleton FILE [name OBJECT_NAME]\n"
		"       %1$s %2$s help\n"
		"\n"
		"       " HELP_SPEC_OPTIONS " |\n"
		"                    {-L|--use-loader} }\n"
		"",
		bin_name, "gen");

	return 0;
}

/* Subcommand dispatch table for "bpftool gen". */
static const struct cmd cmds[] = {
	{ "object",	do_object },
	{ "skeleton",	do_skeleton },
	{ "help",	do_help },
	{ 0 }
};

/* Entry point for the "gen" command: dispatch to a subcommand handler. */
int do_gen(int argc, char **argv)
{
	return cmd_select(cmds, argc, argv, do_help);
}