// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Facebook */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/err.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/libbpf_internal.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <bpf/btf.h>

#include "json_writer.h"
#include "main.h"

#define MAX_OBJ_NAME_LEN 64

static void sanitize_identifier(char *name)
{
	int i;

	for (i = 0; name[i]; i++)
		if (!isalnum(name[i]) && name[i] != '_')
			name[i] = '_';
}

static bool str_has_prefix(const char *str, const char *prefix)
{
	return strncmp(str, prefix, strlen(prefix)) == 0;
}

static bool str_has_suffix(const char *str, const char *suffix)
{
	size_t i, n1 = strlen(str), n2 = strlen(suffix);

	if (n1 < n2)
		return false;

	for (i = 0; i < n2; i++) {
		if (str[n1 - i - 1] != suffix[n2 - i - 1])
			return false;
	}

	return true;
}

static void get_obj_name(char *name, const char *file)
{
	/* Using basename() GNU version which doesn't modify arg. */
	strncpy(name, basename(file), MAX_OBJ_NAME_LEN - 1);
	name[MAX_OBJ_NAME_LEN - 1] = '\0';
	if (str_has_suffix(name, ".o"))
		name[strlen(name) - 2] = '\0';
	sanitize_identifier(name);
}

static void get_header_guard(char *guard, const char *obj_name, const char *suffix)
{
	int i;

	sprintf(guard, "__%s_%s__", obj_name, suffix);
	for (i = 0; guard[i]; i++)
		guard[i] = toupper(guard[i]);
}

static bool get_map_ident(const struct bpf_map *map, char *buf, size_t buf_sz)
{
	static const char *sfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
	const char *name = bpf_map__name(map);
	int i, n;

	if (!bpf_map__is_internal(map)) {
		snprintf(buf, buf_sz, "%s", name);
		return true;
	}

	for (i = 0, n = ARRAY_SIZE(sfxs); i < n; i++) {
		const char *sfx = sfxs[i], *p;

		p = strstr(name, sfx);
		if (p) {
			snprintf(buf, buf_sz, "%s", p + 1);
			sanitize_identifier(buf);
			return true;
		}
	}

	return false;
}

static bool get_datasec_ident(const char *sec_name, char *buf, size_t buf_sz)
{
	static const char *pfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
	int i, n;

	for (i = 0, n = ARRAY_SIZE(pfxs); i < n; i++) {
		const char *pfx = pfxs[i];

		if (str_has_prefix(sec_name, pfx)) {
			snprintf(buf, buf_sz, "%s", sec_name + 1);
			sanitize_identifier(buf);
			return true;
		}
	}

	return false;
}
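
/* For illustration (names are hypothetical): for an internal map named
 * "example.rodata", get_map_ident() yields "rodata", and for a DATASEC named
 * ".data.counters", get_datasec_ident() yields "data_counters": the leading
 * dot is dropped and any remaining dots become underscores.
 */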

static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args)
{
	vprintf(fmt, args);
}

static int codegen_datasec_def(struct bpf_object *obj,
			       struct btf *btf,
			       struct btf_dump *d,
			       const struct btf_type *sec,
			       const char *obj_name)
{
	const char *sec_name = btf__name_by_offset(btf, sec->name_off);
	const struct btf_var_secinfo *sec_var = btf_var_secinfos(sec);
	int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec);
	char var_ident[256], sec_ident[256];
	bool strip_mods = false;

	if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
		return 0;

	if (strcmp(sec_name, ".kconfig") != 0)
		strip_mods = true;

	printf("\tstruct %s__%s {\n", obj_name, sec_ident);
	for (i = 0; i < vlen; i++, sec_var++) {
		const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
		const char *var_name = btf__name_by_offset(btf, var->name_off);
		DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
			.field_name = var_ident,
			.indent_level = 2,
			.strip_mods = strip_mods,
		);
		int need_off = sec_var->offset, align_off, align;
		__u32 var_type_id = var->type;

		/* static variables are not exposed through BPF skeleton */
		if (btf_var(var)->linkage == BTF_VAR_STATIC)
			continue;

		if (off > need_off) {
			p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n",
			      sec_name, i, need_off, off);
			return -EINVAL;
		}

		align = btf__align_of(btf, var->type);
		if (align <= 0) {
			p_err("Failed to determine alignment of variable '%s': %d",
			      var_name, align);
			return -EINVAL;
		}
		/* Assume 32-bit architectures when generating data section
		 * struct memory layout. Given bpftool can't know which target
		 * host architecture it's emitting skeleton for, we need to be
		 * conservative and assume 32-bit one to ensure enough padding
		 * bytes are generated for pointer and long types. This will
		 * still work correctly for 64-bit architectures, because in
		 * the worst case we'll generate unnecessary padding field,
		 * which on 64-bit architectures is not strictly necessary and
		 * would be handled by natural 8-byte alignment. But it still
		 * will be a correct memory layout, based on recorded offsets
		 * in BTF.
		 */
		if (align > 4)
			align = 4;

		align_off = (off + align - 1) / align * align;
		if (align_off != need_off) {
			printf("\t\tchar __pad%d[%d];\n",
			       pad_cnt, need_off - off);
			pad_cnt++;
		}

		/* sanitize variable name, e.g., for static vars inside
		 * a function, its name is '<function name>.<variable name>',
		 * which we'll turn into a '<function name>_<variable name>'
		 */
		var_ident[0] = '\0';
		strncat(var_ident, var_name, sizeof(var_ident) - 1);
		sanitize_identifier(var_ident);

		printf("\t\t");
		err = btf_dump__emit_type_decl(d, var_type_id, &opts);
		if (err)
			return err;
		printf(";\n");

		off = sec_var->offset + sec_var->size;
	}
	printf("\t} *%s;\n", sec_ident);
	return 0;
}
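
/* Sketch of the output shape produced above, assuming a hypothetical object
 * "example" with a single .bss variable "int counter":
 *
 *	struct example__bss {
 *		int counter;
 *	} *bss;
 */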

static const struct btf_type *find_type_for_map(struct btf *btf, const char *map_ident)
{
	int n = btf__type_cnt(btf), i;
	char sec_ident[256];

	for (i = 1; i < n; i++) {
		const struct btf_type *t = btf__type_by_id(btf, i);
		const char *name;

		if (!btf_is_datasec(t))
			continue;

		name = btf__str_by_offset(btf, t->name_off);
		if (!get_datasec_ident(name, sec_ident, sizeof(sec_ident)))
			continue;

		if (strcmp(sec_ident, map_ident) == 0)
			return t;
	}
	return NULL;
}

static bool is_internal_mmapable_map(const struct bpf_map *map, char *buf, size_t sz)
{
	if (!bpf_map__is_internal(map) || !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
		return false;

	if (!get_map_ident(map, buf, sz))
		return false;

	return true;
}

static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	struct btf_dump *d;
	struct bpf_map *map;
	const struct btf_type *sec;
	char map_ident[256];
	int err = 0;

	d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
	if (!d)
		return -errno;

	bpf_object__for_each_map(map, obj) {
		/* only generate definitions for memory-mapped internal maps */
		if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
			continue;

		sec = find_type_for_map(btf, map_ident);

		/* In some cases (e.g., sections like .rodata.cst16 containing
		 * compiler allocated string constants only) there will be
		 * special internal maps with no corresponding DATASEC BTF
		 * type. In such cases, generate an empty struct for each such
		 * map. It will still be memory-mapped and its contents
		 * accessible from user-space through BPF skeleton.
		 */
		if (!sec) {
			printf("\tstruct %s__%s {\n", obj_name, map_ident);
			printf("\t} *%s;\n", map_ident);
		} else {
			err = codegen_datasec_def(obj, btf, d, sec, obj_name);
			if (err)
				goto out;
		}
	}

out:
	btf_dump__free(d);
	return err;
}

static bool btf_is_ptr_to_func_proto(const struct btf *btf,
				     const struct btf_type *v)
{
	return btf_is_ptr(v) && btf_is_func_proto(btf__type_by_id(btf, v->type));
}

static int codegen_subskel_datasecs(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	struct btf_dump *d;
	struct bpf_map *map;
	const struct btf_type *sec, *var;
	const struct btf_var_secinfo *sec_var;
	int i, err = 0, vlen;
	char map_ident[256], sec_ident[256];
	bool strip_mods = false, needs_typeof = false;
	const char *sec_name, *var_name;
	__u32 var_type_id;

	d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
	if (!d)
		return -errno;

	bpf_object__for_each_map(map, obj) {
		/* only generate definitions for memory-mapped internal maps */
		if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
			continue;

		sec = find_type_for_map(btf, map_ident);
		if (!sec)
			continue;

		sec_name = btf__name_by_offset(btf, sec->name_off);
		if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
			continue;

		strip_mods = strcmp(sec_name, ".kconfig") != 0;
		printf("\tstruct %s__%s {\n", obj_name, sec_ident);

		sec_var = btf_var_secinfos(sec);
		vlen = btf_vlen(sec);
		for (i = 0; i < vlen; i++, sec_var++) {
			DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
				.indent_level = 2,
				.strip_mods = strip_mods,
				/* we'll print the name separately */
				.field_name = "",
			);

			var = btf__type_by_id(btf, sec_var->type);
			var_name = btf__name_by_offset(btf, var->name_off);
			var_type_id = var->type;

			/* static variables are not exposed through BPF skeleton */
			if (btf_var(var)->linkage == BTF_VAR_STATIC)
				continue;

			/* The datasec member has KIND_VAR but we want the
			 * underlying type of the variable (e.g. KIND_INT).
			 */
			var = skip_mods_and_typedefs(btf, var->type, NULL);

			printf("\t\t");
			/* Func and array members require special handling.
			 * Instead of producing `typename *var`, they produce
			 * `typeof(typename) *var`. This allows us to keep a
			 * similar syntax where the identifier is just prefixed
			 * by *, allowing us to ignore C declaration minutiae.
			 */
			needs_typeof = btf_is_array(var) || btf_is_ptr_to_func_proto(btf, var);
			if (needs_typeof)
				printf("typeof(");

			err = btf_dump__emit_type_decl(d, var_type_id, &opts);
			if (err)
				goto out;

			if (needs_typeof)
				printf(")");

			printf(" *%s;\n", var_name);
		}
		printf("\t} %s;\n", sec_ident);
	}

out:
	btf_dump__free(d);
	return err;
}
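
/* A short sketch of why typeof() is emitted above: for a hypothetical global
 * "int arr[4]", the plain pointer declaration would have to be spelled
 * "int (*arr)[4]", while "typeof(int[4]) *arr;" keeps the uniform
 * "<type> *<name>;" shape that the subskeleton codegen relies on.
 */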

static void codegen(const char *template, ...)
{
	const char *src, *end;
	int skip_tabs = 0, n;
	char *s, *dst;
	va_list args;
	char c;

	n = strlen(template);
	s = malloc(n + 1);
	if (!s)
		exit(-1);
	src = template;
	dst = s;

	/* find out "baseline" indentation to skip */
	while ((c = *src++)) {
		if (c == '\t') {
			skip_tabs++;
		} else if (c == '\n') {
			break;
		} else {
			p_err("unrecognized character at pos %td in template '%s': '%c'",
			      src - template - 1, template, c);
			free(s);
			exit(-1);
		}
	}

	while (*src) {
		/* skip baseline indentation tabs */
		for (n = skip_tabs; n > 0; n--, src++) {
			if (*src != '\t') {
				p_err("not enough tabs at pos %td in template '%s'",
				      src - template - 1, template);
				free(s);
				exit(-1);
			}
		}
		/* trim trailing whitespace */
		end = strchrnul(src, '\n');
		for (n = end - src; n > 0 && isspace(src[n - 1]); n--)
			;
		memcpy(dst, src, n);
		dst += n;
		if (*end)
			*dst++ = '\n';
		src = *end ? end + 1 : end;
	}
	*dst++ = '\0';

	/* print out using adjusted template */
	va_start(args, template);
	n = vprintf(s, args);
	va_end(args);

	free(s);
}
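
/* A minimal sketch of the template convention codegen() expects: the first
 * template line defines the baseline tab indentation, which is stripped from
 * every subsequent line, and whitespace before each "\n" is trimmed, so
 *
 *	codegen("\
 *		\n\
 *		int x = %d;	\n\
 *		", 42);
 *
 * prints "int x = 42;" at column zero, followed by a newline.
 */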

static void print_hex(const char *data, int data_sz)
{
	int i, len;

	for (i = 0, len = 0; i < data_sz; i++) {
		int w = data[i] ? 4 : 2;

		len += w;
		if (len > 78) {
			printf("\\\n");
			len = w;
		}
		if (!data[i])
			printf("\\0");
		else
			printf("\\x%02x", (unsigned char)data[i]);
	}
}

static size_t bpf_map_mmap_sz(const struct bpf_map *map)
{
	long page_sz = sysconf(_SC_PAGE_SIZE);
	size_t map_sz;

	map_sz = (size_t)roundup(bpf_map__value_size(map), 8) * bpf_map__max_entries(map);
	map_sz = roundup(map_sz, page_sz);
	return map_sz;
}

/* Emit type size asserts for all top-level fields in memory-mapped internal
 * maps.
 */
static void codegen_asserts(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	struct bpf_map *map;
	struct btf_var_secinfo *sec_var;
	int i, vlen;
	const struct btf_type *sec;
	char map_ident[256], var_ident[256];

	if (!btf)
		return;

	codegen("\
		\n\
		__attribute__((unused)) static void			    \n\
		%1$s__assert(struct %1$s *s __attribute__((unused)))	    \n\
		{							    \n\
		#ifdef __cplusplus					    \n\
		#define _Static_assert static_assert			    \n\
		#endif							    \n\
		", obj_name);

	bpf_object__for_each_map(map, obj) {
		if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
			continue;

		sec = find_type_for_map(btf, map_ident);
		if (!sec) {
			/* best effort, couldn't find the type for this map */
			continue;
		}

		sec_var = btf_var_secinfos(sec);
		vlen = btf_vlen(sec);

		for (i = 0; i < vlen; i++, sec_var++) {
			const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
			const char *var_name = btf__name_by_offset(btf, var->name_off);
			long var_size;

			/* static variables are not exposed through BPF skeleton */
			if (btf_var(var)->linkage == BTF_VAR_STATIC)
				continue;

			var_size = btf__resolve_size(btf, var->type);
			if (var_size < 0)
				continue;

			var_ident[0] = '\0';
			strncat(var_ident, var_name, sizeof(var_ident) - 1);
			sanitize_identifier(var_ident);

			printf("\t_Static_assert(sizeof(s->%s->%s) == %ld, \"unexpected size of '%s'\");\n",
			       map_ident, var_ident, var_size, var_ident);
		}
	}
	codegen("\
		\n\
		#ifdef __cplusplus					    \n\
		#undef _Static_assert					    \n\
		#endif							    \n\
		}							    \n\
		");
}
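
/* For a hypothetical "int counter" in .bss of object "example", the loop
 * above prints:
 *
 *	_Static_assert(sizeof(s->bss->counter) == 4, "unexpected size of 'counter'");
 */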

static void codegen_attach_detach(struct bpf_object *obj, const char *obj_name)
{
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		const char *tp_name;

		codegen("\
			\n\
			\n\
			static inline int				    \n\
			%1$s__%2$s__attach(struct %1$s *skel)		    \n\
			{						    \n\
				int prog_fd = skel->progs.%2$s.prog_fd;	    \n\
			", obj_name, bpf_program__name(prog));

		switch (bpf_program__type(prog)) {
		case BPF_PROG_TYPE_RAW_TRACEPOINT:
			tp_name = strchr(bpf_program__section_name(prog), '/') + 1;
			printf("\tint fd = skel_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name);
			break;
		case BPF_PROG_TYPE_TRACING:
		case BPF_PROG_TYPE_LSM:
			if (bpf_program__expected_attach_type(prog) == BPF_TRACE_ITER)
				printf("\tint fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);\n");
			else
				printf("\tint fd = skel_raw_tracepoint_open(NULL, prog_fd);\n");
			break;
		default:
			printf("\tint fd = ((void)prog_fd, 0); /* auto-attach not supported */\n");
			break;
		}
		codegen("\
			\n\
			\n\
				if (fd > 0)				    \n\
					skel->links.%1$s_fd = fd;	    \n\
				return fd;				    \n\
			}						    \n\
			", bpf_program__name(prog));
	}

	codegen("\
		\n\
		\n\
		static inline int					    \n\
		%1$s__attach(struct %1$s *skel)				    \n\
		{							    \n\
			int ret = 0;					    \n\
								    	    \n\
		", obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				ret = ret < 0 ? ret : %1$s__%2$s__attach(skel);	\n\
			", obj_name, bpf_program__name(prog));
	}

	codegen("\
		\n\
			return ret < 0 ? ret : 0;			    \n\
		}							    \n\
								    	    \n\
		static inline void					    \n\
		%1$s__detach(struct %1$s *skel)				    \n\
		{							    \n\
		", obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				skel_closenz(skel->links.%1$s_fd);	    \n\
			", bpf_program__name(prog));
	}

	codegen("\
		\n\
		}							    \n\
		");
}

static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
{
	struct bpf_program *prog;
	struct bpf_map *map;
	char ident[256];

	codegen("\
		\n\
		static void						    \n\
		%1$s__destroy(struct %1$s *skel)			    \n\
		{							    \n\
			if (!skel)					    \n\
				return;					    \n\
			%1$s__detach(skel);				    \n\
		",
		obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				skel_closenz(skel->progs.%1$s.prog_fd);	    \n\
			", bpf_program__name(prog));
	}

	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;
		if (bpf_map__is_internal(map) &&
		    (bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			printf("\tskel_free_map_data(skel->%1$s, skel->maps.%1$s.initial_value, %2$zd);\n",
			       ident, bpf_map_mmap_sz(map));
		codegen("\
			\n\
				skel_closenz(skel->maps.%1$s.map_fd);	    \n\
			", ident);
	}
	codegen("\
		\n\
			skel_free(skel);				    \n\
		}							    \n\
		",
		obj_name);
}
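
/* Sketch of one generated attach helper, assuming a hypothetical program
 * "handler" in SEC("raw_tp/sched_switch") of object "example":
 *
 *	static inline int example__handler__attach(struct example *skel)
 *	{
 *		int prog_fd = skel->progs.handler.prog_fd;
 *		int fd = skel_raw_tracepoint_open("sched_switch", prog_fd);
 *
 *		if (fd > 0)
 *			skel->links.handler_fd = fd;
 *		return fd;
 *	}
 */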

static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard)
{
	DECLARE_LIBBPF_OPTS(gen_loader_opts, opts);
	struct bpf_map *map;
	char ident[256];
	int err = 0;

	err = bpf_object__gen_loader(obj, &opts);
	if (err)
		return err;

	err = bpf_object__load(obj);
	if (err) {
		p_err("failed to load object file");
		goto out;
	}
	/* If there was no error during load then gen_loader_opts
	 * are populated with the loader program.
	 */

	/* finish generating 'struct skel' */
	codegen("\
		\n\
		};							    \n\
		", obj_name);

	codegen_attach_detach(obj, obj_name);

	codegen_destroy(obj, obj_name);

	codegen("\
		\n\
		static inline struct %1$s *				    \n\
		%1$s__open(void)					    \n\
		{							    \n\
			struct %1$s *skel;				    \n\
									    \n\
			skel = skel_alloc(sizeof(*skel));		    \n\
			if (!skel)					    \n\
				goto cleanup;				    \n\
			skel->ctx.sz = (void *)&skel->links - (void *)skel; \n\
		",
		obj_name, opts.data_sz);
	bpf_object__for_each_map(map, obj) {
		const void *mmap_data = NULL;
		size_t mmap_size = 0;

		if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
			continue;

		codegen("\
			\n\
				skel->%1$s = skel_prep_map_data((void *)\"\\	\n\
			", ident);
		mmap_data = bpf_map__initial_value(map, &mmap_size);
		print_hex(mmap_data, mmap_size);
		codegen("\
			\n\
			\", %1$zd, %2$zd);				    \n\
				if (!skel->%3$s)			    \n\
					goto cleanup;			    \n\
				skel->maps.%3$s.initial_value = (__u64) (long) skel->%3$s;\n\
			", bpf_map_mmap_sz(map), mmap_size, ident);
	}
	codegen("\
		\n\
			return skel;					    \n\
		cleanup:						    \n\
			%1$s__destroy(skel);				    \n\
			return NULL;					    \n\
		}							    \n\
									    \n\
		static inline int					    \n\
		%1$s__load(struct %1$s *skel)				    \n\
		{							    \n\
			struct bpf_load_and_run_opts opts = {};		    \n\
			int err;					    \n\
									    \n\
			opts.ctx = (struct bpf_loader_ctx *)skel;	    \n\
			opts.data_sz = %2$d;				    \n\
			opts.data = (void *)\"\\			    \n\
		",
		obj_name, opts.data_sz);
	print_hex(opts.data, opts.data_sz);
	codegen("\
		\n\
		\";							    \n\
		");

	codegen("\
		\n\
			opts.insns_sz = %d;				    \n\
			opts.insns = (void *)\"\\			    \n\
		",
		opts.insns_sz);
	print_hex(opts.insns, opts.insns_sz);
	codegen("\
		\n\
		\";							    \n\
			err = bpf_load_and_run(&opts);			    \n\
			if (err < 0)					    \n\
				return err;				    \n\
		", obj_name);
	bpf_object__for_each_map(map, obj) {
		const char *mmap_flags;

		if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
			continue;

		if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG)
			mmap_flags = "PROT_READ";
		else
			mmap_flags = "PROT_READ | PROT_WRITE";

		codegen("\
			\n\
				skel->%1$s = skel_finalize_map_data(&skel->maps.%1$s.initial_value,	\n\
								%2$zd, %3$s, skel->maps.%1$s.map_fd);\n\
				if (!skel->%1$s)			    \n\
					return -ENOMEM;			    \n\
			",
			ident, bpf_map_mmap_sz(map), mmap_flags);
	}
	codegen("\
		\n\
			return 0;					    \n\
		}							    \n\
									    \n\
		static inline struct %1$s *				    \n\
		%1$s__open_and_load(void)				    \n\
		{							    \n\
			struct %1$s *skel;				    \n\
									    \n\
			skel = %1$s__open();				    \n\
			if (!skel)					    \n\
				return NULL;				    \n\
			if (%1$s__load(skel)) {				    \n\
				%1$s__destroy(skel);			    \n\
				return NULL;				    \n\
			}						    \n\
			return skel;					    \n\
		}							    \n\
									    \n\
		", obj_name);

	codegen_asserts(obj, obj_name);

	codegen("\
		\n\
									    \n\
		#endif /* %s */						    \n\
		",
		header_guard);
	err = 0;
out:
	return err;
}
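
/* With -L/--use-loader, the generated header embeds the loader program and
 * metadata as hex blobs and is meant to be used on its own, roughly as
 * (object name "example" is hypothetical):
 *
 *	struct example *skel = example__open_and_load();
 *
 *	if (skel)
 *		example__attach(skel);
 *	...
 *	example__destroy(skel);
 */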

static void
codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped)
{
	struct bpf_map *map;
	char ident[256];
	size_t i;

	if (!map_cnt)
		return;

	codegen("\
		\n\
									    \n\
		/* maps */						    \n\
		s->map_cnt = %zu;					    \n\
		s->map_skel_sz = sizeof(*s->maps);			    \n\
		s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
		if (!s->maps) {						    \n\
			err = -ENOMEM;					    \n\
			goto err;					    \n\
		}							    \n\
		",
		map_cnt
	);
	i = 0;
	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		codegen("\
			\n\
									    \n\
			s->maps[%zu].name = \"%s\";			    \n\
			s->maps[%zu].map = &obj->maps.%s;		    \n\
			",
			i, bpf_map__name(map), i, ident);
		/* memory-mapped internal maps */
		if (mmaped && is_internal_mmapable_map(map, ident, sizeof(ident))) {
			printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
			       i, ident);
		}
		i++;
	}
}

static void
codegen_progs_skeleton(struct bpf_object *obj, size_t prog_cnt, bool populate_links)
{
	struct bpf_program *prog;
	size_t i;

	if (!prog_cnt)
		return;

	codegen("\
		\n\
									    \n\
		/* programs */						    \n\
		s->prog_cnt = %zu;					    \n\
		s->prog_skel_sz = sizeof(*s->progs);			    \n\
		s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
		if (!s->progs) {					    \n\
			err = -ENOMEM;					    \n\
			goto err;					    \n\
		}							    \n\
		",
		prog_cnt
	);
	i = 0;
	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
									    \n\
			s->progs[%1$zu].name = \"%2$s\";		    \n\
			s->progs[%1$zu].prog = &obj->progs.%2$s;	    \n\
			",
			i, bpf_program__name(prog));

		if (populate_links) {
			codegen("\
				\n\
				s->progs[%1$zu].link = &obj->links.%2$s;    \n\
				",
				i, bpf_program__name(prog));
		}
		i++;
	}
}
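
/* For a hypothetical object "example" with one map "vals" and one program
 * "handler", the two helpers above emit roughly:
 *
 *	s->maps[0].name = "vals";
 *	s->maps[0].map = &obj->maps.vals;
 *
 *	s->progs[0].name = "handler";
 *	s->progs[0].prog = &obj->progs.handler;
 *	s->progs[0].link = &obj->links.handler;
 */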

static int do_skeleton(int argc, char **argv)
{
	char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
	size_t map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz;
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
	char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
	struct bpf_object *obj = NULL;
	const char *file;
	char ident[256];
	struct bpf_program *prog;
	int fd, err = -1;
	struct bpf_map *map;
	struct btf *btf;
	struct stat st;

	if (!REQ_ARGS(1)) {
		usage();
		return -1;
	}
	file = GET_ARG();

	while (argc) {
		if (!REQ_ARGS(2))
			return -1;

		if (is_prefix(*argv, "name")) {
			NEXT_ARG();

			if (obj_name[0] != '\0') {
				p_err("object name already specified");
				return -1;
			}

			strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
			obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
		} else {
			p_err("unknown arg %s", *argv);
			return -1;
		}

		NEXT_ARG();
	}

	if (argc) {
		p_err("extra unknown arguments");
		return -1;
	}

	if (stat(file, &st)) {
		p_err("failed to stat() %s: %s", file, strerror(errno));
		return -1;
	}
	file_sz = st.st_size;
	mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
	fd = open(file, O_RDONLY);
	if (fd < 0) {
		p_err("failed to open() %s: %s", file, strerror(errno));
		return -1;
	}
	obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
	if (obj_data == MAP_FAILED) {
		obj_data = NULL;
		p_err("failed to mmap() %s: %s", file, strerror(errno));
		goto out;
	}
	if (obj_name[0] == '\0')
		get_obj_name(obj_name, file);
	opts.object_name = obj_name;
	if (verifier_logs)
		/* log_level1 + log_level2 + stats, but not stable UAPI */
		opts.kernel_log_level = 1 + 2 + 4;
	obj = bpf_object__open_mem(obj_data, file_sz, &opts);
	if (!obj) {
		char err_buf[256];

		err = -errno;
		libbpf_strerror(err, err_buf, sizeof(err_buf));
		p_err("failed to open BPF object file: %s", err_buf);
		goto out;
	}

	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident))) {
			p_err("ignoring unrecognized internal map '%s'...",
			      bpf_map__name(map));
			continue;
		}
		map_cnt++;
	}
	bpf_object__for_each_program(prog, obj) {
		prog_cnt++;
	}

	get_header_guard(header_guard, obj_name, "SKEL_H");
	if (use_loader) {
		codegen("\
		\n\
		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */   \n\
		/* THIS FILE IS AUTOGENERATED BY BPFTOOL! */		    \n\
		#ifndef %2$s						    \n\
		#define %2$s						    \n\
		\n\
		#include <bpf/skel_internal.h>				    \n\
		\n\
		struct %1$s {						    \n\
			struct bpf_loader_ctx ctx;			    \n\
		",
		obj_name, header_guard
		);
	} else {
		codegen("\
		\n\
		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */   \n\
		\n\
		/* THIS FILE IS AUTOGENERATED BY BPFTOOL! */		    \n\
		#ifndef %2$s						    \n\
		#define %2$s						    \n\
		\n\
		#include <errno.h>					    \n\
		#include <stdlib.h>					    \n\
		#include <bpf/libbpf.h>					    \n\
		\n\
		struct %1$s {						    \n\
			struct bpf_object_skeleton *skeleton;		    \n\
			struct bpf_object *obj;				    \n\
		",
		obj_name, header_guard
		);
	}

	if (map_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_map(map, obj) {
			if (!get_map_ident(map, ident, sizeof(ident)))
				continue;
			if (use_loader)
				printf("\t\tstruct bpf_map_desc %s;\n", ident);
			else
				printf("\t\tstruct bpf_map *%s;\n", ident);
		}
		printf("\t} maps;\n");
	}

	if (prog_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			if (use_loader)
				printf("\t\tstruct bpf_prog_desc %s;\n",
				       bpf_program__name(prog));
			else
				printf("\t\tstruct bpf_program *%s;\n",
				       bpf_program__name(prog));
		}
		printf("\t} progs;\n");
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			if (use_loader)
				printf("\t\tint %s_fd;\n",
				       bpf_program__name(prog));
			else
				printf("\t\tstruct bpf_link *%s;\n",
				       bpf_program__name(prog));
		}
		printf("\t} links;\n");
	}

	btf = bpf_object__btf(obj);
	if (btf) {
		err = codegen_datasecs(obj, obj_name);
		if (err)
			goto out;
	}
	if (use_loader) {
		err = gen_trace(obj, obj_name, header_guard);
		goto out;
	}

	codegen("\
		\n\
									    \n\
		#ifdef __cplusplus					    \n\
			static inline struct %1$s *open(const struct bpf_object_open_opts *opts = nullptr);\n\
			static inline struct %1$s *open_and_load();	    \n\
			static inline int load(struct %1$s *skel);	    \n\
			static inline int attach(struct %1$s *skel);	    \n\
			static inline void detach(struct %1$s *skel);	    \n\
			static inline void destroy(struct %1$s *skel);	    \n\
			static inline const void *elf_bytes(size_t *sz);    \n\
		#endif /* __cplusplus */				    \n\
		};							    \n\
									    \n\
		static void						    \n\
		%1$s__destroy(struct %1$s *obj)				    \n\
		{							    \n\
			if (!obj)					    \n\
				return;					    \n\
			if (obj->skeleton)				    \n\
				bpf_object__destroy_skeleton(obj->skeleton);\n\
			free(obj);					    \n\
		}							    \n\
									    \n\
		static inline int					    \n\
		%1$s__create_skeleton(struct %1$s *obj);		    \n\
									    \n\
		static inline struct %1$s *				    \n\
		%1$s__open_opts(const struct bpf_object_open_opts *opts)    \n\
		{							    \n\
			struct %1$s *obj;				    \n\
			int err;					    \n\
									    \n\
			obj = (struct %1$s *)calloc(1, sizeof(*obj));	    \n\
			if (!obj) {					    \n\
				errno = ENOMEM;				    \n\
				return NULL;				    \n\
			}						    \n\
									    \n\
			err = %1$s__create_skeleton(obj);		    \n\
			if (err)					    \n\
				goto err_out;				    \n\
									    \n\
			err = bpf_object__open_skeleton(obj->skeleton, opts);\n\
			if (err)					    \n\
				goto err_out;				    \n\
									    \n\
			return obj;					    \n\
		err_out:						    \n\
			%1$s__destroy(obj);				    \n\
			errno = -err;					    \n\
			return NULL;					    \n\
		}							    \n\
									    \n\
		static inline struct %1$s *				    \n\
		%1$s__open(void)					    \n\
		{							    \n\
			return %1$s__open_opts(NULL);			    \n\
		}							    \n\
									    \n\
		static inline int					    \n\
		%1$s__load(struct %1$s *obj)				    \n\
		{							    \n\
			return bpf_object__load_skeleton(obj->skeleton);    \n\
		}							    \n\
									    \n\
		static inline struct %1$s *				    \n\
		%1$s__open_and_load(void)				    \n\
		{							    \n\
			struct %1$s *obj;				    \n\
			int err;					    \n\
									    \n\
			obj = %1$s__open();				    \n\
			if (!obj)					    \n\
				return NULL;				    \n\
			err = %1$s__load(obj);				    \n\
			if (err) {					    \n\
				%1$s__destroy(obj);			    \n\
				errno = -err;				    \n\
				return NULL;				    \n\
			}						    \n\
			return obj;					    \n\
		}							    \n\
									    \n\
		static inline int					    \n\
		%1$s__attach(struct %1$s *obj)				    \n\
		{							    \n\
			return bpf_object__attach_skeleton(obj->skeleton);  \n\
		}							    \n\
									    \n\
		static inline void					    \n\
		%1$s__detach(struct %1$s *obj)				    \n\
		{							    \n\
			bpf_object__detach_skeleton(obj->skeleton);	    \n\
		}							    \n\
		",
		obj_name
	);

	codegen("\
		\n\
									    \n\
		static inline const void *%1$s__elf_bytes(size_t *sz);	    \n\
									    \n\
		static inline int					    \n\
		%1$s__create_skeleton(struct %1$s *obj)			    \n\
		{							    \n\
			struct bpf_object_skeleton *s;			    \n\
			int err;					    \n\
									    \n\
			s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
			if (!s) {					    \n\
				err = -ENOMEM;				    \n\
				goto err;				    \n\
			}						    \n\
									    \n\
			s->sz = sizeof(*s);				    \n\
			s->name = \"%1$s\";				    \n\
			s->obj = &obj->obj;				    \n\
		",
		obj_name
	);

	codegen_maps_skeleton(obj, map_cnt, true /*mmaped*/);
	codegen_progs_skeleton(obj, prog_cnt, true /*populate_links*/);

	codegen("\
		\n\
									    \n\
			s->data = (void *)%2$s__elf_bytes(&s->data_sz);	    \n\
									    \n\
			obj->skeleton = s;				    \n\
			return 0;					    \n\
		err:							    \n\
			bpf_object__destroy_skeleton(s);		    \n\
			return err;					    \n\
		}							    \n\
									    \n\
		static inline const void *%2$s__elf_bytes(size_t *sz)	    \n\
		{							    \n\
			*sz = %1$d;					    \n\
			return (const void *)\"\\			    \n\
		",
		file_sz, obj_name);

	/* embed contents of BPF object file */
	print_hex(obj_data, file_sz);

	codegen("\
		\n\
		\";							    \n\
		}							    \n\
									    \n\
		#ifdef __cplusplus					    \n\
		struct %1$s *%1$s::open(const struct bpf_object_open_opts *opts) { return %1$s__open_opts(opts); }\n\
		struct %1$s *%1$s::open_and_load() { return %1$s__open_and_load(); }\n\
		int %1$s::load(struct %1$s *skel) { return %1$s__load(skel); }\n\
		int %1$s::attach(struct %1$s *skel) { return %1$s__attach(skel); }\n\
		void %1$s::detach(struct %1$s *skel) { %1$s__detach(skel); }\n\
		void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); }\n\
		const void *%1$s::elf_bytes(size_t *sz) { return %1$s__elf_bytes(sz); }\n\
		#endif /* __cplusplus */				    \n\
									    \n\
		",
		obj_name);

	codegen_asserts(obj, obj_name);

	codegen("\
		\n\
									    \n\
		#endif /* %1$s */					    \n\
		",
		header_guard);
	err = 0;
out:
	bpf_object__close(obj);
	if (obj_data)
		munmap(obj_data, mmap_sz);
	close(fd);
	return err;
}
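
/* Sketch of how the skeleton generated above is consumed (the object name
 * "example" and its "counter" global are hypothetical):
 *
 *	struct example *skel = example__open();
 *
 *	if (!skel)
 *		return -1;
 *	skel->bss->counter = 42;	(initialized before load)
 *	if (example__load(skel) || example__attach(skel)) {
 *		example__destroy(skel);
 *		return -1;
 *	}
 */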

/* Subskeletons are like skeletons, except they don't own the bpf_object,
 * associated maps, links, etc. Instead, they know about the existence of
 * variables, maps, programs and are able to find their locations
 * _at runtime_ from an already loaded bpf_object.
 *
 * This allows for library-like BPF objects to have userspace counterparts
 * with access to their own items without having to know anything about the
 * final BPF object that the library was linked into.
 */
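
/* A minimal usage sketch, assuming a library object named "example_lib" that
 * was linked into a final object already opened as "obj":
 *
 *	struct example_lib *sub = example_lib__open(obj);
 *
 *	if (!sub)
 *		return -1;
 *	... access sub->maps, sub->progs and datasec variables ...
 *	example_lib__destroy(sub);
 */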

static int do_subskeleton(int argc, char **argv)
{
	char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SUBSKEL_H__")];
	size_t i, len, file_sz, map_cnt = 0, prog_cnt = 0, mmap_sz, var_cnt = 0, var_idx = 0;
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
	char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
	struct bpf_object *obj = NULL;
	const char *file, *var_name;
	char ident[256];
	int fd, err = -1, map_type_id;
	const struct bpf_map *map;
	struct bpf_program *prog;
	struct btf *btf;
	const struct btf_type *map_type, *var_type;
	const struct btf_var_secinfo *var;
	struct stat st;

	if (!REQ_ARGS(1)) {
		usage();
		return -1;
	}
	file = GET_ARG();

	while (argc) {
		if (!REQ_ARGS(2))
			return -1;

		if (is_prefix(*argv, "name")) {
			NEXT_ARG();

			if (obj_name[0] != '\0') {
				p_err("object name already specified");
				return -1;
			}

			strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
			obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
		} else {
			p_err("unknown arg %s", *argv);
			return -1;
		}

		NEXT_ARG();
	}

	if (argc) {
		p_err("extra unknown arguments");
		return -1;
	}

	if (use_loader) {
		p_err("cannot use loader for subskeletons");
		return -1;
	}

	if (stat(file, &st)) {
		p_err("failed to stat() %s: %s", file, strerror(errno));
		return -1;
	}
	file_sz = st.st_size;
	mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
	fd = open(file, O_RDONLY);
	if (fd < 0) {
		p_err("failed to open() %s: %s", file, strerror(errno));
		return -1;
	}
	obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
	if (obj_data == MAP_FAILED) {
		obj_data = NULL;
		p_err("failed to mmap() %s: %s", file, strerror(errno));
		goto out;
	}
	if (obj_name[0] == '\0')
		get_obj_name(obj_name, file);

	/* The empty object name allows us to use bpf_map__name and produce
	 * ELF section names out of it. (".data" instead of "obj.data")
	 */
	opts.object_name = "";
	obj = bpf_object__open_mem(obj_data, file_sz, &opts);
	if (!obj) {
		char err_buf[256];

		libbpf_strerror(errno, err_buf, sizeof(err_buf));
		p_err("failed to open BPF object file: %s", err_buf);
		obj = NULL;
		goto out;
	}

	btf = bpf_object__btf(obj);
	if (!btf) {
		err = -1;
		p_err("need btf type information for %s", obj_name);
		goto out;
	}

	bpf_object__for_each_program(prog, obj) {
		prog_cnt++;
	}

	/* First, count how many variables we have to find.
	 * We need this in advance so the subskel can allocate the right
	 * amount of storage.
	 */
	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		/* Also count all maps that have a name */
		map_cnt++;

		if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
			continue;

		map_type_id = bpf_map__btf_value_type_id(map);
		if (map_type_id <= 0) {
			err = map_type_id;
			goto out;
		}
		map_type = btf__type_by_id(btf, map_type_id);

		var = btf_var_secinfos(map_type);
		len = btf_vlen(map_type);
		for (i = 0; i < len; i++, var++) {
			var_type = btf__type_by_id(btf, var->type);

			if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
				continue;

			var_cnt++;
		}
	}

	get_header_guard(header_guard, obj_name, "SUBSKEL_H");
	codegen("\
	\n\
	/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */	    \n\
	\n\
	/* THIS FILE IS AUTOGENERATED! */				    \n\
	#ifndef %2$s							    \n\
	#define %2$s							    \n\
	\n\
	#include <errno.h>						    \n\
	#include <stdlib.h>						    \n\
	#include <bpf/libbpf.h>						    \n\
	\n\
	struct %1$s {							    \n\
		struct bpf_object *obj;					    \n\
		struct bpf_object_subskeleton *subskel;			    \n\
	", obj_name, header_guard);

	if (map_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_map(map, obj) {
			if (!get_map_ident(map, ident, sizeof(ident)))
				continue;
			printf("\t\tstruct bpf_map *%s;\n", ident);
		}
		printf("\t} maps;\n");
	}

	if (prog_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			printf("\t\tstruct bpf_program *%s;\n",
			       bpf_program__name(prog));
		}
		printf("\t} progs;\n");
	}

	err = codegen_subskel_datasecs(obj, obj_name);
	if (err)
		goto out;

	/* emit code that will allocate enough storage for all symbols */
	codegen("\
		\n\
									    \n\
		#ifdef __cplusplus					    \n\
			static inline struct %1$s *open(const struct bpf_object *src);\n\
			static inline void destroy(struct %1$s *skel);	    \n\
		#endif /* __cplusplus */				    \n\
		};							    \n\
									    \n\
		static inline void					    \n\
		%1$s__destroy(struct %1$s *skel)			    \n\
		{							    \n\
			if (!skel)					    \n\
				return;					    \n\
			if (skel->subskel)				    \n\
				bpf_object__destroy_subskeleton(skel->subskel);\n\
			free(skel);					    \n\
		}							    \n\
									    \n\
		static inline struct %1$s *				    \n\
		%1$s__open(const struct bpf_object *src)		    \n\
		{							    \n\
			struct %1$s *obj;				    \n\
			struct bpf_object_subskeleton *s;		    \n\
			int err;					    \n\
									    \n\
			obj = (struct %1$s *)calloc(1, sizeof(*obj));	    \n\
			if (!obj) {					    \n\
				err = -ENOMEM;				    \n\
				goto err;				    \n\
			}						    \n\
			s = (struct bpf_object_subskeleton *)calloc(1, sizeof(*s));\n\
			if (!s) {					    \n\
				err = -ENOMEM;				    \n\
				goto err;				    \n\
			}						    \n\
			s->sz = sizeof(*s);				    \n\
			s->obj = src;					    \n\
			s->var_skel_sz = sizeof(*s->vars);		    \n\
			obj->subskel = s;				    \n\
									    \n\
			/* vars */					    \n\
			s->var_cnt = %2$d;				    \n\
			s->vars = (struct bpf_var_skeleton *)calloc(%2$d, sizeof(*s->vars));\n\
			if (!s->vars) {					    \n\
				err = -ENOMEM;				    \n\
				goto err;				    \n\
			}						    \n\
		",
		obj_name, var_cnt
	);

	/* walk through each symbol and emit the runtime representation */
	bpf_object__for_each_map(map, obj) {
		if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
			continue;

		map_type_id = bpf_map__btf_value_type_id(map);
		if (map_type_id <= 0)
			/* skip over internal maps with no type */
			continue;

		map_type = btf__type_by_id(btf, map_type_id);
		var = btf_var_secinfos(map_type);
		len = btf_vlen(map_type);
		for (i = 0; i < len; i++, var++) {
			var_type = btf__type_by_id(btf, var->type);
			var_name = btf__name_by_offset(btf, var_type->name_off);

			if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
				continue;

			/* Note that we use the dot prefix in .data as the
			 * field access operator, i.e., maps%s becomes maps.data
			 */
			codegen("\
				\n\
				\n\
				s->vars[%3$d].name = \"%1$s\";		    \n\
				s->vars[%3$d].map = &obj->maps.%2$s;	    \n\
				s->vars[%3$d].addr = (void **) &obj->%2$s.%1$s;\n\
				", var_name, ident, var_idx);

			var_idx++;
		}
	}

	codegen_maps_skeleton(obj, map_cnt, false /*mmaped*/);
	codegen_progs_skeleton(obj, prog_cnt, false /*links*/);

	codegen("\
		\n\
									    \n\
			err = bpf_object__open_subskeleton(s);		    \n\
			if (err)					    \n\
				goto err;				    \n\
									    \n\
			return obj;					    \n\
		err:							    \n\
			%1$s__destroy(obj);				    \n\
			errno = -err;					    \n\
			return NULL;					    \n\
		}							    \n\
									    \n\
		#ifdef __cplusplus					    \n\
		struct %1$s *%1$s::open(const struct bpf_object *src) { return %1$s__open(src); }\n\
		void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); }\n\
		#endif /* __cplusplus */				    \n\
									    \n\
		#endif /* %2$s */					    \n\
		",
		obj_name, header_guard);
	err = 0;
out:
	bpf_object__close(obj);
	if (obj_data)
		munmap(obj_data, mmap_sz);
	close(fd);
	return err;
}

static int do_object(int argc, char **argv)
{
	struct bpf_linker *linker;
	const char *output_file, *file;
	int err = 0;

	if (!REQ_ARGS(2)) {
		usage();
		return -1;
	}

	output_file = GET_ARG();

	linker = bpf_linker__new(output_file, NULL);
	if (!linker) {
		p_err("failed to create BPF linker instance");
		return -1;
	}

	while (argc) {
		file = GET_ARG();

		err = bpf_linker__add_file(linker, file, NULL);
		if (err) {
			p_err("failed to link '%s': %s (%d)", file, strerror(errno), errno);
			goto out;
		}
	}

	err = bpf_linker__finalize(linker);
	if (err) {
		p_err("failed to finalize ELF file: %s (%d)", strerror(errno), errno);
		goto out;
	}

	err = 0;
out:
	bpf_linker__free(linker);
	return err;
}
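
/* Typical invocation (file names are illustrative):
 *
 *	bpftool gen object out.bpf.o in1.bpf.o in2.bpf.o
 *
 * which statically links the input ELF objects into out.bpf.o using libbpf's
 * bpf_linker.
 */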

static int do_help(int argc, char **argv)
{
	if (json_output) {
		jsonw_null(json_wtr);
		return 0;
	}

	fprintf(stderr,
		"Usage: %1$s %2$s object OUTPUT_FILE INPUT_FILE [INPUT_FILE...]\n"
		"       %1$s %2$s skeleton FILE [name OBJECT_NAME]\n"
		"       %1$s %2$s subskeleton FILE [name OBJECT_NAME]\n"
		"       %1$s %2$s min_core_btf INPUT OUTPUT OBJECT [OBJECT...]\n"
		"       %1$s %2$s help\n"
		"\n"
		"       " HELP_SPEC_OPTIONS " |\n"
		"                    {-L|--use-loader} }\n"
		"",
		bin_name, "gen");

	return 0;
}

static int btf_save_raw(const struct btf *btf, const char *path)
{
	const void *data;
	FILE *f = NULL;
	__u32 data_sz;
	int err = 0;

	data = btf__raw_data(btf, &data_sz);
	if (!data)
		return -ENOMEM;

	f = fopen(path, "wb");
	if (!f)
		return -errno;

	if (fwrite(data, 1, data_sz, f) != data_sz)
		err = -errno;

	fclose(f);
	return err;
}

struct btfgen_info {
	struct btf *src_btf;
	struct btf *marked_btf; /* btf structure used to mark used types */
};

static size_t btfgen_hash_fn(long key, void *ctx)
{
	return key;
}

static bool btfgen_equal_fn(long k1, long k2, void *ctx)
{
	return k1 == k2;
}

static void btfgen_free_info(struct btfgen_info *info)
{
	if (!info)
		return;

	btf__free(info->src_btf);
	btf__free(info->marked_btf);

	free(info);
}

static struct btfgen_info *
btfgen_new_info(const char *targ_btf_path)
{
	struct btfgen_info *info;
	int err;

	info = calloc(1, sizeof(*info));
	if (!info)
		return NULL;

	info->src_btf = btf__parse(targ_btf_path, NULL);
	if (!info->src_btf) {
		err = -errno;
		p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
		goto err_out;
	}

	info->marked_btf = btf__parse(targ_btf_path, NULL);
	if (!info->marked_btf) {
		err = -errno;
		p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
		goto err_out;
	}

	return info;

err_out:
	btfgen_free_info(info);
	errno = -err;
	return NULL;
}

#define MARKED UINT32_MAX

static void btfgen_mark_member(struct btfgen_info *info, int type_id, int idx)
{
	const struct btf_type *t = btf__type_by_id(info->marked_btf, type_id);
	struct btf_member *m = btf_members(t) + idx;

	m->name_off = MARKED;
}

static int
btfgen_mark_type(struct btfgen_info *info, unsigned int type_id, bool follow_pointers)
{
	const struct btf_type *btf_type = btf__type_by_id(info->src_btf, type_id);
	struct btf_type *cloned_type;
	struct btf_param *param;
	struct btf_array *array;
	int err, i;

	if (type_id == 0)
		return 0;

	/* mark type on cloned BTF as used */
	cloned_type = (struct btf_type *) btf__type_by_id(info->marked_btf, type_id);
	cloned_type->name_off = MARKED;

	/* recursively mark other types needed by it */
	switch (btf_kind(btf_type)) {
	case BTF_KIND_UNKN:
	case BTF_KIND_INT:
	case BTF_KIND_FLOAT:
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		break;
	case BTF_KIND_PTR:
		if (follow_pointers) {
			err = btfgen_mark_type(info, btf_type->type, follow_pointers);
			if (err)
				return err;
		}
		break;
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_TYPEDEF:
		err = btfgen_mark_type(info, btf_type->type, follow_pointers);
		if (err)
			return err;
		break;
	case BTF_KIND_ARRAY:
		array = btf_array(btf_type);

		/* mark array type */
		err = btfgen_mark_type(info, array->type, follow_pointers);
		/* mark array's index type */
		err = err ? : btfgen_mark_type(info, array->index_type, follow_pointers);
		if (err)
			return err;
		break;
	case BTF_KIND_FUNC_PROTO:
		/* mark ret type */
		err = btfgen_mark_type(info, btf_type->type, follow_pointers);
		if (err)
			return err;

		/* mark parameter types */
		param = btf_params(btf_type);
		for (i = 0; i < btf_vlen(btf_type); i++) {
			err = btfgen_mark_type(info, param->type, follow_pointers);
			if (err)
				return err;
			param++;
		}
		break;
	/* tells if some other type needs to be handled */
	default:
		p_err("unsupported kind: %s (%d)", btf_kind_str(btf_type), type_id);
		return -EINVAL;
	}

	return 0;
}
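
/* Illustration of the marking semantics above: for a relocation rooted at,
 * say, "struct task_struct *t" with follow_pointers == false, the PTR type
 * itself is marked but the pointed-to struct body is not recursed into, so
 * the whole task_struct is not pulled into the minimized BTF; only members
 * that relocations actually touch get marked, via btfgen_mark_member().
 */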

static int btfgen_record_field_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
	struct btf *btf = info->src_btf;
	const struct btf_type *btf_type;
	struct btf_member *btf_member;
	struct btf_array *array;
	unsigned int type_id = targ_spec->root_type_id;
	int idx, err;

	/* mark root type */
	btf_type = btf__type_by_id(btf, type_id);
	err = btfgen_mark_type(info, type_id, false);
	if (err)
		return err;

	/* mark types for complex types (arrays, unions, structures) */
	for (int i = 1; i < targ_spec->raw_len; i++) {
		/* skip typedefs and mods */
		while (btf_is_mod(btf_type) || btf_is_typedef(btf_type)) {
			type_id = btf_type->type;
			btf_type = btf__type_by_id(btf, type_id);
		}

		switch (btf_kind(btf_type)) {
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
			idx = targ_spec->raw_spec[i];
			btf_member = btf_members(btf_type) + idx;

			/* mark member */
			btfgen_mark_member(info, type_id, idx);

			/* mark member's type */
			type_id = btf_member->type;
			btf_type = btf__type_by_id(btf, type_id);
			err = btfgen_mark_type(info, type_id, false);
			if (err)
				return err;
			break;
		case BTF_KIND_ARRAY:
			array = btf_array(btf_type);
			type_id = array->type;
			btf_type = btf__type_by_id(btf, type_id);
			break;
		default:
			p_err("unsupported kind: %s (%d)",
			      btf_kind_str(btf_type), btf_type->type);
			return -EINVAL;
		}
	}

	return 0;
}

/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
 * this function does not rely on the target spec for inferring members, but
 * uses the associated BTF.
 *
 * The `behind_ptr` argument is used to stop marking of composite types reached
 * through a pointer. This way, we can keep BTF size in check while providing
 * reasonable match semantics.
 */
static int btfgen_mark_type_match(struct btfgen_info *info, __u32 type_id, bool behind_ptr)
{
	const struct btf_type *btf_type;
	struct btf *btf = info->src_btf;
	struct btf_type *cloned_type;
	int i, err;

	if (type_id == 0)
		return 0;

	btf_type = btf__type_by_id(btf, type_id);
	/* mark type on cloned BTF as used */
	cloned_type = (struct btf_type *)btf__type_by_id(info->marked_btf, type_id);
	cloned_type->name_off = MARKED;

	switch (btf_kind(btf_type)) {
	case BTF_KIND_UNKN:
	case BTF_KIND_INT:
	case BTF_KIND_FLOAT:
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		break;
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		struct btf_member *m = btf_members(btf_type);
		__u16 vlen = btf_vlen(btf_type);

		if (behind_ptr)
			break;

		for (i = 0; i < vlen; i++, m++) {
			/* mark member */
			btfgen_mark_member(info, type_id, i);

			/* mark member's type */
			err = btfgen_mark_type_match(info, m->type, false);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_CONST:
	case BTF_KIND_FWD:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
		return btfgen_mark_type_match(info, btf_type->type, behind_ptr);
	case BTF_KIND_PTR:
		return btfgen_mark_type_match(info, btf_type->type, true);
	case BTF_KIND_ARRAY: {
		struct btf_array *array;

		array = btf_array(btf_type);
		/* mark array type */
		err = btfgen_mark_type_match(info, array->type, false);
		/* mark array's index type */
		err = err ? : btfgen_mark_type_match(info, array->index_type, false);
		if (err)
			return err;
		break;
	}
	case BTF_KIND_FUNC_PROTO: {
		__u16 vlen = btf_vlen(btf_type);
		struct btf_param *param;

		/* mark ret type */
		err = btfgen_mark_type_match(info, btf_type->type, false);
		if (err)
			return err;

		/* mark parameter types */
		param = btf_params(btf_type);
		for (i = 0; i < vlen; i++) {
			err = btfgen_mark_type_match(info, param->type, false);
			if (err)
				return err;
			param++;
		}
		break;
	}
	/* tells if some other type needs to be handled */
	default:
		p_err("unsupported kind: %s (%d)", btf_kind_str(btf_type), type_id);
		return -EINVAL;
	}

	return 0;
}

/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
 * this function does not rely on the target spec for inferring members, but
 * uses the associated BTF.
 */
static int btfgen_record_type_match_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
	return btfgen_mark_type_match(info, targ_spec->root_type_id, false);
}

static int btfgen_record_type_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
	return btfgen_mark_type(info, targ_spec->root_type_id, true);
}

static int btfgen_record_enumval_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
	return btfgen_mark_type(info, targ_spec->root_type_id, false);
}

static int btfgen_record_reloc(struct btfgen_info *info, struct bpf_core_spec *res)
{
	switch (res->relo_kind) {
	case BPF_CORE_FIELD_BYTE_OFFSET:
	case BPF_CORE_FIELD_BYTE_SIZE:
	case BPF_CORE_FIELD_EXISTS:
	case BPF_CORE_FIELD_SIGNED:
	case BPF_CORE_FIELD_LSHIFT_U64:
	case BPF_CORE_FIELD_RSHIFT_U64:
		return btfgen_record_field_relo(info, res);
	case BPF_CORE_TYPE_ID_LOCAL: /* BPF_CORE_TYPE_ID_LOCAL doesn't require kernel BTF */
		return 0;
	case BPF_CORE_TYPE_ID_TARGET:
	case BPF_CORE_TYPE_EXISTS:
	case BPF_CORE_TYPE_SIZE:
		return btfgen_record_type_relo(info, res);
	case BPF_CORE_TYPE_MATCHES:
		return btfgen_record_type_match_relo(info, res);
	case BPF_CORE_ENUMVAL_EXISTS:
	case BPF_CORE_ENUMVAL_VALUE:
		return btfgen_record_enumval_relo(info, res);
	default:
		return -EINVAL;
	}
}

static struct bpf_core_cand_list *
btfgen_find_cands(const struct btf *local_btf, const struct btf *targ_btf, __u32 local_id)
{
	const struct btf_type *local_type;
	struct bpf_core_cand_list *cands = NULL;
	struct bpf_core_cand local_cand = {};
	size_t local_essent_len;
	const char *local_name;
	int err;

	local_cand.btf = local_btf;
	local_cand.id = local_id;

	local_type = btf__type_by_id(local_btf, local_id);
	if (!local_type) {
		err = -EINVAL;
		goto err_out;
	}

	local_name = btf__name_by_offset(local_btf, local_type->name_off);
	if (!local_name) {
		err = -EINVAL;
		goto err_out;
	}
	local_essent_len = bpf_core_essential_name_len(local_name);

	cands = calloc(1, sizeof(*cands));
	if (!cands)
		return NULL;

	err = bpf_core_add_cands(&local_cand, local_essent_len, targ_btf, "vmlinux", 1, cands);
	if (err)
		goto err_out;

	return cands;

err_out:
	bpf_core_free_cands(cands);
	errno = -err;
	return NULL;
}

/* Record relocation information for a single BPF object */
static int btfgen_record_obj(struct btfgen_info *info, const char *obj_path)
{
	const struct btf_ext_info_sec *sec;
	const struct bpf_core_relo *relo;
	const struct btf_ext_info *seg;
	struct hashmap_entry *entry;
	struct hashmap *cand_cache = NULL;
	struct btf_ext *btf_ext = NULL;
	unsigned int relo_idx;
	struct btf *btf = NULL;
	size_t i;
	int err;

	btf = btf__parse(obj_path, &btf_ext);
	if (!btf) {
		err = -errno;
		p_err("failed to parse BPF object '%s': %s", obj_path, strerror(errno));
		return err;
	}

	if (!btf_ext) {
		p_err("failed to parse BPF object '%s': section %s not found",
		      obj_path, BTF_EXT_ELF_SEC);
		err = -EINVAL;
		goto out;
	}

	if (btf_ext->core_relo_info.len == 0) {
		err = 0;
		goto out;
	}

	cand_cache = hashmap__new(btfgen_hash_fn, btfgen_equal_fn, NULL);
	if (IS_ERR(cand_cache)) {
		err = PTR_ERR(cand_cache);
		goto out;
	}

	seg = &btf_ext->core_relo_info;
	for_each_btf_ext_sec(seg, sec) {
		for_each_btf_ext_rec(seg, sec, relo_idx, relo) {
			struct bpf_core_spec specs_scratch[3] = {};
			struct bpf_core_relo_res targ_res = {};
			struct bpf_core_cand_list *cands = NULL;
			const char *sec_name = btf__name_by_offset(btf, sec->sec_name_off);

			if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
			    !hashmap__find(cand_cache, relo->type_id, &cands)) {
				cands = btfgen_find_cands(btf, info->src_btf, relo->type_id);
				if (!cands) {
					err = -errno;
					goto out;
				}

				err = hashmap__set(cand_cache, relo->type_id, cands,
						   NULL, NULL);
				if (err)
					goto out;
			}

			err = bpf_core_calc_relo_insn(sec_name, relo, relo_idx, btf, cands,
						      specs_scratch, &targ_res);
			if (err)
				goto out;

			/* specs_scratch[2] is the target spec */
			err = btfgen_record_reloc(info, &specs_scratch[2]);
			if (err)
				goto out;
		}
	}

out:
	btf__free(btf);
	btf_ext__free(btf_ext);

	if (!IS_ERR_OR_NULL(cand_cache)) {
		hashmap__for_each_entry(cand_cache, entry, i) {
			bpf_core_free_cands(entry->pvalue);
		}
		hashmap__free(cand_cache);
	}

	return err;
}

static int btfgen_remap_id(__u32 *type_id, void *ctx)
{
	unsigned int *ids = ctx;

	*type_id = ids[*type_id];

	return 0;
}

/* Generate BTF from relocation information previously recorded */
static struct btf *btfgen_get_btf(struct btfgen_info *info)
{
	struct btf *btf_new = NULL;
	unsigned int *ids = NULL;
	unsigned int i, n = btf__type_cnt(info->marked_btf);
	int err = 0;

	btf_new = btf__new_empty();
	if (!btf_new) {
		err = -errno;
		goto err_out;
	}

	ids = calloc(n, sizeof(*ids));
	if (!ids) {
		err = -errno;
		goto err_out;
	}

	/* first pass: add all marked types to btf_new and add their new ids to the ids map */
	for (i = 1; i < n; i++) {
		const struct btf_type *cloned_type, *type;
		const char *name;
		int new_id;

		cloned_type = btf__type_by_id(info->marked_btf, i);

		if (cloned_type->name_off != MARKED)
			continue;

		type = btf__type_by_id(info->src_btf, i);

		/* add members for struct and union */
		if (btf_is_composite(type)) {
			struct btf_member *cloned_m, *m;
			unsigned short vlen;
			int idx_src;

			name = btf__str_by_offset(info->src_btf, type->name_off);

			if (btf_is_struct(type))
				err = btf__add_struct(btf_new, name, type->size);
			else
				err = btf__add_union(btf_new, name, type->size);

			if (err < 0)
				goto err_out;
			new_id = err;

			cloned_m = btf_members(cloned_type);
			m = btf_members(type);
			vlen = btf_vlen(cloned_type);
			for (idx_src = 0; idx_src < vlen; idx_src++, cloned_m++, m++) {
				/* add only members that are marked as used */
				if (cloned_m->name_off != MARKED)
					continue;

				name = btf__str_by_offset(info->src_btf, m->name_off);
				err = btf__add_field(btf_new, name, m->type,
						     btf_member_bit_offset(cloned_type, idx_src),
						     btf_member_bitfield_size(cloned_type, idx_src));
				if (err < 0)
					goto err_out;
			}
		} else {
			err = btf__add_type(btf_new, info->src_btf, type);
			if (err < 0)
				goto err_out;
			new_id = err;
		}

		/* add ID mapping */
		ids[i] = new_id;
	}

	/* second pass: fix up type ids */
	for (i = 1; i < btf__type_cnt(btf_new); i++) {
		struct btf_type *btf_type = (struct btf_type *) btf__type_by_id(btf_new, i);

		err = btf_type_visit_type_ids(btf_type, btfgen_remap_id, ids);
		if (err)
			goto err_out;
	}

	free(ids);
	return btf_new;

err_out:
	btf__free(btf_new);
	free(ids);
	errno = -err;
	return NULL;
}
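
/* A small worked example of the two passes above: if only marked source types
 * with IDs {3, 7} get copied, they receive IDs {1, 2} in btf_new and the ids[]
 * map holds ids[3] = 1 and ids[7] = 2; the second pass then rewrites every
 * type reference (a PTR's pointee, a member's type, etc.) through
 * btfgen_remap_id() so it points at the new IDs.
 */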

/* Create minimized BTF file for a set of BPF objects.
 *
 * The BTFGen algorithm is divided into two main parts: (1) collect the
 * BTF types that are involved in relocations and (2) generate the BTF
 * object using the collected types.
 *
 * In order to collect the types involved in the relocations, we parse
 * the BTF and BTF.ext sections of the BPF objects and use
 * bpf_core_calc_relo_insn() to get the target specification, which
 * indicates how the types and fields are used in a relocation.
 *
 * Types are recorded in different ways according to the kind of the
 * relocation. For field-based relocations only the members that are
 * actually used are saved in order to reduce the size of the generated
 * BTF file. For type-based relocations empty structs/unions are
 * generated, and for enum-based relocations the whole type is saved.
 *
 * The second part of the algorithm generates the BTF object. It creates
 * an empty BTF object and fills it with the types recorded in the
 * previous step. This function takes care of only adding the structure
 * and union members that were marked as used and it also fixes up the
 * type IDs on the generated BTF object.
 */
static int minimize_btf(const char *src_btf, const char *dst_btf, const char *objspaths[])
{
	struct btfgen_info *info;
	struct btf *btf_new = NULL;
	int err, i;

	info = btfgen_new_info(src_btf);
	if (!info) {
		err = -errno;
		p_err("failed to allocate info structure: %s", strerror(errno));
		goto out;
	}

	for (i = 0; objspaths[i] != NULL; i++) {
		err = btfgen_record_obj(info, objspaths[i]);
		if (err) {
			p_err("error recording relocations for %s: %s", objspaths[i],
			      strerror(errno));
			goto out;
		}
	}

	btf_new = btfgen_get_btf(info);
	if (!btf_new) {
		err = -errno;
		p_err("error generating BTF: %s", strerror(errno));
		goto out;
	}

	err = btf_save_raw(btf_new, dst_btf);
	if (err) {
		p_err("error saving btf file: %s", strerror(errno));
		goto out;
	}

out:
	btf__free(btf_new);
	btfgen_free_info(info);

	return err;
}

static int do_min_core_btf(int argc, char **argv)
{
	const char *input, *output, **objs;
	int i, err;

	if (!REQ_ARGS(3)) {
		usage();
		return -1;
	}

	input = GET_ARG();
	output = GET_ARG();

	objs = (const char **) calloc(argc + 1, sizeof(*objs));
	if (!objs) {
		p_err("failed to allocate array for object names");
		return -ENOMEM;
	}

	i = 0;
	while (argc)
		objs[i++] = GET_ARG();

	err = minimize_btf(input, output, objs);
	free(objs);
	return err;
}
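
/* Typical invocation (paths are illustrative):
 *
 *	bpftool gen min_core_btf /sys/kernel/btf/vmlinux vmlinux.min.btf prog.bpf.o
 *
 * producing a minimized BTF file that contains only the types needed by
 * prog.bpf.o's CO-RE relocations.
 */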
2327 { 0 } 2328 }; 2329 2330 int do_gen(int argc, char **argv) 2331 { 2332 return cmd_select(cmds, argc, argv, do_help); 2333 } 2334