// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Facebook */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/err.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/libbpf_internal.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <bpf/btf.h>

#include "json_writer.h"
#include "main.h"

#define MAX_OBJ_NAME_LEN 64

static void sanitize_identifier(char *name)
{
	int i;

	for (i = 0; name[i]; i++)
		if (!isalnum(name[i]) && name[i] != '_')
			name[i] = '_';
}

static bool str_has_prefix(const char *str, const char *prefix)
{
	return strncmp(str, prefix, strlen(prefix)) == 0;
}

static bool str_has_suffix(const char *str, const char *suffix)
{
	size_t i, n1 = strlen(str), n2 = strlen(suffix);

	if (n1 < n2)
		return false;

	for (i = 0; i < n2; i++) {
		if (str[n1 - i - 1] != suffix[n2 - i - 1])
			return false;
	}

	return true;
}

static void get_obj_name(char *name, const char *file)
{
	/* Using basename() GNU version which doesn't modify arg. */
	strncpy(name, basename(file), MAX_OBJ_NAME_LEN - 1);
	name[MAX_OBJ_NAME_LEN - 1] = '\0';
	if (str_has_suffix(name, ".o"))
		name[strlen(name) - 2] = '\0';
	sanitize_identifier(name);
}

static void get_header_guard(char *guard, const char *obj_name, const char *suffix)
{
	int i;

	sprintf(guard, "__%s_%s__", obj_name, suffix);
	for (i = 0; guard[i]; i++)
		guard[i] = toupper(guard[i]);
}

static bool get_map_ident(const struct bpf_map *map, char *buf, size_t buf_sz)
{
	static const char *sfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
	const char *name = bpf_map__name(map);
	int i, n;

	if (!bpf_map__is_internal(map)) {
		snprintf(buf, buf_sz, "%s", name);
		return true;
	}

	for (i = 0, n = ARRAY_SIZE(sfxs); i < n; i++) {
		const char *sfx = sfxs[i], *p;

		p = strstr(name, sfx);
		if (p) {
			snprintf(buf, buf_sz, "%s", p + 1);
			sanitize_identifier(buf);
			return true;
		}
	}

	return false;
}

static bool get_datasec_ident(const char *sec_name, char *buf, size_t buf_sz)
{
	static const char *pfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
	int i, n;

	for (i = 0, n = ARRAY_SIZE(pfxs); i < n; i++) {
		const char *pfx = pfxs[i];

		if (str_has_prefix(sec_name, pfx)) {
			snprintf(buf, buf_sz, "%s", sec_name + 1);
			sanitize_identifier(buf);
			return true;
		}
	}

	return false;
}

static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args)
{
	vprintf(fmt, args);
}

static int codegen_datasec_def(struct bpf_object *obj,
			       struct btf *btf,
			       struct btf_dump *d,
			       const struct btf_type *sec,
			       const char *obj_name)
{
	const char *sec_name = btf__name_by_offset(btf, sec->name_off);
	const struct btf_var_secinfo *sec_var = btf_var_secinfos(sec);
	int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec);
	char var_ident[256], sec_ident[256];
	bool strip_mods = false;

	if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
		return 0;

	if (strcmp(sec_name, ".kconfig") != 0)
		strip_mods = true;

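	/* Illustration (with a hypothetical object name "myobj" and a
	 * single int global in .data): the loop below emits one member per
	 * non-static variable, so the generated skeleton contains roughly
	 *
	 *	struct myobj__data {
	 *		int my_global;
	 *	} *data;
	 *
	 * with __pad members interleaved as needed to reproduce the
	 * offsets recorded in BTF.
	 */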
	printf("	struct %s__%s {\n", obj_name, sec_ident);
	for (i = 0; i < vlen; i++, sec_var++) {
		const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
		const char *var_name = btf__name_by_offset(btf, var->name_off);
		DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
			.field_name = var_ident,
			.indent_level = 2,
			.strip_mods = strip_mods,
		);
		int need_off = sec_var->offset, align_off, align;
		__u32 var_type_id = var->type;

		/* static variables are not exposed through BPF skeleton */
		if (btf_var(var)->linkage == BTF_VAR_STATIC)
			continue;

		if (off > need_off) {
			p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n",
			      sec_name, i, need_off, off);
			return -EINVAL;
		}

		align = btf__align_of(btf, var->type);
		if (align <= 0) {
			p_err("Failed to determine alignment of variable '%s': %d",
			      var_name, align);
			return -EINVAL;
		}
		/* Assume 32-bit architectures when generating data section
		 * struct memory layout. Given bpftool can't know which target
		 * host architecture it's emitting skeleton for, we need to be
		 * conservative and assume 32-bit one to ensure enough padding
		 * bytes are generated for pointer and long types. This will
		 * still work correctly for 64-bit architectures, because in
		 * the worst case we'll generate unnecessary padding field,
		 * which on 64-bit architectures is not strictly necessary and
		 * would be handled by natural 8-byte alignment. But it still
		 * will be a correct memory layout, based on recorded offsets
		 * in BTF.
		 */
		if (align > 4)
			align = 4;

		align_off = (off + align - 1) / align * align;
		if (align_off != need_off) {
			printf("\t\tchar __pad%d[%d];\n",
			       pad_cnt, need_off - off);
			pad_cnt++;
		}

		/* sanitize variable name, e.g., for static vars inside
		 * a function, its name is '<function name>.<variable name>',
		 * which we'll turn into a '<function name>_<variable name>'
		 */
		var_ident[0] = '\0';
		strncat(var_ident, var_name, sizeof(var_ident) - 1);
		sanitize_identifier(var_ident);

		printf("\t\t");
		err = btf_dump__emit_type_decl(d, var_type_id, &opts);
		if (err)
			return err;
		printf(";\n");

		off = sec_var->offset + sec_var->size;
	}
	printf("	} *%s;\n", sec_ident);
	return 0;
}
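/* Worked example of the padding logic above (hypothetical .data layout):
 * "char c" at offset 0 followed by "long l" at offset 8. After emitting
 * "c" the cursor is at offset 1; with alignment capped at 4 it would
 * align to offset 4, not the required 8, so "char __pad0[7];" is emitted
 * before "l" to reproduce the BTF-recorded offset regardless of the host
 * architecture the skeleton is compiled for.
 */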
static const struct btf_type *find_type_for_map(struct btf *btf, const char *map_ident)
{
	int n = btf__type_cnt(btf), i;
	char sec_ident[256];

	for (i = 1; i < n; i++) {
		const struct btf_type *t = btf__type_by_id(btf, i);
		const char *name;

		if (!btf_is_datasec(t))
			continue;

		name = btf__str_by_offset(btf, t->name_off);
		if (!get_datasec_ident(name, sec_ident, sizeof(sec_ident)))
			continue;

		if (strcmp(sec_ident, map_ident) == 0)
			return t;
	}
	return NULL;
}

static bool is_internal_mmapable_map(const struct bpf_map *map, char *buf, size_t sz)
{
	if (!bpf_map__is_internal(map) || !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
		return false;

	if (!get_map_ident(map, buf, sz))
		return false;

	return true;
}

static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	struct btf_dump *d;
	struct bpf_map *map;
	const struct btf_type *sec;
	char map_ident[256];
	int err = 0;

	d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
	if (!d)
		return -errno;

	bpf_object__for_each_map(map, obj) {
		/* only generate definitions for memory-mapped internal maps */
		if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
			continue;

		sec = find_type_for_map(btf, map_ident);

		/* In some cases (e.g., sections like .rodata.cst16 containing
		 * compiler allocated string constants only) there will be
		 * special internal maps with no corresponding DATASEC BTF
		 * type. In such case, generate empty structs for each such
		 * map. It will still be memory-mapped and its contents
		 * accessible from user-space through BPF skeleton.
		 */
		if (!sec) {
			printf("	struct %s__%s {\n", obj_name, map_ident);
			printf("	} *%s;\n", map_ident);
		} else {
			err = codegen_datasec_def(obj, btf, d, sec, obj_name);
			if (err)
				goto out;
		}
	}

out:
	btf_dump__free(d);
	return err;
}

static bool btf_is_ptr_to_func_proto(const struct btf *btf,
				     const struct btf_type *v)
{
	return btf_is_ptr(v) && btf_is_func_proto(btf__type_by_id(btf, v->type));
}

static int codegen_subskel_datasecs(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	struct btf_dump *d;
	struct bpf_map *map;
	const struct btf_type *sec, *var;
	const struct btf_var_secinfo *sec_var;
	int i, err = 0, vlen;
	char map_ident[256], sec_ident[256];
	bool strip_mods = false, needs_typeof = false;
	const char *sec_name, *var_name;
	__u32 var_type_id;

	d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
	if (!d)
		return -errno;

	bpf_object__for_each_map(map, obj) {
		/* only generate definitions for memory-mapped internal maps */
		if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
			continue;

		sec = find_type_for_map(btf, map_ident);
		if (!sec)
			continue;

		sec_name = btf__name_by_offset(btf, sec->name_off);
		if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
			continue;

		strip_mods = strcmp(sec_name, ".kconfig") != 0;
		printf("	struct %s__%s {\n", obj_name, sec_ident);

		sec_var = btf_var_secinfos(sec);
		vlen = btf_vlen(sec);
		for (i = 0; i < vlen; i++, sec_var++) {
			DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
				.indent_level = 2,
				.strip_mods = strip_mods,
				/* we'll print the name separately */
				.field_name = "",
			);

			var = btf__type_by_id(btf, sec_var->type);
			var_name = btf__name_by_offset(btf, var->name_off);
			var_type_id = var->type;

			/* static variables are not exposed through BPF skeleton */
			if (btf_var(var)->linkage == BTF_VAR_STATIC)
				continue;

			/* The datasec member has KIND_VAR but we want the
			 * underlying type of the variable (e.g. KIND_INT).
			 */
			var = skip_mods_and_typedefs(btf, var->type, NULL);

			printf("\t\t");
			/* Func and array members require special handling.
			 * Instead of producing `typename *var`, they produce
			 * `typeof(typename) *var`. This allows us to keep a
			 * similar syntax where the identifier is just prefixed
			 * by *, allowing us to ignore C declaration minutiae.
			 */
			needs_typeof = btf_is_array(var) || btf_is_ptr_to_func_proto(btf, var);
			if (needs_typeof)
				printf("typeof(");

			err = btf_dump__emit_type_decl(d, var_type_id, &opts);
			if (err)
				goto out;

			if (needs_typeof)
				printf(")");

			printf(" *%s;\n", var_name);
		}
		printf("	} %s;\n", sec_ident);
	}

out:
	btf_dump__free(d);
	return err;
}
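/* For reference, an array member such as "int arr[4]" in .bss of a
 * hypothetical object "myobj" comes out of the loop above roughly as
 *
 *	struct myobj__bss {
 *		typeof(int[4]) *arr;
 *	} bss;
 *
 * since the plain spelling "int[4] *arr" would not be valid C.
 */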
static void codegen(const char *template, ...)
{
	const char *src, *end;
	int skip_tabs = 0, n;
	char *s, *dst;
	va_list args;
	char c;

	n = strlen(template);
	s = malloc(n + 1);
	if (!s)
		exit(-1);
	src = template;
	dst = s;

	/* find out "baseline" indentation to skip */
	while ((c = *src++)) {
		if (c == '\t') {
			skip_tabs++;
		} else if (c == '\n') {
			break;
		} else {
			p_err("unrecognized character at pos %td in template '%s': '%c'",
			      src - template - 1, template, c);
			free(s);
			exit(-1);
		}
	}

	while (*src) {
		/* skip baseline indentation tabs */
		for (n = skip_tabs; n > 0; n--, src++) {
			if (*src != '\t') {
				p_err("not enough tabs at pos %td in template '%s'",
				      src - template - 1, template);
				free(s);
				exit(-1);
			}
		}
		/* trim trailing whitespace */
		end = strchrnul(src, '\n');
		for (n = end - src; n > 0 && isspace(src[n - 1]); n--)
			;
		memcpy(dst, src, n);
		dst += n;
		if (*end)
			*dst++ = '\n';
		src = *end ? end + 1 : end;
	}
	*dst++ = '\0';

	/* print out using adjusted template */
	va_start(args, template);
	n = vprintf(s, args);
	va_end(args);

	free(s);
}
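/* For reference, a call along the lines of
 *
 *	codegen("\
 *		\n\
 *		if (x)			\n\
 *			return %d;	\n\
 *		", 0);
 *
 * learns the two baseline tabs from the first template line, strips them
 * (plus trailing whitespace) from every following line, and prints:
 *
 *	if (x)
 *		return 0;
 */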
static void print_hex(const char *data, int data_sz)
{
	int i, len;

	for (i = 0, len = 0; i < data_sz; i++) {
		int w = data[i] ? 4 : 2;

		len += w;
		if (len > 78) {
			printf("\\\n");
			len = w;
		}
		if (!data[i])
			printf("\\0");
		else
			printf("\\x%02x", (unsigned char)data[i]);
	}
}

static size_t bpf_map_mmap_sz(const struct bpf_map *map)
{
	long page_sz = sysconf(_SC_PAGE_SIZE);
	size_t map_sz;

	map_sz = (size_t)roundup(bpf_map__value_size(map), 8) * bpf_map__max_entries(map);
	map_sz = roundup(map_sz, page_sz);
	return map_sz;
}

/* Emit type size asserts for all top-level fields in memory-mapped internal maps. */
static void codegen_asserts(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	struct bpf_map *map;
	struct btf_var_secinfo *sec_var;
	int i, vlen;
	const struct btf_type *sec;
	char map_ident[256], var_ident[256];

	if (!btf)
		return;

	codegen("\
		\n\
		__attribute__((unused)) static void			    \n\
		%1$s__assert(struct %1$s *s __attribute__((unused)))	    \n\
		{							    \n\
		#ifdef __cplusplus					    \n\
		#define _Static_assert static_assert			    \n\
		#endif							    \n\
		", obj_name);

	bpf_object__for_each_map(map, obj) {
		if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
			continue;

		sec = find_type_for_map(btf, map_ident);
		if (!sec) {
			/* best effort, couldn't find the type for this map */
			continue;
		}

		sec_var = btf_var_secinfos(sec);
		vlen = btf_vlen(sec);

		for (i = 0; i < vlen; i++, sec_var++) {
			const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
			const char *var_name = btf__name_by_offset(btf, var->name_off);
			long var_size;

			/* static variables are not exposed through BPF skeleton */
			if (btf_var(var)->linkage == BTF_VAR_STATIC)
				continue;

			var_size = btf__resolve_size(btf, var->type);
			if (var_size < 0)
				continue;

			var_ident[0] = '\0';
			strncat(var_ident, var_name, sizeof(var_ident) - 1);
			sanitize_identifier(var_ident);

			printf("\t_Static_assert(sizeof(s->%s->%s) == %ld, \"unexpected size of '%s'\");\n",
			       map_ident, var_ident, var_size, var_ident);
		}
	}
	codegen("\
		\n\
		#ifdef __cplusplus					    \n\
		#undef _Static_assert					    \n\
		#endif							    \n\
		}							    \n\
		");
}
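/* e.g., for a hypothetical __u64 global "cnt" in .bss, the loop above
 * prints:
 *
 *	_Static_assert(sizeof(s->bss->cnt) == 8, "unexpected size of 'cnt'");
 */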
static void codegen_attach_detach(struct bpf_object *obj, const char *obj_name)
{
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		const char *tp_name;

		codegen("\
			\n\
			\n\
			static inline int				    \n\
			%1$s__%2$s__attach(struct %1$s *skel)		    \n\
			{						    \n\
				int prog_fd = skel->progs.%2$s.prog_fd;	    \n\
			", obj_name, bpf_program__name(prog));

		switch (bpf_program__type(prog)) {
		case BPF_PROG_TYPE_RAW_TRACEPOINT:
			tp_name = strchr(bpf_program__section_name(prog), '/') + 1;
			printf("\tint fd = skel_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name);
			break;
		case BPF_PROG_TYPE_TRACING:
		case BPF_PROG_TYPE_LSM:
			if (bpf_program__expected_attach_type(prog) == BPF_TRACE_ITER)
				printf("\tint fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);\n");
			else
				printf("\tint fd = skel_raw_tracepoint_open(NULL, prog_fd);\n");
			break;
		default:
			printf("\tint fd = ((void)prog_fd, 0); /* auto-attach not supported */\n");
			break;
		}
		codegen("\
			\n\
			\n\
				if (fd > 0)				    \n\
					skel->links.%1$s_fd = fd;	    \n\
				return fd;				    \n\
			}						    \n\
			", bpf_program__name(prog));
	}

	codegen("\
		\n\
		\n\
		static inline int					    \n\
		%1$s__attach(struct %1$s *skel)				    \n\
		{							    \n\
			int ret = 0;					    \n\
									    \n\
		", obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				ret = ret < 0 ? ret : %1$s__%2$s__attach(skel); \n\
			", obj_name, bpf_program__name(prog));
	}

	codegen("\
		\n\
			return ret < 0 ? ret : 0;			    \n\
		}							    \n\
									    \n\
		static inline void					    \n\
		%1$s__detach(struct %1$s *skel)				    \n\
		{							    \n\
		", obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				skel_closenz(skel->links.%1$s_fd);	    \n\
			", bpf_program__name(prog));
	}

	codegen("\
		\n\
		}							    \n\
		");
}
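/* Sketch of the loader-mode attach function generated above for a
 * hypothetical object "myobj" with one raw_tracepoint program "handler"
 * attached to sys_enter:
 *
 *	static inline int
 *	myobj__handler__attach(struct myobj *skel)
 *	{
 *		int prog_fd = skel->progs.handler.prog_fd;
 *		int fd = skel_raw_tracepoint_open("sys_enter", prog_fd);
 *
 *		if (fd > 0)
 *			skel->links.handler_fd = fd;
 *		return fd;
 *	}
 */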
static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
{
	struct bpf_program *prog;
	struct bpf_map *map;
	char ident[256];

	codegen("\
		\n\
		static void						    \n\
		%1$s__destroy(struct %1$s *skel)			    \n\
		{							    \n\
			if (!skel)					    \n\
				return;					    \n\
			%1$s__detach(skel);				    \n\
		",
		obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				skel_closenz(skel->progs.%1$s.prog_fd);	    \n\
			", bpf_program__name(prog));
	}

	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;
		if (bpf_map__is_internal(map) &&
		    (bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			printf("\tskel_free_map_data(skel->%1$s, skel->maps.%1$s.initial_value, %2$zd);\n",
			       ident, bpf_map_mmap_sz(map));
		codegen("\
			\n\
				skel_closenz(skel->maps.%1$s.map_fd);	    \n\
			", ident);
	}
	codegen("\
		\n\
			skel_free(skel);				    \n\
		}							    \n\
		",
		obj_name);
}

static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard)
{
	DECLARE_LIBBPF_OPTS(gen_loader_opts, opts);
	struct bpf_map *map;
	char ident[256];
	int err = 0;

	err = bpf_object__gen_loader(obj, &opts);
	if (err)
		return err;

	err = bpf_object__load(obj);
	if (err) {
		p_err("failed to load object file");
		goto out;
	}
	/* If there was no error during load then gen_loader_opts
	 * are populated with the loader program.
	 */

	/* finish generating 'struct skel' */
	codegen("\
		\n\
		};							    \n\
		", obj_name);

	codegen_attach_detach(obj, obj_name);

	codegen_destroy(obj, obj_name);

	codegen("\
		\n\
		static inline struct %1$s *				    \n\
		%1$s__open(void)					    \n\
		{							    \n\
			struct %1$s *skel;				    \n\
									    \n\
			skel = skel_alloc(sizeof(*skel));		    \n\
			if (!skel)					    \n\
				goto cleanup;				    \n\
			skel->ctx.sz = (void *)&skel->links - (void *)skel; \n\
		",
		obj_name, opts.data_sz);
	bpf_object__for_each_map(map, obj) {
		const void *mmap_data = NULL;
		size_t mmap_size = 0;

		if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
			continue;

		codegen("\
			\n\
				skel->%1$s = skel_prep_map_data((void *)\"\\ \n\
			", ident);
		mmap_data = bpf_map__initial_value(map, &mmap_size);
		print_hex(mmap_data, mmap_size);
		codegen("\
			\n\
			\", %1$zd, %2$zd);				    \n\
				if (!skel->%3$s)			    \n\
					goto cleanup;			    \n\
				skel->maps.%3$s.initial_value = (__u64) (long) skel->%3$s;\n\
			", bpf_map_mmap_sz(map), mmap_size, ident);
	}
	codegen("\
		\n\
			return skel;					    \n\
		cleanup:						    \n\
			%1$s__destroy(skel);				    \n\
			return NULL;					    \n\
		}							    \n\
									    \n\
		static inline int					    \n\
		%1$s__load(struct %1$s *skel)				    \n\
		{							    \n\
			struct bpf_load_and_run_opts opts = {};		    \n\
			int err;					    \n\
									    \n\
			opts.ctx = (struct bpf_loader_ctx *)skel;	    \n\
			opts.data_sz = %2$d;				    \n\
			opts.data = (void *)\"\\			    \n\
		",
		obj_name, opts.data_sz);
	print_hex(opts.data, opts.data_sz);
	codegen("\
		\n\
		\";							    \n\
		");

	codegen("\
		\n\
			opts.insns_sz = %d;				    \n\
			opts.insns = (void *)\"\\			    \n\
		",
		opts.insns_sz);
	print_hex(opts.insns, opts.insns_sz);
	codegen("\
		\n\
		\";							    \n\
			err = bpf_load_and_run(&opts);			    \n\
			if (err < 0)					    \n\
				return err;				    \n\
		", obj_name);
	bpf_object__for_each_map(map, obj) {
		const char *mmap_flags;

		if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
			continue;

		if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG)
			mmap_flags = "PROT_READ";
		else
			mmap_flags = "PROT_READ | PROT_WRITE";

		codegen("\
			\n\
				skel->%1$s = skel_finalize_map_data(&skel->maps.%1$s.initial_value, \n\
								%2$zd, %3$s, skel->maps.%1$s.map_fd);\n\
				if (!skel->%1$s)			    \n\
					return -ENOMEM;			    \n\
			",
			ident, bpf_map_mmap_sz(map), mmap_flags);
	}
	codegen("\
		\n\
			return 0;					    \n\
		}							    \n\
									    \n\
		static inline struct %1$s *				    \n\
		%1$s__open_and_load(void)				    \n\
		{							    \n\
			struct %1$s *skel;				    \n\
									    \n\
			skel = %1$s__open();				    \n\
			if (!skel)					    \n\
				return NULL;				    \n\
			if (%1$s__load(skel)) {				    \n\
				%1$s__destroy(skel);			    \n\
				return NULL;				    \n\
			}						    \n\
			return skel;					    \n\
		}							    \n\
									    \n\
		", obj_name);

	codegen_asserts(obj, obj_name);

	codegen("\
		\n\
		\n\
		#endif /* %s */						    \n\
		",
		header_guard);
	err = 0;
out:
	return err;
}
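/* Sketch of what the two helpers below emit into the generated
 * __create_skeleton() for a hypothetical map "counters" and program
 * "handler", both at index 0:
 *
 *	s->maps[0].name = "counters";
 *	s->maps[0].map = &obj->maps.counters;
 *	s->maps[0].mmaped = (void **)&obj->counters;  (internal maps only)
 *	s->progs[0].name = "handler";
 *	s->progs[0].prog = &obj->progs.handler;
 */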
static void
codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped)
{
	struct bpf_map *map;
	char ident[256];
	size_t i;

	if (!map_cnt)
		return;

	codegen("\
		\n\
		\n\
		/* maps */						    \n\
		s->map_cnt = %zu;					    \n\
		s->map_skel_sz = sizeof(*s->maps);			    \n\
		s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
		if (!s->maps) {						    \n\
			err = -ENOMEM;					    \n\
			goto err;					    \n\
		}							    \n\
		",
		map_cnt
	);
	i = 0;
	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		codegen("\
			\n\
			\n\
			s->maps[%zu].name = \"%s\";			    \n\
			s->maps[%zu].map = &obj->maps.%s;		    \n\
			",
			i, bpf_map__name(map), i, ident);
		/* memory-mapped internal maps */
		if (mmaped && is_internal_mmapable_map(map, ident, sizeof(ident))) {
			printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
			       i, ident);
		}
		i++;
	}
}

static void
codegen_progs_skeleton(struct bpf_object *obj, size_t prog_cnt, bool populate_links)
{
	struct bpf_program *prog;
	size_t i;

	if (!prog_cnt)
		return;

	codegen("\
		\n\
		\n\
		/* programs */						    \n\
		s->prog_cnt = %zu;					    \n\
		s->prog_skel_sz = sizeof(*s->progs);			    \n\
		s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
		if (!s->progs) {					    \n\
			err = -ENOMEM;					    \n\
			goto err;					    \n\
		}							    \n\
		",
		prog_cnt
	);
	i = 0;
	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
			\n\
			s->progs[%1$zu].name = \"%2$s\";		    \n\
			s->progs[%1$zu].prog = &obj->progs.%2$s;	    \n\
			",
			i, bpf_program__name(prog));

		if (populate_links) {
			codegen("\
				\n\
				s->progs[%1$zu].link = &obj->links.%2$s;    \n\
				",
				i, bpf_program__name(prog));
		}
		i++;
	}
}
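/* A consumer of the generated skeleton header typically does something
 * along these lines (sketch; "myobj" stands for the object name):
 *
 *	struct myobj *skel;
 *
 *	skel = myobj__open_and_load();
 *	if (!skel)
 *		return -1;
 *	if (myobj__attach(skel))
 *		goto cleanup;
 *	...
 * cleanup:
 *	myobj__destroy(skel);
 */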
static int do_skeleton(int argc, char **argv)
{
	char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
	size_t map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz;
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
	char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
	struct bpf_object *obj = NULL;
	const char *file;
	char ident[256];
	struct bpf_program *prog;
	int fd, err = -1;
	struct bpf_map *map;
	struct btf *btf;
	struct stat st;

	if (!REQ_ARGS(1)) {
		usage();
		return -1;
	}
	file = GET_ARG();

	while (argc) {
		if (!REQ_ARGS(2))
			return -1;

		if (is_prefix(*argv, "name")) {
			NEXT_ARG();

			if (obj_name[0] != '\0') {
				p_err("object name already specified");
				return -1;
			}

			strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
			obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
		} else {
			p_err("unknown arg %s", *argv);
			return -1;
		}

		NEXT_ARG();
	}

	if (argc) {
		p_err("extra unknown arguments");
		return -1;
	}

	if (stat(file, &st)) {
		p_err("failed to stat() %s: %s", file, strerror(errno));
		return -1;
	}
	file_sz = st.st_size;
	mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
	fd = open(file, O_RDONLY);
	if (fd < 0) {
		p_err("failed to open() %s: %s", file, strerror(errno));
		return -1;
	}
	obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
	if (obj_data == MAP_FAILED) {
		obj_data = NULL;
		p_err("failed to mmap() %s: %s", file, strerror(errno));
		goto out;
	}
	if (obj_name[0] == '\0')
		get_obj_name(obj_name, file);
	opts.object_name = obj_name;
	if (verifier_logs)
		/* log_level1 + log_level2 + stats, but not stable UAPI */
		opts.kernel_log_level = 1 + 2 + 4;
	obj = bpf_object__open_mem(obj_data, file_sz, &opts);
	if (!obj) {
		char err_buf[256];

		err = -errno;
		libbpf_strerror(err, err_buf, sizeof(err_buf));
		p_err("failed to open BPF object file: %s", err_buf);
		goto out;
	}

	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident))) {
			p_err("ignoring unrecognized internal map '%s'...",
			      bpf_map__name(map));
			continue;
		}
		map_cnt++;
	}
	bpf_object__for_each_program(prog, obj) {
		prog_cnt++;
	}

	get_header_guard(header_guard, obj_name, "SKEL_H");
	if (use_loader) {
		codegen("\
		\n\
		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */  \n\
		/* THIS FILE IS AUTOGENERATED BY BPFTOOL! */		    \n\
		#ifndef %2$s						    \n\
		#define %2$s						    \n\
		\n\
		#include <bpf/skel_internal.h>				    \n\
		\n\
		struct %1$s {						    \n\
			struct bpf_loader_ctx ctx;			    \n\
		",
		obj_name, header_guard
		);
	} else {
		codegen("\
		\n\
		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */  \n\
									    \n\
		/* THIS FILE IS AUTOGENERATED BY BPFTOOL! */		    \n\
		#ifndef %2$s						    \n\
		#define %2$s						    \n\
									    \n\
		#include <errno.h>					    \n\
		#include <stdlib.h>					    \n\
		#include <bpf/libbpf.h>					    \n\
									    \n\
		struct %1$s {						    \n\
			struct bpf_object_skeleton *skeleton;		    \n\
			struct bpf_object *obj;				    \n\
		",
		obj_name, header_guard
		);
	}

	if (map_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_map(map, obj) {
			if (!get_map_ident(map, ident, sizeof(ident)))
				continue;
			if (use_loader)
				printf("\t\tstruct bpf_map_desc %s;\n", ident);
			else
				printf("\t\tstruct bpf_map *%s;\n", ident);
		}
		printf("\t} maps;\n");
	}

	if (prog_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			if (use_loader)
				printf("\t\tstruct bpf_prog_desc %s;\n",
				       bpf_program__name(prog));
			else
				printf("\t\tstruct bpf_program *%s;\n",
				       bpf_program__name(prog));
		}
		printf("\t} progs;\n");
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			if (use_loader)
				printf("\t\tint %s_fd;\n",
				       bpf_program__name(prog));
			else
				printf("\t\tstruct bpf_link *%s;\n",
				       bpf_program__name(prog));
		}
		printf("\t} links;\n");
	}

	btf = bpf_object__btf(obj);
	if (btf) {
		err = codegen_datasecs(obj, obj_name);
		if (err)
			goto out;
	}
	if (use_loader) {
		err = gen_trace(obj, obj_name, header_guard);
		goto out;
	}

	codegen("\
		\n\
		\n\
		#ifdef __cplusplus					    \n\
			static inline struct %1$s *open(const struct bpf_object_open_opts *opts = nullptr);\n\
			static inline struct %1$s *open_and_load();	    \n\
			static inline int load(struct %1$s *skel);	    \n\
			static inline int attach(struct %1$s *skel);	    \n\
			static inline void detach(struct %1$s *skel);	    \n\
			static inline void destroy(struct %1$s *skel);	    \n\
			static inline const void *elf_bytes(size_t *sz);    \n\
		#endif /* __cplusplus */				    \n\
		};							    \n\
									    \n\
		static void						    \n\
		%1$s__destroy(struct %1$s *obj)				    \n\
		{							    \n\
			if (!obj)					    \n\
				return;					    \n\
			if (obj->skeleton)				    \n\
				bpf_object__destroy_skeleton(obj->skeleton);\n\
			free(obj);					    \n\
		}							    \n\
									    \n\
		static inline int					    \n\
		%1$s__create_skeleton(struct %1$s *obj);		    \n\
									    \n\
		static inline struct %1$s *				    \n\
		%1$s__open_opts(const struct bpf_object_open_opts *opts)   \n\
		{							    \n\
			struct %1$s *obj;				    \n\
			int err;					    \n\
									    \n\
			obj = (struct %1$s *)calloc(1, sizeof(*obj));	    \n\
			if (!obj) {					    \n\
				errno = ENOMEM;				    \n\
				return NULL;				    \n\
			}						    \n\
									    \n\
			err = %1$s__create_skeleton(obj);		    \n\
			if (err)					    \n\
				goto err_out;				    \n\
									    \n\
			err = bpf_object__open_skeleton(obj->skeleton, opts);\n\
			if (err)					    \n\
				goto err_out;				    \n\
									    \n\
			return obj;					    \n\
		err_out:						    \n\
			%1$s__destroy(obj);				    \n\
			errno = -err;					    \n\
			return NULL;					    \n\
		}							    \n\
									    \n\
		static inline struct %1$s *				    \n\
		%1$s__open(void)					    \n\
		{							    \n\
			return %1$s__open_opts(NULL);			    \n\
		}							    \n\
									    \n\
		static inline int					    \n\
		%1$s__load(struct %1$s *obj)				    \n\
		{							    \n\
			return bpf_object__load_skeleton(obj->skeleton);    \n\
		}							    \n\
									    \n\
		static inline struct %1$s *				    \n\
		%1$s__open_and_load(void)				    \n\
		{							    \n\
			struct %1$s *obj;				    \n\
			int err;					    \n\
									    \n\
			obj = %1$s__open();				    \n\
			if (!obj)					    \n\
				return NULL;				    \n\
			err = %1$s__load(obj);				    \n\
			if (err) {					    \n\
				%1$s__destroy(obj);			    \n\
				errno = -err;				    \n\
				return NULL;				    \n\
			}						    \n\
			return obj;					    \n\
		}							    \n\
									    \n\
		static inline int					    \n\
		%1$s__attach(struct %1$s *obj)				    \n\
		{							    \n\
			return bpf_object__attach_skeleton(obj->skeleton);  \n\
		}							    \n\
									    \n\
		static inline void					    \n\
		%1$s__detach(struct %1$s *obj)				    \n\
		{							    \n\
			bpf_object__detach_skeleton(obj->skeleton);	    \n\
		}							    \n\
		",
		obj_name
	);

	codegen("\
		\n\
		\n\
		static inline const void *%1$s__elf_bytes(size_t *sz);	    \n\
									    \n\
		static inline int					    \n\
		%1$s__create_skeleton(struct %1$s *obj)			    \n\
		{							    \n\
			struct bpf_object_skeleton *s;			    \n\
			int err;					    \n\
									    \n\
			s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
			if (!s)	{					    \n\
				err = -ENOMEM;				    \n\
				goto err;				    \n\
			}						    \n\
									    \n\
			s->sz = sizeof(*s);				    \n\
			s->name = \"%1$s\";				    \n\
			s->obj = &obj->obj;				    \n\
		",
		obj_name
	);

	codegen_maps_skeleton(obj, map_cnt, true /*mmaped*/);
	codegen_progs_skeleton(obj, prog_cnt, true /*populate_links*/);

	codegen("\
		\n\
		\n\
			s->data = %1$s__elf_bytes(&s->data_sz);		    \n\
									    \n\
			obj->skeleton = s;				    \n\
			return 0;					    \n\
		err:							    \n\
			bpf_object__destroy_skeleton(s);		    \n\
			return err;					    \n\
		}							    \n\
									    \n\
		static inline const void *%1$s__elf_bytes(size_t *sz)	    \n\
		{							    \n\
			static const char data[] __attribute__((__aligned__(8))) = \"\\\n\
		",
		obj_name
	);

	/* embed contents of BPF object file */
	print_hex(obj_data, file_sz);

	codegen("\
		\n\
		\";							    \n\
									    \n\
			*sz = sizeof(data) - 1;				    \n\
			return (const void *)data;			    \n\
		}							    \n\
									    \n\
		#ifdef __cplusplus					    \n\
		struct %1$s *%1$s::open(const struct bpf_object_open_opts *opts) { return %1$s__open_opts(opts); }\n\
		struct %1$s *%1$s::open_and_load() { return %1$s__open_and_load(); }	\n\
		int %1$s::load(struct %1$s *skel) { return %1$s__load(skel); }\n\
		int %1$s::attach(struct %1$s *skel) { return %1$s__attach(skel); }\n\
		void %1$s::detach(struct %1$s *skel) { %1$s__detach(skel); }\n\
		void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); }\n\
		const void *%1$s::elf_bytes(size_t *sz) { return %1$s__elf_bytes(sz); }\n\
		#endif /* __cplusplus */				    \n\
									    \n\
		",
		obj_name);

	codegen_asserts(obj, obj_name);

	codegen("\
		\n\
		\n\
		#endif /* %1$s */					    \n\
		",
		header_guard);
	err = 0;
out:
	bpf_object__close(obj);
	if (obj_data)
		munmap(obj_data, mmap_sz);
	close(fd);
	return err;
}

/* Subskeletons are like skeletons, except they don't own the bpf_object,
 * associated maps, links, etc. Instead, they know about the existence of
 * variables, maps, programs and are able to find their locations
 * _at runtime_ from an already loaded bpf_object.
 *
 * This allows for library-like BPF objects to have userspace counterparts
 * with access to their own items without having to know anything about the
 * final BPF object that the library was linked into.
 */
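/* e.g. (sketch; "mylib" and the variable name are placeholders): given
 * the bpf_object of an already loaded skeleton, a subskeleton is used
 * roughly as
 *
 *	struct mylib *lib = mylib__open(main_skel->obj);
 *	if (!lib)
 *		return -1;
 *	*lib->data.shared_counter += 1;
 *	mylib__destroy(lib);
 */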
static int do_subskeleton(int argc, char **argv)
{
	char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SUBSKEL_H__")];
	size_t i, len, file_sz, map_cnt = 0, prog_cnt = 0, mmap_sz, var_cnt = 0, var_idx = 0;
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
	char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
	struct bpf_object *obj = NULL;
	const char *file, *var_name;
	char ident[256];
	int fd, err = -1, map_type_id;
	const struct bpf_map *map;
	struct bpf_program *prog;
	struct btf *btf;
	const struct btf_type *map_type, *var_type;
	const struct btf_var_secinfo *var;
	struct stat st;

	if (!REQ_ARGS(1)) {
		usage();
		return -1;
	}
	file = GET_ARG();

	while (argc) {
		if (!REQ_ARGS(2))
			return -1;

		if (is_prefix(*argv, "name")) {
			NEXT_ARG();

			if (obj_name[0] != '\0') {
				p_err("object name already specified");
				return -1;
			}

			strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
			obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
		} else {
			p_err("unknown arg %s", *argv);
			return -1;
		}

		NEXT_ARG();
	}

	if (argc) {
		p_err("extra unknown arguments");
		return -1;
	}

	if (use_loader) {
		p_err("cannot use loader for subskeletons");
		return -1;
	}

	if (stat(file, &st)) {
		p_err("failed to stat() %s: %s", file, strerror(errno));
		return -1;
	}
	file_sz = st.st_size;
	mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
	fd = open(file, O_RDONLY);
	if (fd < 0) {
		p_err("failed to open() %s: %s", file, strerror(errno));
		return -1;
	}
	obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
	if (obj_data == MAP_FAILED) {
		obj_data = NULL;
		p_err("failed to mmap() %s: %s", file, strerror(errno));
		goto out;
	}
	if (obj_name[0] == '\0')
		get_obj_name(obj_name, file);

	/* The empty object name allows us to use bpf_map__name and produce
	 * ELF section names out of it. (".data" instead of "obj.data")
	 */
	opts.object_name = "";
	obj = bpf_object__open_mem(obj_data, file_sz, &opts);
	if (!obj) {
		char err_buf[256];

		libbpf_strerror(errno, err_buf, sizeof(err_buf));
		p_err("failed to open BPF object file: %s", err_buf);
		obj = NULL;
		goto out;
	}

	btf = bpf_object__btf(obj);
	if (!btf) {
		err = -1;
		p_err("need btf type information for %s", obj_name);
		goto out;
	}

	bpf_object__for_each_program(prog, obj) {
		prog_cnt++;
	}

	/* First, count how many variables we have to find.
	 * We need this in advance so the subskel can allocate the right
	 * amount of storage.
	 */
	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		/* Also count all maps that have a name */
		map_cnt++;

		if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
			continue;

		map_type_id = bpf_map__btf_value_type_id(map);
		if (map_type_id <= 0) {
			err = map_type_id;
			goto out;
		}
		map_type = btf__type_by_id(btf, map_type_id);

		var = btf_var_secinfos(map_type);
		len = btf_vlen(map_type);
		for (i = 0; i < len; i++, var++) {
			var_type = btf__type_by_id(btf, var->type);

			if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
				continue;

			var_cnt++;
		}
	}

	get_header_guard(header_guard, obj_name, "SUBSKEL_H");
	codegen("\
		\n\
		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */  \n\
									    \n\
		/* THIS FILE IS AUTOGENERATED! */			    \n\
		#ifndef %2$s						    \n\
		#define %2$s						    \n\
									    \n\
		#include <errno.h>					    \n\
		#include <stdlib.h>					    \n\
		#include <bpf/libbpf.h>					    \n\
									    \n\
		struct %1$s {						    \n\
			struct bpf_object *obj;				    \n\
			struct bpf_object_subskeleton *subskel;		    \n\
		", obj_name, header_guard);

	if (map_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_map(map, obj) {
			if (!get_map_ident(map, ident, sizeof(ident)))
				continue;
			printf("\t\tstruct bpf_map *%s;\n", ident);
		}
		printf("\t} maps;\n");
	}

	if (prog_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			printf("\t\tstruct bpf_program *%s;\n",
			       bpf_program__name(prog));
		}
		printf("\t} progs;\n");
	}

	err = codegen_subskel_datasecs(obj, obj_name);
	if (err)
		goto out;

	/* emit code that will allocate enough storage for all symbols */
	codegen("\
		\n\
		\n\
		#ifdef __cplusplus					    \n\
			static inline struct %1$s *open(const struct bpf_object *src);\n\
			static inline void destroy(struct %1$s *skel);	    \n\
		#endif /* __cplusplus */				    \n\
		};							    \n\
									    \n\
		static inline void					    \n\
		%1$s__destroy(struct %1$s *skel)			    \n\
		{							    \n\
			if (!skel)					    \n\
				return;					    \n\
			if (skel->subskel)				    \n\
				bpf_object__destroy_subskeleton(skel->subskel);\n\
			free(skel);					    \n\
		}							    \n\
									    \n\
		static inline struct %1$s *				    \n\
		%1$s__open(const struct bpf_object *src)		    \n\
		{							    \n\
			struct %1$s *obj;				    \n\
			struct bpf_object_subskeleton *s;		    \n\
			int err;					    \n\
									    \n\
			obj = (struct %1$s *)calloc(1, sizeof(*obj));	    \n\
			if (!obj) {					    \n\
				err = -ENOMEM;				    \n\
				goto err;				    \n\
			}						    \n\
			s = (struct bpf_object_subskeleton *)calloc(1, sizeof(*s));\n\
			if (!s) {					    \n\
				err = -ENOMEM;				    \n\
				goto err;				    \n\
			}						    \n\
			s->sz = sizeof(*s);				    \n\
			s->obj = src;					    \n\
			s->var_skel_sz = sizeof(*s->vars);		    \n\
			obj->subskel = s;				    \n\
									    \n\
			/* vars */					    \n\
			s->var_cnt = %2$d;				    \n\
			s->vars = (struct bpf_var_skeleton *)calloc(%2$d, sizeof(*s->vars));\n\
			if (!s->vars) {					    \n\
				err = -ENOMEM;				    \n\
				goto err;				    \n\
			}						    \n\
		",
		obj_name, var_cnt
	);

	/* walk through each symbol and emit the runtime representation */
	bpf_object__for_each_map(map, obj) {
		if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
			continue;

		map_type_id = bpf_map__btf_value_type_id(map);
		if (map_type_id <= 0)
			/* skip over internal maps with no type */
			continue;

		map_type = btf__type_by_id(btf, map_type_id);
		var = btf_var_secinfos(map_type);
		len = btf_vlen(map_type);
		for (i = 0; i < len; i++, var++) {
			var_type = btf__type_by_id(btf, var->type);
			var_name = btf__name_by_offset(btf, var_type->name_off);

			if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
				continue;

			/* Note that we use the dot prefix in .data as the
			 * field access operator i.e. maps%s becomes maps.data
			 */
			codegen("\
				\n\
				\n\
				s->vars[%3$d].name = \"%1$s\";		    \n\
				s->vars[%3$d].map = &obj->maps.%2$s;	    \n\
				s->vars[%3$d].addr = (void **) &obj->%2$s.%1$s;\n\
				", var_name, ident, var_idx);

			var_idx++;
		}
	}

	codegen_maps_skeleton(obj, map_cnt, false /*mmaped*/);
	codegen_progs_skeleton(obj, prog_cnt, false /*links*/);

	codegen("\
		\n\
		\n\
			err = bpf_object__open_subskeleton(s);		    \n\
			if (err)					    \n\
				goto err;				    \n\
									    \n\
			return obj;					    \n\
		err:							    \n\
			%1$s__destroy(obj);				    \n\
			errno = -err;					    \n\
			return NULL;					    \n\
		}							    \n\
									    \n\
		#ifdef __cplusplus					    \n\
		struct %1$s *%1$s::open(const struct bpf_object *src) { return %1$s__open(src); }\n\
		void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); }\n\
		#endif /* __cplusplus */				    \n\
									    \n\
		#endif /* %2$s */					    \n\
		",
		obj_name, header_guard);
	err = 0;
out:
	bpf_object__close(obj);
	if (obj_data)
		munmap(obj_data, mmap_sz);
	close(fd);
	return err;
}

static int do_object(int argc, char **argv)
{
	struct bpf_linker *linker;
	const char *output_file, *file;
	int err = 0;

	if (!REQ_ARGS(2)) {
		usage();
		return -1;
	}

	output_file = GET_ARG();

	linker = bpf_linker__new(output_file, NULL);
	if (!linker) {
		p_err("failed to create BPF linker instance");
		return -1;
	}

	while (argc) {
		file = GET_ARG();

		err = bpf_linker__add_file(linker, file, NULL);
		if (err) {
			p_err("failed to link '%s': %s (%d)", file, strerror(errno), errno);
			goto out;
		}
	}

	err = bpf_linker__finalize(linker);
	if (err) {
		p_err("failed to finalize ELF file: %s (%d)", strerror(errno), errno);
		goto out;
	}

	err = 0;
out:
	bpf_linker__free(linker);
	return err;
}

static int do_help(int argc, char **argv)
{
	if (json_output) {
		jsonw_null(json_wtr);
		return 0;
	}

	fprintf(stderr,
		"Usage: %1$s %2$s object OUTPUT_FILE INPUT_FILE [INPUT_FILE...]\n"
		"       %1$s %2$s skeleton FILE [name OBJECT_NAME]\n"
		"       %1$s %2$s subskeleton FILE [name OBJECT_NAME]\n"
		"       %1$s %2$s min_core_btf INPUT OUTPUT OBJECT [OBJECT...]\n"
		"       %1$s %2$s help\n"
		"\n"
		"       " HELP_SPEC_OPTIONS " |\n"
		"                    {-L|--use-loader} }\n"
		"",
		bin_name, "gen");

	return 0;
}

static int btf_save_raw(const struct btf *btf, const char *path)
{
	const void *data;
	FILE *f = NULL;
	__u32 data_sz;
	int err = 0;

	data = btf__raw_data(btf, &data_sz);
	if (!data)
		return -ENOMEM;

	f = fopen(path, "wb");
	if (!f)
		return -errno;

	if (fwrite(data, 1, data_sz, f) != data_sz)
		err = -errno;

	fclose(f);
	return err;
}

struct btfgen_info {
	struct btf *src_btf;
	struct btf *marked_btf; /* btf structure used to mark used types */
};

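/* The candidate cache (see btfgen_record_obj()) is keyed by BTF type ID,
 * so an identity hash and plain equality comparison are sufficient.
 */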
static size_t btfgen_hash_fn(long key, void *ctx)
{
	return key;
}

static bool btfgen_equal_fn(long k1, long k2, void *ctx)
{
	return k1 == k2;
}

static void btfgen_free_info(struct btfgen_info *info)
{
	if (!info)
		return;

	btf__free(info->src_btf);
	btf__free(info->marked_btf);

	free(info);
}

static struct btfgen_info *
btfgen_new_info(const char *targ_btf_path)
{
	struct btfgen_info *info;
	int err;

	info = calloc(1, sizeof(*info));
	if (!info)
		return NULL;

	info->src_btf = btf__parse(targ_btf_path, NULL);
	if (!info->src_btf) {
		err = -errno;
		p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
		goto err_out;
	}

	info->marked_btf = btf__parse(targ_btf_path, NULL);
	if (!info->marked_btf) {
		err = -errno;
		p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
		goto err_out;
	}

	return info;

err_out:
	btfgen_free_info(info);
	errno = -err;
	return NULL;
}

#define MARKED UINT32_MAX

static void btfgen_mark_member(struct btfgen_info *info, int type_id, int idx)
{
	const struct btf_type *t = btf__type_by_id(info->marked_btf, type_id);
	struct btf_member *m = btf_members(t) + idx;

	m->name_off = MARKED;
}

static int
btfgen_mark_type(struct btfgen_info *info, unsigned int type_id, bool follow_pointers)
{
	const struct btf_type *btf_type = btf__type_by_id(info->src_btf, type_id);
	struct btf_type *cloned_type;
	struct btf_param *param;
	struct btf_array *array;
	int err, i;

	if (type_id == 0)
		return 0;

	/* mark type on cloned BTF as used */
	cloned_type = (struct btf_type *) btf__type_by_id(info->marked_btf, type_id);
	cloned_type->name_off = MARKED;

	/* recursively mark other types needed by it */
	switch (btf_kind(btf_type)) {
	case BTF_KIND_UNKN:
	case BTF_KIND_INT:
	case BTF_KIND_FLOAT:
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		break;
	case BTF_KIND_PTR:
		if (follow_pointers) {
			err = btfgen_mark_type(info, btf_type->type, follow_pointers);
			if (err)
				return err;
		}
		break;
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_TYPEDEF:
		err = btfgen_mark_type(info, btf_type->type, follow_pointers);
		if (err)
			return err;
		break;
	case BTF_KIND_ARRAY:
		array = btf_array(btf_type);

		/* mark array type */
		err = btfgen_mark_type(info, array->type, follow_pointers);
		/* mark array's index type */
		err = err ? : btfgen_mark_type(info, array->index_type, follow_pointers);
		if (err)
			return err;
		break;
	case BTF_KIND_FUNC_PROTO:
		/* mark ret type */
		err = btfgen_mark_type(info, btf_type->type, follow_pointers);
		if (err)
			return err;

		/* mark parameter types */
		param = btf_params(btf_type);
		for (i = 0; i < btf_vlen(btf_type); i++) {
			err = btfgen_mark_type(info, param->type, follow_pointers);
			if (err)
				return err;
			param++;
		}
		break;
	/* tells if some other type needs to be handled */
	default:
		p_err("unsupported kind: %s (%d)", btf_kind_str(btf_type), type_id);
		return -EINVAL;
	}

	return 0;
}

static int btfgen_record_field_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
	struct btf *btf = info->src_btf;
	const struct btf_type *btf_type;
	struct btf_member *btf_member;
	struct btf_array *array;
	unsigned int type_id = targ_spec->root_type_id;
	int idx, err;

	/* mark root type */
	btf_type = btf__type_by_id(btf, type_id);
	err = btfgen_mark_type(info, type_id, false);
	if (err)
		return err;

	/* mark types for complex types (arrays, unions, structures) */
	for (int i = 1; i < targ_spec->raw_len; i++) {
		/* skip typedefs and mods */
		while (btf_is_mod(btf_type) || btf_is_typedef(btf_type)) {
			type_id = btf_type->type;
			btf_type = btf__type_by_id(btf, type_id);
		}

		switch (btf_kind(btf_type)) {
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
			idx = targ_spec->raw_spec[i];
			btf_member = btf_members(btf_type) + idx;

			/* mark member */
			btfgen_mark_member(info, type_id, idx);

			/* mark member's type */
			type_id = btf_member->type;
			btf_type = btf__type_by_id(btf, type_id);
			err = btfgen_mark_type(info, type_id, false);
			if (err)
				return err;
			break;
		case BTF_KIND_ARRAY:
			array = btf_array(btf_type);
			type_id = array->type;
			btf_type = btf__type_by_id(btf, type_id);
			break;
		default:
			p_err("unsupported kind: %s (%d)",
			      btf_kind_str(btf_type), btf_type->type);
			return -EINVAL;
		}
	}

	return 0;
}

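/* Illustration (hypothetical relocation): for a field access such as
 * task_struct->pid, the walk above marks "struct task_struct" itself,
 * the "pid" member, and that member's int type, so the minimized BTF
 * does not need to carry the full struct definition.
 */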
/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
 * this function does not rely on the target spec for inferring members, but
 * uses the associated BTF.
 *
 * The `behind_ptr` argument is used to stop marking of composite types reached
 * through a pointer. This way, we can keep BTF size in check while providing
 * reasonable match semantics.
 */
static int btfgen_mark_type_match(struct btfgen_info *info, __u32 type_id, bool behind_ptr)
{
	const struct btf_type *btf_type;
	struct btf *btf = info->src_btf;
	struct btf_type *cloned_type;
	int i, err;

	if (type_id == 0)
		return 0;

	btf_type = btf__type_by_id(btf, type_id);
	/* mark type on cloned BTF as used */
	cloned_type = (struct btf_type *)btf__type_by_id(info->marked_btf, type_id);
	cloned_type->name_off = MARKED;

	switch (btf_kind(btf_type)) {
	case BTF_KIND_UNKN:
	case BTF_KIND_INT:
	case BTF_KIND_FLOAT:
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		break;
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		struct btf_member *m = btf_members(btf_type);
		__u16 vlen = btf_vlen(btf_type);

		if (behind_ptr)
			break;

		for (i = 0; i < vlen; i++, m++) {
			/* mark member */
			btfgen_mark_member(info, type_id, i);

			/* mark member's type */
			err = btfgen_mark_type_match(info, m->type, false);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_CONST:
	case BTF_KIND_FWD:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
		return btfgen_mark_type_match(info, btf_type->type, behind_ptr);
	case BTF_KIND_PTR:
		return btfgen_mark_type_match(info, btf_type->type, true);
	case BTF_KIND_ARRAY: {
		struct btf_array *array;

		array = btf_array(btf_type);
		/* mark array type */
		err = btfgen_mark_type_match(info, array->type, false);
		/* mark array's index type */
		err = err ? : btfgen_mark_type_match(info, array->index_type, false);
		if (err)
			return err;
		break;
	}
	case BTF_KIND_FUNC_PROTO: {
		__u16 vlen = btf_vlen(btf_type);
		struct btf_param *param;

		/* mark ret type */
		err = btfgen_mark_type_match(info, btf_type->type, false);
		if (err)
			return err;

		/* mark parameter types */
		param = btf_params(btf_type);
		for (i = 0; i < vlen; i++) {
			err = btfgen_mark_type_match(info, param->type, false);
			if (err)
				return err;
			param++;
		}
		break;
	}
	/* tells if some other type needs to be handled */
	default:
		p_err("unsupported kind: %s (%d)", btf_kind_str(btf_type), type_id);
		return -EINVAL;
	}

	return 0;
}

static int btfgen_record_type_match_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
	return btfgen_mark_type_match(info, targ_spec->root_type_id, false);
}

static int btfgen_record_type_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
	return btfgen_mark_type(info, targ_spec->root_type_id, true);
}

static int btfgen_record_enumval_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
	return btfgen_mark_type(info, targ_spec->root_type_id, false);
}

static int btfgen_record_reloc(struct btfgen_info *info, struct bpf_core_spec *res)
{
	switch (res->relo_kind) {
	case BPF_CORE_FIELD_BYTE_OFFSET:
	case BPF_CORE_FIELD_BYTE_SIZE:
	case BPF_CORE_FIELD_EXISTS:
	case BPF_CORE_FIELD_SIGNED:
	case BPF_CORE_FIELD_LSHIFT_U64:
	case BPF_CORE_FIELD_RSHIFT_U64:
		return btfgen_record_field_relo(info, res);
	case BPF_CORE_TYPE_ID_LOCAL: /* BPF_CORE_TYPE_ID_LOCAL doesn't require kernel BTF */
		return 0;
	case BPF_CORE_TYPE_ID_TARGET:
	case BPF_CORE_TYPE_EXISTS:
	case BPF_CORE_TYPE_SIZE:
		return btfgen_record_type_relo(info, res);
	case BPF_CORE_TYPE_MATCHES:
		return btfgen_record_type_match_relo(info, res);
	case BPF_CORE_ENUMVAL_EXISTS:
	case BPF_CORE_ENUMVAL_VALUE:
		return btfgen_record_enumval_relo(info, res);
	default:
		return -EINVAL;
	}
}

static struct bpf_core_cand_list *
btfgen_find_cands(const struct btf *local_btf, const struct btf *targ_btf, __u32 local_id)
{
	const struct btf_type *local_type;
	struct bpf_core_cand_list *cands = NULL;
	struct bpf_core_cand local_cand = {};
	size_t local_essent_len;
	const char *local_name;
	int err;

	local_cand.btf = local_btf;
	local_cand.id = local_id;

	local_type = btf__type_by_id(local_btf, local_id);
	if (!local_type) {
		err = -EINVAL;
		goto err_out;
	}

	local_name = btf__name_by_offset(local_btf, local_type->name_off);
	if (!local_name) {
		err = -EINVAL;
		goto err_out;
	}
	local_essent_len = bpf_core_essential_name_len(local_name);

	cands = calloc(1, sizeof(*cands));
	if (!cands)
		return NULL;

	err = bpf_core_add_cands(&local_cand, local_essent_len, targ_btf, "vmlinux", 1, cands);
	if (err)
		goto err_out;

	return cands;

err_out:
	bpf_core_free_cands(cands);
	errno = -err;
	return NULL;
}

/* Record relocation information for a single BPF object */
static int btfgen_record_obj(struct btfgen_info *info, const char *obj_path)
{
	const struct btf_ext_info_sec *sec;
	const struct bpf_core_relo *relo;
	const struct btf_ext_info *seg;
	struct hashmap_entry *entry;
	struct hashmap *cand_cache = NULL;
	struct btf_ext *btf_ext = NULL;
	unsigned int relo_idx;
	struct btf *btf = NULL;
	size_t i;
	int err;

	btf = btf__parse(obj_path, &btf_ext);
	if (!btf) {
		err = -errno;
		p_err("failed to parse BPF object '%s': %s", obj_path, strerror(errno));
		return err;
	}

	if (!btf_ext) {
		p_err("failed to parse BPF object '%s': section %s not found",
		      obj_path, BTF_EXT_ELF_SEC);
		err = -EINVAL;
		goto out;
	}

	if (btf_ext->core_relo_info.len == 0) {
		err = 0;
		goto out;
	}

	cand_cache = hashmap__new(btfgen_hash_fn, btfgen_equal_fn, NULL);
	if (IS_ERR(cand_cache)) {
		err = PTR_ERR(cand_cache);
		goto out;
	}

	seg = &btf_ext->core_relo_info;
	for_each_btf_ext_sec(seg, sec) {
		for_each_btf_ext_rec(seg, sec, relo_idx, relo) {
			struct bpf_core_spec specs_scratch[3] = {};
			struct bpf_core_relo_res targ_res = {};
			struct bpf_core_cand_list *cands = NULL;
			const char *sec_name = btf__name_by_offset(btf, sec->sec_name_off);

			if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
			    !hashmap__find(cand_cache, relo->type_id, &cands)) {
				cands = btfgen_find_cands(btf, info->src_btf, relo->type_id);
				if (!cands) {
					err = -errno;
					goto out;
				}

				err = hashmap__set(cand_cache, relo->type_id, cands,
						   NULL, NULL);
				if (err)
					goto out;
			}

			err = bpf_core_calc_relo_insn(sec_name, relo, relo_idx, btf, cands,
						      specs_scratch, &targ_res);
			if (err)
				goto out;

			/* specs_scratch[2] is the target spec */
			err = btfgen_record_reloc(info, &specs_scratch[2]);
			if (err)
				goto out;
		}
	}

out:
	btf__free(btf);
	btf_ext__free(btf_ext);

	if (!IS_ERR_OR_NULL(cand_cache)) {
		hashmap__for_each_entry(cand_cache, entry, i) {
			bpf_core_free_cands(entry->pvalue);
		}
		hashmap__free(cand_cache);
	}

	return err;
}

static int btfgen_remap_id(__u32 *type_id, void *ctx)
{
	unsigned int *ids = ctx;

	*type_id = ids[*type_id];

	return 0;
}

/* Generate BTF from relocation information previously recorded */
static struct btf *btfgen_get_btf(struct btfgen_info *info)
{
	struct btf *btf_new = NULL;
	unsigned int *ids = NULL;
	unsigned int i, n = btf__type_cnt(info->marked_btf);
	int err = 0;

	btf_new = btf__new_empty();
	if (!btf_new) {
		err = -errno;
		goto err_out;
	}

	ids = calloc(n, sizeof(*ids));
	if (!ids) {
		err = -errno;
		goto err_out;
	}

	/* first pass: add all marked types to btf_new and add their new ids to the ids map */
	for (i = 1; i < n; i++) {
		const struct btf_type *cloned_type, *type;
		const char *name;
		int new_id;

		cloned_type = btf__type_by_id(info->marked_btf, i);

		if (cloned_type->name_off != MARKED)
			continue;

		type = btf__type_by_id(info->src_btf, i);

		/* add members for struct and union */
		if (btf_is_composite(type)) {
			struct btf_member *cloned_m, *m;
			unsigned short vlen;
			int idx_src;

			name = btf__str_by_offset(info->src_btf, type->name_off);

			if (btf_is_struct(type))
				err = btf__add_struct(btf_new, name, type->size);
			else
				err = btf__add_union(btf_new, name, type->size);

			if (err < 0)
				goto err_out;
			new_id = err;

			cloned_m = btf_members(cloned_type);
			m = btf_members(type);
			vlen = btf_vlen(cloned_type);
			for (idx_src = 0; idx_src < vlen; idx_src++, cloned_m++, m++) {
				/* add only members that are marked as used */
				if (cloned_m->name_off != MARKED)
					continue;

				name = btf__str_by_offset(info->src_btf, m->name_off);
				err = btf__add_field(btf_new, name, m->type,
						     btf_member_bit_offset(cloned_type, idx_src),
						     btf_member_bitfield_size(cloned_type, idx_src));
				if (err < 0)
					goto err_out;
			}
		} else {
			err = btf__add_type(btf_new, info->src_btf, type);
			if (err < 0)
				goto err_out;
			new_id = err;
		}

		/* add ID mapping */
		ids[i] = new_id;
	}

	/* second pass: fix up type ids */
	for (i = 1; i < btf__type_cnt(btf_new); i++) {
		struct btf_type *btf_type = (struct btf_type *) btf__type_by_id(btf_new, i);

		err = btf_type_visit_type_ids(btf_type, btfgen_remap_id, ids);
		if (err)
			goto err_out;
	}

	free(ids);
	return btf_new;

err_out:
	btf__free(btf_new);
	free(ids);
	errno = -err;
	return NULL;
}

/* Create minimized BTF file for a set of BPF objects.
 *
 * The BTFGen algorithm is divided into two main parts: (1) collect the
 * BTF types that are involved in relocations and (2) generate the BTF
 * object using the collected types.
 *
 * In order to collect the types involved in the relocations, we parse
 * the BTF and BTF.ext sections of the BPF objects and use
 * bpf_core_calc_relo_insn() to get the target specification, which
 * indicates how the types and fields are used in a relocation.
 *
 * Types are recorded in different ways according to the kind of the
 * relocation. For field-based relocations only the members that are
 * actually used are saved in order to reduce the size of the generated
 * BTF file. For type-based relocations empty struct / unions are
 * generated and for enum-based relocations the whole type is saved.
 *
 * The second part of the algorithm generates the BTF object. It creates
 * an empty BTF object and fills it with the types recorded in the
 * previous step. This function takes care of only adding the structure
 * and union members that were marked as used and it also fixes up the
 * type IDs on the generated BTF object.
 */
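/* e.g. (illustrative paths):
 *
 *	bpftool gen min_core_btf /sys/kernel/btf/vmlinux vmlinux.min.btf \
 *		prog1.bpf.o prog2.bpf.o
 */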
static int minimize_btf(const char *src_btf, const char *dst_btf, const char *objspaths[])
{
	struct btfgen_info *info;
	struct btf *btf_new = NULL;
	int err, i;

	info = btfgen_new_info(src_btf);
	if (!info) {
		err = -errno;
		p_err("failed to allocate info structure: %s", strerror(errno));
		goto out;
	}

	for (i = 0; objspaths[i] != NULL; i++) {
		err = btfgen_record_obj(info, objspaths[i]);
		if (err) {
			p_err("error recording relocations for %s: %s", objspaths[i],
			      strerror(errno));
			goto out;
		}
	}

	btf_new = btfgen_get_btf(info);
	if (!btf_new) {
		err = -errno;
		p_err("error generating BTF: %s", strerror(errno));
		goto out;
	}

	err = btf_save_raw(btf_new, dst_btf);
	if (err) {
		p_err("error saving btf file: %s", strerror(errno));
		goto out;
	}

out:
	btf__free(btf_new);
	btfgen_free_info(info);

	return err;
}

static int do_min_core_btf(int argc, char **argv)
{
	const char *input, *output, **objs;
	int i, err;

	if (!REQ_ARGS(3)) {
		usage();
		return -1;
	}

	input = GET_ARG();
	output = GET_ARG();

	objs = (const char **) calloc(argc + 1, sizeof(*objs));
	if (!objs) {
		p_err("failed to allocate array for object names");
		return -ENOMEM;
	}

	i = 0;
	while (argc)
		objs[i++] = GET_ARG();

	err = minimize_btf(input, output, objs);
	free(objs);
	return err;
}

static const struct cmd cmds[] = {
	{ "object",		do_object },
	{ "skeleton",		do_skeleton },
	{ "subskeleton",	do_subskeleton },
	{ "min_core_btf",	do_min_core_btf },
	{ "help",		do_help },
2330 { 0 } 2331 }; 2332 2333 int do_gen(int argc, char **argv) 2334 { 2335 return cmd_select(cmds, argc, argv, do_help); 2336 } 2337