1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) 2 /* Copyright (c) 2018 Facebook */ 3 4 #include <endian.h> 5 #include <stdio.h> 6 #include <stdlib.h> 7 #include <string.h> 8 #include <fcntl.h> 9 #include <unistd.h> 10 #include <errno.h> 11 #include <sys/utsname.h> 12 #include <sys/param.h> 13 #include <sys/stat.h> 14 #include <linux/kernel.h> 15 #include <linux/err.h> 16 #include <linux/btf.h> 17 #include <gelf.h> 18 #include "btf.h" 19 #include "bpf.h" 20 #include "libbpf.h" 21 #include "libbpf_internal.h" 22 #include "hashmap.h" 23 24 /* make sure libbpf doesn't use kernel-only integer typedefs */ 25 #pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64 26 27 #define BTF_MAX_NR_TYPES 0x7fffffffU 28 #define BTF_MAX_STR_OFFSET 0x7fffffffU 29 30 static struct btf_type btf_void; 31 32 struct btf { 33 union { 34 struct btf_header *hdr; 35 void *data; 36 }; 37 struct btf_type **types; 38 const char *strings; 39 void *nohdr_data; 40 __u32 nr_types; 41 __u32 types_size; 42 __u32 data_size; 43 int fd; 44 }; 45 46 static inline __u64 ptr_to_u64(const void *ptr) 47 { 48 return (__u64) (unsigned long) ptr; 49 } 50 51 static int btf_add_type(struct btf *btf, struct btf_type *t) 52 { 53 if (btf->types_size - btf->nr_types < 2) { 54 struct btf_type **new_types; 55 __u32 expand_by, new_size; 56 57 if (btf->types_size == BTF_MAX_NR_TYPES) 58 return -E2BIG; 59 60 expand_by = max(btf->types_size >> 2, 16U); 61 new_size = min(BTF_MAX_NR_TYPES, btf->types_size + expand_by); 62 63 new_types = realloc(btf->types, sizeof(*new_types) * new_size); 64 if (!new_types) 65 return -ENOMEM; 66 67 if (btf->nr_types == 0) 68 new_types[0] = &btf_void; 69 70 btf->types = new_types; 71 btf->types_size = new_size; 72 } 73 74 btf->types[++(btf->nr_types)] = t; 75 76 return 0; 77 } 78 79 static int btf_parse_hdr(struct btf *btf) 80 { 81 const struct btf_header *hdr = btf->hdr; 82 __u32 meta_left; 83 84 if (btf->data_size < sizeof(struct btf_header)) { 85 pr_debug("BTF header not found\n"); 86 return -EINVAL; 87 } 88 89 if (hdr->magic != BTF_MAGIC) { 90 pr_debug("Invalid BTF magic:%x\n", hdr->magic); 91 return -EINVAL; 92 } 93 94 if (hdr->version != BTF_VERSION) { 95 pr_debug("Unsupported BTF version:%u\n", hdr->version); 96 return -ENOTSUP; 97 } 98 99 if (hdr->flags) { 100 pr_debug("Unsupported BTF flags:%x\n", hdr->flags); 101 return -ENOTSUP; 102 } 103 104 meta_left = btf->data_size - sizeof(*hdr); 105 if (!meta_left) { 106 pr_debug("BTF has no data\n"); 107 return -EINVAL; 108 } 109 110 if (meta_left < hdr->type_off) { 111 pr_debug("Invalid BTF type section offset:%u\n", hdr->type_off); 112 return -EINVAL; 113 } 114 115 if (meta_left < hdr->str_off) { 116 pr_debug("Invalid BTF string section offset:%u\n", hdr->str_off); 117 return -EINVAL; 118 } 119 120 if (hdr->type_off >= hdr->str_off) { 121 pr_debug("BTF type section offset >= string section offset. No type?\n"); 122 return -EINVAL; 123 } 124 125 if (hdr->type_off & 0x02) { 126 pr_debug("BTF type section is not aligned to 4 bytes\n"); 127 return -EINVAL; 128 } 129 130 btf->nohdr_data = btf->hdr + 1; 131 132 return 0; 133 } 134 135 static int btf_parse_str_sec(struct btf *btf) 136 { 137 const struct btf_header *hdr = btf->hdr; 138 const char *start = btf->nohdr_data + hdr->str_off; 139 const char *end = start + btf->hdr->str_len; 140 141 if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET || 142 start[0] || end[-1]) { 143 pr_debug("Invalid BTF string section\n"); 144 return -EINVAL; 145 } 146 147 btf->strings = start; 148 149 return 0; 150 } 151 152 static int btf_type_size(struct btf_type *t) 153 { 154 int base_size = sizeof(struct btf_type); 155 __u16 vlen = btf_vlen(t); 156 157 switch (btf_kind(t)) { 158 case BTF_KIND_FWD: 159 case BTF_KIND_CONST: 160 case BTF_KIND_VOLATILE: 161 case BTF_KIND_RESTRICT: 162 case BTF_KIND_PTR: 163 case BTF_KIND_TYPEDEF: 164 case BTF_KIND_FUNC: 165 return base_size; 166 case BTF_KIND_INT: 167 return base_size + sizeof(__u32); 168 case BTF_KIND_ENUM: 169 return base_size + vlen * sizeof(struct btf_enum); 170 case BTF_KIND_ARRAY: 171 return base_size + sizeof(struct btf_array); 172 case BTF_KIND_STRUCT: 173 case BTF_KIND_UNION: 174 return base_size + vlen * sizeof(struct btf_member); 175 case BTF_KIND_FUNC_PROTO: 176 return base_size + vlen * sizeof(struct btf_param); 177 case BTF_KIND_VAR: 178 return base_size + sizeof(struct btf_var); 179 case BTF_KIND_DATASEC: 180 return base_size + vlen * sizeof(struct btf_var_secinfo); 181 default: 182 pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t)); 183 return -EINVAL; 184 } 185 } 186 187 static int btf_parse_type_sec(struct btf *btf) 188 { 189 struct btf_header *hdr = btf->hdr; 190 void *nohdr_data = btf->nohdr_data; 191 void *next_type = nohdr_data + hdr->type_off; 192 void *end_type = nohdr_data + hdr->str_off; 193 194 while (next_type < end_type) { 195 struct btf_type *t = next_type; 196 int type_size; 197 int err; 198 199 type_size = btf_type_size(t); 200 if (type_size < 0) 201 return type_size; 202 next_type += type_size; 203 err = btf_add_type(btf, t); 204 if (err) 205 return err; 206 } 207 208 return 0; 209 } 210 211 __u32 btf__get_nr_types(const struct btf *btf) 212 { 213 return btf->nr_types; 214 } 215 216 const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id) 217 { 218 if (type_id > btf->nr_types) 219 return NULL; 220 221 return btf->types[type_id]; 222 } 223 224 static bool btf_type_is_void(const struct btf_type *t) 225 { 226 return t == &btf_void || btf_is_fwd(t); 227 } 228 229 static bool btf_type_is_void_or_null(const struct btf_type *t) 230 { 231 return !t || btf_type_is_void(t); 232 } 233 234 #define MAX_RESOLVE_DEPTH 32 235 236 __s64 btf__resolve_size(const struct btf *btf, __u32 type_id) 237 { 238 const struct btf_array *array; 239 const struct btf_type *t; 240 __u32 nelems = 1; 241 __s64 size = -1; 242 int i; 243 244 t = btf__type_by_id(btf, type_id); 245 for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); 246 i++) { 247 switch (btf_kind(t)) { 248 case BTF_KIND_INT: 249 case BTF_KIND_STRUCT: 250 case BTF_KIND_UNION: 251 case BTF_KIND_ENUM: 252 case BTF_KIND_DATASEC: 253 size = t->size; 254 goto done; 255 case BTF_KIND_PTR: 256 size = sizeof(void *); 257 goto done; 258 case BTF_KIND_TYPEDEF: 259 case BTF_KIND_VOLATILE: 260 case BTF_KIND_CONST: 261 case BTF_KIND_RESTRICT: 262 case BTF_KIND_VAR: 263 type_id = t->type; 264 break; 265 case BTF_KIND_ARRAY: 266 array = btf_array(t); 267 if (nelems && array->nelems > UINT32_MAX / nelems) 268 return -E2BIG; 269 nelems *= array->nelems; 270 type_id = array->type; 271 break; 272 default: 273 return -EINVAL; 274 } 275 276 t = btf__type_by_id(btf, type_id); 277 } 278 279 done: 280 if (size < 0) 281 return -EINVAL; 282 if (nelems && size > UINT32_MAX / nelems) 283 return -E2BIG; 284 285 return nelems * size; 286 } 287 288 int btf__align_of(const struct btf *btf, __u32 id) 289 { 290 const struct btf_type *t = btf__type_by_id(btf, id); 291 __u16 kind = btf_kind(t); 292 293 switch (kind) { 294 case BTF_KIND_INT: 295 case BTF_KIND_ENUM: 296 return min(sizeof(void *), (size_t)t->size); 297 case BTF_KIND_PTR: 298 return sizeof(void *); 299 case BTF_KIND_TYPEDEF: 300 case BTF_KIND_VOLATILE: 301 case BTF_KIND_CONST: 302 case BTF_KIND_RESTRICT: 303 return btf__align_of(btf, t->type); 304 case BTF_KIND_ARRAY: 305 return btf__align_of(btf, btf_array(t)->type); 306 case BTF_KIND_STRUCT: 307 case BTF_KIND_UNION: { 308 const struct btf_member *m = btf_members(t); 309 __u16 vlen = btf_vlen(t); 310 int i, max_align = 1, align; 311 312 for (i = 0; i < vlen; i++, m++) { 313 align = btf__align_of(btf, m->type); 314 if (align <= 0) 315 return align; 316 max_align = max(max_align, align); 317 } 318 319 return max_align; 320 } 321 default: 322 pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t)); 323 return 0; 324 } 325 } 326 327 int btf__resolve_type(const struct btf *btf, __u32 type_id) 328 { 329 const struct btf_type *t; 330 int depth = 0; 331 332 t = btf__type_by_id(btf, type_id); 333 while (depth < MAX_RESOLVE_DEPTH && 334 !btf_type_is_void_or_null(t) && 335 (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) { 336 type_id = t->type; 337 t = btf__type_by_id(btf, type_id); 338 depth++; 339 } 340 341 if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t)) 342 return -EINVAL; 343 344 return type_id; 345 } 346 347 __s32 btf__find_by_name(const struct btf *btf, const char *type_name) 348 { 349 __u32 i; 350 351 if (!strcmp(type_name, "void")) 352 return 0; 353 354 for (i = 1; i <= btf->nr_types; i++) { 355 const struct btf_type *t = btf->types[i]; 356 const char *name = btf__name_by_offset(btf, t->name_off); 357 358 if (name && !strcmp(type_name, name)) 359 return i; 360 } 361 362 return -ENOENT; 363 } 364 365 __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name, 366 __u32 kind) 367 { 368 __u32 i; 369 370 if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void")) 371 return 0; 372 373 for (i = 1; i <= btf->nr_types; i++) { 374 const struct btf_type *t = btf->types[i]; 375 const char *name; 376 377 if (btf_kind(t) != kind) 378 continue; 379 name = btf__name_by_offset(btf, t->name_off); 380 if (name && !strcmp(type_name, name)) 381 return i; 382 } 383 384 return -ENOENT; 385 } 386 387 void btf__free(struct btf *btf) 388 { 389 if (!btf) 390 return; 391 392 if (btf->fd != -1) 393 close(btf->fd); 394 395 free(btf->data); 396 free(btf->types); 397 free(btf); 398 } 399 400 struct btf *btf__new(__u8 *data, __u32 size) 401 { 402 struct btf *btf; 403 int err; 404 405 btf = calloc(1, sizeof(struct btf)); 406 if (!btf) 407 return ERR_PTR(-ENOMEM); 408 409 btf->fd = -1; 410 411 btf->data = malloc(size); 412 if (!btf->data) { 413 err = -ENOMEM; 414 goto done; 415 } 416 417 memcpy(btf->data, data, size); 418 btf->data_size = size; 419 420 err = btf_parse_hdr(btf); 421 if (err) 422 goto done; 423 424 err = btf_parse_str_sec(btf); 425 if (err) 426 goto done; 427 428 err = btf_parse_type_sec(btf); 429 430 done: 431 if (err) { 432 btf__free(btf); 433 return ERR_PTR(err); 434 } 435 436 return btf; 437 } 438 439 static bool btf_check_endianness(const GElf_Ehdr *ehdr) 440 { 441 #if __BYTE_ORDER == __LITTLE_ENDIAN 442 return ehdr->e_ident[EI_DATA] == ELFDATA2LSB; 443 #elif __BYTE_ORDER == __BIG_ENDIAN 444 return ehdr->e_ident[EI_DATA] == ELFDATA2MSB; 445 #else 446 # error "Unrecognized __BYTE_ORDER__" 447 #endif 448 } 449 450 struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext) 451 { 452 Elf_Data *btf_data = NULL, *btf_ext_data = NULL; 453 int err = 0, fd = -1, idx = 0; 454 struct btf *btf = NULL; 455 Elf_Scn *scn = NULL; 456 Elf *elf = NULL; 457 GElf_Ehdr ehdr; 458 459 if (elf_version(EV_CURRENT) == EV_NONE) { 460 pr_warn("failed to init libelf for %s\n", path); 461 return ERR_PTR(-LIBBPF_ERRNO__LIBELF); 462 } 463 464 fd = open(path, O_RDONLY); 465 if (fd < 0) { 466 err = -errno; 467 pr_warn("failed to open %s: %s\n", path, strerror(errno)); 468 return ERR_PTR(err); 469 } 470 471 err = -LIBBPF_ERRNO__FORMAT; 472 473 elf = elf_begin(fd, ELF_C_READ, NULL); 474 if (!elf) { 475 pr_warn("failed to open %s as ELF file\n", path); 476 goto done; 477 } 478 if (!gelf_getehdr(elf, &ehdr)) { 479 pr_warn("failed to get EHDR from %s\n", path); 480 goto done; 481 } 482 if (!btf_check_endianness(&ehdr)) { 483 pr_warn("non-native ELF endianness is not supported\n"); 484 goto done; 485 } 486 if (!elf_rawdata(elf_getscn(elf, ehdr.e_shstrndx), NULL)) { 487 pr_warn("failed to get e_shstrndx from %s\n", path); 488 goto done; 489 } 490 491 while ((scn = elf_nextscn(elf, scn)) != NULL) { 492 GElf_Shdr sh; 493 char *name; 494 495 idx++; 496 if (gelf_getshdr(scn, &sh) != &sh) { 497 pr_warn("failed to get section(%d) header from %s\n", 498 idx, path); 499 goto done; 500 } 501 name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name); 502 if (!name) { 503 pr_warn("failed to get section(%d) name from %s\n", 504 idx, path); 505 goto done; 506 } 507 if (strcmp(name, BTF_ELF_SEC) == 0) { 508 btf_data = elf_getdata(scn, 0); 509 if (!btf_data) { 510 pr_warn("failed to get section(%d, %s) data from %s\n", 511 idx, name, path); 512 goto done; 513 } 514 continue; 515 } else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) { 516 btf_ext_data = elf_getdata(scn, 0); 517 if (!btf_ext_data) { 518 pr_warn("failed to get section(%d, %s) data from %s\n", 519 idx, name, path); 520 goto done; 521 } 522 continue; 523 } 524 } 525 526 err = 0; 527 528 if (!btf_data) { 529 err = -ENOENT; 530 goto done; 531 } 532 btf = btf__new(btf_data->d_buf, btf_data->d_size); 533 if (IS_ERR(btf)) 534 goto done; 535 536 if (btf_ext && btf_ext_data) { 537 *btf_ext = btf_ext__new(btf_ext_data->d_buf, 538 btf_ext_data->d_size); 539 if (IS_ERR(*btf_ext)) 540 goto done; 541 } else if (btf_ext) { 542 *btf_ext = NULL; 543 } 544 done: 545 if (elf) 546 elf_end(elf); 547 close(fd); 548 549 if (err) 550 return ERR_PTR(err); 551 /* 552 * btf is always parsed before btf_ext, so no need to clean up 553 * btf_ext, if btf loading failed 554 */ 555 if (IS_ERR(btf)) 556 return btf; 557 if (btf_ext && IS_ERR(*btf_ext)) { 558 btf__free(btf); 559 err = PTR_ERR(*btf_ext); 560 return ERR_PTR(err); 561 } 562 return btf; 563 } 564 565 static int compare_vsi_off(const void *_a, const void *_b) 566 { 567 const struct btf_var_secinfo *a = _a; 568 const struct btf_var_secinfo *b = _b; 569 570 return a->offset - b->offset; 571 } 572 573 static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf, 574 struct btf_type *t) 575 { 576 __u32 size = 0, off = 0, i, vars = btf_vlen(t); 577 const char *name = btf__name_by_offset(btf, t->name_off); 578 const struct btf_type *t_var; 579 struct btf_var_secinfo *vsi; 580 const struct btf_var *var; 581 int ret; 582 583 if (!name) { 584 pr_debug("No name found in string section for DATASEC kind.\n"); 585 return -ENOENT; 586 } 587 588 /* .extern datasec size and var offsets were set correctly during 589 * extern collection step, so just skip straight to sorting variables 590 */ 591 if (t->size) 592 goto sort_vars; 593 594 ret = bpf_object__section_size(obj, name, &size); 595 if (ret || !size || (t->size && t->size != size)) { 596 pr_debug("Invalid size for section %s: %u bytes\n", name, size); 597 return -ENOENT; 598 } 599 600 t->size = size; 601 602 for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) { 603 t_var = btf__type_by_id(btf, vsi->type); 604 var = btf_var(t_var); 605 606 if (!btf_is_var(t_var)) { 607 pr_debug("Non-VAR type seen in section %s\n", name); 608 return -EINVAL; 609 } 610 611 if (var->linkage == BTF_VAR_STATIC) 612 continue; 613 614 name = btf__name_by_offset(btf, t_var->name_off); 615 if (!name) { 616 pr_debug("No name found in string section for VAR kind\n"); 617 return -ENOENT; 618 } 619 620 ret = bpf_object__variable_offset(obj, name, &off); 621 if (ret) { 622 pr_debug("No offset found in symbol table for VAR %s\n", 623 name); 624 return -ENOENT; 625 } 626 627 vsi->offset = off; 628 } 629 630 sort_vars: 631 qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off); 632 return 0; 633 } 634 635 int btf__finalize_data(struct bpf_object *obj, struct btf *btf) 636 { 637 int err = 0; 638 __u32 i; 639 640 for (i = 1; i <= btf->nr_types; i++) { 641 struct btf_type *t = btf->types[i]; 642 643 /* Loader needs to fix up some of the things compiler 644 * couldn't get its hands on while emitting BTF. This 645 * is section size and global variable offset. We use 646 * the info from the ELF itself for this purpose. 647 */ 648 if (btf_is_datasec(t)) { 649 err = btf_fixup_datasec(obj, btf, t); 650 if (err) 651 break; 652 } 653 } 654 655 return err; 656 } 657 658 int btf__load(struct btf *btf) 659 { 660 __u32 log_buf_size = BPF_LOG_BUF_SIZE; 661 char *log_buf = NULL; 662 int err = 0; 663 664 if (btf->fd >= 0) 665 return -EEXIST; 666 667 log_buf = malloc(log_buf_size); 668 if (!log_buf) 669 return -ENOMEM; 670 671 *log_buf = 0; 672 673 btf->fd = bpf_load_btf(btf->data, btf->data_size, 674 log_buf, log_buf_size, false); 675 if (btf->fd < 0) { 676 err = -errno; 677 pr_warn("Error loading BTF: %s(%d)\n", strerror(errno), errno); 678 if (*log_buf) 679 pr_warn("%s\n", log_buf); 680 goto done; 681 } 682 683 done: 684 free(log_buf); 685 return err; 686 } 687 688 int btf__fd(const struct btf *btf) 689 { 690 return btf->fd; 691 } 692 693 const void *btf__get_raw_data(const struct btf *btf, __u32 *size) 694 { 695 *size = btf->data_size; 696 return btf->data; 697 } 698 699 const char *btf__name_by_offset(const struct btf *btf, __u32 offset) 700 { 701 if (offset < btf->hdr->str_len) 702 return &btf->strings[offset]; 703 else 704 return NULL; 705 } 706 707 int btf__get_from_id(__u32 id, struct btf **btf) 708 { 709 struct bpf_btf_info btf_info = { 0 }; 710 __u32 len = sizeof(btf_info); 711 __u32 last_size; 712 int btf_fd; 713 void *ptr; 714 int err; 715 716 err = 0; 717 *btf = NULL; 718 btf_fd = bpf_btf_get_fd_by_id(id); 719 if (btf_fd < 0) 720 return 0; 721 722 /* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so 723 * let's start with a sane default - 4KiB here - and resize it only if 724 * bpf_obj_get_info_by_fd() needs a bigger buffer. 725 */ 726 btf_info.btf_size = 4096; 727 last_size = btf_info.btf_size; 728 ptr = malloc(last_size); 729 if (!ptr) { 730 err = -ENOMEM; 731 goto exit_free; 732 } 733 734 memset(ptr, 0, last_size); 735 btf_info.btf = ptr_to_u64(ptr); 736 err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len); 737 738 if (!err && btf_info.btf_size > last_size) { 739 void *temp_ptr; 740 741 last_size = btf_info.btf_size; 742 temp_ptr = realloc(ptr, last_size); 743 if (!temp_ptr) { 744 err = -ENOMEM; 745 goto exit_free; 746 } 747 ptr = temp_ptr; 748 memset(ptr, 0, last_size); 749 btf_info.btf = ptr_to_u64(ptr); 750 err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len); 751 } 752 753 if (err || btf_info.btf_size > last_size) { 754 err = errno; 755 goto exit_free; 756 } 757 758 *btf = btf__new((__u8 *)(long)btf_info.btf, btf_info.btf_size); 759 if (IS_ERR(*btf)) { 760 err = PTR_ERR(*btf); 761 *btf = NULL; 762 } 763 764 exit_free: 765 close(btf_fd); 766 free(ptr); 767 768 return err; 769 } 770 771 int btf__get_map_kv_tids(const struct btf *btf, const char *map_name, 772 __u32 expected_key_size, __u32 expected_value_size, 773 __u32 *key_type_id, __u32 *value_type_id) 774 { 775 const struct btf_type *container_type; 776 const struct btf_member *key, *value; 777 const size_t max_name = 256; 778 char container_name[max_name]; 779 __s64 key_size, value_size; 780 __s32 container_id; 781 782 if (snprintf(container_name, max_name, "____btf_map_%s", map_name) == 783 max_name) { 784 pr_warn("map:%s length of '____btf_map_%s' is too long\n", 785 map_name, map_name); 786 return -EINVAL; 787 } 788 789 container_id = btf__find_by_name(btf, container_name); 790 if (container_id < 0) { 791 pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n", 792 map_name, container_name); 793 return container_id; 794 } 795 796 container_type = btf__type_by_id(btf, container_id); 797 if (!container_type) { 798 pr_warn("map:%s cannot find BTF type for container_id:%u\n", 799 map_name, container_id); 800 return -EINVAL; 801 } 802 803 if (!btf_is_struct(container_type) || btf_vlen(container_type) < 2) { 804 pr_warn("map:%s container_name:%s is an invalid container struct\n", 805 map_name, container_name); 806 return -EINVAL; 807 } 808 809 key = btf_members(container_type); 810 value = key + 1; 811 812 key_size = btf__resolve_size(btf, key->type); 813 if (key_size < 0) { 814 pr_warn("map:%s invalid BTF key_type_size\n", map_name); 815 return key_size; 816 } 817 818 if (expected_key_size != key_size) { 819 pr_warn("map:%s btf_key_type_size:%u != map_def_key_size:%u\n", 820 map_name, (__u32)key_size, expected_key_size); 821 return -EINVAL; 822 } 823 824 value_size = btf__resolve_size(btf, value->type); 825 if (value_size < 0) { 826 pr_warn("map:%s invalid BTF value_type_size\n", map_name); 827 return value_size; 828 } 829 830 if (expected_value_size != value_size) { 831 pr_warn("map:%s btf_value_type_size:%u != map_def_value_size:%u\n", 832 map_name, (__u32)value_size, expected_value_size); 833 return -EINVAL; 834 } 835 836 *key_type_id = key->type; 837 *value_type_id = value->type; 838 839 return 0; 840 } 841 842 struct btf_ext_sec_setup_param { 843 __u32 off; 844 __u32 len; 845 __u32 min_rec_size; 846 struct btf_ext_info *ext_info; 847 const char *desc; 848 }; 849 850 static int btf_ext_setup_info(struct btf_ext *btf_ext, 851 struct btf_ext_sec_setup_param *ext_sec) 852 { 853 const struct btf_ext_info_sec *sinfo; 854 struct btf_ext_info *ext_info; 855 __u32 info_left, record_size; 856 /* The start of the info sec (including the __u32 record_size). */ 857 void *info; 858 859 if (ext_sec->len == 0) 860 return 0; 861 862 if (ext_sec->off & 0x03) { 863 pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n", 864 ext_sec->desc); 865 return -EINVAL; 866 } 867 868 info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off; 869 info_left = ext_sec->len; 870 871 if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) { 872 pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n", 873 ext_sec->desc, ext_sec->off, ext_sec->len); 874 return -EINVAL; 875 } 876 877 /* At least a record size */ 878 if (info_left < sizeof(__u32)) { 879 pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc); 880 return -EINVAL; 881 } 882 883 /* The record size needs to meet the minimum standard */ 884 record_size = *(__u32 *)info; 885 if (record_size < ext_sec->min_rec_size || 886 record_size & 0x03) { 887 pr_debug("%s section in .BTF.ext has invalid record size %u\n", 888 ext_sec->desc, record_size); 889 return -EINVAL; 890 } 891 892 sinfo = info + sizeof(__u32); 893 info_left -= sizeof(__u32); 894 895 /* If no records, return failure now so .BTF.ext won't be used. */ 896 if (!info_left) { 897 pr_debug("%s section in .BTF.ext has no records", ext_sec->desc); 898 return -EINVAL; 899 } 900 901 while (info_left) { 902 unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec); 903 __u64 total_record_size; 904 __u32 num_records; 905 906 if (info_left < sec_hdrlen) { 907 pr_debug("%s section header is not found in .BTF.ext\n", 908 ext_sec->desc); 909 return -EINVAL; 910 } 911 912 num_records = sinfo->num_info; 913 if (num_records == 0) { 914 pr_debug("%s section has incorrect num_records in .BTF.ext\n", 915 ext_sec->desc); 916 return -EINVAL; 917 } 918 919 total_record_size = sec_hdrlen + 920 (__u64)num_records * record_size; 921 if (info_left < total_record_size) { 922 pr_debug("%s section has incorrect num_records in .BTF.ext\n", 923 ext_sec->desc); 924 return -EINVAL; 925 } 926 927 info_left -= total_record_size; 928 sinfo = (void *)sinfo + total_record_size; 929 } 930 931 ext_info = ext_sec->ext_info; 932 ext_info->len = ext_sec->len - sizeof(__u32); 933 ext_info->rec_size = record_size; 934 ext_info->info = info + sizeof(__u32); 935 936 return 0; 937 } 938 939 static int btf_ext_setup_func_info(struct btf_ext *btf_ext) 940 { 941 struct btf_ext_sec_setup_param param = { 942 .off = btf_ext->hdr->func_info_off, 943 .len = btf_ext->hdr->func_info_len, 944 .min_rec_size = sizeof(struct bpf_func_info_min), 945 .ext_info = &btf_ext->func_info, 946 .desc = "func_info" 947 }; 948 949 return btf_ext_setup_info(btf_ext, ¶m); 950 } 951 952 static int btf_ext_setup_line_info(struct btf_ext *btf_ext) 953 { 954 struct btf_ext_sec_setup_param param = { 955 .off = btf_ext->hdr->line_info_off, 956 .len = btf_ext->hdr->line_info_len, 957 .min_rec_size = sizeof(struct bpf_line_info_min), 958 .ext_info = &btf_ext->line_info, 959 .desc = "line_info", 960 }; 961 962 return btf_ext_setup_info(btf_ext, ¶m); 963 } 964 965 static int btf_ext_setup_field_reloc(struct btf_ext *btf_ext) 966 { 967 struct btf_ext_sec_setup_param param = { 968 .off = btf_ext->hdr->field_reloc_off, 969 .len = btf_ext->hdr->field_reloc_len, 970 .min_rec_size = sizeof(struct bpf_field_reloc), 971 .ext_info = &btf_ext->field_reloc_info, 972 .desc = "field_reloc", 973 }; 974 975 return btf_ext_setup_info(btf_ext, ¶m); 976 } 977 978 static int btf_ext_parse_hdr(__u8 *data, __u32 data_size) 979 { 980 const struct btf_ext_header *hdr = (struct btf_ext_header *)data; 981 982 if (data_size < offsetofend(struct btf_ext_header, hdr_len) || 983 data_size < hdr->hdr_len) { 984 pr_debug("BTF.ext header not found"); 985 return -EINVAL; 986 } 987 988 if (hdr->magic != BTF_MAGIC) { 989 pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic); 990 return -EINVAL; 991 } 992 993 if (hdr->version != BTF_VERSION) { 994 pr_debug("Unsupported BTF.ext version:%u\n", hdr->version); 995 return -ENOTSUP; 996 } 997 998 if (hdr->flags) { 999 pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags); 1000 return -ENOTSUP; 1001 } 1002 1003 if (data_size == hdr->hdr_len) { 1004 pr_debug("BTF.ext has no data\n"); 1005 return -EINVAL; 1006 } 1007 1008 return 0; 1009 } 1010 1011 void btf_ext__free(struct btf_ext *btf_ext) 1012 { 1013 if (!btf_ext) 1014 return; 1015 free(btf_ext->data); 1016 free(btf_ext); 1017 } 1018 1019 struct btf_ext *btf_ext__new(__u8 *data, __u32 size) 1020 { 1021 struct btf_ext *btf_ext; 1022 int err; 1023 1024 err = btf_ext_parse_hdr(data, size); 1025 if (err) 1026 return ERR_PTR(err); 1027 1028 btf_ext = calloc(1, sizeof(struct btf_ext)); 1029 if (!btf_ext) 1030 return ERR_PTR(-ENOMEM); 1031 1032 btf_ext->data_size = size; 1033 btf_ext->data = malloc(size); 1034 if (!btf_ext->data) { 1035 err = -ENOMEM; 1036 goto done; 1037 } 1038 memcpy(btf_ext->data, data, size); 1039 1040 if (btf_ext->hdr->hdr_len < 1041 offsetofend(struct btf_ext_header, line_info_len)) 1042 goto done; 1043 err = btf_ext_setup_func_info(btf_ext); 1044 if (err) 1045 goto done; 1046 1047 err = btf_ext_setup_line_info(btf_ext); 1048 if (err) 1049 goto done; 1050 1051 if (btf_ext->hdr->hdr_len < 1052 offsetofend(struct btf_ext_header, field_reloc_len)) 1053 goto done; 1054 err = btf_ext_setup_field_reloc(btf_ext); 1055 if (err) 1056 goto done; 1057 1058 done: 1059 if (err) { 1060 btf_ext__free(btf_ext); 1061 return ERR_PTR(err); 1062 } 1063 1064 return btf_ext; 1065 } 1066 1067 const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size) 1068 { 1069 *size = btf_ext->data_size; 1070 return btf_ext->data; 1071 } 1072 1073 static int btf_ext_reloc_info(const struct btf *btf, 1074 const struct btf_ext_info *ext_info, 1075 const char *sec_name, __u32 insns_cnt, 1076 void **info, __u32 *cnt) 1077 { 1078 __u32 sec_hdrlen = sizeof(struct btf_ext_info_sec); 1079 __u32 i, record_size, existing_len, records_len; 1080 struct btf_ext_info_sec *sinfo; 1081 const char *info_sec_name; 1082 __u64 remain_len; 1083 void *data; 1084 1085 record_size = ext_info->rec_size; 1086 sinfo = ext_info->info; 1087 remain_len = ext_info->len; 1088 while (remain_len > 0) { 1089 records_len = sinfo->num_info * record_size; 1090 info_sec_name = btf__name_by_offset(btf, sinfo->sec_name_off); 1091 if (strcmp(info_sec_name, sec_name)) { 1092 remain_len -= sec_hdrlen + records_len; 1093 sinfo = (void *)sinfo + sec_hdrlen + records_len; 1094 continue; 1095 } 1096 1097 existing_len = (*cnt) * record_size; 1098 data = realloc(*info, existing_len + records_len); 1099 if (!data) 1100 return -ENOMEM; 1101 1102 memcpy(data + existing_len, sinfo->data, records_len); 1103 /* adjust insn_off only, the rest data will be passed 1104 * to the kernel. 1105 */ 1106 for (i = 0; i < sinfo->num_info; i++) { 1107 __u32 *insn_off; 1108 1109 insn_off = data + existing_len + (i * record_size); 1110 *insn_off = *insn_off / sizeof(struct bpf_insn) + 1111 insns_cnt; 1112 } 1113 *info = data; 1114 *cnt += sinfo->num_info; 1115 return 0; 1116 } 1117 1118 return -ENOENT; 1119 } 1120 1121 int btf_ext__reloc_func_info(const struct btf *btf, 1122 const struct btf_ext *btf_ext, 1123 const char *sec_name, __u32 insns_cnt, 1124 void **func_info, __u32 *cnt) 1125 { 1126 return btf_ext_reloc_info(btf, &btf_ext->func_info, sec_name, 1127 insns_cnt, func_info, cnt); 1128 } 1129 1130 int btf_ext__reloc_line_info(const struct btf *btf, 1131 const struct btf_ext *btf_ext, 1132 const char *sec_name, __u32 insns_cnt, 1133 void **line_info, __u32 *cnt) 1134 { 1135 return btf_ext_reloc_info(btf, &btf_ext->line_info, sec_name, 1136 insns_cnt, line_info, cnt); 1137 } 1138 1139 __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext) 1140 { 1141 return btf_ext->func_info.rec_size; 1142 } 1143 1144 __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext) 1145 { 1146 return btf_ext->line_info.rec_size; 1147 } 1148 1149 struct btf_dedup; 1150 1151 static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext, 1152 const struct btf_dedup_opts *opts); 1153 static void btf_dedup_free(struct btf_dedup *d); 1154 static int btf_dedup_strings(struct btf_dedup *d); 1155 static int btf_dedup_prim_types(struct btf_dedup *d); 1156 static int btf_dedup_struct_types(struct btf_dedup *d); 1157 static int btf_dedup_ref_types(struct btf_dedup *d); 1158 static int btf_dedup_compact_types(struct btf_dedup *d); 1159 static int btf_dedup_remap_types(struct btf_dedup *d); 1160 1161 /* 1162 * Deduplicate BTF types and strings. 1163 * 1164 * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF 1165 * section with all BTF type descriptors and string data. It overwrites that 1166 * memory in-place with deduplicated types and strings without any loss of 1167 * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section 1168 * is provided, all the strings referenced from .BTF.ext section are honored 1169 * and updated to point to the right offsets after deduplication. 1170 * 1171 * If function returns with error, type/string data might be garbled and should 1172 * be discarded. 1173 * 1174 * More verbose and detailed description of both problem btf_dedup is solving, 1175 * as well as solution could be found at: 1176 * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html 1177 * 1178 * Problem description and justification 1179 * ===================================== 1180 * 1181 * BTF type information is typically emitted either as a result of conversion 1182 * from DWARF to BTF or directly by compiler. In both cases, each compilation 1183 * unit contains information about a subset of all the types that are used 1184 * in an application. These subsets are frequently overlapping and contain a lot 1185 * of duplicated information when later concatenated together into a single 1186 * binary. This algorithm ensures that each unique type is represented by single 1187 * BTF type descriptor, greatly reducing resulting size of BTF data. 1188 * 1189 * Compilation unit isolation and subsequent duplication of data is not the only 1190 * problem. The same type hierarchy (e.g., struct and all the type that struct 1191 * references) in different compilation units can be represented in BTF to 1192 * various degrees of completeness (or, rather, incompleteness) due to 1193 * struct/union forward declarations. 1194 * 1195 * Let's take a look at an example, that we'll use to better understand the 1196 * problem (and solution). Suppose we have two compilation units, each using 1197 * same `struct S`, but each of them having incomplete type information about 1198 * struct's fields: 1199 * 1200 * // CU #1: 1201 * struct S; 1202 * struct A { 1203 * int a; 1204 * struct A* self; 1205 * struct S* parent; 1206 * }; 1207 * struct B; 1208 * struct S { 1209 * struct A* a_ptr; 1210 * struct B* b_ptr; 1211 * }; 1212 * 1213 * // CU #2: 1214 * struct S; 1215 * struct A; 1216 * struct B { 1217 * int b; 1218 * struct B* self; 1219 * struct S* parent; 1220 * }; 1221 * struct S { 1222 * struct A* a_ptr; 1223 * struct B* b_ptr; 1224 * }; 1225 * 1226 * In case of CU #1, BTF data will know only that `struct B` exist (but no 1227 * more), but will know the complete type information about `struct A`. While 1228 * for CU #2, it will know full type information about `struct B`, but will 1229 * only know about forward declaration of `struct A` (in BTF terms, it will 1230 * have `BTF_KIND_FWD` type descriptor with name `B`). 1231 * 1232 * This compilation unit isolation means that it's possible that there is no 1233 * single CU with complete type information describing structs `S`, `A`, and 1234 * `B`. Also, we might get tons of duplicated and redundant type information. 1235 * 1236 * Additional complication we need to keep in mind comes from the fact that 1237 * types, in general, can form graphs containing cycles, not just DAGs. 1238 * 1239 * While algorithm does deduplication, it also merges and resolves type 1240 * information (unless disabled throught `struct btf_opts`), whenever possible. 1241 * E.g., in the example above with two compilation units having partial type 1242 * information for structs `A` and `B`, the output of algorithm will emit 1243 * a single copy of each BTF type that describes structs `A`, `B`, and `S` 1244 * (as well as type information for `int` and pointers), as if they were defined 1245 * in a single compilation unit as: 1246 * 1247 * struct A { 1248 * int a; 1249 * struct A* self; 1250 * struct S* parent; 1251 * }; 1252 * struct B { 1253 * int b; 1254 * struct B* self; 1255 * struct S* parent; 1256 * }; 1257 * struct S { 1258 * struct A* a_ptr; 1259 * struct B* b_ptr; 1260 * }; 1261 * 1262 * Algorithm summary 1263 * ================= 1264 * 1265 * Algorithm completes its work in 6 separate passes: 1266 * 1267 * 1. Strings deduplication. 1268 * 2. Primitive types deduplication (int, enum, fwd). 1269 * 3. Struct/union types deduplication. 1270 * 4. Reference types deduplication (pointers, typedefs, arrays, funcs, func 1271 * protos, and const/volatile/restrict modifiers). 1272 * 5. Types compaction. 1273 * 6. Types remapping. 1274 * 1275 * Algorithm determines canonical type descriptor, which is a single 1276 * representative type for each truly unique type. This canonical type is the 1277 * one that will go into final deduplicated BTF type information. For 1278 * struct/unions, it is also the type that algorithm will merge additional type 1279 * information into (while resolving FWDs), as it discovers it from data in 1280 * other CUs. Each input BTF type eventually gets either mapped to itself, if 1281 * that type is canonical, or to some other type, if that type is equivalent 1282 * and was chosen as canonical representative. This mapping is stored in 1283 * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that 1284 * FWD type got resolved to. 1285 * 1286 * To facilitate fast discovery of canonical types, we also maintain canonical 1287 * index (`btf_dedup->dedup_table`), which maps type descriptor's signature hash 1288 * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types 1289 * that match that signature. With sufficiently good choice of type signature 1290 * hashing function, we can limit number of canonical types for each unique type 1291 * signature to a very small number, allowing to find canonical type for any 1292 * duplicated type very quickly. 1293 * 1294 * Struct/union deduplication is the most critical part and algorithm for 1295 * deduplicating structs/unions is described in greater details in comments for 1296 * `btf_dedup_is_equiv` function. 1297 */ 1298 int btf__dedup(struct btf *btf, struct btf_ext *btf_ext, 1299 const struct btf_dedup_opts *opts) 1300 { 1301 struct btf_dedup *d = btf_dedup_new(btf, btf_ext, opts); 1302 int err; 1303 1304 if (IS_ERR(d)) { 1305 pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d)); 1306 return -EINVAL; 1307 } 1308 1309 err = btf_dedup_strings(d); 1310 if (err < 0) { 1311 pr_debug("btf_dedup_strings failed:%d\n", err); 1312 goto done; 1313 } 1314 err = btf_dedup_prim_types(d); 1315 if (err < 0) { 1316 pr_debug("btf_dedup_prim_types failed:%d\n", err); 1317 goto done; 1318 } 1319 err = btf_dedup_struct_types(d); 1320 if (err < 0) { 1321 pr_debug("btf_dedup_struct_types failed:%d\n", err); 1322 goto done; 1323 } 1324 err = btf_dedup_ref_types(d); 1325 if (err < 0) { 1326 pr_debug("btf_dedup_ref_types failed:%d\n", err); 1327 goto done; 1328 } 1329 err = btf_dedup_compact_types(d); 1330 if (err < 0) { 1331 pr_debug("btf_dedup_compact_types failed:%d\n", err); 1332 goto done; 1333 } 1334 err = btf_dedup_remap_types(d); 1335 if (err < 0) { 1336 pr_debug("btf_dedup_remap_types failed:%d\n", err); 1337 goto done; 1338 } 1339 1340 done: 1341 btf_dedup_free(d); 1342 return err; 1343 } 1344 1345 #define BTF_UNPROCESSED_ID ((__u32)-1) 1346 #define BTF_IN_PROGRESS_ID ((__u32)-2) 1347 1348 struct btf_dedup { 1349 /* .BTF section to be deduped in-place */ 1350 struct btf *btf; 1351 /* 1352 * Optional .BTF.ext section. When provided, any strings referenced 1353 * from it will be taken into account when deduping strings 1354 */ 1355 struct btf_ext *btf_ext; 1356 /* 1357 * This is a map from any type's signature hash to a list of possible 1358 * canonical representative type candidates. Hash collisions are 1359 * ignored, so even types of various kinds can share same list of 1360 * candidates, which is fine because we rely on subsequent 1361 * btf_xxx_equal() checks to authoritatively verify type equality. 1362 */ 1363 struct hashmap *dedup_table; 1364 /* Canonical types map */ 1365 __u32 *map; 1366 /* Hypothetical mapping, used during type graph equivalence checks */ 1367 __u32 *hypot_map; 1368 __u32 *hypot_list; 1369 size_t hypot_cnt; 1370 size_t hypot_cap; 1371 /* Various option modifying behavior of algorithm */ 1372 struct btf_dedup_opts opts; 1373 }; 1374 1375 struct btf_str_ptr { 1376 const char *str; 1377 __u32 new_off; 1378 bool used; 1379 }; 1380 1381 struct btf_str_ptrs { 1382 struct btf_str_ptr *ptrs; 1383 const char *data; 1384 __u32 cnt; 1385 __u32 cap; 1386 }; 1387 1388 static long hash_combine(long h, long value) 1389 { 1390 return h * 31 + value; 1391 } 1392 1393 #define for_each_dedup_cand(d, node, hash) \ 1394 hashmap__for_each_key_entry(d->dedup_table, node, (void *)hash) 1395 1396 static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id) 1397 { 1398 return hashmap__append(d->dedup_table, 1399 (void *)hash, (void *)(long)type_id); 1400 } 1401 1402 static int btf_dedup_hypot_map_add(struct btf_dedup *d, 1403 __u32 from_id, __u32 to_id) 1404 { 1405 if (d->hypot_cnt == d->hypot_cap) { 1406 __u32 *new_list; 1407 1408 d->hypot_cap += max((size_t)16, d->hypot_cap / 2); 1409 new_list = realloc(d->hypot_list, sizeof(__u32) * d->hypot_cap); 1410 if (!new_list) 1411 return -ENOMEM; 1412 d->hypot_list = new_list; 1413 } 1414 d->hypot_list[d->hypot_cnt++] = from_id; 1415 d->hypot_map[from_id] = to_id; 1416 return 0; 1417 } 1418 1419 static void btf_dedup_clear_hypot_map(struct btf_dedup *d) 1420 { 1421 int i; 1422 1423 for (i = 0; i < d->hypot_cnt; i++) 1424 d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID; 1425 d->hypot_cnt = 0; 1426 } 1427 1428 static void btf_dedup_free(struct btf_dedup *d) 1429 { 1430 hashmap__free(d->dedup_table); 1431 d->dedup_table = NULL; 1432 1433 free(d->map); 1434 d->map = NULL; 1435 1436 free(d->hypot_map); 1437 d->hypot_map = NULL; 1438 1439 free(d->hypot_list); 1440 d->hypot_list = NULL; 1441 1442 free(d); 1443 } 1444 1445 static size_t btf_dedup_identity_hash_fn(const void *key, void *ctx) 1446 { 1447 return (size_t)key; 1448 } 1449 1450 static size_t btf_dedup_collision_hash_fn(const void *key, void *ctx) 1451 { 1452 return 0; 1453 } 1454 1455 static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx) 1456 { 1457 return k1 == k2; 1458 } 1459 1460 static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext, 1461 const struct btf_dedup_opts *opts) 1462 { 1463 struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup)); 1464 hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn; 1465 int i, err = 0; 1466 1467 if (!d) 1468 return ERR_PTR(-ENOMEM); 1469 1470 d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds; 1471 /* dedup_table_size is now used only to force collisions in tests */ 1472 if (opts && opts->dedup_table_size == 1) 1473 hash_fn = btf_dedup_collision_hash_fn; 1474 1475 d->btf = btf; 1476 d->btf_ext = btf_ext; 1477 1478 d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL); 1479 if (IS_ERR(d->dedup_table)) { 1480 err = PTR_ERR(d->dedup_table); 1481 d->dedup_table = NULL; 1482 goto done; 1483 } 1484 1485 d->map = malloc(sizeof(__u32) * (1 + btf->nr_types)); 1486 if (!d->map) { 1487 err = -ENOMEM; 1488 goto done; 1489 } 1490 /* special BTF "void" type is made canonical immediately */ 1491 d->map[0] = 0; 1492 for (i = 1; i <= btf->nr_types; i++) { 1493 struct btf_type *t = d->btf->types[i]; 1494 1495 /* VAR and DATASEC are never deduped and are self-canonical */ 1496 if (btf_is_var(t) || btf_is_datasec(t)) 1497 d->map[i] = i; 1498 else 1499 d->map[i] = BTF_UNPROCESSED_ID; 1500 } 1501 1502 d->hypot_map = malloc(sizeof(__u32) * (1 + btf->nr_types)); 1503 if (!d->hypot_map) { 1504 err = -ENOMEM; 1505 goto done; 1506 } 1507 for (i = 0; i <= btf->nr_types; i++) 1508 d->hypot_map[i] = BTF_UNPROCESSED_ID; 1509 1510 done: 1511 if (err) { 1512 btf_dedup_free(d); 1513 return ERR_PTR(err); 1514 } 1515 1516 return d; 1517 } 1518 1519 typedef int (*str_off_fn_t)(__u32 *str_off_ptr, void *ctx); 1520 1521 /* 1522 * Iterate over all possible places in .BTF and .BTF.ext that can reference 1523 * string and pass pointer to it to a provided callback `fn`. 1524 */ 1525 static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx) 1526 { 1527 void *line_data_cur, *line_data_end; 1528 int i, j, r, rec_size; 1529 struct btf_type *t; 1530 1531 for (i = 1; i <= d->btf->nr_types; i++) { 1532 t = d->btf->types[i]; 1533 r = fn(&t->name_off, ctx); 1534 if (r) 1535 return r; 1536 1537 switch (btf_kind(t)) { 1538 case BTF_KIND_STRUCT: 1539 case BTF_KIND_UNION: { 1540 struct btf_member *m = btf_members(t); 1541 __u16 vlen = btf_vlen(t); 1542 1543 for (j = 0; j < vlen; j++) { 1544 r = fn(&m->name_off, ctx); 1545 if (r) 1546 return r; 1547 m++; 1548 } 1549 break; 1550 } 1551 case BTF_KIND_ENUM: { 1552 struct btf_enum *m = btf_enum(t); 1553 __u16 vlen = btf_vlen(t); 1554 1555 for (j = 0; j < vlen; j++) { 1556 r = fn(&m->name_off, ctx); 1557 if (r) 1558 return r; 1559 m++; 1560 } 1561 break; 1562 } 1563 case BTF_KIND_FUNC_PROTO: { 1564 struct btf_param *m = btf_params(t); 1565 __u16 vlen = btf_vlen(t); 1566 1567 for (j = 0; j < vlen; j++) { 1568 r = fn(&m->name_off, ctx); 1569 if (r) 1570 return r; 1571 m++; 1572 } 1573 break; 1574 } 1575 default: 1576 break; 1577 } 1578 } 1579 1580 if (!d->btf_ext) 1581 return 0; 1582 1583 line_data_cur = d->btf_ext->line_info.info; 1584 line_data_end = d->btf_ext->line_info.info + d->btf_ext->line_info.len; 1585 rec_size = d->btf_ext->line_info.rec_size; 1586 1587 while (line_data_cur < line_data_end) { 1588 struct btf_ext_info_sec *sec = line_data_cur; 1589 struct bpf_line_info_min *line_info; 1590 __u32 num_info = sec->num_info; 1591 1592 r = fn(&sec->sec_name_off, ctx); 1593 if (r) 1594 return r; 1595 1596 line_data_cur += sizeof(struct btf_ext_info_sec); 1597 for (i = 0; i < num_info; i++) { 1598 line_info = line_data_cur; 1599 r = fn(&line_info->file_name_off, ctx); 1600 if (r) 1601 return r; 1602 r = fn(&line_info->line_off, ctx); 1603 if (r) 1604 return r; 1605 line_data_cur += rec_size; 1606 } 1607 } 1608 1609 return 0; 1610 } 1611 1612 static int str_sort_by_content(const void *a1, const void *a2) 1613 { 1614 const struct btf_str_ptr *p1 = a1; 1615 const struct btf_str_ptr *p2 = a2; 1616 1617 return strcmp(p1->str, p2->str); 1618 } 1619 1620 static int str_sort_by_offset(const void *a1, const void *a2) 1621 { 1622 const struct btf_str_ptr *p1 = a1; 1623 const struct btf_str_ptr *p2 = a2; 1624 1625 if (p1->str != p2->str) 1626 return p1->str < p2->str ? -1 : 1; 1627 return 0; 1628 } 1629 1630 static int btf_dedup_str_ptr_cmp(const void *str_ptr, const void *pelem) 1631 { 1632 const struct btf_str_ptr *p = pelem; 1633 1634 if (str_ptr != p->str) 1635 return (const char *)str_ptr < p->str ? -1 : 1; 1636 return 0; 1637 } 1638 1639 static int btf_str_mark_as_used(__u32 *str_off_ptr, void *ctx) 1640 { 1641 struct btf_str_ptrs *strs; 1642 struct btf_str_ptr *s; 1643 1644 if (*str_off_ptr == 0) 1645 return 0; 1646 1647 strs = ctx; 1648 s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt, 1649 sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp); 1650 if (!s) 1651 return -EINVAL; 1652 s->used = true; 1653 return 0; 1654 } 1655 1656 static int btf_str_remap_offset(__u32 *str_off_ptr, void *ctx) 1657 { 1658 struct btf_str_ptrs *strs; 1659 struct btf_str_ptr *s; 1660 1661 if (*str_off_ptr == 0) 1662 return 0; 1663 1664 strs = ctx; 1665 s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt, 1666 sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp); 1667 if (!s) 1668 return -EINVAL; 1669 *str_off_ptr = s->new_off; 1670 return 0; 1671 } 1672 1673 /* 1674 * Dedup string and filter out those that are not referenced from either .BTF 1675 * or .BTF.ext (if provided) sections. 1676 * 1677 * This is done by building index of all strings in BTF's string section, 1678 * then iterating over all entities that can reference strings (e.g., type 1679 * names, struct field names, .BTF.ext line info, etc) and marking corresponding 1680 * strings as used. After that all used strings are deduped and compacted into 1681 * sequential blob of memory and new offsets are calculated. Then all the string 1682 * references are iterated again and rewritten using new offsets. 1683 */ 1684 static int btf_dedup_strings(struct btf_dedup *d) 1685 { 1686 const struct btf_header *hdr = d->btf->hdr; 1687 char *start = (char *)d->btf->nohdr_data + hdr->str_off; 1688 char *end = start + d->btf->hdr->str_len; 1689 char *p = start, *tmp_strs = NULL; 1690 struct btf_str_ptrs strs = { 1691 .cnt = 0, 1692 .cap = 0, 1693 .ptrs = NULL, 1694 .data = start, 1695 }; 1696 int i, j, err = 0, grp_idx; 1697 bool grp_used; 1698 1699 /* build index of all strings */ 1700 while (p < end) { 1701 if (strs.cnt + 1 > strs.cap) { 1702 struct btf_str_ptr *new_ptrs; 1703 1704 strs.cap += max(strs.cnt / 2, 16U); 1705 new_ptrs = realloc(strs.ptrs, 1706 sizeof(strs.ptrs[0]) * strs.cap); 1707 if (!new_ptrs) { 1708 err = -ENOMEM; 1709 goto done; 1710 } 1711 strs.ptrs = new_ptrs; 1712 } 1713 1714 strs.ptrs[strs.cnt].str = p; 1715 strs.ptrs[strs.cnt].used = false; 1716 1717 p += strlen(p) + 1; 1718 strs.cnt++; 1719 } 1720 1721 /* temporary storage for deduplicated strings */ 1722 tmp_strs = malloc(d->btf->hdr->str_len); 1723 if (!tmp_strs) { 1724 err = -ENOMEM; 1725 goto done; 1726 } 1727 1728 /* mark all used strings */ 1729 strs.ptrs[0].used = true; 1730 err = btf_for_each_str_off(d, btf_str_mark_as_used, &strs); 1731 if (err) 1732 goto done; 1733 1734 /* sort strings by context, so that we can identify duplicates */ 1735 qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_content); 1736 1737 /* 1738 * iterate groups of equal strings and if any instance in a group was 1739 * referenced, emit single instance and remember new offset 1740 */ 1741 p = tmp_strs; 1742 grp_idx = 0; 1743 grp_used = strs.ptrs[0].used; 1744 /* iterate past end to avoid code duplication after loop */ 1745 for (i = 1; i <= strs.cnt; i++) { 1746 /* 1747 * when i == strs.cnt, we want to skip string comparison and go 1748 * straight to handling last group of strings (otherwise we'd 1749 * need to handle last group after the loop w/ duplicated code) 1750 */ 1751 if (i < strs.cnt && 1752 !strcmp(strs.ptrs[i].str, strs.ptrs[grp_idx].str)) { 1753 grp_used = grp_used || strs.ptrs[i].used; 1754 continue; 1755 } 1756 1757 /* 1758 * this check would have been required after the loop to handle 1759 * last group of strings, but due to <= condition in a loop 1760 * we avoid that duplication 1761 */ 1762 if (grp_used) { 1763 int new_off = p - tmp_strs; 1764 __u32 len = strlen(strs.ptrs[grp_idx].str); 1765 1766 memmove(p, strs.ptrs[grp_idx].str, len + 1); 1767 for (j = grp_idx; j < i; j++) 1768 strs.ptrs[j].new_off = new_off; 1769 p += len + 1; 1770 } 1771 1772 if (i < strs.cnt) { 1773 grp_idx = i; 1774 grp_used = strs.ptrs[i].used; 1775 } 1776 } 1777 1778 /* replace original strings with deduped ones */ 1779 d->btf->hdr->str_len = p - tmp_strs; 1780 memmove(start, tmp_strs, d->btf->hdr->str_len); 1781 end = start + d->btf->hdr->str_len; 1782 1783 /* restore original order for further binary search lookups */ 1784 qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_offset); 1785 1786 /* remap string offsets */ 1787 err = btf_for_each_str_off(d, btf_str_remap_offset, &strs); 1788 if (err) 1789 goto done; 1790 1791 d->btf->hdr->str_len = end - start; 1792 1793 done: 1794 free(tmp_strs); 1795 free(strs.ptrs); 1796 return err; 1797 } 1798 1799 static long btf_hash_common(struct btf_type *t) 1800 { 1801 long h; 1802 1803 h = hash_combine(0, t->name_off); 1804 h = hash_combine(h, t->info); 1805 h = hash_combine(h, t->size); 1806 return h; 1807 } 1808 1809 static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2) 1810 { 1811 return t1->name_off == t2->name_off && 1812 t1->info == t2->info && 1813 t1->size == t2->size; 1814 } 1815 1816 /* Calculate type signature hash of INT. */ 1817 static long btf_hash_int(struct btf_type *t) 1818 { 1819 __u32 info = *(__u32 *)(t + 1); 1820 long h; 1821 1822 h = btf_hash_common(t); 1823 h = hash_combine(h, info); 1824 return h; 1825 } 1826 1827 /* Check structural equality of two INTs. */ 1828 static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2) 1829 { 1830 __u32 info1, info2; 1831 1832 if (!btf_equal_common(t1, t2)) 1833 return false; 1834 info1 = *(__u32 *)(t1 + 1); 1835 info2 = *(__u32 *)(t2 + 1); 1836 return info1 == info2; 1837 } 1838 1839 /* Calculate type signature hash of ENUM. */ 1840 static long btf_hash_enum(struct btf_type *t) 1841 { 1842 long h; 1843 1844 /* don't hash vlen and enum members to support enum fwd resolving */ 1845 h = hash_combine(0, t->name_off); 1846 h = hash_combine(h, t->info & ~0xffff); 1847 h = hash_combine(h, t->size); 1848 return h; 1849 } 1850 1851 /* Check structural equality of two ENUMs. */ 1852 static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2) 1853 { 1854 const struct btf_enum *m1, *m2; 1855 __u16 vlen; 1856 int i; 1857 1858 if (!btf_equal_common(t1, t2)) 1859 return false; 1860 1861 vlen = btf_vlen(t1); 1862 m1 = btf_enum(t1); 1863 m2 = btf_enum(t2); 1864 for (i = 0; i < vlen; i++) { 1865 if (m1->name_off != m2->name_off || m1->val != m2->val) 1866 return false; 1867 m1++; 1868 m2++; 1869 } 1870 return true; 1871 } 1872 1873 static inline bool btf_is_enum_fwd(struct btf_type *t) 1874 { 1875 return btf_is_enum(t) && btf_vlen(t) == 0; 1876 } 1877 1878 static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2) 1879 { 1880 if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2)) 1881 return btf_equal_enum(t1, t2); 1882 /* ignore vlen when comparing */ 1883 return t1->name_off == t2->name_off && 1884 (t1->info & ~0xffff) == (t2->info & ~0xffff) && 1885 t1->size == t2->size; 1886 } 1887 1888 /* 1889 * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs, 1890 * as referenced type IDs equivalence is established separately during type 1891 * graph equivalence check algorithm. 1892 */ 1893 static long btf_hash_struct(struct btf_type *t) 1894 { 1895 const struct btf_member *member = btf_members(t); 1896 __u32 vlen = btf_vlen(t); 1897 long h = btf_hash_common(t); 1898 int i; 1899 1900 for (i = 0; i < vlen; i++) { 1901 h = hash_combine(h, member->name_off); 1902 h = hash_combine(h, member->offset); 1903 /* no hashing of referenced type ID, it can be unresolved yet */ 1904 member++; 1905 } 1906 return h; 1907 } 1908 1909 /* 1910 * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type 1911 * IDs. This check is performed during type graph equivalence check and 1912 * referenced types equivalence is checked separately. 1913 */ 1914 static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2) 1915 { 1916 const struct btf_member *m1, *m2; 1917 __u16 vlen; 1918 int i; 1919 1920 if (!btf_equal_common(t1, t2)) 1921 return false; 1922 1923 vlen = btf_vlen(t1); 1924 m1 = btf_members(t1); 1925 m2 = btf_members(t2); 1926 for (i = 0; i < vlen; i++) { 1927 if (m1->name_off != m2->name_off || m1->offset != m2->offset) 1928 return false; 1929 m1++; 1930 m2++; 1931 } 1932 return true; 1933 } 1934 1935 /* 1936 * Calculate type signature hash of ARRAY, including referenced type IDs, 1937 * under assumption that they were already resolved to canonical type IDs and 1938 * are not going to change. 1939 */ 1940 static long btf_hash_array(struct btf_type *t) 1941 { 1942 const struct btf_array *info = btf_array(t); 1943 long h = btf_hash_common(t); 1944 1945 h = hash_combine(h, info->type); 1946 h = hash_combine(h, info->index_type); 1947 h = hash_combine(h, info->nelems); 1948 return h; 1949 } 1950 1951 /* 1952 * Check exact equality of two ARRAYs, taking into account referenced 1953 * type IDs, under assumption that they were already resolved to canonical 1954 * type IDs and are not going to change. 1955 * This function is called during reference types deduplication to compare 1956 * ARRAY to potential canonical representative. 1957 */ 1958 static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2) 1959 { 1960 const struct btf_array *info1, *info2; 1961 1962 if (!btf_equal_common(t1, t2)) 1963 return false; 1964 1965 info1 = btf_array(t1); 1966 info2 = btf_array(t2); 1967 return info1->type == info2->type && 1968 info1->index_type == info2->index_type && 1969 info1->nelems == info2->nelems; 1970 } 1971 1972 /* 1973 * Check structural compatibility of two ARRAYs, ignoring referenced type 1974 * IDs. This check is performed during type graph equivalence check and 1975 * referenced types equivalence is checked separately. 1976 */ 1977 static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2) 1978 { 1979 if (!btf_equal_common(t1, t2)) 1980 return false; 1981 1982 return btf_array(t1)->nelems == btf_array(t2)->nelems; 1983 } 1984 1985 /* 1986 * Calculate type signature hash of FUNC_PROTO, including referenced type IDs, 1987 * under assumption that they were already resolved to canonical type IDs and 1988 * are not going to change. 1989 */ 1990 static long btf_hash_fnproto(struct btf_type *t) 1991 { 1992 const struct btf_param *member = btf_params(t); 1993 __u16 vlen = btf_vlen(t); 1994 long h = btf_hash_common(t); 1995 int i; 1996 1997 for (i = 0; i < vlen; i++) { 1998 h = hash_combine(h, member->name_off); 1999 h = hash_combine(h, member->type); 2000 member++; 2001 } 2002 return h; 2003 } 2004 2005 /* 2006 * Check exact equality of two FUNC_PROTOs, taking into account referenced 2007 * type IDs, under assumption that they were already resolved to canonical 2008 * type IDs and are not going to change. 2009 * This function is called during reference types deduplication to compare 2010 * FUNC_PROTO to potential canonical representative. 2011 */ 2012 static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2) 2013 { 2014 const struct btf_param *m1, *m2; 2015 __u16 vlen; 2016 int i; 2017 2018 if (!btf_equal_common(t1, t2)) 2019 return false; 2020 2021 vlen = btf_vlen(t1); 2022 m1 = btf_params(t1); 2023 m2 = btf_params(t2); 2024 for (i = 0; i < vlen; i++) { 2025 if (m1->name_off != m2->name_off || m1->type != m2->type) 2026 return false; 2027 m1++; 2028 m2++; 2029 } 2030 return true; 2031 } 2032 2033 /* 2034 * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type 2035 * IDs. This check is performed during type graph equivalence check and 2036 * referenced types equivalence is checked separately. 2037 */ 2038 static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2) 2039 { 2040 const struct btf_param *m1, *m2; 2041 __u16 vlen; 2042 int i; 2043 2044 /* skip return type ID */ 2045 if (t1->name_off != t2->name_off || t1->info != t2->info) 2046 return false; 2047 2048 vlen = btf_vlen(t1); 2049 m1 = btf_params(t1); 2050 m2 = btf_params(t2); 2051 for (i = 0; i < vlen; i++) { 2052 if (m1->name_off != m2->name_off) 2053 return false; 2054 m1++; 2055 m2++; 2056 } 2057 return true; 2058 } 2059 2060 /* 2061 * Deduplicate primitive types, that can't reference other types, by calculating 2062 * their type signature hash and comparing them with any possible canonical 2063 * candidate. If no canonical candidate matches, type itself is marked as 2064 * canonical and is added into `btf_dedup->dedup_table` as another candidate. 2065 */ 2066 static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) 2067 { 2068 struct btf_type *t = d->btf->types[type_id]; 2069 struct hashmap_entry *hash_entry; 2070 struct btf_type *cand; 2071 /* if we don't find equivalent type, then we are canonical */ 2072 __u32 new_id = type_id; 2073 __u32 cand_id; 2074 long h; 2075 2076 switch (btf_kind(t)) { 2077 case BTF_KIND_CONST: 2078 case BTF_KIND_VOLATILE: 2079 case BTF_KIND_RESTRICT: 2080 case BTF_KIND_PTR: 2081 case BTF_KIND_TYPEDEF: 2082 case BTF_KIND_ARRAY: 2083 case BTF_KIND_STRUCT: 2084 case BTF_KIND_UNION: 2085 case BTF_KIND_FUNC: 2086 case BTF_KIND_FUNC_PROTO: 2087 case BTF_KIND_VAR: 2088 case BTF_KIND_DATASEC: 2089 return 0; 2090 2091 case BTF_KIND_INT: 2092 h = btf_hash_int(t); 2093 for_each_dedup_cand(d, hash_entry, h) { 2094 cand_id = (__u32)(long)hash_entry->value; 2095 cand = d->btf->types[cand_id]; 2096 if (btf_equal_int(t, cand)) { 2097 new_id = cand_id; 2098 break; 2099 } 2100 } 2101 break; 2102 2103 case BTF_KIND_ENUM: 2104 h = btf_hash_enum(t); 2105 for_each_dedup_cand(d, hash_entry, h) { 2106 cand_id = (__u32)(long)hash_entry->value; 2107 cand = d->btf->types[cand_id]; 2108 if (btf_equal_enum(t, cand)) { 2109 new_id = cand_id; 2110 break; 2111 } 2112 if (d->opts.dont_resolve_fwds) 2113 continue; 2114 if (btf_compat_enum(t, cand)) { 2115 if (btf_is_enum_fwd(t)) { 2116 /* resolve fwd to full enum */ 2117 new_id = cand_id; 2118 break; 2119 } 2120 /* resolve canonical enum fwd to full enum */ 2121 d->map[cand_id] = type_id; 2122 } 2123 } 2124 break; 2125 2126 case BTF_KIND_FWD: 2127 h = btf_hash_common(t); 2128 for_each_dedup_cand(d, hash_entry, h) { 2129 cand_id = (__u32)(long)hash_entry->value; 2130 cand = d->btf->types[cand_id]; 2131 if (btf_equal_common(t, cand)) { 2132 new_id = cand_id; 2133 break; 2134 } 2135 } 2136 break; 2137 2138 default: 2139 return -EINVAL; 2140 } 2141 2142 d->map[type_id] = new_id; 2143 if (type_id == new_id && btf_dedup_table_add(d, h, type_id)) 2144 return -ENOMEM; 2145 2146 return 0; 2147 } 2148 2149 static int btf_dedup_prim_types(struct btf_dedup *d) 2150 { 2151 int i, err; 2152 2153 for (i = 1; i <= d->btf->nr_types; i++) { 2154 err = btf_dedup_prim_type(d, i); 2155 if (err) 2156 return err; 2157 } 2158 return 0; 2159 } 2160 2161 /* 2162 * Check whether type is already mapped into canonical one (could be to itself). 2163 */ 2164 static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id) 2165 { 2166 return d->map[type_id] <= BTF_MAX_NR_TYPES; 2167 } 2168 2169 /* 2170 * Resolve type ID into its canonical type ID, if any; otherwise return original 2171 * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow 2172 * STRUCT/UNION link and resolve it into canonical type ID as well. 2173 */ 2174 static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id) 2175 { 2176 while (is_type_mapped(d, type_id) && d->map[type_id] != type_id) 2177 type_id = d->map[type_id]; 2178 return type_id; 2179 } 2180 2181 /* 2182 * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original 2183 * type ID. 2184 */ 2185 static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id) 2186 { 2187 __u32 orig_type_id = type_id; 2188 2189 if (!btf_is_fwd(d->btf->types[type_id])) 2190 return type_id; 2191 2192 while (is_type_mapped(d, type_id) && d->map[type_id] != type_id) 2193 type_id = d->map[type_id]; 2194 2195 if (!btf_is_fwd(d->btf->types[type_id])) 2196 return type_id; 2197 2198 return orig_type_id; 2199 } 2200 2201 2202 static inline __u16 btf_fwd_kind(struct btf_type *t) 2203 { 2204 return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT; 2205 } 2206 2207 /* 2208 * Check equivalence of BTF type graph formed by candidate struct/union (we'll 2209 * call it "candidate graph" in this description for brevity) to a type graph 2210 * formed by (potential) canonical struct/union ("canonical graph" for brevity 2211 * here, though keep in mind that not all types in canonical graph are 2212 * necessarily canonical representatives themselves, some of them might be 2213 * duplicates or its uniqueness might not have been established yet). 2214 * Returns: 2215 * - >0, if type graphs are equivalent; 2216 * - 0, if not equivalent; 2217 * - <0, on error. 2218 * 2219 * Algorithm performs side-by-side DFS traversal of both type graphs and checks 2220 * equivalence of BTF types at each step. If at any point BTF types in candidate 2221 * and canonical graphs are not compatible structurally, whole graphs are 2222 * incompatible. If types are structurally equivalent (i.e., all information 2223 * except referenced type IDs is exactly the same), a mapping from `canon_id` to 2224 * a `cand_id` is recored in hypothetical mapping (`btf_dedup->hypot_map`). 2225 * If a type references other types, then those referenced types are checked 2226 * for equivalence recursively. 2227 * 2228 * During DFS traversal, if we find that for current `canon_id` type we 2229 * already have some mapping in hypothetical map, we check for two possible 2230 * situations: 2231 * - `canon_id` is mapped to exactly the same type as `cand_id`. This will 2232 * happen when type graphs have cycles. In this case we assume those two 2233 * types are equivalent. 2234 * - `canon_id` is mapped to different type. This is contradiction in our 2235 * hypothetical mapping, because same graph in canonical graph corresponds 2236 * to two different types in candidate graph, which for equivalent type 2237 * graphs shouldn't happen. This condition terminates equivalence check 2238 * with negative result. 2239 * 2240 * If type graphs traversal exhausts types to check and find no contradiction, 2241 * then type graphs are equivalent. 2242 * 2243 * When checking types for equivalence, there is one special case: FWD types. 2244 * If FWD type resolution is allowed and one of the types (either from canonical 2245 * or candidate graph) is FWD and other is STRUCT/UNION (depending on FWD's kind 2246 * flag) and their names match, hypothetical mapping is updated to point from 2247 * FWD to STRUCT/UNION. If graphs will be determined as equivalent successfully, 2248 * this mapping will be used to record FWD -> STRUCT/UNION mapping permanently. 2249 * 2250 * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution, 2251 * if there are two exactly named (or anonymous) structs/unions that are 2252 * compatible structurally, one of which has FWD field, while other is concrete 2253 * STRUCT/UNION, but according to C sources they are different structs/unions 2254 * that are referencing different types with the same name. This is extremely 2255 * unlikely to happen, but btf_dedup API allows to disable FWD resolution if 2256 * this logic is causing problems. 2257 * 2258 * Doing FWD resolution means that both candidate and/or canonical graphs can 2259 * consists of portions of the graph that come from multiple compilation units. 2260 * This is due to the fact that types within single compilation unit are always 2261 * deduplicated and FWDs are already resolved, if referenced struct/union 2262 * definiton is available. So, if we had unresolved FWD and found corresponding 2263 * STRUCT/UNION, they will be from different compilation units. This 2264 * consequently means that when we "link" FWD to corresponding STRUCT/UNION, 2265 * type graph will likely have at least two different BTF types that describe 2266 * same type (e.g., most probably there will be two different BTF types for the 2267 * same 'int' primitive type) and could even have "overlapping" parts of type 2268 * graph that describe same subset of types. 2269 * 2270 * This in turn means that our assumption that each type in canonical graph 2271 * must correspond to exactly one type in candidate graph might not hold 2272 * anymore and will make it harder to detect contradictions using hypothetical 2273 * map. To handle this problem, we allow to follow FWD -> STRUCT/UNION 2274 * resolution only in canonical graph. FWDs in candidate graphs are never 2275 * resolved. To see why it's OK, let's check all possible situations w.r.t. FWDs 2276 * that can occur: 2277 * - Both types in canonical and candidate graphs are FWDs. If they are 2278 * structurally equivalent, then they can either be both resolved to the 2279 * same STRUCT/UNION or not resolved at all. In both cases they are 2280 * equivalent and there is no need to resolve FWD on candidate side. 2281 * - Both types in canonical and candidate graphs are concrete STRUCT/UNION, 2282 * so nothing to resolve as well, algorithm will check equivalence anyway. 2283 * - Type in canonical graph is FWD, while type in candidate is concrete 2284 * STRUCT/UNION. In this case candidate graph comes from single compilation 2285 * unit, so there is exactly one BTF type for each unique C type. After 2286 * resolving FWD into STRUCT/UNION, there might be more than one BTF type 2287 * in canonical graph mapping to single BTF type in candidate graph, but 2288 * because hypothetical mapping maps from canonical to candidate types, it's 2289 * alright, and we still maintain the property of having single `canon_id` 2290 * mapping to single `cand_id` (there could be two different `canon_id` 2291 * mapped to the same `cand_id`, but it's not contradictory). 2292 * - Type in canonical graph is concrete STRUCT/UNION, while type in candidate 2293 * graph is FWD. In this case we are just going to check compatibility of 2294 * STRUCT/UNION and corresponding FWD, and if they are compatible, we'll 2295 * assume that whatever STRUCT/UNION FWD resolves to must be equivalent to 2296 * a concrete STRUCT/UNION from canonical graph. If the rest of type graphs 2297 * turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from 2298 * canonical graph. 2299 */ 2300 static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, 2301 __u32 canon_id) 2302 { 2303 struct btf_type *cand_type; 2304 struct btf_type *canon_type; 2305 __u32 hypot_type_id; 2306 __u16 cand_kind; 2307 __u16 canon_kind; 2308 int i, eq; 2309 2310 /* if both resolve to the same canonical, they must be equivalent */ 2311 if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id)) 2312 return 1; 2313 2314 canon_id = resolve_fwd_id(d, canon_id); 2315 2316 hypot_type_id = d->hypot_map[canon_id]; 2317 if (hypot_type_id <= BTF_MAX_NR_TYPES) 2318 return hypot_type_id == cand_id; 2319 2320 if (btf_dedup_hypot_map_add(d, canon_id, cand_id)) 2321 return -ENOMEM; 2322 2323 cand_type = d->btf->types[cand_id]; 2324 canon_type = d->btf->types[canon_id]; 2325 cand_kind = btf_kind(cand_type); 2326 canon_kind = btf_kind(canon_type); 2327 2328 if (cand_type->name_off != canon_type->name_off) 2329 return 0; 2330 2331 /* FWD <--> STRUCT/UNION equivalence check, if enabled */ 2332 if (!d->opts.dont_resolve_fwds 2333 && (cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD) 2334 && cand_kind != canon_kind) { 2335 __u16 real_kind; 2336 __u16 fwd_kind; 2337 2338 if (cand_kind == BTF_KIND_FWD) { 2339 real_kind = canon_kind; 2340 fwd_kind = btf_fwd_kind(cand_type); 2341 } else { 2342 real_kind = cand_kind; 2343 fwd_kind = btf_fwd_kind(canon_type); 2344 } 2345 return fwd_kind == real_kind; 2346 } 2347 2348 if (cand_kind != canon_kind) 2349 return 0; 2350 2351 switch (cand_kind) { 2352 case BTF_KIND_INT: 2353 return btf_equal_int(cand_type, canon_type); 2354 2355 case BTF_KIND_ENUM: 2356 if (d->opts.dont_resolve_fwds) 2357 return btf_equal_enum(cand_type, canon_type); 2358 else 2359 return btf_compat_enum(cand_type, canon_type); 2360 2361 case BTF_KIND_FWD: 2362 return btf_equal_common(cand_type, canon_type); 2363 2364 case BTF_KIND_CONST: 2365 case BTF_KIND_VOLATILE: 2366 case BTF_KIND_RESTRICT: 2367 case BTF_KIND_PTR: 2368 case BTF_KIND_TYPEDEF: 2369 case BTF_KIND_FUNC: 2370 if (cand_type->info != canon_type->info) 2371 return 0; 2372 return btf_dedup_is_equiv(d, cand_type->type, canon_type->type); 2373 2374 case BTF_KIND_ARRAY: { 2375 const struct btf_array *cand_arr, *canon_arr; 2376 2377 if (!btf_compat_array(cand_type, canon_type)) 2378 return 0; 2379 cand_arr = btf_array(cand_type); 2380 canon_arr = btf_array(canon_type); 2381 eq = btf_dedup_is_equiv(d, 2382 cand_arr->index_type, canon_arr->index_type); 2383 if (eq <= 0) 2384 return eq; 2385 return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type); 2386 } 2387 2388 case BTF_KIND_STRUCT: 2389 case BTF_KIND_UNION: { 2390 const struct btf_member *cand_m, *canon_m; 2391 __u16 vlen; 2392 2393 if (!btf_shallow_equal_struct(cand_type, canon_type)) 2394 return 0; 2395 vlen = btf_vlen(cand_type); 2396 cand_m = btf_members(cand_type); 2397 canon_m = btf_members(canon_type); 2398 for (i = 0; i < vlen; i++) { 2399 eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type); 2400 if (eq <= 0) 2401 return eq; 2402 cand_m++; 2403 canon_m++; 2404 } 2405 2406 return 1; 2407 } 2408 2409 case BTF_KIND_FUNC_PROTO: { 2410 const struct btf_param *cand_p, *canon_p; 2411 __u16 vlen; 2412 2413 if (!btf_compat_fnproto(cand_type, canon_type)) 2414 return 0; 2415 eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type); 2416 if (eq <= 0) 2417 return eq; 2418 vlen = btf_vlen(cand_type); 2419 cand_p = btf_params(cand_type); 2420 canon_p = btf_params(canon_type); 2421 for (i = 0; i < vlen; i++) { 2422 eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type); 2423 if (eq <= 0) 2424 return eq; 2425 cand_p++; 2426 canon_p++; 2427 } 2428 return 1; 2429 } 2430 2431 default: 2432 return -EINVAL; 2433 } 2434 return 0; 2435 } 2436 2437 /* 2438 * Use hypothetical mapping, produced by successful type graph equivalence 2439 * check, to augment existing struct/union canonical mapping, where possible. 2440 * 2441 * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record 2442 * FWD -> STRUCT/UNION correspondence as well. FWD resolution is bidirectional: 2443 * it doesn't matter if FWD type was part of canonical graph or candidate one, 2444 * we are recording the mapping anyway. As opposed to carefulness required 2445 * for struct/union correspondence mapping (described below), for FWD resolution 2446 * it's not important, as by the time that FWD type (reference type) will be 2447 * deduplicated all structs/unions will be deduped already anyway. 2448 * 2449 * Recording STRUCT/UNION mapping is purely a performance optimization and is 2450 * not required for correctness. It needs to be done carefully to ensure that 2451 * struct/union from candidate's type graph is not mapped into corresponding 2452 * struct/union from canonical type graph that itself hasn't been resolved into 2453 * canonical representative. The only guarantee we have is that canonical 2454 * struct/union was determined as canonical and that won't change. But any 2455 * types referenced through that struct/union fields could have been not yet 2456 * resolved, so in case like that it's too early to establish any kind of 2457 * correspondence between structs/unions. 2458 * 2459 * No canonical correspondence is derived for primitive types (they are already 2460 * deduplicated completely already anyway) or reference types (they rely on 2461 * stability of struct/union canonical relationship for equivalence checks). 2462 */ 2463 static void btf_dedup_merge_hypot_map(struct btf_dedup *d) 2464 { 2465 __u32 cand_type_id, targ_type_id; 2466 __u16 t_kind, c_kind; 2467 __u32 t_id, c_id; 2468 int i; 2469 2470 for (i = 0; i < d->hypot_cnt; i++) { 2471 cand_type_id = d->hypot_list[i]; 2472 targ_type_id = d->hypot_map[cand_type_id]; 2473 t_id = resolve_type_id(d, targ_type_id); 2474 c_id = resolve_type_id(d, cand_type_id); 2475 t_kind = btf_kind(d->btf->types[t_id]); 2476 c_kind = btf_kind(d->btf->types[c_id]); 2477 /* 2478 * Resolve FWD into STRUCT/UNION. 2479 * It's ok to resolve FWD into STRUCT/UNION that's not yet 2480 * mapped to canonical representative (as opposed to 2481 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because 2482 * eventually that struct is going to be mapped and all resolved 2483 * FWDs will automatically resolve to correct canonical 2484 * representative. This will happen before ref type deduping, 2485 * which critically depends on stability of these mapping. This 2486 * stability is not a requirement for STRUCT/UNION equivalence 2487 * checks, though. 2488 */ 2489 if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD) 2490 d->map[c_id] = t_id; 2491 else if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD) 2492 d->map[t_id] = c_id; 2493 2494 if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) && 2495 c_kind != BTF_KIND_FWD && 2496 is_type_mapped(d, c_id) && 2497 !is_type_mapped(d, t_id)) { 2498 /* 2499 * as a perf optimization, we can map struct/union 2500 * that's part of type graph we just verified for 2501 * equivalence. We can do that for struct/union that has 2502 * canonical representative only, though. 2503 */ 2504 d->map[t_id] = c_id; 2505 } 2506 } 2507 } 2508 2509 /* 2510 * Deduplicate struct/union types. 2511 * 2512 * For each struct/union type its type signature hash is calculated, taking 2513 * into account type's name, size, number, order and names of fields, but 2514 * ignoring type ID's referenced from fields, because they might not be deduped 2515 * completely until after reference types deduplication phase. This type hash 2516 * is used to iterate over all potential canonical types, sharing same hash. 2517 * For each canonical candidate we check whether type graphs that they form 2518 * (through referenced types in fields and so on) are equivalent using algorithm 2519 * implemented in `btf_dedup_is_equiv`. If such equivalence is found and 2520 * BTF_KIND_FWD resolution is allowed, then hypothetical mapping 2521 * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence 2522 * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to 2523 * potentially map other structs/unions to their canonical representatives, 2524 * if such relationship hasn't yet been established. This speeds up algorithm 2525 * by eliminating some of the duplicate work. 2526 * 2527 * If no matching canonical representative was found, struct/union is marked 2528 * as canonical for itself and is added into btf_dedup->dedup_table hash map 2529 * for further look ups. 2530 */ 2531 static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id) 2532 { 2533 struct btf_type *cand_type, *t; 2534 struct hashmap_entry *hash_entry; 2535 /* if we don't find equivalent type, then we are canonical */ 2536 __u32 new_id = type_id; 2537 __u16 kind; 2538 long h; 2539 2540 /* already deduped or is in process of deduping (loop detected) */ 2541 if (d->map[type_id] <= BTF_MAX_NR_TYPES) 2542 return 0; 2543 2544 t = d->btf->types[type_id]; 2545 kind = btf_kind(t); 2546 2547 if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION) 2548 return 0; 2549 2550 h = btf_hash_struct(t); 2551 for_each_dedup_cand(d, hash_entry, h) { 2552 __u32 cand_id = (__u32)(long)hash_entry->value; 2553 int eq; 2554 2555 /* 2556 * Even though btf_dedup_is_equiv() checks for 2557 * btf_shallow_equal_struct() internally when checking two 2558 * structs (unions) for equivalence, we need to guard here 2559 * from picking matching FWD type as a dedup candidate. 2560 * This can happen due to hash collision. In such case just 2561 * relying on btf_dedup_is_equiv() would lead to potentially 2562 * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because 2563 * FWD and compatible STRUCT/UNION are considered equivalent. 2564 */ 2565 cand_type = d->btf->types[cand_id]; 2566 if (!btf_shallow_equal_struct(t, cand_type)) 2567 continue; 2568 2569 btf_dedup_clear_hypot_map(d); 2570 eq = btf_dedup_is_equiv(d, type_id, cand_id); 2571 if (eq < 0) 2572 return eq; 2573 if (!eq) 2574 continue; 2575 new_id = cand_id; 2576 btf_dedup_merge_hypot_map(d); 2577 break; 2578 } 2579 2580 d->map[type_id] = new_id; 2581 if (type_id == new_id && btf_dedup_table_add(d, h, type_id)) 2582 return -ENOMEM; 2583 2584 return 0; 2585 } 2586 2587 static int btf_dedup_struct_types(struct btf_dedup *d) 2588 { 2589 int i, err; 2590 2591 for (i = 1; i <= d->btf->nr_types; i++) { 2592 err = btf_dedup_struct_type(d, i); 2593 if (err) 2594 return err; 2595 } 2596 return 0; 2597 } 2598 2599 /* 2600 * Deduplicate reference type. 2601 * 2602 * Once all primitive and struct/union types got deduplicated, we can easily 2603 * deduplicate all other (reference) BTF types. This is done in two steps: 2604 * 2605 * 1. Resolve all referenced type IDs into their canonical type IDs. This 2606 * resolution can be done either immediately for primitive or struct/union types 2607 * (because they were deduped in previous two phases) or recursively for 2608 * reference types. Recursion will always terminate at either primitive or 2609 * struct/union type, at which point we can "unwind" chain of reference types 2610 * one by one. There is no danger of encountering cycles because in C type 2611 * system the only way to form type cycle is through struct/union, so any chain 2612 * of reference types, even those taking part in a type cycle, will inevitably 2613 * reach struct/union at some point. 2614 * 2615 * 2. Once all referenced type IDs are resolved into canonical ones, BTF type 2616 * becomes "stable", in the sense that no further deduplication will cause 2617 * any changes to it. With that, it's now possible to calculate type's signature 2618 * hash (this time taking into account referenced type IDs) and loop over all 2619 * potential canonical representatives. If no match was found, current type 2620 * will become canonical representative of itself and will be added into 2621 * btf_dedup->dedup_table as another possible canonical representative. 2622 */ 2623 static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id) 2624 { 2625 struct hashmap_entry *hash_entry; 2626 __u32 new_id = type_id, cand_id; 2627 struct btf_type *t, *cand; 2628 /* if we don't find equivalent type, then we are representative type */ 2629 int ref_type_id; 2630 long h; 2631 2632 if (d->map[type_id] == BTF_IN_PROGRESS_ID) 2633 return -ELOOP; 2634 if (d->map[type_id] <= BTF_MAX_NR_TYPES) 2635 return resolve_type_id(d, type_id); 2636 2637 t = d->btf->types[type_id]; 2638 d->map[type_id] = BTF_IN_PROGRESS_ID; 2639 2640 switch (btf_kind(t)) { 2641 case BTF_KIND_CONST: 2642 case BTF_KIND_VOLATILE: 2643 case BTF_KIND_RESTRICT: 2644 case BTF_KIND_PTR: 2645 case BTF_KIND_TYPEDEF: 2646 case BTF_KIND_FUNC: 2647 ref_type_id = btf_dedup_ref_type(d, t->type); 2648 if (ref_type_id < 0) 2649 return ref_type_id; 2650 t->type = ref_type_id; 2651 2652 h = btf_hash_common(t); 2653 for_each_dedup_cand(d, hash_entry, h) { 2654 cand_id = (__u32)(long)hash_entry->value; 2655 cand = d->btf->types[cand_id]; 2656 if (btf_equal_common(t, cand)) { 2657 new_id = cand_id; 2658 break; 2659 } 2660 } 2661 break; 2662 2663 case BTF_KIND_ARRAY: { 2664 struct btf_array *info = btf_array(t); 2665 2666 ref_type_id = btf_dedup_ref_type(d, info->type); 2667 if (ref_type_id < 0) 2668 return ref_type_id; 2669 info->type = ref_type_id; 2670 2671 ref_type_id = btf_dedup_ref_type(d, info->index_type); 2672 if (ref_type_id < 0) 2673 return ref_type_id; 2674 info->index_type = ref_type_id; 2675 2676 h = btf_hash_array(t); 2677 for_each_dedup_cand(d, hash_entry, h) { 2678 cand_id = (__u32)(long)hash_entry->value; 2679 cand = d->btf->types[cand_id]; 2680 if (btf_equal_array(t, cand)) { 2681 new_id = cand_id; 2682 break; 2683 } 2684 } 2685 break; 2686 } 2687 2688 case BTF_KIND_FUNC_PROTO: { 2689 struct btf_param *param; 2690 __u16 vlen; 2691 int i; 2692 2693 ref_type_id = btf_dedup_ref_type(d, t->type); 2694 if (ref_type_id < 0) 2695 return ref_type_id; 2696 t->type = ref_type_id; 2697 2698 vlen = btf_vlen(t); 2699 param = btf_params(t); 2700 for (i = 0; i < vlen; i++) { 2701 ref_type_id = btf_dedup_ref_type(d, param->type); 2702 if (ref_type_id < 0) 2703 return ref_type_id; 2704 param->type = ref_type_id; 2705 param++; 2706 } 2707 2708 h = btf_hash_fnproto(t); 2709 for_each_dedup_cand(d, hash_entry, h) { 2710 cand_id = (__u32)(long)hash_entry->value; 2711 cand = d->btf->types[cand_id]; 2712 if (btf_equal_fnproto(t, cand)) { 2713 new_id = cand_id; 2714 break; 2715 } 2716 } 2717 break; 2718 } 2719 2720 default: 2721 return -EINVAL; 2722 } 2723 2724 d->map[type_id] = new_id; 2725 if (type_id == new_id && btf_dedup_table_add(d, h, type_id)) 2726 return -ENOMEM; 2727 2728 return new_id; 2729 } 2730 2731 static int btf_dedup_ref_types(struct btf_dedup *d) 2732 { 2733 int i, err; 2734 2735 for (i = 1; i <= d->btf->nr_types; i++) { 2736 err = btf_dedup_ref_type(d, i); 2737 if (err < 0) 2738 return err; 2739 } 2740 /* we won't need d->dedup_table anymore */ 2741 hashmap__free(d->dedup_table); 2742 d->dedup_table = NULL; 2743 return 0; 2744 } 2745 2746 /* 2747 * Compact types. 2748 * 2749 * After we established for each type its corresponding canonical representative 2750 * type, we now can eliminate types that are not canonical and leave only 2751 * canonical ones layed out sequentially in memory by copying them over 2752 * duplicates. During compaction btf_dedup->hypot_map array is reused to store 2753 * a map from original type ID to a new compacted type ID, which will be used 2754 * during next phase to "fix up" type IDs, referenced from struct/union and 2755 * reference types. 2756 */ 2757 static int btf_dedup_compact_types(struct btf_dedup *d) 2758 { 2759 struct btf_type **new_types; 2760 __u32 next_type_id = 1; 2761 char *types_start, *p; 2762 int i, len; 2763 2764 /* we are going to reuse hypot_map to store compaction remapping */ 2765 d->hypot_map[0] = 0; 2766 for (i = 1; i <= d->btf->nr_types; i++) 2767 d->hypot_map[i] = BTF_UNPROCESSED_ID; 2768 2769 types_start = d->btf->nohdr_data + d->btf->hdr->type_off; 2770 p = types_start; 2771 2772 for (i = 1; i <= d->btf->nr_types; i++) { 2773 if (d->map[i] != i) 2774 continue; 2775 2776 len = btf_type_size(d->btf->types[i]); 2777 if (len < 0) 2778 return len; 2779 2780 memmove(p, d->btf->types[i], len); 2781 d->hypot_map[i] = next_type_id; 2782 d->btf->types[next_type_id] = (struct btf_type *)p; 2783 p += len; 2784 next_type_id++; 2785 } 2786 2787 /* shrink struct btf's internal types index and update btf_header */ 2788 d->btf->nr_types = next_type_id - 1; 2789 d->btf->types_size = d->btf->nr_types; 2790 d->btf->hdr->type_len = p - types_start; 2791 new_types = realloc(d->btf->types, 2792 (1 + d->btf->nr_types) * sizeof(struct btf_type *)); 2793 if (!new_types) 2794 return -ENOMEM; 2795 d->btf->types = new_types; 2796 2797 /* make sure string section follows type information without gaps */ 2798 d->btf->hdr->str_off = p - (char *)d->btf->nohdr_data; 2799 memmove(p, d->btf->strings, d->btf->hdr->str_len); 2800 d->btf->strings = p; 2801 p += d->btf->hdr->str_len; 2802 2803 d->btf->data_size = p - (char *)d->btf->data; 2804 return 0; 2805 } 2806 2807 /* 2808 * Figure out final (deduplicated and compacted) type ID for provided original 2809 * `type_id` by first resolving it into corresponding canonical type ID and 2810 * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map, 2811 * which is populated during compaction phase. 2812 */ 2813 static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id) 2814 { 2815 __u32 resolved_type_id, new_type_id; 2816 2817 resolved_type_id = resolve_type_id(d, type_id); 2818 new_type_id = d->hypot_map[resolved_type_id]; 2819 if (new_type_id > BTF_MAX_NR_TYPES) 2820 return -EINVAL; 2821 return new_type_id; 2822 } 2823 2824 /* 2825 * Remap referenced type IDs into deduped type IDs. 2826 * 2827 * After BTF types are deduplicated and compacted, their final type IDs may 2828 * differ from original ones. The map from original to a corresponding 2829 * deduped type ID is stored in btf_dedup->hypot_map and is populated during 2830 * compaction phase. During remapping phase we are rewriting all type IDs 2831 * referenced from any BTF type (e.g., struct fields, func proto args, etc) to 2832 * their final deduped type IDs. 2833 */ 2834 static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id) 2835 { 2836 struct btf_type *t = d->btf->types[type_id]; 2837 int i, r; 2838 2839 switch (btf_kind(t)) { 2840 case BTF_KIND_INT: 2841 case BTF_KIND_ENUM: 2842 break; 2843 2844 case BTF_KIND_FWD: 2845 case BTF_KIND_CONST: 2846 case BTF_KIND_VOLATILE: 2847 case BTF_KIND_RESTRICT: 2848 case BTF_KIND_PTR: 2849 case BTF_KIND_TYPEDEF: 2850 case BTF_KIND_FUNC: 2851 case BTF_KIND_VAR: 2852 r = btf_dedup_remap_type_id(d, t->type); 2853 if (r < 0) 2854 return r; 2855 t->type = r; 2856 break; 2857 2858 case BTF_KIND_ARRAY: { 2859 struct btf_array *arr_info = btf_array(t); 2860 2861 r = btf_dedup_remap_type_id(d, arr_info->type); 2862 if (r < 0) 2863 return r; 2864 arr_info->type = r; 2865 r = btf_dedup_remap_type_id(d, arr_info->index_type); 2866 if (r < 0) 2867 return r; 2868 arr_info->index_type = r; 2869 break; 2870 } 2871 2872 case BTF_KIND_STRUCT: 2873 case BTF_KIND_UNION: { 2874 struct btf_member *member = btf_members(t); 2875 __u16 vlen = btf_vlen(t); 2876 2877 for (i = 0; i < vlen; i++) { 2878 r = btf_dedup_remap_type_id(d, member->type); 2879 if (r < 0) 2880 return r; 2881 member->type = r; 2882 member++; 2883 } 2884 break; 2885 } 2886 2887 case BTF_KIND_FUNC_PROTO: { 2888 struct btf_param *param = btf_params(t); 2889 __u16 vlen = btf_vlen(t); 2890 2891 r = btf_dedup_remap_type_id(d, t->type); 2892 if (r < 0) 2893 return r; 2894 t->type = r; 2895 2896 for (i = 0; i < vlen; i++) { 2897 r = btf_dedup_remap_type_id(d, param->type); 2898 if (r < 0) 2899 return r; 2900 param->type = r; 2901 param++; 2902 } 2903 break; 2904 } 2905 2906 case BTF_KIND_DATASEC: { 2907 struct btf_var_secinfo *var = btf_var_secinfos(t); 2908 __u16 vlen = btf_vlen(t); 2909 2910 for (i = 0; i < vlen; i++) { 2911 r = btf_dedup_remap_type_id(d, var->type); 2912 if (r < 0) 2913 return r; 2914 var->type = r; 2915 var++; 2916 } 2917 break; 2918 } 2919 2920 default: 2921 return -EINVAL; 2922 } 2923 2924 return 0; 2925 } 2926 2927 static int btf_dedup_remap_types(struct btf_dedup *d) 2928 { 2929 int i, r; 2930 2931 for (i = 1; i <= d->btf->nr_types; i++) { 2932 r = btf_dedup_remap_type(d, i); 2933 if (r < 0) 2934 return r; 2935 } 2936 return 0; 2937 } 2938 2939 static struct btf *btf_load_raw(const char *path) 2940 { 2941 struct btf *btf; 2942 size_t read_cnt; 2943 struct stat st; 2944 void *data; 2945 FILE *f; 2946 2947 if (stat(path, &st)) 2948 return ERR_PTR(-errno); 2949 2950 data = malloc(st.st_size); 2951 if (!data) 2952 return ERR_PTR(-ENOMEM); 2953 2954 f = fopen(path, "rb"); 2955 if (!f) { 2956 btf = ERR_PTR(-errno); 2957 goto cleanup; 2958 } 2959 2960 read_cnt = fread(data, 1, st.st_size, f); 2961 fclose(f); 2962 if (read_cnt < st.st_size) { 2963 btf = ERR_PTR(-EBADF); 2964 goto cleanup; 2965 } 2966 2967 btf = btf__new(data, read_cnt); 2968 2969 cleanup: 2970 free(data); 2971 return btf; 2972 } 2973 2974 /* 2975 * Probe few well-known locations for vmlinux kernel image and try to load BTF 2976 * data out of it to use for target BTF. 2977 */ 2978 struct btf *libbpf_find_kernel_btf(void) 2979 { 2980 struct { 2981 const char *path_fmt; 2982 bool raw_btf; 2983 } locations[] = { 2984 /* try canonical vmlinux BTF through sysfs first */ 2985 { "/sys/kernel/btf/vmlinux", true /* raw BTF */ }, 2986 /* fall back to trying to find vmlinux ELF on disk otherwise */ 2987 { "/boot/vmlinux-%1$s" }, 2988 { "/lib/modules/%1$s/vmlinux-%1$s" }, 2989 { "/lib/modules/%1$s/build/vmlinux" }, 2990 { "/usr/lib/modules/%1$s/kernel/vmlinux" }, 2991 { "/usr/lib/debug/boot/vmlinux-%1$s" }, 2992 { "/usr/lib/debug/boot/vmlinux-%1$s.debug" }, 2993 { "/usr/lib/debug/lib/modules/%1$s/vmlinux" }, 2994 }; 2995 char path[PATH_MAX + 1]; 2996 struct utsname buf; 2997 struct btf *btf; 2998 int i; 2999 3000 uname(&buf); 3001 3002 for (i = 0; i < ARRAY_SIZE(locations); i++) { 3003 snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release); 3004 3005 if (access(path, R_OK)) 3006 continue; 3007 3008 if (locations[i].raw_btf) 3009 btf = btf_load_raw(path); 3010 else 3011 btf = btf__parse_elf(path, NULL); 3012 3013 pr_debug("loading kernel BTF '%s': %ld\n", 3014 path, IS_ERR(btf) ? PTR_ERR(btf) : 0); 3015 if (IS_ERR(btf)) 3016 continue; 3017 3018 return btf; 3019 } 3020 3021 pr_warn("failed to find valid kernel BTF\n"); 3022 return ERR_PTR(-ESRCH); 3023 } 3024