// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2018 Facebook */

#include <endian.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <linux/err.h>
#include <linux/btf.h>
#include <gelf.h>
#include "btf.h"
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#include "hashmap.h"

#define BTF_MAX_NR_TYPES 0x7fffffff
#define BTF_MAX_STR_OFFSET 0x7fffffff

static struct btf_type btf_void;

struct btf {
	union {
		struct btf_header *hdr;
		void *data;
	};
	struct btf_type **types;
	const char *strings;
	void *nohdr_data;
	__u32 nr_types;
	__u32 types_size;
	__u32 data_size;
	int fd;
};

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

static int btf_add_type(struct btf *btf, struct btf_type *t)
{
	if (btf->types_size - btf->nr_types < 2) {
		struct btf_type **new_types;
		__u32 expand_by, new_size;

		if (btf->types_size == BTF_MAX_NR_TYPES)
			return -E2BIG;

		expand_by = max(btf->types_size >> 2, 16);
		new_size = min(BTF_MAX_NR_TYPES, btf->types_size + expand_by);

		new_types = realloc(btf->types, sizeof(*new_types) * new_size);
		if (!new_types)
			return -ENOMEM;

		if (btf->nr_types == 0)
			new_types[0] = &btf_void;

		btf->types = new_types;
		btf->types_size = new_size;
	}

	btf->types[++(btf->nr_types)] = t;

	return 0;
}

static int btf_parse_hdr(struct btf *btf)
{
	const struct btf_header *hdr = btf->hdr;
	__u32 meta_left;

	if (btf->data_size < sizeof(struct btf_header)) {
		pr_debug("BTF header not found\n");
		return -EINVAL;
	}

	if (hdr->magic != BTF_MAGIC) {
		pr_debug("Invalid BTF magic:%x\n", hdr->magic);
		return -EINVAL;
	}

	if (hdr->version != BTF_VERSION) {
		pr_debug("Unsupported BTF version:%u\n", hdr->version);
		return -ENOTSUP;
	}

	if (hdr->flags) {
		pr_debug("Unsupported BTF flags:%x\n", hdr->flags);
		return -ENOTSUP;
	}

	meta_left = btf->data_size - sizeof(*hdr);
	if (!meta_left) {
		pr_debug("BTF has no data\n");
		return -EINVAL;
	}

	if (meta_left < hdr->type_off) {
		pr_debug("Invalid BTF type section offset:%u\n", hdr->type_off);
		return -EINVAL;
	}

	if (meta_left < hdr->str_off) {
		pr_debug("Invalid BTF string section offset:%u\n", hdr->str_off);
		return -EINVAL;
	}

	if (hdr->type_off >= hdr->str_off) {
		pr_debug("BTF type section offset >= string section offset. No type?\n");
		return -EINVAL;
	}

	if (hdr->type_off & 0x02) {
		pr_debug("BTF type section is not aligned to 4 bytes\n");
		return -EINVAL;
	}

	btf->nohdr_data = btf->hdr + 1;

	return 0;
}

static int btf_parse_str_sec(struct btf *btf)
{
	const struct btf_header *hdr = btf->hdr;
	const char *start = btf->nohdr_data + hdr->str_off;
	const char *end = start + btf->hdr->str_len;

	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET ||
	    start[0] || end[-1]) {
		pr_debug("Invalid BTF string section\n");
		return -EINVAL;
	}

	btf->strings = start;

	return 0;
}

static int btf_type_size(struct btf_type *t)
{
	int base_size = sizeof(struct btf_type);
	__u16 vlen = btf_vlen(t);

	switch (btf_kind(t)) {
	case BTF_KIND_FWD:
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
		return base_size;
	case BTF_KIND_INT:
		return base_size + sizeof(__u32);
	case BTF_KIND_ENUM:
		return base_size + vlen * sizeof(struct btf_enum);
	case BTF_KIND_ARRAY:
		return base_size + sizeof(struct btf_array);
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		return base_size + vlen * sizeof(struct btf_member);
	case BTF_KIND_FUNC_PROTO:
		return base_size + vlen * sizeof(struct btf_param);
	case BTF_KIND_VAR:
		return base_size + sizeof(struct btf_var);
	case BTF_KIND_DATASEC:
		return base_size + vlen * sizeof(struct btf_var_secinfo);
	default:
		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
		return -EINVAL;
	}
}

static int btf_parse_type_sec(struct btf *btf)
{
	struct btf_header *hdr = btf->hdr;
	void *nohdr_data = btf->nohdr_data;
	void *next_type = nohdr_data + hdr->type_off;
	void *end_type = nohdr_data + hdr->str_off;

	while (next_type < end_type) {
		struct btf_type *t = next_type;
		int type_size;
		int err;

		type_size = btf_type_size(t);
		if (type_size < 0)
			return type_size;
		next_type += type_size;
		err = btf_add_type(btf, t);
		if (err)
			return err;
	}

	return 0;
}

__u32 btf__get_nr_types(const struct btf *btf)
{
	return btf->nr_types;
}

const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
{
	if (type_id > btf->nr_types)
		return NULL;

	return btf->types[type_id];
}

static bool btf_type_is_void(const struct btf_type *t)
{
	return t == &btf_void || btf_is_fwd(t);
}

static bool btf_type_is_void_or_null(const struct btf_type *t)
{
	return !t || btf_type_is_void(t);
}

#define MAX_RESOLVE_DEPTH 32

__s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
{
	const struct btf_array *array;
	const struct btf_type *t;
	__u32 nelems = 1;
	__s64 size = -1;
	int i;

	t = btf__type_by_id(btf, type_id);
	for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t);
	     i++) {
		switch (btf_kind(t)) {
		case BTF_KIND_INT:
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
		case BTF_KIND_ENUM:
		case BTF_KIND_DATASEC:
			size = t->size;
			goto done;
		case BTF_KIND_PTR:
			size = sizeof(void *);
			goto done;
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_VAR:
			type_id = t->type;
			break;
		case BTF_KIND_ARRAY:
			array = btf_array(t);
			if (nelems && array->nelems > UINT32_MAX / nelems)
				return -E2BIG;
			nelems *= array->nelems;
			type_id = array->type;
			break;
		default:
			return -EINVAL;
		}

		t = btf__type_by_id(btf, type_id);
	}

	if (size < 0)
		return -EINVAL;

done:
	if (nelems && size > UINT32_MAX / nelems)
		return -E2BIG;

	return nelems * size;
}

int btf__resolve_type(const struct btf *btf, __u32 type_id)
{
	const struct btf_type *t;
	int depth = 0;

	t = btf__type_by_id(btf, type_id);
	while (depth < MAX_RESOLVE_DEPTH &&
	       !btf_type_is_void_or_null(t) &&
	       (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) {
		type_id = t->type;
		t = btf__type_by_id(btf, type_id);
		depth++;
	}

	if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
		return -EINVAL;

	return type_id;
}

__s32 btf__find_by_name(const struct btf *btf, const char *type_name)
{
	__u32 i;

	if (!strcmp(type_name, "void"))
		return 0;

	for (i = 1; i <= btf->nr_types; i++) {
		const struct btf_type *t = btf->types[i];
		const char *name = btf__name_by_offset(btf, t->name_off);

		if (name && !strcmp(type_name, name))
			return i;
	}

	return -ENOENT;
}

void btf__free(struct btf *btf)
{
	if (!btf)
		return;

	if (btf->fd != -1)
		close(btf->fd);

	free(btf->data);
	free(btf->types);
	free(btf);
}

struct btf *btf__new(__u8 *data, __u32 size)
{
	struct btf *btf;
	int err;

	btf = calloc(1, sizeof(struct btf));
	if (!btf)
		return ERR_PTR(-ENOMEM);

	btf->fd = -1;

	btf->data = malloc(size);
	if (!btf->data) {
		err = -ENOMEM;
		goto done;
	}

	memcpy(btf->data, data, size);
	btf->data_size = size;

	err = btf_parse_hdr(btf);
	if (err)
		goto done;

	err = btf_parse_str_sec(btf);
	if (err)
		goto done;

	err = btf_parse_type_sec(btf);

done:
	if (err) {
		btf__free(btf);
		return ERR_PTR(err);
	}

	return btf;
}

static bool btf_check_endianness(const GElf_Ehdr *ehdr)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
	return ehdr->e_ident[EI_DATA] == ELFDATA2LSB;
#elif __BYTE_ORDER == __BIG_ENDIAN
	return ehdr->e_ident[EI_DATA] == ELFDATA2MSB;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
}

struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
{
	Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
	int err = 0, fd = -1, idx = 0;
	struct btf *btf = NULL;
	Elf_Scn *scn = NULL;
	Elf *elf = NULL;
	GElf_Ehdr ehdr;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		err = -errno;
		pr_warning("failed to open %s: %s\n", path, strerror(errno));
		return ERR_PTR(err);
	}

	err = -LIBBPF_ERRNO__FORMAT;

	elf = elf_begin(fd, ELF_C_READ, NULL);
	if (!elf) {
		pr_warning("failed to open %s as ELF file\n", path);
		goto done;
	}
	if (!gelf_getehdr(elf, &ehdr)) {
		pr_warning("failed to get EHDR from %s\n", path);
		goto done;
	}
	if (!btf_check_endianness(&ehdr)) {
		pr_warning("non-native ELF endianness is not supported\n");
		goto done;
	}
	if (!elf_rawdata(elf_getscn(elf, ehdr.e_shstrndx), NULL)) {
		pr_warning("failed to get e_shstrndx from %s\n", path);
		goto done;
	}

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		GElf_Shdr sh;
		char *name;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warning("failed to get section(%d) header from %s\n",
				   idx, path);
			goto done;
		}
		name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name);
		if (!name) {
			pr_warning("failed to get section(%d) name from %s\n",
				   idx, path);
			goto done;
		}
		if (strcmp(name, BTF_ELF_SEC) == 0) {
			btf_data = elf_getdata(scn, 0);
			if (!btf_data) {
				pr_warning("failed to get section(%d, %s) data from %s\n",
					   idx, name, path);
				goto done;
			}
			continue;
		} else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
			btf_ext_data = elf_getdata(scn, 0);
			if (!btf_ext_data) {
				pr_warning("failed to get section(%d, %s) data from %s\n",
					   idx, name, path);
				goto done;
			}
			continue;
		}
	}

	err = 0;

	if (!btf_data) {
		err = -ENOENT;
		goto done;
	}
	btf = btf__new(btf_data->d_buf, btf_data->d_size);
	if (IS_ERR(btf))
		goto done;

	if (btf_ext && btf_ext_data) {
		*btf_ext = btf_ext__new(btf_ext_data->d_buf,
					btf_ext_data->d_size);
		if (IS_ERR(*btf_ext))
			goto done;
	} else if (btf_ext) {
		*btf_ext = NULL;
	}
done:
	if (elf)
		elf_end(elf);
	close(fd);

	if (err)
		return ERR_PTR(err);
	/*
	 * btf is always parsed before btf_ext, so no need to clean up
	 * btf_ext, if btf loading failed
	 */
	if (IS_ERR(btf))
		return btf;
	if (btf_ext && IS_ERR(*btf_ext)) {
		btf__free(btf);
		err = PTR_ERR(*btf_ext);
		return ERR_PTR(err);
	}
	return btf;
}

static int compare_vsi_off(const void *_a, const void *_b)
{
	const struct btf_var_secinfo *a = _a;
	const struct btf_var_secinfo *b = _b;

	return a->offset - b->offset;
}

static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
			     struct btf_type *t)
{
	__u32 size = 0, off = 0, i, vars = btf_vlen(t);
	const char *name = btf__name_by_offset(btf, t->name_off);
	const struct btf_type *t_var;
	struct btf_var_secinfo *vsi;
	const struct btf_var *var;
	int ret;

	if (!name) {
		pr_debug("No name found in string section for DATASEC kind.\n");
		return -ENOENT;
	}

	ret = bpf_object__section_size(obj, name, &size);
	if (ret || !size || (t->size && t->size != size)) {
		pr_debug("Invalid size for section %s: %u bytes\n", name, size);
		return -ENOENT;
	}

	t->size = size;

	for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
		t_var = btf__type_by_id(btf, vsi->type);
		var = btf_var(t_var);

		if (!btf_is_var(t_var)) {
			pr_debug("Non-VAR type seen in section %s\n", name);
			return -EINVAL;
		}

		if (var->linkage == BTF_VAR_STATIC)
			continue;

		name = btf__name_by_offset(btf, t_var->name_off);
		if (!name) {
			pr_debug("No name found in string section for VAR kind\n");
			return -ENOENT;
		}

		ret = bpf_object__variable_offset(obj, name, &off);
		if (ret) {
			pr_debug("No offset found in symbol table for VAR %s\n",
				 name);
			return -ENOENT;
		}

		vsi->offset = off;
	}

	qsort(t + 1, vars, sizeof(*vsi), compare_vsi_off);
	return 0;
}

int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
{
	int err = 0;
	__u32 i;

	for (i = 1; i <= btf->nr_types; i++) {
		struct btf_type *t = btf->types[i];

		/* Loader needs to fix up some of the things compiler
		 * couldn't get its hands on while emitting BTF. This
		 * is section size and global variable offset. We use
		 * the info from the ELF itself for this purpose.
		 */
		if (btf_is_datasec(t)) {
			err = btf_fixup_datasec(obj, btf, t);
			if (err)
				break;
		}
	}

	return err;
}

int btf__load(struct btf *btf)
{
	__u32 log_buf_size = BPF_LOG_BUF_SIZE;
	char *log_buf = NULL;
	int err = 0;

	if (btf->fd >= 0)
		return -EEXIST;

	log_buf = malloc(log_buf_size);
	if (!log_buf)
		return -ENOMEM;

	*log_buf = 0;

	btf->fd = bpf_load_btf(btf->data, btf->data_size,
			       log_buf, log_buf_size, false);
	if (btf->fd < 0) {
		err = -errno;
		pr_warning("Error loading BTF: %s(%d)\n", strerror(errno), errno);
		if (*log_buf)
			pr_warning("%s\n", log_buf);
		goto done;
	}

done:
	free(log_buf);
	return err;
}

int btf__fd(const struct btf *btf)
{
	return btf->fd;
}

const void *btf__get_raw_data(const struct btf *btf, __u32 *size)
{
	*size = btf->data_size;
	return btf->data;
}

const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
{
	if (offset < btf->hdr->str_len)
		return &btf->strings[offset];
	else
		return NULL;
}
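
/*
 * Illustrative usage sketch (not part of the original libbpf source): a
 * minimal caller that loads .BTF from an ELF object, fixes up DATASEC
 * entries, loads BTF into the kernel and looks up a type by name. The
 * object path "prog.bpf.o", the type name "my_struct" and the function
 * itself are hypothetical placeholders; error handling is abbreviated.
 */
#if 0
static int example_load_and_lookup(struct bpf_object *obj)
{
	struct btf_ext *btf_ext = NULL;
	struct btf *btf;
	__s32 id;
	__s64 sz;

	btf = btf__parse_elf("prog.bpf.o", &btf_ext);
	if (IS_ERR(btf))
		return PTR_ERR(btf);

	/* fill in DATASEC sizes/offsets from ELF, then load into the kernel */
	if (btf__finalize_data(obj, btf) || btf__load(btf))
		goto out;

	id = btf__find_by_name(btf, "my_struct");
	if (id >= 0) {
		sz = btf__resolve_size(btf, id);
		pr_debug("my_struct: id=%d size=%lld\n", id, (long long)sz);
	}
out:
	btf_ext__free(btf_ext);
	btf__free(btf);
	return 0;
}
#endif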

int btf__get_from_id(__u32 id, struct btf **btf)
{
	struct bpf_btf_info btf_info = { 0 };
	__u32 len = sizeof(btf_info);
	__u32 last_size;
	int btf_fd;
	void *ptr;
	int err;

	err = 0;
	*btf = NULL;
	btf_fd = bpf_btf_get_fd_by_id(id);
	if (btf_fd < 0)
		return 0;

	/* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so
	 * let's start with a sane default - 4KiB here - and resize it only if
	 * bpf_obj_get_info_by_fd() needs a bigger buffer.
	 */
	btf_info.btf_size = 4096;
	last_size = btf_info.btf_size;
	ptr = malloc(last_size);
	if (!ptr) {
		err = -ENOMEM;
		goto exit_free;
	}

	memset(ptr, 0, last_size);
	btf_info.btf = ptr_to_u64(ptr);
	err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);

	if (!err && btf_info.btf_size > last_size) {
		void *temp_ptr;

		last_size = btf_info.btf_size;
		temp_ptr = realloc(ptr, last_size);
		if (!temp_ptr) {
			err = -ENOMEM;
			goto exit_free;
		}
		ptr = temp_ptr;
		memset(ptr, 0, last_size);
		btf_info.btf = ptr_to_u64(ptr);
		err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
	}

	if (err || btf_info.btf_size > last_size) {
		err = errno;
		goto exit_free;
	}

	*btf = btf__new((__u8 *)(long)btf_info.btf, btf_info.btf_size);
	if (IS_ERR(*btf)) {
		err = PTR_ERR(*btf);
		*btf = NULL;
	}

exit_free:
	close(btf_fd);
	free(ptr);

	return err;
}

int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
			 __u32 expected_key_size, __u32 expected_value_size,
			 __u32 *key_type_id, __u32 *value_type_id)
{
	const struct btf_type *container_type;
	const struct btf_member *key, *value;
	const size_t max_name = 256;
	char container_name[max_name];
	__s64 key_size, value_size;
	__s32 container_id;

	if (snprintf(container_name, max_name, "____btf_map_%s", map_name) ==
	    max_name) {
		pr_warning("map:%s length of '____btf_map_%s' is too long\n",
			   map_name, map_name);
		return -EINVAL;
	}

	container_id = btf__find_by_name(btf, container_name);
	if (container_id < 0) {
		pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
			 map_name, container_name);
		return container_id;
	}

	container_type = btf__type_by_id(btf, container_id);
	if (!container_type) {
		pr_warning("map:%s cannot find BTF type for container_id:%u\n",
			   map_name, container_id);
		return -EINVAL;
	}

	if (!btf_is_struct(container_type) || btf_vlen(container_type) < 2) {
		pr_warning("map:%s container_name:%s is an invalid container struct\n",
			   map_name, container_name);
		return -EINVAL;
	}

	key = btf_members(container_type);
	value = key + 1;

	key_size = btf__resolve_size(btf, key->type);
	if (key_size < 0) {
		pr_warning("map:%s invalid BTF key_type_size\n", map_name);
		return key_size;
	}

	if (expected_key_size != key_size) {
		pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
			   map_name, (__u32)key_size, expected_key_size);
		return -EINVAL;
	}

	value_size = btf__resolve_size(btf, value->type);
	if (value_size < 0) {
		pr_warning("map:%s invalid BTF value_type_size\n", map_name);
		return value_size;
	}

	if (expected_value_size != value_size) {
		pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
			   map_name, (__u32)value_size, expected_value_size);
		return -EINVAL;
	}

	*key_type_id = key->type;
	*value_type_id = value->type;

	return 0;
}
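
/*
 * Illustrative sketch (not part of the original libbpf source):
 * btf__get_map_kv_tids() above looks up a struct named
 * "____btf_map_<map_name>" whose first two members describe the map's key
 * and value types; this is the container the BPF_ANNOTATE_KV_PAIR macro is
 * expected to emit on the BPF program side. A hand-written equivalent for a
 * hypothetical map "my_map" with __u32 keys and struct my_value values
 * would look like:
 */
#if 0
struct my_value { int x; };

struct ____btf_map_my_map {
	__u32 key;
	struct my_value value;
};
#endif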

struct btf_ext_sec_setup_param {
	__u32 off;
	__u32 len;
	__u32 min_rec_size;
	struct btf_ext_info *ext_info;
	const char *desc;
};

static int btf_ext_setup_info(struct btf_ext *btf_ext,
			      struct btf_ext_sec_setup_param *ext_sec)
{
	const struct btf_ext_info_sec *sinfo;
	struct btf_ext_info *ext_info;
	__u32 info_left, record_size;
	/* The start of the info sec (including the __u32 record_size). */
	void *info;

	if (ext_sec->len == 0)
		return 0;

	if (ext_sec->off & 0x03) {
		pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
			 ext_sec->desc);
		return -EINVAL;
	}

	info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
	info_left = ext_sec->len;

	if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
		pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
			 ext_sec->desc, ext_sec->off, ext_sec->len);
		return -EINVAL;
	}

	/* At least a record size */
	if (info_left < sizeof(__u32)) {
		pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
		return -EINVAL;
	}

	/* The record size needs to meet the minimum standard */
	record_size = *(__u32 *)info;
	if (record_size < ext_sec->min_rec_size ||
	    record_size & 0x03) {
		pr_debug("%s section in .BTF.ext has invalid record size %u\n",
			 ext_sec->desc, record_size);
		return -EINVAL;
	}

	sinfo = info + sizeof(__u32);
	info_left -= sizeof(__u32);

	/* If no records, return failure now so .BTF.ext won't be used. */
	if (!info_left) {
		pr_debug("%s section in .BTF.ext has no records", ext_sec->desc);
		return -EINVAL;
	}

	while (info_left) {
		unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
		__u64 total_record_size;
		__u32 num_records;

		if (info_left < sec_hdrlen) {
			pr_debug("%s section header is not found in .BTF.ext\n",
				 ext_sec->desc);
			return -EINVAL;
		}

		num_records = sinfo->num_info;
		if (num_records == 0) {
			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
				 ext_sec->desc);
			return -EINVAL;
		}

		total_record_size = sec_hdrlen +
				    (__u64)num_records * record_size;
		if (info_left < total_record_size) {
			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
				 ext_sec->desc);
			return -EINVAL;
		}

		info_left -= total_record_size;
		sinfo = (void *)sinfo + total_record_size;
	}

	ext_info = ext_sec->ext_info;
	ext_info->len = ext_sec->len - sizeof(__u32);
	ext_info->rec_size = record_size;
	ext_info->info = info + sizeof(__u32);

	return 0;
}

static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
{
	struct btf_ext_sec_setup_param param = {
		.off = btf_ext->hdr->func_info_off,
		.len = btf_ext->hdr->func_info_len,
		.min_rec_size = sizeof(struct bpf_func_info_min),
		.ext_info = &btf_ext->func_info,
		.desc = "func_info"
	};

	return btf_ext_setup_info(btf_ext, &param);
}

static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
{
	struct btf_ext_sec_setup_param param = {
		.off = btf_ext->hdr->line_info_off,
		.len = btf_ext->hdr->line_info_len,
		.min_rec_size = sizeof(struct bpf_line_info_min),
		.ext_info = &btf_ext->line_info,
		.desc = "line_info",
	};

	return btf_ext_setup_info(btf_ext, &param);
}

static int btf_ext_setup_offset_reloc(struct btf_ext *btf_ext)
{
	struct btf_ext_sec_setup_param param = {
		.off = btf_ext->hdr->offset_reloc_off,
		.len = btf_ext->hdr->offset_reloc_len,
		.min_rec_size = sizeof(struct bpf_offset_reloc),
		.ext_info = &btf_ext->offset_reloc_info,
		.desc = "offset_reloc",
	};

	return btf_ext_setup_info(btf_ext, &param);
}

static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
{
	const struct btf_ext_header *hdr = (struct btf_ext_header *)data;

	if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||
	    data_size < hdr->hdr_len) {
		pr_debug("BTF.ext header not found");
		return -EINVAL;
	}

	if (hdr->magic != BTF_MAGIC) {
		pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
		return -EINVAL;
	}

	if (hdr->version != BTF_VERSION) {
		pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
		return -ENOTSUP;
	}

	if (hdr->flags) {
		pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);
		return -ENOTSUP;
	}

	if (data_size == hdr->hdr_len) {
		pr_debug("BTF.ext has no data\n");
		return -EINVAL;
	}

	return 0;
}

void btf_ext__free(struct btf_ext *btf_ext)
{
	if (!btf_ext)
		return;
	free(btf_ext->data);
	free(btf_ext);
}

struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
{
	struct btf_ext *btf_ext;
	int err;

	err = btf_ext_parse_hdr(data, size);
	if (err)
		return ERR_PTR(err);

	btf_ext = calloc(1, sizeof(struct btf_ext));
	if (!btf_ext)
		return ERR_PTR(-ENOMEM);

	btf_ext->data_size = size;
	btf_ext->data = malloc(size);
	if (!btf_ext->data) {
		err = -ENOMEM;
		goto done;
	}
	memcpy(btf_ext->data, data, size);

	if (btf_ext->hdr->hdr_len <
	    offsetofend(struct btf_ext_header, line_info_len))
		goto done;
	err = btf_ext_setup_func_info(btf_ext);
	if (err)
		goto done;

	err = btf_ext_setup_line_info(btf_ext);
	if (err)
		goto done;

	if (btf_ext->hdr->hdr_len <
	    offsetofend(struct btf_ext_header, offset_reloc_len))
		goto done;
	err = btf_ext_setup_offset_reloc(btf_ext);
	if (err)
		goto done;

done:
	if (err) {
		btf_ext__free(btf_ext);
		return ERR_PTR(err);
	}

	return btf_ext;
}

const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
{
	*size = btf_ext->data_size;
	return btf_ext->data;
}

static int btf_ext_reloc_info(const struct btf *btf,
			      const struct btf_ext_info *ext_info,
			      const char *sec_name, __u32 insns_cnt,
			      void **info, __u32 *cnt)
{
	__u32 sec_hdrlen = sizeof(struct btf_ext_info_sec);
	__u32 i, record_size, existing_len, records_len;
	struct btf_ext_info_sec *sinfo;
	const char *info_sec_name;
	__u64 remain_len;
	void *data;

	record_size = ext_info->rec_size;
	sinfo = ext_info->info;
	remain_len = ext_info->len;
	while (remain_len > 0) {
		records_len = sinfo->num_info * record_size;
		info_sec_name = btf__name_by_offset(btf, sinfo->sec_name_off);
		if (strcmp(info_sec_name, sec_name)) {
			remain_len -= sec_hdrlen + records_len;
			sinfo = (void *)sinfo + sec_hdrlen + records_len;
			continue;
		}

		existing_len = (*cnt) * record_size;
		data = realloc(*info, existing_len + records_len);
		if (!data)
			return -ENOMEM;

		memcpy(data + existing_len, sinfo->data, records_len);
		/* adjust insn_off only, the rest of the data will be passed
		 * to the kernel.
		 */
		for (i = 0; i < sinfo->num_info; i++) {
			__u32 *insn_off;

			insn_off = data + existing_len + (i * record_size);
			*insn_off = *insn_off / sizeof(struct bpf_insn) +
				    insns_cnt;
		}
		*info = data;
		*cnt += sinfo->num_info;
		return 0;
	}

	return -ENOENT;
}

int btf_ext__reloc_func_info(const struct btf *btf,
			     const struct btf_ext *btf_ext,
			     const char *sec_name, __u32 insns_cnt,
			     void **func_info, __u32 *cnt)
{
	return btf_ext_reloc_info(btf, &btf_ext->func_info, sec_name,
				  insns_cnt, func_info, cnt);
}

int btf_ext__reloc_line_info(const struct btf *btf,
			     const struct btf_ext *btf_ext,
			     const char *sec_name, __u32 insns_cnt,
			     void **line_info, __u32 *cnt)
{
	return btf_ext_reloc_info(btf, &btf_ext->line_info, sec_name,
				  insns_cnt, line_info, cnt);
}

__u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext)
{
	return btf_ext->func_info.rec_size;
}

__u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext)
{
	return btf_ext->line_info.rec_size;
}
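
/*
 * Illustrative usage sketch (not part of the original libbpf source): how a
 * caller might use the reloc helpers above when appending instructions from
 * a (hypothetical) ELF section "raw_tracepoint/sys_enter" at offset
 * insns_cnt of the final program. func_info/cnt accumulate across calls, so
 * they must start out zeroed.
 */
#if 0
static int example_collect_func_info(const struct btf *btf,
				     const struct btf_ext *btf_ext,
				     __u32 insns_cnt)
{
	void *func_info = NULL;
	__u32 cnt = 0;
	int err;

	err = btf_ext__reloc_func_info(btf, btf_ext, "raw_tracepoint/sys_enter",
				       insns_cnt, &func_info, &cnt);
	if (err)
		return err;

	/* func_info now holds cnt records of
	 * btf_ext__func_info_rec_size(btf_ext) bytes each, with insn_off
	 * already rebased to the final program; suitable for BPF_PROG_LOAD.
	 */
	free(func_info);
	return 0;
}
#endif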

struct btf_dedup;

static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
				       const struct btf_dedup_opts *opts);
static void btf_dedup_free(struct btf_dedup *d);
static int btf_dedup_strings(struct btf_dedup *d);
static int btf_dedup_prim_types(struct btf_dedup *d);
static int btf_dedup_struct_types(struct btf_dedup *d);
static int btf_dedup_ref_types(struct btf_dedup *d);
static int btf_dedup_compact_types(struct btf_dedup *d);
static int btf_dedup_remap_types(struct btf_dedup *d);

/*
 * Deduplicate BTF types and strings.
 *
 * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF
 * section with all BTF type descriptors and string data. It overwrites that
 * memory in-place with deduplicated types and strings without any loss of
 * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section
 * is provided, all the strings referenced from .BTF.ext section are honored
 * and updated to point to the right offsets after deduplication.
 *
 * If function returns with error, type/string data might be garbled and should
 * be discarded.
 *
 * More verbose and detailed description of both the problem btf_dedup is
 * solving, as well as the solution, can be found at:
 * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
 *
 * Problem description and justification
 * =====================================
 *
 * BTF type information is typically emitted either as a result of conversion
 * from DWARF to BTF or directly by compiler. In both cases, each compilation
 * unit contains information about a subset of all the types that are used
 * in an application. These subsets are frequently overlapping and contain a lot
 * of duplicated information when later concatenated together into a single
 * binary. This algorithm ensures that each unique type is represented by a
 * single BTF type descriptor, greatly reducing the resulting size of BTF data.
 *
 * Compilation unit isolation and subsequent duplication of data is not the only
 * problem. The same type hierarchy (e.g., a struct and all the types that the
 * struct references) in different compilation units can be represented in BTF
 * to various degrees of completeness (or, rather, incompleteness) due to
 * struct/union forward declarations.
 *
 * Let's take a look at an example, that we'll use to better understand the
 * problem (and solution). Suppose we have two compilation units, each using
 * same `struct S`, but each of them having incomplete type information about
 * struct's fields:
 *
 * // CU #1:
 * struct S;
 * struct A {
 *	int a;
 *	struct A* self;
 *	struct S* parent;
 * };
 * struct B;
 * struct S {
 *	struct A* a_ptr;
 *	struct B* b_ptr;
 * };
 *
 * // CU #2:
 * struct S;
 * struct A;
 * struct B {
 *	int b;
 *	struct B* self;
 *	struct S* parent;
 * };
 * struct S {
 *	struct A* a_ptr;
 *	struct B* b_ptr;
 * };
 *
 * In case of CU #1, BTF data will know only that `struct B` exists (but no
 * more), but will know the complete type information about `struct A`. While
 * for CU #2, it will know full type information about `struct B`, but will
 * only know about forward declaration of `struct A` (in BTF terms, it will
 * have `BTF_KIND_FWD` type descriptor with name `A`).
 *
 * This compilation unit isolation means that it's possible that there is no
 * single CU with complete type information describing structs `S`, `A`, and
 * `B`. Also, we might get tons of duplicated and redundant type information.
 *
 * Additional complication we need to keep in mind comes from the fact that
 * types, in general, can form graphs containing cycles, not just DAGs.
 *
 * While algorithm does deduplication, it also merges and resolves type
 * information (unless disabled through `struct btf_dedup_opts`), whenever
 * possible. E.g., in the example above with two compilation units having
 * partial type information for structs `A` and `B`, the output of algorithm
 * will emit a single copy of each BTF type that describes structs `A`, `B`,
 * and `S` (as well as type information for `int` and pointers), as if they
 * were defined in a single compilation unit as:
 *
 * struct A {
 *	int a;
 *	struct A* self;
 *	struct S* parent;
 * };
 * struct B {
 *	int b;
 *	struct B* self;
 *	struct S* parent;
 * };
 * struct S {
 *	struct A* a_ptr;
 *	struct B* b_ptr;
 * };
 *
 * Algorithm summary
 * =================
 *
 * Algorithm completes its work in 6 separate passes:
 *
 * 1. Strings deduplication.
 * 2. Primitive types deduplication (int, enum, fwd).
 * 3. Struct/union types deduplication.
 * 4. Reference types deduplication (pointers, typedefs, arrays, funcs, func
 *    protos, and const/volatile/restrict modifiers).
 * 5. Types compaction.
 * 6. Types remapping.
 *
 * Algorithm determines canonical type descriptor, which is a single
 * representative type for each truly unique type. This canonical type is the
 * one that will go into final deduplicated BTF type information. For
 * struct/unions, it is also the type that algorithm will merge additional type
 * information into (while resolving FWDs), as it discovers it from data in
 * other CUs. Each input BTF type eventually gets either mapped to itself, if
 * that type is canonical, or to some other type, if that type is equivalent
 * and was chosen as canonical representative. This mapping is stored in
 * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that
 * FWD type got resolved to.
 *
 * To facilitate fast discovery of canonical types, we also maintain canonical
 * index (`btf_dedup->dedup_table`), which maps type descriptor's signature hash
 * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types
 * that match that signature. With sufficiently good choice of type signature
 * hashing function, we can limit number of canonical types for each unique type
 * signature to a very small number, allowing to find canonical type for any
 * duplicated type very quickly.
 *
 * Struct/union deduplication is the most critical part and algorithm for
 * deduplicating structs/unions is described in greater details in comments for
 * `btf_dedup_is_equiv` function.
 */
int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
	       const struct btf_dedup_opts *opts)
{
	struct btf_dedup *d = btf_dedup_new(btf, btf_ext, opts);
	int err;

	if (IS_ERR(d)) {
		pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
		return -EINVAL;
	}

	err = btf_dedup_strings(d);
	if (err < 0) {
		pr_debug("btf_dedup_strings failed:%d\n", err);
		goto done;
	}
	err = btf_dedup_prim_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_prim_types failed:%d\n", err);
		goto done;
	}
	err = btf_dedup_struct_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_struct_types failed:%d\n", err);
		goto done;
	}
	err = btf_dedup_ref_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_ref_types failed:%d\n", err);
		goto done;
	}
	err = btf_dedup_compact_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_compact_types failed:%d\n", err);
		goto done;
	}
	err = btf_dedup_remap_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_remap_types failed:%d\n", err);
		goto done;
	}

done:
	btf_dedup_free(d);
	return err;
}
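
/*
 * Illustrative usage sketch (not part of the original libbpf source):
 * deduplicating BTF (and, optionally, .BTF.ext) parsed from an ELF object.
 * The path parameter is a hypothetical placeholder; opts may also be NULL
 * to get default behavior.
 */
#if 0
static int example_dedup(const char *path)
{
	struct btf_dedup_opts opts = { .dont_resolve_fwds = false };
	struct btf_ext *btf_ext = NULL;
	struct btf *btf;
	int err;

	btf = btf__parse_elf(path, &btf_ext);
	if (IS_ERR(btf))
		return PTR_ERR(btf);

	/* rewrites btf/btf_ext in place; on error their contents are garbage */
	err = btf__dedup(btf, btf_ext, &opts);

	btf_ext__free(btf_ext);
	btf__free(btf);
	return err;
}
#endif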

#define BTF_UNPROCESSED_ID ((__u32)-1)
#define BTF_IN_PROGRESS_ID ((__u32)-2)

struct btf_dedup {
	/* .BTF section to be deduped in-place */
	struct btf *btf;
	/*
	 * Optional .BTF.ext section. When provided, any strings referenced
	 * from it will be taken into account when deduping strings
	 */
	struct btf_ext *btf_ext;
	/*
	 * This is a map from any type's signature hash to a list of possible
	 * canonical representative type candidates. Hash collisions are
	 * ignored, so even types of various kinds can share same list of
	 * candidates, which is fine because we rely on subsequent
	 * btf_xxx_equal() checks to authoritatively verify type equality.
	 */
	struct hashmap *dedup_table;
	/* Canonical types map */
	__u32 *map;
	/* Hypothetical mapping, used during type graph equivalence checks */
	__u32 *hypot_map;
	__u32 *hypot_list;
	size_t hypot_cnt;
	size_t hypot_cap;
	/* Various option modifying behavior of algorithm */
	struct btf_dedup_opts opts;
};

struct btf_str_ptr {
	const char *str;
	__u32 new_off;
	bool used;
};

struct btf_str_ptrs {
	struct btf_str_ptr *ptrs;
	const char *data;
	__u32 cnt;
	__u32 cap;
};

static long hash_combine(long h, long value)
{
	return h * 31 + value;
}

#define for_each_dedup_cand(d, node, hash) \
	hashmap__for_each_key_entry(d->dedup_table, node, (void *)hash)

static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
{
	return hashmap__append(d->dedup_table,
			       (void *)hash, (void *)(long)type_id);
}

static int btf_dedup_hypot_map_add(struct btf_dedup *d,
				   __u32 from_id, __u32 to_id)
{
	if (d->hypot_cnt == d->hypot_cap) {
		__u32 *new_list;

		d->hypot_cap += max(16, d->hypot_cap / 2);
		new_list = realloc(d->hypot_list, sizeof(__u32) * d->hypot_cap);
		if (!new_list)
			return -ENOMEM;
		d->hypot_list = new_list;
	}
	d->hypot_list[d->hypot_cnt++] = from_id;
	d->hypot_map[from_id] = to_id;
	return 0;
}

static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
{
	int i;

	for (i = 0; i < d->hypot_cnt; i++)
		d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
	d->hypot_cnt = 0;
}

static void btf_dedup_free(struct btf_dedup *d)
{
	hashmap__free(d->dedup_table);
	d->dedup_table = NULL;

	free(d->map);
	d->map = NULL;

	free(d->hypot_map);
	d->hypot_map = NULL;

	free(d->hypot_list);
	d->hypot_list = NULL;

	free(d);
}

static size_t btf_dedup_identity_hash_fn(const void *key, void *ctx)
{
	return (size_t)key;
}

static size_t btf_dedup_collision_hash_fn(const void *key, void *ctx)
{
	return 0;
}

static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx)
{
	return k1 == k2;
}

static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
				       const struct btf_dedup_opts *opts)
{
	struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
	hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
	int i, err = 0;

	if (!d)
		return ERR_PTR(-ENOMEM);

	d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
	/* dedup_table_size is now used only to force collisions in tests */
	if (opts && opts->dedup_table_size == 1)
		hash_fn = btf_dedup_collision_hash_fn;

	d->btf = btf;
	d->btf_ext = btf_ext;

	d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
	if (IS_ERR(d->dedup_table)) {
		err = PTR_ERR(d->dedup_table);
		d->dedup_table = NULL;
		goto done;
	}

	d->map = malloc(sizeof(__u32) * (1 + btf->nr_types));
	if (!d->map) {
		err = -ENOMEM;
		goto done;
	}
	/* special BTF "void" type is made canonical immediately */
	d->map[0] = 0;
	for (i = 1; i <= btf->nr_types; i++) {
		struct btf_type *t = d->btf->types[i];

		/* VAR and DATASEC are never deduped and are self-canonical */
		if (btf_is_var(t) || btf_is_datasec(t))
			d->map[i] = i;
		else
			d->map[i] = BTF_UNPROCESSED_ID;
	}

	d->hypot_map = malloc(sizeof(__u32) * (1 + btf->nr_types));
	if (!d->hypot_map) {
		err = -ENOMEM;
		goto done;
	}
	for (i = 0; i <= btf->nr_types; i++)
		d->hypot_map[i] = BTF_UNPROCESSED_ID;

done:
	if (err) {
		btf_dedup_free(d);
		return ERR_PTR(err);
	}

	return d;
}

typedef int (*str_off_fn_t)(__u32 *str_off_ptr, void *ctx);

/*
 * Iterate over all possible places in .BTF and .BTF.ext that can reference
 * string and pass pointer to it to a provided callback `fn`.
 */
static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx)
{
	void *line_data_cur, *line_data_end;
	int i, j, r, rec_size;
	struct btf_type *t;

	for (i = 1; i <= d->btf->nr_types; i++) {
		t = d->btf->types[i];
		r = fn(&t->name_off, ctx);
		if (r)
			return r;

		switch (btf_kind(t)) {
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION: {
			struct btf_member *m = btf_members(t);
			__u16 vlen = btf_vlen(t);

			for (j = 0; j < vlen; j++) {
				r = fn(&m->name_off, ctx);
				if (r)
					return r;
				m++;
			}
			break;
		}
		case BTF_KIND_ENUM: {
			struct btf_enum *m = btf_enum(t);
			__u16 vlen = btf_vlen(t);

			for (j = 0; j < vlen; j++) {
				r = fn(&m->name_off, ctx);
				if (r)
					return r;
				m++;
			}
			break;
		}
		case BTF_KIND_FUNC_PROTO: {
			struct btf_param *m = btf_params(t);
			__u16 vlen = btf_vlen(t);

			for (j = 0; j < vlen; j++) {
				r = fn(&m->name_off, ctx);
				if (r)
					return r;
				m++;
			}
			break;
		}
		default:
			break;
		}
	}

	if (!d->btf_ext)
		return 0;

	line_data_cur = d->btf_ext->line_info.info;
	line_data_end = d->btf_ext->line_info.info + d->btf_ext->line_info.len;
	rec_size = d->btf_ext->line_info.rec_size;

	while (line_data_cur < line_data_end) {
		struct btf_ext_info_sec *sec = line_data_cur;
		struct bpf_line_info_min *line_info;
		__u32 num_info = sec->num_info;

		r = fn(&sec->sec_name_off, ctx);
		if (r)
			return r;

		line_data_cur += sizeof(struct btf_ext_info_sec);
		for (i = 0; i < num_info; i++) {
			line_info = line_data_cur;
			r = fn(&line_info->file_name_off, ctx);
			if (r)
				return r;
			r = fn(&line_info->line_off, ctx);
			if (r)
				return r;
			line_data_cur += rec_size;
		}
	}

	return 0;
}

static int str_sort_by_content(const void *a1, const void *a2)
{
	const struct btf_str_ptr *p1 = a1;
	const struct btf_str_ptr *p2 = a2;

	return strcmp(p1->str, p2->str);
}

static int str_sort_by_offset(const void *a1, const void *a2)
{
	const struct btf_str_ptr *p1 = a1;
	const struct btf_str_ptr *p2 = a2;

	if (p1->str != p2->str)
		return p1->str < p2->str ? -1 : 1;
	return 0;
}

static int btf_dedup_str_ptr_cmp(const void *str_ptr, const void *pelem)
{
	const struct btf_str_ptr *p = pelem;

	if (str_ptr != p->str)
		return (const char *)str_ptr < p->str ? -1 : 1;
	return 0;
}

static int btf_str_mark_as_used(__u32 *str_off_ptr, void *ctx)
{
	struct btf_str_ptrs *strs;
	struct btf_str_ptr *s;

	if (*str_off_ptr == 0)
		return 0;

	strs = ctx;
	s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
		    sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
	if (!s)
		return -EINVAL;
	s->used = true;
	return 0;
}

static int btf_str_remap_offset(__u32 *str_off_ptr, void *ctx)
{
	struct btf_str_ptrs *strs;
	struct btf_str_ptr *s;

	if (*str_off_ptr == 0)
		return 0;

	strs = ctx;
	s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
		    sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
	if (!s)
		return -EINVAL;
	*str_off_ptr = s->new_off;
	return 0;
}

/*
 * Dedup string and filter out those that are not referenced from either .BTF
 * or .BTF.ext (if provided) sections.
 *
 * This is done by building index of all strings in BTF's string section,
 * then iterating over all entities that can reference strings (e.g., type
 * names, struct field names, .BTF.ext line info, etc) and marking corresponding
 * strings as used. After that all used strings are deduped and compacted into
 * sequential blob of memory and new offsets are calculated. Then all the string
 * references are iterated again and rewritten using new offsets.
 */
static int btf_dedup_strings(struct btf_dedup *d)
{
	const struct btf_header *hdr = d->btf->hdr;
	char *start = (char *)d->btf->nohdr_data + hdr->str_off;
	char *end = start + d->btf->hdr->str_len;
	char *p = start, *tmp_strs = NULL;
	struct btf_str_ptrs strs = {
		.cnt = 0,
		.cap = 0,
		.ptrs = NULL,
		.data = start,
	};
	int i, j, err = 0, grp_idx;
	bool grp_used;

	/* build index of all strings */
	while (p < end) {
		if (strs.cnt + 1 > strs.cap) {
			struct btf_str_ptr *new_ptrs;

			strs.cap += max(strs.cnt / 2, 16);
			new_ptrs = realloc(strs.ptrs,
					   sizeof(strs.ptrs[0]) * strs.cap);
			if (!new_ptrs) {
				err = -ENOMEM;
				goto done;
			}
			strs.ptrs = new_ptrs;
		}

		strs.ptrs[strs.cnt].str = p;
		strs.ptrs[strs.cnt].used = false;

		p += strlen(p) + 1;
		strs.cnt++;
	}

	/* temporary storage for deduplicated strings */
	tmp_strs = malloc(d->btf->hdr->str_len);
	if (!tmp_strs) {
		err = -ENOMEM;
		goto done;
	}

	/* mark all used strings */
	strs.ptrs[0].used = true;
	err = btf_for_each_str_off(d, btf_str_mark_as_used, &strs);
	if (err)
		goto done;

	/* sort strings by content, so that we can identify duplicates */
	qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_content);

	/*
	 * iterate groups of equal strings and if any instance in a group was
	 * referenced, emit single instance and remember new offset
	 */
	p = tmp_strs;
	grp_idx = 0;
	grp_used = strs.ptrs[0].used;
	/* iterate past end to avoid code duplication after loop */
	for (i = 1; i <= strs.cnt; i++) {
		/*
		 * when i == strs.cnt, we want to skip string comparison and go
		 * straight to handling last group of strings (otherwise we'd
		 * need to handle last group after the loop w/ duplicated code)
		 */
		if (i < strs.cnt &&
		    !strcmp(strs.ptrs[i].str, strs.ptrs[grp_idx].str)) {
			grp_used = grp_used || strs.ptrs[i].used;
			continue;
		}

		/*
		 * this check would have been required after the loop to handle
		 * last group of strings, but due to <= condition in a loop
		 * we avoid that duplication
		 */
		if (grp_used) {
			int new_off = p - tmp_strs;
			__u32 len = strlen(strs.ptrs[grp_idx].str);

			memmove(p, strs.ptrs[grp_idx].str, len + 1);
			for (j = grp_idx; j < i; j++)
				strs.ptrs[j].new_off = new_off;
			p += len + 1;
		}

		if (i < strs.cnt) {
			grp_idx = i;
			grp_used = strs.ptrs[i].used;
		}
	}

	/* replace original strings with deduped ones */
	d->btf->hdr->str_len = p - tmp_strs;
	memmove(start, tmp_strs, d->btf->hdr->str_len);
	end = start + d->btf->hdr->str_len;

	/* restore original order for further binary search lookups */
	qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_offset);

	/* remap string offsets */
	err = btf_for_each_str_off(d, btf_str_remap_offset, &strs);
	if (err)
		goto done;

	d->btf->hdr->str_len = end - start;

done:
	free(tmp_strs);
	free(strs.ptrs);
	return err;
}

static long btf_hash_common(struct btf_type *t)
{
	long h;

	h = hash_combine(0, t->name_off);
	h = hash_combine(h, t->info);
	h = hash_combine(h, t->size);
	return h;
}

static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
{
	return t1->name_off == t2->name_off &&
	       t1->info == t2->info &&
	       t1->size == t2->size;
}

/* Calculate type signature hash of INT. */
static long btf_hash_int(struct btf_type *t)
{
	__u32 info = *(__u32 *)(t + 1);
	long h;

	h = btf_hash_common(t);
	h = hash_combine(h, info);
	return h;
}

/* Check structural equality of two INTs. */
static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
{
	__u32 info1, info2;

	if (!btf_equal_common(t1, t2))
		return false;
	info1 = *(__u32 *)(t1 + 1);
	info2 = *(__u32 *)(t2 + 1);
	return info1 == info2;
}

/* Calculate type signature hash of ENUM. */
static long btf_hash_enum(struct btf_type *t)
{
	long h;

	/* don't hash vlen and enum members to support enum fwd resolving */
	h = hash_combine(0, t->name_off);
	h = hash_combine(h, t->info & ~0xffff);
	h = hash_combine(h, t->size);
	return h;
}

/* Check structural equality of two ENUMs. */
static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
{
	const struct btf_enum *m1, *m2;
	__u16 vlen;
	int i;

	if (!btf_equal_common(t1, t2))
		return false;

	vlen = btf_vlen(t1);
	m1 = btf_enum(t1);
	m2 = btf_enum(t2);
	for (i = 0; i < vlen; i++) {
		if (m1->name_off != m2->name_off || m1->val != m2->val)
			return false;
		m1++;
		m2++;
	}
	return true;
}

static inline bool btf_is_enum_fwd(struct btf_type *t)
{
	return btf_is_enum(t) && btf_vlen(t) == 0;
}

static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
{
	if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
		return btf_equal_enum(t1, t2);
	/* ignore vlen when comparing */
	return t1->name_off == t2->name_off &&
	       (t1->info & ~0xffff) == (t2->info & ~0xffff) &&
	       t1->size == t2->size;
}

/*
 * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
 * as referenced type IDs equivalence is established separately during type
 * graph equivalence check algorithm.
 */
static long btf_hash_struct(struct btf_type *t)
{
	const struct btf_member *member = btf_members(t);
	__u32 vlen = btf_vlen(t);
	long h = btf_hash_common(t);
	int i;

	for (i = 0; i < vlen; i++) {
		h = hash_combine(h, member->name_off);
		h = hash_combine(h, member->offset);
		/* no hashing of referenced type ID, it can be unresolved yet */
		member++;
	}
	return h;
}

/*
 * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced
 * type IDs. This check is performed during type graph equivalence check and
 * referenced types equivalence is checked separately.
 */
static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
{
	const struct btf_member *m1, *m2;
	__u16 vlen;
	int i;

	if (!btf_equal_common(t1, t2))
		return false;

	vlen = btf_vlen(t1);
	m1 = btf_members(t1);
	m2 = btf_members(t2);
	for (i = 0; i < vlen; i++) {
		if (m1->name_off != m2->name_off || m1->offset != m2->offset)
			return false;
		m1++;
		m2++;
	}
	return true;
}

/*
 * Calculate type signature hash of ARRAY, including referenced type IDs,
 * under assumption that they were already resolved to canonical type IDs and
 * are not going to change.
 */
static long btf_hash_array(struct btf_type *t)
{
	const struct btf_array *info = btf_array(t);
	long h = btf_hash_common(t);

	h = hash_combine(h, info->type);
	h = hash_combine(h, info->index_type);
	h = hash_combine(h, info->nelems);
	return h;
}

/*
 * Check exact equality of two ARRAYs, taking into account referenced
 * type IDs, under assumption that they were already resolved to canonical
 * type IDs and are not going to change.
 * This function is called during reference types deduplication to compare
 * ARRAY to potential canonical representative.
 */
static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
{
	const struct btf_array *info1, *info2;

	if (!btf_equal_common(t1, t2))
		return false;

	info1 = btf_array(t1);
	info2 = btf_array(t2);
	return info1->type == info2->type &&
	       info1->index_type == info2->index_type &&
	       info1->nelems == info2->nelems;
}

/*
 * Check structural compatibility of two ARRAYs, ignoring referenced type
 * IDs. This check is performed during type graph equivalence check and
 * referenced types equivalence is checked separately.
 */
static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
{
	if (!btf_equal_common(t1, t2))
		return false;

	return btf_array(t1)->nelems == btf_array(t2)->nelems;
}

/*
 * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
 * under assumption that they were already resolved to canonical type IDs and
 * are not going to change.
 */
static long btf_hash_fnproto(struct btf_type *t)
{
	const struct btf_param *member = btf_params(t);
	__u16 vlen = btf_vlen(t);
	long h = btf_hash_common(t);
	int i;

	for (i = 0; i < vlen; i++) {
		h = hash_combine(h, member->name_off);
		h = hash_combine(h, member->type);
		member++;
	}
	return h;
}

/*
 * Check exact equality of two FUNC_PROTOs, taking into account referenced
 * type IDs, under assumption that they were already resolved to canonical
 * type IDs and are not going to change.
 * This function is called during reference types deduplication to compare
 * FUNC_PROTO to potential canonical representative.
 */
static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
{
	const struct btf_param *m1, *m2;
	__u16 vlen;
	int i;

	if (!btf_equal_common(t1, t2))
		return false;

	vlen = btf_vlen(t1);
	m1 = btf_params(t1);
	m2 = btf_params(t2);
	for (i = 0; i < vlen; i++) {
		if (m1->name_off != m2->name_off || m1->type != m2->type)
			return false;
		m1++;
		m2++;
	}
	return true;
}

/*
 * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
 * IDs. This check is performed during type graph equivalence check and
 * referenced types equivalence is checked separately.
 */
static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
{
	const struct btf_param *m1, *m2;
	__u16 vlen;
	int i;

	/* skip return type ID */
	if (t1->name_off != t2->name_off || t1->info != t2->info)
		return false;

	vlen = btf_vlen(t1);
	m1 = btf_params(t1);
	m2 = btf_params(t2);
	for (i = 0; i < vlen; i++) {
		if (m1->name_off != m2->name_off)
			return false;
		m1++;
		m2++;
	}
	return true;
}

/*
 * Deduplicate primitive types, that can't reference other types, by calculating
 * their type signature hash and comparing them with any possible canonical
 * candidate. If no canonical candidate matches, type itself is marked as
 * canonical and is added into `btf_dedup->dedup_table` as another candidate.
 */
1991 */ 1992 static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) 1993 { 1994 struct btf_type *t = d->btf->types[type_id]; 1995 struct hashmap_entry *hash_entry; 1996 struct btf_type *cand; 1997 /* if we don't find equivalent type, then we are canonical */ 1998 __u32 new_id = type_id; 1999 __u32 cand_id; 2000 long h; 2001 2002 switch (btf_kind(t)) { 2003 case BTF_KIND_CONST: 2004 case BTF_KIND_VOLATILE: 2005 case BTF_KIND_RESTRICT: 2006 case BTF_KIND_PTR: 2007 case BTF_KIND_TYPEDEF: 2008 case BTF_KIND_ARRAY: 2009 case BTF_KIND_STRUCT: 2010 case BTF_KIND_UNION: 2011 case BTF_KIND_FUNC: 2012 case BTF_KIND_FUNC_PROTO: 2013 case BTF_KIND_VAR: 2014 case BTF_KIND_DATASEC: 2015 return 0; 2016 2017 case BTF_KIND_INT: 2018 h = btf_hash_int(t); 2019 for_each_dedup_cand(d, hash_entry, h) { 2020 cand_id = (__u32)(long)hash_entry->value; 2021 cand = d->btf->types[cand_id]; 2022 if (btf_equal_int(t, cand)) { 2023 new_id = cand_id; 2024 break; 2025 } 2026 } 2027 break; 2028 2029 case BTF_KIND_ENUM: 2030 h = btf_hash_enum(t); 2031 for_each_dedup_cand(d, hash_entry, h) { 2032 cand_id = (__u32)(long)hash_entry->value; 2033 cand = d->btf->types[cand_id]; 2034 if (btf_equal_enum(t, cand)) { 2035 new_id = cand_id; 2036 break; 2037 } 2038 if (d->opts.dont_resolve_fwds) 2039 continue; 2040 if (btf_compat_enum(t, cand)) { 2041 if (btf_is_enum_fwd(t)) { 2042 /* resolve fwd to full enum */ 2043 new_id = cand_id; 2044 break; 2045 } 2046 /* resolve canonical enum fwd to full enum */ 2047 d->map[cand_id] = type_id; 2048 } 2049 } 2050 break; 2051 2052 case BTF_KIND_FWD: 2053 h = btf_hash_common(t); 2054 for_each_dedup_cand(d, hash_entry, h) { 2055 cand_id = (__u32)(long)hash_entry->value; 2056 cand = d->btf->types[cand_id]; 2057 if (btf_equal_common(t, cand)) { 2058 new_id = cand_id; 2059 break; 2060 } 2061 } 2062 break; 2063 2064 default: 2065 return -EINVAL; 2066 } 2067 2068 d->map[type_id] = new_id; 2069 if (type_id == new_id && btf_dedup_table_add(d, h, type_id)) 2070 return -ENOMEM; 2071 2072 return 0; 2073 } 2074 2075 static int btf_dedup_prim_types(struct btf_dedup *d) 2076 { 2077 int i, err; 2078 2079 for (i = 1; i <= d->btf->nr_types; i++) { 2080 err = btf_dedup_prim_type(d, i); 2081 if (err) 2082 return err; 2083 } 2084 return 0; 2085 } 2086 2087 /* 2088 * Check whether type is already mapped into canonical one (could be to itself). 2089 */ 2090 static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id) 2091 { 2092 return d->map[type_id] <= BTF_MAX_NR_TYPES; 2093 } 2094 2095 /* 2096 * Resolve type ID into its canonical type ID, if any; otherwise return original 2097 * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow 2098 * STRUCT/UNION link and resolve it into canonical type ID as well. 2099 */ 2100 static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id) 2101 { 2102 while (is_type_mapped(d, type_id) && d->map[type_id] != type_id) 2103 type_id = d->map[type_id]; 2104 return type_id; 2105 } 2106 2107 /* 2108 * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original 2109 * type ID. 
 */
static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
{
	__u32 orig_type_id = type_id;

	if (!btf_is_fwd(d->btf->types[type_id]))
		return type_id;

	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
		type_id = d->map[type_id];

	if (!btf_is_fwd(d->btf->types[type_id]))
		return type_id;

	return orig_type_id;
}


static inline __u16 btf_fwd_kind(struct btf_type *t)
{
	return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
}

/*
 * Check equivalence of BTF type graph formed by candidate struct/union (we'll
 * call it "candidate graph" in this description for brevity) to a type graph
 * formed by (potential) canonical struct/union ("canonical graph" for brevity
 * here, though keep in mind that not all types in canonical graph are
 * necessarily canonical representatives themselves, some of them might be
 * duplicates or their uniqueness might not have been established yet).
 * Returns:
 *  - >0, if type graphs are equivalent;
 *  - 0, if not equivalent;
 *  - <0, on error.
 *
 * Algorithm performs side-by-side DFS traversal of both type graphs and checks
 * equivalence of BTF types at each step. If at any point BTF types in candidate
 * and canonical graphs are not compatible structurally, whole graphs are
 * incompatible. If types are structurally equivalent (i.e., all information
 * except referenced type IDs is exactly the same), a mapping from `canon_id` to
 * a `cand_id` is recorded in the hypothetical mapping (`btf_dedup->hypot_map`).
 * If a type references other types, then those referenced types are checked
 * for equivalence recursively.
 *
 * During DFS traversal, if we find that for current `canon_id` type we
 * already have some mapping in hypothetical map, we check for two possible
 * situations:
 *   - `canon_id` is mapped to exactly the same type as `cand_id`. This will
 *     happen when type graphs have cycles. In this case we assume those two
 *     types are equivalent.
 *   - `canon_id` is mapped to a different type. This is a contradiction in
 *     our hypothetical mapping, because the same type in canonical graph
 *     corresponds to two different types in candidate graph, which for
 *     equivalent type graphs shouldn't happen. This condition terminates
 *     equivalence check with negative result.
 *
 * If type graph traversal exhausts types to check and finds no contradiction,
 * then type graphs are equivalent.
 *
 * When checking types for equivalence, there is one special case: FWD types.
 * If FWD type resolution is allowed and one of the types (either from canonical
 * or candidate graph) is FWD and other is STRUCT/UNION (depending on FWD's kind
 * flag) and their names match, hypothetical mapping is updated to point from
 * FWD to STRUCT/UNION. If the graphs are successfully determined to be
 * equivalent, this mapping will be used to record FWD -> STRUCT/UNION mapping
 * permanently.
 *
 * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
 * if there are two identically named (or anonymous) structs/unions that are
 * compatible structurally, one of which has a FWD field, while the other's
 * corresponding field is a concrete STRUCT/UNION, but according to C sources
 * they are different structs/unions that are referencing different types with
 * the same name. This is extremely unlikely to happen, but btf_dedup API
 * allows disabling FWD resolution if this logic is causing problems.
 *
 * Doing FWD resolution means that both candidate and canonical graphs can
 * consist of portions of the graph that come from multiple compilation units.
 * This is due to the fact that types within a single compilation unit are
 * always deduplicated and FWDs are already resolved, if the referenced
 * struct/union definition is available. So, if we had an unresolved FWD and
 * found a corresponding STRUCT/UNION, they will be from different compilation
 * units. This consequently means that when we "link" FWD to corresponding
 * STRUCT/UNION, the type graph will likely have at least two different BTF
 * types that describe the same type (e.g., most probably there will be two
 * different BTF types for the same 'int' primitive type) and could even have
 * "overlapping" parts of the type graph that describe the same subset of
 * types.
 *
 * This in turn means that our assumption that each type in canonical graph
 * must correspond to exactly one type in candidate graph might not hold
 * anymore and will make it harder to detect contradictions using hypothetical
 * map. To handle this problem, we only follow FWD -> STRUCT/UNION resolution
 * in the canonical graph. FWDs in candidate graphs are never resolved. To see
 * why it's OK, let's check all possible situations w.r.t. FWDs that can occur:
 *   - Both types in canonical and candidate graphs are FWDs. If they are
 *     structurally equivalent, then they can either be both resolved to the
 *     same STRUCT/UNION or not resolved at all. In both cases they are
 *     equivalent and there is no need to resolve FWD on candidate side.
 *   - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
 *     so there is nothing to resolve either; the algorithm will check
 *     equivalence anyway.
 *   - Type in canonical graph is FWD, while type in candidate is concrete
 *     STRUCT/UNION. In this case candidate graph comes from a single
 *     compilation unit, so there is exactly one BTF type for each unique C
 *     type. After resolving FWD into STRUCT/UNION, there might be more than
 *     one BTF type in canonical graph mapping to a single BTF type in
 *     candidate graph, but because hypothetical mapping maps from canonical to
 *     candidate types, it's alright, and we still maintain the property of
 *     having a single `canon_id` mapping to a single `cand_id` (there could be
 *     two different `canon_id` mapped to the same `cand_id`, but it's not
 *     contradictory).
 *   - Type in canonical graph is concrete STRUCT/UNION, while type in
 *     candidate graph is FWD. In this case we are just going to check
 *     compatibility of STRUCT/UNION and corresponding FWD, and if they are
 *     compatible, we'll assume that whatever STRUCT/UNION the FWD resolves to
 *     must be equivalent to the concrete STRUCT/UNION from canonical graph.
 *     If the rest of the type graphs turn out equivalent, the FWD will be
 *     resolved to the concrete STRUCT/UNION from canonical graph.
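 *
 * As a concrete illustration of that last case: if the canonical graph has
 * 'struct s { struct t *p; }' with a fully defined 'struct t', while the
 * candidate graph has the same 'struct s' but only a FWD for 'struct t', the
 * graphs are still considered equivalent; once the whole check succeeds, the
 * candidate's FWD is resolved to the canonical concrete 'struct t' through
 * the recorded hypothetical mapping (see btf_dedup_merge_hypot_map below).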
2225 */ 2226 static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, 2227 __u32 canon_id) 2228 { 2229 struct btf_type *cand_type; 2230 struct btf_type *canon_type; 2231 __u32 hypot_type_id; 2232 __u16 cand_kind; 2233 __u16 canon_kind; 2234 int i, eq; 2235 2236 /* if both resolve to the same canonical, they must be equivalent */ 2237 if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id)) 2238 return 1; 2239 2240 canon_id = resolve_fwd_id(d, canon_id); 2241 2242 hypot_type_id = d->hypot_map[canon_id]; 2243 if (hypot_type_id <= BTF_MAX_NR_TYPES) 2244 return hypot_type_id == cand_id; 2245 2246 if (btf_dedup_hypot_map_add(d, canon_id, cand_id)) 2247 return -ENOMEM; 2248 2249 cand_type = d->btf->types[cand_id]; 2250 canon_type = d->btf->types[canon_id]; 2251 cand_kind = btf_kind(cand_type); 2252 canon_kind = btf_kind(canon_type); 2253 2254 if (cand_type->name_off != canon_type->name_off) 2255 return 0; 2256 2257 /* FWD <--> STRUCT/UNION equivalence check, if enabled */ 2258 if (!d->opts.dont_resolve_fwds 2259 && (cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD) 2260 && cand_kind != canon_kind) { 2261 __u16 real_kind; 2262 __u16 fwd_kind; 2263 2264 if (cand_kind == BTF_KIND_FWD) { 2265 real_kind = canon_kind; 2266 fwd_kind = btf_fwd_kind(cand_type); 2267 } else { 2268 real_kind = cand_kind; 2269 fwd_kind = btf_fwd_kind(canon_type); 2270 } 2271 return fwd_kind == real_kind; 2272 } 2273 2274 if (cand_kind != canon_kind) 2275 return 0; 2276 2277 switch (cand_kind) { 2278 case BTF_KIND_INT: 2279 return btf_equal_int(cand_type, canon_type); 2280 2281 case BTF_KIND_ENUM: 2282 if (d->opts.dont_resolve_fwds) 2283 return btf_equal_enum(cand_type, canon_type); 2284 else 2285 return btf_compat_enum(cand_type, canon_type); 2286 2287 case BTF_KIND_FWD: 2288 return btf_equal_common(cand_type, canon_type); 2289 2290 case BTF_KIND_CONST: 2291 case BTF_KIND_VOLATILE: 2292 case BTF_KIND_RESTRICT: 2293 case BTF_KIND_PTR: 2294 case BTF_KIND_TYPEDEF: 2295 case BTF_KIND_FUNC: 2296 if (cand_type->info != canon_type->info) 2297 return 0; 2298 return btf_dedup_is_equiv(d, cand_type->type, canon_type->type); 2299 2300 case BTF_KIND_ARRAY: { 2301 const struct btf_array *cand_arr, *canon_arr; 2302 2303 if (!btf_compat_array(cand_type, canon_type)) 2304 return 0; 2305 cand_arr = btf_array(cand_type); 2306 canon_arr = btf_array(canon_type); 2307 eq = btf_dedup_is_equiv(d, 2308 cand_arr->index_type, canon_arr->index_type); 2309 if (eq <= 0) 2310 return eq; 2311 return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type); 2312 } 2313 2314 case BTF_KIND_STRUCT: 2315 case BTF_KIND_UNION: { 2316 const struct btf_member *cand_m, *canon_m; 2317 __u16 vlen; 2318 2319 if (!btf_shallow_equal_struct(cand_type, canon_type)) 2320 return 0; 2321 vlen = btf_vlen(cand_type); 2322 cand_m = btf_members(cand_type); 2323 canon_m = btf_members(canon_type); 2324 for (i = 0; i < vlen; i++) { 2325 eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type); 2326 if (eq <= 0) 2327 return eq; 2328 cand_m++; 2329 canon_m++; 2330 } 2331 2332 return 1; 2333 } 2334 2335 case BTF_KIND_FUNC_PROTO: { 2336 const struct btf_param *cand_p, *canon_p; 2337 __u16 vlen; 2338 2339 if (!btf_compat_fnproto(cand_type, canon_type)) 2340 return 0; 2341 eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type); 2342 if (eq <= 0) 2343 return eq; 2344 vlen = btf_vlen(cand_type); 2345 cand_p = btf_params(cand_type); 2346 canon_p = btf_params(canon_type); 2347 for (i = 0; i < vlen; i++) { 2348 eq = btf_dedup_is_equiv(d, cand_p->type, 
						canon_p->type);
			if (eq <= 0)
				return eq;
			cand_p++;
			canon_p++;
		}
		return 1;
	}

	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * Use hypothetical mapping, produced by successful type graph equivalence
 * check, to augment existing struct/union canonical mapping, where possible.
 *
 * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
 * FWD -> STRUCT/UNION correspondence. FWD resolution is bidirectional:
 * it doesn't matter if FWD type was part of canonical graph or candidate one,
 * we are recording the mapping anyway. As opposed to carefulness required
 * for struct/union correspondence mapping (described below), for FWD
 * resolution it's not important, as by the time that FWD type (a reference
 * type) is deduplicated all structs/unions will have been deduped already
 * anyway.
 *
 * Recording STRUCT/UNION mapping is purely a performance optimization and is
 * not required for correctness. It needs to be done carefully to ensure that
 * struct/union from candidate's type graph is not mapped into corresponding
 * struct/union from canonical type graph that itself hasn't been resolved into
 * canonical representative. The only guarantee we have is that canonical
 * struct/union was determined as canonical and that won't change. But any
 * types referenced through that struct/union's fields might not have been
 * resolved yet, so in a case like that it's too early to establish any kind
 * of correspondence between structs/unions.
 *
 * No canonical correspondence is derived for primitive types (they are
 * deduplicated completely already anyway) or reference types (they rely on
 * stability of struct/union canonical relationship for equivalence checks).
 */
static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
{
	__u32 cand_type_id, targ_type_id;
	__u16 t_kind, c_kind;
	__u32 t_id, c_id;
	int i;

	for (i = 0; i < d->hypot_cnt; i++) {
		cand_type_id = d->hypot_list[i];
		targ_type_id = d->hypot_map[cand_type_id];
		t_id = resolve_type_id(d, targ_type_id);
		c_id = resolve_type_id(d, cand_type_id);
		t_kind = btf_kind(d->btf->types[t_id]);
		c_kind = btf_kind(d->btf->types[c_id]);
		/*
		 * Resolve FWD into STRUCT/UNION.
		 * It's ok to resolve FWD into STRUCT/UNION that's not yet
		 * mapped to canonical representative (as opposed to
		 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
		 * eventually that struct is going to be mapped and all resolved
		 * FWDs will automatically resolve to correct canonical
		 * representative. This will happen before ref type deduping,
		 * which critically depends on stability of these mappings.
		 * This stability is not a requirement for STRUCT/UNION
		 * equivalence checks, though.
		 */
		if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
			d->map[c_id] = t_id;
		else if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
			d->map[t_id] = c_id;

		if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
		    c_kind != BTF_KIND_FWD &&
		    is_type_mapped(d, c_id) &&
		    !is_type_mapped(d, t_id)) {
			/*
			 * as a perf optimization, we can map struct/union
			 * that's part of type graph we just verified for
			 * equivalence. We can do that for struct/union that has
			 * canonical representative only, though.
			 */
			d->map[t_id] = c_id;
		}
	}
}

/*
 * Deduplicate struct/union types.
 *
 * For each struct/union type its type signature hash is calculated, taking
 * into account type's name, size, and the number, order and names of fields,
 * but ignoring type IDs referenced from fields, because they might not be
 * deduped completely until after the reference types deduplication phase.
 * This type hash is used to iterate over all potential canonical types,
 * sharing the same hash. For each canonical candidate we check whether type
 * graphs that they form (through referenced types in fields and so on) are
 * equivalent using algorithm implemented in `btf_dedup_is_equiv`. If such
 * equivalence is found and BTF_KIND_FWD resolution is allowed, then
 * hypothetical mapping (btf_dedup->hypot_map) produced by aforementioned type
 * graph equivalence algorithm is used to record FWD -> STRUCT/UNION mapping.
 * It's also used to potentially map other structs/unions to their canonical
 * representatives, if such relationship hasn't yet been established. This
 * speeds up algorithm by eliminating some of the duplicate work.
 *
 * If no matching canonical representative was found, struct/union is marked
 * as canonical for itself and is added into btf_dedup->dedup_table hash map
 * for further lookups.
 */
static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
{
	struct btf_type *cand_type, *t;
	struct hashmap_entry *hash_entry;
	/* if we don't find equivalent type, then we are canonical */
	__u32 new_id = type_id;
	__u16 kind;
	long h;

	/* already deduped or is in process of deduping (loop detected) */
	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
		return 0;

	t = d->btf->types[type_id];
	kind = btf_kind(t);

	if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
		return 0;

	h = btf_hash_struct(t);
	for_each_dedup_cand(d, hash_entry, h) {
		__u32 cand_id = (__u32)(long)hash_entry->value;
		int eq;

		/*
		 * Even though btf_dedup_is_equiv() checks for
		 * btf_shallow_equal_struct() internally when checking two
		 * structs (unions) for equivalence, we need to guard here
		 * from picking matching FWD type as a dedup candidate.
		 * This can happen due to hash collision. In such case just
		 * relying on btf_dedup_is_equiv() would lead to potentially
		 * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
		 * FWD and compatible STRUCT/UNION are considered equivalent.
		 */
		cand_type = d->btf->types[cand_id];
		if (!btf_shallow_equal_struct(t, cand_type))
			continue;

		btf_dedup_clear_hypot_map(d);
		eq = btf_dedup_is_equiv(d, type_id, cand_id);
		if (eq < 0)
			return eq;
		if (!eq)
			continue;
		new_id = cand_id;
		btf_dedup_merge_hypot_map(d);
		break;
	}

	d->map[type_id] = new_id;
	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
		return -ENOMEM;

	return 0;
}

static int btf_dedup_struct_types(struct btf_dedup *d)
{
	int i, err;

	for (i = 1; i <= d->btf->nr_types; i++) {
		err = btf_dedup_struct_type(d, i);
		if (err)
			return err;
	}
	return 0;
}

/*
 * Deduplicate reference type.
 *
 * Once all primitive and struct/union types are deduplicated, we can easily
 * deduplicate all other (reference) BTF types.
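 * Reference types in this sense are those that merely refer to another type:
 * PTR, TYPEDEF, CONST, VOLATILE, RESTRICT, FUNC, FUNC_PROTO and ARRAY.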
This is done in two steps: 2530 * 2531 * 1. Resolve all referenced type IDs into their canonical type IDs. This 2532 * resolution can be done either immediately for primitive or struct/union types 2533 * (because they were deduped in previous two phases) or recursively for 2534 * reference types. Recursion will always terminate at either primitive or 2535 * struct/union type, at which point we can "unwind" chain of reference types 2536 * one by one. There is no danger of encountering cycles because in C type 2537 * system the only way to form type cycle is through struct/union, so any chain 2538 * of reference types, even those taking part in a type cycle, will inevitably 2539 * reach struct/union at some point. 2540 * 2541 * 2. Once all referenced type IDs are resolved into canonical ones, BTF type 2542 * becomes "stable", in the sense that no further deduplication will cause 2543 * any changes to it. With that, it's now possible to calculate type's signature 2544 * hash (this time taking into account referenced type IDs) and loop over all 2545 * potential canonical representatives. If no match was found, current type 2546 * will become canonical representative of itself and will be added into 2547 * btf_dedup->dedup_table as another possible canonical representative. 2548 */ 2549 static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id) 2550 { 2551 struct hashmap_entry *hash_entry; 2552 __u32 new_id = type_id, cand_id; 2553 struct btf_type *t, *cand; 2554 /* if we don't find equivalent type, then we are representative type */ 2555 int ref_type_id; 2556 long h; 2557 2558 if (d->map[type_id] == BTF_IN_PROGRESS_ID) 2559 return -ELOOP; 2560 if (d->map[type_id] <= BTF_MAX_NR_TYPES) 2561 return resolve_type_id(d, type_id); 2562 2563 t = d->btf->types[type_id]; 2564 d->map[type_id] = BTF_IN_PROGRESS_ID; 2565 2566 switch (btf_kind(t)) { 2567 case BTF_KIND_CONST: 2568 case BTF_KIND_VOLATILE: 2569 case BTF_KIND_RESTRICT: 2570 case BTF_KIND_PTR: 2571 case BTF_KIND_TYPEDEF: 2572 case BTF_KIND_FUNC: 2573 ref_type_id = btf_dedup_ref_type(d, t->type); 2574 if (ref_type_id < 0) 2575 return ref_type_id; 2576 t->type = ref_type_id; 2577 2578 h = btf_hash_common(t); 2579 for_each_dedup_cand(d, hash_entry, h) { 2580 cand_id = (__u32)(long)hash_entry->value; 2581 cand = d->btf->types[cand_id]; 2582 if (btf_equal_common(t, cand)) { 2583 new_id = cand_id; 2584 break; 2585 } 2586 } 2587 break; 2588 2589 case BTF_KIND_ARRAY: { 2590 struct btf_array *info = btf_array(t); 2591 2592 ref_type_id = btf_dedup_ref_type(d, info->type); 2593 if (ref_type_id < 0) 2594 return ref_type_id; 2595 info->type = ref_type_id; 2596 2597 ref_type_id = btf_dedup_ref_type(d, info->index_type); 2598 if (ref_type_id < 0) 2599 return ref_type_id; 2600 info->index_type = ref_type_id; 2601 2602 h = btf_hash_array(t); 2603 for_each_dedup_cand(d, hash_entry, h) { 2604 cand_id = (__u32)(long)hash_entry->value; 2605 cand = d->btf->types[cand_id]; 2606 if (btf_equal_array(t, cand)) { 2607 new_id = cand_id; 2608 break; 2609 } 2610 } 2611 break; 2612 } 2613 2614 case BTF_KIND_FUNC_PROTO: { 2615 struct btf_param *param; 2616 __u16 vlen; 2617 int i; 2618 2619 ref_type_id = btf_dedup_ref_type(d, t->type); 2620 if (ref_type_id < 0) 2621 return ref_type_id; 2622 t->type = ref_type_id; 2623 2624 vlen = btf_vlen(t); 2625 param = btf_params(t); 2626 for (i = 0; i < vlen; i++) { 2627 ref_type_id = btf_dedup_ref_type(d, param->type); 2628 if (ref_type_id < 0) 2629 return ref_type_id; 2630 param->type = ref_type_id; 2631 param++; 2632 } 2633 2634 h 
= btf_hash_fnproto(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = (__u32)(long)hash_entry->value;
			cand = d->btf->types[cand_id];
			if (btf_equal_fnproto(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;
	}

	default:
		return -EINVAL;
	}

	d->map[type_id] = new_id;
	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
		return -ENOMEM;

	return new_id;
}

static int btf_dedup_ref_types(struct btf_dedup *d)
{
	int i, err;

	for (i = 1; i <= d->btf->nr_types; i++) {
		err = btf_dedup_ref_type(d, i);
		if (err < 0)
			return err;
	}
	/* we won't need d->dedup_table anymore */
	hashmap__free(d->dedup_table);
	d->dedup_table = NULL;
	return 0;
}

/*
 * Compact types.
 *
 * After we established for each type its corresponding canonical
 * representative type, we now can eliminate types that are not canonical and
 * leave only canonical ones laid out sequentially in memory by copying them
 * over duplicates. During compaction btf_dedup->hypot_map array is reused to
 * store a map from original type ID to a new compacted type ID, which will be
 * used during the next phase to "fix up" type IDs referenced from struct/union
 * and reference types.
 */
static int btf_dedup_compact_types(struct btf_dedup *d)
{
	struct btf_type **new_types;
	__u32 next_type_id = 1;
	char *types_start, *p;
	int i, len;

	/* we are going to reuse hypot_map to store compaction remapping */
	d->hypot_map[0] = 0;
	for (i = 1; i <= d->btf->nr_types; i++)
		d->hypot_map[i] = BTF_UNPROCESSED_ID;

	types_start = d->btf->nohdr_data + d->btf->hdr->type_off;
	p = types_start;

	for (i = 1; i <= d->btf->nr_types; i++) {
		if (d->map[i] != i)
			continue;

		len = btf_type_size(d->btf->types[i]);
		if (len < 0)
			return len;

		memmove(p, d->btf->types[i], len);
		d->hypot_map[i] = next_type_id;
		d->btf->types[next_type_id] = (struct btf_type *)p;
		p += len;
		next_type_id++;
	}

	/* shrink struct btf's internal types index and update btf_header */
	d->btf->nr_types = next_type_id - 1;
	d->btf->types_size = d->btf->nr_types;
	d->btf->hdr->type_len = p - types_start;
	new_types = realloc(d->btf->types,
			    (1 + d->btf->nr_types) * sizeof(struct btf_type *));
	if (!new_types)
		return -ENOMEM;
	d->btf->types = new_types;

	/* make sure string section follows type information without gaps */
	d->btf->hdr->str_off = p - (char *)d->btf->nohdr_data;
	memmove(p, d->btf->strings, d->btf->hdr->str_len);
	d->btf->strings = p;
	p += d->btf->hdr->str_len;

	d->btf->data_size = p - (char *)d->btf->data;
	return 0;
}

/*
 * Figure out final (deduplicated and compacted) type ID for provided original
 * `type_id` by first resolving it into corresponding canonical type ID and
 * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map,
 * which is populated during compaction phase.
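 * For example, if original type ID 42 was deduplicated to canonical type 17,
 * and compaction moved type 17 to new ID 5, remapping ID 42 yields 5.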
2738 */ 2739 static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id) 2740 { 2741 __u32 resolved_type_id, new_type_id; 2742 2743 resolved_type_id = resolve_type_id(d, type_id); 2744 new_type_id = d->hypot_map[resolved_type_id]; 2745 if (new_type_id > BTF_MAX_NR_TYPES) 2746 return -EINVAL; 2747 return new_type_id; 2748 } 2749 2750 /* 2751 * Remap referenced type IDs into deduped type IDs. 2752 * 2753 * After BTF types are deduplicated and compacted, their final type IDs may 2754 * differ from original ones. The map from original to a corresponding 2755 * deduped type ID is stored in btf_dedup->hypot_map and is populated during 2756 * compaction phase. During remapping phase we are rewriting all type IDs 2757 * referenced from any BTF type (e.g., struct fields, func proto args, etc) to 2758 * their final deduped type IDs. 2759 */ 2760 static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id) 2761 { 2762 struct btf_type *t = d->btf->types[type_id]; 2763 int i, r; 2764 2765 switch (btf_kind(t)) { 2766 case BTF_KIND_INT: 2767 case BTF_KIND_ENUM: 2768 break; 2769 2770 case BTF_KIND_FWD: 2771 case BTF_KIND_CONST: 2772 case BTF_KIND_VOLATILE: 2773 case BTF_KIND_RESTRICT: 2774 case BTF_KIND_PTR: 2775 case BTF_KIND_TYPEDEF: 2776 case BTF_KIND_FUNC: 2777 case BTF_KIND_VAR: 2778 r = btf_dedup_remap_type_id(d, t->type); 2779 if (r < 0) 2780 return r; 2781 t->type = r; 2782 break; 2783 2784 case BTF_KIND_ARRAY: { 2785 struct btf_array *arr_info = btf_array(t); 2786 2787 r = btf_dedup_remap_type_id(d, arr_info->type); 2788 if (r < 0) 2789 return r; 2790 arr_info->type = r; 2791 r = btf_dedup_remap_type_id(d, arr_info->index_type); 2792 if (r < 0) 2793 return r; 2794 arr_info->index_type = r; 2795 break; 2796 } 2797 2798 case BTF_KIND_STRUCT: 2799 case BTF_KIND_UNION: { 2800 struct btf_member *member = btf_members(t); 2801 __u16 vlen = btf_vlen(t); 2802 2803 for (i = 0; i < vlen; i++) { 2804 r = btf_dedup_remap_type_id(d, member->type); 2805 if (r < 0) 2806 return r; 2807 member->type = r; 2808 member++; 2809 } 2810 break; 2811 } 2812 2813 case BTF_KIND_FUNC_PROTO: { 2814 struct btf_param *param = btf_params(t); 2815 __u16 vlen = btf_vlen(t); 2816 2817 r = btf_dedup_remap_type_id(d, t->type); 2818 if (r < 0) 2819 return r; 2820 t->type = r; 2821 2822 for (i = 0; i < vlen; i++) { 2823 r = btf_dedup_remap_type_id(d, param->type); 2824 if (r < 0) 2825 return r; 2826 param->type = r; 2827 param++; 2828 } 2829 break; 2830 } 2831 2832 case BTF_KIND_DATASEC: { 2833 struct btf_var_secinfo *var = btf_var_secinfos(t); 2834 __u16 vlen = btf_vlen(t); 2835 2836 for (i = 0; i < vlen; i++) { 2837 r = btf_dedup_remap_type_id(d, var->type); 2838 if (r < 0) 2839 return r; 2840 var->type = r; 2841 var++; 2842 } 2843 break; 2844 } 2845 2846 default: 2847 return -EINVAL; 2848 } 2849 2850 return 0; 2851 } 2852 2853 static int btf_dedup_remap_types(struct btf_dedup *d) 2854 { 2855 int i, r; 2856 2857 for (i = 1; i <= d->btf->nr_types; i++) { 2858 r = btf_dedup_remap_type(d, i); 2859 if (r < 0) 2860 return r; 2861 } 2862 return 0; 2863 } 2864
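/*
 * Example usage (illustrative only, not compiled as part of libbpf): a minimal
 * sketch of how a consumer of the public btf.h API might drive the dedup
 * pipeline implemented above -- parse BTF out of an ELF object, deduplicate it
 * in place, and observe the reduced number of types. The object name "prog.o"
 * is a placeholder and error handling is deliberately minimal. Instead of a
 * NULL opts pointer, a `struct btf_dedup_opts` with `dont_resolve_fwds` set
 * could be passed to disable the FWD -> STRUCT/UNION resolution described
 * above.
 */
#if 0
#include <stdio.h>
#include "btf.h"
#include "libbpf.h"

int main(void)
{
	struct btf *btf;
	long err;

	btf = btf__parse_elf("prog.o", NULL);	/* NULL: skip .BTF.ext data */
	err = libbpf_get_error(btf);
	if (err) {
		fprintf(stderr, "failed to parse BTF: %ld\n", err);
		return 1;
	}

	printf("types before dedup: %u\n", btf__get_nr_types(btf));

	/* runs string/prim/struct/ref dedup, compaction and type ID remap */
	err = btf__dedup(btf, NULL, NULL);
	if (err) {
		fprintf(stderr, "dedup failed: %ld\n", err);
		btf__free(btf);
		return 1;
	}

	printf("types after dedup: %u\n", btf__get_nr_types(btf));
	btf__free(btf);
	return 0;
}
#endif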