btf.c (abc7220b2233df9b13d8e0e312fb6f31e5402d7d) | btf.c (740e69c3c511aa207155ba993a854b5bee79cdc2) |
---|---|
1// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) 2/* Copyright (c) 2018 Facebook */ 3 4#include <endian.h> 5#include <stdio.h> 6#include <stdlib.h> 7#include <string.h> 8#include <fcntl.h> --- 7 unchanged lines hidden (view full) --- 16#include <linux/btf.h> 17#include <gelf.h> 18#include "btf.h" 19#include "bpf.h" 20#include "libbpf.h" 21#include "libbpf_internal.h" 22#include "hashmap.h" 23 | 1// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) 2/* Copyright (c) 2018 Facebook */ 3 4#include <endian.h> 5#include <stdio.h> 6#include <stdlib.h> 7#include <string.h> 8#include <fcntl.h> --- 7 unchanged lines hidden (view full) --- 16#include <linux/btf.h> 17#include <gelf.h> 18#include "btf.h" 19#include "bpf.h" 20#include "libbpf.h" 21#include "libbpf_internal.h" 22#include "hashmap.h" 23 |
24/* make sure libbpf doesn't use kernel-only integer typedefs */ 25#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64 26 | |
27#define BTF_MAX_NR_TYPES 0x7fffffffU 28#define BTF_MAX_STR_OFFSET 0x7fffffffU 29 30static struct btf_type btf_void; 31 32struct btf { 33 union { 34 struct btf_header *hdr; 35 void *data; 36 }; | 24#define BTF_MAX_NR_TYPES 0x7fffffffU 25#define BTF_MAX_STR_OFFSET 0x7fffffffU 26 27static struct btf_type btf_void; 28 29struct btf { 30 union { 31 struct btf_header *hdr; 32 void *data; 33 }; |
37 struct btf_type **types; | 34 __u32 *type_offs; 35 __u32 type_offs_cap; |
38 const char *strings; 39 void *nohdr_data; | 36 const char *strings; 37 void *nohdr_data; |
38 void *types_data; |
|
40 __u32 nr_types; | 39 __u32 nr_types; |
41 __u32 types_size; | |
42 __u32 data_size; 43 int fd; 44 int ptr_sz; 45}; 46 47static inline __u64 ptr_to_u64(const void *ptr) 48{ 49 return (__u64) (unsigned long) ptr; 50} 51 | 40 __u32 data_size; 41 int fd; 42 int ptr_sz; 43}; 44 45static inline __u64 ptr_to_u64(const void *ptr) 46{ 47 return (__u64) (unsigned long) ptr; 48} 49 |
52static int btf_add_type(struct btf *btf, struct btf_type *t) | 50static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off) |
53{ | 51{ |
54 if (btf->types_size - btf->nr_types < 2) { 55 struct btf_type **new_types; | 52 /* nr_types is 1-based, so N types means we need N+1-sized array */ 53 if (btf->nr_types + 2 > btf->type_offs_cap) { 54 __u32 *new_offs; |
56 __u32 expand_by, new_size; 57 | 55 __u32 expand_by, new_size; 56 |
58 if (btf->types_size == BTF_MAX_NR_TYPES) | 57 if (btf->type_offs_cap == BTF_MAX_NR_TYPES) |
59 return -E2BIG; 60 | 58 return -E2BIG; 59 |
61 expand_by = max(btf->types_size >> 2, 16U); 62 new_size = min(BTF_MAX_NR_TYPES, btf->types_size + expand_by); | 60 expand_by = max(btf->type_offs_cap / 4, 16U); 61 new_size = min(BTF_MAX_NR_TYPES, btf->type_offs_cap + expand_by); |
63 | 62 |
64 new_types = realloc(btf->types, sizeof(*new_types) * new_size); 65 if (!new_types) | 63 new_offs = libbpf_reallocarray(btf->type_offs, new_size, sizeof(*new_offs)); 64 if (!new_offs) |
66 return -ENOMEM; 67 | 65 return -ENOMEM; 66 |
68 if (btf->nr_types == 0) 69 new_types[0] = &btf_void; | 67 new_offs[0] = UINT_MAX; /* VOID is specially handled */ |
70 | 68 |
71 btf->types = new_types; 72 btf->types_size = new_size; | 69 btf->type_offs = new_offs; 70 btf->type_offs_cap = new_size; |
73 } 74 | 71 } 72 |
75 btf->types[++(btf->nr_types)] = t; | 73 btf->type_offs[btf->nr_types + 1] = type_off; |
76 77 return 0; 78} 79 80static int btf_parse_hdr(struct btf *btf) 81{ 82 const struct btf_header *hdr = btf->hdr; 83 __u32 meta_left; --- 61 unchanged lines hidden (view full) --- 145 return -EINVAL; 146 } 147 148 btf->strings = start; 149 150 return 0; 151} 152 | 74 75 return 0; 76} 77 78static int btf_parse_hdr(struct btf *btf) 79{ 80 const struct btf_header *hdr = btf->hdr; 81 __u32 meta_left; --- 61 unchanged lines hidden (view full) --- 143 return -EINVAL; 144 } 145 146 btf->strings = start; 147 148 return 0; 149} 150 |
153static int btf_type_size(struct btf_type *t) | 151static int btf_type_size(const struct btf_type *t) |
154{ 155 int base_size = sizeof(struct btf_type); 156 __u16 vlen = btf_vlen(t); 157 158 switch (btf_kind(t)) { 159 case BTF_KIND_FWD: 160 case BTF_KIND_CONST: 161 case BTF_KIND_VOLATILE: --- 21 unchanged lines hidden (view full) --- 183 pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t)); 184 return -EINVAL; 185 } 186} 187 188static int btf_parse_type_sec(struct btf *btf) 189{ 190 struct btf_header *hdr = btf->hdr; | 152{ 153 int base_size = sizeof(struct btf_type); 154 __u16 vlen = btf_vlen(t); 155 156 switch (btf_kind(t)) { 157 case BTF_KIND_FWD: 158 case BTF_KIND_CONST: 159 case BTF_KIND_VOLATILE: --- 21 unchanged lines hidden (view full) --- 181 pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t)); 182 return -EINVAL; 183 } 184} 185 186static int btf_parse_type_sec(struct btf *btf) 187{ 188 struct btf_header *hdr = btf->hdr; |
191 void *nohdr_data = btf->nohdr_data; 192 void *next_type = nohdr_data + hdr->type_off; 193 void *end_type = nohdr_data + hdr->str_off; | 189 void *next_type = btf->nohdr_data + hdr->type_off; 190 void *end_type = next_type + hdr->type_len; |
194 | 191 |
192 btf->types_data = next_type; 193 |
|
195 while (next_type < end_type) { | 194 while (next_type < end_type) { |
196 struct btf_type *t = next_type; | |
197 int type_size; 198 int err; 199 | 195 int type_size; 196 int err; 197 |
200 type_size = btf_type_size(t); | 198 err = btf_add_type_idx_entry(btf, next_type - btf->types_data); 199 if (err) 200 return err; 201 202 type_size = btf_type_size(next_type); |
201 if (type_size < 0) 202 return type_size; | 203 if (type_size < 0) 204 return type_size; |
205 |
|
203 next_type += type_size; | 206 next_type += type_size; |
204 err = btf_add_type(btf, t); 205 if (err) 206 return err; | 207 btf->nr_types++; |
207 } 208 209 return 0; 210} 211 212__u32 btf__get_nr_types(const struct btf *btf) 213{ 214 return btf->nr_types; 215} 216 | 208 } 209 210 return 0; 211} 212 213__u32 btf__get_nr_types(const struct btf *btf) 214{ 215 return btf->nr_types; 216} 217 |
218/* internal helper returning non-const pointer to a type */ 219static struct btf_type *btf_type_by_id(struct btf *btf, __u32 type_id) 220{ 221 if (type_id == 0) 222 return &btf_void; 223 224 return btf->types_data + btf->type_offs[type_id]; 225} 226 |
|
217const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id) 218{ 219 if (type_id > btf->nr_types) 220 return NULL; | 227const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id) 228{ 229 if (type_id > btf->nr_types) 230 return NULL; |
221 222 return btf->types[type_id]; | 231 return btf_type_by_id((struct btf *)btf, type_id); |
223} 224 225static int determine_ptr_size(const struct btf *btf) 226{ 227 const struct btf_type *t; 228 const char *name; 229 int i; 230 --- 181 unchanged lines hidden (view full) --- 412__s32 btf__find_by_name(const struct btf *btf, const char *type_name) 413{ 414 __u32 i; 415 416 if (!strcmp(type_name, "void")) 417 return 0; 418 419 for (i = 1; i <= btf->nr_types; i++) { | 232} 233 234static int determine_ptr_size(const struct btf *btf) 235{ 236 const struct btf_type *t; 237 const char *name; 238 int i; 239 --- 181 unchanged lines hidden (view full) --- 421__s32 btf__find_by_name(const struct btf *btf, const char *type_name) 422{ 423 __u32 i; 424 425 if (!strcmp(type_name, "void")) 426 return 0; 427 428 for (i = 1; i <= btf->nr_types; i++) { |
420 const struct btf_type *t = btf->types[i]; | 429 const struct btf_type *t = btf__type_by_id(btf, i); |
421 const char *name = btf__name_by_offset(btf, t->name_off); 422 423 if (name && !strcmp(type_name, name)) 424 return i; 425 } 426 427 return -ENOENT; 428} 429 430__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name, 431 __u32 kind) 432{ 433 __u32 i; 434 435 if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void")) 436 return 0; 437 438 for (i = 1; i <= btf->nr_types; i++) { | 430 const char *name = btf__name_by_offset(btf, t->name_off); 431 432 if (name && !strcmp(type_name, name)) 433 return i; 434 } 435 436 return -ENOENT; 437} 438 439__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name, 440 __u32 kind) 441{ 442 __u32 i; 443 444 if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void")) 445 return 0; 446 447 for (i = 1; i <= btf->nr_types; i++) { |
439 const struct btf_type *t = btf->types[i]; | 448 const struct btf_type *t = btf__type_by_id(btf, i); |
440 const char *name; 441 442 if (btf_kind(t) != kind) 443 continue; 444 name = btf__name_by_offset(btf, t->name_off); 445 if (name && !strcmp(type_name, name)) 446 return i; 447 } --- 5 unchanged lines hidden (view full) --- 453{ 454 if (IS_ERR_OR_NULL(btf)) 455 return; 456 457 if (btf->fd >= 0) 458 close(btf->fd); 459 460 free(btf->data); | 449 const char *name; 450 451 if (btf_kind(t) != kind) 452 continue; 453 name = btf__name_by_offset(btf, t->name_off); 454 if (name && !strcmp(type_name, name)) 455 return i; 456 } --- 5 unchanged lines hidden (view full) --- 462{ 463 if (IS_ERR_OR_NULL(btf)) 464 return; 465 466 if (btf->fd >= 0) 467 close(btf->fd); 468 469 free(btf->data); |
461 free(btf->types); | 470 free(btf->type_offs); |
462 free(btf); 463} 464 465struct btf *btf__new(const void *data, __u32 size) 466{ 467 struct btf *btf; 468 int err; 469 --- 317 unchanged lines hidden (view full) --- 787} 788 789int btf__finalize_data(struct bpf_object *obj, struct btf *btf) 790{ 791 int err = 0; 792 __u32 i; 793 794 for (i = 1; i <= btf->nr_types; i++) { | 471 free(btf); 472} 473 474struct btf *btf__new(const void *data, __u32 size) 475{ 476 struct btf *btf; 477 int err; 478 --- 317 unchanged lines hidden (view full) --- 796} 797 798int btf__finalize_data(struct bpf_object *obj, struct btf *btf) 799{ 800 int err = 0; 801 __u32 i; 802 803 for (i = 1; i <= btf->nr_types; i++) { |
795 struct btf_type *t = btf->types[i]; | 804 struct btf_type *t = btf_type_by_id(btf, i); |
796 797 /* Loader needs to fix up some of the things compiler 798 * couldn't get its hands on while emitting BTF. This 799 * is section size and global variable offset. We use 800 * the info from the ELF itself for this purpose. 801 */ 802 if (btf_is_datasec(t)) { 803 err = btf_fixup_datasec(obj, btf, t); --- 322 unchanged lines hidden (view full) --- 1126 .min_rec_size = sizeof(struct bpf_line_info_min), 1127 .ext_info = &btf_ext->line_info, 1128 .desc = "line_info", 1129 }; 1130 1131 return btf_ext_setup_info(btf_ext, &param); 1132} 1133 | 805 806 /* Loader needs to fix up some of the things compiler 807 * couldn't get its hands on while emitting BTF. This 808 * is section size and global variable offset. We use 809 * the info from the ELF itself for this purpose. 810 */ 811 if (btf_is_datasec(t)) { 812 err = btf_fixup_datasec(obj, btf, t); --- 322 unchanged lines hidden (view full) --- 1135 .min_rec_size = sizeof(struct bpf_line_info_min), 1136 .ext_info = &btf_ext->line_info, 1137 .desc = "line_info", 1138 }; 1139 1140 return btf_ext_setup_info(btf_ext, &param); 1141} 1142 |
1134static int btf_ext_setup_field_reloc(struct btf_ext *btf_ext) | 1143static int btf_ext_setup_core_relos(struct btf_ext *btf_ext) |
1135{ 1136 struct btf_ext_sec_setup_param param = { | 1144{ 1145 struct btf_ext_sec_setup_param param = { |
1137 .off = btf_ext->hdr->field_reloc_off, 1138 .len = btf_ext->hdr->field_reloc_len, 1139 .min_rec_size = sizeof(struct bpf_field_reloc), 1140 .ext_info = &btf_ext->field_reloc_info, 1141 .desc = "field_reloc", | 1146 .off = btf_ext->hdr->core_relo_off, 1147 .len = btf_ext->hdr->core_relo_len, 1148 .min_rec_size = sizeof(struct bpf_core_relo), 1149 .ext_info = &btf_ext->core_relo_info, 1150 .desc = "core_relo", |
1142 }; 1143 1144 return btf_ext_setup_info(btf_ext, &param); 1145} 1146 1147static int btf_ext_parse_hdr(__u8 *data, __u32 data_size) 1148{ 1149 const struct btf_ext_header *hdr = (struct btf_ext_header *)data; --- 62 unchanged lines hidden (view full) --- 1212 err = btf_ext_setup_func_info(btf_ext); 1213 if (err) 1214 goto done; 1215 1216 err = btf_ext_setup_line_info(btf_ext); 1217 if (err) 1218 goto done; 1219 | 1151 }; 1152 1153 return btf_ext_setup_info(btf_ext, &param); 1154} 1155 1156static int btf_ext_parse_hdr(__u8 *data, __u32 data_size) 1157{ 1158 const struct btf_ext_header *hdr = (struct btf_ext_header *)data; --- 62 unchanged lines hidden (view full) --- 1221 err = btf_ext_setup_func_info(btf_ext); 1222 if (err) 1223 goto done; 1224 1225 err = btf_ext_setup_line_info(btf_ext); 1226 if (err) 1227 goto done; 1228 |
1220 if (btf_ext->hdr->hdr_len < 1221 offsetofend(struct btf_ext_header, field_reloc_len)) | 1229 if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len)) |
1222 goto done; | 1230 goto done; |
1223 err = btf_ext_setup_field_reloc(btf_ext); | 1231 err = btf_ext_setup_core_relos(btf_ext); |
1224 if (err) 1225 goto done; 1226 1227done: 1228 if (err) { 1229 btf_ext__free(btf_ext); 1230 return ERR_PTR(err); 1231 } --- 338 unchanged lines hidden (view full) --- 1570 1571static int btf_dedup_hypot_map_add(struct btf_dedup *d, 1572 __u32 from_id, __u32 to_id) 1573{ 1574 if (d->hypot_cnt == d->hypot_cap) { 1575 __u32 *new_list; 1576 1577 d->hypot_cap += max((size_t)16, d->hypot_cap / 2); | 1232 if (err) 1233 goto done; 1234 1235done: 1236 if (err) { 1237 btf_ext__free(btf_ext); 1238 return ERR_PTR(err); 1239 } --- 338 unchanged lines hidden (view full) --- 1578 1579static int btf_dedup_hypot_map_add(struct btf_dedup *d, 1580 __u32 from_id, __u32 to_id) 1581{ 1582 if (d->hypot_cnt == d->hypot_cap) { 1583 __u32 *new_list; 1584 1585 d->hypot_cap += max((size_t)16, d->hypot_cap / 2); |
1578 new_list = realloc(d->hypot_list, sizeof(__u32) * d->hypot_cap); | 1586 new_list = libbpf_reallocarray(d->hypot_list, d->hypot_cap, sizeof(__u32)); |
1579 if (!new_list) 1580 return -ENOMEM; 1581 d->hypot_list = new_list; 1582 } 1583 d->hypot_list[d->hypot_cnt++] = from_id; 1584 d->hypot_map[from_id] = to_id; 1585 return 0; 1586} --- 67 unchanged lines hidden (view full) --- 1654 d->map = malloc(sizeof(__u32) * (1 + btf->nr_types)); 1655 if (!d->map) { 1656 err = -ENOMEM; 1657 goto done; 1658 } 1659 /* special BTF "void" type is made canonical immediately */ 1660 d->map[0] = 0; 1661 for (i = 1; i <= btf->nr_types; i++) { | 1587 if (!new_list) 1588 return -ENOMEM; 1589 d->hypot_list = new_list; 1590 } 1591 d->hypot_list[d->hypot_cnt++] = from_id; 1592 d->hypot_map[from_id] = to_id; 1593 return 0; 1594} --- 67 unchanged lines hidden (view full) --- 1662 d->map = malloc(sizeof(__u32) * (1 + btf->nr_types)); 1663 if (!d->map) { 1664 err = -ENOMEM; 1665 goto done; 1666 } 1667 /* special BTF "void" type is made canonical immediately */ 1668 d->map[0] = 0; 1669 for (i = 1; i <= btf->nr_types; i++) { |
1662 struct btf_type *t = d->btf->types[i]; | 1670 struct btf_type *t = btf_type_by_id(d->btf, i); |
1663 1664 /* VAR and DATASEC are never deduped and are self-canonical */ 1665 if (btf_is_var(t) || btf_is_datasec(t)) 1666 d->map[i] = i; 1667 else 1668 d->map[i] = BTF_UNPROCESSED_ID; 1669 } 1670 --- 22 unchanged lines hidden (view full) --- 1693 */ 1694static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx) 1695{ 1696 void *line_data_cur, *line_data_end; 1697 int i, j, r, rec_size; 1698 struct btf_type *t; 1699 1700 for (i = 1; i <= d->btf->nr_types; i++) { | 1671 1672 /* VAR and DATASEC are never deduped and are self-canonical */ 1673 if (btf_is_var(t) || btf_is_datasec(t)) 1674 d->map[i] = i; 1675 else 1676 d->map[i] = BTF_UNPROCESSED_ID; 1677 } 1678 --- 22 unchanged lines hidden (view full) --- 1701 */ 1702static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx) 1703{ 1704 void *line_data_cur, *line_data_end; 1705 int i, j, r, rec_size; 1706 struct btf_type *t; 1707 1708 for (i = 1; i <= d->btf->nr_types; i++) { |
1701 t = d->btf->types[i]; | 1709 t = btf_type_by_id(d->btf, i); |
1702 r = fn(&t->name_off, ctx); 1703 if (r) 1704 return r; 1705 1706 switch (btf_kind(t)) { 1707 case BTF_KIND_STRUCT: 1708 case BTF_KIND_UNION: { 1709 struct btf_member *m = btf_members(t); --- 156 unchanged lines hidden (view full) --- 1866 bool grp_used; 1867 1868 /* build index of all strings */ 1869 while (p < end) { 1870 if (strs.cnt + 1 > strs.cap) { 1871 struct btf_str_ptr *new_ptrs; 1872 1873 strs.cap += max(strs.cnt / 2, 16U); | 1710 r = fn(&t->name_off, ctx); 1711 if (r) 1712 return r; 1713 1714 switch (btf_kind(t)) { 1715 case BTF_KIND_STRUCT: 1716 case BTF_KIND_UNION: { 1717 struct btf_member *m = btf_members(t); --- 156 unchanged lines hidden (view full) --- 1874 bool grp_used; 1875 1876 /* build index of all strings */ 1877 while (p < end) { 1878 if (strs.cnt + 1 > strs.cap) { 1879 struct btf_str_ptr *new_ptrs; 1880 1881 strs.cap += max(strs.cnt / 2, 16U); |
1874 new_ptrs = realloc(strs.ptrs, 1875 sizeof(strs.ptrs[0]) * strs.cap); | 1882 new_ptrs = libbpf_reallocarray(strs.ptrs, strs.cap, sizeof(strs.ptrs[0])); |
1876 if (!new_ptrs) { 1877 err = -ENOMEM; 1878 goto done; 1879 } 1880 strs.ptrs = new_ptrs; 1881 } 1882 1883 strs.ptrs[strs.cnt].str = p; --- 345 unchanged lines hidden (view full) --- 2229/* 2230 * Deduplicate primitive types, that can't reference other types, by calculating 2231 * their type signature hash and comparing them with any possible canonical 2232 * candidate. If no canonical candidate matches, type itself is marked as 2233 * canonical and is added into `btf_dedup->dedup_table` as another candidate. 2234 */ 2235static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) 2236{ | 1883 if (!new_ptrs) { 1884 err = -ENOMEM; 1885 goto done; 1886 } 1887 strs.ptrs = new_ptrs; 1888 } 1889 1890 strs.ptrs[strs.cnt].str = p; --- 345 unchanged lines hidden (view full) --- 2236/* 2237 * Deduplicate primitive types, that can't reference other types, by calculating 2238 * their type signature hash and comparing them with any possible canonical 2239 * candidate. If no canonical candidate matches, type itself is marked as 2240 * canonical and is added into `btf_dedup->dedup_table` as another candidate. 2241 */ 2242static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) 2243{ |
2237 struct btf_type *t = d->btf->types[type_id]; | 2244 struct btf_type *t = btf_type_by_id(d->btf, type_id); |
2238 struct hashmap_entry *hash_entry; 2239 struct btf_type *cand; 2240 /* if we don't find equivalent type, then we are canonical */ 2241 __u32 new_id = type_id; 2242 __u32 cand_id; 2243 long h; 2244 2245 switch (btf_kind(t)) { --- 10 unchanged lines hidden (view full) --- 2256 case BTF_KIND_VAR: 2257 case BTF_KIND_DATASEC: 2258 return 0; 2259 2260 case BTF_KIND_INT: 2261 h = btf_hash_int(t); 2262 for_each_dedup_cand(d, hash_entry, h) { 2263 cand_id = (__u32)(long)hash_entry->value; | 2245 struct hashmap_entry *hash_entry; 2246 struct btf_type *cand; 2247 /* if we don't find equivalent type, then we are canonical */ 2248 __u32 new_id = type_id; 2249 __u32 cand_id; 2250 long h; 2251 2252 switch (btf_kind(t)) { --- 10 unchanged lines hidden (view full) --- 2263 case BTF_KIND_VAR: 2264 case BTF_KIND_DATASEC: 2265 return 0; 2266 2267 case BTF_KIND_INT: 2268 h = btf_hash_int(t); 2269 for_each_dedup_cand(d, hash_entry, h) { 2270 cand_id = (__u32)(long)hash_entry->value; |
2264 cand = d->btf->types[cand_id]; | 2271 cand = btf_type_by_id(d->btf, cand_id); |
2265 if (btf_equal_int(t, cand)) { 2266 new_id = cand_id; 2267 break; 2268 } 2269 } 2270 break; 2271 2272 case BTF_KIND_ENUM: 2273 h = btf_hash_enum(t); 2274 for_each_dedup_cand(d, hash_entry, h) { 2275 cand_id = (__u32)(long)hash_entry->value; | 2272 if (btf_equal_int(t, cand)) { 2273 new_id = cand_id; 2274 break; 2275 } 2276 } 2277 break; 2278 2279 case BTF_KIND_ENUM: 2280 h = btf_hash_enum(t); 2281 for_each_dedup_cand(d, hash_entry, h) { 2282 cand_id = (__u32)(long)hash_entry->value; |
2276 cand = d->btf->types[cand_id]; | 2283 cand = btf_type_by_id(d->btf, cand_id); |
2277 if (btf_equal_enum(t, cand)) { 2278 new_id = cand_id; 2279 break; 2280 } 2281 if (d->opts.dont_resolve_fwds) 2282 continue; 2283 if (btf_compat_enum(t, cand)) { 2284 if (btf_is_enum_fwd(t)) { --- 6 unchanged lines hidden (view full) --- 2291 } 2292 } 2293 break; 2294 2295 case BTF_KIND_FWD: 2296 h = btf_hash_common(t); 2297 for_each_dedup_cand(d, hash_entry, h) { 2298 cand_id = (__u32)(long)hash_entry->value; | 2284 if (btf_equal_enum(t, cand)) { 2285 new_id = cand_id; 2286 break; 2287 } 2288 if (d->opts.dont_resolve_fwds) 2289 continue; 2290 if (btf_compat_enum(t, cand)) { 2291 if (btf_is_enum_fwd(t)) { --- 6 unchanged lines hidden (view full) --- 2298 } 2299 } 2300 break; 2301 2302 case BTF_KIND_FWD: 2303 h = btf_hash_common(t); 2304 for_each_dedup_cand(d, hash_entry, h) { 2305 cand_id = (__u32)(long)hash_entry->value; |
2299 cand = d->btf->types[cand_id]; | 2306 cand = btf_type_by_id(d->btf, cand_id); |
2300 if (btf_equal_common(t, cand)) { 2301 new_id = cand_id; 2302 break; 2303 } 2304 } 2305 break; 2306 2307 default: --- 42 unchanged lines hidden (view full) --- 2350/* 2351 * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original 2352 * type ID. 2353 */ 2354static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id) 2355{ 2356 __u32 orig_type_id = type_id; 2357 | 2307 if (btf_equal_common(t, cand)) { 2308 new_id = cand_id; 2309 break; 2310 } 2311 } 2312 break; 2313 2314 default: --- 42 unchanged lines hidden (view full) --- 2357/* 2358 * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original 2359 * type ID. 2360 */ 2361static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id) 2362{ 2363 __u32 orig_type_id = type_id; 2364 |
2358 if (!btf_is_fwd(d->btf->types[type_id])) | 2365 if (!btf_is_fwd(btf__type_by_id(d->btf, type_id))) |
2359 return type_id; 2360 2361 while (is_type_mapped(d, type_id) && d->map[type_id] != type_id) 2362 type_id = d->map[type_id]; 2363 | 2366 return type_id; 2367 2368 while (is_type_mapped(d, type_id) && d->map[type_id] != type_id) 2369 type_id = d->map[type_id]; 2370 |
2364 if (!btf_is_fwd(d->btf->types[type_id])) | 2371 if (!btf_is_fwd(btf__type_by_id(d->btf, type_id))) |
2365 return type_id; 2366 2367 return orig_type_id; 2368} 2369 2370 2371static inline __u16 btf_fwd_kind(struct btf_type *t) 2372{ --- 111 unchanged lines hidden (view full) --- 2484 2485 hypot_type_id = d->hypot_map[canon_id]; 2486 if (hypot_type_id <= BTF_MAX_NR_TYPES) 2487 return hypot_type_id == cand_id; 2488 2489 if (btf_dedup_hypot_map_add(d, canon_id, cand_id)) 2490 return -ENOMEM; 2491 | 2372 return type_id; 2373 2374 return orig_type_id; 2375} 2376 2377 2378static inline __u16 btf_fwd_kind(struct btf_type *t) 2379{ --- 111 unchanged lines hidden (view full) --- 2491 2492 hypot_type_id = d->hypot_map[canon_id]; 2493 if (hypot_type_id <= BTF_MAX_NR_TYPES) 2494 return hypot_type_id == cand_id; 2495 2496 if (btf_dedup_hypot_map_add(d, canon_id, cand_id)) 2497 return -ENOMEM; 2498 |
2492 cand_type = d->btf->types[cand_id]; 2493 canon_type = d->btf->types[canon_id]; | 2499 cand_type = btf_type_by_id(d->btf, cand_id); 2500 canon_type = btf_type_by_id(d->btf, canon_id); |
2494 cand_kind = btf_kind(cand_type); 2495 canon_kind = btf_kind(canon_type); 2496 2497 if (cand_type->name_off != canon_type->name_off) 2498 return 0; 2499 2500 /* FWD <--> STRUCT/UNION equivalence check, if enabled */ 2501 if (!d->opts.dont_resolve_fwds --- 134 unchanged lines hidden (view full) --- 2636 __u32 t_id, c_id; 2637 int i; 2638 2639 for (i = 0; i < d->hypot_cnt; i++) { 2640 cand_type_id = d->hypot_list[i]; 2641 targ_type_id = d->hypot_map[cand_type_id]; 2642 t_id = resolve_type_id(d, targ_type_id); 2643 c_id = resolve_type_id(d, cand_type_id); | 2501 cand_kind = btf_kind(cand_type); 2502 canon_kind = btf_kind(canon_type); 2503 2504 if (cand_type->name_off != canon_type->name_off) 2505 return 0; 2506 2507 /* FWD <--> STRUCT/UNION equivalence check, if enabled */ 2508 if (!d->opts.dont_resolve_fwds --- 134 unchanged lines hidden (view full) --- 2643 __u32 t_id, c_id; 2644 int i; 2645 2646 for (i = 0; i < d->hypot_cnt; i++) { 2647 cand_type_id = d->hypot_list[i]; 2648 targ_type_id = d->hypot_map[cand_type_id]; 2649 t_id = resolve_type_id(d, targ_type_id); 2650 c_id = resolve_type_id(d, cand_type_id); |
2644 t_kind = btf_kind(d->btf->types[t_id]); 2645 c_kind = btf_kind(d->btf->types[c_id]); | 2651 t_kind = btf_kind(btf__type_by_id(d->btf, t_id)); 2652 c_kind = btf_kind(btf__type_by_id(d->btf, c_id)); |
2646 /* 2647 * Resolve FWD into STRUCT/UNION. 2648 * It's ok to resolve FWD into STRUCT/UNION that's not yet 2649 * mapped to canonical representative (as opposed to 2650 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because 2651 * eventually that struct is going to be mapped and all resolved 2652 * FWDs will automatically resolve to correct canonical 2653 * representative. This will happen before ref type deduping, --- 51 unchanged lines hidden (view full) --- 2705 __u32 new_id = type_id; 2706 __u16 kind; 2707 long h; 2708 2709 /* already deduped or is in process of deduping (loop detected) */ 2710 if (d->map[type_id] <= BTF_MAX_NR_TYPES) 2711 return 0; 2712 | 2653 /* 2654 * Resolve FWD into STRUCT/UNION. 2655 * It's ok to resolve FWD into STRUCT/UNION that's not yet 2656 * mapped to canonical representative (as opposed to 2657 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because 2658 * eventually that struct is going to be mapped and all resolved 2659 * FWDs will automatically resolve to correct canonical 2660 * representative. This will happen before ref type deduping, --- 51 unchanged lines hidden (view full) --- 2712 __u32 new_id = type_id; 2713 __u16 kind; 2714 long h; 2715 2716 /* already deduped or is in process of deduping (loop detected) */ 2717 if (d->map[type_id] <= BTF_MAX_NR_TYPES) 2718 return 0; 2719 |
2713 t = d->btf->types[type_id]; | 2720 t = btf_type_by_id(d->btf, type_id); |
2714 kind = btf_kind(t); 2715 2716 if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION) 2717 return 0; 2718 2719 h = btf_hash_struct(t); 2720 for_each_dedup_cand(d, hash_entry, h) { 2721 __u32 cand_id = (__u32)(long)hash_entry->value; --- 4 unchanged lines hidden (view full) --- 2726 * btf_shallow_equal_struct() internally when checking two 2727 * structs (unions) for equivalence, we need to guard here 2728 * from picking matching FWD type as a dedup candidate. 2729 * This can happen due to hash collision. In such case just 2730 * relying on btf_dedup_is_equiv() would lead to potentially 2731 * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because 2732 * FWD and compatible STRUCT/UNION are considered equivalent. 2733 */ | 2721 kind = btf_kind(t); 2722 2723 if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION) 2724 return 0; 2725 2726 h = btf_hash_struct(t); 2727 for_each_dedup_cand(d, hash_entry, h) { 2728 __u32 cand_id = (__u32)(long)hash_entry->value; --- 4 unchanged lines hidden (view full) --- 2733 * btf_shallow_equal_struct() internally when checking two 2734 * structs (unions) for equivalence, we need to guard here 2735 * from picking matching FWD type as a dedup candidate. 2736 * This can happen due to hash collision. In such case just 2737 * relying on btf_dedup_is_equiv() would lead to potentially 2738 * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because 2739 * FWD and compatible STRUCT/UNION are considered equivalent. 2740 */ |
2734 cand_type = d->btf->types[cand_id]; | 2741 cand_type = btf_type_by_id(d->btf, cand_id); |
2735 if (!btf_shallow_equal_struct(t, cand_type)) 2736 continue; 2737 2738 btf_dedup_clear_hypot_map(d); 2739 eq = btf_dedup_is_equiv(d, type_id, cand_id); 2740 if (eq < 0) 2741 return eq; 2742 if (!eq) --- 55 unchanged lines hidden (view full) --- 2798 int ref_type_id; 2799 long h; 2800 2801 if (d->map[type_id] == BTF_IN_PROGRESS_ID) 2802 return -ELOOP; 2803 if (d->map[type_id] <= BTF_MAX_NR_TYPES) 2804 return resolve_type_id(d, type_id); 2805 | 2742 if (!btf_shallow_equal_struct(t, cand_type)) 2743 continue; 2744 2745 btf_dedup_clear_hypot_map(d); 2746 eq = btf_dedup_is_equiv(d, type_id, cand_id); 2747 if (eq < 0) 2748 return eq; 2749 if (!eq) --- 55 unchanged lines hidden (view full) --- 2805 int ref_type_id; 2806 long h; 2807 2808 if (d->map[type_id] == BTF_IN_PROGRESS_ID) 2809 return -ELOOP; 2810 if (d->map[type_id] <= BTF_MAX_NR_TYPES) 2811 return resolve_type_id(d, type_id); 2812 |
2806 t = d->btf->types[type_id]; | 2813 t = btf_type_by_id(d->btf, type_id); |
2807 d->map[type_id] = BTF_IN_PROGRESS_ID; 2808 2809 switch (btf_kind(t)) { 2810 case BTF_KIND_CONST: 2811 case BTF_KIND_VOLATILE: 2812 case BTF_KIND_RESTRICT: 2813 case BTF_KIND_PTR: 2814 case BTF_KIND_TYPEDEF: 2815 case BTF_KIND_FUNC: 2816 ref_type_id = btf_dedup_ref_type(d, t->type); 2817 if (ref_type_id < 0) 2818 return ref_type_id; 2819 t->type = ref_type_id; 2820 2821 h = btf_hash_common(t); 2822 for_each_dedup_cand(d, hash_entry, h) { 2823 cand_id = (__u32)(long)hash_entry->value; | 2814 d->map[type_id] = BTF_IN_PROGRESS_ID; 2815 2816 switch (btf_kind(t)) { 2817 case BTF_KIND_CONST: 2818 case BTF_KIND_VOLATILE: 2819 case BTF_KIND_RESTRICT: 2820 case BTF_KIND_PTR: 2821 case BTF_KIND_TYPEDEF: 2822 case BTF_KIND_FUNC: 2823 ref_type_id = btf_dedup_ref_type(d, t->type); 2824 if (ref_type_id < 0) 2825 return ref_type_id; 2826 t->type = ref_type_id; 2827 2828 h = btf_hash_common(t); 2829 for_each_dedup_cand(d, hash_entry, h) { 2830 cand_id = (__u32)(long)hash_entry->value; |
2824 cand = d->btf->types[cand_id]; | 2831 cand = btf_type_by_id(d->btf, cand_id); |
2825 if (btf_equal_common(t, cand)) { 2826 new_id = cand_id; 2827 break; 2828 } 2829 } 2830 break; 2831 2832 case BTF_KIND_ARRAY: { --- 7 unchanged lines hidden (view full) --- 2840 ref_type_id = btf_dedup_ref_type(d, info->index_type); 2841 if (ref_type_id < 0) 2842 return ref_type_id; 2843 info->index_type = ref_type_id; 2844 2845 h = btf_hash_array(t); 2846 for_each_dedup_cand(d, hash_entry, h) { 2847 cand_id = (__u32)(long)hash_entry->value; | 2832 if (btf_equal_common(t, cand)) { 2833 new_id = cand_id; 2834 break; 2835 } 2836 } 2837 break; 2838 2839 case BTF_KIND_ARRAY: { --- 7 unchanged lines hidden (view full) --- 2847 ref_type_id = btf_dedup_ref_type(d, info->index_type); 2848 if (ref_type_id < 0) 2849 return ref_type_id; 2850 info->index_type = ref_type_id; 2851 2852 h = btf_hash_array(t); 2853 for_each_dedup_cand(d, hash_entry, h) { 2854 cand_id = (__u32)(long)hash_entry->value; |
2848 cand = d->btf->types[cand_id]; | 2855 cand = btf_type_by_id(d->btf, cand_id); |
2849 if (btf_equal_array(t, cand)) { 2850 new_id = cand_id; 2851 break; 2852 } 2853 } 2854 break; 2855 } 2856 --- 15 unchanged lines hidden (view full) --- 2872 return ref_type_id; 2873 param->type = ref_type_id; 2874 param++; 2875 } 2876 2877 h = btf_hash_fnproto(t); 2878 for_each_dedup_cand(d, hash_entry, h) { 2879 cand_id = (__u32)(long)hash_entry->value; | 2856 if (btf_equal_array(t, cand)) { 2857 new_id = cand_id; 2858 break; 2859 } 2860 } 2861 break; 2862 } 2863 --- 15 unchanged lines hidden (view full) --- 2879 return ref_type_id; 2880 param->type = ref_type_id; 2881 param++; 2882 } 2883 2884 h = btf_hash_fnproto(t); 2885 for_each_dedup_cand(d, hash_entry, h) { 2886 cand_id = (__u32)(long)hash_entry->value; |
2880 cand = d->btf->types[cand_id]; | 2887 cand = btf_type_by_id(d->btf, cand_id); |
2881 if (btf_equal_fnproto(t, cand)) { 2882 new_id = cand_id; 2883 break; 2884 } 2885 } 2886 break; 2887 } 2888 --- 31 unchanged lines hidden (view full) --- 2920 * canonical ones layed out sequentially in memory by copying them over 2921 * duplicates. During compaction btf_dedup->hypot_map array is reused to store 2922 * a map from original type ID to a new compacted type ID, which will be used 2923 * during next phase to "fix up" type IDs, referenced from struct/union and 2924 * reference types. 2925 */ 2926static int btf_dedup_compact_types(struct btf_dedup *d) 2927{ | 2888 if (btf_equal_fnproto(t, cand)) { 2889 new_id = cand_id; 2890 break; 2891 } 2892 } 2893 break; 2894 } 2895 --- 31 unchanged lines hidden (view full) --- 2927 * canonical ones layed out sequentially in memory by copying them over 2928 * duplicates. During compaction btf_dedup->hypot_map array is reused to store 2929 * a map from original type ID to a new compacted type ID, which will be used 2930 * during next phase to "fix up" type IDs, referenced from struct/union and 2931 * reference types. 2932 */ 2933static int btf_dedup_compact_types(struct btf_dedup *d) 2934{ |
2928 struct btf_type **new_types; | 2935 __u32 *new_offs; |
2929 __u32 next_type_id = 1; | 2936 __u32 next_type_id = 1; |
2930 char *types_start, *p; | 2937 void *p; |
2931 int i, len; 2932 2933 /* we are going to reuse hypot_map to store compaction remapping */ 2934 d->hypot_map[0] = 0; 2935 for (i = 1; i <= d->btf->nr_types; i++) 2936 d->hypot_map[i] = BTF_UNPROCESSED_ID; 2937 | 2938 int i, len; 2939 2940 /* we are going to reuse hypot_map to store compaction remapping */ 2941 d->hypot_map[0] = 0; 2942 for (i = 1; i <= d->btf->nr_types; i++) 2943 d->hypot_map[i] = BTF_UNPROCESSED_ID; 2944 |
2938 types_start = d->btf->nohdr_data + d->btf->hdr->type_off; 2939 p = types_start; | 2945 p = d->btf->types_data; |
2940 2941 for (i = 1; i <= d->btf->nr_types; i++) { 2942 if (d->map[i] != i) 2943 continue; 2944 | 2946 2947 for (i = 1; i <= d->btf->nr_types; i++) { 2948 if (d->map[i] != i) 2949 continue; 2950 |
2945 len = btf_type_size(d->btf->types[i]); | 2951 len = btf_type_size(btf__type_by_id(d->btf, i)); |
2946 if (len < 0) 2947 return len; 2948 | 2952 if (len < 0) 2953 return len; 2954 |
2949 memmove(p, d->btf->types[i], len); | 2955 memmove(p, btf__type_by_id(d->btf, i), len); |
2950 d->hypot_map[i] = next_type_id; | 2956 d->hypot_map[i] = next_type_id; |
2951 d->btf->types[next_type_id] = (struct btf_type *)p; | 2957 d->btf->type_offs[next_type_id] = p - d->btf->types_data; |
2952 p += len; 2953 next_type_id++; 2954 } 2955 2956 /* shrink struct btf's internal types index and update btf_header */ 2957 d->btf->nr_types = next_type_id - 1; | 2958 p += len; 2959 next_type_id++; 2960 } 2961 2962 /* shrink struct btf's internal types index and update btf_header */ 2963 d->btf->nr_types = next_type_id - 1; |
2958 d->btf->types_size = d->btf->nr_types; 2959 d->btf->hdr->type_len = p - types_start; 2960 new_types = realloc(d->btf->types, 2961 (1 + d->btf->nr_types) * sizeof(struct btf_type *)); 2962 if (!new_types) | 2964 d->btf->type_offs_cap = d->btf->nr_types + 1; 2965 d->btf->hdr->type_len = p - d->btf->types_data; 2966 new_offs = libbpf_reallocarray(d->btf->type_offs, d->btf->type_offs_cap, 2967 sizeof(*new_offs)); 2968 if (!new_offs) |
2963 return -ENOMEM; | 2969 return -ENOMEM; |
2964 d->btf->types = new_types; | 2970 d->btf->type_offs = new_offs; |
2965 2966 /* make sure string section follows type information without gaps */ | 2971 2972 /* make sure string section follows type information without gaps */ |
2967 d->btf->hdr->str_off = p - (char *)d->btf->nohdr_data; | 2973 d->btf->hdr->str_off = p - d->btf->nohdr_data; |
2968 memmove(p, d->btf->strings, d->btf->hdr->str_len); 2969 d->btf->strings = p; 2970 p += d->btf->hdr->str_len; 2971 | 2974 memmove(p, d->btf->strings, d->btf->hdr->str_len); 2975 d->btf->strings = p; 2976 p += d->btf->hdr->str_len; 2977 |
2972 d->btf->data_size = p - (char *)d->btf->data; | 2978 d->btf->data_size = p - d->btf->data; |
2973 return 0; 2974} 2975 2976/* 2977 * Figure out final (deduplicated and compacted) type ID for provided original 2978 * `type_id` by first resolving it into corresponding canonical type ID and 2979 * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map, 2980 * which is populated during compaction phase. --- 16 unchanged lines hidden (view full) --- 2997 * differ from original ones. The map from original to a corresponding 2998 * deduped type ID is stored in btf_dedup->hypot_map and is populated during 2999 * compaction phase. During remapping phase we are rewriting all type IDs 3000 * referenced from any BTF type (e.g., struct fields, func proto args, etc) to 3001 * their final deduped type IDs. 3002 */ 3003static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id) 3004{ | 2979 return 0; 2980} 2981 2982/* 2983 * Figure out final (deduplicated and compacted) type ID for provided original 2984 * `type_id` by first resolving it into corresponding canonical type ID and 2985 * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map, 2986 * which is populated during compaction phase. --- 16 unchanged lines hidden (view full) --- 3003 * differ from original ones. The map from original to a corresponding 3004 * deduped type ID is stored in btf_dedup->hypot_map and is populated during 3005 * compaction phase. During remapping phase we are rewriting all type IDs 3006 * referenced from any BTF type (e.g., struct fields, func proto args, etc) to 3007 * their final deduped type IDs. 3008 */ 3009static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id) 3010{ |
3005 struct btf_type *t = d->btf->types[type_id]; | 3011 struct btf_type *t = btf_type_by_id(d->btf, type_id); |
3006 int i, r; 3007 3008 switch (btf_kind(t)) { 3009 case BTF_KIND_INT: 3010 case BTF_KIND_ENUM: 3011 break; 3012 3013 case BTF_KIND_FWD: --- 144 unchanged lines hidden --- | 3012 int i, r; 3013 3014 switch (btf_kind(t)) { 3015 case BTF_KIND_INT: 3016 case BTF_KIND_ENUM: 3017 break; 3018 3019 case BTF_KIND_FWD: --- 144 unchanged lines hidden --- |
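The core of the right-hand version above is the switch from an array of `struct btf_type *` pointers (`btf->types`) to a `__u32 *type_offs` array of byte offsets into `types_data`, with type ID 0 still resolving to the static `btf_void` and lookups going through the new internal `btf_type_by_id()`. Below is a minimal, self-contained sketch of that indexing scheme, not libbpf's actual code: the `toy_` names are invented for illustration, the growth policy is simplified, and plain `reallocarray(3)` stands in for libbpf's internal `libbpf_reallocarray()` wrapper.

```c
#define _GNU_SOURCE          /* for reallocarray() in glibc */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

/* Stand-in for struct btf_type; only the indexing concept matters here. */
struct toy_btf_type {
	uint32_t name_off;
	uint32_t info;
	uint32_t size;
};

static struct toy_btf_type toy_void;     /* type ID 0 is the implicit "void" type */

struct toy_btf {
	void *types_data;        /* raw type section bytes, as loaded from a .BTF blob */
	uint32_t *type_offs;     /* type_offs[id] = byte offset of type `id` in types_data */
	uint32_t type_offs_cap;  /* allocated entries in type_offs */
	uint32_t nr_types;       /* number of real types; valid IDs are 1..nr_types */
};

/* Record the offset of the next parsed type, growing the index on demand.
 * Slot 0 is reserved: ID 0 never goes through the offset array. */
int toy_add_type_off(struct toy_btf *btf, uint32_t type_off)
{
	if (btf->nr_types + 2 > btf->type_offs_cap) {
		uint32_t new_cap = btf->type_offs_cap ? btf->type_offs_cap * 2 : 16;
		uint32_t *new_offs;

		/* reallocarray() rejects new_cap * sizeof(*new_offs) overflow */
		new_offs = reallocarray(btf->type_offs, new_cap, sizeof(*new_offs));
		if (!new_offs)
			return -ENOMEM;
		new_offs[0] = UINT32_MAX;  /* "void" is handled specially, never indexed */
		btf->type_offs = new_offs;
		btf->type_offs_cap = new_cap;
	}
	btf->type_offs[btf->nr_types + 1] = type_off;
	btf->nr_types++;
	return 0;
}

/* Lookup becomes base pointer + stored offset instead of a pointer-array load. */
struct toy_btf_type *toy_type_by_id(struct toy_btf *btf, uint32_t type_id)
{
	if (type_id == 0)
		return &toy_void;
	if (type_id > btf->nr_types)
		return NULL;
	return (struct toy_btf_type *)((char *)btf->types_data + btf->type_offs[type_id]);
}

int main(void)
{
	/* Pretend types_data holds two 12-byte "types" back to back. */
	static unsigned char blob[2 * sizeof(struct toy_btf_type)];
	struct toy_btf btf = { .types_data = blob };

	toy_add_type_off(&btf, 0);
	toy_add_type_off(&btf, sizeof(struct toy_btf_type));
	return toy_type_by_id(&btf, 2) == (void *)(blob + sizeof(struct toy_btf_type)) ? 0 : 1;
}
```

Because the index stores offsets relative to `types_data` rather than absolute pointers, entries stay meaningful whenever the underlying buffer is copied or compacted, which is why the compaction path above only has to write `p - d->btf->types_data` into `type_offs` rather than patch pointers.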
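Several hunks in the new version also replace bare `realloc(ptr, sizeof(elem) * n)` calls with `libbpf_reallocarray(ptr, n, sizeof(elem))`. The point of that pattern is the multiplication-overflow check. The snippet below is a hedged sketch of such a helper, not libbpf's actual implementation; the name `sketch_reallocarray` is invented here, and the real wrapper in libbpf_internal.h may differ in detail.

```c
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

void *sketch_reallocarray(void *ptr, size_t nmemb, size_t size)
{
	/* Reject nmemb * size overflow instead of silently wrapping around,
	 * which is the whole point of preferring this over plain realloc(). */
	if (size && nmemb > SIZE_MAX / size) {
		errno = ENOMEM;
		return NULL;
	}
	return realloc(ptr, nmemb * size);
}
```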