xref: /openbmc/linux/tools/lib/bpf/btf.c (revision b03afaa8)
1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 /* Copyright (c) 2018 Facebook */
3 
4 #include <endian.h>
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <string.h>
8 #include <fcntl.h>
9 #include <unistd.h>
10 #include <errno.h>
11 #include <sys/utsname.h>
12 #include <sys/param.h>
13 #include <sys/stat.h>
14 #include <linux/kernel.h>
15 #include <linux/err.h>
16 #include <linux/btf.h>
17 #include <gelf.h>
18 #include "btf.h"
19 #include "bpf.h"
20 #include "libbpf.h"
21 #include "libbpf_internal.h"
22 #include "hashmap.h"
23 
24 /* make sure libbpf doesn't use kernel-only integer typedefs */
25 #pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
26 
27 #define BTF_MAX_NR_TYPES 0x7fffffffU
28 #define BTF_MAX_STR_OFFSET 0x7fffffffU
29 
30 static struct btf_type btf_void;
31 
32 struct btf {
33 	union {
34 		struct btf_header *hdr;
35 		void *data;
36 	};
37 	struct btf_type **types;
38 	const char *strings;
39 	void *nohdr_data;
40 	__u32 nr_types;
41 	__u32 types_size;
42 	__u32 data_size;
43 	int fd;
44 };
45 
46 static inline __u64 ptr_to_u64(const void *ptr)
47 {
48 	return (__u64) (unsigned long) ptr;
49 }
50 
51 static int btf_add_type(struct btf *btf, struct btf_type *t)
52 {
53 	if (btf->types_size - btf->nr_types < 2) {
54 		struct btf_type **new_types;
55 		__u32 expand_by, new_size;
56 
57 		if (btf->types_size == BTF_MAX_NR_TYPES)
58 			return -E2BIG;
59 
60 		expand_by = max(btf->types_size >> 2, 16U);
61 		new_size = min(BTF_MAX_NR_TYPES, btf->types_size + expand_by);
62 
63 		new_types = realloc(btf->types, sizeof(*new_types) * new_size);
64 		if (!new_types)
65 			return -ENOMEM;
66 
67 		if (btf->nr_types == 0)
68 			new_types[0] = &btf_void;
69 
70 		btf->types = new_types;
71 		btf->types_size = new_size;
72 	}
73 
74 	btf->types[++(btf->nr_types)] = t;
75 
76 	return 0;
77 }
78 
79 static int btf_parse_hdr(struct btf *btf)
80 {
81 	const struct btf_header *hdr = btf->hdr;
82 	__u32 meta_left;
83 
84 	if (btf->data_size < sizeof(struct btf_header)) {
85 		pr_debug("BTF header not found\n");
86 		return -EINVAL;
87 	}
88 
89 	if (hdr->magic != BTF_MAGIC) {
90 		pr_debug("Invalid BTF magic:%x\n", hdr->magic);
91 		return -EINVAL;
92 	}
93 
94 	if (hdr->version != BTF_VERSION) {
95 		pr_debug("Unsupported BTF version:%u\n", hdr->version);
96 		return -ENOTSUP;
97 	}
98 
99 	if (hdr->flags) {
100 		pr_debug("Unsupported BTF flags:%x\n", hdr->flags);
101 		return -ENOTSUP;
102 	}
103 
104 	meta_left = btf->data_size - sizeof(*hdr);
105 	if (!meta_left) {
106 		pr_debug("BTF has no data\n");
107 		return -EINVAL;
108 	}
109 
110 	if (meta_left < hdr->type_off) {
111 		pr_debug("Invalid BTF type section offset:%u\n", hdr->type_off);
112 		return -EINVAL;
113 	}
114 
115 	if (meta_left < hdr->str_off) {
116 		pr_debug("Invalid BTF string section offset:%u\n", hdr->str_off);
117 		return -EINVAL;
118 	}
119 
120 	if (hdr->type_off >= hdr->str_off) {
121 		pr_debug("BTF type section offset >= string section offset. No type?\n");
122 		return -EINVAL;
123 	}
124 
125 	if (hdr->type_off & 0x03) {
126 		pr_debug("BTF type section is not aligned to 4 bytes\n");
127 		return -EINVAL;
128 	}
129 
130 	btf->nohdr_data = btf->hdr + 1;
131 
132 	return 0;
133 }
134 
135 static int btf_parse_str_sec(struct btf *btf)
136 {
137 	const struct btf_header *hdr = btf->hdr;
138 	const char *start = btf->nohdr_data + hdr->str_off;
139 	const char *end = start + btf->hdr->str_len;
140 
141 	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET ||
142 	    start[0] || end[-1]) {
143 		pr_debug("Invalid BTF string section\n");
144 		return -EINVAL;
145 	}
146 
147 	btf->strings = start;
148 
149 	return 0;
150 }
151 
152 static int btf_type_size(struct btf_type *t)
153 {
154 	int base_size = sizeof(struct btf_type);
155 	__u16 vlen = btf_vlen(t);
156 
157 	switch (btf_kind(t)) {
158 	case BTF_KIND_FWD:
159 	case BTF_KIND_CONST:
160 	case BTF_KIND_VOLATILE:
161 	case BTF_KIND_RESTRICT:
162 	case BTF_KIND_PTR:
163 	case BTF_KIND_TYPEDEF:
164 	case BTF_KIND_FUNC:
165 		return base_size;
166 	case BTF_KIND_INT:
167 		return base_size + sizeof(__u32);
168 	case BTF_KIND_ENUM:
169 		return base_size + vlen * sizeof(struct btf_enum);
170 	case BTF_KIND_ARRAY:
171 		return base_size + sizeof(struct btf_array);
172 	case BTF_KIND_STRUCT:
173 	case BTF_KIND_UNION:
174 		return base_size + vlen * sizeof(struct btf_member);
175 	case BTF_KIND_FUNC_PROTO:
176 		return base_size + vlen * sizeof(struct btf_param);
177 	case BTF_KIND_VAR:
178 		return base_size + sizeof(struct btf_var);
179 	case BTF_KIND_DATASEC:
180 		return base_size + vlen * sizeof(struct btf_var_secinfo);
181 	default:
182 		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
183 		return -EINVAL;
184 	}
185 }
186 
187 static int btf_parse_type_sec(struct btf *btf)
188 {
189 	struct btf_header *hdr = btf->hdr;
190 	void *nohdr_data = btf->nohdr_data;
191 	void *next_type = nohdr_data + hdr->type_off;
192 	void *end_type = nohdr_data + hdr->str_off;
193 
194 	while (next_type < end_type) {
195 		struct btf_type *t = next_type;
196 		int type_size;
197 		int err;
198 
199 		type_size = btf_type_size(t);
200 		if (type_size < 0)
201 			return type_size;
202 		next_type += type_size;
203 		err = btf_add_type(btf, t);
204 		if (err)
205 			return err;
206 	}
207 
208 	return 0;
209 }
210 
211 __u32 btf__get_nr_types(const struct btf *btf)
212 {
213 	return btf->nr_types;
214 }
215 
216 const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
217 {
218 	if (type_id > btf->nr_types)
219 		return NULL;
220 
221 	return btf->types[type_id];
222 }
223 
224 static bool btf_type_is_void(const struct btf_type *t)
225 {
226 	return t == &btf_void || btf_is_fwd(t);
227 }
228 
229 static bool btf_type_is_void_or_null(const struct btf_type *t)
230 {
231 	return !t || btf_type_is_void(t);
232 }
233 
234 #define MAX_RESOLVE_DEPTH 32
235 
236 __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
237 {
238 	const struct btf_array *array;
239 	const struct btf_type *t;
240 	__u32 nelems = 1;
241 	__s64 size = -1;
242 	int i;
243 
244 	t = btf__type_by_id(btf, type_id);
245 	for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t);
246 	     i++) {
247 		switch (btf_kind(t)) {
248 		case BTF_KIND_INT:
249 		case BTF_KIND_STRUCT:
250 		case BTF_KIND_UNION:
251 		case BTF_KIND_ENUM:
252 		case BTF_KIND_DATASEC:
253 			size = t->size;
254 			goto done;
255 		case BTF_KIND_PTR:
256 			size = sizeof(void *);
257 			goto done;
258 		case BTF_KIND_TYPEDEF:
259 		case BTF_KIND_VOLATILE:
260 		case BTF_KIND_CONST:
261 		case BTF_KIND_RESTRICT:
262 		case BTF_KIND_VAR:
263 			type_id = t->type;
264 			break;
265 		case BTF_KIND_ARRAY:
266 			array = btf_array(t);
267 			if (nelems && array->nelems > UINT32_MAX / nelems)
268 				return -E2BIG;
269 			nelems *= array->nelems;
270 			type_id = array->type;
271 			break;
272 		default:
273 			return -EINVAL;
274 		}
275 
276 		t = btf__type_by_id(btf, type_id);
277 	}
278 
279 done:
280 	if (size < 0)
281 		return -EINVAL;
282 	if (nelems && size > UINT32_MAX / nelems)
283 		return -E2BIG;
284 
285 	return nelems * size;
286 }
287 
288 int btf__align_of(const struct btf *btf, __u32 id)
289 {
290 	const struct btf_type *t = btf__type_by_id(btf, id);
291 	__u16 kind = btf_kind(t);
292 
293 	switch (kind) {
294 	case BTF_KIND_INT:
295 	case BTF_KIND_ENUM:
296 		return min(sizeof(void *), (size_t)t->size);
297 	case BTF_KIND_PTR:
298 		return sizeof(void *);
299 	case BTF_KIND_TYPEDEF:
300 	case BTF_KIND_VOLATILE:
301 	case BTF_KIND_CONST:
302 	case BTF_KIND_RESTRICT:
303 		return btf__align_of(btf, t->type);
304 	case BTF_KIND_ARRAY:
305 		return btf__align_of(btf, btf_array(t)->type);
306 	case BTF_KIND_STRUCT:
307 	case BTF_KIND_UNION: {
308 		const struct btf_member *m = btf_members(t);
309 		__u16 vlen = btf_vlen(t);
310 		int i, max_align = 1, align;
311 
312 		for (i = 0; i < vlen; i++, m++) {
313 			align = btf__align_of(btf, m->type);
314 			if (align <= 0)
315 				return align;
316 			max_align = max(max_align, align);
317 		}
318 
319 		return max_align;
320 	}
321 	default:
322 		pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t));
323 		return 0;
324 	}
325 }
326 
327 int btf__resolve_type(const struct btf *btf, __u32 type_id)
328 {
329 	const struct btf_type *t;
330 	int depth = 0;
331 
332 	t = btf__type_by_id(btf, type_id);
333 	while (depth < MAX_RESOLVE_DEPTH &&
334 	       !btf_type_is_void_or_null(t) &&
335 	       (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) {
336 		type_id = t->type;
337 		t = btf__type_by_id(btf, type_id);
338 		depth++;
339 	}
340 
341 	if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
342 		return -EINVAL;
343 
344 	return type_id;
345 }
346 
347 __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
348 {
349 	__u32 i;
350 
351 	if (!strcmp(type_name, "void"))
352 		return 0;
353 
354 	for (i = 1; i <= btf->nr_types; i++) {
355 		const struct btf_type *t = btf->types[i];
356 		const char *name = btf__name_by_offset(btf, t->name_off);
357 
358 		if (name && !strcmp(type_name, name))
359 			return i;
360 	}
361 
362 	return -ENOENT;
363 }
364 
365 __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
366 			     __u32 kind)
367 {
368 	__u32 i;
369 
370 	if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
371 		return 0;
372 
373 	for (i = 1; i <= btf->nr_types; i++) {
374 		const struct btf_type *t = btf->types[i];
375 		const char *name;
376 
377 		if (btf_kind(t) != kind)
378 			continue;
379 		name = btf__name_by_offset(btf, t->name_off);
380 		if (name && !strcmp(type_name, name))
381 			return i;
382 	}
383 
384 	return -ENOENT;
385 }
386 
387 void btf__free(struct btf *btf)
388 {
389 	if (!btf)
390 		return;
391 
392 	if (btf->fd >= 0)
393 		close(btf->fd);
394 
395 	free(btf->data);
396 	free(btf->types);
397 	free(btf);
398 }
399 
400 struct btf *btf__new(const void *data, __u32 size)
401 {
402 	struct btf *btf;
403 	int err;
404 
405 	btf = calloc(1, sizeof(struct btf));
406 	if (!btf)
407 		return ERR_PTR(-ENOMEM);
408 
409 	btf->fd = -1;
410 
411 	btf->data = malloc(size);
412 	if (!btf->data) {
413 		err = -ENOMEM;
414 		goto done;
415 	}
416 
417 	memcpy(btf->data, data, size);
418 	btf->data_size = size;
419 
420 	err = btf_parse_hdr(btf);
421 	if (err)
422 		goto done;
423 
424 	err = btf_parse_str_sec(btf);
425 	if (err)
426 		goto done;
427 
428 	err = btf_parse_type_sec(btf);
429 
430 done:
431 	if (err) {
432 		btf__free(btf);
433 		return ERR_PTR(err);
434 	}
435 
436 	return btf;
437 }
438 
439 static bool btf_check_endianness(const GElf_Ehdr *ehdr)
440 {
441 #if __BYTE_ORDER == __LITTLE_ENDIAN
442 	return ehdr->e_ident[EI_DATA] == ELFDATA2LSB;
443 #elif __BYTE_ORDER == __BIG_ENDIAN
444 	return ehdr->e_ident[EI_DATA] == ELFDATA2MSB;
445 #else
446 # error "Unrecognized __BYTE_ORDER"
447 #endif
448 }
449 
450 struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
451 {
452 	Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
453 	int err = 0, fd = -1, idx = 0;
454 	struct btf *btf = NULL;
455 	Elf_Scn *scn = NULL;
456 	Elf *elf = NULL;
457 	GElf_Ehdr ehdr;
458 
459 	if (elf_version(EV_CURRENT) == EV_NONE) {
460 		pr_warn("failed to init libelf for %s\n", path);
461 		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
462 	}
463 
464 	fd = open(path, O_RDONLY);
465 	if (fd < 0) {
466 		err = -errno;
467 		pr_warn("failed to open %s: %s\n", path, strerror(errno));
468 		return ERR_PTR(err);
469 	}
470 
471 	err = -LIBBPF_ERRNO__FORMAT;
472 
473 	elf = elf_begin(fd, ELF_C_READ, NULL);
474 	if (!elf) {
475 		pr_warn("failed to open %s as ELF file\n", path);
476 		goto done;
477 	}
478 	if (!gelf_getehdr(elf, &ehdr)) {
479 		pr_warn("failed to get EHDR from %s\n", path);
480 		goto done;
481 	}
482 	if (!btf_check_endianness(&ehdr)) {
483 		pr_warn("non-native ELF endianness is not supported\n");
484 		goto done;
485 	}
486 	if (!elf_rawdata(elf_getscn(elf, ehdr.e_shstrndx), NULL)) {
487 		pr_warn("failed to get e_shstrndx from %s\n", path);
488 		goto done;
489 	}
490 
491 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
492 		GElf_Shdr sh;
493 		char *name;
494 
495 		idx++;
496 		if (gelf_getshdr(scn, &sh) != &sh) {
497 			pr_warn("failed to get section(%d) header from %s\n",
498 				idx, path);
499 			goto done;
500 		}
501 		name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name);
502 		if (!name) {
503 			pr_warn("failed to get section(%d) name from %s\n",
504 				idx, path);
505 			goto done;
506 		}
507 		if (strcmp(name, BTF_ELF_SEC) == 0) {
508 			btf_data = elf_getdata(scn, 0);
509 			if (!btf_data) {
510 				pr_warn("failed to get section(%d, %s) data from %s\n",
511 					idx, name, path);
512 				goto done;
513 			}
514 			continue;
515 		} else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
516 			btf_ext_data = elf_getdata(scn, 0);
517 			if (!btf_ext_data) {
518 				pr_warn("failed to get section(%d, %s) data from %s\n",
519 					idx, name, path);
520 				goto done;
521 			}
522 			continue;
523 		}
524 	}
525 
526 	err = 0;
527 
528 	if (!btf_data) {
529 		err = -ENOENT;
530 		goto done;
531 	}
532 	btf = btf__new(btf_data->d_buf, btf_data->d_size);
533 	if (IS_ERR(btf))
534 		goto done;
535 
536 	if (btf_ext && btf_ext_data) {
537 		*btf_ext = btf_ext__new(btf_ext_data->d_buf,
538 					btf_ext_data->d_size);
539 		if (IS_ERR(*btf_ext))
540 			goto done;
541 	} else if (btf_ext) {
542 		*btf_ext = NULL;
543 	}
544 done:
545 	if (elf)
546 		elf_end(elf);
547 	close(fd);
548 
549 	if (err)
550 		return ERR_PTR(err);
551 	/*
552 	 * btf is always parsed before btf_ext, so no need to clean up
553 	 * btf_ext if btf loading failed
554 	 */
555 	if (IS_ERR(btf))
556 		return btf;
557 	if (btf_ext && IS_ERR(*btf_ext)) {
558 		btf__free(btf);
559 		err = PTR_ERR(*btf_ext);
560 		return ERR_PTR(err);
561 	}
562 	return btf;
563 }
564 
565 static int compare_vsi_off(const void *_a, const void *_b)
566 {
567 	const struct btf_var_secinfo *a = _a;
568 	const struct btf_var_secinfo *b = _b;
569 
570 	return a->offset - b->offset;
571 }
572 
573 static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
574 			     struct btf_type *t)
575 {
576 	__u32 size = 0, off = 0, i, vars = btf_vlen(t);
577 	const char *name = btf__name_by_offset(btf, t->name_off);
578 	const struct btf_type *t_var;
579 	struct btf_var_secinfo *vsi;
580 	const struct btf_var *var;
581 	int ret;
582 
583 	if (!name) {
584 		pr_debug("No name found in string section for DATASEC kind.\n");
585 		return -ENOENT;
586 	}
587 
588 	/* .extern datasec size and var offsets were set correctly during
589 	 * extern collection step, so just skip straight to sorting variables
590 	 */
591 	if (t->size)
592 		goto sort_vars;
593 
594 	ret = bpf_object__section_size(obj, name, &size);
595 	if (ret || !size || (t->size && t->size != size)) {
596 		pr_debug("Invalid size for section %s: %u bytes\n", name, size);
597 		return -ENOENT;
598 	}
599 
600 	t->size = size;
601 
602 	for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
603 		t_var = btf__type_by_id(btf, vsi->type);
604 		var = btf_var(t_var);
605 
606 		if (!btf_is_var(t_var)) {
607 			pr_debug("Non-VAR type seen in section %s\n", name);
608 			return -EINVAL;
609 		}
610 
611 		if (var->linkage == BTF_VAR_STATIC)
612 			continue;
613 
614 		name = btf__name_by_offset(btf, t_var->name_off);
615 		if (!name) {
616 			pr_debug("No name found in string section for VAR kind\n");
617 			return -ENOENT;
618 		}
619 
620 		ret = bpf_object__variable_offset(obj, name, &off);
621 		if (ret) {
622 			pr_debug("No offset found in symbol table for VAR %s\n",
623 				 name);
624 			return -ENOENT;
625 		}
626 
627 		vsi->offset = off;
628 	}
629 
630 sort_vars:
631 	qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off);
632 	return 0;
633 }
634 
635 int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
636 {
637 	int err = 0;
638 	__u32 i;
639 
640 	for (i = 1; i <= btf->nr_types; i++) {
641 		struct btf_type *t = btf->types[i];
642 
643 		/* Loader needs to fix up some of the things compiler
644 		 * couldn't get its hands on while emitting BTF. These
645 		 * are section size and global variable offsets. We use
646 		 * the info from the ELF itself for this purpose.
647 		 */
648 		if (btf_is_datasec(t)) {
649 			err = btf_fixup_datasec(obj, btf, t);
650 			if (err)
651 				break;
652 		}
653 	}
654 
655 	return err;
656 }
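/*
 * Sketch of the effect of the fixup above on a hypothetical ".data" DATASEC
 * (the section name and initial values are illustrative): the compiler emits
 * the DATASEC with size 0 and zeroed variable offsets; after
 * btf__finalize_data() its size equals the ELF section size, every
 * non-static VAR's secinfo offset matches the variable's symbol-table
 * offset, and the secinfo entries are sorted by offset.
 */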
657 
658 int btf__load(struct btf *btf)
659 {
660 	__u32 log_buf_size = 0;
661 	char *log_buf = NULL;
662 	int err = 0;
663 
664 	if (btf->fd >= 0)
665 		return -EEXIST;
666 
667 retry_load:
668 	if (log_buf_size) {
669 		log_buf = malloc(log_buf_size);
670 		if (!log_buf)
671 			return -ENOMEM;
672 
673 		*log_buf = 0;
674 	}
675 
676 	btf->fd = bpf_load_btf(btf->data, btf->data_size,
677 			       log_buf, log_buf_size, false);
678 	if (btf->fd < 0) {
679 		if (!log_buf || errno == ENOSPC) {
680 			log_buf_size = max((__u32)BPF_LOG_BUF_SIZE,
681 					   log_buf_size << 1);
682 			free(log_buf);
683 			goto retry_load;
684 		}
685 
686 		err = -errno;
687 		pr_warn("Error loading BTF: %s(%d)\n", strerror(errno), errno);
688 		if (*log_buf)
689 			pr_warn("%s\n", log_buf);
690 		goto done;
691 	}
692 
693 done:
694 	free(log_buf);
695 	return err;
696 }
697 
698 int btf__fd(const struct btf *btf)
699 {
700 	return btf->fd;
701 }
702 
703 void btf__set_fd(struct btf *btf, int fd)
704 {
705 	btf->fd = fd;
706 }
707 
708 const void *btf__get_raw_data(const struct btf *btf, __u32 *size)
709 {
710 	*size = btf->data_size;
711 	return btf->data;
712 }
713 
714 const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
715 {
716 	if (offset < btf->hdr->str_len)
717 		return &btf->strings[offset];
718 	else
719 		return NULL;
720 }
721 
722 int btf__get_from_id(__u32 id, struct btf **btf)
723 {
724 	struct bpf_btf_info btf_info = { 0 };
725 	__u32 len = sizeof(btf_info);
726 	__u32 last_size;
727 	int btf_fd;
728 	void *ptr;
729 	int err;
730 
731 	err = 0;
732 	*btf = NULL;
733 	btf_fd = bpf_btf_get_fd_by_id(id);
734 	if (btf_fd < 0)
735 		return 0;
736 
737 	/* we won't know btf_size until we call bpf_obj_get_info_by_fd(), so
738 	 * let's start with a sane default - 4KiB here - and resize it only if
739 	 * bpf_obj_get_info_by_fd() needs a bigger buffer.
740 	 */
741 	btf_info.btf_size = 4096;
742 	last_size = btf_info.btf_size;
743 	ptr = malloc(last_size);
744 	if (!ptr) {
745 		err = -ENOMEM;
746 		goto exit_free;
747 	}
748 
749 	memset(ptr, 0, last_size);
750 	btf_info.btf = ptr_to_u64(ptr);
751 	err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
752 
753 	if (!err && btf_info.btf_size > last_size) {
754 		void *temp_ptr;
755 
756 		last_size = btf_info.btf_size;
757 		temp_ptr = realloc(ptr, last_size);
758 		if (!temp_ptr) {
759 			err = -ENOMEM;
760 			goto exit_free;
761 		}
762 		ptr = temp_ptr;
763 		memset(ptr, 0, last_size);
764 		btf_info.btf = ptr_to_u64(ptr);
765 		err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
766 	}
767 
768 	if (err || btf_info.btf_size > last_size) {
769 		err = errno;
770 		goto exit_free;
771 	}
772 
773 	*btf = btf__new((__u8 *)(long)btf_info.btf, btf_info.btf_size);
774 	if (IS_ERR(*btf)) {
775 		err = PTR_ERR(*btf);
776 		*btf = NULL;
777 	}
778 
779 exit_free:
780 	close(btf_fd);
781 	free(ptr);
782 
783 	return err;
784 }
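/*
 * Usage sketch (illustrative only; the id value and error handling are
 * assumptions, not part of the API):
 *
 *	struct btf *btf = NULL;
 *	int err = btf__get_from_id(btf_id, &btf);
 *
 *	if (err || !btf) {
 *		// a failed fd lookup returns 0 with *btf left NULL, so both
 *		// the return value and the btf pointer need to be checked
 *		return handle_error();
 *	}
 *	...
 *	btf__free(btf);
 */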
785 
786 int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
787 			 __u32 expected_key_size, __u32 expected_value_size,
788 			 __u32 *key_type_id, __u32 *value_type_id)
789 {
790 	const struct btf_type *container_type;
791 	const struct btf_member *key, *value;
792 	const size_t max_name = 256;
793 	char container_name[max_name];
794 	__s64 key_size, value_size;
795 	__s32 container_id;
796 
797 	if (snprintf(container_name, max_name, "____btf_map_%s", map_name) ==
798 	    max_name) {
799 		pr_warn("map:%s length of '____btf_map_%s' is too long\n",
800 			map_name, map_name);
801 		return -EINVAL;
802 	}
803 
804 	container_id = btf__find_by_name(btf, container_name);
805 	if (container_id < 0) {
806 		pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
807 			 map_name, container_name);
808 		return container_id;
809 	}
810 
811 	container_type = btf__type_by_id(btf, container_id);
812 	if (!container_type) {
813 		pr_warn("map:%s cannot find BTF type for container_id:%u\n",
814 			map_name, container_id);
815 		return -EINVAL;
816 	}
817 
818 	if (!btf_is_struct(container_type) || btf_vlen(container_type) < 2) {
819 		pr_warn("map:%s container_name:%s is an invalid container struct\n",
820 			map_name, container_name);
821 		return -EINVAL;
822 	}
823 
824 	key = btf_members(container_type);
825 	value = key + 1;
826 
827 	key_size = btf__resolve_size(btf, key->type);
828 	if (key_size < 0) {
829 		pr_warn("map:%s invalid BTF key_type_size\n", map_name);
830 		return key_size;
831 	}
832 
833 	if (expected_key_size != key_size) {
834 		pr_warn("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
835 			map_name, (__u32)key_size, expected_key_size);
836 		return -EINVAL;
837 	}
838 
839 	value_size = btf__resolve_size(btf, value->type);
840 	if (value_size < 0) {
841 		pr_warn("map:%s invalid BTF value_type_size\n", map_name);
842 		return value_size;
843 	}
844 
845 	if (expected_value_size != value_size) {
846 		pr_warn("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
847 			map_name, (__u32)value_size, expected_value_size);
848 		return -EINVAL;
849 	}
850 
851 	*key_type_id = key->type;
852 	*value_type_id = value->type;
853 
854 	return 0;
855 }
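/*
 * Illustration of the container convention checked above (an assumption
 * about how BPF program sources typically emit it, e.g. via
 * BPF_ANNOTATE_KV_PAIR; only the struct layout below is what
 * btf__get_map_kv_tids() actually relies on):
 *
 *	// for a map named "my_map" with int keys and struct my_val values
 *	struct ____btf_map_my_map {
 *		int key;
 *		struct my_val value;
 *	};
 *
 * The first member's type id is returned in *key_type_id and the second
 * member's in *value_type_id, after their resolved sizes are checked against
 * the map definition.
 */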
856 
857 struct btf_ext_sec_setup_param {
858 	__u32 off;
859 	__u32 len;
860 	__u32 min_rec_size;
861 	struct btf_ext_info *ext_info;
862 	const char *desc;
863 };
864 
865 static int btf_ext_setup_info(struct btf_ext *btf_ext,
866 			      struct btf_ext_sec_setup_param *ext_sec)
867 {
868 	const struct btf_ext_info_sec *sinfo;
869 	struct btf_ext_info *ext_info;
870 	__u32 info_left, record_size;
871 	/* The start of the info sec (including the __u32 record_size). */
872 	void *info;
873 
874 	if (ext_sec->len == 0)
875 		return 0;
876 
877 	if (ext_sec->off & 0x03) {
878 		pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
879 		     ext_sec->desc);
880 		return -EINVAL;
881 	}
882 
883 	info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
884 	info_left = ext_sec->len;
885 
886 	if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
887 		pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
888 			 ext_sec->desc, ext_sec->off, ext_sec->len);
889 		return -EINVAL;
890 	}
891 
892 	/* At least a record size */
893 	if (info_left < sizeof(__u32)) {
894 		pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
895 		return -EINVAL;
896 	}
897 
898 	/* The record size needs to meet the minimum standard */
899 	record_size = *(__u32 *)info;
900 	if (record_size < ext_sec->min_rec_size ||
901 	    record_size & 0x03) {
902 		pr_debug("%s section in .BTF.ext has invalid record size %u\n",
903 			 ext_sec->desc, record_size);
904 		return -EINVAL;
905 	}
906 
907 	sinfo = info + sizeof(__u32);
908 	info_left -= sizeof(__u32);
909 
910 	/* If no records, return failure now so .BTF.ext won't be used. */
911 	if (!info_left) {
912 		pr_debug("%s section in .BTF.ext has no records", ext_sec->desc);
913 		return -EINVAL;
914 	}
915 
916 	while (info_left) {
917 		unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
918 		__u64 total_record_size;
919 		__u32 num_records;
920 
921 		if (info_left < sec_hdrlen) {
922 			pr_debug("%s section header is not found in .BTF.ext\n",
923 			     ext_sec->desc);
924 			return -EINVAL;
925 		}
926 
927 		num_records = sinfo->num_info;
928 		if (num_records == 0) {
929 			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
930 			     ext_sec->desc);
931 			return -EINVAL;
932 		}
933 
934 		total_record_size = sec_hdrlen +
935 				    (__u64)num_records * record_size;
936 		if (info_left < total_record_size) {
937 			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
938 			     ext_sec->desc);
939 			return -EINVAL;
940 		}
941 
942 		info_left -= total_record_size;
943 		sinfo = (void *)sinfo + total_record_size;
944 	}
945 
946 	ext_info = ext_sec->ext_info;
947 	ext_info->len = ext_sec->len - sizeof(__u32);
948 	ext_info->rec_size = record_size;
949 	ext_info->info = info + sizeof(__u32);
950 
951 	return 0;
952 }
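/*
 * Layout enforced by the checks above (a sketch; field names follow
 * struct btf_ext_info_sec as used in this file):
 *
 *	__u32 record_size;		// >= min_rec_size, multiple of 4
 *	struct btf_ext_info_sec {	// repeated until the section ends
 *		__u32 sec_name_off;
 *		__u32 num_info;		// must be non-zero
 *		__u8  data[];		// num_info records of record_size bytes
 *	};
 */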
953 
954 static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
955 {
956 	struct btf_ext_sec_setup_param param = {
957 		.off = btf_ext->hdr->func_info_off,
958 		.len = btf_ext->hdr->func_info_len,
959 		.min_rec_size = sizeof(struct bpf_func_info_min),
960 		.ext_info = &btf_ext->func_info,
961 		.desc = "func_info"
962 	};
963 
964 	return btf_ext_setup_info(btf_ext, &param);
965 }
966 
967 static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
968 {
969 	struct btf_ext_sec_setup_param param = {
970 		.off = btf_ext->hdr->line_info_off,
971 		.len = btf_ext->hdr->line_info_len,
972 		.min_rec_size = sizeof(struct bpf_line_info_min),
973 		.ext_info = &btf_ext->line_info,
974 		.desc = "line_info",
975 	};
976 
977 	return btf_ext_setup_info(btf_ext, &param);
978 }
979 
980 static int btf_ext_setup_field_reloc(struct btf_ext *btf_ext)
981 {
982 	struct btf_ext_sec_setup_param param = {
983 		.off = btf_ext->hdr->field_reloc_off,
984 		.len = btf_ext->hdr->field_reloc_len,
985 		.min_rec_size = sizeof(struct bpf_field_reloc),
986 		.ext_info = &btf_ext->field_reloc_info,
987 		.desc = "field_reloc",
988 	};
989 
990 	return btf_ext_setup_info(btf_ext, &param);
991 }
992 
993 static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
994 {
995 	const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
996 
997 	if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||
998 	    data_size < hdr->hdr_len) {
999 		pr_debug("BTF.ext header not found");
1000 		return -EINVAL;
1001 	}
1002 
1003 	if (hdr->magic != BTF_MAGIC) {
1004 		pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
1005 		return -EINVAL;
1006 	}
1007 
1008 	if (hdr->version != BTF_VERSION) {
1009 		pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
1010 		return -ENOTSUP;
1011 	}
1012 
1013 	if (hdr->flags) {
1014 		pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);
1015 		return -ENOTSUP;
1016 	}
1017 
1018 	if (data_size == hdr->hdr_len) {
1019 		pr_debug("BTF.ext has no data\n");
1020 		return -EINVAL;
1021 	}
1022 
1023 	return 0;
1024 }
1025 
1026 void btf_ext__free(struct btf_ext *btf_ext)
1027 {
1028 	if (!btf_ext)
1029 		return;
1030 	free(btf_ext->data);
1031 	free(btf_ext);
1032 }
1033 
1034 struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
1035 {
1036 	struct btf_ext *btf_ext;
1037 	int err;
1038 
1039 	err = btf_ext_parse_hdr(data, size);
1040 	if (err)
1041 		return ERR_PTR(err);
1042 
1043 	btf_ext = calloc(1, sizeof(struct btf_ext));
1044 	if (!btf_ext)
1045 		return ERR_PTR(-ENOMEM);
1046 
1047 	btf_ext->data_size = size;
1048 	btf_ext->data = malloc(size);
1049 	if (!btf_ext->data) {
1050 		err = -ENOMEM;
1051 		goto done;
1052 	}
1053 	memcpy(btf_ext->data, data, size);
1054 
1055 	if (btf_ext->hdr->hdr_len <
1056 	    offsetofend(struct btf_ext_header, line_info_len))
1057 		goto done;
1058 	err = btf_ext_setup_func_info(btf_ext);
1059 	if (err)
1060 		goto done;
1061 
1062 	err = btf_ext_setup_line_info(btf_ext);
1063 	if (err)
1064 		goto done;
1065 
1066 	if (btf_ext->hdr->hdr_len <
1067 	    offsetofend(struct btf_ext_header, field_reloc_len))
1068 		goto done;
1069 	err = btf_ext_setup_field_reloc(btf_ext);
1070 	if (err)
1071 		goto done;
1072 
1073 done:
1074 	if (err) {
1075 		btf_ext__free(btf_ext);
1076 		return ERR_PTR(err);
1077 	}
1078 
1079 	return btf_ext;
1080 }
1081 
1082 const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
1083 {
1084 	*size = btf_ext->data_size;
1085 	return btf_ext->data;
1086 }
1087 
1088 static int btf_ext_reloc_info(const struct btf *btf,
1089 			      const struct btf_ext_info *ext_info,
1090 			      const char *sec_name, __u32 insns_cnt,
1091 			      void **info, __u32 *cnt)
1092 {
1093 	__u32 sec_hdrlen = sizeof(struct btf_ext_info_sec);
1094 	__u32 i, record_size, existing_len, records_len;
1095 	struct btf_ext_info_sec *sinfo;
1096 	const char *info_sec_name;
1097 	__u64 remain_len;
1098 	void *data;
1099 
1100 	record_size = ext_info->rec_size;
1101 	sinfo = ext_info->info;
1102 	remain_len = ext_info->len;
1103 	while (remain_len > 0) {
1104 		records_len = sinfo->num_info * record_size;
1105 		info_sec_name = btf__name_by_offset(btf, sinfo->sec_name_off);
1106 		if (strcmp(info_sec_name, sec_name)) {
1107 			remain_len -= sec_hdrlen + records_len;
1108 			sinfo = (void *)sinfo + sec_hdrlen + records_len;
1109 			continue;
1110 		}
1111 
1112 		existing_len = (*cnt) * record_size;
1113 		data = realloc(*info, existing_len + records_len);
1114 		if (!data)
1115 			return -ENOMEM;
1116 
1117 		memcpy(data + existing_len, sinfo->data, records_len);
1118 		/* adjust insn_off only, the rest data will be passed
1119 		 * to the kernel.
1120 		 */
1121 		for (i = 0; i < sinfo->num_info; i++) {
1122 			__u32 *insn_off;
1123 
1124 			insn_off = data + existing_len + (i * record_size);
1125 			*insn_off = *insn_off / sizeof(struct bpf_insn) +
1126 				insns_cnt;
1127 		}
1128 		*info = data;
1129 		*cnt += sinfo->num_info;
1130 		return 0;
1131 	}
1132 
1133 	return -ENOENT;
1134 }
1135 
1136 int btf_ext__reloc_func_info(const struct btf *btf,
1137 			     const struct btf_ext *btf_ext,
1138 			     const char *sec_name, __u32 insns_cnt,
1139 			     void **func_info, __u32 *cnt)
1140 {
1141 	return btf_ext_reloc_info(btf, &btf_ext->func_info, sec_name,
1142 				  insns_cnt, func_info, cnt);
1143 }
1144 
1145 int btf_ext__reloc_line_info(const struct btf *btf,
1146 			     const struct btf_ext *btf_ext,
1147 			     const char *sec_name, __u32 insns_cnt,
1148 			     void **line_info, __u32 *cnt)
1149 {
1150 	return btf_ext_reloc_info(btf, &btf_ext->line_info, sec_name,
1151 				  insns_cnt, line_info, cnt);
1152 }
1153 
1154 __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext)
1155 {
1156 	return btf_ext->func_info.rec_size;
1157 }
1158 
1159 __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext)
1160 {
1161 	return btf_ext->line_info.rec_size;
1162 }
1163 
1164 struct btf_dedup;
1165 
1166 static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
1167 				       const struct btf_dedup_opts *opts);
1168 static void btf_dedup_free(struct btf_dedup *d);
1169 static int btf_dedup_strings(struct btf_dedup *d);
1170 static int btf_dedup_prim_types(struct btf_dedup *d);
1171 static int btf_dedup_struct_types(struct btf_dedup *d);
1172 static int btf_dedup_ref_types(struct btf_dedup *d);
1173 static int btf_dedup_compact_types(struct btf_dedup *d);
1174 static int btf_dedup_remap_types(struct btf_dedup *d);
1175 
1176 /*
1177  * Deduplicate BTF types and strings.
1178  *
1179  * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF
1180  * section with all BTF type descriptors and string data. It overwrites that
1181  * memory in-place with deduplicated types and strings without any loss of
1182  * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section
1183  * is provided, all the strings referenced from .BTF.ext section are honored
1184  * and updated to point to the right offsets after deduplication.
1185  *
1186  * If function returns with error, type/string data might be garbled and should
1187  * be discarded.
1188  *
1189  * A more verbose and detailed description of both the problem btf_dedup is
1190  * solving and the solution can be found at:
1191  * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
1192  *
1193  * Problem description and justification
1194  * =====================================
1195  *
1196  * BTF type information is typically emitted either as a result of conversion
1197  * from DWARF to BTF or directly by compiler. In both cases, each compilation
1198  * unit contains information about a subset of all the types that are used
1199  * in an application. These subsets are frequently overlapping and contain a lot
1200  * of duplicated information when later concatenated together into a single
1201  * binary. This algorithm ensures that each unique type is represented by single
1202  * BTF type descriptor, greatly reducing resulting size of BTF data.
1203  *
1204  * Compilation unit isolation and subsequent duplication of data is not the only
1205  * problem. The same type hierarchy (e.g., struct and all the types that struct
1206  * references) in different compilation units can be represented in BTF to
1207  * various degrees of completeness (or, rather, incompleteness) due to
1208  * struct/union forward declarations.
1209  *
1210  * Let's take a look at an example that we'll use to better understand the
1211  * problem (and solution). Suppose we have two compilation units, each using
1212  * same `struct S`, but each of them having incomplete type information about
1213  * struct's fields:
1214  *
1215  * // CU #1:
1216  * struct S;
1217  * struct A {
1218  *	int a;
1219  *	struct A* self;
1220  *	struct S* parent;
1221  * };
1222  * struct B;
1223  * struct S {
1224  *	struct A* a_ptr;
1225  *	struct B* b_ptr;
1226  * };
1227  *
1228  * // CU #2:
1229  * struct S;
1230  * struct A;
1231  * struct B {
1232  *	int b;
1233  *	struct B* self;
1234  *	struct S* parent;
1235  * };
1236  * struct S {
1237  *	struct A* a_ptr;
1238  *	struct B* b_ptr;
1239  * };
1240  *
1241  * In case of CU #1, BTF data will know only that `struct B` exists (but no
1242  * more), but will know the complete type information about `struct A`. While
1243  * for CU #2, it will know full type information about `struct B`, but will
1244  * only know about the forward declaration of `struct A` (in BTF terms, it will
1245  * have a `BTF_KIND_FWD` type descriptor with name `A`).
1246  *
1247  * This compilation unit isolation means that it's possible that there is no
1248  * single CU with complete type information describing structs `S`, `A`, and
1249  * `B`. Also, we might get tons of duplicated and redundant type information.
1250  *
1251  * Additional complication we need to keep in mind comes from the fact that
1252  * types, in general, can form graphs containing cycles, not just DAGs.
1253  *
1254  * While the algorithm does deduplication, it also merges and resolves type
1255  * information (unless disabled through `struct btf_dedup_opts`) whenever possible.
1256  * E.g., in the example above with two compilation units having partial type
1257  * information for structs `A` and `B`, the output of algorithm will emit
1258  * a single copy of each BTF type that describes structs `A`, `B`, and `S`
1259  * (as well as type information for `int` and pointers), as if they were defined
1260  * in a single compilation unit as:
1261  *
1262  * struct A {
1263  *	int a;
1264  *	struct A* self;
1265  *	struct S* parent;
1266  * };
1267  * struct B {
1268  *	int b;
1269  *	struct B* self;
1270  *	struct S* parent;
1271  * };
1272  * struct S {
1273  *	struct A* a_ptr;
1274  *	struct B* b_ptr;
1275  * };
1276  *
1277  * Algorithm summary
1278  * =================
1279  *
1280  * Algorithm completes its work in 6 separate passes:
1281  *
1282  * 1. Strings deduplication.
1283  * 2. Primitive types deduplication (int, enum, fwd).
1284  * 3. Struct/union types deduplication.
1285  * 4. Reference types deduplication (pointers, typedefs, arrays, funcs, func
1286  *    protos, and const/volatile/restrict modifiers).
1287  * 5. Types compaction.
1288  * 6. Types remapping.
1289  *
1290  * Algorithm determines canonical type descriptor, which is a single
1291  * representative type for each truly unique type. This canonical type is the
1292  * one that will go into final deduplicated BTF type information. For
1293  * struct/unions, it is also the type that algorithm will merge additional type
1294  * information into (while resolving FWDs), as it discovers it from data in
1295  * other CUs. Each input BTF type eventually gets either mapped to itself, if
1296  * that type is canonical, or to some other type, if that type is equivalent
1297  * and was chosen as canonical representative. This mapping is stored in
1298  * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that
1299  * FWD type got resolved to.
1300  *
1301  * To facilitate fast discovery of canonical types, we also maintain canonical
1302  * index (`btf_dedup->dedup_table`), which maps type descriptor's signature hash
1303  * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types
1304  * that match that signature. With sufficiently good choice of type signature
1305  * hashing function, we can limit number of canonical types for each unique type
1306  * signature to a very small number, allowing us to find the canonical type
1307  * for any duplicated type very quickly.
1308  *
1309  * Struct/union deduplication is the most critical part and algorithm for
1310  * deduplicating structs/unions is described in greater detail in comments for
1311  * `btf_dedup_is_equiv` function.
1312  */
1313 int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
1314 	       const struct btf_dedup_opts *opts)
1315 {
1316 	struct btf_dedup *d = btf_dedup_new(btf, btf_ext, opts);
1317 	int err;
1318 
1319 	if (IS_ERR(d)) {
1320 		pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
1321 		return -EINVAL;
1322 	}
1323 
1324 	err = btf_dedup_strings(d);
1325 	if (err < 0) {
1326 		pr_debug("btf_dedup_strings failed:%d\n", err);
1327 		goto done;
1328 	}
1329 	err = btf_dedup_prim_types(d);
1330 	if (err < 0) {
1331 		pr_debug("btf_dedup_prim_types failed:%d\n", err);
1332 		goto done;
1333 	}
1334 	err = btf_dedup_struct_types(d);
1335 	if (err < 0) {
1336 		pr_debug("btf_dedup_struct_types failed:%d\n", err);
1337 		goto done;
1338 	}
1339 	err = btf_dedup_ref_types(d);
1340 	if (err < 0) {
1341 		pr_debug("btf_dedup_ref_types failed:%d\n", err);
1342 		goto done;
1343 	}
1344 	err = btf_dedup_compact_types(d);
1345 	if (err < 0) {
1346 		pr_debug("btf_dedup_compact_types failed:%d\n", err);
1347 		goto done;
1348 	}
1349 	err = btf_dedup_remap_types(d);
1350 	if (err < 0) {
1351 		pr_debug("btf_dedup_remap_types failed:%d\n", err);
1352 		goto done;
1353 	}
1354 
1355 done:
1356 	btf_dedup_free(d);
1357 	return err;
1358 }
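/*
 * Usage sketch (illustrative; the option values and error handling are
 * assumptions, not requirements of the API):
 *
 *	struct btf_dedup_opts opts = { .dont_resolve_fwds = false };
 *	int err = btf__dedup(btf, btf_ext, &opts);	// btf_ext and opts may be NULL
 *
 *	if (err)
 *		// as noted above, the in-place type/string data may be
 *		// garbled on error and should be discarded
 *		return err;
 */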
1359 
1360 #define BTF_UNPROCESSED_ID ((__u32)-1)
1361 #define BTF_IN_PROGRESS_ID ((__u32)-2)
1362 
1363 struct btf_dedup {
1364 	/* .BTF section to be deduped in-place */
1365 	struct btf *btf;
1366 	/*
1367 	 * Optional .BTF.ext section. When provided, any strings referenced
1368 	 * from it will be taken into account when deduping strings
1369 	 */
1370 	struct btf_ext *btf_ext;
1371 	/*
1372 	 * This is a map from any type's signature hash to a list of possible
1373 	 * canonical representative type candidates. Hash collisions are
1374 	 * ignored, so even types of various kinds can share same list of
1375 	 * candidates, which is fine because we rely on subsequent
1376 	 * btf_xxx_equal() checks to authoritatively verify type equality.
1377 	 */
1378 	struct hashmap *dedup_table;
1379 	/* Canonical types map */
1380 	__u32 *map;
1381 	/* Hypothetical mapping, used during type graph equivalence checks */
1382 	__u32 *hypot_map;
1383 	__u32 *hypot_list;
1384 	size_t hypot_cnt;
1385 	size_t hypot_cap;
1386 	/* Various options modifying the behavior of the algorithm */
1387 	struct btf_dedup_opts opts;
1388 };
1389 
1390 struct btf_str_ptr {
1391 	const char *str;
1392 	__u32 new_off;
1393 	bool used;
1394 };
1395 
1396 struct btf_str_ptrs {
1397 	struct btf_str_ptr *ptrs;
1398 	const char *data;
1399 	__u32 cnt;
1400 	__u32 cap;
1401 };
1402 
1403 static long hash_combine(long h, long value)
1404 {
1405 	return h * 31 + value;
1406 }
1407 
1408 #define for_each_dedup_cand(d, node, hash) \
1409 	hashmap__for_each_key_entry(d->dedup_table, node, (void *)hash)
1410 
1411 static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
1412 {
1413 	return hashmap__append(d->dedup_table,
1414 			       (void *)hash, (void *)(long)type_id);
1415 }
1416 
1417 static int btf_dedup_hypot_map_add(struct btf_dedup *d,
1418 				   __u32 from_id, __u32 to_id)
1419 {
1420 	if (d->hypot_cnt == d->hypot_cap) {
1421 		__u32 *new_list;
1422 
1423 		d->hypot_cap += max((size_t)16, d->hypot_cap / 2);
1424 		new_list = realloc(d->hypot_list, sizeof(__u32) * d->hypot_cap);
1425 		if (!new_list)
1426 			return -ENOMEM;
1427 		d->hypot_list = new_list;
1428 	}
1429 	d->hypot_list[d->hypot_cnt++] = from_id;
1430 	d->hypot_map[from_id] = to_id;
1431 	return 0;
1432 }
1433 
1434 static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
1435 {
1436 	int i;
1437 
1438 	for (i = 0; i < d->hypot_cnt; i++)
1439 		d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
1440 	d->hypot_cnt = 0;
1441 }
1442 
1443 static void btf_dedup_free(struct btf_dedup *d)
1444 {
1445 	hashmap__free(d->dedup_table);
1446 	d->dedup_table = NULL;
1447 
1448 	free(d->map);
1449 	d->map = NULL;
1450 
1451 	free(d->hypot_map);
1452 	d->hypot_map = NULL;
1453 
1454 	free(d->hypot_list);
1455 	d->hypot_list = NULL;
1456 
1457 	free(d);
1458 }
1459 
1460 static size_t btf_dedup_identity_hash_fn(const void *key, void *ctx)
1461 {
1462 	return (size_t)key;
1463 }
1464 
1465 static size_t btf_dedup_collision_hash_fn(const void *key, void *ctx)
1466 {
1467 	return 0;
1468 }
1469 
1470 static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx)
1471 {
1472 	return k1 == k2;
1473 }
1474 
1475 static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
1476 				       const struct btf_dedup_opts *opts)
1477 {
1478 	struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
1479 	hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
1480 	int i, err = 0;
1481 
1482 	if (!d)
1483 		return ERR_PTR(-ENOMEM);
1484 
1485 	d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
1486 	/* dedup_table_size is now used only to force collisions in tests */
1487 	if (opts && opts->dedup_table_size == 1)
1488 		hash_fn = btf_dedup_collision_hash_fn;
1489 
1490 	d->btf = btf;
1491 	d->btf_ext = btf_ext;
1492 
1493 	d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
1494 	if (IS_ERR(d->dedup_table)) {
1495 		err = PTR_ERR(d->dedup_table);
1496 		d->dedup_table = NULL;
1497 		goto done;
1498 	}
1499 
1500 	d->map = malloc(sizeof(__u32) * (1 + btf->nr_types));
1501 	if (!d->map) {
1502 		err = -ENOMEM;
1503 		goto done;
1504 	}
1505 	/* special BTF "void" type is made canonical immediately */
1506 	d->map[0] = 0;
1507 	for (i = 1; i <= btf->nr_types; i++) {
1508 		struct btf_type *t = d->btf->types[i];
1509 
1510 		/* VAR and DATASEC are never deduped and are self-canonical */
1511 		if (btf_is_var(t) || btf_is_datasec(t))
1512 			d->map[i] = i;
1513 		else
1514 			d->map[i] = BTF_UNPROCESSED_ID;
1515 	}
1516 
1517 	d->hypot_map = malloc(sizeof(__u32) * (1 + btf->nr_types));
1518 	if (!d->hypot_map) {
1519 		err = -ENOMEM;
1520 		goto done;
1521 	}
1522 	for (i = 0; i <= btf->nr_types; i++)
1523 		d->hypot_map[i] = BTF_UNPROCESSED_ID;
1524 
1525 done:
1526 	if (err) {
1527 		btf_dedup_free(d);
1528 		return ERR_PTR(err);
1529 	}
1530 
1531 	return d;
1532 }
1533 
1534 typedef int (*str_off_fn_t)(__u32 *str_off_ptr, void *ctx);
1535 
1536 /*
1537  * Iterate over all possible places in .BTF and .BTF.ext that can reference
1538  * string and pass pointer to it to a provided callback `fn`.
1539  */
1540 static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx)
1541 {
1542 	void *line_data_cur, *line_data_end;
1543 	int i, j, r, rec_size;
1544 	struct btf_type *t;
1545 
1546 	for (i = 1; i <= d->btf->nr_types; i++) {
1547 		t = d->btf->types[i];
1548 		r = fn(&t->name_off, ctx);
1549 		if (r)
1550 			return r;
1551 
1552 		switch (btf_kind(t)) {
1553 		case BTF_KIND_STRUCT:
1554 		case BTF_KIND_UNION: {
1555 			struct btf_member *m = btf_members(t);
1556 			__u16 vlen = btf_vlen(t);
1557 
1558 			for (j = 0; j < vlen; j++) {
1559 				r = fn(&m->name_off, ctx);
1560 				if (r)
1561 					return r;
1562 				m++;
1563 			}
1564 			break;
1565 		}
1566 		case BTF_KIND_ENUM: {
1567 			struct btf_enum *m = btf_enum(t);
1568 			__u16 vlen = btf_vlen(t);
1569 
1570 			for (j = 0; j < vlen; j++) {
1571 				r = fn(&m->name_off, ctx);
1572 				if (r)
1573 					return r;
1574 				m++;
1575 			}
1576 			break;
1577 		}
1578 		case BTF_KIND_FUNC_PROTO: {
1579 			struct btf_param *m = btf_params(t);
1580 			__u16 vlen = btf_vlen(t);
1581 
1582 			for (j = 0; j < vlen; j++) {
1583 				r = fn(&m->name_off, ctx);
1584 				if (r)
1585 					return r;
1586 				m++;
1587 			}
1588 			break;
1589 		}
1590 		default:
1591 			break;
1592 		}
1593 	}
1594 
1595 	if (!d->btf_ext)
1596 		return 0;
1597 
1598 	line_data_cur = d->btf_ext->line_info.info;
1599 	line_data_end = d->btf_ext->line_info.info + d->btf_ext->line_info.len;
1600 	rec_size = d->btf_ext->line_info.rec_size;
1601 
1602 	while (line_data_cur < line_data_end) {
1603 		struct btf_ext_info_sec *sec = line_data_cur;
1604 		struct bpf_line_info_min *line_info;
1605 		__u32 num_info = sec->num_info;
1606 
1607 		r = fn(&sec->sec_name_off, ctx);
1608 		if (r)
1609 			return r;
1610 
1611 		line_data_cur += sizeof(struct btf_ext_info_sec);
1612 		for (i = 0; i < num_info; i++) {
1613 			line_info = line_data_cur;
1614 			r = fn(&line_info->file_name_off, ctx);
1615 			if (r)
1616 				return r;
1617 			r = fn(&line_info->line_off, ctx);
1618 			if (r)
1619 				return r;
1620 			line_data_cur += rec_size;
1621 		}
1622 	}
1623 
1624 	return 0;
1625 }
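/*
 * Example callback (hypothetical, for illustration only): counts string
 * references that point at a non-empty string. The real users below,
 * btf_str_mark_as_used() and btf_str_remap_offset(), follow the same
 * contract: return 0 to keep iterating, non-zero to abort with that error.
 *
 *	static int count_str_refs(__u32 *str_off_ptr, void *ctx)
 *	{
 *		int *cnt = ctx;
 *
 *		if (*str_off_ptr)
 *			(*cnt)++;
 *		return 0;
 *	}
 */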
1626 
1627 static int str_sort_by_content(const void *a1, const void *a2)
1628 {
1629 	const struct btf_str_ptr *p1 = a1;
1630 	const struct btf_str_ptr *p2 = a2;
1631 
1632 	return strcmp(p1->str, p2->str);
1633 }
1634 
1635 static int str_sort_by_offset(const void *a1, const void *a2)
1636 {
1637 	const struct btf_str_ptr *p1 = a1;
1638 	const struct btf_str_ptr *p2 = a2;
1639 
1640 	if (p1->str != p2->str)
1641 		return p1->str < p2->str ? -1 : 1;
1642 	return 0;
1643 }
1644 
1645 static int btf_dedup_str_ptr_cmp(const void *str_ptr, const void *pelem)
1646 {
1647 	const struct btf_str_ptr *p = pelem;
1648 
1649 	if (str_ptr != p->str)
1650 		return (const char *)str_ptr < p->str ? -1 : 1;
1651 	return 0;
1652 }
1653 
1654 static int btf_str_mark_as_used(__u32 *str_off_ptr, void *ctx)
1655 {
1656 	struct btf_str_ptrs *strs;
1657 	struct btf_str_ptr *s;
1658 
1659 	if (*str_off_ptr == 0)
1660 		return 0;
1661 
1662 	strs = ctx;
1663 	s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
1664 		    sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
1665 	if (!s)
1666 		return -EINVAL;
1667 	s->used = true;
1668 	return 0;
1669 }
1670 
1671 static int btf_str_remap_offset(__u32 *str_off_ptr, void *ctx)
1672 {
1673 	struct btf_str_ptrs *strs;
1674 	struct btf_str_ptr *s;
1675 
1676 	if (*str_off_ptr == 0)
1677 		return 0;
1678 
1679 	strs = ctx;
1680 	s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
1681 		    sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
1682 	if (!s)
1683 		return -EINVAL;
1684 	*str_off_ptr = s->new_off;
1685 	return 0;
1686 }
1687 
1688 /*
1689  * Dedup strings and filter out those that are not referenced from either .BTF
1690  * or .BTF.ext (if provided) sections.
1691  *
1692  * This is done by building index of all strings in BTF's string section,
1693  * then iterating over all entities that can reference strings (e.g., type
1694  * names, struct field names, .BTF.ext line info, etc) and marking corresponding
1695  * strings as used. After that all used strings are deduped and compacted into
1696  * sequential blob of memory and new offsets are calculated. Then all the string
1697  * references are iterated again and rewritten using new offsets.
1698  */
1699 static int btf_dedup_strings(struct btf_dedup *d)
1700 {
1701 	const struct btf_header *hdr = d->btf->hdr;
1702 	char *start = (char *)d->btf->nohdr_data + hdr->str_off;
1703 	char *end = start + d->btf->hdr->str_len;
1704 	char *p = start, *tmp_strs = NULL;
1705 	struct btf_str_ptrs strs = {
1706 		.cnt = 0,
1707 		.cap = 0,
1708 		.ptrs = NULL,
1709 		.data = start,
1710 	};
1711 	int i, j, err = 0, grp_idx;
1712 	bool grp_used;
1713 
1714 	/* build index of all strings */
1715 	while (p < end) {
1716 		if (strs.cnt + 1 > strs.cap) {
1717 			struct btf_str_ptr *new_ptrs;
1718 
1719 			strs.cap += max(strs.cnt / 2, 16U);
1720 			new_ptrs = realloc(strs.ptrs,
1721 					   sizeof(strs.ptrs[0]) * strs.cap);
1722 			if (!new_ptrs) {
1723 				err = -ENOMEM;
1724 				goto done;
1725 			}
1726 			strs.ptrs = new_ptrs;
1727 		}
1728 
1729 		strs.ptrs[strs.cnt].str = p;
1730 		strs.ptrs[strs.cnt].used = false;
1731 
1732 		p += strlen(p) + 1;
1733 		strs.cnt++;
1734 	}
1735 
1736 	/* temporary storage for deduplicated strings */
1737 	tmp_strs = malloc(d->btf->hdr->str_len);
1738 	if (!tmp_strs) {
1739 		err = -ENOMEM;
1740 		goto done;
1741 	}
1742 
1743 	/* mark all used strings */
1744 	strs.ptrs[0].used = true;
1745 	err = btf_for_each_str_off(d, btf_str_mark_as_used, &strs);
1746 	if (err)
1747 		goto done;
1748 
1749 	/* sort strings by content, so that we can identify duplicates */
1750 	qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_content);
1751 
1752 	/*
1753 	 * iterate groups of equal strings and if any instance in a group was
1754 	 * referenced, emit single instance and remember new offset
1755 	 */
1756 	p = tmp_strs;
1757 	grp_idx = 0;
1758 	grp_used = strs.ptrs[0].used;
1759 	/* iterate past end to avoid code duplication after loop */
1760 	for (i = 1; i <= strs.cnt; i++) {
1761 		/*
1762 		 * when i == strs.cnt, we want to skip string comparison and go
1763 		 * straight to handling last group of strings (otherwise we'd
1764 		 * need to handle last group after the loop w/ duplicated code)
1765 		 */
1766 		if (i < strs.cnt &&
1767 		    !strcmp(strs.ptrs[i].str, strs.ptrs[grp_idx].str)) {
1768 			grp_used = grp_used || strs.ptrs[i].used;
1769 			continue;
1770 		}
1771 
1772 		/*
1773 		 * this check would have been required after the loop to handle
1774 		 * last group of strings, but due to <= condition in a loop
1775 		 * we avoid that duplication
1776 		 */
1777 		if (grp_used) {
1778 			int new_off = p - tmp_strs;
1779 			__u32 len = strlen(strs.ptrs[grp_idx].str);
1780 
1781 			memmove(p, strs.ptrs[grp_idx].str, len + 1);
1782 			for (j = grp_idx; j < i; j++)
1783 				strs.ptrs[j].new_off = new_off;
1784 			p += len + 1;
1785 		}
1786 
1787 		if (i < strs.cnt) {
1788 			grp_idx = i;
1789 			grp_used = strs.ptrs[i].used;
1790 		}
1791 	}
1792 
1793 	/* replace original strings with deduped ones */
1794 	d->btf->hdr->str_len = p - tmp_strs;
1795 	memmove(start, tmp_strs, d->btf->hdr->str_len);
1796 	end = start + d->btf->hdr->str_len;
1797 
1798 	/* restore original order for further binary search lookups */
1799 	qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_offset);
1800 
1801 	/* remap string offsets */
1802 	err = btf_for_each_str_off(d, btf_str_remap_offset, &strs);
1803 	if (err)
1804 		goto done;
1805 
1806 	d->btf->hdr->str_len = end - start;
1807 
1808 done:
1809 	free(tmp_strs);
1810 	free(strs.ptrs);
1811 	return err;
1812 }
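/*
 * Worked example (hypothetical data): for a string section
 * "\0int\0foo\0int\0" where both copies of "int" are referenced and "foo" is
 * referenced from nowhere, the pass above keeps the mandatory empty string at
 * offset 0, emits a single "int" at offset 1, drops "foo", and remaps every
 * name_off that pointed at either copy of "int" (offsets 1 and 9) to the
 * surviving offset 1.
 */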
1813 
1814 static long btf_hash_common(struct btf_type *t)
1815 {
1816 	long h;
1817 
1818 	h = hash_combine(0, t->name_off);
1819 	h = hash_combine(h, t->info);
1820 	h = hash_combine(h, t->size);
1821 	return h;
1822 }
1823 
1824 static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
1825 {
1826 	return t1->name_off == t2->name_off &&
1827 	       t1->info == t2->info &&
1828 	       t1->size == t2->size;
1829 }
1830 
1831 /* Calculate type signature hash of INT. */
1832 static long btf_hash_int(struct btf_type *t)
1833 {
1834 	__u32 info = *(__u32 *)(t + 1);
1835 	long h;
1836 
1837 	h = btf_hash_common(t);
1838 	h = hash_combine(h, info);
1839 	return h;
1840 }
1841 
1842 /* Check structural equality of two INTs. */
1843 static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
1844 {
1845 	__u32 info1, info2;
1846 
1847 	if (!btf_equal_common(t1, t2))
1848 		return false;
1849 	info1 = *(__u32 *)(t1 + 1);
1850 	info2 = *(__u32 *)(t2 + 1);
1851 	return info1 == info2;
1852 }
1853 
1854 /* Calculate type signature hash of ENUM. */
1855 static long btf_hash_enum(struct btf_type *t)
1856 {
1857 	long h;
1858 
1859 	/* don't hash vlen and enum members to support enum fwd resolving */
1860 	h = hash_combine(0, t->name_off);
1861 	h = hash_combine(h, t->info & ~0xffff);
1862 	h = hash_combine(h, t->size);
1863 	return h;
1864 }
1865 
1866 /* Check structural equality of two ENUMs. */
1867 static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
1868 {
1869 	const struct btf_enum *m1, *m2;
1870 	__u16 vlen;
1871 	int i;
1872 
1873 	if (!btf_equal_common(t1, t2))
1874 		return false;
1875 
1876 	vlen = btf_vlen(t1);
1877 	m1 = btf_enum(t1);
1878 	m2 = btf_enum(t2);
1879 	for (i = 0; i < vlen; i++) {
1880 		if (m1->name_off != m2->name_off || m1->val != m2->val)
1881 			return false;
1882 		m1++;
1883 		m2++;
1884 	}
1885 	return true;
1886 }
1887 
1888 static inline bool btf_is_enum_fwd(struct btf_type *t)
1889 {
1890 	return btf_is_enum(t) && btf_vlen(t) == 0;
1891 }
1892 
1893 static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
1894 {
1895 	if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
1896 		return btf_equal_enum(t1, t2);
1897 	/* ignore vlen when comparing */
1898 	return t1->name_off == t2->name_off &&
1899 	       (t1->info & ~0xffff) == (t2->info & ~0xffff) &&
1900 	       t1->size == t2->size;
1901 }
1902 
1903 /*
1904  * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
1905  * as referenced type IDs equivalence is established separately during type
1906  * graph equivalence check algorithm.
1907  */
1908 static long btf_hash_struct(struct btf_type *t)
1909 {
1910 	const struct btf_member *member = btf_members(t);
1911 	__u32 vlen = btf_vlen(t);
1912 	long h = btf_hash_common(t);
1913 	int i;
1914 
1915 	for (i = 0; i < vlen; i++) {
1916 		h = hash_combine(h, member->name_off);
1917 		h = hash_combine(h, member->offset);
1918 		/* no hashing of referenced type ID, it can be unresolved yet */
1919 		member++;
1920 	}
1921 	return h;
1922 }
1923 
1924 /*
1925  * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced type
1926  * IDs. This check is performed during type graph equivalence check and
1927  * referenced types equivalence is checked separately.
1928  */
1929 static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
1930 {
1931 	const struct btf_member *m1, *m2;
1932 	__u16 vlen;
1933 	int i;
1934 
1935 	if (!btf_equal_common(t1, t2))
1936 		return false;
1937 
1938 	vlen = btf_vlen(t1);
1939 	m1 = btf_members(t1);
1940 	m2 = btf_members(t2);
1941 	for (i = 0; i < vlen; i++) {
1942 		if (m1->name_off != m2->name_off || m1->offset != m2->offset)
1943 			return false;
1944 		m1++;
1945 		m2++;
1946 	}
1947 	return true;
1948 }
1949 
1950 /*
1951  * Calculate type signature hash of ARRAY, including referenced type IDs,
1952  * under assumption that they were already resolved to canonical type IDs and
1953  * are not going to change.
1954  */
1955 static long btf_hash_array(struct btf_type *t)
1956 {
1957 	const struct btf_array *info = btf_array(t);
1958 	long h = btf_hash_common(t);
1959 
1960 	h = hash_combine(h, info->type);
1961 	h = hash_combine(h, info->index_type);
1962 	h = hash_combine(h, info->nelems);
1963 	return h;
1964 }
1965 
1966 /*
1967  * Check exact equality of two ARRAYs, taking into account referenced
1968  * type IDs, under assumption that they were already resolved to canonical
1969  * type IDs and are not going to change.
1970  * This function is called during reference types deduplication to compare
1971  * ARRAY to potential canonical representative.
1972  */
1973 static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
1974 {
1975 	const struct btf_array *info1, *info2;
1976 
1977 	if (!btf_equal_common(t1, t2))
1978 		return false;
1979 
1980 	info1 = btf_array(t1);
1981 	info2 = btf_array(t2);
1982 	return info1->type == info2->type &&
1983 	       info1->index_type == info2->index_type &&
1984 	       info1->nelems == info2->nelems;
1985 }
1986 
1987 /*
1988  * Check structural compatibility of two ARRAYs, ignoring referenced type
1989  * IDs. This check is performed during type graph equivalence check and
1990  * referenced types equivalence is checked separately.
1991  */
1992 static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
1993 {
1994 	if (!btf_equal_common(t1, t2))
1995 		return false;
1996 
1997 	return btf_array(t1)->nelems == btf_array(t2)->nelems;
1998 }
1999 
2000 /*
2001  * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
2002  * under assumption that they were already resolved to canonical type IDs and
2003  * are not going to change.
2004  */
2005 static long btf_hash_fnproto(struct btf_type *t)
2006 {
2007 	const struct btf_param *member = btf_params(t);
2008 	__u16 vlen = btf_vlen(t);
2009 	long h = btf_hash_common(t);
2010 	int i;
2011 
2012 	for (i = 0; i < vlen; i++) {
2013 		h = hash_combine(h, member->name_off);
2014 		h = hash_combine(h, member->type);
2015 		member++;
2016 	}
2017 	return h;
2018 }
2019 
2020 /*
2021  * Check exact equality of two FUNC_PROTOs, taking into account referenced
2022  * type IDs, under assumption that they were already resolved to canonical
2023  * type IDs and are not going to change.
2024  * This function is called during reference types deduplication to compare
2025  * FUNC_PROTO to potential canonical representative.
2026  */
2027 static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
2028 {
2029 	const struct btf_param *m1, *m2;
2030 	__u16 vlen;
2031 	int i;
2032 
2033 	if (!btf_equal_common(t1, t2))
2034 		return false;
2035 
2036 	vlen = btf_vlen(t1);
2037 	m1 = btf_params(t1);
2038 	m2 = btf_params(t2);
2039 	for (i = 0; i < vlen; i++) {
2040 		if (m1->name_off != m2->name_off || m1->type != m2->type)
2041 			return false;
2042 		m1++;
2043 		m2++;
2044 	}
2045 	return true;
2046 }
2047 
2048 /*
2049  * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
2050  * IDs. This check is performed during type graph equivalence check and
2051  * referenced types equivalence is checked separately.
2052  */
2053 static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
2054 {
2055 	const struct btf_param *m1, *m2;
2056 	__u16 vlen;
2057 	int i;
2058 
2059 	/* skip return type ID */
2060 	if (t1->name_off != t2->name_off || t1->info != t2->info)
2061 		return false;
2062 
2063 	vlen = btf_vlen(t1);
2064 	m1 = btf_params(t1);
2065 	m2 = btf_params(t2);
2066 	for (i = 0; i < vlen; i++) {
2067 		if (m1->name_off != m2->name_off)
2068 			return false;
2069 		m1++;
2070 		m2++;
2071 	}
2072 	return true;
2073 }
2074 
2075 /*
2076  * Deduplicate primitive types, which can't reference other types, by calculating
2077  * their type signature hash and comparing them with any possible canonical
2078  * candidate. If no canonical candidate matches, the type itself is marked as
2079  * canonical and is added into `btf_dedup->dedup_table` as another candidate.
2080  */
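/*
 * For example: two compilation units will typically both emit an `int` type as
 * BTF_KIND_INT of size 4 with identical encoding; both instances hash the same
 * via btf_hash_int(), so the second one finds the first among the candidates
 * via btf_equal_int() and is mapped onto it, leaving a single canonical `int`
 * after deduplication.
 */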
2081 static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
2082 {
2083 	struct btf_type *t = d->btf->types[type_id];
2084 	struct hashmap_entry *hash_entry;
2085 	struct btf_type *cand;
2086 	/* if we don't find equivalent type, then we are canonical */
2087 	__u32 new_id = type_id;
2088 	__u32 cand_id;
2089 	long h;
2090 
2091 	switch (btf_kind(t)) {
2092 	case BTF_KIND_CONST:
2093 	case BTF_KIND_VOLATILE:
2094 	case BTF_KIND_RESTRICT:
2095 	case BTF_KIND_PTR:
2096 	case BTF_KIND_TYPEDEF:
2097 	case BTF_KIND_ARRAY:
2098 	case BTF_KIND_STRUCT:
2099 	case BTF_KIND_UNION:
2100 	case BTF_KIND_FUNC:
2101 	case BTF_KIND_FUNC_PROTO:
2102 	case BTF_KIND_VAR:
2103 	case BTF_KIND_DATASEC:
2104 		return 0;
2105 
2106 	case BTF_KIND_INT:
2107 		h = btf_hash_int(t);
2108 		for_each_dedup_cand(d, hash_entry, h) {
2109 			cand_id = (__u32)(long)hash_entry->value;
2110 			cand = d->btf->types[cand_id];
2111 			if (btf_equal_int(t, cand)) {
2112 				new_id = cand_id;
2113 				break;
2114 			}
2115 		}
2116 		break;
2117 
2118 	case BTF_KIND_ENUM:
2119 		h = btf_hash_enum(t);
2120 		for_each_dedup_cand(d, hash_entry, h) {
2121 			cand_id = (__u32)(long)hash_entry->value;
2122 			cand = d->btf->types[cand_id];
2123 			if (btf_equal_enum(t, cand)) {
2124 				new_id = cand_id;
2125 				break;
2126 			}
2127 			if (d->opts.dont_resolve_fwds)
2128 				continue;
2129 			if (btf_compat_enum(t, cand)) {
2130 				if (btf_is_enum_fwd(t)) {
2131 					/* resolve fwd to full enum */
2132 					new_id = cand_id;
2133 					break;
2134 				}
2135 				/* resolve canonical enum fwd to full enum */
2136 				d->map[cand_id] = type_id;
2137 			}
2138 		}
2139 		break;
2140 
2141 	case BTF_KIND_FWD:
2142 		h = btf_hash_common(t);
2143 		for_each_dedup_cand(d, hash_entry, h) {
2144 			cand_id = (__u32)(long)hash_entry->value;
2145 			cand = d->btf->types[cand_id];
2146 			if (btf_equal_common(t, cand)) {
2147 				new_id = cand_id;
2148 				break;
2149 			}
2150 		}
2151 		break;
2152 
2153 	default:
2154 		return -EINVAL;
2155 	}
2156 
2157 	d->map[type_id] = new_id;
2158 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
2159 		return -ENOMEM;
2160 
2161 	return 0;
2162 }
2163 
2164 static int btf_dedup_prim_types(struct btf_dedup *d)
2165 {
2166 	int i, err;
2167 
2168 	for (i = 1; i <= d->btf->nr_types; i++) {
2169 		err = btf_dedup_prim_type(d, i);
2170 		if (err)
2171 			return err;
2172 	}
2173 	return 0;
2174 }
2175 
2176 /*
2177  * Check whether type is already mapped into canonical one (could be to itself).
2178  */
2179 static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
2180 {
2181 	return d->map[type_id] <= BTF_MAX_NR_TYPES;
2182 }
2183 
2184 /*
2185  * Resolve type ID into its canonical type ID, if any; otherwise return original
2186  * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow
2187  * STRUCT/UNION link and resolve it into canonical type ID as well.
2188  */
2189 static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id)
2190 {
2191 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
2192 		type_id = d->map[type_id];
2193 	return type_id;
2194 }
2195 
2196 /*
2197  * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original
2198  * type ID.
2199  */
2200 static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
2201 {
2202 	__u32 orig_type_id = type_id;
2203 
2204 	if (!btf_is_fwd(d->btf->types[type_id]))
2205 		return type_id;
2206 
2207 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
2208 		type_id = d->map[type_id];
2209 
2210 	if (!btf_is_fwd(d->btf->types[type_id]))
2211 		return type_id;
2212 
2213 	return orig_type_id;
2214 }
2215 
2216 
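/*
 * Return the concrete kind a FWD stands for: UNION if its kflag is set,
 * STRUCT otherwise.
 */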
2217 static inline __u16 btf_fwd_kind(struct btf_type *t)
2218 {
2219 	return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
2220 }
2221 
2222 /*
2223  * Check equivalence of BTF type graph formed by candidate struct/union (we'll
2224  * call it "candidate graph" in this description for brevity) to a type graph
2225  * formed by (potential) canonical struct/union ("canonical graph" for brevity
2226  * here, though keep in mind that not all types in canonical graph are
2227  * necessarily canonical representatives themselves; some of them might be
2228  * duplicates or their uniqueness might not have been established yet).
2229  * Returns:
2230  *  - >0, if type graphs are equivalent;
2231  *  -  0, if not equivalent;
2232  *  - <0, on error.
2233  *
2234  * The algorithm performs a side-by-side DFS traversal of both type graphs and
2235  * checks equivalence of BTF types at each step. If at any point BTF types in the
2236  * candidate and canonical graphs are not structurally compatible, the whole graphs
2237  * are incompatible. If types are structurally equivalent (i.e., all information
2238  * except referenced type IDs is exactly the same), a mapping from `canon_id` to
2239  * a `cand_id` is recorded in the hypothetical mapping (`btf_dedup->hypot_map`).
2240  * If a type references other types, then those referenced types are checked
2241  * for equivalence recursively.
2242  *
2243  * During DFS traversal, if we find that for current `canon_id` type we
2244  * already have some mapping in hypothetical map, we check for two possible
2245  * situations:
2246  *   - `canon_id` is mapped to exactly the same type as `cand_id`. This will
2247  *     happen when type graphs have cycles. In this case we assume those two
2248  *     types are equivalent.
2249  *   - `canon_id` is mapped to a different type. This is a contradiction in our
2250  *     hypothetical mapping, because the same type in the canonical graph corresponds
2251  *     to two different types in the candidate graph, which for equivalent type
2252  *     graphs shouldn't happen. This condition terminates the equivalence check
2253  *     with a negative result.
2254  *
2255  * If the type graph traversal exhausts types to check and finds no contradiction,
2256  * then the type graphs are equivalent.
2257  *
2258  * When checking types for equivalence, there is one special case: FWD types.
2259  * If FWD type resolution is allowed and one of the types (either from canonical
2260  * or candidate graph) is FWD and the other is STRUCT/UNION (depending on FWD's kind
2261  * flag) and their names match, the hypothetical mapping is updated to point from
2262  * FWD to STRUCT/UNION. If the graphs are successfully determined to be equivalent,
2263  * this mapping will be used to record FWD -> STRUCT/UNION mapping permanently.
2264  *
2265  * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
2266  * if there are two identically named (or anonymous) structs/unions that are
2267  * structurally compatible, one of which has a FWD field while the other has the
2268  * corresponding field as a concrete STRUCT/UNION, but according to the C sources
2269  * they are different structs/unions referencing different types with the same
2270  * name. This is extremely unlikely to happen, but the btf_dedup API allows
2271  * disabling FWD resolution if this logic is causing problems.
2272  *
2273  * Doing FWD resolution means that both candidate and/or canonical graphs can
2274  * consist of portions of the graph that come from multiple compilation units.
2275  * This is due to the fact that types within a single compilation unit are always
2276  * deduplicated and FWDs are already resolved, if the referenced struct/union
2277  * definition is available. So, if we had an unresolved FWD and found a corresponding
2278  * STRUCT/UNION, they will be from different compilation units. This
2279  * consequently means that when we "link" FWD to corresponding STRUCT/UNION,
2280  * the type graph will likely have at least two different BTF types that describe
2281  * the same type (e.g., most probably there will be two different BTF types for the
2282  * same 'int' primitive type) and could even have "overlapping" parts of the type
2283  * graph that describe the same subset of types.
2284  *
2285  * This in turn means that our assumption that each type in the canonical graph
2286  * must correspond to exactly one type in the candidate graph might not hold
2287  * anymore and will make it harder to detect contradictions using the hypothetical
2288  * map. To handle this problem, we allow following FWD -> STRUCT/UNION
2289  * resolution only in the canonical graph. FWDs in candidate graphs are never
2290  * resolved. To see why it's OK, let's check all possible situations w.r.t. FWDs
2291  * that can occur:
2292  *   - Both types in canonical and candidate graphs are FWDs. If they are
2293  *     structurally equivalent, then they can either be both resolved to the
2294  *     same STRUCT/UNION or not resolved at all. In both cases they are
2295  *     equivalent and there is no need to resolve FWD on candidate side.
2296  *   - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
2297  *     so there is nothing to resolve either; the algorithm will check equivalence anyway.
2298  *   - Type in canonical graph is FWD, while type in candidate is concrete
2299  *     STRUCT/UNION. In this case candidate graph comes from single compilation
2300  *     unit, so there is exactly one BTF type for each unique C type. After
2301  *     resolving FWD into STRUCT/UNION, there might be more than one BTF type
2302  *     in canonical graph mapping to single BTF type in candidate graph, but
2303  *     because hypothetical mapping maps from canonical to candidate types, it's
2304  *     alright, and we still maintain the property of having single `canon_id`
2305  *     mapping to single `cand_id` (there could be two different `canon_id`
2306  *     mapped to the same `cand_id`, but it's not contradictory).
2307  *   - Type in canonical graph is concrete STRUCT/UNION, while type in candidate
2308  *     graph is FWD. In this case we are just going to check compatibility of
2309  *     STRUCT/UNION and corresponding FWD, and if they are compatible, we'll
2310  *     assume that whatever STRUCT/UNION FWD resolves to must be equivalent to
2311  *     a concrete STRUCT/UNION from canonical graph. If the rest of type graphs
2312  *     turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from
2313  *     canonical graph.
2314  */
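/*
 * Illustrative example of the check above (hypothetical compilation units):
 *
 *	CU #1:	struct s { struct t *p; };	struct t { int x; };
 *	CU #2:	struct s { struct t *p; };	struct t;		(FWD)
 *
 * When struct s from CU #2 (candidate) is checked against the canonical
 * struct s from CU #1, the side-by-side DFS first compares the two `struct s`
 * shallowly (name, size, member names and offsets), then recurses through the
 * `p` member into the PTR types and finally into `struct t`. There one side is
 * a concrete STRUCT and the other is a FWD with a matching name; with FWD
 * resolution enabled this pair is accepted as equivalent and recorded in the
 * hypothetical map, and if the whole graphs match,
 * btf_dedup_merge_hypot_map() resolves the FWD to the concrete struct.
 */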
2315 static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
2316 			      __u32 canon_id)
2317 {
2318 	struct btf_type *cand_type;
2319 	struct btf_type *canon_type;
2320 	__u32 hypot_type_id;
2321 	__u16 cand_kind;
2322 	__u16 canon_kind;
2323 	int i, eq;
2324 
2325 	/* if both resolve to the same canonical, they must be equivalent */
2326 	if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id))
2327 		return 1;
2328 
2329 	canon_id = resolve_fwd_id(d, canon_id);
2330 
2331 	hypot_type_id = d->hypot_map[canon_id];
2332 	if (hypot_type_id <= BTF_MAX_NR_TYPES)
2333 		return hypot_type_id == cand_id;
2334 
2335 	if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
2336 		return -ENOMEM;
2337 
2338 	cand_type = d->btf->types[cand_id];
2339 	canon_type = d->btf->types[canon_id];
2340 	cand_kind = btf_kind(cand_type);
2341 	canon_kind = btf_kind(canon_type);
2342 
2343 	if (cand_type->name_off != canon_type->name_off)
2344 		return 0;
2345 
2346 	/* FWD <--> STRUCT/UNION equivalence check, if enabled */
2347 	if (!d->opts.dont_resolve_fwds
2348 	    && (cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
2349 	    && cand_kind != canon_kind) {
2350 		__u16 real_kind;
2351 		__u16 fwd_kind;
2352 
2353 		if (cand_kind == BTF_KIND_FWD) {
2354 			real_kind = canon_kind;
2355 			fwd_kind = btf_fwd_kind(cand_type);
2356 		} else {
2357 			real_kind = cand_kind;
2358 			fwd_kind = btf_fwd_kind(canon_type);
2359 		}
2360 		return fwd_kind == real_kind;
2361 	}
2362 
2363 	if (cand_kind != canon_kind)
2364 		return 0;
2365 
2366 	switch (cand_kind) {
2367 	case BTF_KIND_INT:
2368 		return btf_equal_int(cand_type, canon_type);
2369 
2370 	case BTF_KIND_ENUM:
2371 		if (d->opts.dont_resolve_fwds)
2372 			return btf_equal_enum(cand_type, canon_type);
2373 		else
2374 			return btf_compat_enum(cand_type, canon_type);
2375 
2376 	case BTF_KIND_FWD:
2377 		return btf_equal_common(cand_type, canon_type);
2378 
2379 	case BTF_KIND_CONST:
2380 	case BTF_KIND_VOLATILE:
2381 	case BTF_KIND_RESTRICT:
2382 	case BTF_KIND_PTR:
2383 	case BTF_KIND_TYPEDEF:
2384 	case BTF_KIND_FUNC:
2385 		if (cand_type->info != canon_type->info)
2386 			return 0;
2387 		return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
2388 
2389 	case BTF_KIND_ARRAY: {
2390 		const struct btf_array *cand_arr, *canon_arr;
2391 
2392 		if (!btf_compat_array(cand_type, canon_type))
2393 			return 0;
2394 		cand_arr = btf_array(cand_type);
2395 		canon_arr = btf_array(canon_type);
2396 		eq = btf_dedup_is_equiv(d,
2397 			cand_arr->index_type, canon_arr->index_type);
2398 		if (eq <= 0)
2399 			return eq;
2400 		return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
2401 	}
2402 
2403 	case BTF_KIND_STRUCT:
2404 	case BTF_KIND_UNION: {
2405 		const struct btf_member *cand_m, *canon_m;
2406 		__u16 vlen;
2407 
2408 		if (!btf_shallow_equal_struct(cand_type, canon_type))
2409 			return 0;
2410 		vlen = btf_vlen(cand_type);
2411 		cand_m = btf_members(cand_type);
2412 		canon_m = btf_members(canon_type);
2413 		for (i = 0; i < vlen; i++) {
2414 			eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
2415 			if (eq <= 0)
2416 				return eq;
2417 			cand_m++;
2418 			canon_m++;
2419 		}
2420 
2421 		return 1;
2422 	}
2423 
2424 	case BTF_KIND_FUNC_PROTO: {
2425 		const struct btf_param *cand_p, *canon_p;
2426 		__u16 vlen;
2427 
2428 		if (!btf_compat_fnproto(cand_type, canon_type))
2429 			return 0;
2430 		eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
2431 		if (eq <= 0)
2432 			return eq;
2433 		vlen = btf_vlen(cand_type);
2434 		cand_p = btf_params(cand_type);
2435 		canon_p = btf_params(canon_type);
2436 		for (i = 0; i < vlen; i++) {
2437 			eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
2438 			if (eq <= 0)
2439 				return eq;
2440 			cand_p++;
2441 			canon_p++;
2442 		}
2443 		return 1;
2444 	}
2445 
2446 	default:
2447 		return -EINVAL;
2448 	}
2449 	return 0;
2450 }
2451 
2452 /*
2453  * Use hypothetical mapping, produced by successful type graph equivalence
2454  * check, to augment existing struct/union canonical mapping, where possible.
2455  *
2456  * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
2457  * FWD -> STRUCT/UNION correspondence. FWD resolution is bidirectional: it doesn't
2458  * matter if the FWD type was part of the canonical graph or the candidate one,
2459  * we are recording the mapping anyway. As opposed to the carefulness required
2460  * for struct/union correspondence mapping (described below), for FWD resolution
2461  * it's not important, as by the time that FWD type (a reference type) is
2462  * deduplicated, all structs/unions will have been deduped already anyway.
2463  *
2464  * Recording STRUCT/UNION mapping is purely a performance optimization and is
2465  * not required for correctness. It needs to be done carefully to ensure that
2466  * struct/union from candidate's type graph is not mapped into corresponding
2467  * struct/union from canonical type graph that itself hasn't been resolved into
2468  * canonical representative. The only guarantee we have is that canonical
2469  * struct/union was determined as canonical and that won't change. But any
2470  * types referenced through that struct/union's fields might not have been
2471  * resolved yet, so in a case like that it's too early to establish any kind of
2472  * correspondence between structs/unions.
2473  *
2474  * No canonical correspondence is derived for primitive types (they are already
2475  * deduplicated completely anyway) or reference types (they rely on
2476  * stability of struct/union canonical relationship for equivalence checks).
2477  */
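/*
 * For example: if the hypothetical map pairs a FWD `struct t` with a concrete
 * `struct t`, the FWD is resolved right away (d->map[fwd_id] = struct_id),
 * regardless of which graph it came from. A candidate struct, on the other
 * hand, is only mapped onto its canonical-graph counterpart if that
 * counterpart already has a canonical representative of its own.
 */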
2478 static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
2479 {
2480 	__u32 cand_type_id, targ_type_id;
2481 	__u16 t_kind, c_kind;
2482 	__u32 t_id, c_id;
2483 	int i;
2484 
2485 	for (i = 0; i < d->hypot_cnt; i++) {
2486 		cand_type_id = d->hypot_list[i];
2487 		targ_type_id = d->hypot_map[cand_type_id];
2488 		t_id = resolve_type_id(d, targ_type_id);
2489 		c_id = resolve_type_id(d, cand_type_id);
2490 		t_kind = btf_kind(d->btf->types[t_id]);
2491 		c_kind = btf_kind(d->btf->types[c_id]);
2492 		/*
2493 		 * Resolve FWD into STRUCT/UNION.
2494 		 * It's ok to resolve FWD into STRUCT/UNION that's not yet
2495 		 * mapped to canonical representative (as opposed to
2496 		 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
2497 		 * eventually that struct is going to be mapped and all resolved
2498 		 * FWDs will automatically resolve to correct canonical
2499 		 * representative. This will happen before ref type deduping,
2500 		 * which critically depends on stability of these mapping. This
2501 		 * which critically depends on stability of these mappings. This
2502 		 * checks, though.
2503 		 */
2504 		if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
2505 			d->map[c_id] = t_id;
2506 		else if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
2507 			d->map[t_id] = c_id;
2508 
2509 		if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
2510 		    c_kind != BTF_KIND_FWD &&
2511 		    is_type_mapped(d, c_id) &&
2512 		    !is_type_mapped(d, t_id)) {
2513 			/*
2514 			 * as a perf optimization, we can map struct/union
2515 			 * that's part of type graph we just verified for
2516 			 * equivalence. We can do that for struct/union that has
2517 			 * canonical representative only, though.
2518 			 */
2519 			d->map[t_id] = c_id;
2520 		}
2521 	}
2522 }
2523 
2524 /*
2525  * Deduplicate struct/union types.
2526  *
2527  * For each struct/union type its type signature hash is calculated, taking
2528  * into account the type's name, size, and the number, order and names of fields,
2529  * but ignoring type IDs referenced from fields, because they might not be deduped
2530  * completely until after the reference types deduplication phase. This type hash
2531  * is used to iterate over all potential canonical types sharing the same hash.
2532  * For each canonical candidate we check whether the type graphs that they form
2533  * (through referenced types in fields and so on) are equivalent using the algorithm
2534  * implemented in `btf_dedup_is_equiv`. If such equivalence is found and
2535  * BTF_KIND_FWD resolution is allowed, then hypothetical mapping
2536  * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence
2537  * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to
2538  * potentially map other structs/unions to their canonical representatives,
2539  * if such relationship hasn't yet been established. This speeds up algorithm
2540  * by eliminating some of the duplicate work.
2541  *
2542  * If no matching canonical representative was found, the struct/union is marked
2543  * as canonical for itself and is added into the btf_dedup->dedup_table hash map
2544  * for further lookups.
2545  */
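/*
 * For example: two `struct list_head { struct list_head *next, *prev; }`
 * definitions coming from different compilation units hash identically (same
 * name, size, member names and offsets), even though their members still
 * reference different, not yet deduplicated PTR types. The full type graph
 * equivalence check then confirms that they describe the same type and the
 * later one is mapped onto the earlier, canonical one.
 */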
2546 static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
2547 {
2548 	struct btf_type *cand_type, *t;
2549 	struct hashmap_entry *hash_entry;
2550 	/* if we don't find equivalent type, then we are canonical */
2551 	__u32 new_id = type_id;
2552 	__u16 kind;
2553 	long h;
2554 
2555 	/* already deduped or is in process of deduping (loop detected) */
2556 	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
2557 		return 0;
2558 
2559 	t = d->btf->types[type_id];
2560 	kind = btf_kind(t);
2561 
2562 	if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
2563 		return 0;
2564 
2565 	h = btf_hash_struct(t);
2566 	for_each_dedup_cand(d, hash_entry, h) {
2567 		__u32 cand_id = (__u32)(long)hash_entry->value;
2568 		int eq;
2569 
2570 		/*
2571 		 * Even though btf_dedup_is_equiv() checks for
2572 		 * btf_shallow_equal_struct() internally when checking two
2573 		 * structs (unions) for equivalence, we need to guard here
2574 		 * from picking matching FWD type as a dedup candidate.
2575 		 * This can happen due to hash collision. In such case just
2576 		 * relying on btf_dedup_is_equiv() would lead to potentially
2577 		 * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
2578 		 * FWD and compatible STRUCT/UNION are considered equivalent.
2579 		 */
2580 		cand_type = d->btf->types[cand_id];
2581 		if (!btf_shallow_equal_struct(t, cand_type))
2582 			continue;
2583 
2584 		btf_dedup_clear_hypot_map(d);
2585 		eq = btf_dedup_is_equiv(d, type_id, cand_id);
2586 		if (eq < 0)
2587 			return eq;
2588 		if (!eq)
2589 			continue;
2590 		new_id = cand_id;
2591 		btf_dedup_merge_hypot_map(d);
2592 		break;
2593 	}
2594 
2595 	d->map[type_id] = new_id;
2596 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
2597 		return -ENOMEM;
2598 
2599 	return 0;
2600 }
2601 
2602 static int btf_dedup_struct_types(struct btf_dedup *d)
2603 {
2604 	int i, err;
2605 
2606 	for (i = 1; i <= d->btf->nr_types; i++) {
2607 		err = btf_dedup_struct_type(d, i);
2608 		if (err)
2609 			return err;
2610 	}
2611 	return 0;
2612 }
2613 
2614 /*
2615  * Deduplicate reference type.
2616  *
2617  * Once all primitive and struct/union types got deduplicated, we can easily
2618  * deduplicate all other (reference) BTF types. This is done in two steps:
2619  *
2620  * 1. Resolve all referenced type IDs into their canonical type IDs. This
2621  * resolution can be done either immediately for primitive or struct/union types
2622  * (because they were deduped in previous two phases) or recursively for
2623  * reference types. Recursion will always terminate at either primitive or
2624  * struct/union type, at which point we can "unwind" chain of reference types
2625  * one by one. There is no danger of encountering cycles because in the C type
2626  * system the only way to form a type cycle is through a struct/union, so any chain
2627  * of reference types, even those taking part in a type cycle, will inevitably
2628  * reach struct/union at some point.
2629  *
2630  * 2. Once all referenced type IDs are resolved into canonical ones, BTF type
2631  * becomes "stable", in the sense that no further deduplication will cause
2632  * any changes to it. With that, it's now possible to calculate type's signature
2633  * hash (this time taking into account referenced type IDs) and loop over all
2634  * potential canonical representatives. If no match was found, current type
2635  * will become canonical representative of itself and will be added into
2636  * btf_dedup->dedup_table as another possible canonical representative.
2637  */
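/*
 * For example, given the reference chain
 *
 *	[5] CONST -> [4] PTR -> [3] STRUCT foo
 *
 * the recursion bottoms out at [3], which was already deduplicated in the
 * struct phase (say, to canonical ID 1). [4] is then rewritten to reference 1
 * and deduplicated against other "PTR to 1" candidates, and only afterwards is
 * [5] hashed and deduplicated, using the now-canonical PTR as its referenced
 * type.
 */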
2638 static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
2639 {
2640 	struct hashmap_entry *hash_entry;
2641 	__u32 new_id = type_id, cand_id;
2642 	struct btf_type *t, *cand;
2643 	/* if we don't find equivalent type, then we are representative type */
2644 	int ref_type_id;
2645 	long h;
2646 
2647 	if (d->map[type_id] == BTF_IN_PROGRESS_ID)
2648 		return -ELOOP;
2649 	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
2650 		return resolve_type_id(d, type_id);
2651 
2652 	t = d->btf->types[type_id];
2653 	d->map[type_id] = BTF_IN_PROGRESS_ID;
2654 
2655 	switch (btf_kind(t)) {
2656 	case BTF_KIND_CONST:
2657 	case BTF_KIND_VOLATILE:
2658 	case BTF_KIND_RESTRICT:
2659 	case BTF_KIND_PTR:
2660 	case BTF_KIND_TYPEDEF:
2661 	case BTF_KIND_FUNC:
2662 		ref_type_id = btf_dedup_ref_type(d, t->type);
2663 		if (ref_type_id < 0)
2664 			return ref_type_id;
2665 		t->type = ref_type_id;
2666 
2667 		h = btf_hash_common(t);
2668 		for_each_dedup_cand(d, hash_entry, h) {
2669 			cand_id = (__u32)(long)hash_entry->value;
2670 			cand = d->btf->types[cand_id];
2671 			if (btf_equal_common(t, cand)) {
2672 				new_id = cand_id;
2673 				break;
2674 			}
2675 		}
2676 		break;
2677 
2678 	case BTF_KIND_ARRAY: {
2679 		struct btf_array *info = btf_array(t);
2680 
2681 		ref_type_id = btf_dedup_ref_type(d, info->type);
2682 		if (ref_type_id < 0)
2683 			return ref_type_id;
2684 		info->type = ref_type_id;
2685 
2686 		ref_type_id = btf_dedup_ref_type(d, info->index_type);
2687 		if (ref_type_id < 0)
2688 			return ref_type_id;
2689 		info->index_type = ref_type_id;
2690 
2691 		h = btf_hash_array(t);
2692 		for_each_dedup_cand(d, hash_entry, h) {
2693 			cand_id = (__u32)(long)hash_entry->value;
2694 			cand = d->btf->types[cand_id];
2695 			if (btf_equal_array(t, cand)) {
2696 				new_id = cand_id;
2697 				break;
2698 			}
2699 		}
2700 		break;
2701 	}
2702 
2703 	case BTF_KIND_FUNC_PROTO: {
2704 		struct btf_param *param;
2705 		__u16 vlen;
2706 		int i;
2707 
2708 		ref_type_id = btf_dedup_ref_type(d, t->type);
2709 		if (ref_type_id < 0)
2710 			return ref_type_id;
2711 		t->type = ref_type_id;
2712 
2713 		vlen = btf_vlen(t);
2714 		param = btf_params(t);
2715 		for (i = 0; i < vlen; i++) {
2716 			ref_type_id = btf_dedup_ref_type(d, param->type);
2717 			if (ref_type_id < 0)
2718 				return ref_type_id;
2719 			param->type = ref_type_id;
2720 			param++;
2721 		}
2722 
2723 		h = btf_hash_fnproto(t);
2724 		for_each_dedup_cand(d, hash_entry, h) {
2725 			cand_id = (__u32)(long)hash_entry->value;
2726 			cand = d->btf->types[cand_id];
2727 			if (btf_equal_fnproto(t, cand)) {
2728 				new_id = cand_id;
2729 				break;
2730 			}
2731 		}
2732 		break;
2733 	}
2734 
2735 	default:
2736 		return -EINVAL;
2737 	}
2738 
2739 	d->map[type_id] = new_id;
2740 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
2741 		return -ENOMEM;
2742 
2743 	return new_id;
2744 }
2745 
2746 static int btf_dedup_ref_types(struct btf_dedup *d)
2747 {
2748 	int i, err;
2749 
2750 	for (i = 1; i <= d->btf->nr_types; i++) {
2751 		err = btf_dedup_ref_type(d, i);
2752 		if (err < 0)
2753 			return err;
2754 	}
2755 	/* we won't need d->dedup_table anymore */
2756 	hashmap__free(d->dedup_table);
2757 	d->dedup_table = NULL;
2758 	return 0;
2759 }
2760 
2761 /*
2762  * Compact types.
2763  *
2764  * After we have established for each type its corresponding canonical representative
2765  * type, we can now eliminate types that are not canonical and leave only
2766  * canonical ones laid out sequentially in memory by copying them over
2767  * duplicates. During compaction btf_dedup->hypot_map array is reused to store
2768  * a map from original type ID to a new compacted type ID, which will be used
2769  * during next phase to "fix up" type IDs, referenced from struct/union and
2770  * during the next phase to "fix up" type IDs referenced from struct/union and
2771  */
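/*
 * For example: with four original types and d->map = {1->1, 2->1, 3->3, 4->3},
 * only types 1 and 3 are canonical and survive compaction. They are copied to
 * the front of the type section and hypot_map records 1->1 and 3->2, so a
 * reference to original type ID 4 later remaps as 4 -> (canonical) 3 ->
 * (compacted) 2.
 */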
2772 static int btf_dedup_compact_types(struct btf_dedup *d)
2773 {
2774 	struct btf_type **new_types;
2775 	__u32 next_type_id = 1;
2776 	char *types_start, *p;
2777 	int i, len;
2778 
2779 	/* we are going to reuse hypot_map to store compaction remapping */
2780 	d->hypot_map[0] = 0;
2781 	for (i = 1; i <= d->btf->nr_types; i++)
2782 		d->hypot_map[i] = BTF_UNPROCESSED_ID;
2783 
2784 	types_start = d->btf->nohdr_data + d->btf->hdr->type_off;
2785 	p = types_start;
2786 
2787 	for (i = 1; i <= d->btf->nr_types; i++) {
2788 		if (d->map[i] != i)
2789 			continue;
2790 
2791 		len = btf_type_size(d->btf->types[i]);
2792 		if (len < 0)
2793 			return len;
2794 
2795 		memmove(p, d->btf->types[i], len);
2796 		d->hypot_map[i] = next_type_id;
2797 		d->btf->types[next_type_id] = (struct btf_type *)p;
2798 		p += len;
2799 		next_type_id++;
2800 	}
2801 
2802 	/* shrink struct btf's internal types index and update btf_header */
2803 	d->btf->nr_types = next_type_id - 1;
2804 	d->btf->types_size = d->btf->nr_types;
2805 	d->btf->hdr->type_len = p - types_start;
2806 	new_types = realloc(d->btf->types,
2807 			    (1 + d->btf->nr_types) * sizeof(struct btf_type *));
2808 	if (!new_types)
2809 		return -ENOMEM;
2810 	d->btf->types = new_types;
2811 
2812 	/* make sure string section follows type information without gaps */
2813 	d->btf->hdr->str_off = p - (char *)d->btf->nohdr_data;
2814 	memmove(p, d->btf->strings, d->btf->hdr->str_len);
2815 	d->btf->strings = p;
2816 	p += d->btf->hdr->str_len;
2817 
2818 	d->btf->data_size = p - (char *)d->btf->data;
2819 	return 0;
2820 }
2821 
2822 /*
2823  * Figure out final (deduplicated and compacted) type ID for provided original
2824  * `type_id` by first resolving it into corresponding canonical type ID and
2825  * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map,
2826  * which is populated during compaction phase.
2827  */
2828 static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id)
2829 {
2830 	__u32 resolved_type_id, new_type_id;
2831 
2832 	resolved_type_id = resolve_type_id(d, type_id);
2833 	new_type_id = d->hypot_map[resolved_type_id];
2834 	if (new_type_id > BTF_MAX_NR_TYPES)
2835 		return -EINVAL;
2836 	return new_type_id;
2837 }
2838 
2839 /*
2840  * Remap referenced type IDs into deduped type IDs.
2841  *
2842  * After BTF types are deduplicated and compacted, their final type IDs may
2843  * differ from original ones. The map from original to a corresponding
2844  * deduped type ID is stored in btf_dedup->hypot_map and is populated during
2845  * compaction phase. During remapping phase we are rewriting all type IDs
2846  * referenced from any BTF type (e.g., struct fields, func proto args, etc) to
2847  * their final deduped type IDs.
2848  */
2849 static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
2850 {
2851 	struct btf_type *t = d->btf->types[type_id];
2852 	int i, r;
2853 
2854 	switch (btf_kind(t)) {
2855 	case BTF_KIND_INT:
2856 	case BTF_KIND_ENUM:
2857 		break;
2858 
2859 	case BTF_KIND_FWD:
2860 	case BTF_KIND_CONST:
2861 	case BTF_KIND_VOLATILE:
2862 	case BTF_KIND_RESTRICT:
2863 	case BTF_KIND_PTR:
2864 	case BTF_KIND_TYPEDEF:
2865 	case BTF_KIND_FUNC:
2866 	case BTF_KIND_VAR:
2867 		r = btf_dedup_remap_type_id(d, t->type);
2868 		if (r < 0)
2869 			return r;
2870 		t->type = r;
2871 		break;
2872 
2873 	case BTF_KIND_ARRAY: {
2874 		struct btf_array *arr_info = btf_array(t);
2875 
2876 		r = btf_dedup_remap_type_id(d, arr_info->type);
2877 		if (r < 0)
2878 			return r;
2879 		arr_info->type = r;
2880 		r = btf_dedup_remap_type_id(d, arr_info->index_type);
2881 		if (r < 0)
2882 			return r;
2883 		arr_info->index_type = r;
2884 		break;
2885 	}
2886 
2887 	case BTF_KIND_STRUCT:
2888 	case BTF_KIND_UNION: {
2889 		struct btf_member *member = btf_members(t);
2890 		__u16 vlen = btf_vlen(t);
2891 
2892 		for (i = 0; i < vlen; i++) {
2893 			r = btf_dedup_remap_type_id(d, member->type);
2894 			if (r < 0)
2895 				return r;
2896 			member->type = r;
2897 			member++;
2898 		}
2899 		break;
2900 	}
2901 
2902 	case BTF_KIND_FUNC_PROTO: {
2903 		struct btf_param *param = btf_params(t);
2904 		__u16 vlen = btf_vlen(t);
2905 
2906 		r = btf_dedup_remap_type_id(d, t->type);
2907 		if (r < 0)
2908 			return r;
2909 		t->type = r;
2910 
2911 		for (i = 0; i < vlen; i++) {
2912 			r = btf_dedup_remap_type_id(d, param->type);
2913 			if (r < 0)
2914 				return r;
2915 			param->type = r;
2916 			param++;
2917 		}
2918 		break;
2919 	}
2920 
2921 	case BTF_KIND_DATASEC: {
2922 		struct btf_var_secinfo *var = btf_var_secinfos(t);
2923 		__u16 vlen = btf_vlen(t);
2924 
2925 		for (i = 0; i < vlen; i++) {
2926 			r = btf_dedup_remap_type_id(d, var->type);
2927 			if (r < 0)
2928 				return r;
2929 			var->type = r;
2930 			var++;
2931 		}
2932 		break;
2933 	}
2934 
2935 	default:
2936 		return -EINVAL;
2937 	}
2938 
2939 	return 0;
2940 }
2941 
2942 static int btf_dedup_remap_types(struct btf_dedup *d)
2943 {
2944 	int i, r;
2945 
2946 	for (i = 1; i <= d->btf->nr_types; i++) {
2947 		r = btf_dedup_remap_type(d, i);
2948 		if (r < 0)
2949 			return r;
2950 	}
2951 	return 0;
2952 }
2953 
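/*
 * Load raw BTF data (e.g., the contents of /sys/kernel/btf/vmlinux) from a
 * file at @path into memory and parse it with btf__new(). Returns a libbpf
 * error pointer on failure.
 */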
2954 static struct btf *btf_load_raw(const char *path)
2955 {
2956 	struct btf *btf;
2957 	size_t read_cnt;
2958 	struct stat st;
2959 	void *data;
2960 	FILE *f;
2961 
2962 	if (stat(path, &st))
2963 		return ERR_PTR(-errno);
2964 
2965 	data = malloc(st.st_size);
2966 	if (!data)
2967 		return ERR_PTR(-ENOMEM);
2968 
2969 	f = fopen(path, "rb");
2970 	if (!f) {
2971 		btf = ERR_PTR(-errno);
2972 		goto cleanup;
2973 	}
2974 
2975 	read_cnt = fread(data, 1, st.st_size, f);
2976 	fclose(f);
2977 	if (read_cnt < st.st_size) {
2978 		btf = ERR_PTR(-EBADF);
2979 		goto cleanup;
2980 	}
2981 
2982 	btf = btf__new(data, read_cnt);
2983 
2984 cleanup:
2985 	free(data);
2986 	return btf;
2987 }
2988 
2989 /*
2990  * Probe a few well-known locations for the vmlinux kernel image and try to load
2991  * BTF data out of it to use as the target BTF.
2992  */
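/*
 * Typical usage (sketch, error handling abbreviated):
 *
 *	struct btf *kernel_btf = libbpf_find_kernel_btf();
 *
 *	if (IS_ERR(kernel_btf))
 *		return PTR_ERR(kernel_btf);
 *	... use kernel_btf as the target kernel BTF ...
 *	btf__free(kernel_btf);
 */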
2993 struct btf *libbpf_find_kernel_btf(void)
2994 {
2995 	struct {
2996 		const char *path_fmt;
2997 		bool raw_btf;
2998 	} locations[] = {
2999 		/* try canonical vmlinux BTF through sysfs first */
3000 		{ "/sys/kernel/btf/vmlinux", true /* raw BTF */ },
3001 		/* fall back to trying to find vmlinux ELF on disk otherwise */
3002 		{ "/boot/vmlinux-%1$s" },
3003 		{ "/lib/modules/%1$s/vmlinux-%1$s" },
3004 		{ "/lib/modules/%1$s/build/vmlinux" },
3005 		{ "/usr/lib/modules/%1$s/kernel/vmlinux" },
3006 		{ "/usr/lib/debug/boot/vmlinux-%1$s" },
3007 		{ "/usr/lib/debug/boot/vmlinux-%1$s.debug" },
3008 		{ "/usr/lib/debug/lib/modules/%1$s/vmlinux" },
3009 	};
3010 	char path[PATH_MAX + 1];
3011 	struct utsname buf;
3012 	struct btf *btf;
3013 	int i;
3014 
3015 	uname(&buf);
3016 
3017 	for (i = 0; i < ARRAY_SIZE(locations); i++) {
3018 		snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release);
3019 
3020 		if (access(path, R_OK))
3021 			continue;
3022 
3023 		if (locations[i].raw_btf)
3024 			btf = btf_load_raw(path);
3025 		else
3026 			btf = btf__parse_elf(path, NULL);
3027 
3028 		pr_debug("loading kernel BTF '%s': %ld\n",
3029 			 path, IS_ERR(btf) ? PTR_ERR(btf) : 0);
3030 		if (IS_ERR(btf))
3031 			continue;
3032 
3033 		return btf;
3034 	}
3035 
3036 	pr_warn("failed to find valid kernel BTF\n");
3037 	return ERR_PTR(-ESRCH);
3038 }
3039