xref: /openbmc/linux/tools/bpf/bpftool/gen.c (revision dc695516)
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2019 Facebook */
3 
4 #ifndef _GNU_SOURCE
5 #define _GNU_SOURCE
6 #endif
7 #include <ctype.h>
8 #include <errno.h>
9 #include <fcntl.h>
10 #include <linux/err.h>
11 #include <stdbool.h>
12 #include <stdio.h>
13 #include <string.h>
14 #include <unistd.h>
15 #include <bpf/bpf.h>
16 #include <bpf/libbpf.h>
17 #include <bpf/libbpf_internal.h>
18 #include <sys/types.h>
19 #include <sys/stat.h>
20 #include <sys/mman.h>
21 #include <bpf/btf.h>
22 
23 #include "json_writer.h"
24 #include "main.h"
25 
26 #define MAX_OBJ_NAME_LEN 64
27 
/* Replace every character that is not valid in a C identifier with '_',
 * in place. Fix: <ctype.h> classifiers require an unsigned char value
 * (or EOF); passing a plain, possibly-negative char is undefined
 * behavior on platforms where char is signed (CERT STR37-C).
 */
static void sanitize_identifier(char *name)
{
	int i;

	for (i = 0; name[i]; i++)
		if (!isalnum((unsigned char)name[i]) && name[i] != '_')
			name[i] = '_';
}
36 
/* Return true if 'str' begins with 'prefix' (every string starts with
 * the empty prefix).
 */
static bool str_has_prefix(const char *str, const char *prefix)
{
	while (*prefix) {
		if (*str++ != *prefix++)
			return false;
	}
	return true;
}
41 
/* Return true if 'str' ends with 'suffix' (every string ends with the
 * empty suffix).
 */
static bool str_has_suffix(const char *str, const char *suffix)
{
	size_t str_len = strlen(str);
	size_t sfx_len = strlen(suffix);

	if (str_len < sfx_len)
		return false;

	/* compare the tail of 'str' against the whole suffix */
	return strcmp(str + str_len - sfx_len, suffix) == 0;
}
56 
57 static void get_obj_name(char *name, const char *file)
58 {
59 	/* Using basename() GNU version which doesn't modify arg. */
60 	strncpy(name, basename(file), MAX_OBJ_NAME_LEN - 1);
61 	name[MAX_OBJ_NAME_LEN - 1] = '\0';
62 	if (str_has_suffix(name, ".o"))
63 		name[strlen(name) - 2] = '\0';
64 	sanitize_identifier(name);
65 }
66 
/* Build an upper-cased include-guard macro name "__<OBJ>_SKEL_H__" into
 * 'guard' (caller guarantees sufficient space). Fix: toupper() requires
 * an unsigned char value; passing a plain, possibly-negative char is
 * undefined behavior where char is signed (CERT STR37-C).
 */
static void get_header_guard(char *guard, const char *obj_name)
{
	int i;

	sprintf(guard, "__%s_SKEL_H__", obj_name);
	for (i = 0; guard[i]; i++)
		guard[i] = toupper((unsigned char)guard[i]);
}
75 
/* Compute the C identifier under which a map is exposed in the skeleton.
 * Regular maps use their own name; internal maps are recognized by one
 * of the well-known section suffixes, with the leading '.' stripped.
 * Returns false if no usable identifier can be derived.
 */
static bool get_map_ident(const struct bpf_map *map, char *buf, size_t buf_sz)
{
	static const char *sfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
	const char *name = bpf_map__name(map);
	size_t i;

	if (!bpf_map__is_internal(map)) {
		snprintf(buf, buf_sz, "%s", name);
		return true;
	}

	for (i = 0; i < ARRAY_SIZE(sfxs); i++) {
		const char *p = strstr(name, sfxs[i]);

		if (!p)
			continue;

		/* skip the leading '.' of the matched suffix */
		snprintf(buf, buf_sz, "%s", p + 1);
		sanitize_identifier(buf);
		return true;
	}

	return false;
}
100 
/* Compute the C identifier for a datasec section name: only the
 * well-known prefixes are accepted, and the leading '.' is dropped so
 * the result is a valid identifier. Returns false for other sections.
 */
static bool get_datasec_ident(const char *sec_name, char *buf, size_t buf_sz)
{
	static const char *pfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
	size_t i;

	for (i = 0; i < ARRAY_SIZE(pfxs); i++) {
		if (!str_has_prefix(sec_name, pfxs[i]))
			continue;

		/* drop the leading '.' to form an identifier */
		snprintf(buf, buf_sz, "%s", sec_name + 1);
		sanitize_identifier(buf);
		return true;
	}

	return false;
}
118 
/* btf_dump printf callback: forward dumper output straight to stdout. */
static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args)
{
	vprintf(fmt, args);
}
123 
/* Emit a C struct definition mirroring the memory layout of one BPF
 * datasec (.data/.rodata/.bss/.kconfig), so the mapped data can be
 * accessed through a typed pointer in the skeleton. Static variables
 * are skipped; explicit __padN members are inserted where recorded BTF
 * offsets exceed natural (32-bit-conservative) alignment. Returns 0 on
 * success or if the section is not a recognized datasec, negative error
 * otherwise.
 */
static int codegen_datasec_def(struct bpf_object *obj,
			       struct btf *btf,
			       struct btf_dump *d,
			       const struct btf_type *sec,
			       const char *obj_name)
{
	const char *sec_name = btf__name_by_offset(btf, sec->name_off);
	const struct btf_var_secinfo *sec_var = btf_var_secinfos(sec);
	int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec);
	char var_ident[256], sec_ident[256];
	bool strip_mods = false;

	/* unrecognized sections are silently skipped, not an error */
	if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
		return 0;

	/* .kconfig variables keep their type modifiers (e.g. const) */
	if (strcmp(sec_name, ".kconfig") != 0)
		strip_mods = true;

	printf("	struct %s__%s {\n", obj_name, sec_ident);
	for (i = 0; i < vlen; i++, sec_var++) {
		const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
		const char *var_name = btf__name_by_offset(btf, var->name_off);
		DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
			.field_name = var_ident,
			.indent_level = 2,
			.strip_mods = strip_mods,
		);
		int need_off = sec_var->offset, align_off, align;
		__u32 var_type_id = var->type;

		/* static variables are not exposed through BPF skeleton */
		if (btf_var(var)->linkage == BTF_VAR_STATIC)
			continue;

		/* variables must appear in non-decreasing offset order */
		if (off > need_off) {
			p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n",
			      sec_name, i, need_off, off);
			return -EINVAL;
		}

		align = btf__align_of(btf, var->type);
		if (align <= 0) {
			p_err("Failed to determine alignment of variable '%s': %d",
			      var_name, align);
			return -EINVAL;
		}
		/* Assume 32-bit architectures when generating data section
		 * struct memory layout. Given bpftool can't know which target
		 * host architecture it's emitting skeleton for, we need to be
		 * conservative and assume 32-bit one to ensure enough padding
		 * bytes are generated for pointer and long types. This will
		 * still work correctly for 64-bit architectures, because in
		 * the worst case we'll generate unnecessary padding field,
		 * which on 64-bit architectures is not strictly necessary and
		 * would be handled by natural 8-byte alignment. But it still
		 * will be a correct memory layout, based on recorded offsets
		 * in BTF.
		 */
		if (align > 4)
			align = 4;

		/* insert explicit padding if natural alignment falls short
		 * of the offset recorded in BTF
		 */
		align_off = (off + align - 1) / align * align;
		if (align_off != need_off) {
			printf("\t\tchar __pad%d[%d];\n",
			       pad_cnt, need_off - off);
			pad_cnt++;
		}

		/* sanitize variable name, e.g., for static vars inside
		 * a function, it's name is '<function name>.<variable name>',
		 * which we'll turn into a '<function name>_<variable name>'
		 */
		var_ident[0] = '\0';
		strncat(var_ident, var_name, sizeof(var_ident) - 1);
		sanitize_identifier(var_ident);

		printf("\t\t");
		err = btf_dump__emit_type_decl(d, var_type_id, &opts);
		if (err)
			return err;
		printf(";\n");

		off = sec_var->offset + sec_var->size;
	}
	printf("	} *%s;\n", sec_ident);
	return 0;
}
211 
/* For every memory-mapped internal map of the object, find its matching
 * DATASEC BTF type and emit a typed struct definition via
 * codegen_datasec_def(); maps with no corresponding DATASEC get an
 * empty struct. Returns 0 on success, negative error otherwise.
 */
static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	int n = btf__type_cnt(btf);
	struct btf_dump *d;
	struct bpf_map *map;
	const struct btf_type *sec;
	char sec_ident[256], map_ident[256];
	int i, err = 0;

	d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
	err = libbpf_get_error(d);
	if (err)
		return err;

	bpf_object__for_each_map(map, obj) {
		/* only generate definitions for memory-mapped internal maps */
		if (!bpf_map__is_internal(map))
			continue;
		if (!(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			continue;

		if (!get_map_ident(map, map_ident, sizeof(map_ident)))
			continue;

		/* scan all BTF types for the DATASEC whose sanitized
		 * identifier matches this map's identifier
		 */
		sec = NULL;
		for (i = 1; i < n; i++) {
			const struct btf_type *t = btf__type_by_id(btf, i);
			const char *name;

			if (!btf_is_datasec(t))
				continue;

			name = btf__str_by_offset(btf, t->name_off);
			if (!get_datasec_ident(name, sec_ident, sizeof(sec_ident)))
				continue;

			if (strcmp(sec_ident, map_ident) == 0) {
				sec = t;
				break;
			}
		}

		/* In some cases (e.g., sections like .rodata.cst16 containing
		 * compiler allocated string constants only) there will be
		 * special internal maps with no corresponding DATASEC BTF
		 * type. In such case, generate empty structs for each such
		 * map. It will still be memory-mapped and its contents
		 * accessible from user-space through BPF skeleton.
		 */
		if (!sec) {
			printf("	struct %s__%s {\n", obj_name, map_ident);
			printf("	} *%s;\n", map_ident);
		} else {
			err = codegen_datasec_def(obj, btf, d, sec, obj_name);
			if (err)
				goto out;
		}
	}


out:
	btf_dump__free(d);
	return err;
}
277 
/* printf()-like helper that "dedents" its template before printing:
 * the first template line must be empty except for leading tabs, and
 * its tab count defines the baseline indentation that is stripped from
 * every subsequent line; trailing whitespace on each line is trimmed.
 * The adjusted template plus varargs are then passed to vprintf().
 * A malformed template is a programming error and terminates the
 * process via exit(-1).
 */
static void codegen(const char *template, ...)
{
	const char *src, *end;
	int skip_tabs = 0, n;
	char *s, *dst;
	va_list args;
	char c;

	n = strlen(template);
	s = malloc(n + 1);
	if (!s)
		exit(-1);
	src = template;
	dst = s;

	/* find out "baseline" indentation to skip */
	while ((c = *src++)) {
		if (c == '\t') {
			skip_tabs++;
		} else if (c == '\n') {
			break;
		} else {
			p_err("unrecognized character at pos %td in template '%s': '%c'",
			      src - template - 1, template, c);
			free(s);
			exit(-1);
		}
	}

	while (*src) {
		/* skip baseline indentation tabs */
		for (n = skip_tabs; n > 0; n--, src++) {
			if (*src != '\t') {
				p_err("not enough tabs at pos %td in template '%s'",
				      src - template - 1, template);
				free(s);
				exit(-1);
			}
		}
		/* trim trailing whitespace */
		end = strchrnul(src, '\n');
		for (n = end - src; n > 0 && isspace(src[n - 1]); n--)
			;
		memcpy(dst, src, n);
		dst += n;
		if (*end)
			*dst++ = '\n';
		src = *end ? end + 1 : end;
	}
	*dst++ = '\0';

	/* print out using adjusted template */
	va_start(args, template);
	n = vprintf(s, args);
	va_end(args);

	free(s);
}
336 
/* Print 'data_sz' bytes to stdout as a C string-literal body: zero
 * bytes become "\0" (2 chars), everything else "\xNN" (4 chars), with
 * an escaped newline inserted so no output line exceeds 78 characters.
 */
static void print_hex(const char *data, int data_sz)
{
	int i, line_len = 0;

	for (i = 0; i < data_sz; i++) {
		unsigned char byte = data[i];
		int w = byte ? 4 : 2;

		line_len += w;
		if (line_len > 78) {
			/* break the literal with an escaped newline */
			printf("\\\n");
			line_len = w;
		}
		if (byte)
			printf("\\x%02x", byte);
		else
			printf("\\0");
	}
}
355 
/* Size of the mmap() region backing a map: each value rounded up to
 * 8 bytes, multiplied by max_entries, and the total rounded up to a
 * whole number of pages.
 */
static size_t bpf_map_mmap_sz(const struct bpf_map *map)
{
	const long page_sz = sysconf(_SC_PAGE_SIZE);
	size_t sz;

	sz = (size_t)roundup(bpf_map__value_size(map), 8) * bpf_map__max_entries(map);
	return roundup(sz, page_sz);
}
365 
/* Emit attach/detach helpers for the light skeleton: one
 * <obj>__<prog>__attach() per program, plus aggregate <obj>__attach()
 * and <obj>__detach(). Only raw_tracepoint and tracing programs get
 * real auto-attach code; other program types get a stub returning 0.
 */
static void codegen_attach_detach(struct bpf_object *obj, const char *obj_name)
{
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		const char *tp_name;

		codegen("\
			\n\
			\n\
			static inline int					    \n\
			%1$s__%2$s__attach(struct %1$s *skel)			    \n\
			{							    \n\
				int prog_fd = skel->progs.%2$s.prog_fd;		    \n\
			", obj_name, bpf_program__name(prog));

		switch (bpf_program__type(prog)) {
		case BPF_PROG_TYPE_RAW_TRACEPOINT:
			/* tracepoint name follows the '/' in the section name */
			tp_name = strchr(bpf_program__section_name(prog), '/') + 1;
			printf("\tint fd = skel_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name);
			break;
		case BPF_PROG_TYPE_TRACING:
			if (bpf_program__expected_attach_type(prog) == BPF_TRACE_ITER)
				printf("\tint fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);\n");
			else
				printf("\tint fd = skel_raw_tracepoint_open(NULL, prog_fd);\n");
			break;
		default:
			printf("\tint fd = ((void)prog_fd, 0); /* auto-attach not supported */\n");
			break;
		}
		codegen("\
			\n\
										    \n\
				if (fd > 0)					    \n\
					skel->links.%1$s_fd = fd;		    \n\
				return fd;					    \n\
			}							    \n\
			", bpf_program__name(prog));
	}

	/* aggregate attach: first failure's error code wins, later
	 * programs are skipped once ret went negative
	 */
	codegen("\
		\n\
								    \n\
		static inline int					    \n\
		%1$s__attach(struct %1$s *skel)				    \n\
		{							    \n\
			int ret = 0;					    \n\
								    \n\
		", obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				ret = ret < 0 ? ret : %1$s__%2$s__attach(skel);   \n\
			", obj_name, bpf_program__name(prog));
	}

	codegen("\
		\n\
			return ret < 0 ? ret : 0;			    \n\
		}							    \n\
								    \n\
		static inline void					    \n\
		%1$s__detach(struct %1$s *skel)				    \n\
		{							    \n\
		", obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				skel_closenz(skel->links.%1$s_fd);	    \n\
			", bpf_program__name(prog));
	}

	codegen("\
		\n\
		}							    \n\
		");
}
446 
/* Emit <obj>__destroy() for the light skeleton: detach links, close
 * program FDs, free mmap-ed data and close FDs of internal maps, then
 * free the skeleton itself.
 */
static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
{
	struct bpf_program *prog;
	struct bpf_map *map;
	char ident[256];

	codegen("\
		\n\
		static void						    \n\
		%1$s__destroy(struct %1$s *skel)			    \n\
		{							    \n\
			if (!skel)					    \n\
				return;					    \n\
			%1$s__detach(skel);				    \n\
		",
		obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				skel_closenz(skel->progs.%1$s.prog_fd);	    \n\
			", bpf_program__name(prog));
	}

	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;
		/* mmap-ed data of internal maps must be unmapped too */
		if (bpf_map__is_internal(map) &&
		    (bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			printf("\tskel_free_map_data(skel->%1$s, skel->maps.%1$s.initial_value, %2$zd);\n",
			       ident, bpf_map_mmap_sz(map));
		codegen("\
			\n\
				skel_closenz(skel->maps.%1$s.map_fd);	    \n\
			", ident);
	}
	codegen("\
		\n\
			skel_free(skel);				    \n\
		}							    \n\
		",
		obj_name);
}
490 
/* Generate the "light skeleton" (-L/--use-loader) variant: load the
 * object with a gen_loader attached so libbpf records loader program
 * and metadata blobs, then emit open/load/attach/destroy helpers built
 * on <bpf/skel_internal.h> with those blobs embedded as hex literals.
 * Returns 0 on success, negative error otherwise.
 */
static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard)
{
	DECLARE_LIBBPF_OPTS(gen_loader_opts, opts);
	struct bpf_map *map;
	char ident[256];
	int err = 0;

	err = bpf_object__gen_loader(obj, &opts);
	if (err)
		return err;

	err = bpf_object__load(obj);
	if (err) {
		p_err("failed to load object file");
		goto out;
	}
	/* If there was no error during load then gen_loader_opts
	 * are populated with the loader program.
	 */

	/* finish generating 'struct skel' */
	codegen("\
		\n\
		};							    \n\
		", obj_name);


	codegen_attach_detach(obj, obj_name);

	codegen_destroy(obj, obj_name);

	codegen("\
		\n\
		static inline struct %1$s *				    \n\
		%1$s__open(void)					    \n\
		{							    \n\
			struct %1$s *skel;				    \n\
									    \n\
			skel = skel_alloc(sizeof(*skel));		    \n\
			if (!skel)					    \n\
				goto cleanup;				    \n\
			skel->ctx.sz = (void *)&skel->links - (void *)skel; \n\
		",
		obj_name, opts.data_sz);
	/* embed initial data of mmap-able internal maps into __open() */
	bpf_object__for_each_map(map, obj) {
		const void *mmap_data = NULL;
		size_t mmap_size = 0;

		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		if (!bpf_map__is_internal(map) ||
		    !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			continue;

		codegen("\
		\n\
			skel->%1$s = skel_prep_map_data((void *)\"\\	    \n\
		", ident);
		mmap_data = bpf_map__initial_value(map, &mmap_size);
		print_hex(mmap_data, mmap_size);
		codegen("\
		\n\
		\", %1$zd, %2$zd);					    \n\
			if (!skel->%3$s)				    \n\
				goto cleanup;				    \n\
			skel->maps.%3$s.initial_value = (__u64) (long) skel->%3$s;\n\
		", bpf_map_mmap_sz(map), mmap_size, ident);
	}
	/* __load() runs the recorded loader program via bpf_load_and_run() */
	codegen("\
		\n\
			return skel;					    \n\
		cleanup:						    \n\
			%1$s__destroy(skel);				    \n\
			return NULL;					    \n\
		}							    \n\
									    \n\
		static inline int					    \n\
		%1$s__load(struct %1$s *skel)				    \n\
		{							    \n\
			struct bpf_load_and_run_opts opts = {};		    \n\
			int err;					    \n\
									    \n\
			opts.ctx = (struct bpf_loader_ctx *)skel;	    \n\
			opts.data_sz = %2$d;				    \n\
			opts.data = (void *)\"\\			    \n\
		",
		obj_name, opts.data_sz);
	print_hex(opts.data, opts.data_sz);
	codegen("\
		\n\
		\";							    \n\
		");

	codegen("\
		\n\
			opts.insns_sz = %d;				    \n\
			opts.insns = (void *)\"\\			    \n\
		",
		opts.insns_sz);
	print_hex(opts.insns, opts.insns_sz);
	codegen("\
		\n\
		\";							    \n\
			err = bpf_load_and_run(&opts);			    \n\
			if (err < 0)					    \n\
				return err;				    \n\
		", obj_name);
	/* after load, re-mmap internal maps over their now-known FDs */
	bpf_object__for_each_map(map, obj) {
		const char *mmap_flags;

		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		if (!bpf_map__is_internal(map) ||
		    !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			continue;

		/* read-only for frozen maps, read-write otherwise */
		if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG)
			mmap_flags = "PROT_READ";
		else
			mmap_flags = "PROT_READ | PROT_WRITE";

		codegen("\
		\n\
			skel->%1$s = skel_finalize_map_data(&skel->maps.%1$s.initial_value,  \n\
							%2$zd, %3$s, skel->maps.%1$s.map_fd);\n\
			if (!skel->%1$s)				    \n\
				return -ENOMEM;				    \n\
			",
		       ident, bpf_map_mmap_sz(map), mmap_flags);
	}
	codegen("\
		\n\
			return 0;					    \n\
		}							    \n\
									    \n\
		static inline struct %1$s *				    \n\
		%1$s__open_and_load(void)				    \n\
		{							    \n\
			struct %1$s *skel;				    \n\
									    \n\
			skel = %1$s__open();				    \n\
			if (!skel)					    \n\
				return NULL;				    \n\
			if (%1$s__load(skel)) {				    \n\
				%1$s__destroy(skel);			    \n\
				return NULL;				    \n\
			}						    \n\
			return skel;					    \n\
		}							    \n\
		", obj_name);

	codegen("\
		\n\
									    \n\
		#endif /* %s */						    \n\
		",
		header_guard);
	err = 0;
out:
	return err;
}
654 
/* Implementation of "bpftool gen skeleton FILE [name OBJECT_NAME]":
 * mmap() the BPF ELF object, open it with libbpf, and print a
 * self-contained C skeleton header to stdout. With -L/--use-loader the
 * body is generated by gen_trace() instead (light skeleton); otherwise
 * the full bpf_object_skeleton-based API is emitted, with the raw ELF
 * embedded as a string literal. Returns 0 on success, negative on error.
 */
static int do_skeleton(int argc, char **argv)
{
	char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
	size_t i, map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz;
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
	char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
	struct bpf_object *obj = NULL;
	const char *file;
	char ident[256];
	struct bpf_program *prog;
	int fd, err = -1;
	struct bpf_map *map;
	struct btf *btf;
	struct stat st;

	if (!REQ_ARGS(1)) {
		usage();
		return -1;
	}
	file = GET_ARG();

	/* parse optional "name OBJECT_NAME" argument pair(s) */
	while (argc) {
		if (!REQ_ARGS(2))
			return -1;

		if (is_prefix(*argv, "name")) {
			NEXT_ARG();

			if (obj_name[0] != '\0') {
				p_err("object name already specified");
				return -1;
			}

			strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
			obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
		} else {
			p_err("unknown arg %s", *argv);
			return -1;
		}

		NEXT_ARG();
	}

	/* NOTE(review): the loop above only exits with argc == 0 or by
	 * returning, so this branch looks unreachable — confirm
	 */
	if (argc) {
		p_err("extra unknown arguments");
		return -1;
	}

	/* mmap the whole object file, size rounded up to page size */
	if (stat(file, &st)) {
		p_err("failed to stat() %s: %s", file, strerror(errno));
		return -1;
	}
	file_sz = st.st_size;
	mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
	fd = open(file, O_RDONLY);
	if (fd < 0) {
		p_err("failed to open() %s: %s", file, strerror(errno));
		return -1;
	}
	obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
	if (obj_data == MAP_FAILED) {
		obj_data = NULL;
		p_err("failed to mmap() %s: %s", file, strerror(errno));
		goto out;
	}
	/* fall back to a name derived from the file path */
	if (obj_name[0] == '\0')
		get_obj_name(obj_name, file);
	opts.object_name = obj_name;
	if (verifier_logs)
		/* log_level1 + log_level2 + stats, but not stable UAPI */
		opts.kernel_log_level = 1 + 2 + 4;
	obj = bpf_object__open_mem(obj_data, file_sz, &opts);
	err = libbpf_get_error(obj);
	if (err) {
		char err_buf[256];

		libbpf_strerror(err, err_buf, sizeof(err_buf));
		p_err("failed to open BPF object file: %s", err_buf);
		obj = NULL;
		goto out;
	}

	/* count maps and programs that will appear in the skeleton */
	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident))) {
			p_err("ignoring unrecognized internal map '%s'...",
			      bpf_map__name(map));
			continue;
		}
		map_cnt++;
	}
	bpf_object__for_each_program(prog, obj) {
		prog_cnt++;
	}

	/* emit the header prologue and the start of 'struct <obj>' */
	get_header_guard(header_guard, obj_name);
	if (use_loader) {
		codegen("\
		\n\
		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */   \n\
		/* THIS FILE IS AUTOGENERATED! */			    \n\
		#ifndef %2$s						    \n\
		#define %2$s						    \n\
									    \n\
		#include <bpf/skel_internal.h>				    \n\
									    \n\
		struct %1$s {						    \n\
			struct bpf_loader_ctx ctx;			    \n\
		",
		obj_name, header_guard
		);
	} else {
		codegen("\
		\n\
		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */   \n\
									    \n\
		/* THIS FILE IS AUTOGENERATED! */			    \n\
		#ifndef %2$s						    \n\
		#define %2$s						    \n\
									    \n\
		#include <errno.h>					    \n\
		#include <stdlib.h>					    \n\
		#include <bpf/libbpf.h>					    \n\
									    \n\
		struct %1$s {						    \n\
			struct bpf_object_skeleton *skeleton;		    \n\
			struct bpf_object *obj;				    \n\
		",
		obj_name, header_guard
		);
	}

	/* emit the 'maps' sub-struct */
	if (map_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_map(map, obj) {
			if (!get_map_ident(map, ident, sizeof(ident)))
				continue;
			if (use_loader)
				printf("\t\tstruct bpf_map_desc %s;\n", ident);
			else
				printf("\t\tstruct bpf_map *%s;\n", ident);
		}
		printf("\t} maps;\n");
	}

	/* emit the 'progs' and 'links' sub-structs */
	if (prog_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			if (use_loader)
				printf("\t\tstruct bpf_prog_desc %s;\n",
				       bpf_program__name(prog));
			else
				printf("\t\tstruct bpf_program *%s;\n",
				       bpf_program__name(prog));
		}
		printf("\t} progs;\n");
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			if (use_loader)
				printf("\t\tint %s_fd;\n",
				       bpf_program__name(prog));
			else
				printf("\t\tstruct bpf_link *%s;\n",
				       bpf_program__name(prog));
		}
		printf("\t} links;\n");
	}

	/* emit typed datasec struct pointers if the object carries BTF */
	btf = bpf_object__btf(obj);
	if (btf) {
		err = codegen_datasecs(obj, obj_name);
		if (err)
			goto out;
	}
	/* light skeleton: rest of the header comes from gen_trace() */
	if (use_loader) {
		err = gen_trace(obj, obj_name, header_guard);
		goto out;
	}

	codegen("\
		\n\
									    \n\
		#ifdef __cplusplus					    \n\
			static struct %1$s *open(const struct bpf_object_open_opts *opts = nullptr);\n\
			static struct %1$s *open_and_load();		    \n\
			static int load(struct %1$s *skel);		    \n\
			static int attach(struct %1$s *skel);		    \n\
			static void detach(struct %1$s *skel);		    \n\
			static void destroy(struct %1$s *skel);		    \n\
			static const void *elf_bytes(size_t *sz);	    \n\
		#endif /* __cplusplus */				    \n\
		};							    \n\
									    \n\
		static void						    \n\
		%1$s__destroy(struct %1$s *obj)				    \n\
		{							    \n\
			if (!obj)					    \n\
				return;					    \n\
			if (obj->skeleton)				    \n\
				bpf_object__destroy_skeleton(obj->skeleton);\n\
			free(obj);					    \n\
		}							    \n\
									    \n\
		static inline int					    \n\
		%1$s__create_skeleton(struct %1$s *obj);		    \n\
									    \n\
		static inline struct %1$s *				    \n\
		%1$s__open_opts(const struct bpf_object_open_opts *opts)    \n\
		{							    \n\
			struct %1$s *obj;				    \n\
			int err;					    \n\
									    \n\
			obj = (struct %1$s *)calloc(1, sizeof(*obj));	    \n\
			if (!obj) {					    \n\
				errno = ENOMEM;				    \n\
				return NULL;				    \n\
			}						    \n\
									    \n\
			err = %1$s__create_skeleton(obj);		    \n\
			if (err)					    \n\
				goto err_out;				    \n\
									    \n\
			err = bpf_object__open_skeleton(obj->skeleton, opts);\n\
			if (err)					    \n\
				goto err_out;				    \n\
									    \n\
			return obj;					    \n\
		err_out:						    \n\
			%1$s__destroy(obj);				    \n\
			errno = -err;					    \n\
			return NULL;					    \n\
		}							    \n\
									    \n\
		static inline struct %1$s *				    \n\
		%1$s__open(void)					    \n\
		{							    \n\
			return %1$s__open_opts(NULL);			    \n\
		}							    \n\
									    \n\
		static inline int					    \n\
		%1$s__load(struct %1$s *obj)				    \n\
		{							    \n\
			return bpf_object__load_skeleton(obj->skeleton);    \n\
		}							    \n\
									    \n\
		static inline struct %1$s *				    \n\
		%1$s__open_and_load(void)				    \n\
		{							    \n\
			struct %1$s *obj;				    \n\
			int err;					    \n\
									    \n\
			obj = %1$s__open();				    \n\
			if (!obj)					    \n\
				return NULL;				    \n\
			err = %1$s__load(obj);				    \n\
			if (err) {					    \n\
				%1$s__destroy(obj);			    \n\
				errno = -err;				    \n\
				return NULL;				    \n\
			}						    \n\
			return obj;					    \n\
		}							    \n\
									    \n\
		static inline int					    \n\
		%1$s__attach(struct %1$s *obj)				    \n\
		{							    \n\
			return bpf_object__attach_skeleton(obj->skeleton);  \n\
		}							    \n\
									    \n\
		static inline void					    \n\
		%1$s__detach(struct %1$s *obj)				    \n\
		{							    \n\
			return bpf_object__detach_skeleton(obj->skeleton);  \n\
		}							    \n\
		",
		obj_name
	);

	/* emit __create_skeleton(): fills in map/prog/link descriptors */
	codegen("\
		\n\
									    \n\
		static inline const void *%1$s__elf_bytes(size_t *sz);	    \n\
									    \n\
		static inline int					    \n\
		%1$s__create_skeleton(struct %1$s *obj)			    \n\
		{							    \n\
			struct bpf_object_skeleton *s;			    \n\
									    \n\
			s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
			if (!s)						    \n\
				goto err;				    \n\
									    \n\
			s->sz = sizeof(*s);				    \n\
			s->name = \"%1$s\";				    \n\
			s->obj = &obj->obj;				    \n\
		",
		obj_name
	);
	if (map_cnt) {
		codegen("\
			\n\
									    \n\
				/* maps */				    \n\
				s->map_cnt = %zu;			    \n\
				s->map_skel_sz = sizeof(*s->maps);	    \n\
				s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
				if (!s->maps)				    \n\
					goto err;			    \n\
			",
			map_cnt
		);
		i = 0;
		bpf_object__for_each_map(map, obj) {
			if (!get_map_ident(map, ident, sizeof(ident)))
				continue;

			codegen("\
				\n\
									    \n\
					s->maps[%zu].name = \"%s\";	    \n\
					s->maps[%zu].map = &obj->maps.%s;   \n\
				",
				i, bpf_map__name(map), i, ident);
			/* memory-mapped internal maps */
			if (bpf_map__is_internal(map) &&
			    (bpf_map__map_flags(map) & BPF_F_MMAPABLE)) {
				printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
				       i, ident);
			}
			i++;
		}
	}
	if (prog_cnt) {
		codegen("\
			\n\
									    \n\
				/* programs */				    \n\
				s->prog_cnt = %zu;			    \n\
				s->prog_skel_sz = sizeof(*s->progs);	    \n\
				s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
				if (!s->progs)				    \n\
					goto err;			    \n\
			",
			prog_cnt
		);
		i = 0;
		bpf_object__for_each_program(prog, obj) {
			codegen("\
				\n\
									    \n\
					s->progs[%1$zu].name = \"%2$s\";    \n\
					s->progs[%1$zu].prog = &obj->progs.%2$s;\n\
					s->progs[%1$zu].link = &obj->links.%2$s;\n\
				",
				i, bpf_program__name(prog));
			i++;
		}
	}
	codegen("\
		\n\
									    \n\
			s->data = (void *)%2$s__elf_bytes(&s->data_sz);	    \n\
									    \n\
			obj->skeleton = s;				    \n\
			return 0;					    \n\
		err:							    \n\
			bpf_object__destroy_skeleton(s);		    \n\
			return -ENOMEM;					    \n\
		}							    \n\
									    \n\
		static inline const void *%2$s__elf_bytes(size_t *sz)	    \n\
		{							    \n\
			*sz = %1$d;					    \n\
			return (const void *)\"\\			    \n\
		"
		, file_sz, obj_name);

	/* embed contents of BPF object file */
	print_hex(obj_data, file_sz);

	codegen("\
		\n\
		\";							    \n\
		}							    \n\
									    \n\
		#ifdef __cplusplus					    \n\
		struct %1$s *%1$s::open(const struct bpf_object_open_opts *opts) { return %1$s__open_opts(opts); }\n\
		struct %1$s *%1$s::open_and_load() { return %1$s__open_and_load(); }	\n\
		int %1$s::load(struct %1$s *skel) { return %1$s__load(skel); }		\n\
		int %1$s::attach(struct %1$s *skel) { return %1$s__attach(skel); }	\n\
		void %1$s::detach(struct %1$s *skel) { %1$s__detach(skel); }		\n\
		void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); }		\n\
		const void *%1$s::elf_bytes(size_t *sz) { return %1$s__elf_bytes(sz); } \n\
		#endif /* __cplusplus */				    \n\
									    \n\
		#endif /* %2$s */					    \n\
		",
		obj_name, header_guard);
	err = 0;
out:
	bpf_object__close(obj);
	if (obj_data)
		munmap(obj_data, mmap_sz);
	close(fd);
	return err;
}
1060 
/* Implementation of "bpftool gen object OUTPUT_FILE INPUT_FILE...":
 * statically link one or more BPF ELF object files into OUTPUT_FILE
 * using libbpf's bpf_linker. Returns 0 on success, non-zero otherwise.
 */
static int do_object(int argc, char **argv)
{
	struct bpf_linker *linker;
	const char *output_file, *file;
	int err = 0;

	if (!REQ_ARGS(2)) {
		usage();
		return -1;
	}

	output_file = GET_ARG();

	/* the linker will write the combined object to output_file */
	linker = bpf_linker__new(output_file, NULL);
	if (!linker) {
		p_err("failed to create BPF linker instance");
		return -1;
	}

	/* feed every remaining input file into the linker */
	while (argc) {
		file = GET_ARG();

		err = bpf_linker__add_file(linker, file, NULL);
		if (err) {
			p_err("failed to link '%s': %s (%d)", file, strerror(err), err);
			goto out;
		}
	}

	err = bpf_linker__finalize(linker);
	if (err) {
		p_err("failed to finalize ELF file: %s (%d)", strerror(err), err);
		goto out;
	}

	err = 0;
out:
	bpf_linker__free(linker);
	return err;
}
1101 
/* Print "bpftool gen" usage to stderr, or a JSON null when JSON output
 * was requested. Always returns 0.
 */
static int do_help(int argc, char **argv)
{
	if (json_output) {
		jsonw_null(json_wtr);
		return 0;
	}

	fprintf(stderr,
		"Usage: %1$s %2$s object OUTPUT_FILE INPUT_FILE [INPUT_FILE...]\n"
		"       %1$s %2$s skeleton FILE [name OBJECT_NAME]\n"
		"       %1$s %2$s min_core_btf INPUT OUTPUT OBJECT [OBJECT...]\n"
		"       %1$s %2$s help\n"
		"\n"
		"       " HELP_SPEC_OPTIONS " |\n"
		"                    {-L|--use-loader} }\n"
		"",
		bin_name, "gen");

	return 0;
}
1122 
1123 static int btf_save_raw(const struct btf *btf, const char *path)
1124 {
1125 	const void *data;
1126 	FILE *f = NULL;
1127 	__u32 data_sz;
1128 	int err = 0;
1129 
1130 	data = btf__raw_data(btf, &data_sz);
1131 	if (!data)
1132 		return -ENOMEM;
1133 
1134 	f = fopen(path, "wb");
1135 	if (!f)
1136 		return -errno;
1137 
1138 	if (fwrite(data, 1, data_sz, f) != data_sz)
1139 		err = -errno;
1140 
1141 	fclose(f);
1142 	return err;
1143 }
1144 
/* BTFGen working state: two parses of the same target BTF file. src_btf
 * stays pristine; marked_btf has name_off fields overwritten (see MARKED)
 * to flag which types/members the relocations actually use.
 */
struct btfgen_info {
	struct btf *src_btf;
	struct btf *marked_btf; /* btf structure used to mark used types */
};
1149 
/* hashmap hash callback: keys are type IDs stored directly in the pointer
 * value, so the pointer itself serves as the hash
 */
static size_t btfgen_hash_fn(const void *key, void *ctx)
{
	return (size_t)(uintptr_t)key;
}
1154 
/* hashmap equality callback: keys are plain integer-valued pointers, so
 * pointer identity is key equality
 */
static bool btfgen_equal_fn(const void *lhs, const void *rhs, void *ctx)
{
	return lhs == rhs;
}
1159 
/* Widen a 32-bit type ID so it can be used directly as a hashmap key. */
static void *u32_as_hash_key(__u32 x)
{
	uintptr_t key = x;

	return (void *)key;
}
1164 
1165 static void btfgen_free_info(struct btfgen_info *info)
1166 {
1167 	if (!info)
1168 		return;
1169 
1170 	btf__free(info->src_btf);
1171 	btf__free(info->marked_btf);
1172 
1173 	free(info);
1174 }
1175 
1176 static struct btfgen_info *
1177 btfgen_new_info(const char *targ_btf_path)
1178 {
1179 	struct btfgen_info *info;
1180 	int err;
1181 
1182 	info = calloc(1, sizeof(*info));
1183 	if (!info)
1184 		return NULL;
1185 
1186 	info->src_btf = btf__parse(targ_btf_path, NULL);
1187 	if (!info->src_btf) {
1188 		err = -errno;
1189 		p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
1190 		goto err_out;
1191 	}
1192 
1193 	info->marked_btf = btf__parse(targ_btf_path, NULL);
1194 	if (!info->marked_btf) {
1195 		err = -errno;
1196 		p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
1197 		goto err_out;
1198 	}
1199 
1200 	return info;
1201 
1202 err_out:
1203 	btfgen_free_info(info);
1204 	errno = -err;
1205 	return NULL;
1206 }
1207 
1208 #define MARKED UINT32_MAX
1209 
1210 static void btfgen_mark_member(struct btfgen_info *info, int type_id, int idx)
1211 {
1212 	const struct btf_type *t = btf__type_by_id(info->marked_btf, type_id);
1213 	struct btf_member *m = btf_members(t) + idx;
1214 
1215 	m->name_off = MARKED;
1216 }
1217 
1218 static int
1219 btfgen_mark_type(struct btfgen_info *info, unsigned int type_id, bool follow_pointers)
1220 {
1221 	const struct btf_type *btf_type = btf__type_by_id(info->src_btf, type_id);
1222 	struct btf_type *cloned_type;
1223 	struct btf_param *param;
1224 	struct btf_array *array;
1225 	int err, i;
1226 
1227 	if (type_id == 0)
1228 		return 0;
1229 
1230 	/* mark type on cloned BTF as used */
1231 	cloned_type = (struct btf_type *) btf__type_by_id(info->marked_btf, type_id);
1232 	cloned_type->name_off = MARKED;
1233 
1234 	/* recursively mark other types needed by it */
1235 	switch (btf_kind(btf_type)) {
1236 	case BTF_KIND_UNKN:
1237 	case BTF_KIND_INT:
1238 	case BTF_KIND_FLOAT:
1239 	case BTF_KIND_ENUM:
1240 	case BTF_KIND_STRUCT:
1241 	case BTF_KIND_UNION:
1242 		break;
1243 	case BTF_KIND_PTR:
1244 		if (follow_pointers) {
1245 			err = btfgen_mark_type(info, btf_type->type, follow_pointers);
1246 			if (err)
1247 				return err;
1248 		}
1249 		break;
1250 	case BTF_KIND_CONST:
1251 	case BTF_KIND_VOLATILE:
1252 	case BTF_KIND_TYPEDEF:
1253 		err = btfgen_mark_type(info, btf_type->type, follow_pointers);
1254 		if (err)
1255 			return err;
1256 		break;
1257 	case BTF_KIND_ARRAY:
1258 		array = btf_array(btf_type);
1259 
1260 		/* mark array type */
1261 		err = btfgen_mark_type(info, array->type, follow_pointers);
1262 		/* mark array's index type */
1263 		err = err ? : btfgen_mark_type(info, array->index_type, follow_pointers);
1264 		if (err)
1265 			return err;
1266 		break;
1267 	case BTF_KIND_FUNC_PROTO:
1268 		/* mark ret type */
1269 		err = btfgen_mark_type(info, btf_type->type, follow_pointers);
1270 		if (err)
1271 			return err;
1272 
1273 		/* mark parameters types */
1274 		param = btf_params(btf_type);
1275 		for (i = 0; i < btf_vlen(btf_type); i++) {
1276 			err = btfgen_mark_type(info, param->type, follow_pointers);
1277 			if (err)
1278 				return err;
1279 			param++;
1280 		}
1281 		break;
1282 	/* tells if some other type needs to be handled */
1283 	default:
1284 		p_err("unsupported kind: %s (%d)", btf_kind_str(btf_type), type_id);
1285 		return -EINVAL;
1286 	}
1287 
1288 	return 0;
1289 }
1290 
/* Record the types needed by a field-based CO-RE relocation.
 *
 * Walks @targ_spec's raw accessor chain starting at root_type_id, marking
 * every traversed struct/union member and its type as used (pointers are
 * NOT followed, keeping the generated BTF minimal).
 *
 * Returns 0 on success, -EINVAL on an unexpected type kind.
 */
static int btfgen_record_field_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
	struct btf *btf = info->src_btf;
	const struct btf_type *btf_type;
	struct btf_member *btf_member;
	struct btf_array *array;
	unsigned int type_id = targ_spec->root_type_id;
	int idx, err;

	/* mark root type */
	btf_type = btf__type_by_id(btf, type_id);
	err = btfgen_mark_type(info, type_id, false);
	if (err)
		return err;

	/* mark types for complex types (arrays, unions, structures) */
	for (int i = 1; i < targ_spec->raw_len; i++) {
		/* skip typedefs and mods */
		while (btf_is_mod(btf_type) || btf_is_typedef(btf_type)) {
			type_id = btf_type->type;
			btf_type = btf__type_by_id(btf, type_id);
		}

		switch (btf_kind(btf_type)) {
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
			/* raw_spec[i] is the member index at this level */
			idx = targ_spec->raw_spec[i];
			btf_member = btf_members(btf_type) + idx;

			/* mark member */
			btfgen_mark_member(info, type_id, idx);

			/* mark member's type */
			type_id = btf_member->type;
			btf_type = btf__type_by_id(btf, type_id);
			err = btfgen_mark_type(info, type_id, false);
			if (err)
				return err;
			break;
		case BTF_KIND_ARRAY:
			/* raw_spec[i] is an array index here: just descend
			 * into the element type, no member to mark */
			array = btf_array(btf_type);
			type_id = array->type;
			btf_type = btf__type_by_id(btf, type_id);
			break;
		default:
			p_err("unsupported kind: %s (%d)",
			      btf_kind_str(btf_type), btf_type->type);
			return -EINVAL;
		}
	}

	return 0;
}
1344 
1345 static int btfgen_record_type_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
1346 {
1347 	return btfgen_mark_type(info, targ_spec->root_type_id, true);
1348 }
1349 
1350 static int btfgen_record_enumval_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
1351 {
1352 	return btfgen_mark_type(info, targ_spec->root_type_id, false);
1353 }
1354 
1355 static int btfgen_record_reloc(struct btfgen_info *info, struct bpf_core_spec *res)
1356 {
1357 	switch (res->relo_kind) {
1358 	case BPF_CORE_FIELD_BYTE_OFFSET:
1359 	case BPF_CORE_FIELD_BYTE_SIZE:
1360 	case BPF_CORE_FIELD_EXISTS:
1361 	case BPF_CORE_FIELD_SIGNED:
1362 	case BPF_CORE_FIELD_LSHIFT_U64:
1363 	case BPF_CORE_FIELD_RSHIFT_U64:
1364 		return btfgen_record_field_relo(info, res);
1365 	case BPF_CORE_TYPE_ID_LOCAL: /* BPF_CORE_TYPE_ID_LOCAL doesn't require kernel BTF */
1366 		return 0;
1367 	case BPF_CORE_TYPE_ID_TARGET:
1368 	case BPF_CORE_TYPE_EXISTS:
1369 	case BPF_CORE_TYPE_SIZE:
1370 		return btfgen_record_type_relo(info, res);
1371 	case BPF_CORE_ENUMVAL_EXISTS:
1372 	case BPF_CORE_ENUMVAL_VALUE:
1373 		return btfgen_record_enumval_relo(info, res);
1374 	default:
1375 		return -EINVAL;
1376 	}
1377 }
1378 
/* Build the list of candidate types in @targ_btf that match the local type
 * @local_id (same "essential" name, i.e. ignoring any "___suffix" flavor).
 *
 * Returns a newly allocated candidate list on success. On failure returns
 * NULL with errno set to a positive error code (for the calloc failure,
 * errno is left as calloc set it).
 */
static struct bpf_core_cand_list *
btfgen_find_cands(const struct btf *local_btf, const struct btf *targ_btf, __u32 local_id)
{
	const struct btf_type *local_type;
	struct bpf_core_cand_list *cands = NULL;
	struct bpf_core_cand local_cand = {};
	size_t local_essent_len;
	const char *local_name;
	int err;

	local_cand.btf = local_btf;
	local_cand.id = local_id;

	local_type = btf__type_by_id(local_btf, local_id);
	if (!local_type) {
		err = -EINVAL;
		goto err_out;
	}

	local_name = btf__name_by_offset(local_btf, local_type->name_off);
	if (!local_name) {
		err = -EINVAL;
		goto err_out;
	}
	/* length of the name without any "___flavor" suffix */
	local_essent_len = bpf_core_essential_name_len(local_name);

	cands = calloc(1, sizeof(*cands));
	if (!cands)
		return NULL;

	err = bpf_core_add_cands(&local_cand, local_essent_len, targ_btf, "vmlinux", 1, cands);
	if (err)
		goto err_out;

	return cands;

err_out:
	/* bpf_core_free_cands() accepts NULL */
	bpf_core_free_cands(cands);
	errno = -err;
	return NULL;
}
1420 
/* Record relocation information for a single BPF object.
 *
 * Parses the object's BTF and BTF.ext sections and, for every CO-RE
 * relocation record, computes the target spec against info->src_btf and
 * marks the types/members it touches. Candidate lists are cached per root
 * type ID so repeated relocations on the same type are resolved once.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int btfgen_record_obj(struct btfgen_info *info, const char *obj_path)
{
	const struct btf_ext_info_sec *sec;
	const struct bpf_core_relo *relo;
	const struct btf_ext_info *seg;
	struct hashmap_entry *entry;
	struct hashmap *cand_cache = NULL;
	struct btf_ext *btf_ext = NULL;
	unsigned int relo_idx;
	struct btf *btf = NULL;
	size_t i;
	int err;

	btf = btf__parse(obj_path, &btf_ext);
	if (!btf) {
		err = -errno;
		p_err("failed to parse BPF object '%s': %s", obj_path, strerror(errno));
		return err;
	}

	if (!btf_ext) {
		p_err("failed to parse BPF object '%s': section %s not found",
		      obj_path, BTF_EXT_ELF_SEC);
		err = -EINVAL;
		goto out;
	}

	/* object has no CO-RE relocations: nothing to record */
	if (btf_ext->core_relo_info.len == 0) {
		err = 0;
		goto out;
	}

	cand_cache = hashmap__new(btfgen_hash_fn, btfgen_equal_fn, NULL);
	if (IS_ERR(cand_cache)) {
		err = PTR_ERR(cand_cache);
		goto out;
	}

	seg = &btf_ext->core_relo_info;
	for_each_btf_ext_sec(seg, sec) {
		for_each_btf_ext_rec(seg, sec, relo_idx, relo) {
			struct bpf_core_spec specs_scratch[3] = {};
			struct bpf_core_relo_res targ_res = {};
			struct bpf_core_cand_list *cands = NULL;
			const void *type_key = u32_as_hash_key(relo->type_id);
			const char *sec_name = btf__name_by_offset(btf, sec->sec_name_off);

			/* TYPE_ID_LOCAL relocs need no target candidates;
			 * for everything else, compute and cache them */
			if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
			    !hashmap__find(cand_cache, type_key, (void **)&cands)) {
				cands = btfgen_find_cands(btf, info->src_btf, relo->type_id);
				if (!cands) {
					err = -errno;
					goto out;
				}

				err = hashmap__set(cand_cache, type_key, cands, NULL, NULL);
				if (err)
					goto out;
			}

			err = bpf_core_calc_relo_insn(sec_name, relo, relo_idx, btf, cands,
						      specs_scratch, &targ_res);
			if (err)
				goto out;

			/* specs_scratch[2] is the target spec */
			err = btfgen_record_reloc(info, &specs_scratch[2]);
			if (err)
				goto out;
		}
	}

out:
	btf__free(btf);
	btf_ext__free(btf_ext);

	/* candidate lists are owned by the cache entries: free them first */
	if (!IS_ERR_OR_NULL(cand_cache)) {
		hashmap__for_each_entry(cand_cache, entry, i) {
			bpf_core_free_cands(entry->value);
		}
		hashmap__free(cand_cache);
	}

	return err;
}
1507 
/* btf_type_visit_type_ids() callback: translate an old (source BTF) type ID
 * into its ID in the generated BTF via the lookup table passed as @ctx.
 */
static int btfgen_remap_id(__u32 *type_id, void *ctx)
{
	const unsigned int *id_map = ctx;

	*type_id = id_map[*type_id];

	return 0;
}
1516 
/* Generate BTF from relocation information previously recorded.
 *
 * Copies into a fresh BTF object only the types whose name_off was set to
 * MARKED in info->marked_btf (and, for structs/unions, only the marked
 * members), then rewrites every type ID reference to the new numbering.
 *
 * Returns the new BTF object on success, NULL with errno set on failure.
 */
static struct btf *btfgen_get_btf(struct btfgen_info *info)
{
	struct btf *btf_new = NULL;
	unsigned int *ids = NULL;
	unsigned int i, n = btf__type_cnt(info->marked_btf);
	int err = 0;

	btf_new = btf__new_empty();
	if (!btf_new) {
		err = -errno;
		goto err_out;
	}

	/* old-ID -> new-ID translation table, indexed by old type ID;
	 * unmarked types keep the calloc'd 0 (void) mapping */
	ids = calloc(n, sizeof(*ids));
	if (!ids) {
		err = -errno;
		goto err_out;
	}

	/* first pass: add all marked types to btf_new and add their new ids to the ids map */
	for (i = 1; i < n; i++) {
		const struct btf_type *cloned_type, *type;
		const char *name;
		int new_id;

		cloned_type = btf__type_by_id(info->marked_btf, i);

		if (cloned_type->name_off != MARKED)
			continue;

		type = btf__type_by_id(info->src_btf, i);

		/* add members for struct and union */
		if (btf_is_composite(type)) {
			struct btf_member *cloned_m, *m;
			unsigned short vlen;
			int idx_src;

			name = btf__str_by_offset(info->src_btf, type->name_off);

			if (btf_is_struct(type))
				err = btf__add_struct(btf_new, name, type->size);
			else
				err = btf__add_union(btf_new, name, type->size);

			/* btf__add_*() return the new type ID on success */
			if (err < 0)
				goto err_out;
			new_id = err;

			cloned_m = btf_members(cloned_type);
			m = btf_members(type);
			vlen = btf_vlen(cloned_type);
			for (idx_src = 0; idx_src < vlen; idx_src++, cloned_m++, m++) {
				/* add only members that are marked as used */
				if (cloned_m->name_off != MARKED)
					continue;

				name = btf__str_by_offset(info->src_btf, m->name_off);
				err = btf__add_field(btf_new, name, m->type,
						     btf_member_bit_offset(cloned_type, idx_src),
						     btf_member_bitfield_size(cloned_type, idx_src));
				if (err < 0)
					goto err_out;
			}
		} else {
			/* non-composite kinds are copied wholesale */
			err = btf__add_type(btf_new, info->src_btf, type);
			if (err < 0)
				goto err_out;
			new_id = err;
		}

		/* add ID mapping */
		ids[i] = new_id;
	}

	/* second pass: fix up type ids */
	for (i = 1; i < btf__type_cnt(btf_new); i++) {
		struct btf_type *btf_type = (struct btf_type *) btf__type_by_id(btf_new, i);

		err = btf_type_visit_type_ids(btf_type, btfgen_remap_id, ids);
		if (err)
			goto err_out;
	}

	free(ids);
	return btf_new;

err_out:
	btf__free(btf_new);
	free(ids);
	errno = -err;
	return NULL;
}
1611 
1612 /* Create minimized BTF file for a set of BPF objects.
1613  *
1614  * The BTFGen algorithm is divided in two main parts: (1) collect the
1615  * BTF types that are involved in relocations and (2) generate the BTF
1616  * object using the collected types.
1617  *
1618  * In order to collect the types involved in the relocations, we parse
1619  * the BTF and BTF.ext sections of the BPF objects and use
1620  * bpf_core_calc_relo_insn() to get the target specification, this
1621  * indicates how the types and fields are used in a relocation.
1622  *
1623  * Types are recorded in different ways according to the kind of the
1624  * relocation. For field-based relocations only the members that are
1625  * actually used are saved in order to reduce the size of the generated
1626  * BTF file. For type-based relocations empty struct / unions are
1627  * generated and for enum-based relocations the whole type is saved.
1628  *
1629  * The second part of the algorithm generates the BTF object. It creates
1630  * an empty BTF object and fills it with the types recorded in the
1631  * previous step. This function takes care of only adding the structure
1632  * and union members that were marked as used and it also fixes up the
1633  * type IDs on the generated BTF object.
1634  */
/* Drive the BTFGen pipeline: load @src_btf, record the relocations of every
 * object in the NULL-terminated @objspaths array, then write the minimized
 * BTF to @dst_btf. Returns 0 on success, negative error code otherwise.
 */
static int minimize_btf(const char *src_btf, const char *dst_btf, const char *objspaths[])
{
	struct btf *btf_new = NULL;
	struct btfgen_info *info;
	int err = 0, i;

	/* step 1: parse the target BTF (pristine + markable shadow copy) */
	info = btfgen_new_info(src_btf);
	if (!info) {
		err = -errno;
		p_err("failed to allocate info structure: %s", strerror(errno));
		goto out;
	}

	/* step 2: mark every type used by the objects' CO-RE relocations */
	for (i = 0; objspaths[i] != NULL; i++) {
		err = btfgen_record_obj(info, objspaths[i]);
		if (err) {
			p_err("error recording relocations for %s: %s", objspaths[i],
			      strerror(errno));
			goto out;
		}
	}

	/* step 3: emit a fresh BTF object holding only the marked types */
	btf_new = btfgen_get_btf(info);
	if (!btf_new) {
		err = -errno;
		p_err("error generating BTF: %s", strerror(errno));
		goto out;
	}

	err = btf_save_raw(btf_new, dst_btf);
	if (err)
		p_err("error saving btf file: %s", strerror(errno));

out:
	btf__free(btf_new);
	btfgen_free_info(info);

	return err;
}
1676 
/* "bpftool gen min_core_btf": parse INPUT OUTPUT OBJECT... arguments and
 * hand them to minimize_btf(). Returns 0 on success, negative otherwise.
 */
static int do_min_core_btf(int argc, char **argv)
{
	const char *input, *output, **objs;
	int i = 0, err;

	if (!REQ_ARGS(3)) {
		usage();
		return -1;
	}

	input = GET_ARG();
	output = GET_ARG();

	/* NULL-terminated array holding the remaining object paths */
	objs = calloc(argc + 1, sizeof(*objs));
	if (!objs) {
		p_err("failed to allocate array for object names");
		return -ENOMEM;
	}

	while (argc)
		objs[i++] = GET_ARG();

	err = minimize_btf(input, output, objs);
	free(objs);
	return err;
}
1704 
1705 static const struct cmd cmds[] = {
1706 	{ "object",		do_object },
1707 	{ "skeleton",		do_skeleton },
1708 	{ "min_core_btf",	do_min_core_btf},
1709 	{ "help",		do_help },
1710 	{ 0 }
1711 };
1712 
/* Entry point for "bpftool gen": dispatch to one of the cmds[] handlers,
 * falling back to do_help for unknown sub-commands.
 */
int do_gen(int argc, char **argv)
{
	return cmd_select(cmds, argc, argv, do_help);
}
1717