xref: /openbmc/linux/tools/bpf/bpftool/gen.c (revision d82a6c5ef9dc0aab296936e1aa4ad28fd5162a55)
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2019 Facebook */
3 
4 #ifndef _GNU_SOURCE
5 #define _GNU_SOURCE
6 #endif
7 #include <ctype.h>
8 #include <errno.h>
9 #include <fcntl.h>
10 #include <linux/err.h>
11 #include <stdbool.h>
12 #include <stdio.h>
13 #include <string.h>
14 #include <unistd.h>
15 #include <bpf/bpf.h>
16 #include <bpf/libbpf.h>
17 #include <bpf/libbpf_internal.h>
18 #include <sys/types.h>
19 #include <sys/stat.h>
20 #include <sys/mman.h>
21 #include <bpf/btf.h>
22 
23 #include "json_writer.h"
24 #include "main.h"
25 
26 #define MAX_OBJ_NAME_LEN 64
27 
/* Replace every character that is not valid in a C identifier with '_',
 * turning an arbitrary name (section/file/variable) into something that can
 * be emitted as a struct member or type name in the generated skeleton.
 */
static void sanitize_identifier(char *name)
{
	int i;

	for (i = 0; name[i]; i++)
		/* cast to unsigned char: passing a negative plain char to
		 * isalnum() is undefined behavior (CERT STR37-C)
		 */
		if (!isalnum((unsigned char)name[i]) && name[i] != '_')
			name[i] = '_';
}
36 
/* Return true iff 'str' begins with 'prefix' (empty prefix always matches). */
static bool str_has_prefix(const char *str, const char *prefix)
{
	while (*prefix) {
		if (*str++ != *prefix++)
			return false;
	}
	return true;
}
41 
/* Return true iff 'str' ends with 'suffix' (empty suffix always matches). */
static bool str_has_suffix(const char *str, const char *suffix)
{
	size_t str_len = strlen(str);
	size_t sfx_len = strlen(suffix);

	if (str_len < sfx_len)
		return false;

	/* compare suffix against the tail of the string */
	return strcmp(str + (str_len - sfx_len), suffix) == 0;
}
56 
57 static void get_obj_name(char *name, const char *file)
58 {
59 	/* Using basename() GNU version which doesn't modify arg. */
60 	strncpy(name, basename(file), MAX_OBJ_NAME_LEN - 1);
61 	name[MAX_OBJ_NAME_LEN - 1] = '\0';
62 	if (str_has_suffix(name, ".o"))
63 		name[strlen(name) - 2] = '\0';
64 	sanitize_identifier(name);
65 }
66 
/* Build the include-guard macro "__<OBJ_NAME>_SKEL_H__" in upper case.
 * 'guard' must be large enough for obj_name plus the "__"/"_SKEL_H__"
 * decoration (callers size it as MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")).
 */
static void get_header_guard(char *guard, const char *obj_name)
{
	int i;

	sprintf(guard, "__%s_SKEL_H__", obj_name);
	for (i = 0; guard[i]; i++)
		/* cast to unsigned char: toupper() on a negative plain char
		 * is undefined behavior (CERT STR37-C)
		 */
		guard[i] = toupper((unsigned char)guard[i]);
}
75 
/* Compute the C identifier used for 'map' in the generated skeleton.
 * Regular maps are identified by their own name. Internal maps are matched
 * against the known special section names and identified by that name with
 * the leading '.' stripped (e.g. ".rodata" -> "rodata"), sanitized into a
 * valid identifier. Returns false if no identifier could be derived.
 */
static bool get_map_ident(const struct bpf_map *map, char *buf, size_t buf_sz)
{
	static const char *sfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
	const char *name = bpf_map__name(map);
	size_t idx;

	if (!bpf_map__is_internal(map)) {
		snprintf(buf, buf_sz, "%s", name);
		return true;
	}

	for (idx = 0; idx < ARRAY_SIZE(sfxs); idx++) {
		const char *match = strstr(name, sfxs[idx]);

		if (!match)
			continue;

		/* skip the leading '.' of the matched section name */
		snprintf(buf, buf_sz, "%s", match + 1);
		sanitize_identifier(buf);
		return true;
	}

	return false;
}
100 
/* Compute the C identifier for a datasec section name: if 'sec_name' starts
 * with one of the known special prefixes, strip the leading '.' and sanitize
 * the rest into a valid identifier. Returns false for unrecognized sections.
 */
static bool get_datasec_ident(const char *sec_name, char *buf, size_t buf_sz)
{
	static const char *pfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
	size_t idx;

	for (idx = 0; idx < ARRAY_SIZE(pfxs); idx++) {
		if (!str_has_prefix(sec_name, pfxs[idx]))
			continue;

		/* drop the leading '.' to form a valid identifier */
		snprintf(buf, buf_sz, "%s", sec_name + 1);
		sanitize_identifier(buf);
		return true;
	}

	return false;
}
118 
/* btf_dump printf callback: forward dumped type declarations to stdout. */
static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args)
{
	(void)ctx; /* unused */
	vprintf(fmt, args);
}
123 
/* Emit the "struct <obj_name>__<sec_ident> { ... } *<sec_ident>;" member
 * definition for one datasec BTF type, reproducing the section's memory
 * layout (with explicit padding fields) so user-space can access mmap'ed
 * map data through the skeleton. Returns 0 on success (including for
 * unrecognized sections, which are silently skipped) or a negative errno.
 */
static int codegen_datasec_def(struct bpf_object *obj,
			       struct btf *btf,
			       struct btf_dump *d,
			       const struct btf_type *sec,
			       const char *obj_name)
{
	const char *sec_name = btf__name_by_offset(btf, sec->name_off);
	const struct btf_var_secinfo *sec_var = btf_var_secinfos(sec);
	int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec);
	char var_ident[256], sec_ident[256];
	bool strip_mods = false;

	/* unknown section name -> nothing to emit, not an error */
	if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
		return 0;

	/* .kconfig variables keep their 'const volatile' qualifiers; all
	 * other sections have modifiers stripped from emitted declarations
	 */
	if (strcmp(sec_name, ".kconfig") != 0)
		strip_mods = true;

	printf("	struct %s__%s {\n", obj_name, sec_ident);
	for (i = 0; i < vlen; i++, sec_var++) {
		const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
		const char *var_name = btf__name_by_offset(btf, var->name_off);
		DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
			.field_name = var_ident,
			.indent_level = 2,
			.strip_mods = strip_mods,
		);
		int need_off = sec_var->offset, align_off, align;
		__u32 var_type_id = var->type;

		/* static variables are not exposed through BPF skeleton */
		if (btf_var(var)->linkage == BTF_VAR_STATIC)
			continue;

		/* BTF offsets must be monotonically non-decreasing */
		if (off > need_off) {
			p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n",
			      sec_name, i, need_off, off);
			return -EINVAL;
		}

		align = btf__align_of(btf, var->type);
		if (align <= 0) {
			p_err("Failed to determine alignment of variable '%s': %d",
			      var_name, align);
			return -EINVAL;
		}
		/* Assume 32-bit architectures when generating data section
		 * struct memory layout. Given bpftool can't know which target
		 * host architecture it's emitting skeleton for, we need to be
		 * conservative and assume 32-bit one to ensure enough padding
		 * bytes are generated for pointer and long types. This will
		 * still work correctly for 64-bit architectures, because in
		 * the worst case we'll generate unnecessary padding field,
		 * which on 64-bit architectures is not strictly necessary and
		 * would be handled by natural 8-byte alignment. But it still
		 * will be a correct memory layout, based on recorded offsets
		 * in BTF.
		 */
		if (align > 4)
			align = 4;

		/* if natural alignment alone doesn't reach the recorded
		 * offset, emit an explicit padding field to fill the gap
		 */
		align_off = (off + align - 1) / align * align;
		if (align_off != need_off) {
			printf("\t\tchar __pad%d[%d];\n",
			       pad_cnt, need_off - off);
			pad_cnt++;
		}

		/* sanitize variable name, e.g., for static vars inside
		 * a function, it's name is '<function name>.<variable name>',
		 * which we'll turn into a '<function name>_<variable name>'
		 */
		var_ident[0] = '\0';
		strncat(var_ident, var_name, sizeof(var_ident) - 1);
		sanitize_identifier(var_ident);

		printf("\t\t");
		err = btf_dump__emit_type_decl(d, var_type_id, &opts);
		if (err)
			return err;
		printf(";\n");

		off = sec_var->offset + sec_var->size;
	}
	printf("	} *%s;\n", sec_ident);
	return 0;
}
211 
212 static const struct btf_type *find_type_for_map(struct btf *btf, const char *map_ident)
213 {
214 	int n = btf__type_cnt(btf), i;
215 	char sec_ident[256];
216 
217 	for (i = 1; i < n; i++) {
218 		const struct btf_type *t = btf__type_by_id(btf, i);
219 		const char *name;
220 
221 		if (!btf_is_datasec(t))
222 			continue;
223 
224 		name = btf__str_by_offset(btf, t->name_off);
225 		if (!get_datasec_ident(name, sec_ident, sizeof(sec_ident)))
226 			continue;
227 
228 		if (strcmp(sec_ident, map_ident) == 0)
229 			return t;
230 	}
231 	return NULL;
232 }
233 
/* Emit skeleton struct definitions for all memory-mapped internal maps of
 * 'obj' (.data/.rodata/.bss/.kconfig). Returns 0 on success or a negative
 * error from btf_dump creation / datasec code generation.
 */
static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	struct btf_dump *d;
	struct bpf_map *map;
	const struct btf_type *sec;
	char map_ident[256];
	int err = 0;

	d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
	err = libbpf_get_error(d);
	if (err)
		return err;

	bpf_object__for_each_map(map, obj) {
		/* only generate definitions for memory-mapped internal maps */
		if (!bpf_map__is_internal(map))
			continue;
		if (!(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			continue;

		if (!get_map_ident(map, map_ident, sizeof(map_ident)))
			continue;

		sec = find_type_for_map(btf, map_ident);

		/* In some cases (e.g., sections like .rodata.cst16 containing
		 * compiler allocated string constants only) there will be
		 * special internal maps with no corresponding DATASEC BTF
		 * type. In such case, generate empty structs for each such
		 * map. It will still be memory-mapped and its contents
		 * accessible from user-space through BPF skeleton.
		 */
		if (!sec) {
			printf("	struct %s__%s {\n", obj_name, map_ident);
			printf("	} *%s;\n", map_ident);
		} else {
			err = codegen_datasec_def(obj, btf, d, sec, obj_name);
			if (err)
				goto out;
		}
	}


out:
	btf_dump__free(d);
	return err;
}
282 
/* printf-like emitter for multi-line code templates. The template's first
 * line (tabs up to the first '\n') defines the "baseline" indentation,
 * which is stripped from every subsequent line; trailing whitespace on each
 * line is trimmed as well. The adjusted template is then fed to vprintf()
 * with the remaining arguments. Malformed templates (non-tab character in
 * the baseline, or a line with fewer leading tabs than the baseline) are
 * fatal: an error is printed and the process exits.
 */
static void codegen(const char *template, ...)
{
	const char *src, *end;
	int skip_tabs = 0, n;
	char *s, *dst;
	va_list args;
	char c;

	/* the cleaned-up template can only shrink, so n+1 bytes suffice */
	n = strlen(template);
	s = malloc(n + 1);
	if (!s)
		exit(-1);
	src = template;
	dst = s;

	/* find out "baseline" indentation to skip */
	while ((c = *src++)) {
		if (c == '\t') {
			skip_tabs++;
		} else if (c == '\n') {
			break;
		} else {
			p_err("unrecognized character at pos %td in template '%s': '%c'",
			      src - template - 1, template, c);
			free(s);
			exit(-1);
		}
	}

	/* copy the template line by line, de-indenting and trimming */
	while (*src) {
		/* skip baseline indentation tabs */
		for (n = skip_tabs; n > 0; n--, src++) {
			if (*src != '\t') {
				p_err("not enough tabs at pos %td in template '%s'",
				      src - template - 1, template);
				free(s);
				exit(-1);
			}
		}
		/* trim trailing whitespace */
		end = strchrnul(src, '\n');
		for (n = end - src; n > 0 && isspace(src[n - 1]); n--)
			;
		memcpy(dst, src, n);
		dst += n;
		if (*end)
			*dst++ = '\n';
		src = *end ? end + 1 : end;
	}
	*dst++ = '\0';

	/* print out using adjusted template */
	va_start(args, template);
	n = vprintf(s, args);
	va_end(args);

	free(s);
}
341 
/* Print 'data' as the body of a C string literal: zero bytes become "\0",
 * everything else "\xNN". A backslash-newline continuation is emitted
 * before any escape that would push the current line past ~78 columns.
 */
static void print_hex(const char *data, int data_sz)
{
	int pos, line_len = 0;

	for (pos = 0; pos < data_sz; pos++) {
		unsigned char byte = data[pos];
		int esc_len = byte ? 4 : 2; /* "\xNN" vs "\0" */

		line_len += esc_len;
		if (line_len > 78) {
			printf("\\\n");
			line_len = esc_len;
		}
		if (byte)
			printf("\\x%02x", byte);
		else
			printf("\\0");
	}
}
360 
/* Size of the mmap() region backing 'map': value size rounded up to 8
 * bytes, multiplied by max_entries, then rounded up to whole pages.
 */
static size_t bpf_map_mmap_sz(const struct bpf_map *map)
{
	long page_sz = sysconf(_SC_PAGE_SIZE);
	size_t sz;

	sz = (size_t)roundup(bpf_map__value_size(map), 8) * bpf_map__max_entries(map);
	return roundup(sz, page_sz);
}
370 
/* Emit type size asserts for all top-level fields in memory-mapped internal maps. */
/* Generates a "<obj>__assert()" helper containing one _Static_assert per
 * non-static variable of every mmap-able internal map, so a skeleton
 * compiled against a newer/older object fails loudly at compile time when
 * a variable's size changed. Best-effort: maps or variables whose type
 * can't be resolved are silently skipped.
 */
static void codegen_asserts(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	struct bpf_map *map;
	struct btf_var_secinfo *sec_var;
	int i, vlen;
	const struct btf_type *sec;
	char map_ident[256], var_ident[256];

	codegen("\
		\n\
		__attribute__((unused)) static void			    \n\
		%1$s__assert(struct %1$s *s)				    \n\
		{							    \n\
		#ifdef __cplusplus					    \n\
		#define _Static_assert static_assert			    \n\
		#endif							    \n\
		", obj_name);

	bpf_object__for_each_map(map, obj) {
		if (!bpf_map__is_internal(map))
			continue;
		if (!(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			continue;
		if (!get_map_ident(map, map_ident, sizeof(map_ident)))
			continue;

		sec = find_type_for_map(btf, map_ident);
		if (!sec) {
			/* best effort, couldn't find the type for this map */
			continue;
		}

		sec_var = btf_var_secinfos(sec);
		vlen =  btf_vlen(sec);

		for (i = 0; i < vlen; i++, sec_var++) {
			const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
			const char *var_name = btf__name_by_offset(btf, var->name_off);
			long var_size;

			/* static variables are not exposed through BPF skeleton */
			if (btf_var(var)->linkage == BTF_VAR_STATIC)
				continue;

			var_size = btf__resolve_size(btf, var->type);
			if (var_size < 0)
				continue;

			/* same sanitization as in codegen_datasec_def() so
			 * the assert references the emitted member name
			 */
			var_ident[0] = '\0';
			strncat(var_ident, var_name, sizeof(var_ident) - 1);
			sanitize_identifier(var_ident);

			printf("\t_Static_assert(sizeof(s->%s->%s) == %ld, \"unexpected size of '%s'\");\n",
			       map_ident, var_ident, var_size, var_ident);
		}
	}
	codegen("\
		\n\
		#ifdef __cplusplus					    \n\
		#undef _Static_assert					    \n\
		#endif							    \n\
		}							    \n\
		");
}
437 
/* For the light skeleton (loader mode): emit per-program
 * "<obj>__<prog>__attach()" helpers plus aggregate "<obj>__attach()" and
 * "<obj>__detach()" functions. Auto-attach is only supported for raw
 * tracepoints and tracing programs; other program types get a stub that
 * returns 0 (no link fd).
 */
static void codegen_attach_detach(struct bpf_object *obj, const char *obj_name)
{
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		const char *tp_name;

		codegen("\
			\n\
			\n\
			static inline int					    \n\
			%1$s__%2$s__attach(struct %1$s *skel)			    \n\
			{							    \n\
				int prog_fd = skel->progs.%2$s.prog_fd;		    \n\
			", obj_name, bpf_program__name(prog));

		/* pick the attach mechanism based on program type */
		switch (bpf_program__type(prog)) {
		case BPF_PROG_TYPE_RAW_TRACEPOINT:
			tp_name = strchr(bpf_program__section_name(prog), '/') + 1;
			printf("\tint fd = skel_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name);
			break;
		case BPF_PROG_TYPE_TRACING:
			if (bpf_program__expected_attach_type(prog) == BPF_TRACE_ITER)
				printf("\tint fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);\n");
			else
				printf("\tint fd = skel_raw_tracepoint_open(NULL, prog_fd);\n");
			break;
		default:
			printf("\tint fd = ((void)prog_fd, 0); /* auto-attach not supported */\n");
			break;
		}
		codegen("\
			\n\
										    \n\
				if (fd > 0)					    \n\
					skel->links.%1$s_fd = fd;		    \n\
				return fd;					    \n\
			}							    \n\
			", bpf_program__name(prog));
	}

	/* aggregate attach: first failure wins, later programs still run */
	codegen("\
		\n\
									    \n\
		static inline int					    \n\
		%1$s__attach(struct %1$s *skel)				    \n\
		{							    \n\
			int ret = 0;					    \n\
									    \n\
		", obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				ret = ret < 0 ? ret : %1$s__%2$s__attach(skel);   \n\
			", obj_name, bpf_program__name(prog));
	}

	codegen("\
		\n\
			return ret < 0 ? ret : 0;			    \n\
		}							    \n\
									    \n\
		static inline void					    \n\
		%1$s__detach(struct %1$s *skel)				    \n\
		{							    \n\
		", obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				skel_closenz(skel->links.%1$s_fd);	    \n\
			", bpf_program__name(prog));
	}

	codegen("\
		\n\
		}							    \n\
		");
}
518 
/* For the light skeleton: emit "<obj>__destroy()", which detaches all
 * links, closes all program and map fds, frees mmap'ed map data, and
 * finally frees the skeleton itself. Safe to call with a NULL skeleton.
 */
static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
{
	struct bpf_program *prog;
	struct bpf_map *map;
	char ident[256];

	codegen("\
		\n\
		static void						    \n\
		%1$s__destroy(struct %1$s *skel)			    \n\
		{							    \n\
			if (!skel)					    \n\
				return;					    \n\
			%1$s__detach(skel);				    \n\
		",
		obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				skel_closenz(skel->progs.%1$s.prog_fd);	    \n\
			", bpf_program__name(prog));
	}

	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;
		/* mmap-able internal maps also carry mapped data to free */
		if (bpf_map__is_internal(map) &&
		    (bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			printf("\tskel_free_map_data(skel->%1$s, skel->maps.%1$s.initial_value, %2$zd);\n",
			       ident, bpf_map_mmap_sz(map));
		codegen("\
			\n\
				skel_closenz(skel->maps.%1$s.map_fd);	    \n\
			", ident);
	}
	codegen("\
		\n\
			skel_free(skel);				    \n\
		}							    \n\
		",
		obj_name);
}
562 
/* Generate the "light skeleton" (loader mode) tail of the header: loads the
 * object through libbpf's gen_loader to capture the loader program and its
 * data blob, then emits open/load/open_and_load/attach/detach/destroy
 * helpers that replay that blob via bpf_load_and_run() without linking
 * against libbpf at runtime. Returns 0 on success, negative error on
 * failure. Assumes the "struct <obj>" definition is already partially
 * emitted by the caller; this function closes it.
 */
static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard)
{
	DECLARE_LIBBPF_OPTS(gen_loader_opts, opts);
	struct bpf_map *map;
	char ident[256];
	int err = 0;

	err = bpf_object__gen_loader(obj, &opts);
	if (err)
		return err;

	err = bpf_object__load(obj);
	if (err) {
		p_err("failed to load object file");
		goto out;
	}
	/* If there was no error during load then gen_loader_opts
	 * are populated with the loader program.
	 */

	/* finish generating 'struct skel' */
	codegen("\
		\n\
		};							    \n\
		", obj_name);


	codegen_attach_detach(obj, obj_name);

	codegen_destroy(obj, obj_name);

	/* __open(): allocate the skeleton and pre-populate mmap'ed map data
	 * from the ELF's recorded initial values
	 */
	codegen("\
		\n\
		static inline struct %1$s *				    \n\
		%1$s__open(void)					    \n\
		{							    \n\
			struct %1$s *skel;				    \n\
									    \n\
			skel = skel_alloc(sizeof(*skel));		    \n\
			if (!skel)					    \n\
				goto cleanup;				    \n\
			skel->ctx.sz = (void *)&skel->links - (void *)skel; \n\
		",
		obj_name, opts.data_sz);
	bpf_object__for_each_map(map, obj) {
		const void *mmap_data = NULL;
		size_t mmap_size = 0;

		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		if (!bpf_map__is_internal(map) ||
		    !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			continue;

		codegen("\
		\n\
			skel->%1$s = skel_prep_map_data((void *)\"\\	    \n\
		", ident);
		mmap_data = bpf_map__initial_value(map, &mmap_size);
		print_hex(mmap_data, mmap_size);
		codegen("\
		\n\
		\", %1$zd, %2$zd);					    \n\
			if (!skel->%3$s)				    \n\
				goto cleanup;				    \n\
			skel->maps.%3$s.initial_value = (__u64) (long) skel->%3$s;\n\
		", bpf_map_mmap_sz(map), mmap_size, ident);
	}
	/* __load(): embed the loader data blob captured above */
	codegen("\
		\n\
			return skel;					    \n\
		cleanup:						    \n\
			%1$s__destroy(skel);				    \n\
			return NULL;					    \n\
		}							    \n\
									    \n\
		static inline int					    \n\
		%1$s__load(struct %1$s *skel)				    \n\
		{							    \n\
			struct bpf_load_and_run_opts opts = {};		    \n\
			int err;					    \n\
									    \n\
			opts.ctx = (struct bpf_loader_ctx *)skel;	    \n\
			opts.data_sz = %2$d;				    \n\
			opts.data = (void *)\"\\			    \n\
		",
		obj_name, opts.data_sz);
	print_hex(opts.data, opts.data_sz);
	codegen("\
		\n\
		\";							    \n\
		");

	/* embed the loader program's instructions */
	codegen("\
		\n\
			opts.insns_sz = %d;				    \n\
			opts.insns = (void *)\"\\			    \n\
		",
		opts.insns_sz);
	print_hex(opts.insns, opts.insns_sz);
	codegen("\
		\n\
		\";							    \n\
			err = bpf_load_and_run(&opts);			    \n\
			if (err < 0)					    \n\
				return err;				    \n\
		", obj_name);
	/* after load, re-mmap internal maps with the right protections */
	bpf_object__for_each_map(map, obj) {
		const char *mmap_flags;

		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		if (!bpf_map__is_internal(map) ||
		    !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			continue;

		if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG)
			mmap_flags = "PROT_READ";
		else
			mmap_flags = "PROT_READ | PROT_WRITE";

		codegen("\
		\n\
			skel->%1$s = skel_finalize_map_data(&skel->maps.%1$s.initial_value,  \n\
							%2$zd, %3$s, skel->maps.%1$s.map_fd);\n\
			if (!skel->%1$s)				    \n\
				return -ENOMEM;				    \n\
			",
		       ident, bpf_map_mmap_sz(map), mmap_flags);
	}
	codegen("\
		\n\
			return 0;					    \n\
		}							    \n\
									    \n\
		static inline struct %1$s *				    \n\
		%1$s__open_and_load(void)				    \n\
		{							    \n\
			struct %1$s *skel;				    \n\
									    \n\
			skel = %1$s__open();				    \n\
			if (!skel)					    \n\
				return NULL;				    \n\
			if (%1$s__load(skel)) {				    \n\
				%1$s__destroy(skel);			    \n\
				return NULL;				    \n\
			}						    \n\
			return skel;					    \n\
		}							    \n\
									    \n\
		", obj_name);

	codegen_asserts(obj, obj_name);

	codegen("\
		\n\
									    \n\
		#endif /* %s */						    \n\
		",
		header_guard);
	err = 0;
out:
	return err;
}
729 
/* Implements "bpftool gen skeleton FILE [name OBJECT_NAME]": mmap the BPF
 * ELF object, open it with libbpf, and print a self-contained C skeleton
 * header (struct definition, open/load/attach/detach/destroy helpers, and
 * the embedded ELF bytes) to stdout. In loader mode (use_loader) the tail
 * of the header is generated by gen_trace() instead. Returns 0 on success,
 * -1/negative error otherwise.
 */
static int do_skeleton(int argc, char **argv)
{
	char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
	size_t i, map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz;
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
	char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
	struct bpf_object *obj = NULL;
	const char *file;
	char ident[256];
	struct bpf_program *prog;
	int fd, err = -1;
	struct bpf_map *map;
	struct btf *btf;
	struct stat st;

	/* parse "FILE [name OBJECT_NAME]" arguments */
	if (!REQ_ARGS(1)) {
		usage();
		return -1;
	}
	file = GET_ARG();

	while (argc) {
		if (!REQ_ARGS(2))
			return -1;

		if (is_prefix(*argv, "name")) {
			NEXT_ARG();

			if (obj_name[0] != '\0') {
				p_err("object name already specified");
				return -1;
			}

			strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
			obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
		} else {
			p_err("unknown arg %s", *argv);
			return -1;
		}

		NEXT_ARG();
	}

	if (argc) {
		p_err("extra unknown arguments");
		return -1;
	}

	/* mmap the whole object file read-only; its bytes are later embedded
	 * in the generated header via print_hex()
	 */
	if (stat(file, &st)) {
		p_err("failed to stat() %s: %s", file, strerror(errno));
		return -1;
	}
	file_sz = st.st_size;
	mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
	fd = open(file, O_RDONLY);
	if (fd < 0) {
		p_err("failed to open() %s: %s", file, strerror(errno));
		return -1;
	}
	obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
	if (obj_data == MAP_FAILED) {
		obj_data = NULL;
		p_err("failed to mmap() %s: %s", file, strerror(errno));
		goto out;
	}
	/* no explicit name given -> derive it from the file name */
	if (obj_name[0] == '\0')
		get_obj_name(obj_name, file);
	opts.object_name = obj_name;
	if (verifier_logs)
		/* log_level1 + log_level2 + stats, but not stable UAPI */
		opts.kernel_log_level = 1 + 2 + 4;
	obj = bpf_object__open_mem(obj_data, file_sz, &opts);
	err = libbpf_get_error(obj);
	if (err) {
		char err_buf[256];

		libbpf_strerror(err, err_buf, sizeof(err_buf));
		p_err("failed to open BPF object file: %s", err_buf);
		obj = NULL;
		goto out;
	}

	/* count maps (with derivable identifiers) and programs up front so
	 * empty sections can be omitted from the skeleton struct
	 */
	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident))) {
			p_err("ignoring unrecognized internal map '%s'...",
			      bpf_map__name(map));
			continue;
		}
		map_cnt++;
	}
	bpf_object__for_each_program(prog, obj) {
		prog_cnt++;
	}

	/* emit the header prologue and start of "struct <obj>" */
	get_header_guard(header_guard, obj_name);
	if (use_loader) {
		codegen("\
		\n\
		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */   \n\
		/* THIS FILE IS AUTOGENERATED! */			    \n\
		#ifndef %2$s						    \n\
		#define %2$s						    \n\
									    \n\
		#include <bpf/skel_internal.h>				    \n\
									    \n\
		struct %1$s {						    \n\
			struct bpf_loader_ctx ctx;			    \n\
		",
		obj_name, header_guard
		);
	} else {
		codegen("\
		\n\
		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */   \n\
									    \n\
		/* THIS FILE IS AUTOGENERATED! */			    \n\
		#ifndef %2$s						    \n\
		#define %2$s						    \n\
									    \n\
		#include <errno.h>					    \n\
		#include <stdlib.h>					    \n\
		#include <bpf/libbpf.h>					    \n\
									    \n\
		struct %1$s {						    \n\
			struct bpf_object_skeleton *skeleton;		    \n\
			struct bpf_object *obj;				    \n\
		",
		obj_name, header_guard
		);
	}

	/* "maps" sub-struct: one member per recognizable map */
	if (map_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_map(map, obj) {
			if (!get_map_ident(map, ident, sizeof(ident)))
				continue;
			if (use_loader)
				printf("\t\tstruct bpf_map_desc %s;\n", ident);
			else
				printf("\t\tstruct bpf_map *%s;\n", ident);
		}
		printf("\t} maps;\n");
	}

	/* "progs" and "links" sub-structs: one member per program */
	if (prog_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			if (use_loader)
				printf("\t\tstruct bpf_prog_desc %s;\n",
				       bpf_program__name(prog));
			else
				printf("\t\tstruct bpf_program *%s;\n",
				       bpf_program__name(prog));
		}
		printf("\t} progs;\n");
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			if (use_loader)
				printf("\t\tint %s_fd;\n",
				       bpf_program__name(prog));
			else
				printf("\t\tstruct bpf_link *%s;\n",
				       bpf_program__name(prog));
		}
		printf("\t} links;\n");
	}

	/* data-section members (skel->data, skel->rodata, ...) */
	btf = bpf_object__btf(obj);
	if (btf) {
		err = codegen_datasecs(obj, obj_name);
		if (err)
			goto out;
	}
	/* light skeleton: the rest of the header comes from gen_trace() */
	if (use_loader) {
		err = gen_trace(obj, obj_name, header_guard);
		goto out;
	}

	/* regular skeleton: C++ convenience declarations plus the
	 * open/load/attach/detach/destroy helper functions
	 */
	codegen("\
		\n\
									    \n\
		#ifdef __cplusplus					    \n\
			static inline struct %1$s *open(const struct bpf_object_open_opts *opts = nullptr);\n\
			static inline struct %1$s *open_and_load();	    \n\
			static inline int load(struct %1$s *skel);	    \n\
			static inline int attach(struct %1$s *skel);	    \n\
			static inline void detach(struct %1$s *skel);	    \n\
			static inline void destroy(struct %1$s *skel);	    \n\
			static inline const void *elf_bytes(size_t *sz);    \n\
		#endif /* __cplusplus */				    \n\
		};							    \n\
									    \n\
		static void						    \n\
		%1$s__destroy(struct %1$s *obj)				    \n\
		{							    \n\
			if (!obj)					    \n\
				return;					    \n\
			if (obj->skeleton)				    \n\
				bpf_object__destroy_skeleton(obj->skeleton);\n\
			free(obj);					    \n\
		}							    \n\
									    \n\
		static inline int					    \n\
		%1$s__create_skeleton(struct %1$s *obj);		    \n\
									    \n\
		static inline struct %1$s *				    \n\
		%1$s__open_opts(const struct bpf_object_open_opts *opts)    \n\
		{							    \n\
			struct %1$s *obj;				    \n\
			int err;					    \n\
									    \n\
			obj = (struct %1$s *)calloc(1, sizeof(*obj));	    \n\
			if (!obj) {					    \n\
				errno = ENOMEM;				    \n\
				return NULL;				    \n\
			}						    \n\
									    \n\
			err = %1$s__create_skeleton(obj);		    \n\
			if (err)					    \n\
				goto err_out;				    \n\
									    \n\
			err = bpf_object__open_skeleton(obj->skeleton, opts);\n\
			if (err)					    \n\
				goto err_out;				    \n\
									    \n\
			return obj;					    \n\
		err_out:						    \n\
			%1$s__destroy(obj);				    \n\
			errno = -err;					    \n\
			return NULL;					    \n\
		}							    \n\
									    \n\
		static inline struct %1$s *				    \n\
		%1$s__open(void)					    \n\
		{							    \n\
			return %1$s__open_opts(NULL);			    \n\
		}							    \n\
									    \n\
		static inline int					    \n\
		%1$s__load(struct %1$s *obj)				    \n\
		{							    \n\
			return bpf_object__load_skeleton(obj->skeleton);    \n\
		}							    \n\
									    \n\
		static inline struct %1$s *				    \n\
		%1$s__open_and_load(void)				    \n\
		{							    \n\
			struct %1$s *obj;				    \n\
			int err;					    \n\
									    \n\
			obj = %1$s__open();				    \n\
			if (!obj)					    \n\
				return NULL;				    \n\
			err = %1$s__load(obj);				    \n\
			if (err) {					    \n\
				%1$s__destroy(obj);			    \n\
				errno = -err;				    \n\
				return NULL;				    \n\
			}						    \n\
			return obj;					    \n\
		}							    \n\
									    \n\
		static inline int					    \n\
		%1$s__attach(struct %1$s *obj)				    \n\
		{							    \n\
			return bpf_object__attach_skeleton(obj->skeleton);  \n\
		}							    \n\
									    \n\
		static inline void					    \n\
		%1$s__detach(struct %1$s *obj)				    \n\
		{							    \n\
			return bpf_object__detach_skeleton(obj->skeleton);  \n\
		}							    \n\
		",
		obj_name
	);

	/* __create_skeleton(): wire up map/program/link pointers so libbpf
	 * can fill them in during open/load
	 */
	codegen("\
		\n\
									    \n\
		static inline const void *%1$s__elf_bytes(size_t *sz);	    \n\
									    \n\
		static inline int					    \n\
		%1$s__create_skeleton(struct %1$s *obj)			    \n\
		{							    \n\
			struct bpf_object_skeleton *s;			    \n\
									    \n\
			s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
			if (!s)						    \n\
				goto err;				    \n\
									    \n\
			s->sz = sizeof(*s);				    \n\
			s->name = \"%1$s\";				    \n\
			s->obj = &obj->obj;				    \n\
		",
		obj_name
	);
	if (map_cnt) {
		codegen("\
			\n\
									    \n\
				/* maps */				    \n\
				s->map_cnt = %zu;			    \n\
				s->map_skel_sz = sizeof(*s->maps);	    \n\
				s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
				if (!s->maps)				    \n\
					goto err;			    \n\
			",
			map_cnt
		);
		i = 0;
		bpf_object__for_each_map(map, obj) {
			if (!get_map_ident(map, ident, sizeof(ident)))
				continue;

			codegen("\
				\n\
									    \n\
					s->maps[%zu].name = \"%s\";	    \n\
					s->maps[%zu].map = &obj->maps.%s;   \n\
				",
				i, bpf_map__name(map), i, ident);
			/* memory-mapped internal maps */
			if (bpf_map__is_internal(map) &&
			    (bpf_map__map_flags(map) & BPF_F_MMAPABLE)) {
				printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
				       i, ident);
			}
			i++;
		}
	}
	if (prog_cnt) {
		codegen("\
			\n\
									    \n\
				/* programs */				    \n\
				s->prog_cnt = %zu;			    \n\
				s->prog_skel_sz = sizeof(*s->progs);	    \n\
				s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
				if (!s->progs)				    \n\
					goto err;			    \n\
			",
			prog_cnt
		);
		i = 0;
		bpf_object__for_each_program(prog, obj) {
			codegen("\
				\n\
									    \n\
					s->progs[%1$zu].name = \"%2$s\";    \n\
					s->progs[%1$zu].prog = &obj->progs.%2$s;\n\
					s->progs[%1$zu].link = &obj->links.%2$s;\n\
				",
				i, bpf_program__name(prog));
			i++;
		}
	}
	codegen("\
		\n\
									    \n\
			s->data = (void *)%2$s__elf_bytes(&s->data_sz);	    \n\
									    \n\
			obj->skeleton = s;				    \n\
			return 0;					    \n\
		err:							    \n\
			bpf_object__destroy_skeleton(s);		    \n\
			return -ENOMEM;					    \n\
		}							    \n\
									    \n\
		static inline const void *%2$s__elf_bytes(size_t *sz)	    \n\
		{							    \n\
			*sz = %1$d;					    \n\
			return (const void *)\"\\			    \n\
		"
		, file_sz, obj_name);

	/* embed contents of BPF object file */
	print_hex(obj_data, file_sz);

	/* close the ELF-bytes string literal and emit C++ method glue */
	codegen("\
		\n\
		\";							    \n\
		}							    \n\
									    \n\
		#ifdef __cplusplus					    \n\
		struct %1$s *%1$s::open(const struct bpf_object_open_opts *opts) { return %1$s__open_opts(opts); }\n\
		struct %1$s *%1$s::open_and_load() { return %1$s__open_and_load(); }	\n\
		int %1$s::load(struct %1$s *skel) { return %1$s__load(skel); }		\n\
		int %1$s::attach(struct %1$s *skel) { return %1$s__attach(skel); }	\n\
		void %1$s::detach(struct %1$s *skel) { %1$s__detach(skel); }		\n\
		void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); }		\n\
		const void *%1$s::elf_bytes(size_t *sz) { return %1$s__elf_bytes(sz); } \n\
		#endif /* __cplusplus */				    \n\
									    \n\
		",
		obj_name);

	codegen_asserts(obj, obj_name);

	codegen("\
		\n\
									    \n\
		#endif /* %1$s */					    \n\
		",
		header_guard);
	err = 0;
out:
	/* release in reverse order of acquisition */
	bpf_object__close(obj);
	if (obj_data)
		munmap(obj_data, mmap_sz);
	close(fd);
	return err;
}
1143 
/* "gen object" handler: statically link one or more BPF object files
 * into a single output object using libbpf's linker.
 *
 * argv: OUTPUT_FILE INPUT_FILE [INPUT_FILE...]
 * Returns 0 on success, negative error code (or -1 on usage error) otherwise.
 */
static int do_object(int argc, char **argv)
{
	struct bpf_linker *linker;
	const char *output_file, *file;
	int err = 0;

	if (!REQ_ARGS(2)) {
		usage();
		return -1;
	}

	output_file = GET_ARG();

	linker = bpf_linker__new(output_file, NULL);
	if (!linker) {
		p_err("failed to create BPF linker instance");
		return -1;
	}

	while (argc) {
		file = GET_ARG();

		err = bpf_linker__add_file(linker, file, NULL);
		if (err) {
			/* libbpf returns a negative error code and also sets
			 * errno; strerror() needs the positive errno value,
			 * not the negative return (which would print
			 * "Unknown error").
			 */
			p_err("failed to link '%s': %s (%d)", file, strerror(errno), err);
			goto out;
		}
	}

	err = bpf_linker__finalize(linker);
	if (err) {
		p_err("failed to finalize ELF file: %s (%d)", strerror(errno), err);
		goto out;
	}

	err = 0;
out:
	bpf_linker__free(linker);
	return err;
}
1184 
/* "gen help" handler: print usage for the "gen" subcommands to stderr.
 * In JSON output mode, emit a JSON null instead of the text usage.
 */
static int do_help(int argc, char **argv)
{
	if (json_output) {
		jsonw_null(json_wtr);
		return 0;
	}

	fprintf(stderr,
		"Usage: %1$s %2$s object OUTPUT_FILE INPUT_FILE [INPUT_FILE...]\n"
		"       %1$s %2$s skeleton FILE [name OBJECT_NAME]\n"
		"       %1$s %2$s min_core_btf INPUT OUTPUT OBJECT [OBJECT...]\n"
		"       %1$s %2$s help\n"
		"\n"
		"       " HELP_SPEC_OPTIONS " |\n"
		"                    {-L|--use-loader} }\n"
		"",
		bin_name, "gen");

	return 0;
}
1205 
1206 static int btf_save_raw(const struct btf *btf, const char *path)
1207 {
1208 	const void *data;
1209 	FILE *f = NULL;
1210 	__u32 data_sz;
1211 	int err = 0;
1212 
1213 	data = btf__raw_data(btf, &data_sz);
1214 	if (!data)
1215 		return -ENOMEM;
1216 
1217 	f = fopen(path, "wb");
1218 	if (!f)
1219 		return -errno;
1220 
1221 	if (fwrite(data, 1, data_sz, f) != data_sz)
1222 		err = -errno;
1223 
1224 	fclose(f);
1225 	return err;
1226 }
1227 
/* BTFGen working state: two parses of the same target BTF file.
 * src_btf is kept pristine and used for all lookups; marked_btf is a
 * scratch copy whose name_off fields are overwritten with MARKED to
 * record which types and members are actually used by relocations.
 */
struct btfgen_info {
	struct btf *src_btf;
	struct btf *marked_btf; /* btf structure used to mark used types */
};
1232 
static size_t btfgen_hash_fn(const void *key, void *ctx)
{
	/* Hashmap keys are type IDs stored directly in the pointer value
	 * (see u32_as_hash_key()), so the raw bits already serve as a hash.
	 */
	size_t hash = (size_t)key;

	return hash;
}
1237 
static bool btfgen_equal_fn(const void *k1, const void *k2, void *ctx)
{
	/* Keys encode type IDs directly, so pointer identity is equality. */
	if (k1 == k2)
		return true;

	return false;
}
1242 
static void *u32_as_hash_key(__u32 x)
{
	/* Widen through uintptr_t so the 32-bit ID survives the round-trip
	 * into and back out of a pointer on 64-bit platforms.
	 */
	uintptr_t bits = x;

	return (void *)bits;
}
1247 
1248 static void btfgen_free_info(struct btfgen_info *info)
1249 {
1250 	if (!info)
1251 		return;
1252 
1253 	btf__free(info->src_btf);
1254 	btf__free(info->marked_btf);
1255 
1256 	free(info);
1257 }
1258 
/* Allocate a btfgen_info and parse the target BTF file twice: once into
 * src_btf (kept intact for lookups) and once into marked_btf (the scratch
 * copy whose name_off fields are clobbered with MARKED as types get
 * recorded). Returns NULL on failure with errno set.
 */
static struct btfgen_info *
btfgen_new_info(const char *targ_btf_path)
{
	struct btfgen_info *info;
	int err;

	info = calloc(1, sizeof(*info));
	if (!info)
		return NULL;

	info->src_btf = btf__parse(targ_btf_path, NULL);
	if (!info->src_btf) {
		/* capture errno before any later call can clobber it */
		err = -errno;
		p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
		goto err_out;
	}

	info->marked_btf = btf__parse(targ_btf_path, NULL);
	if (!info->marked_btf) {
		err = -errno;
		p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
		goto err_out;
	}

	return info;

err_out:
	btfgen_free_info(info);
	errno = -err; /* report the original failure to the caller */
	return NULL;
}
1290 
/* Sentinel written into a type's or member's name_off to flag it as used;
 * assumed never to collide with a real string offset.
 */
#define MARKED UINT32_MAX
1292 
1293 static void btfgen_mark_member(struct btfgen_info *info, int type_id, int idx)
1294 {
1295 	const struct btf_type *t = btf__type_by_id(info->marked_btf, type_id);
1296 	struct btf_member *m = btf_members(t) + idx;
1297 
1298 	m->name_off = MARKED;
1299 }
1300 
/* Mark type_id — and, recursively, every type it references — as used in
 * the scratch BTF copy. When follow_pointers is false, pointees are not
 * chased (callers pass false for field-based relocations, true for
 * type-based ones). Returns 0 on success, -EINVAL for unsupported kinds.
 */
static int
btfgen_mark_type(struct btfgen_info *info, unsigned int type_id, bool follow_pointers)
{
	const struct btf_type *btf_type = btf__type_by_id(info->src_btf, type_id);
	struct btf_type *cloned_type;
	struct btf_param *param;
	struct btf_array *array;
	int err, i;

	/* type ID 0 is "void": nothing to record */
	if (type_id == 0)
		return 0;

	/* mark type on cloned BTF as used */
	cloned_type = (struct btf_type *) btf__type_by_id(info->marked_btf, type_id);
	cloned_type->name_off = MARKED;

	/* recursively mark other types needed by it */
	switch (btf_kind(btf_type)) {
	case BTF_KIND_UNKN:
	case BTF_KIND_INT:
	case BTF_KIND_FLOAT:
	case BTF_KIND_ENUM:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		/* self-contained kinds: no referenced types to follow */
		break;
	case BTF_KIND_PTR:
		if (follow_pointers) {
			err = btfgen_mark_type(info, btf_type->type, follow_pointers);
			if (err)
				return err;
		}
		break;
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_TYPEDEF:
		/* modifiers and typedefs: always follow the wrapped type */
		err = btfgen_mark_type(info, btf_type->type, follow_pointers);
		if (err)
			return err;
		break;
	case BTF_KIND_ARRAY:
		array = btf_array(btf_type);

		/* mark array type */
		err = btfgen_mark_type(info, array->type, follow_pointers);
		/* mark array's index type */
		err = err ? : btfgen_mark_type(info, array->index_type, follow_pointers);
		if (err)
			return err;
		break;
	case BTF_KIND_FUNC_PROTO:
		/* mark ret type */
		err = btfgen_mark_type(info, btf_type->type, follow_pointers);
		if (err)
			return err;

		/* mark parameters types */
		param = btf_params(btf_type);
		for (i = 0; i < btf_vlen(btf_type); i++) {
			err = btfgen_mark_type(info, param->type, follow_pointers);
			if (err)
				return err;
			param++;
		}
		break;
	/* tells if some other type needs to be handled */
	default:
		p_err("unsupported kind: %s (%d)", btf_kind_str(btf_type), type_id);
		return -EINVAL;
	}

	return 0;
}
1373 
/* Record all types a field-based relocation depends on: the root type plus,
 * for each step of the target access spec, the traversed struct/union member
 * (and its type) or array element type. Pointers are not chased, keeping the
 * generated BTF minimal. Returns 0 on success, negative error otherwise.
 */
static int btfgen_record_field_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
	struct btf *btf = info->src_btf;
	const struct btf_type *btf_type;
	struct btf_member *btf_member;
	struct btf_array *array;
	unsigned int type_id = targ_spec->root_type_id;
	int idx, err;

	/* mark root type */
	btf_type = btf__type_by_id(btf, type_id);
	err = btfgen_mark_type(info, type_id, false);
	if (err)
		return err;

	/* mark types for complex types (arrays, unions, structures) */
	for (int i = 1; i < targ_spec->raw_len; i++) {
		/* skip typedefs and mods */
		while (btf_is_mod(btf_type) || btf_is_typedef(btf_type)) {
			type_id = btf_type->type;
			btf_type = btf__type_by_id(btf, type_id);
		}

		switch (btf_kind(btf_type)) {
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
			/* raw_spec[i] is the member index at this access step */
			idx = targ_spec->raw_spec[i];
			btf_member = btf_members(btf_type) + idx;

			/* mark member */
			btfgen_mark_member(info, type_id, idx);

			/* mark member's type */
			type_id = btf_member->type;
			btf_type = btf__type_by_id(btf, type_id);
			err = btfgen_mark_type(info, type_id, false);
			if (err)
				return err;
			break;
		case BTF_KIND_ARRAY:
			/* descend into the element type; raw_spec[i] is an
			 * array index and needs no marking of its own
			 */
			array = btf_array(btf_type);
			type_id = array->type;
			btf_type = btf__type_by_id(btf, type_id);
			break;
		default:
			/* NOTE(review): prints btf_type->type rather than the
			 * offending type_id — confirm which ID is intended here.
			 */
			p_err("unsupported kind: %s (%d)",
			      btf_kind_str(btf_type), btf_type->type);
			return -EINVAL;
		}
	}

	return 0;
}
1427 
1428 static int btfgen_record_type_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
1429 {
1430 	return btfgen_mark_type(info, targ_spec->root_type_id, true);
1431 }
1432 
1433 static int btfgen_record_enumval_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
1434 {
1435 	return btfgen_mark_type(info, targ_spec->root_type_id, false);
1436 }
1437 
1438 static int btfgen_record_reloc(struct btfgen_info *info, struct bpf_core_spec *res)
1439 {
1440 	switch (res->relo_kind) {
1441 	case BPF_CORE_FIELD_BYTE_OFFSET:
1442 	case BPF_CORE_FIELD_BYTE_SIZE:
1443 	case BPF_CORE_FIELD_EXISTS:
1444 	case BPF_CORE_FIELD_SIGNED:
1445 	case BPF_CORE_FIELD_LSHIFT_U64:
1446 	case BPF_CORE_FIELD_RSHIFT_U64:
1447 		return btfgen_record_field_relo(info, res);
1448 	case BPF_CORE_TYPE_ID_LOCAL: /* BPF_CORE_TYPE_ID_LOCAL doesn't require kernel BTF */
1449 		return 0;
1450 	case BPF_CORE_TYPE_ID_TARGET:
1451 	case BPF_CORE_TYPE_EXISTS:
1452 	case BPF_CORE_TYPE_SIZE:
1453 		return btfgen_record_type_relo(info, res);
1454 	case BPF_CORE_ENUMVAL_EXISTS:
1455 	case BPF_CORE_ENUMVAL_VALUE:
1456 		return btfgen_record_enumval_relo(info, res);
1457 	default:
1458 		return -EINVAL;
1459 	}
1460 }
1461 
/* Build the list of candidate types in targ_btf whose essential name matches
 * local type local_id. The returned list is owned by the caller (freed with
 * bpf_core_free_cands()). Returns NULL on failure with errno set.
 */
static struct bpf_core_cand_list *
btfgen_find_cands(const struct btf *local_btf, const struct btf *targ_btf, __u32 local_id)
{
	const struct btf_type *local_type;
	struct bpf_core_cand_list *cands = NULL;
	struct bpf_core_cand local_cand = {};
	size_t local_essent_len;
	const char *local_name;
	int err;

	local_cand.btf = local_btf;
	local_cand.id = local_id;

	local_type = btf__type_by_id(local_btf, local_id);
	if (!local_type) {
		err = -EINVAL;
		goto err_out;
	}

	local_name = btf__name_by_offset(local_btf, local_type->name_off);
	if (!local_name) {
		err = -EINVAL;
		goto err_out;
	}
	/* essential name length strips any "___suffix" flavor */
	local_essent_len = bpf_core_essential_name_len(local_name);

	cands = calloc(1, sizeof(*cands));
	if (!cands)
		return NULL; /* calloc already set errno */

	err = bpf_core_add_cands(&local_cand, local_essent_len, targ_btf, "vmlinux", 1, cands);
	if (err)
		goto err_out;

	return cands;

err_out:
	bpf_core_free_cands(cands);
	errno = -err; /* propagate the failure via errno */
	return NULL;
}
1503 
/* Record relocation information for a single BPF object.
 *
 * Parses the object's BTF and .BTF.ext sections, resolves every CO-RE
 * relocation against the target BTF in 'info', and marks the types/members
 * each relocation needs. Candidate lists are cached per root type ID so
 * the target BTF is searched only once per distinct type.
 * Returns 0 on success, negative error code otherwise.
 */
static int btfgen_record_obj(struct btfgen_info *info, const char *obj_path)
{
	const struct btf_ext_info_sec *sec;
	const struct bpf_core_relo *relo;
	const struct btf_ext_info *seg;
	struct hashmap_entry *entry;
	struct hashmap *cand_cache = NULL;
	struct btf_ext *btf_ext = NULL;
	unsigned int relo_idx;
	struct btf *btf = NULL;
	size_t i;
	int err;

	btf = btf__parse(obj_path, &btf_ext);
	if (!btf) {
		err = -errno;
		p_err("failed to parse BPF object '%s': %s", obj_path, strerror(errno));
		return err;
	}

	if (!btf_ext) {
		p_err("failed to parse BPF object '%s': section %s not found",
		      obj_path, BTF_EXT_ELF_SEC);
		err = -EINVAL;
		goto out;
	}

	/* no CO-RE relocations: nothing to record for this object */
	if (btf_ext->core_relo_info.len == 0) {
		err = 0;
		goto out;
	}

	cand_cache = hashmap__new(btfgen_hash_fn, btfgen_equal_fn, NULL);
	if (IS_ERR(cand_cache)) {
		err = PTR_ERR(cand_cache);
		goto out;
	}

	/* walk every CO-RE relocation record in every ELF section */
	seg = &btf_ext->core_relo_info;
	for_each_btf_ext_sec(seg, sec) {
		for_each_btf_ext_rec(seg, sec, relo_idx, relo) {
			struct bpf_core_spec specs_scratch[3] = {};
			struct bpf_core_relo_res targ_res = {};
			struct bpf_core_cand_list *cands = NULL;
			const void *type_key = u32_as_hash_key(relo->type_id);
			const char *sec_name = btf__name_by_offset(btf, sec->sec_name_off);

			/* TYPE_ID_LOCAL needs no target candidates; otherwise
			 * look up (or build and cache) the candidate list
			 */
			if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
			    !hashmap__find(cand_cache, type_key, (void **)&cands)) {
				cands = btfgen_find_cands(btf, info->src_btf, relo->type_id);
				if (!cands) {
					err = -errno;
					goto out;
				}

				err = hashmap__set(cand_cache, type_key, cands, NULL, NULL);
				if (err)
					goto out;
			}

			err = bpf_core_calc_relo_insn(sec_name, relo, relo_idx, btf, cands,
						      specs_scratch, &targ_res);
			if (err)
				goto out;

			/* specs_scratch[2] is the target spec */
			err = btfgen_record_reloc(info, &specs_scratch[2]);
			if (err)
				goto out;
		}
	}

out:
	btf__free(btf);
	btf_ext__free(btf_ext);

	/* cand_cache owns the cached candidate lists; free them all */
	if (!IS_ERR_OR_NULL(cand_cache)) {
		hashmap__for_each_entry(cand_cache, entry, i) {
			bpf_core_free_cands(entry->value);
		}
		hashmap__free(cand_cache);
	}

	return err;
}
1590 
/* btf_type_visit_type_ids() callback: translate an old type ID into its
 * new ID via the table built in btfgen_get_btf()'s first pass (ctx).
 */
static int btfgen_remap_id(__u32 *type_id, void *ctx)
{
	const unsigned int *id_map = ctx;

	*type_id = id_map[*type_id];

	return 0;
}
1599 
/* Generate BTF from relocation information previously recorded.
 *
 * Pass 1 copies every type whose name_off was set to MARKED into a fresh
 * BTF object (for structs/unions, only MARKED members are copied) while
 * building an old-ID -> new-ID table. Pass 2 rewrites all type references
 * in the new object through that table.
 * Returns the new BTF object, or NULL on failure with errno set.
 */
static struct btf *btfgen_get_btf(struct btfgen_info *info)
{
	struct btf *btf_new = NULL;
	unsigned int *ids = NULL;
	unsigned int i, n = btf__type_cnt(info->marked_btf);
	int err = 0;

	btf_new = btf__new_empty();
	if (!btf_new) {
		err = -errno;
		goto err_out;
	}

	/* old-ID -> new-ID map; unmarked types keep the 0 ("void") mapping */
	ids = calloc(n, sizeof(*ids));
	if (!ids) {
		err = -errno;
		goto err_out;
	}

	/* first pass: add all marked types to btf_new and add their new ids to the ids map */
	for (i = 1; i < n; i++) {
		const struct btf_type *cloned_type, *type;
		const char *name;
		int new_id;

		cloned_type = btf__type_by_id(info->marked_btf, i);

		if (cloned_type->name_off != MARKED)
			continue;

		type = btf__type_by_id(info->src_btf, i);

		/* add members for struct and union */
		if (btf_is_composite(type)) {
			struct btf_member *cloned_m, *m;
			unsigned short vlen;
			int idx_src;

			name = btf__str_by_offset(info->src_btf, type->name_off);

			if (btf_is_struct(type))
				err = btf__add_struct(btf_new, name, type->size);
			else
				err = btf__add_union(btf_new, name, type->size);

			if (err < 0)
				goto err_out;
			/* btf__add_*() return the new type's ID on success */
			new_id = err;

			cloned_m = btf_members(cloned_type);
			m = btf_members(type);
			vlen = btf_vlen(cloned_type);
			for (idx_src = 0; idx_src < vlen; idx_src++, cloned_m++, m++) {
				/* add only members that are marked as used */
				if (cloned_m->name_off != MARKED)
					continue;

				name = btf__str_by_offset(info->src_btf, m->name_off);
				err = btf__add_field(btf_new, name, m->type,
						     btf_member_bit_offset(cloned_type, idx_src),
						     btf_member_bitfield_size(cloned_type, idx_src));
				if (err < 0)
					goto err_out;
			}
		} else {
			/* non-composite kinds are copied wholesale */
			err = btf__add_type(btf_new, info->src_btf, type);
			if (err < 0)
				goto err_out;
			new_id = err;
		}

		/* add ID mapping */
		ids[i] = new_id;
	}

	/* second pass: fix up type ids */
	for (i = 1; i < btf__type_cnt(btf_new); i++) {
		struct btf_type *btf_type = (struct btf_type *) btf__type_by_id(btf_new, i);

		err = btf_type_visit_type_ids(btf_type, btfgen_remap_id, ids);
		if (err)
			goto err_out;
	}

	free(ids);
	return btf_new;

err_out:
	btf__free(btf_new);
	free(ids);
	errno = -err;
	return NULL;
}
1694 
/* Create minimized BTF file for a set of BPF objects.
 *
 * The BTFGen algorithm is divided in two main parts: (1) collect the
 * BTF types that are involved in relocations and (2) generate the BTF
 * object using the collected types.
 *
 * In order to collect the types involved in the relocations, we parse
 * the BTF and BTF.ext sections of the BPF objects and use
 * bpf_core_calc_relo_insn() to get the target specification, this
 * indicates how the types and fields are used in a relocation.
 *
 * Types are recorded in different ways according to the kind of the
 * relocation. For field-based relocations only the members that are
 * actually used are saved in order to reduce the size of the generated
 * BTF file. For type-based relocations empty struct / unions are
 * generated and for enum-based relocations the whole type is saved.
 *
 * The second part of the algorithm generates the BTF object. It creates
 * an empty BTF object and fills it with the types recorded in the
 * previous step. This function takes care of only adding the structure
 * and union members that were marked as used and it also fixes up the
 * type IDs on the generated BTF object.
 *
 * src_btf:   path to the target (e.g. vmlinux) BTF file
 * dst_btf:   path where the minimized BTF is written
 * objspaths: NULL-terminated array of BPF object file paths
 * Returns 0 on success, negative error code otherwise.
 */
static int minimize_btf(const char *src_btf, const char *dst_btf, const char *objspaths[])
{
	struct btfgen_info *info;
	struct btf *btf_new = NULL;
	int err, i;

	info = btfgen_new_info(src_btf);
	if (!info) {
		err = -errno;
		p_err("failed to allocate info structure: %s", strerror(errno));
		goto out;
	}

	/* part 1: record the types each object's relocations depend on */
	for (i = 0; objspaths[i] != NULL; i++) {
		err = btfgen_record_obj(info, objspaths[i]);
		if (err) {
			p_err("error recording relocations for %s: %s", objspaths[i],
			      strerror(errno));
			goto out;
		}
	}

	/* part 2: build the minimized BTF from the recorded types */
	btf_new = btfgen_get_btf(info);
	if (!btf_new) {
		err = -errno;
		p_err("error generating BTF: %s", strerror(errno));
		goto out;
	}

	err = btf_save_raw(btf_new, dst_btf);
	if (err) {
		p_err("error saving btf file: %s", strerror(errno));
		goto out;
	}

out:
	btf__free(btf_new);
	btfgen_free_info(info);

	return err;
}
1759 
/* "gen min_core_btf" handler: build a minimized BTF covering the CO-RE
 * relocations of the given objects.
 *
 * argv: INPUT OUTPUT OBJECT [OBJECT...]
 * Returns 0 on success, negative error code (or -1 on usage error) otherwise.
 */
static int do_min_core_btf(int argc, char **argv)
{
	const char *input, *output, **objs;
	int err, i = 0;

	if (!REQ_ARGS(3)) {
		usage();
		return -1;
	}

	input = GET_ARG();
	output = GET_ARG();

	/* NULL-terminated array holding the remaining object paths */
	objs = (const char **) calloc(argc + 1, sizeof(*objs));
	if (!objs) {
		p_err("failed to allocate array for object names");
		return -ENOMEM;
	}

	while (argc)
		objs[i++] = GET_ARG();

	err = minimize_btf(input, output, objs);

	free(objs);
	return err;
}
1787 
/* Dispatch table for "bpftool gen" subcommands; zero entry terminates. */
static const struct cmd cmds[] = {
	{ "object",		do_object },
	{ "skeleton",		do_skeleton },
	{ "min_core_btf",	do_min_core_btf},
	{ "help",		do_help },
	{ 0 }
};
1795 
/* Entry point for "bpftool gen": select and run the matching subcommand,
 * falling back to do_help() for unknown or missing subcommands.
 */
int do_gen(int argc, char **argv)
{
	return cmd_select(cmds, argc, argv, do_help);
}
1800