xref: /openbmc/linux/tools/bpf/bpftool/prog.c (revision ac73d4bf)
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2017-2018 Netronome Systems, Inc. */
3 
4 #ifndef _GNU_SOURCE
5 #define _GNU_SOURCE
6 #endif
7 #include <errno.h>
8 #include <fcntl.h>
9 #include <signal.h>
10 #include <stdarg.h>
11 #include <stdio.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <time.h>
15 #include <unistd.h>
16 #include <net/if.h>
17 #include <sys/ioctl.h>
18 #include <sys/types.h>
19 #include <sys/stat.h>
20 #include <sys/syscall.h>
21 #include <dirent.h>
22 
23 #include <linux/err.h>
24 #include <linux/perf_event.h>
25 #include <linux/sizes.h>
26 
27 #include <bpf/bpf.h>
28 #include <bpf/btf.h>
29 #include <bpf/hashmap.h>
30 #include <bpf/libbpf.h>
31 #include <bpf/libbpf_internal.h>
32 #include <bpf/skel_internal.h>
33 
34 #include "cfg.h"
35 #include "main.h"
36 #include "xlated_dumper.h"
37 
38 #define BPF_METADATA_PREFIX "bpf_metadata_"
39 #define BPF_METADATA_PREFIX_LEN (sizeof(BPF_METADATA_PREFIX) - 1)
40 
41 enum dump_mode {
42 	DUMP_JITED,
43 	DUMP_XLATED,
44 };
45 
46 static const bool attach_types[] = {
47 	[BPF_SK_SKB_STREAM_PARSER] = true,
48 	[BPF_SK_SKB_STREAM_VERDICT] = true,
49 	[BPF_SK_SKB_VERDICT] = true,
50 	[BPF_SK_MSG_VERDICT] = true,
51 	[BPF_FLOW_DISSECTOR] = true,
52 	[__MAX_BPF_ATTACH_TYPE] = false,
53 };
54 
55 /* Textual representations traditionally used by the program and kept around
56  * for the sake of backwards compatibility.
57  */
58 static const char * const attach_type_strings[] = {
59 	[BPF_SK_SKB_STREAM_PARSER] = "stream_parser",
60 	[BPF_SK_SKB_STREAM_VERDICT] = "stream_verdict",
61 	[BPF_SK_SKB_VERDICT] = "skb_verdict",
62 	[BPF_SK_MSG_VERDICT] = "msg_verdict",
63 	[__MAX_BPF_ATTACH_TYPE] = NULL,
64 };
65 
66 static struct hashmap *prog_table;
67 
68 static enum bpf_attach_type parse_attach_type(const char *str)
69 {
70 	enum bpf_attach_type type;
71 
72 	for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
73 		if (attach_types[type]) {
74 			const char *attach_type_str;
75 
76 			attach_type_str = libbpf_bpf_attach_type_str(type);
77 			if (!strcmp(str, attach_type_str))
78 				return type;
79 		}
80 
81 		if (attach_type_strings[type] &&
82 		    is_prefix(str, attach_type_strings[type]))
83 			return type;
84 	}
85 
86 	return __MAX_BPF_ATTACH_TYPE;
87 }
88 
89 static int prep_prog_info(struct bpf_prog_info *const info, enum dump_mode mode,
90 			  void **info_data, size_t *const info_data_sz)
91 {
92 	struct bpf_prog_info holder = {};
93 	size_t needed = 0;
94 	void *ptr;
95 
96 	if (mode == DUMP_JITED) {
97 		holder.jited_prog_len = info->jited_prog_len;
98 		needed += info->jited_prog_len;
99 	} else {
100 		holder.xlated_prog_len = info->xlated_prog_len;
101 		needed += info->xlated_prog_len;
102 	}
103 
104 	holder.nr_jited_ksyms = info->nr_jited_ksyms;
105 	needed += info->nr_jited_ksyms * sizeof(__u64);
106 
107 	holder.nr_jited_func_lens = info->nr_jited_func_lens;
108 	needed += info->nr_jited_func_lens * sizeof(__u32);
109 
110 	holder.nr_func_info = info->nr_func_info;
111 	holder.func_info_rec_size = info->func_info_rec_size;
112 	needed += info->nr_func_info * info->func_info_rec_size;
113 
114 	holder.nr_line_info = info->nr_line_info;
115 	holder.line_info_rec_size = info->line_info_rec_size;
116 	needed += info->nr_line_info * info->line_info_rec_size;
117 
118 	holder.nr_jited_line_info = info->nr_jited_line_info;
119 	holder.jited_line_info_rec_size = info->jited_line_info_rec_size;
120 	needed += info->nr_jited_line_info * info->jited_line_info_rec_size;
121 
122 	if (needed > *info_data_sz) {
123 		ptr = realloc(*info_data, needed);
124 		if (!ptr)
125 			return -1;
126 
127 		*info_data = ptr;
128 		*info_data_sz = needed;
129 	}
130 	ptr = *info_data;
131 
132 	if (mode == DUMP_JITED) {
133 		holder.jited_prog_insns = ptr_to_u64(ptr);
134 		ptr += holder.jited_prog_len;
135 	} else {
136 		holder.xlated_prog_insns = ptr_to_u64(ptr);
137 		ptr += holder.xlated_prog_len;
138 	}
139 
140 	holder.jited_ksyms = ptr_to_u64(ptr);
141 	ptr += holder.nr_jited_ksyms * sizeof(__u64);
142 
143 	holder.jited_func_lens = ptr_to_u64(ptr);
144 	ptr += holder.nr_jited_func_lens * sizeof(__u32);
145 
146 	holder.func_info = ptr_to_u64(ptr);
147 	ptr += holder.nr_func_info * holder.func_info_rec_size;
148 
149 	holder.line_info = ptr_to_u64(ptr);
150 	ptr += holder.nr_line_info * holder.line_info_rec_size;
151 
152 	holder.jited_line_info = ptr_to_u64(ptr);
153 	ptr += holder.nr_jited_line_info * holder.jited_line_info_rec_size;
154 
155 	*info = holder;
156 	return 0;
157 }
158 
159 static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
160 {
161 	struct timespec real_time_ts, boot_time_ts;
162 	time_t wallclock_secs;
163 	struct tm load_tm;
164 
165 	buf[--size] = '\0';
166 
167 	if (clock_gettime(CLOCK_REALTIME, &real_time_ts) ||
168 	    clock_gettime(CLOCK_BOOTTIME, &boot_time_ts)) {
169 		perror("Can't read clocks");
170 		snprintf(buf, size, "%llu", nsecs / 1000000000);
171 		return;
172 	}
173 
174 	wallclock_secs = (real_time_ts.tv_sec - boot_time_ts.tv_sec) +
175 		(real_time_ts.tv_nsec - boot_time_ts.tv_nsec + nsecs) /
176 		1000000000;
177 
178 
179 	if (!localtime_r(&wallclock_secs, &load_tm)) {
180 		snprintf(buf, size, "%llu", nsecs / 1000000000);
181 		return;
182 	}
183 
184 	if (json_output)
185 		strftime(buf, size, "%s", &load_tm);
186 	else
187 		strftime(buf, size, "%FT%T%z", &load_tm);
188 }
189 
190 static void show_prog_maps(int fd, __u32 num_maps)
191 {
192 	struct bpf_prog_info info = {};
193 	__u32 len = sizeof(info);
194 	__u32 map_ids[num_maps];
195 	unsigned int i;
196 	int err;
197 
198 	info.nr_map_ids = num_maps;
199 	info.map_ids = ptr_to_u64(map_ids);
200 
201 	err = bpf_obj_get_info_by_fd(fd, &info, &len);
202 	if (err || !info.nr_map_ids)
203 		return;
204 
205 	if (json_output) {
206 		jsonw_name(json_wtr, "map_ids");
207 		jsonw_start_array(json_wtr);
208 		for (i = 0; i < info.nr_map_ids; i++)
209 			jsonw_uint(json_wtr, map_ids[i]);
210 		jsonw_end_array(json_wtr);
211 	} else {
212 		printf("  map_ids ");
213 		for (i = 0; i < info.nr_map_ids; i++)
214 			printf("%u%s", map_ids[i],
215 			       i == info.nr_map_ids - 1 ? "" : ",");
216 	}
217 }
218 
219 static void *find_metadata(int prog_fd, struct bpf_map_info *map_info)
220 {
221 	struct bpf_prog_info prog_info;
222 	__u32 prog_info_len;
223 	__u32 map_info_len;
224 	void *value = NULL;
225 	__u32 *map_ids;
226 	int nr_maps;
227 	int key = 0;
228 	int map_fd;
229 	int ret;
230 	__u32 i;
231 
232 	memset(&prog_info, 0, sizeof(prog_info));
233 	prog_info_len = sizeof(prog_info);
234 	ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
235 	if (ret)
236 		return NULL;
237 
238 	if (!prog_info.nr_map_ids)
239 		return NULL;
240 
241 	map_ids = calloc(prog_info.nr_map_ids, sizeof(__u32));
242 	if (!map_ids)
243 		return NULL;
244 
245 	nr_maps = prog_info.nr_map_ids;
246 	memset(&prog_info, 0, sizeof(prog_info));
247 	prog_info.nr_map_ids = nr_maps;
248 	prog_info.map_ids = ptr_to_u64(map_ids);
249 	prog_info_len = sizeof(prog_info);
250 
251 	ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
252 	if (ret)
253 		goto free_map_ids;
254 
255 	for (i = 0; i < prog_info.nr_map_ids; i++) {
256 		map_fd = bpf_map_get_fd_by_id(map_ids[i]);
257 		if (map_fd < 0)
258 			goto free_map_ids;
259 
260 		memset(map_info, 0, sizeof(*map_info));
261 		map_info_len = sizeof(*map_info);
262 		ret = bpf_obj_get_info_by_fd(map_fd, map_info, &map_info_len);
263 		if (ret < 0) {
264 			close(map_fd);
265 			goto free_map_ids;
266 		}
267 
268 		if (map_info->type != BPF_MAP_TYPE_ARRAY ||
269 		    map_info->key_size != sizeof(int) ||
270 		    map_info->max_entries != 1 ||
271 		    !map_info->btf_value_type_id ||
272 		    !strstr(map_info->name, ".rodata")) {
273 			close(map_fd);
274 			continue;
275 		}
276 
277 		value = malloc(map_info->value_size);
278 		if (!value) {
279 			close(map_fd);
280 			goto free_map_ids;
281 		}
282 
283 		if (bpf_map_lookup_elem(map_fd, &key, value)) {
284 			close(map_fd);
285 			free(value);
286 			value = NULL;
287 			goto free_map_ids;
288 		}
289 
290 		close(map_fd);
291 		break;
292 	}
293 
294 free_map_ids:
295 	free(map_ids);
296 	return value;
297 }
298 
299 static bool has_metadata_prefix(const char *s)
300 {
301 	return strncmp(s, BPF_METADATA_PREFIX, BPF_METADATA_PREFIX_LEN) == 0;
302 }
303 
304 static void show_prog_metadata(int fd, __u32 num_maps)
305 {
306 	const struct btf_type *t_datasec, *t_var;
307 	struct bpf_map_info map_info;
308 	struct btf_var_secinfo *vsi;
309 	bool printed_header = false;
310 	unsigned int i, vlen;
311 	void *value = NULL;
312 	const char *name;
313 	struct btf *btf;
314 	int err;
315 
316 	if (!num_maps)
317 		return;
318 
319 	memset(&map_info, 0, sizeof(map_info));
320 	value = find_metadata(fd, &map_info);
321 	if (!value)
322 		return;
323 
324 	btf = btf__load_from_kernel_by_id(map_info.btf_id);
325 	if (libbpf_get_error(btf))
326 		goto out_free;
327 
328 	t_datasec = btf__type_by_id(btf, map_info.btf_value_type_id);
329 	if (!btf_is_datasec(t_datasec))
330 		goto out_free;
331 
332 	vlen = btf_vlen(t_datasec);
333 	vsi = btf_var_secinfos(t_datasec);
334 
335 	/* We don't proceed to check the kinds of the elements of the DATASEC.
336 	 * The verifier enforces them to be BTF_KIND_VAR.
337 	 */
338 
339 	if (json_output) {
340 		struct btf_dumper d = {
341 			.btf = btf,
342 			.jw = json_wtr,
343 			.is_plain_text = false,
344 		};
345 
346 		for (i = 0; i < vlen; i++, vsi++) {
347 			t_var = btf__type_by_id(btf, vsi->type);
348 			name = btf__name_by_offset(btf, t_var->name_off);
349 
350 			if (!has_metadata_prefix(name))
351 				continue;
352 
353 			if (!printed_header) {
354 				jsonw_name(json_wtr, "metadata");
355 				jsonw_start_object(json_wtr);
356 				printed_header = true;
357 			}
358 
359 			jsonw_name(json_wtr, name + BPF_METADATA_PREFIX_LEN);
360 			err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
361 			if (err) {
362 				p_err("btf dump failed: %d", err);
363 				break;
364 			}
365 		}
366 		if (printed_header)
367 			jsonw_end_object(json_wtr);
368 	} else {
369 		json_writer_t *btf_wtr;
370 		struct btf_dumper d = {
371 			.btf = btf,
372 			.is_plain_text = true,
373 		};
374 
375 		for (i = 0; i < vlen; i++, vsi++) {
376 			t_var = btf__type_by_id(btf, vsi->type);
377 			name = btf__name_by_offset(btf, t_var->name_off);
378 
379 			if (!has_metadata_prefix(name))
380 				continue;
381 
382 			if (!printed_header) {
383 				printf("\tmetadata:");
384 
385 				btf_wtr = jsonw_new(stdout);
386 				if (!btf_wtr) {
387 					p_err("jsonw alloc failed");
388 					goto out_free;
389 				}
390 				d.jw = btf_wtr,
391 
392 				printed_header = true;
393 			}
394 
395 			printf("\n\t\t%s = ", name + BPF_METADATA_PREFIX_LEN);
396 
397 			jsonw_reset(btf_wtr);
398 			err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
399 			if (err) {
400 				p_err("btf dump failed: %d", err);
401 				break;
402 			}
403 		}
404 		if (printed_header)
405 			jsonw_destroy(&btf_wtr);
406 	}
407 
408 out_free:
409 	btf__free(btf);
410 	free(value);
411 }
412 
413 static void print_prog_header_json(struct bpf_prog_info *info, int fd)
414 {
415 	const char *prog_type_str;
416 	char prog_name[MAX_PROG_FULL_NAME];
417 
418 	jsonw_uint_field(json_wtr, "id", info->id);
419 	prog_type_str = libbpf_bpf_prog_type_str(info->type);
420 
421 	if (prog_type_str)
422 		jsonw_string_field(json_wtr, "type", prog_type_str);
423 	else
424 		jsonw_uint_field(json_wtr, "type", info->type);
425 
426 	if (*info->name) {
427 		get_prog_full_name(info, fd, prog_name, sizeof(prog_name));
428 		jsonw_string_field(json_wtr, "name", prog_name);
429 	}
430 
431 	jsonw_name(json_wtr, "tag");
432 	jsonw_printf(json_wtr, "\"" BPF_TAG_FMT "\"",
433 		     info->tag[0], info->tag[1], info->tag[2], info->tag[3],
434 		     info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
435 
436 	jsonw_bool_field(json_wtr, "gpl_compatible", info->gpl_compatible);
437 	if (info->run_time_ns) {
438 		jsonw_uint_field(json_wtr, "run_time_ns", info->run_time_ns);
439 		jsonw_uint_field(json_wtr, "run_cnt", info->run_cnt);
440 	}
441 	if (info->recursion_misses)
442 		jsonw_uint_field(json_wtr, "recursion_misses", info->recursion_misses);
443 }
444 
445 static void print_prog_json(struct bpf_prog_info *info, int fd)
446 {
447 	char *memlock;
448 
449 	jsonw_start_object(json_wtr);
450 	print_prog_header_json(info, fd);
451 	print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
452 
453 	if (info->load_time) {
454 		char buf[32];
455 
456 		print_boot_time(info->load_time, buf, sizeof(buf));
457 
458 		/* Piggy back on load_time, since 0 uid is a valid one */
459 		jsonw_name(json_wtr, "loaded_at");
460 		jsonw_printf(json_wtr, "%s", buf);
461 		jsonw_uint_field(json_wtr, "uid", info->created_by_uid);
462 	}
463 
464 	jsonw_uint_field(json_wtr, "bytes_xlated", info->xlated_prog_len);
465 
466 	if (info->jited_prog_len) {
467 		jsonw_bool_field(json_wtr, "jited", true);
468 		jsonw_uint_field(json_wtr, "bytes_jited", info->jited_prog_len);
469 	} else {
470 		jsonw_bool_field(json_wtr, "jited", false);
471 	}
472 
473 	memlock = get_fdinfo(fd, "memlock");
474 	if (memlock)
475 		jsonw_int_field(json_wtr, "bytes_memlock", atoll(memlock));
476 	free(memlock);
477 
478 	if (info->nr_map_ids)
479 		show_prog_maps(fd, info->nr_map_ids);
480 
481 	if (info->btf_id)
482 		jsonw_int_field(json_wtr, "btf_id", info->btf_id);
483 
484 	if (!hashmap__empty(prog_table)) {
485 		struct hashmap_entry *entry;
486 
487 		jsonw_name(json_wtr, "pinned");
488 		jsonw_start_array(json_wtr);
489 		hashmap__for_each_key_entry(prog_table, entry,
490 					    u32_as_hash_field(info->id))
491 			jsonw_string(json_wtr, entry->value);
492 		jsonw_end_array(json_wtr);
493 	}
494 
495 	emit_obj_refs_json(refs_table, info->id, json_wtr);
496 
497 	show_prog_metadata(fd, info->nr_map_ids);
498 
499 	jsonw_end_object(json_wtr);
500 }
501 
502 static void print_prog_header_plain(struct bpf_prog_info *info, int fd)
503 {
504 	const char *prog_type_str;
505 	char prog_name[MAX_PROG_FULL_NAME];
506 
507 	printf("%u: ", info->id);
508 	prog_type_str = libbpf_bpf_prog_type_str(info->type);
509 	if (prog_type_str)
510 		printf("%s  ", prog_type_str);
511 	else
512 		printf("type %u  ", info->type);
513 
514 	if (*info->name) {
515 		get_prog_full_name(info, fd, prog_name, sizeof(prog_name));
516 		printf("name %s  ", prog_name);
517 	}
518 
519 	printf("tag ");
520 	fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
521 	print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
522 	printf("%s", info->gpl_compatible ? "  gpl" : "");
523 	if (info->run_time_ns)
524 		printf(" run_time_ns %lld run_cnt %lld",
525 		       info->run_time_ns, info->run_cnt);
526 	if (info->recursion_misses)
527 		printf(" recursion_misses %lld", info->recursion_misses);
528 	printf("\n");
529 }
530 
531 static void print_prog_plain(struct bpf_prog_info *info, int fd)
532 {
533 	char *memlock;
534 
535 	print_prog_header_plain(info, fd);
536 
537 	if (info->load_time) {
538 		char buf[32];
539 
540 		print_boot_time(info->load_time, buf, sizeof(buf));
541 
542 		/* Piggy back on load_time, since 0 uid is a valid one */
543 		printf("\tloaded_at %s  uid %u\n", buf, info->created_by_uid);
544 	}
545 
546 	printf("\txlated %uB", info->xlated_prog_len);
547 
548 	if (info->jited_prog_len)
549 		printf("  jited %uB", info->jited_prog_len);
550 	else
551 		printf("  not jited");
552 
553 	memlock = get_fdinfo(fd, "memlock");
554 	if (memlock)
555 		printf("  memlock %sB", memlock);
556 	free(memlock);
557 
558 	if (info->nr_map_ids)
559 		show_prog_maps(fd, info->nr_map_ids);
560 
561 	if (!hashmap__empty(prog_table)) {
562 		struct hashmap_entry *entry;
563 
564 		hashmap__for_each_key_entry(prog_table, entry,
565 					    u32_as_hash_field(info->id))
566 			printf("\n\tpinned %s", (char *)entry->value);
567 	}
568 
569 	if (info->btf_id)
570 		printf("\n\tbtf_id %d", info->btf_id);
571 
572 	emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");
573 
574 	printf("\n");
575 
576 	show_prog_metadata(fd, info->nr_map_ids);
577 }
578 
579 static int show_prog(int fd)
580 {
581 	struct bpf_prog_info info = {};
582 	__u32 len = sizeof(info);
583 	int err;
584 
585 	err = bpf_obj_get_info_by_fd(fd, &info, &len);
586 	if (err) {
587 		p_err("can't get prog info: %s", strerror(errno));
588 		return -1;
589 	}
590 
591 	if (json_output)
592 		print_prog_json(&info, fd);
593 	else
594 		print_prog_plain(&info, fd);
595 
596 	return 0;
597 }
598 
599 static int do_show_subset(int argc, char **argv)
600 {
601 	int *fds = NULL;
602 	int nb_fds, i;
603 	int err = -1;
604 
605 	fds = malloc(sizeof(int));
606 	if (!fds) {
607 		p_err("mem alloc failed");
608 		return -1;
609 	}
610 	nb_fds = prog_parse_fds(&argc, &argv, &fds);
611 	if (nb_fds < 1)
612 		goto exit_free;
613 
614 	if (json_output && nb_fds > 1)
615 		jsonw_start_array(json_wtr);	/* root array */
616 	for (i = 0; i < nb_fds; i++) {
617 		err = show_prog(fds[i]);
618 		if (err) {
619 			for (; i < nb_fds; i++)
620 				close(fds[i]);
621 			break;
622 		}
623 		close(fds[i]);
624 	}
625 	if (json_output && nb_fds > 1)
626 		jsonw_end_array(json_wtr);	/* root array */
627 
628 exit_free:
629 	free(fds);
630 	return err;
631 }
632 
633 static int do_show(int argc, char **argv)
634 {
635 	__u32 id = 0;
636 	int err;
637 	int fd;
638 
639 	if (show_pinned) {
640 		prog_table = hashmap__new(hash_fn_for_key_as_id,
641 					  equal_fn_for_key_as_id, NULL);
642 		if (IS_ERR(prog_table)) {
643 			p_err("failed to create hashmap for pinned paths");
644 			return -1;
645 		}
646 		build_pinned_obj_table(prog_table, BPF_OBJ_PROG);
647 	}
648 	build_obj_refs_table(&refs_table, BPF_OBJ_PROG);
649 
650 	if (argc == 2)
651 		return do_show_subset(argc, argv);
652 
653 	if (argc)
654 		return BAD_ARG();
655 
656 	if (json_output)
657 		jsonw_start_array(json_wtr);
658 	while (true) {
659 		err = bpf_prog_get_next_id(id, &id);
660 		if (err) {
661 			if (errno == ENOENT) {
662 				err = 0;
663 				break;
664 			}
665 			p_err("can't get next program: %s%s", strerror(errno),
666 			      errno == EINVAL ? " -- kernel too old?" : "");
667 			err = -1;
668 			break;
669 		}
670 
671 		fd = bpf_prog_get_fd_by_id(id);
672 		if (fd < 0) {
673 			if (errno == ENOENT)
674 				continue;
675 			p_err("can't get prog by id (%u): %s",
676 			      id, strerror(errno));
677 			err = -1;
678 			break;
679 		}
680 
681 		err = show_prog(fd);
682 		close(fd);
683 		if (err)
684 			break;
685 	}
686 
687 	if (json_output)
688 		jsonw_end_array(json_wtr);
689 
690 	delete_obj_refs_table(refs_table);
691 
692 	if (show_pinned)
693 		delete_pinned_obj_table(prog_table);
694 
695 	return err;
696 }
697 
698 static int
699 prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
700 	  char *filepath, bool opcodes, bool visual, bool linum)
701 {
702 	struct bpf_prog_linfo *prog_linfo = NULL;
703 	const char *disasm_opt = NULL;
704 	struct dump_data dd = {};
705 	void *func_info = NULL;
706 	struct btf *btf = NULL;
707 	char func_sig[1024];
708 	unsigned char *buf;
709 	__u32 member_len;
710 	int fd, err = -1;
711 	ssize_t n;
712 
713 	if (mode == DUMP_JITED) {
714 		if (info->jited_prog_len == 0 || !info->jited_prog_insns) {
715 			p_info("no instructions returned");
716 			return -1;
717 		}
718 		buf = u64_to_ptr(info->jited_prog_insns);
719 		member_len = info->jited_prog_len;
720 	} else {	/* DUMP_XLATED */
721 		if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
722 			p_err("error retrieving insn dump: kernel.kptr_restrict set?");
723 			return -1;
724 		}
725 		buf = u64_to_ptr(info->xlated_prog_insns);
726 		member_len = info->xlated_prog_len;
727 	}
728 
729 	if (info->btf_id) {
730 		btf = btf__load_from_kernel_by_id(info->btf_id);
731 		if (libbpf_get_error(btf)) {
732 			p_err("failed to get btf");
733 			return -1;
734 		}
735 	}
736 
737 	func_info = u64_to_ptr(info->func_info);
738 
739 	if (info->nr_line_info) {
740 		prog_linfo = bpf_prog_linfo__new(info);
741 		if (!prog_linfo)
742 			p_info("error in processing bpf_line_info.  continue without it.");
743 	}
744 
745 	if (filepath) {
746 		fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
747 		if (fd < 0) {
748 			p_err("can't open file %s: %s", filepath,
749 			      strerror(errno));
750 			goto exit_free;
751 		}
752 
753 		n = write(fd, buf, member_len);
754 		close(fd);
755 		if (n != (ssize_t)member_len) {
756 			p_err("error writing output file: %s",
757 			      n < 0 ? strerror(errno) : "short write");
758 			goto exit_free;
759 		}
760 
761 		if (json_output)
762 			jsonw_null(json_wtr);
763 	} else if (mode == DUMP_JITED) {
764 		const char *name = NULL;
765 
766 		if (info->ifindex) {
767 			name = ifindex_to_arch(info->ifindex, info->netns_dev,
768 					       info->netns_ino, &disasm_opt);
769 			if (!name)
770 				goto exit_free;
771 		}
772 
773 		if (info->nr_jited_func_lens && info->jited_func_lens) {
774 			struct kernel_sym *sym = NULL;
775 			struct bpf_func_info *record;
776 			char sym_name[SYM_MAX_NAME];
777 			unsigned char *img = buf;
778 			__u64 *ksyms = NULL;
779 			__u32 *lens;
780 			__u32 i;
781 			if (info->nr_jited_ksyms) {
782 				kernel_syms_load(&dd);
783 				ksyms = u64_to_ptr(info->jited_ksyms);
784 			}
785 
786 			if (json_output)
787 				jsonw_start_array(json_wtr);
788 
789 			lens = u64_to_ptr(info->jited_func_lens);
790 			for (i = 0; i < info->nr_jited_func_lens; i++) {
791 				if (ksyms) {
792 					sym = kernel_syms_search(&dd, ksyms[i]);
793 					if (sym)
794 						sprintf(sym_name, "%s", sym->name);
795 					else
796 						sprintf(sym_name, "0x%016llx", ksyms[i]);
797 				} else {
798 					strcpy(sym_name, "unknown");
799 				}
800 
801 				if (func_info) {
802 					record = func_info + i * info->func_info_rec_size;
803 					btf_dumper_type_only(btf, record->type_id,
804 							     func_sig,
805 							     sizeof(func_sig));
806 				}
807 
808 				if (json_output) {
809 					jsonw_start_object(json_wtr);
810 					if (func_info && func_sig[0] != '\0') {
811 						jsonw_name(json_wtr, "proto");
812 						jsonw_string(json_wtr, func_sig);
813 					}
814 					jsonw_name(json_wtr, "name");
815 					jsonw_string(json_wtr, sym_name);
816 					jsonw_name(json_wtr, "insns");
817 				} else {
818 					if (func_info && func_sig[0] != '\0')
819 						printf("%s:\n", func_sig);
820 					printf("%s:\n", sym_name);
821 				}
822 
823 				if (disasm_print_insn(img, lens[i], opcodes,
824 						      name, disasm_opt, btf,
825 						      prog_linfo, ksyms[i], i,
826 						      linum))
827 					goto exit_free;
828 
829 				img += lens[i];
830 
831 				if (json_output)
832 					jsonw_end_object(json_wtr);
833 				else
834 					printf("\n");
835 			}
836 
837 			if (json_output)
838 				jsonw_end_array(json_wtr);
839 		} else {
840 			if (disasm_print_insn(buf, member_len, opcodes, name,
841 					      disasm_opt, btf, NULL, 0, 0,
842 					      false))
843 				goto exit_free;
844 		}
845 	} else if (visual) {
846 		if (json_output)
847 			jsonw_null(json_wtr);
848 		else
849 			dump_xlated_cfg(buf, member_len);
850 	} else {
851 		kernel_syms_load(&dd);
852 		dd.nr_jited_ksyms = info->nr_jited_ksyms;
853 		dd.jited_ksyms = u64_to_ptr(info->jited_ksyms);
854 		dd.btf = btf;
855 		dd.func_info = func_info;
856 		dd.finfo_rec_size = info->func_info_rec_size;
857 		dd.prog_linfo = prog_linfo;
858 
859 		if (json_output)
860 			dump_xlated_json(&dd, buf, member_len, opcodes,
861 					 linum);
862 		else
863 			dump_xlated_plain(&dd, buf, member_len, opcodes,
864 					  linum);
865 		kernel_syms_destroy(&dd);
866 	}
867 
868 	err = 0;
869 
870 exit_free:
871 	btf__free(btf);
872 	bpf_prog_linfo__free(prog_linfo);
873 	return err;
874 }
875 
876 static int do_dump(int argc, char **argv)
877 {
878 	struct bpf_prog_info info;
879 	__u32 info_len = sizeof(info);
880 	size_t info_data_sz = 0;
881 	void *info_data = NULL;
882 	char *filepath = NULL;
883 	bool opcodes = false;
884 	bool visual = false;
885 	enum dump_mode mode;
886 	bool linum = false;
887 	int nb_fds, i = 0;
888 	int *fds = NULL;
889 	int err = -1;
890 
891 	if (is_prefix(*argv, "jited")) {
892 		if (disasm_init())
893 			return -1;
894 		mode = DUMP_JITED;
895 	} else if (is_prefix(*argv, "xlated")) {
896 		mode = DUMP_XLATED;
897 	} else {
898 		p_err("expected 'xlated' or 'jited', got: %s", *argv);
899 		return -1;
900 	}
901 	NEXT_ARG();
902 
903 	if (argc < 2)
904 		usage();
905 
906 	fds = malloc(sizeof(int));
907 	if (!fds) {
908 		p_err("mem alloc failed");
909 		return -1;
910 	}
911 	nb_fds = prog_parse_fds(&argc, &argv, &fds);
912 	if (nb_fds < 1)
913 		goto exit_free;
914 
915 	if (is_prefix(*argv, "file")) {
916 		NEXT_ARG();
917 		if (!argc) {
918 			p_err("expected file path");
919 			goto exit_close;
920 		}
921 		if (nb_fds > 1) {
922 			p_err("several programs matched");
923 			goto exit_close;
924 		}
925 
926 		filepath = *argv;
927 		NEXT_ARG();
928 	} else if (is_prefix(*argv, "opcodes")) {
929 		opcodes = true;
930 		NEXT_ARG();
931 	} else if (is_prefix(*argv, "visual")) {
932 		if (nb_fds > 1) {
933 			p_err("several programs matched");
934 			goto exit_close;
935 		}
936 
937 		visual = true;
938 		NEXT_ARG();
939 	} else if (is_prefix(*argv, "linum")) {
940 		linum = true;
941 		NEXT_ARG();
942 	}
943 
944 	if (argc) {
945 		usage();
946 		goto exit_close;
947 	}
948 
949 	if (json_output && nb_fds > 1)
950 		jsonw_start_array(json_wtr);	/* root array */
951 	for (i = 0; i < nb_fds; i++) {
952 		memset(&info, 0, sizeof(info));
953 
954 		err = bpf_obj_get_info_by_fd(fds[i], &info, &info_len);
955 		if (err) {
956 			p_err("can't get prog info: %s", strerror(errno));
957 			break;
958 		}
959 
960 		err = prep_prog_info(&info, mode, &info_data, &info_data_sz);
961 		if (err) {
962 			p_err("can't grow prog info_data");
963 			break;
964 		}
965 
966 		err = bpf_obj_get_info_by_fd(fds[i], &info, &info_len);
967 		if (err) {
968 			p_err("can't get prog info: %s", strerror(errno));
969 			break;
970 		}
971 
972 		if (json_output && nb_fds > 1) {
973 			jsonw_start_object(json_wtr);	/* prog object */
974 			print_prog_header_json(&info, fds[i]);
975 			jsonw_name(json_wtr, "insns");
976 		} else if (nb_fds > 1) {
977 			print_prog_header_plain(&info, fds[i]);
978 		}
979 
980 		err = prog_dump(&info, mode, filepath, opcodes, visual, linum);
981 
982 		if (json_output && nb_fds > 1)
983 			jsonw_end_object(json_wtr);	/* prog object */
984 		else if (i != nb_fds - 1 && nb_fds > 1)
985 			printf("\n");
986 
987 		if (err)
988 			break;
989 		close(fds[i]);
990 	}
991 	if (json_output && nb_fds > 1)
992 		jsonw_end_array(json_wtr);	/* root array */
993 
994 exit_close:
995 	for (; i < nb_fds; i++)
996 		close(fds[i]);
997 exit_free:
998 	free(info_data);
999 	free(fds);
1000 	return err;
1001 }
1002 
1003 static int do_pin(int argc, char **argv)
1004 {
1005 	int err;
1006 
1007 	err = do_pin_any(argc, argv, prog_parse_fd);
1008 	if (!err && json_output)
1009 		jsonw_null(json_wtr);
1010 	return err;
1011 }
1012 
1013 struct map_replace {
1014 	int idx;
1015 	int fd;
1016 	char *name;
1017 };
1018 
1019 static int map_replace_compar(const void *p1, const void *p2)
1020 {
1021 	const struct map_replace *a = p1, *b = p2;
1022 
1023 	return a->idx - b->idx;
1024 }
1025 
1026 static int parse_attach_detach_args(int argc, char **argv, int *progfd,
1027 				    enum bpf_attach_type *attach_type,
1028 				    int *mapfd)
1029 {
1030 	if (!REQ_ARGS(3))
1031 		return -EINVAL;
1032 
1033 	*progfd = prog_parse_fd(&argc, &argv);
1034 	if (*progfd < 0)
1035 		return *progfd;
1036 
1037 	*attach_type = parse_attach_type(*argv);
1038 	if (*attach_type == __MAX_BPF_ATTACH_TYPE) {
1039 		p_err("invalid attach/detach type");
1040 		return -EINVAL;
1041 	}
1042 
1043 	if (*attach_type == BPF_FLOW_DISSECTOR) {
1044 		*mapfd = 0;
1045 		return 0;
1046 	}
1047 
1048 	NEXT_ARG();
1049 	if (!REQ_ARGS(2))
1050 		return -EINVAL;
1051 
1052 	*mapfd = map_parse_fd(&argc, &argv);
1053 	if (*mapfd < 0)
1054 		return *mapfd;
1055 
1056 	return 0;
1057 }
1058 
1059 static int do_attach(int argc, char **argv)
1060 {
1061 	enum bpf_attach_type attach_type;
1062 	int err, progfd;
1063 	int mapfd;
1064 
1065 	err = parse_attach_detach_args(argc, argv,
1066 				       &progfd, &attach_type, &mapfd);
1067 	if (err)
1068 		return err;
1069 
1070 	err = bpf_prog_attach(progfd, mapfd, attach_type, 0);
1071 	if (err) {
1072 		p_err("failed prog attach to map");
1073 		return -EINVAL;
1074 	}
1075 
1076 	if (json_output)
1077 		jsonw_null(json_wtr);
1078 	return 0;
1079 }
1080 
1081 static int do_detach(int argc, char **argv)
1082 {
1083 	enum bpf_attach_type attach_type;
1084 	int err, progfd;
1085 	int mapfd;
1086 
1087 	err = parse_attach_detach_args(argc, argv,
1088 				       &progfd, &attach_type, &mapfd);
1089 	if (err)
1090 		return err;
1091 
1092 	err = bpf_prog_detach2(progfd, mapfd, attach_type);
1093 	if (err) {
1094 		p_err("failed prog detach from map");
1095 		return -EINVAL;
1096 	}
1097 
1098 	if (json_output)
1099 		jsonw_null(json_wtr);
1100 	return 0;
1101 }
1102 
1103 static int check_single_stdin(char *file_data_in, char *file_ctx_in)
1104 {
1105 	if (file_data_in && file_ctx_in &&
1106 	    !strcmp(file_data_in, "-") && !strcmp(file_ctx_in, "-")) {
1107 		p_err("cannot use standard input for both data_in and ctx_in");
1108 		return -1;
1109 	}
1110 
1111 	return 0;
1112 }
1113 
1114 static int get_run_data(const char *fname, void **data_ptr, unsigned int *size)
1115 {
1116 	size_t block_size = 256;
1117 	size_t buf_size = block_size;
1118 	size_t nb_read = 0;
1119 	void *tmp;
1120 	FILE *f;
1121 
1122 	if (!fname) {
1123 		*data_ptr = NULL;
1124 		*size = 0;
1125 		return 0;
1126 	}
1127 
1128 	if (!strcmp(fname, "-"))
1129 		f = stdin;
1130 	else
1131 		f = fopen(fname, "r");
1132 	if (!f) {
1133 		p_err("failed to open %s: %s", fname, strerror(errno));
1134 		return -1;
1135 	}
1136 
1137 	*data_ptr = malloc(block_size);
1138 	if (!*data_ptr) {
1139 		p_err("failed to allocate memory for data_in/ctx_in: %s",
1140 		      strerror(errno));
1141 		goto err_fclose;
1142 	}
1143 
1144 	while ((nb_read += fread(*data_ptr + nb_read, 1, block_size, f))) {
1145 		if (feof(f))
1146 			break;
1147 		if (ferror(f)) {
1148 			p_err("failed to read data_in/ctx_in from %s: %s",
1149 			      fname, strerror(errno));
1150 			goto err_free;
1151 		}
1152 		if (nb_read > buf_size - block_size) {
1153 			if (buf_size == UINT32_MAX) {
1154 				p_err("data_in/ctx_in is too long (max: %d)",
1155 				      UINT32_MAX);
1156 				goto err_free;
1157 			}
1158 			/* No space for fread()-ing next chunk; realloc() */
1159 			buf_size *= 2;
1160 			tmp = realloc(*data_ptr, buf_size);
1161 			if (!tmp) {
1162 				p_err("failed to reallocate data_in/ctx_in: %s",
1163 				      strerror(errno));
1164 				goto err_free;
1165 			}
1166 			*data_ptr = tmp;
1167 		}
1168 	}
1169 	if (f != stdin)
1170 		fclose(f);
1171 
1172 	*size = nb_read;
1173 	return 0;
1174 
1175 err_free:
1176 	free(*data_ptr);
1177 	*data_ptr = NULL;
1178 err_fclose:
1179 	if (f != stdin)
1180 		fclose(f);
1181 	return -1;
1182 }
1183 
1184 static void hex_print(void *data, unsigned int size, FILE *f)
1185 {
1186 	size_t i, j;
1187 	char c;
1188 
1189 	for (i = 0; i < size; i += 16) {
1190 		/* Row offset */
1191 		fprintf(f, "%07zx\t", i);
1192 
1193 		/* Hexadecimal values */
1194 		for (j = i; j < i + 16 && j < size; j++)
1195 			fprintf(f, "%02x%s", *(uint8_t *)(data + j),
1196 				j % 2 ? " " : "");
1197 		for (; j < i + 16; j++)
1198 			fprintf(f, "  %s", j % 2 ? " " : "");
1199 
1200 		/* ASCII values (if relevant), '.' otherwise */
1201 		fprintf(f, "| ");
1202 		for (j = i; j < i + 16 && j < size; j++) {
1203 			c = *(char *)(data + j);
1204 			if (c < ' ' || c > '~')
1205 				c = '.';
1206 			fprintf(f, "%c%s", c, j == i + 7 ? " " : "");
1207 		}
1208 
1209 		fprintf(f, "\n");
1210 	}
1211 }
1212 
1213 static int
1214 print_run_output(void *data, unsigned int size, const char *fname,
1215 		 const char *json_key)
1216 {
1217 	size_t nb_written;
1218 	FILE *f;
1219 
1220 	if (!fname)
1221 		return 0;
1222 
1223 	if (!strcmp(fname, "-")) {
1224 		f = stdout;
1225 		if (json_output) {
1226 			jsonw_name(json_wtr, json_key);
1227 			print_data_json(data, size);
1228 		} else {
1229 			hex_print(data, size, f);
1230 		}
1231 		return 0;
1232 	}
1233 
1234 	f = fopen(fname, "w");
1235 	if (!f) {
1236 		p_err("failed to open %s: %s", fname, strerror(errno));
1237 		return -1;
1238 	}
1239 
1240 	nb_written = fwrite(data, 1, size, f);
1241 	fclose(f);
1242 	if (nb_written != size) {
1243 		p_err("failed to write output data/ctx: %s", strerror(errno));
1244 		return -1;
1245 	}
1246 
1247 	return 0;
1248 }
1249 
1250 static int alloc_run_data(void **data_ptr, unsigned int size_out)
1251 {
1252 	*data_ptr = calloc(size_out, 1);
1253 	if (!*data_ptr) {
1254 		p_err("failed to allocate memory for output data/ctx: %s",
1255 		      strerror(errno));
1256 		return -1;
1257 	}
1258 
1259 	return 0;
1260 }
1261 
1262 static int do_run(int argc, char **argv)
1263 {
1264 	char *data_fname_in = NULL, *data_fname_out = NULL;
1265 	char *ctx_fname_in = NULL, *ctx_fname_out = NULL;
1266 	const unsigned int default_size = SZ_32K;
1267 	void *data_in = NULL, *data_out = NULL;
1268 	void *ctx_in = NULL, *ctx_out = NULL;
1269 	unsigned int repeat = 1;
1270 	int fd, err;
1271 	LIBBPF_OPTS(bpf_test_run_opts, test_attr);
1272 
1273 	if (!REQ_ARGS(4))
1274 		return -1;
1275 
1276 	fd = prog_parse_fd(&argc, &argv);
1277 	if (fd < 0)
1278 		return -1;
1279 
1280 	while (argc) {
1281 		if (detect_common_prefix(*argv, "data_in", "data_out",
1282 					 "data_size_out", NULL))
1283 			return -1;
1284 		if (detect_common_prefix(*argv, "ctx_in", "ctx_out",
1285 					 "ctx_size_out", NULL))
1286 			return -1;
1287 
1288 		if (is_prefix(*argv, "data_in")) {
1289 			NEXT_ARG();
1290 			if (!REQ_ARGS(1))
1291 				return -1;
1292 
1293 			data_fname_in = GET_ARG();
1294 			if (check_single_stdin(data_fname_in, ctx_fname_in))
1295 				return -1;
1296 		} else if (is_prefix(*argv, "data_out")) {
1297 			NEXT_ARG();
1298 			if (!REQ_ARGS(1))
1299 				return -1;
1300 
1301 			data_fname_out = GET_ARG();
1302 		} else if (is_prefix(*argv, "data_size_out")) {
1303 			char *endptr;
1304 
1305 			NEXT_ARG();
1306 			if (!REQ_ARGS(1))
1307 				return -1;
1308 
1309 			test_attr.data_size_out = strtoul(*argv, &endptr, 0);
1310 			if (*endptr) {
1311 				p_err("can't parse %s as output data size",
1312 				      *argv);
1313 				return -1;
1314 			}
1315 			NEXT_ARG();
1316 		} else if (is_prefix(*argv, "ctx_in")) {
1317 			NEXT_ARG();
1318 			if (!REQ_ARGS(1))
1319 				return -1;
1320 
1321 			ctx_fname_in = GET_ARG();
1322 			if (check_single_stdin(data_fname_in, ctx_fname_in))
1323 				return -1;
1324 		} else if (is_prefix(*argv, "ctx_out")) {
1325 			NEXT_ARG();
1326 			if (!REQ_ARGS(1))
1327 				return -1;
1328 
1329 			ctx_fname_out = GET_ARG();
1330 		} else if (is_prefix(*argv, "ctx_size_out")) {
1331 			char *endptr;
1332 
1333 			NEXT_ARG();
1334 			if (!REQ_ARGS(1))
1335 				return -1;
1336 
1337 			test_attr.ctx_size_out = strtoul(*argv, &endptr, 0);
1338 			if (*endptr) {
1339 				p_err("can't parse %s as output context size",
1340 				      *argv);
1341 				return -1;
1342 			}
1343 			NEXT_ARG();
1344 		} else if (is_prefix(*argv, "repeat")) {
1345 			char *endptr;
1346 
1347 			NEXT_ARG();
1348 			if (!REQ_ARGS(1))
1349 				return -1;
1350 
1351 			repeat = strtoul(*argv, &endptr, 0);
1352 			if (*endptr) {
1353 				p_err("can't parse %s as repeat number",
1354 				      *argv);
1355 				return -1;
1356 			}
1357 			NEXT_ARG();
1358 		} else {
1359 			p_err("expected no more arguments, 'data_in', 'data_out', 'data_size_out', 'ctx_in', 'ctx_out', 'ctx_size_out' or 'repeat', got: '%s'?",
1360 			      *argv);
1361 			return -1;
1362 		}
1363 	}
1364 
1365 	err = get_run_data(data_fname_in, &data_in, &test_attr.data_size_in);
1366 	if (err)
1367 		return -1;
1368 
1369 	if (data_in) {
1370 		if (!test_attr.data_size_out)
1371 			test_attr.data_size_out = default_size;
1372 		err = alloc_run_data(&data_out, test_attr.data_size_out);
1373 		if (err)
1374 			goto free_data_in;
1375 	}
1376 
1377 	err = get_run_data(ctx_fname_in, &ctx_in, &test_attr.ctx_size_in);
1378 	if (err)
1379 		goto free_data_out;
1380 
1381 	if (ctx_in) {
1382 		if (!test_attr.ctx_size_out)
1383 			test_attr.ctx_size_out = default_size;
1384 		err = alloc_run_data(&ctx_out, test_attr.ctx_size_out);
1385 		if (err)
1386 			goto free_ctx_in;
1387 	}
1388 
1389 	test_attr.repeat	= repeat;
1390 	test_attr.data_in	= data_in;
1391 	test_attr.data_out	= data_out;
1392 	test_attr.ctx_in	= ctx_in;
1393 	test_attr.ctx_out	= ctx_out;
1394 
1395 	err = bpf_prog_test_run_opts(fd, &test_attr);
1396 	if (err) {
1397 		p_err("failed to run program: %s", strerror(errno));
1398 		goto free_ctx_out;
1399 	}
1400 
1401 	err = 0;
1402 
1403 	if (json_output)
1404 		jsonw_start_object(json_wtr);	/* root */
1405 
1406 	/* Do not exit on errors occurring when printing output data/context,
1407 	 * we still want to print return value and duration for program run.
1408 	 */
1409 	if (test_attr.data_size_out)
1410 		err += print_run_output(test_attr.data_out,
1411 					test_attr.data_size_out,
1412 					data_fname_out, "data_out");
1413 	if (test_attr.ctx_size_out)
1414 		err += print_run_output(test_attr.ctx_out,
1415 					test_attr.ctx_size_out,
1416 					ctx_fname_out, "ctx_out");
1417 
1418 	if (json_output) {
1419 		jsonw_uint_field(json_wtr, "retval", test_attr.retval);
1420 		jsonw_uint_field(json_wtr, "duration", test_attr.duration);
1421 		jsonw_end_object(json_wtr);	/* root */
1422 	} else {
1423 		fprintf(stdout, "Return value: %u, duration%s: %uns\n",
1424 			test_attr.retval,
1425 			repeat > 1 ? " (average)" : "", test_attr.duration);
1426 	}
1427 
1428 free_ctx_out:
1429 	free(ctx_out);
1430 free_ctx_in:
1431 	free(ctx_in);
1432 free_data_out:
1433 	free(data_out);
1434 free_data_in:
1435 	free(data_in);
1436 
1437 	return err;
1438 }
1439 
1440 static int
1441 get_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
1442 		      enum bpf_attach_type *expected_attach_type)
1443 {
1444 	libbpf_print_fn_t print_backup;
1445 	int ret;
1446 
1447 	ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
1448 	if (!ret)
1449 		return ret;
1450 
1451 	/* libbpf_prog_type_by_name() failed, let's re-run with debug level */
1452 	print_backup = libbpf_set_print(print_all_levels);
1453 	ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
1454 	libbpf_set_print(print_backup);
1455 
1456 	return ret;
1457 }
1458 
1459 static int
1460 auto_attach_program(struct bpf_program *prog, const char *path)
1461 {
1462 	struct bpf_link *link;
1463 	int err;
1464 
1465 	link = bpf_program__attach(prog);
1466 	if (!link) {
1467 		p_info("Program %s does not support autoattach, falling back to pinning",
1468 		       bpf_program__name(prog));
1469 		return bpf_obj_pin(bpf_program__fd(prog), path);
1470 	}
1471 
1472 	err = bpf_link__pin(link, path);
1473 	bpf_link__destroy(link);
1474 	return err;
1475 }
1476 
1477 static int pathname_concat(char *buf, size_t buf_sz, const char *path, const char *name)
1478 {
1479 	int len;
1480 
1481 	len = snprintf(buf, buf_sz, "%s/%s", path, name);
1482 	if (len < 0)
1483 		return -EINVAL;
1484 	if ((size_t)len >= buf_sz)
1485 		return -ENAMETOOLONG;
1486 
1487 	return 0;
1488 }
1489 
1490 static int
1491 auto_attach_programs(struct bpf_object *obj, const char *path)
1492 {
1493 	struct bpf_program *prog;
1494 	char buf[PATH_MAX];
1495 	int err;
1496 
1497 	bpf_object__for_each_program(prog, obj) {
1498 		err = pathname_concat(buf, sizeof(buf), path, bpf_program__name(prog));
1499 		if (err)
1500 			goto err_unpin_programs;
1501 
1502 		err = auto_attach_program(prog, buf);
1503 		if (err)
1504 			goto err_unpin_programs;
1505 	}
1506 
1507 	return 0;
1508 
1509 err_unpin_programs:
1510 	while ((prog = bpf_object__prev_program(obj, prog))) {
1511 		if (pathname_concat(buf, sizeof(buf), path, bpf_program__name(prog)))
1512 			continue;
1513 
1514 		bpf_program__unpin(prog, buf);
1515 	}
1516 
1517 	return err;
1518 }
1519 
1520 static int load_with_options(int argc, char **argv, bool first_prog_only)
1521 {
1522 	enum bpf_prog_type common_prog_type = BPF_PROG_TYPE_UNSPEC;
1523 	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
1524 		.relaxed_maps = relaxed_maps,
1525 	);
1526 	enum bpf_attach_type expected_attach_type;
1527 	struct map_replace *map_replace = NULL;
1528 	struct bpf_program *prog = NULL, *pos;
1529 	unsigned int old_map_fds = 0;
1530 	const char *pinmaps = NULL;
1531 	bool auto_attach = false;
1532 	struct bpf_object *obj;
1533 	struct bpf_map *map;
1534 	const char *pinfile;
1535 	unsigned int i, j;
1536 	__u32 ifindex = 0;
1537 	const char *file;
1538 	int idx, err;
1539 
1540 
1541 	if (!REQ_ARGS(2))
1542 		return -1;
1543 	file = GET_ARG();
1544 	pinfile = GET_ARG();
1545 
1546 	while (argc) {
1547 		if (is_prefix(*argv, "type")) {
1548 			NEXT_ARG();
1549 
1550 			if (common_prog_type != BPF_PROG_TYPE_UNSPEC) {
1551 				p_err("program type already specified");
1552 				goto err_free_reuse_maps;
1553 			}
1554 			if (!REQ_ARGS(1))
1555 				goto err_free_reuse_maps;
1556 
1557 			err = libbpf_prog_type_by_name(*argv, &common_prog_type,
1558 						       &expected_attach_type);
1559 			if (err < 0) {
1560 				/* Put a '/' at the end of type to appease libbpf */
1561 				char *type = malloc(strlen(*argv) + 2);
1562 
1563 				if (!type) {
1564 					p_err("mem alloc failed");
1565 					goto err_free_reuse_maps;
1566 				}
1567 				*type = 0;
1568 				strcat(type, *argv);
1569 				strcat(type, "/");
1570 
1571 				err = get_prog_type_by_name(type, &common_prog_type,
1572 							    &expected_attach_type);
1573 				free(type);
1574 				if (err < 0)
1575 					goto err_free_reuse_maps;
1576 			}
1577 
1578 			NEXT_ARG();
1579 		} else if (is_prefix(*argv, "map")) {
1580 			void *new_map_replace;
1581 			char *endptr, *name;
1582 			int fd;
1583 
1584 			NEXT_ARG();
1585 
1586 			if (!REQ_ARGS(4))
1587 				goto err_free_reuse_maps;
1588 
1589 			if (is_prefix(*argv, "idx")) {
1590 				NEXT_ARG();
1591 
1592 				idx = strtoul(*argv, &endptr, 0);
1593 				if (*endptr) {
1594 					p_err("can't parse %s as IDX", *argv);
1595 					goto err_free_reuse_maps;
1596 				}
1597 				name = NULL;
1598 			} else if (is_prefix(*argv, "name")) {
1599 				NEXT_ARG();
1600 
1601 				name = *argv;
1602 				idx = -1;
1603 			} else {
1604 				p_err("expected 'idx' or 'name', got: '%s'?",
1605 				      *argv);
1606 				goto err_free_reuse_maps;
1607 			}
1608 			NEXT_ARG();
1609 
1610 			fd = map_parse_fd(&argc, &argv);
1611 			if (fd < 0)
1612 				goto err_free_reuse_maps;
1613 
1614 			new_map_replace = libbpf_reallocarray(map_replace,
1615 							      old_map_fds + 1,
1616 							      sizeof(*map_replace));
1617 			if (!new_map_replace) {
1618 				p_err("mem alloc failed");
1619 				goto err_free_reuse_maps;
1620 			}
1621 			map_replace = new_map_replace;
1622 
1623 			map_replace[old_map_fds].idx = idx;
1624 			map_replace[old_map_fds].name = name;
1625 			map_replace[old_map_fds].fd = fd;
1626 			old_map_fds++;
1627 		} else if (is_prefix(*argv, "dev")) {
1628 			NEXT_ARG();
1629 
1630 			if (ifindex) {
1631 				p_err("offload device already specified");
1632 				goto err_free_reuse_maps;
1633 			}
1634 			if (!REQ_ARGS(1))
1635 				goto err_free_reuse_maps;
1636 
1637 			ifindex = if_nametoindex(*argv);
1638 			if (!ifindex) {
1639 				p_err("unrecognized netdevice '%s': %s",
1640 				      *argv, strerror(errno));
1641 				goto err_free_reuse_maps;
1642 			}
1643 			NEXT_ARG();
1644 		} else if (is_prefix(*argv, "pinmaps")) {
1645 			NEXT_ARG();
1646 
1647 			if (!REQ_ARGS(1))
1648 				goto err_free_reuse_maps;
1649 
1650 			pinmaps = GET_ARG();
1651 		} else if (is_prefix(*argv, "autoattach")) {
1652 			auto_attach = true;
1653 			NEXT_ARG();
1654 		} else {
1655 			p_err("expected no more arguments, 'type', 'map' or 'dev', got: '%s'?",
1656 			      *argv);
1657 			goto err_free_reuse_maps;
1658 		}
1659 	}
1660 
1661 	set_max_rlimit();
1662 
1663 	if (verifier_logs)
1664 		/* log_level1 + log_level2 + stats, but not stable UAPI */
1665 		open_opts.kernel_log_level = 1 + 2 + 4;
1666 
1667 	obj = bpf_object__open_file(file, &open_opts);
1668 	if (libbpf_get_error(obj)) {
1669 		p_err("failed to open object file");
1670 		goto err_free_reuse_maps;
1671 	}
1672 
1673 	bpf_object__for_each_program(pos, obj) {
1674 		enum bpf_prog_type prog_type = common_prog_type;
1675 
1676 		if (prog_type == BPF_PROG_TYPE_UNSPEC) {
1677 			const char *sec_name = bpf_program__section_name(pos);
1678 
1679 			err = get_prog_type_by_name(sec_name, &prog_type,
1680 						    &expected_attach_type);
1681 			if (err < 0)
1682 				goto err_close_obj;
1683 		}
1684 
1685 		bpf_program__set_ifindex(pos, ifindex);
1686 		bpf_program__set_type(pos, prog_type);
1687 		bpf_program__set_expected_attach_type(pos, expected_attach_type);
1688 	}
1689 
1690 	qsort(map_replace, old_map_fds, sizeof(*map_replace),
1691 	      map_replace_compar);
1692 
1693 	/* After the sort maps by name will be first on the list, because they
1694 	 * have idx == -1.  Resolve them.
1695 	 */
1696 	j = 0;
1697 	while (j < old_map_fds && map_replace[j].name) {
1698 		i = 0;
1699 		bpf_object__for_each_map(map, obj) {
1700 			if (!strcmp(bpf_map__name(map), map_replace[j].name)) {
1701 				map_replace[j].idx = i;
1702 				break;
1703 			}
1704 			i++;
1705 		}
1706 		if (map_replace[j].idx == -1) {
1707 			p_err("unable to find map '%s'", map_replace[j].name);
1708 			goto err_close_obj;
1709 		}
1710 		j++;
1711 	}
1712 	/* Resort if any names were resolved */
1713 	if (j)
1714 		qsort(map_replace, old_map_fds, sizeof(*map_replace),
1715 		      map_replace_compar);
1716 
1717 	/* Set ifindex and name reuse */
1718 	j = 0;
1719 	idx = 0;
1720 	bpf_object__for_each_map(map, obj) {
1721 		if (bpf_map__type(map) != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
1722 			bpf_map__set_ifindex(map, ifindex);
1723 
1724 		if (j < old_map_fds && idx == map_replace[j].idx) {
1725 			err = bpf_map__reuse_fd(map, map_replace[j++].fd);
1726 			if (err) {
1727 				p_err("unable to set up map reuse: %d", err);
1728 				goto err_close_obj;
1729 			}
1730 
1731 			/* Next reuse wants to apply to the same map */
1732 			if (j < old_map_fds && map_replace[j].idx == idx) {
1733 				p_err("replacement for map idx %d specified more than once",
1734 				      idx);
1735 				goto err_close_obj;
1736 			}
1737 		}
1738 
1739 		idx++;
1740 	}
1741 	if (j < old_map_fds) {
1742 		p_err("map idx '%d' not used", map_replace[j].idx);
1743 		goto err_close_obj;
1744 	}
1745 
1746 	err = bpf_object__load(obj);
1747 	if (err) {
1748 		p_err("failed to load object file");
1749 		goto err_close_obj;
1750 	}
1751 
1752 	err = mount_bpffs_for_pin(pinfile);
1753 	if (err)
1754 		goto err_close_obj;
1755 
1756 	if (first_prog_only) {
1757 		prog = bpf_object__next_program(obj, NULL);
1758 		if (!prog) {
1759 			p_err("object file doesn't contain any bpf program");
1760 			goto err_close_obj;
1761 		}
1762 
1763 		if (auto_attach)
1764 			err = auto_attach_program(prog, pinfile);
1765 		else
1766 			err = bpf_obj_pin(bpf_program__fd(prog), pinfile);
1767 		if (err) {
1768 			p_err("failed to pin program %s",
1769 			      bpf_program__section_name(prog));
1770 			goto err_close_obj;
1771 		}
1772 	} else {
1773 		if (auto_attach)
1774 			err = auto_attach_programs(obj, pinfile);
1775 		else
1776 			err = bpf_object__pin_programs(obj, pinfile);
1777 		if (err) {
1778 			p_err("failed to pin all programs");
1779 			goto err_close_obj;
1780 		}
1781 	}
1782 
1783 	if (pinmaps) {
1784 		err = bpf_object__pin_maps(obj, pinmaps);
1785 		if (err) {
1786 			p_err("failed to pin all maps");
1787 			goto err_unpin;
1788 		}
1789 	}
1790 
1791 	if (json_output)
1792 		jsonw_null(json_wtr);
1793 
1794 	bpf_object__close(obj);
1795 	for (i = 0; i < old_map_fds; i++)
1796 		close(map_replace[i].fd);
1797 	free(map_replace);
1798 
1799 	return 0;
1800 
1801 err_unpin:
1802 	if (first_prog_only)
1803 		unlink(pinfile);
1804 	else
1805 		bpf_object__unpin_programs(obj, pinfile);
1806 err_close_obj:
1807 	if (!legacy_libbpf) {
1808 		p_info("Warning: bpftool is now running in libbpf strict mode and has more stringent requirements about BPF programs.\n"
1809 		       "If it used to work for this object file but now doesn't, see --legacy option for more details.\n");
1810 	}
1811 
1812 	bpf_object__close(obj);
1813 err_free_reuse_maps:
1814 	for (i = 0; i < old_map_fds; i++)
1815 		close(map_replace[i].fd);
1816 	free(map_replace);
1817 	return -1;
1818 }
1819 
1820 static int count_open_fds(void)
1821 {
1822 	DIR *dp = opendir("/proc/self/fd");
1823 	struct dirent *de;
1824 	int cnt = -3;
1825 
1826 	if (!dp)
1827 		return -1;
1828 
1829 	while ((de = readdir(dp)))
1830 		cnt++;
1831 
1832 	closedir(dp);
1833 	return cnt;
1834 }
1835 
1836 static int try_loader(struct gen_loader_opts *gen)
1837 {
1838 	struct bpf_load_and_run_opts opts = {};
1839 	struct bpf_loader_ctx *ctx;
1840 	int ctx_sz = sizeof(*ctx) + 64 * max(sizeof(struct bpf_map_desc),
1841 					     sizeof(struct bpf_prog_desc));
1842 	int log_buf_sz = (1u << 24) - 1;
1843 	int err, fds_before, fd_delta;
1844 	char *log_buf = NULL;
1845 
1846 	ctx = alloca(ctx_sz);
1847 	memset(ctx, 0, ctx_sz);
1848 	ctx->sz = ctx_sz;
1849 	if (verifier_logs) {
1850 		ctx->log_level = 1 + 2 + 4;
1851 		ctx->log_size = log_buf_sz;
1852 		log_buf = malloc(log_buf_sz);
1853 		if (!log_buf)
1854 			return -ENOMEM;
1855 		ctx->log_buf = (long) log_buf;
1856 	}
1857 	opts.ctx = ctx;
1858 	opts.data = gen->data;
1859 	opts.data_sz = gen->data_sz;
1860 	opts.insns = gen->insns;
1861 	opts.insns_sz = gen->insns_sz;
1862 	fds_before = count_open_fds();
1863 	err = bpf_load_and_run(&opts);
1864 	fd_delta = count_open_fds() - fds_before;
1865 	if (err < 0 || verifier_logs) {
1866 		fprintf(stderr, "err %d\n%s\n%s", err, opts.errstr, log_buf);
1867 		if (fd_delta && err < 0)
1868 			fprintf(stderr, "loader prog leaked %d FDs\n",
1869 				fd_delta);
1870 	}
1871 	free(log_buf);
1872 	return err;
1873 }
1874 
1875 static int do_loader(int argc, char **argv)
1876 {
1877 	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts);
1878 	DECLARE_LIBBPF_OPTS(gen_loader_opts, gen);
1879 	struct bpf_object *obj;
1880 	const char *file;
1881 	int err = 0;
1882 
1883 	if (!REQ_ARGS(1))
1884 		return -1;
1885 	file = GET_ARG();
1886 
1887 	if (verifier_logs)
1888 		/* log_level1 + log_level2 + stats, but not stable UAPI */
1889 		open_opts.kernel_log_level = 1 + 2 + 4;
1890 
1891 	obj = bpf_object__open_file(file, &open_opts);
1892 	if (libbpf_get_error(obj)) {
1893 		p_err("failed to open object file");
1894 		goto err_close_obj;
1895 	}
1896 
1897 	err = bpf_object__gen_loader(obj, &gen);
1898 	if (err)
1899 		goto err_close_obj;
1900 
1901 	err = bpf_object__load(obj);
1902 	if (err) {
1903 		p_err("failed to load object file");
1904 		goto err_close_obj;
1905 	}
1906 
1907 	if (verifier_logs) {
1908 		struct dump_data dd = {};
1909 
1910 		kernel_syms_load(&dd);
1911 		dump_xlated_plain(&dd, (void *)gen.insns, gen.insns_sz, false, false);
1912 		kernel_syms_destroy(&dd);
1913 	}
1914 	err = try_loader(&gen);
1915 err_close_obj:
1916 	bpf_object__close(obj);
1917 	return err;
1918 }
1919 
1920 static int do_load(int argc, char **argv)
1921 {
1922 	if (use_loader)
1923 		return do_loader(argc, argv);
1924 	return load_with_options(argc, argv, true);
1925 }
1926 
1927 static int do_loadall(int argc, char **argv)
1928 {
1929 	return load_with_options(argc, argv, false);
1930 }
1931 
1932 #ifdef BPFTOOL_WITHOUT_SKELETONS
1933 
1934 static int do_profile(int argc, char **argv)
1935 {
1936 	p_err("bpftool prog profile command is not supported. Please build bpftool with clang >= 10.0.0");
1937 	return 0;
1938 }
1939 
1940 #else /* BPFTOOL_WITHOUT_SKELETONS */
1941 
1942 #include "profiler.skel.h"
1943 
1944 struct profile_metric {
1945 	const char *name;
1946 	struct bpf_perf_event_value val;
1947 	struct perf_event_attr attr;
1948 	bool selected;
1949 
1950 	/* calculate ratios like instructions per cycle */
1951 	const int ratio_metric; /* 0 for N/A, 1 for index 0 (cycles) */
1952 	const char *ratio_desc;
1953 	const float ratio_mul;
1954 } metrics[] = {
1955 	{
1956 		.name = "cycles",
1957 		.attr = {
1958 			.type = PERF_TYPE_HARDWARE,
1959 			.config = PERF_COUNT_HW_CPU_CYCLES,
1960 			.exclude_user = 1,
1961 		},
1962 	},
1963 	{
1964 		.name = "instructions",
1965 		.attr = {
1966 			.type = PERF_TYPE_HARDWARE,
1967 			.config = PERF_COUNT_HW_INSTRUCTIONS,
1968 			.exclude_user = 1,
1969 		},
1970 		.ratio_metric = 1,
1971 		.ratio_desc = "insns per cycle",
1972 		.ratio_mul = 1.0,
1973 	},
1974 	{
1975 		.name = "l1d_loads",
1976 		.attr = {
1977 			.type = PERF_TYPE_HW_CACHE,
1978 			.config =
1979 				PERF_COUNT_HW_CACHE_L1D |
1980 				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
1981 				(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
1982 			.exclude_user = 1,
1983 		},
1984 	},
1985 	{
1986 		.name = "llc_misses",
1987 		.attr = {
1988 			.type = PERF_TYPE_HW_CACHE,
1989 			.config =
1990 				PERF_COUNT_HW_CACHE_LL |
1991 				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
1992 				(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
1993 			.exclude_user = 1
1994 		},
1995 		.ratio_metric = 2,
1996 		.ratio_desc = "LLC misses per million insns",
1997 		.ratio_mul = 1e6,
1998 	},
1999 	{
2000 		.name = "itlb_misses",
2001 		.attr = {
2002 			.type = PERF_TYPE_HW_CACHE,
2003 			.config =
2004 				PERF_COUNT_HW_CACHE_ITLB |
2005 				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
2006 				(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
2007 			.exclude_user = 1
2008 		},
2009 		.ratio_metric = 2,
2010 		.ratio_desc = "itlb misses per million insns",
2011 		.ratio_mul = 1e6,
2012 	},
2013 	{
2014 		.name = "dtlb_misses",
2015 		.attr = {
2016 			.type = PERF_TYPE_HW_CACHE,
2017 			.config =
2018 				PERF_COUNT_HW_CACHE_DTLB |
2019 				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
2020 				(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
2021 			.exclude_user = 1
2022 		},
2023 		.ratio_metric = 2,
2024 		.ratio_desc = "dtlb misses per million insns",
2025 		.ratio_mul = 1e6,
2026 	},
2027 };
2028 
2029 static __u64 profile_total_count;
2030 
2031 #define MAX_NUM_PROFILE_METRICS 4
2032 
2033 static int profile_parse_metrics(int argc, char **argv)
2034 {
2035 	unsigned int metric_cnt;
2036 	int selected_cnt = 0;
2037 	unsigned int i;
2038 
2039 	metric_cnt = ARRAY_SIZE(metrics);
2040 
2041 	while (argc > 0) {
2042 		for (i = 0; i < metric_cnt; i++) {
2043 			if (is_prefix(argv[0], metrics[i].name)) {
2044 				if (!metrics[i].selected)
2045 					selected_cnt++;
2046 				metrics[i].selected = true;
2047 				break;
2048 			}
2049 		}
2050 		if (i == metric_cnt) {
2051 			p_err("unknown metric %s", argv[0]);
2052 			return -1;
2053 		}
2054 		NEXT_ARG();
2055 	}
2056 	if (selected_cnt > MAX_NUM_PROFILE_METRICS) {
2057 		p_err("too many (%d) metrics, please specify no more than %d metrics at at time",
2058 		      selected_cnt, MAX_NUM_PROFILE_METRICS);
2059 		return -1;
2060 	}
2061 	return selected_cnt;
2062 }
2063 
2064 static void profile_read_values(struct profiler_bpf *obj)
2065 {
2066 	__u32 m, cpu, num_cpu = obj->rodata->num_cpu;
2067 	int reading_map_fd, count_map_fd;
2068 	__u64 counts[num_cpu];
2069 	__u32 key = 0;
2070 	int err;
2071 
2072 	reading_map_fd = bpf_map__fd(obj->maps.accum_readings);
2073 	count_map_fd = bpf_map__fd(obj->maps.counts);
2074 	if (reading_map_fd < 0 || count_map_fd < 0) {
2075 		p_err("failed to get fd for map");
2076 		return;
2077 	}
2078 
2079 	err = bpf_map_lookup_elem(count_map_fd, &key, counts);
2080 	if (err) {
2081 		p_err("failed to read count_map: %s", strerror(errno));
2082 		return;
2083 	}
2084 
2085 	profile_total_count = 0;
2086 	for (cpu = 0; cpu < num_cpu; cpu++)
2087 		profile_total_count += counts[cpu];
2088 
2089 	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
2090 		struct bpf_perf_event_value values[num_cpu];
2091 
2092 		if (!metrics[m].selected)
2093 			continue;
2094 
2095 		err = bpf_map_lookup_elem(reading_map_fd, &key, values);
2096 		if (err) {
2097 			p_err("failed to read reading_map: %s",
2098 			      strerror(errno));
2099 			return;
2100 		}
2101 		for (cpu = 0; cpu < num_cpu; cpu++) {
2102 			metrics[m].val.counter += values[cpu].counter;
2103 			metrics[m].val.enabled += values[cpu].enabled;
2104 			metrics[m].val.running += values[cpu].running;
2105 		}
2106 		key++;
2107 	}
2108 }
2109 
2110 static void profile_print_readings_json(void)
2111 {
2112 	__u32 m;
2113 
2114 	jsonw_start_array(json_wtr);
2115 	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
2116 		if (!metrics[m].selected)
2117 			continue;
2118 		jsonw_start_object(json_wtr);
2119 		jsonw_string_field(json_wtr, "metric", metrics[m].name);
2120 		jsonw_lluint_field(json_wtr, "run_cnt", profile_total_count);
2121 		jsonw_lluint_field(json_wtr, "value", metrics[m].val.counter);
2122 		jsonw_lluint_field(json_wtr, "enabled", metrics[m].val.enabled);
2123 		jsonw_lluint_field(json_wtr, "running", metrics[m].val.running);
2124 
2125 		jsonw_end_object(json_wtr);
2126 	}
2127 	jsonw_end_array(json_wtr);
2128 }
2129 
2130 static void profile_print_readings_plain(void)
2131 {
2132 	__u32 m;
2133 
2134 	printf("\n%18llu %-20s\n", profile_total_count, "run_cnt");
2135 	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
2136 		struct bpf_perf_event_value *val = &metrics[m].val;
2137 		int r;
2138 
2139 		if (!metrics[m].selected)
2140 			continue;
2141 		printf("%18llu %-20s", val->counter, metrics[m].name);
2142 
2143 		r = metrics[m].ratio_metric - 1;
2144 		if (r >= 0 && metrics[r].selected &&
2145 		    metrics[r].val.counter > 0) {
2146 			printf("# %8.2f %-30s",
2147 			       val->counter * metrics[m].ratio_mul /
2148 			       metrics[r].val.counter,
2149 			       metrics[m].ratio_desc);
2150 		} else {
2151 			printf("%-41s", "");
2152 		}
2153 
2154 		if (val->enabled > val->running)
2155 			printf("(%4.2f%%)",
2156 			       val->running * 100.0 / val->enabled);
2157 		printf("\n");
2158 	}
2159 }
2160 
2161 static void profile_print_readings(void)
2162 {
2163 	if (json_output)
2164 		profile_print_readings_json();
2165 	else
2166 		profile_print_readings_plain();
2167 }
2168 
2169 static char *profile_target_name(int tgt_fd)
2170 {
2171 	struct bpf_func_info func_info;
2172 	struct bpf_prog_info info = {};
2173 	__u32 info_len = sizeof(info);
2174 	const struct btf_type *t;
2175 	__u32 func_info_rec_size;
2176 	struct btf *btf = NULL;
2177 	char *name = NULL;
2178 	int err;
2179 
2180 	err = bpf_obj_get_info_by_fd(tgt_fd, &info, &info_len);
2181 	if (err) {
2182 		p_err("failed to bpf_obj_get_info_by_fd for prog FD %d", tgt_fd);
2183 		goto out;
2184 	}
2185 
2186 	if (info.btf_id == 0) {
2187 		p_err("prog FD %d doesn't have valid btf", tgt_fd);
2188 		goto out;
2189 	}
2190 
2191 	func_info_rec_size = info.func_info_rec_size;
2192 	if (info.nr_func_info == 0) {
2193 		p_err("bpf_obj_get_info_by_fd for prog FD %d found 0 func_info", tgt_fd);
2194 		goto out;
2195 	}
2196 
2197 	memset(&info, 0, sizeof(info));
2198 	info.nr_func_info = 1;
2199 	info.func_info_rec_size = func_info_rec_size;
2200 	info.func_info = ptr_to_u64(&func_info);
2201 
2202 	err = bpf_obj_get_info_by_fd(tgt_fd, &info, &info_len);
2203 	if (err) {
2204 		p_err("failed to get func_info for prog FD %d", tgt_fd);
2205 		goto out;
2206 	}
2207 
2208 	btf = btf__load_from_kernel_by_id(info.btf_id);
2209 	if (libbpf_get_error(btf)) {
2210 		p_err("failed to load btf for prog FD %d", tgt_fd);
2211 		goto out;
2212 	}
2213 
2214 	t = btf__type_by_id(btf, func_info.type_id);
2215 	if (!t) {
2216 		p_err("btf %d doesn't have type %d",
2217 		      info.btf_id, func_info.type_id);
2218 		goto out;
2219 	}
2220 	name = strdup(btf__name_by_offset(btf, t->name_off));
2221 out:
2222 	btf__free(btf);
2223 	return name;
2224 }
2225 
2226 static struct profiler_bpf *profile_obj;
2227 static int profile_tgt_fd = -1;
2228 static char *profile_tgt_name;
2229 static int *profile_perf_events;
2230 static int profile_perf_event_cnt;
2231 
2232 static void profile_close_perf_events(struct profiler_bpf *obj)
2233 {
2234 	int i;
2235 
2236 	for (i = profile_perf_event_cnt - 1; i >= 0; i--)
2237 		close(profile_perf_events[i]);
2238 
2239 	free(profile_perf_events);
2240 	profile_perf_event_cnt = 0;
2241 }
2242 
2243 static int profile_open_perf_events(struct profiler_bpf *obj)
2244 {
2245 	unsigned int cpu, m;
2246 	int map_fd, pmu_fd;
2247 
2248 	profile_perf_events = calloc(
2249 		sizeof(int), obj->rodata->num_cpu * obj->rodata->num_metric);
2250 	if (!profile_perf_events) {
2251 		p_err("failed to allocate memory for perf_event array: %s",
2252 		      strerror(errno));
2253 		return -1;
2254 	}
2255 	map_fd = bpf_map__fd(obj->maps.events);
2256 	if (map_fd < 0) {
2257 		p_err("failed to get fd for events map");
2258 		return -1;
2259 	}
2260 
2261 	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
2262 		if (!metrics[m].selected)
2263 			continue;
2264 		for (cpu = 0; cpu < obj->rodata->num_cpu; cpu++) {
2265 			pmu_fd = syscall(__NR_perf_event_open, &metrics[m].attr,
2266 					 -1/*pid*/, cpu, -1/*group_fd*/, 0);
2267 			if (pmu_fd < 0 ||
2268 			    bpf_map_update_elem(map_fd, &profile_perf_event_cnt,
2269 						&pmu_fd, BPF_ANY) ||
2270 			    ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
2271 				p_err("failed to create event %s on cpu %d",
2272 				      metrics[m].name, cpu);
2273 				return -1;
2274 			}
2275 			profile_perf_events[profile_perf_event_cnt++] = pmu_fd;
2276 		}
2277 	}
2278 	return 0;
2279 }
2280 
2281 static void profile_print_and_cleanup(void)
2282 {
2283 	profile_close_perf_events(profile_obj);
2284 	profile_read_values(profile_obj);
2285 	profile_print_readings();
2286 	profiler_bpf__destroy(profile_obj);
2287 
2288 	close(profile_tgt_fd);
2289 	free(profile_tgt_name);
2290 }
2291 
2292 static void int_exit(int signo)
2293 {
2294 	profile_print_and_cleanup();
2295 	exit(0);
2296 }
2297 
2298 static int do_profile(int argc, char **argv)
2299 {
2300 	int num_metric, num_cpu, err = -1;
2301 	struct bpf_program *prog;
2302 	unsigned long duration;
2303 	char *endptr;
2304 
2305 	/* we at least need two args for the prog and one metric */
2306 	if (!REQ_ARGS(3))
2307 		return -EINVAL;
2308 
2309 	/* parse target fd */
2310 	profile_tgt_fd = prog_parse_fd(&argc, &argv);
2311 	if (profile_tgt_fd < 0) {
2312 		p_err("failed to parse fd");
2313 		return -1;
2314 	}
2315 
2316 	/* parse profiling optional duration */
2317 	if (argc > 2 && is_prefix(argv[0], "duration")) {
2318 		NEXT_ARG();
2319 		duration = strtoul(*argv, &endptr, 0);
2320 		if (*endptr)
2321 			usage();
2322 		NEXT_ARG();
2323 	} else {
2324 		duration = UINT_MAX;
2325 	}
2326 
2327 	num_metric = profile_parse_metrics(argc, argv);
2328 	if (num_metric <= 0)
2329 		goto out;
2330 
2331 	num_cpu = libbpf_num_possible_cpus();
2332 	if (num_cpu <= 0) {
2333 		p_err("failed to identify number of CPUs");
2334 		goto out;
2335 	}
2336 
2337 	profile_obj = profiler_bpf__open();
2338 	if (!profile_obj) {
2339 		p_err("failed to open and/or load BPF object");
2340 		goto out;
2341 	}
2342 
2343 	profile_obj->rodata->num_cpu = num_cpu;
2344 	profile_obj->rodata->num_metric = num_metric;
2345 
2346 	/* adjust map sizes */
2347 	bpf_map__set_max_entries(profile_obj->maps.events, num_metric * num_cpu);
2348 	bpf_map__set_max_entries(profile_obj->maps.fentry_readings, num_metric);
2349 	bpf_map__set_max_entries(profile_obj->maps.accum_readings, num_metric);
2350 	bpf_map__set_max_entries(profile_obj->maps.counts, 1);
2351 
2352 	/* change target name */
2353 	profile_tgt_name = profile_target_name(profile_tgt_fd);
2354 	if (!profile_tgt_name)
2355 		goto out;
2356 
2357 	bpf_object__for_each_program(prog, profile_obj->obj) {
2358 		err = bpf_program__set_attach_target(prog, profile_tgt_fd,
2359 						     profile_tgt_name);
2360 		if (err) {
2361 			p_err("failed to set attach target\n");
2362 			goto out;
2363 		}
2364 	}
2365 
2366 	set_max_rlimit();
2367 	err = profiler_bpf__load(profile_obj);
2368 	if (err) {
2369 		p_err("failed to load profile_obj");
2370 		goto out;
2371 	}
2372 
2373 	err = profile_open_perf_events(profile_obj);
2374 	if (err)
2375 		goto out;
2376 
2377 	err = profiler_bpf__attach(profile_obj);
2378 	if (err) {
2379 		p_err("failed to attach profile_obj");
2380 		goto out;
2381 	}
2382 	signal(SIGINT, int_exit);
2383 
2384 	sleep(duration);
2385 	profile_print_and_cleanup();
2386 	return 0;
2387 
2388 out:
2389 	profile_close_perf_events(profile_obj);
2390 	if (profile_obj)
2391 		profiler_bpf__destroy(profile_obj);
2392 	close(profile_tgt_fd);
2393 	free(profile_tgt_name);
2394 	return err;
2395 }
2396 
2397 #endif /* BPFTOOL_WITHOUT_SKELETONS */
2398 
2399 static int do_help(int argc, char **argv)
2400 {
2401 	if (json_output) {
2402 		jsonw_null(json_wtr);
2403 		return 0;
2404 	}
2405 
2406 	fprintf(stderr,
2407 		"Usage: %1$s %2$s { show | list } [PROG]\n"
2408 		"       %1$s %2$s dump xlated PROG [{ file FILE | opcodes | visual | linum }]\n"
2409 		"       %1$s %2$s dump jited  PROG [{ file FILE | opcodes | linum }]\n"
2410 		"       %1$s %2$s pin   PROG FILE\n"
2411 		"       %1$s %2$s { load | loadall } OBJ  PATH \\\n"
2412 		"                         [type TYPE] [dev NAME] \\\n"
2413 		"                         [map { idx IDX | name NAME } MAP]\\\n"
2414 		"                         [pinmaps MAP_DIR]\n"
2415 		"                         [autoattach]\n"
2416 		"       %1$s %2$s attach PROG ATTACH_TYPE [MAP]\n"
2417 		"       %1$s %2$s detach PROG ATTACH_TYPE [MAP]\n"
2418 		"       %1$s %2$s run PROG \\\n"
2419 		"                         data_in FILE \\\n"
2420 		"                         [data_out FILE [data_size_out L]] \\\n"
2421 		"                         [ctx_in FILE [ctx_out FILE [ctx_size_out M]]] \\\n"
2422 		"                         [repeat N]\n"
2423 		"       %1$s %2$s profile PROG [duration DURATION] METRICs\n"
2424 		"       %1$s %2$s tracelog\n"
2425 		"       %1$s %2$s help\n"
2426 		"\n"
2427 		"       " HELP_SPEC_MAP "\n"
2428 		"       " HELP_SPEC_PROGRAM "\n"
2429 		"       TYPE := { socket | kprobe | kretprobe | classifier | action |\n"
2430 		"                 tracepoint | raw_tracepoint | xdp | perf_event | cgroup/skb |\n"
2431 		"                 cgroup/sock | cgroup/dev | lwt_in | lwt_out | lwt_xmit |\n"
2432 		"                 lwt_seg6local | sockops | sk_skb | sk_msg | lirc_mode2 |\n"
2433 		"                 sk_reuseport | flow_dissector | cgroup/sysctl |\n"
2434 		"                 cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
2435 		"                 cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
2436 		"                 cgroup/getpeername4 | cgroup/getpeername6 |\n"
2437 		"                 cgroup/getsockname4 | cgroup/getsockname6 | cgroup/sendmsg4 |\n"
2438 		"                 cgroup/sendmsg6 | cgroup/recvmsg4 | cgroup/recvmsg6 |\n"
2439 		"                 cgroup/getsockopt | cgroup/setsockopt | cgroup/sock_release |\n"
2440 		"                 struct_ops | fentry | fexit | freplace | sk_lookup }\n"
2441 		"       ATTACH_TYPE := { sk_msg_verdict | sk_skb_verdict | sk_skb_stream_verdict |\n"
2442 		"                        sk_skb_stream_parser | flow_dissector }\n"
2443 		"       METRIC := { cycles | instructions | l1d_loads | llc_misses | itlb_misses | dtlb_misses }\n"
2444 		"       " HELP_SPEC_OPTIONS " |\n"
2445 		"                    {-f|--bpffs} | {-m|--mapcompat} | {-n|--nomount} |\n"
2446 		"                    {-L|--use-loader} }\n"
2447 		"",
2448 		bin_name, argv[-2]);
2449 
2450 	return 0;
2451 }
2452 
2453 static const struct cmd cmds[] = {
2454 	{ "show",	do_show },
2455 	{ "list",	do_show },
2456 	{ "help",	do_help },
2457 	{ "dump",	do_dump },
2458 	{ "pin",	do_pin },
2459 	{ "load",	do_load },
2460 	{ "loadall",	do_loadall },
2461 	{ "attach",	do_attach },
2462 	{ "detach",	do_detach },
2463 	{ "tracelog",	do_tracelog },
2464 	{ "run",	do_run },
2465 	{ "profile",	do_profile },
2466 	{ 0 }
2467 };
2468 
2469 int do_prog(int argc, char **argv)
2470 {
2471 	return cmd_select(cmds, argc, argv, do_help);
2472 }
2473