xref: /openbmc/linux/tools/perf/util/bpf-event.c (revision a44e4f3a)
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include "bpf-event.h"
#include "debug.h"
#include "dso.h"
#include "symbol.h"
#include "machine.h"
#include "env.h"
#include "session.h"
#include "map.h"
#include "evlist.h"
#include "record.h"
#include "util/synthetic-events.h"

#define ptr_to_u64(ptr)    ((__u64)(unsigned long)(ptr))

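/* Format @len bytes from @data as hex digits into @buf. */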
static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
{
	int ret = 0;
	size_t i;

	for (i = 0; i < len; i++)
		ret += snprintf(buf + ret, size - ret, "%02x", data[i]);
	return ret;
}

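/*
 * Handle PERF_BPF_EVENT_PROG_LOAD: mark the kernel map of each jited ksym
 * of the program as a BPF-program DSO, so that it can be annotated later
 * from the bpf_prog_info saved in perf_env.
 */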
static int machine__process_bpf_event_load(struct machine *machine,
					   union perf_event *event,
					   struct perf_sample *sample __maybe_unused)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info_node *info_node;
	struct perf_env *env = machine->env;
	int id = event->bpf.id;
	unsigned int i;

	/* perf-record, no need to handle bpf-event */
	if (env == NULL)
		return 0;

	info_node = perf_env__find_bpf_prog_info(env, id);
	if (!info_node)
		return 0;
	info_linear = info_node->info_linear;

	for (i = 0; i < info_linear->info.nr_jited_ksyms; i++) {
		u64 *addrs = (u64 *)(uintptr_t)(info_linear->info.jited_ksyms);
		u64 addr = addrs[i];
		struct map *map;

		map = map_groups__find(&machine->kmaps, addr);

		if (map) {
			map->dso->binary_type = DSO_BINARY_TYPE__BPF_PROG_INFO;
			map->dso->bpf_prog.id = id;
			map->dso->bpf_prog.sub_id = i;
			map->dso->bpf_prog.env = env;
		}
	}
	return 0;
}

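/* Entry point for PERF_RECORD_BPF_EVENT records seen while processing a session. */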
int machine__process_bpf(struct machine *machine, union perf_event *event,
			 struct perf_sample *sample)
{
	if (dump_trace)
		perf_event__fprintf_bpf(event, stdout);

	switch (event->bpf.type) {
	case PERF_BPF_EVENT_PROG_LOAD:
		return machine__process_bpf_event_load(machine, event, sample);

	case PERF_BPF_EVENT_PROG_UNLOAD:
		/*
		 * Do not free bpf_prog_info and btf of the program here,
		 * as annotation still needs them. They will be freed at
		 * the end of the session.
		 */
		break;
	default:
		pr_debug("unexpected bpf event type of %d\n", event->bpf.type);
		break;
	}
	return 0;
}

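/* Snapshot the raw BTF data for @btf_id into a btf_node and store it in @env. */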
static int perf_env__fetch_btf(struct perf_env *env,
			       u32 btf_id,
			       struct btf *btf)
{
	struct btf_node *node;
	u32 data_size;
	const void *data;

	data = btf__get_raw_data(btf, &data_size);

	node = malloc(data_size + sizeof(struct btf_node));
	if (!node)
		return -1;

	node->id = btf_id;
	node->data_size = data_size;
	memcpy(node->data, data, data_size);

	perf_env__insert_btf(env, node);
	return 0;
}

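/*
 * Build the ksymbol name for sub program @sub_id: "bpf_prog_<tag>" followed
 * by the BTF function name when BTF is available, by the program name when
 * there is a single sub program, or by "F" otherwise.
 */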
static int synthesize_bpf_prog_name(char *buf, int size,
				    struct bpf_prog_info *info,
				    struct btf *btf,
				    u32 sub_id)
{
	u8 (*prog_tags)[BPF_TAG_SIZE] = (void *)(uintptr_t)(info->prog_tags);
	void *func_infos = (void *)(uintptr_t)(info->func_info);
	u32 sub_prog_cnt = info->nr_jited_ksyms;
	const struct bpf_func_info *finfo;
	const char *short_name = NULL;
	const struct btf_type *t;
	int name_len;

	name_len = snprintf(buf, size, "bpf_prog_");
	name_len += snprintf_hex(buf + name_len, size - name_len,
				 prog_tags[sub_id], BPF_TAG_SIZE);
	if (btf) {
		finfo = func_infos + sub_id * info->func_info_rec_size;
		t = btf__type_by_id(btf, finfo->type_id);
		short_name = btf__name_by_offset(btf, t->name_off);
	} else if (sub_id == 0 && sub_prog_cnt == 1) {
		/* no subprog */
		if (info->name[0])
			short_name = info->name;
	} else
		short_name = "F";
	if (short_name)
		name_len += snprintf(buf + name_len, size - name_len,
				     "_%s", short_name);
	return name_len;
}

/*
 * Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf
 * program: one PERF_RECORD_BPF_EVENT for the program as a whole, and one
 * PERF_RECORD_KSYMBOL for each of its sub programs.
 *
 * Returns:
 *    0 for success;
 *   -1 for failure;
 *   -2 for lack of kernel support.
 */
static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
					       perf_event__handler_t process,
					       struct machine *machine,
					       int fd,
					       union perf_event *event,
					       struct record_opts *opts)
{
	struct perf_record_ksymbol *ksymbol_event = &event->ksymbol;
	struct perf_record_bpf_event *bpf_event = &event->bpf;
	struct bpf_prog_info_linear *info_linear;
	struct perf_tool *tool = session->tool;
	struct bpf_prog_info_node *info_node;
	struct bpf_prog_info *info;
	struct btf *btf = NULL;
	struct perf_env *env;
	u32 sub_prog_cnt, i;
	int err = 0;
	u64 arrays;

	/*
	 * For perf-record and perf-report, use header.env; otherwise, use
	 * the global perf_env.
	 */
	env = session->data ? &session->header.env : &perf_env;

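	/* select which bpf_prog_info arrays bpf_program__get_prog_info_linear() should fetch */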
	arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
	arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
	arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
	arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
	arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
	arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
	arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;

	info_linear = bpf_program__get_prog_info_linear(fd, arrays);
	if (IS_ERR_OR_NULL(info_linear)) {
		info_linear = NULL;
		pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
		return -1;
	}

	if (info_linear->info_len < offsetof(struct bpf_prog_info, prog_tags)) {
		pr_debug("%s: the kernel is too old, aborting\n", __func__);
		free(info_linear);
		return -2;
	}

	info = &info_linear->info;

	/* number of ksyms, func_lengths, and tags should match */
	sub_prog_cnt = info->nr_jited_ksyms;
	if (sub_prog_cnt != info->nr_prog_tags ||
	    sub_prog_cnt != info->nr_jited_func_lens) {
		err = -1;
		goto out;
	}

	/* check BTF func info support */
	if (info->btf_id && info->nr_func_info && info->func_info_rec_size) {
		/* btf func info number should be same as sub_prog_cnt */
		if (sub_prog_cnt != info->nr_func_info) {
			pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
			err = -1;
			goto out;
		}
		if (btf__get_from_id(info->btf_id, &btf)) {
			pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info->btf_id);
			err = -1;
			btf = NULL;
			goto out;
		}
		perf_env__fetch_btf(env, info->btf_id, btf);
	}

	/* Synthesize PERF_RECORD_KSYMBOL */
	for (i = 0; i < sub_prog_cnt; i++) {
		__u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
		__u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
		int name_len;

		*ksymbol_event = (struct perf_record_ksymbol) {
			.header = {
				.type = PERF_RECORD_KSYMBOL,
				.size = offsetof(struct perf_record_ksymbol, name),
			},
			.addr = prog_addrs[i],
			.len = prog_lens[i],
			.ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
			.flags = 0,
		};

		name_len = synthesize_bpf_prog_name(ksymbol_event->name,
						    KSYM_NAME_LEN, info, btf, i);
		ksymbol_event->header.size += PERF_ALIGN(name_len + 1,
							 sizeof(u64));

		memset((void *)event + event->header.size, 0, machine->id_hdr_size);
		event->header.size += machine->id_hdr_size;
		err = perf_tool__process_synth_event(tool, event,
						     machine, process);
	}

	if (!opts->no_bpf_event) {
		/* Synthesize PERF_RECORD_BPF_EVENT */
		*bpf_event = (struct perf_record_bpf_event) {
			.header = {
				.type = PERF_RECORD_BPF_EVENT,
				.size = sizeof(struct perf_record_bpf_event),
			},
			.type = PERF_BPF_EVENT_PROG_LOAD,
			.flags = 0,
			.id = info->id,
		};
		memcpy(bpf_event->tag, info->tag, BPF_TAG_SIZE);
		memset((void *)event + event->header.size, 0, machine->id_hdr_size);
		event->header.size += machine->id_hdr_size;

		/* save bpf_prog_info to env */
		info_node = malloc(sizeof(struct bpf_prog_info_node));
		if (!info_node) {
			err = -1;
			goto out;
		}

		info_node->info_linear = info_linear;
		perf_env__insert_bpf_prog_info(env, info_node);
		info_linear = NULL;

		/*
		 * Process after saving bpf_prog_info to env, so that the
		 * required information is ready for lookup.
		 */
		err = perf_tool__process_synth_event(tool, event,
						     machine, process);
	}

out:
	free(info_linear);
	btf__free(btf);
	return err ? -1 : 0;
}

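/*
 * Walk all BPF programs currently loaded in the kernel and synthesize
 * events for each of them.
 */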
int perf_event__synthesize_bpf_events(struct perf_session *session,
				      perf_event__handler_t process,
				      struct machine *machine,
				      struct record_opts *opts)
{
	union perf_event *event;
	__u32 id = 0;
	int err;
	int fd;

	event = malloc(sizeof(event->bpf) + KSYM_NAME_LEN + machine->id_hdr_size);
	if (!event)
		return -1;
	while (true) {
		err = bpf_prog_get_next_id(id, &id);
		if (err) {
			if (errno == ENOENT) {
				err = 0;
				break;
			}
			pr_debug("%s: can't get next program: %s%s\n",
				 __func__, strerror(errno),
				 errno == EINVAL ? " -- kernel too old?" : "");
			/* don't report error on old kernel or EPERM */
			err = (errno == EINVAL || errno == EPERM) ? 0 : -1;
			break;
		}
		fd = bpf_prog_get_fd_by_id(id);
		if (fd < 0) {
			pr_debug("%s: failed to get fd for prog_id %u\n",
				 __func__, id);
			continue;
		}

		err = perf_event__synthesize_one_bpf_prog(session, process,
							  machine, fd,
							  event, opts);
		close(fd);
		if (err) {
			/* do not return error for old kernel */
			if (err == -2)
				err = 0;
			break;
		}
	}
	free(event);
	return err;
}

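/*
 * Fetch bpf_prog_info (and its BTF, if any) for program @id and stash both
 * in @env for later use.
 */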
static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info_node *info_node;
	struct btf *btf = NULL;
	u64 arrays;
	u32 btf_id;
	int fd;

	fd = bpf_prog_get_fd_by_id(id);
	if (fd < 0)
		return;

	arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
	arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
	arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
	arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
	arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
	arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
	arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;

	info_linear = bpf_program__get_prog_info_linear(fd, arrays);
	if (IS_ERR_OR_NULL(info_linear)) {
		pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
		goto out;
	}

	btf_id = info_linear->info.btf_id;

	info_node = malloc(sizeof(struct bpf_prog_info_node));
	if (info_node) {
		info_node->info_linear = info_linear;
		perf_env__insert_bpf_prog_info(env, info_node);
	} else
		free(info_linear);

	if (btf_id == 0)
		goto out;

	if (btf__get_from_id(btf_id, &btf)) {
		pr_debug("%s: failed to get BTF of id %u, aborting\n",
			 __func__, btf_id);
		goto out;
	}
	perf_env__fetch_btf(env, btf_id, btf);

out:
	btf__free(btf);
	close(fd);
}

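/* Side-band event callback: record the info of BPF programs as they get loaded. */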
static int bpf_event__sb_cb(union perf_event *event, void *data)
{
	struct perf_env *env = data;

	if (event->header.type != PERF_RECORD_BPF_EVENT)
		return -1;

	switch (event->bpf.type) {
	case PERF_BPF_EVENT_PROG_LOAD:
		perf_env__add_bpf_info(env, event->bpf.id);
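		/* fall through */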
	case PERF_BPF_EVENT_PROG_UNLOAD:
		/*
		 * Do not free bpf_prog_info and btf of the program here,
		 * as annotation still needs them. They will be freed at
		 * the end of the session.
		 */
		break;
	default:
		pr_debug("unexpected bpf event type of %d\n", event->bpf.type);
		break;
	}

	return 0;
}

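/*
 * Add a software dummy event to the side-band evlist so that
 * PERF_RECORD_BPF_EVENT records get delivered to bpf_event__sb_cb().
 */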
int bpf_event__add_sb_event(struct evlist **evlist,
			    struct perf_env *env)
{
	struct perf_event_attr attr = {
		.type	          = PERF_TYPE_SOFTWARE,
		.config           = PERF_COUNT_SW_DUMMY,
		.sample_id_all    = 1,
		.watermark        = 1,
		.bpf_event        = 1,
		.size	   = sizeof(attr), /* to capture ABI version */
	};

	/*
	 * Older gcc versions don't support designated initializers, like above,
	 * for unnamed union members, such as the following:
	 */
	attr.wakeup_watermark = 1;

	return perf_evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
}

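/* Print one bpf_prog_info, one line per sub program, to @fp. */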
void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
				    struct perf_env *env,
				    FILE *fp)
{
	__u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
	__u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
	char name[KSYM_NAME_LEN];
	struct btf *btf = NULL;
	u32 sub_prog_cnt, i;

	sub_prog_cnt = info->nr_jited_ksyms;
	if (sub_prog_cnt != info->nr_prog_tags ||
	    sub_prog_cnt != info->nr_jited_func_lens)
		return;

	if (info->btf_id) {
		struct btf_node *node;

		node = perf_env__find_btf(env, info->btf_id);
		if (node)
			btf = btf__new((__u8 *)(node->data),
				       node->data_size);
	}

	if (sub_prog_cnt == 1) {
		synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, 0);
		fprintf(fp, "# bpf_prog_info %u: %s addr 0x%llx size %u\n",
			info->id, name, prog_addrs[0], prog_lens[0]);
		goto out;
	}

	fprintf(fp, "# bpf_prog_info %u:\n", info->id);
	for (i = 0; i < sub_prog_cnt; i++) {
		synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, i);

		fprintf(fp, "# \tsub_prog %u: %s addr 0x%llx size %u\n",
			i, name, prog_addrs[i], prog_lens[i]);
	}
out:
	btf__free(btf);
}