xref: /openbmc/linux/tools/perf/util/bpf_counter.c (revision df202b452fe6c6d6f1351bad485e2367ef1e644e)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /* Copyright (c) 2019 Facebook */
4 
5 #include <assert.h>
6 #include <limits.h>
7 #include <unistd.h>
8 #include <sys/file.h>
9 #include <sys/time.h>
10 #include <linux/err.h>
11 #include <linux/zalloc.h>
12 #include <api/fs/fs.h>
13 #include <perf/bpf_perf.h>
14 
15 #include "bpf_counter.h"
16 #include "bpf-utils.h"
17 #include "counts.h"
18 #include "debug.h"
19 #include "evsel.h"
20 #include "evlist.h"
21 #include "target.h"
22 #include "cgroup.h"
23 #include "cpumap.h"
24 #include "thread_map.h"
25 
26 #include "bpf_skel/bpf_prog_profiler.skel.h"
27 #include "bpf_skel/bperf_u.h"
28 #include "bpf_skel/bperf_leader.skel.h"
29 #include "bpf_skel/bperf_follower.skel.h"
30 
31 #define ATTR_MAP_SIZE 16
32 
33 static inline void *u64_to_ptr(__u64 ptr)
34 {
35 	return (void *)(unsigned long)ptr;
36 }
37 
38 static struct bpf_counter *bpf_counter_alloc(void)
39 {
40 	struct bpf_counter *counter;
41 
42 	counter = zalloc(sizeof(*counter));
43 	if (counter)
44 		INIT_LIST_HEAD(&counter->list);
45 	return counter;
46 }
47 
48 static int bpf_program_profiler__destroy(struct evsel *evsel)
49 {
50 	struct bpf_counter *counter, *tmp;
51 
52 	list_for_each_entry_safe(counter, tmp,
53 				 &evsel->bpf_counter_list, list) {
54 		list_del_init(&counter->list);
55 		bpf_prog_profiler_bpf__destroy(counter->skel);
56 		free(counter);
57 	}
58 	assert(list_empty(&evsel->bpf_counter_list));
59 
60 	return 0;
61 }
62 
63 static char *bpf_target_prog_name(int tgt_fd)
64 {
65 	struct bpf_func_info *func_info;
66 	struct perf_bpil *info_linear;
67 	const struct btf_type *t;
68 	struct btf *btf = NULL;
69 	char *name = NULL;
70 
71 	info_linear = get_bpf_prog_info_linear(tgt_fd, 1UL << PERF_BPIL_FUNC_INFO);
72 	if (IS_ERR_OR_NULL(info_linear)) {
73 		pr_debug("failed to get info_linear for prog FD %d\n", tgt_fd);
74 		return NULL;
75 	}
76 
77 	if (info_linear->info.btf_id == 0) {
78 		pr_debug("prog FD %d doesn't have valid btf\n", tgt_fd);
79 		goto out;
80 	}
81 
82 	btf = btf__load_from_kernel_by_id(info_linear->info.btf_id);
83 	if (libbpf_get_error(btf)) {
84 		pr_debug("failed to load btf for prog FD %d\n", tgt_fd);
85 		goto out;
86 	}
87 
88 	func_info = u64_to_ptr(info_linear->info.func_info);
89 	t = btf__type_by_id(btf, func_info[0].type_id);
90 	if (!t) {
91 		pr_debug("btf %d doesn't have type %d\n",
92 			 info_linear->info.btf_id, func_info[0].type_id);
93 		goto out;
94 	}
95 	name = strdup(btf__name_by_offset(btf, t->name_off));
96 out:
97 	btf__free(btf);
98 	free(info_linear);
99 	return name;
100 }
101 
102 static int bpf_program_profiler_load_one(struct evsel *evsel, u32 prog_id)
103 {
104 	struct bpf_prog_profiler_bpf *skel;
105 	struct bpf_counter *counter;
106 	struct bpf_program *prog;
107 	char *prog_name;
108 	int prog_fd;
109 	int err;
110 
111 	prog_fd = bpf_prog_get_fd_by_id(prog_id);
112 	if (prog_fd < 0) {
113 		pr_err("Failed to open fd for bpf prog %u\n", prog_id);
114 		return -1;
115 	}
116 	counter = bpf_counter_alloc();
117 	if (!counter) {
118 		close(prog_fd);
119 		return -1;
120 	}
121 
122 	skel = bpf_prog_profiler_bpf__open();
123 	if (!skel) {
124 		pr_err("Failed to open bpf skeleton\n");
125 		goto err_out;
126 	}
127 
128 	skel->rodata->num_cpu = evsel__nr_cpus(evsel);
129 
130 	bpf_map__set_max_entries(skel->maps.events, evsel__nr_cpus(evsel));
131 	bpf_map__set_max_entries(skel->maps.fentry_readings, 1);
132 	bpf_map__set_max_entries(skel->maps.accum_readings, 1);
133 
134 	prog_name = bpf_target_prog_name(prog_fd);
135 	if (!prog_name) {
136 		pr_err("Failed to get program name for bpf prog %u. Does it have BTF?\n", prog_id);
137 		goto err_out;
138 	}
139 
140 	bpf_object__for_each_program(prog, skel->obj) {
141 		err = bpf_program__set_attach_target(prog, prog_fd, prog_name);
142 		if (err) {
143 			pr_err("bpf_program__set_attach_target failed.\n"
144 			       "Does bpf prog %u have BTF?\n", prog_id);
			free(prog_name);
145 			goto err_out;
146 		}
147 	}
	free(prog_name);
148 	set_max_rlimit();
149 	err = bpf_prog_profiler_bpf__load(skel);
150 	if (err) {
151 		pr_err("bpf_prog_profiler_bpf__load failed\n");
152 		goto err_out;
153 	}
154 
155 	assert(skel != NULL);
156 	counter->skel = skel;
157 	list_add(&counter->list, &evsel->bpf_counter_list);
158 	close(prog_fd);
159 	return 0;
160 err_out:
161 	bpf_prog_profiler_bpf__destroy(skel);
162 	free(counter);
163 	close(prog_fd);
164 	return -1;
165 }
166 
167 static int bpf_program_profiler__load(struct evsel *evsel, struct target *target)
168 {
169 	char *bpf_str, *bpf_str_, *tok, *saveptr = NULL, *p;
170 	u32 prog_id;
171 	int ret;
172 
173 	bpf_str_ = bpf_str = strdup(target->bpf_str);
174 	if (!bpf_str)
175 		return -1;
176 
177 	while ((tok = strtok_r(bpf_str, ",", &saveptr)) != NULL) {
178 		prog_id = strtoul(tok, &p, 10);
179 		if (prog_id == 0 || prog_id == UINT_MAX ||
180 		    (*p != '\0' && *p != ',')) {
181 			pr_err("Failed to parse bpf prog ids %s\n",
182 			       target->bpf_str);
			free(bpf_str_);
183 			return -1;
184 		}
185 
186 		ret = bpf_program_profiler_load_one(evsel, prog_id);
187 		if (ret) {
188 			bpf_program_profiler__destroy(evsel);
189 			free(bpf_str_);
190 			return -1;
191 		}
192 		bpf_str = NULL;
193 	}
194 	free(bpf_str_);
195 	return 0;
196 }
197 
198 static int bpf_program_profiler__enable(struct evsel *evsel)
199 {
200 	struct bpf_counter *counter;
201 	int ret;
202 
203 	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
204 		assert(counter->skel != NULL);
205 		ret = bpf_prog_profiler_bpf__attach(counter->skel);
206 		if (ret) {
207 			bpf_program_profiler__destroy(evsel);
208 			return ret;
209 		}
210 	}
211 	return 0;
212 }
213 
214 static int bpf_program_profiler__disable(struct evsel *evsel)
215 {
216 	struct bpf_counter *counter;
217 
218 	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
219 		assert(counter->skel != NULL);
220 		bpf_prog_profiler_bpf__detach(counter->skel);
221 	}
222 	return 0;
223 }
224 
225 static int bpf_program_profiler__read(struct evsel *evsel)
226 {
227 	// BPF_MAP_TYPE_PERCPU_ARRAY uses /sys/devices/system/cpu/possible,
228 	// and sometimes possible > online: e.g. a Ryzen 3900X has 24
229 	// threads, but its possible mask shows 0-31. -acme
230 	int num_cpu_bpf = libbpf_num_possible_cpus();
231 	struct bpf_perf_event_value values[num_cpu_bpf];
232 	struct bpf_counter *counter;
233 	struct perf_counts_values *counts;
234 	int reading_map_fd;
235 	__u32 key = 0;
236 	int err, idx, bpf_cpu;
237 
238 	if (list_empty(&evsel->bpf_counter_list))
239 		return -EAGAIN;
240 
241 	perf_cpu_map__for_each_idx(idx, evsel__cpus(evsel)) {
242 		counts = perf_counts(evsel->counts, idx, 0);
243 		counts->val = 0;
244 		counts->ena = 0;
245 		counts->run = 0;
246 	}
247 	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
248 		struct bpf_prog_profiler_bpf *skel = counter->skel;
249 
250 		assert(skel != NULL);
251 		reading_map_fd = bpf_map__fd(skel->maps.accum_readings);
252 
253 		err = bpf_map_lookup_elem(reading_map_fd, &key, values);
254 		if (err) {
255 			pr_err("failed to read value\n");
256 			return err;
257 		}
258 
259 		for (bpf_cpu = 0; bpf_cpu < num_cpu_bpf; bpf_cpu++) {
260 			idx = perf_cpu_map__idx(evsel__cpus(evsel),
261 						(struct perf_cpu){.cpu = bpf_cpu});
262 			if (idx == -1)
263 				continue;
264 			counts = perf_counts(evsel->counts, idx, 0);
265 			counts->val += values[bpf_cpu].counter;
266 			counts->ena += values[bpf_cpu].enabled;
267 			counts->run += values[bpf_cpu].running;
268 		}
269 	}
270 	return 0;
271 }
272 
273 static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu_map_idx,
274 					    int fd)
275 {
276 	struct bpf_prog_profiler_bpf *skel;
277 	struct bpf_counter *counter;
278 	int ret;
279 
280 	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
281 		skel = counter->skel;
282 		assert(skel != NULL);
283 
284 		ret = bpf_map_update_elem(bpf_map__fd(skel->maps.events),
285 					  &cpu_map_idx, &fd, BPF_ANY);
286 		if (ret)
287 			return ret;
288 	}
289 	return 0;
290 }
291 
292 struct bpf_counter_ops bpf_program_profiler_ops = {
293 	.load       = bpf_program_profiler__load,
294 	.enable	    = bpf_program_profiler__enable,
295 	.disable    = bpf_program_profiler__disable,
296 	.read       = bpf_program_profiler__read,
297 	.destroy    = bpf_program_profiler__destroy,
298 	.install_pe = bpf_program_profiler__install_pe,
299 };
300 
301 static bool bperf_attr_map_compatible(int attr_map_fd)
302 {
303 	struct bpf_map_info map_info = {0};
304 	__u32 map_info_len = sizeof(map_info);
305 	int err;
306 
307 	err = bpf_obj_get_info_by_fd(attr_map_fd, &map_info, &map_info_len);
308 
309 	if (err)
310 		return false;
311 	return (map_info.key_size == sizeof(struct perf_event_attr)) &&
312 		(map_info.value_size == sizeof(struct perf_event_attr_map_entry));
313 }
314 
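/*
 * bpf_map_create() was added in libbpf 0.7. The __weak definition below
 * is a fallback for older libbpf: when libbpf provides the real symbol,
 * it overrides this weak one at link time, so the deprecated
 * bpf_create_map() wrapper is only reached with pre-0.7 libbpf.
 */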
315 int __weak
316 bpf_map_create(enum bpf_map_type map_type,
317 	       const char *map_name __maybe_unused,
318 	       __u32 key_size,
319 	       __u32 value_size,
320 	       __u32 max_entries,
321 	       const struct bpf_map_create_opts *opts __maybe_unused)
322 {
323 #pragma GCC diagnostic push
324 #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
325 	return bpf_create_map(map_type, key_size, value_size, max_entries, 0);
326 #pragma GCC diagnostic pop
327 }
328 
329 static int bperf_lock_attr_map(struct target *target)
330 {
331 	char path[PATH_MAX];
332 	int map_fd, err;
333 
334 	if (target->attr_map) {
335 		scnprintf(path, PATH_MAX, "%s", target->attr_map);
336 	} else {
337 		scnprintf(path, PATH_MAX, "%s/fs/bpf/%s", sysfs__mountpoint(),
338 			  BPF_PERF_DEFAULT_ATTR_MAP_PATH);
339 	}
340 
341 	if (access(path, F_OK)) {
342 		map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL,
343 					sizeof(struct perf_event_attr),
344 					sizeof(struct perf_event_attr_map_entry),
345 					ATTR_MAP_SIZE, NULL);
346 		if (map_fd < 0)
347 			return -1;
348 
349 		err = bpf_obj_pin(map_fd, path);
350 		if (err) {
351 			/* someone pinned the map in parallel? */
352 			close(map_fd);
353 			map_fd = bpf_obj_get(path);
354 			if (map_fd < 0)
355 				return -1;
356 		}
357 	} else {
358 		map_fd = bpf_obj_get(path);
359 		if (map_fd < 0)
360 			return -1;
361 	}
362 
363 	if (!bperf_attr_map_compatible(map_fd)) {
364 		close(map_fd);
365 		return -1;
367 	}
368 	err = flock(map_fd, LOCK_EX);
369 	if (err) {
370 		close(map_fd);
371 		return -1;
372 	}
373 	return map_fd;
374 }
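
/*
 * With the default sysfs mount, the map above ends up pinned at
 * /sys/fs/bpf/<BPF_PERF_DEFAULT_ATTR_MAP_PATH>, and the flock() on its
 * fd serializes concurrent perf-stat sessions while they look up or
 * publish the leader program in bperf__load() below. A second session
 * reaches the same map roughly like this (error handling omitted; a
 * sketch of the flow, not a separate API):
 *
 *	int fd = bpf_obj_get("/sys/fs/bpf/perf_attr_map");
 *
 *	flock(fd, LOCK_EX);
 *	... lookup/update perf_event_attr -> perf_event_attr_map_entry ...
 *	flock(fd, LOCK_UN);
 */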
375 
376 static int bperf_check_target(struct evsel *evsel,
377 			      struct target *target,
378 			      enum bperf_filter_type *filter_type,
379 			      __u32 *filter_entry_cnt)
380 {
381 	if (evsel->core.leader->nr_members > 1) {
382 		pr_err("bpf managed perf events do not yet support groups.\n");
383 		return -1;
384 	}
385 
386 	/* determine filter type based on target */
387 	if (target->system_wide) {
388 		*filter_type = BPERF_FILTER_GLOBAL;
389 		*filter_entry_cnt = 1;
390 	} else if (target->cpu_list) {
391 		*filter_type = BPERF_FILTER_CPU;
392 		*filter_entry_cnt = perf_cpu_map__nr(evsel__cpus(evsel));
393 	} else if (target->tid) {
394 		*filter_type = BPERF_FILTER_PID;
395 		*filter_entry_cnt = perf_thread_map__nr(evsel->core.threads);
396 	} else if (target->pid || evsel->evlist->workload.pid != -1) {
397 		*filter_type = BPERF_FILTER_TGID;
398 		*filter_entry_cnt = perf_thread_map__nr(evsel->core.threads);
399 	} else {
400 		pr_err("bpf managed perf events do not yet support these targets.\n");
401 		return -1;
402 	}
403 
404 	return 0;
405 }
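
/*
 * The chosen filter_type is published to the follower prog (see
 * follower_skel->bss->type in bperf__load() below), and filter_entry_cnt
 * sizes both the filter and accum_readings maps: one entry per watched
 * pid/tgid/cpu, or a single entry for system-wide counting.
 */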
406 
407 static struct perf_cpu_map *all_cpu_map;
408 
409 static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
410 				       struct perf_event_attr_map_entry *entry)
411 {
412 	struct bperf_leader_bpf *skel = bperf_leader_bpf__open();
413 	int link_fd, diff_map_fd, err;
414 	struct bpf_link *link = NULL;
415 
416 	if (!skel) {
417 		pr_err("Failed to open leader skeleton\n");
418 		return -1;
419 	}
420 
421 	bpf_map__set_max_entries(skel->maps.events, libbpf_num_possible_cpus());
422 	err = bperf_leader_bpf__load(skel);
423 	if (err) {
424 		pr_err("Failed to load leader skeleton\n");
425 		goto out;
426 	}
427 
428 	link = bpf_program__attach(skel->progs.on_switch);
429 	if (IS_ERR(link)) {
430 		pr_err("Failed to attach leader program\n");
431 		err = PTR_ERR(link);
432 		goto out;
433 	}
434 
435 	link_fd = bpf_link__fd(link);
436 	diff_map_fd = bpf_map__fd(skel->maps.diff_readings);
437 	entry->link_id = bpf_link_get_id(link_fd);
438 	entry->diff_map_id = bpf_map_get_id(diff_map_fd);
439 	err = bpf_map_update_elem(attr_map_fd, &evsel->core.attr, entry, BPF_ANY);
440 	assert(err == 0);
441 
442 	evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry->link_id);
443 	assert(evsel->bperf_leader_link_fd >= 0);
444 
445 	/*
446 	 * Save leader_skel for install_pe(), which is called within the
447 	 * following evsel__open_per_cpu() call.
448 	 */
449 	evsel->leader_skel = skel;
450 	evsel__open_per_cpu(evsel, all_cpu_map, -1);
451 
452 out:
453 	bperf_leader_bpf__destroy(skel);
454 	bpf_link__destroy(link);
455 	return err;
456 }
457 
458 static int bperf__load(struct evsel *evsel, struct target *target)
459 {
460 	struct perf_event_attr_map_entry entry = {0xffffffff, 0xffffffff};
461 	int attr_map_fd, diff_map_fd = -1, err;
462 	enum bperf_filter_type filter_type;
463 	__u32 filter_entry_cnt, i;
464 
465 	if (bperf_check_target(evsel, target, &filter_type, &filter_entry_cnt))
466 		return -1;
467 
468 	if (!all_cpu_map) {
469 		all_cpu_map = perf_cpu_map__new(NULL);
470 		if (!all_cpu_map)
471 			return -1;
472 	}
473 
474 	evsel->bperf_leader_prog_fd = -1;
475 	evsel->bperf_leader_link_fd = -1;
476 
477 	/*
478 	 * Step 1: hold a fd on the leader program and the bpf_link; if
479 	 * the program is already gone, reload it.
480 	 * Use flock() to ensure exclusive access to the perf_event_attr
481 	 * map.
482 	 */
483 	attr_map_fd = bperf_lock_attr_map(target);
484 	if (attr_map_fd < 0) {
485 		pr_err("Failed to lock perf_event_attr map\n");
486 		return -1;
487 	}
488 
489 	err = bpf_map_lookup_elem(attr_map_fd, &evsel->core.attr, &entry);
490 	if (err) {
491 		err = bpf_map_update_elem(attr_map_fd, &evsel->core.attr, &entry, BPF_ANY);
492 		if (err)
493 			goto out;
494 	}
495 
496 	evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry.link_id);
497 	if (evsel->bperf_leader_link_fd < 0 &&
498 	    bperf_reload_leader_program(evsel, attr_map_fd, &entry)) {
499 		err = -1;
500 		goto out;
501 	}
502 	/*
503 	 * The bpf_link holds reference to the leader program, and the
504 	 * leader program holds reference to the maps. Therefore, if
505 	 * link_id is valid, diff_map_id should also be valid.
506 	 */
507 	evsel->bperf_leader_prog_fd = bpf_prog_get_fd_by_id(
508 		bpf_link_get_prog_id(evsel->bperf_leader_link_fd));
509 	assert(evsel->bperf_leader_prog_fd >= 0);
510 
511 	diff_map_fd = bpf_map_get_fd_by_id(entry.diff_map_id);
512 	assert(diff_map_fd >= 0);
513 
514 	/*
515 	 * bperf uses BPF_PROG_TEST_RUN to get an accurate reading. Check
516 	 * whether the kernel supports it.
517 	 */
518 	err = bperf_trigger_reading(evsel->bperf_leader_prog_fd, 0);
519 	if (err) {
520 		pr_err("The kernel does not support test_run for raw_tp BPF programs.\n"
521 		       "Therefore, --bpf-counters might show inaccurate readings\n");
522 		goto out;
523 	}
524 
525 	/* Step 2: load the follower skeleton */
526 	evsel->follower_skel = bperf_follower_bpf__open();
527 	if (!evsel->follower_skel) {
528 		err = -1;
529 		pr_err("Failed to open follower skeleton\n");
530 		goto out;
531 	}
532 
533 	/* attach fexit program to the leader program */
534 	bpf_program__set_attach_target(evsel->follower_skel->progs.fexit_XXX,
535 				       evsel->bperf_leader_prog_fd, "on_switch");
536 
537 	/* connect to leader diff_reading map */
538 	bpf_map__reuse_fd(evsel->follower_skel->maps.diff_readings, diff_map_fd);
539 
540 	/* set up reading map */
541 	bpf_map__set_max_entries(evsel->follower_skel->maps.accum_readings,
542 				 filter_entry_cnt);
543 	/* set up follower filter based on target */
544 	bpf_map__set_max_entries(evsel->follower_skel->maps.filter,
545 				 filter_entry_cnt);
546 	err = bperf_follower_bpf__load(evsel->follower_skel);
547 	if (err) {
548 		pr_err("Failed to load follower skeleton\n");
549 		bperf_follower_bpf__destroy(evsel->follower_skel);
550 		evsel->follower_skel = NULL;
551 		goto out;
552 	}
553 
554 	for (i = 0; i < filter_entry_cnt; i++) {
555 		int filter_map_fd;
556 		__u32 key;
557 
558 		if (filter_type == BPERF_FILTER_PID ||
559 		    filter_type == BPERF_FILTER_TGID)
560 			key = evsel->core.threads->map[i].pid;
561 		else if (filter_type == BPERF_FILTER_CPU)
562 			key = evsel->core.cpus->map[i].cpu;
563 		else
564 			break;
565 
566 		filter_map_fd = bpf_map__fd(evsel->follower_skel->maps.filter);
567 		bpf_map_update_elem(filter_map_fd, &key, &i, BPF_ANY);
568 	}
569 
570 	evsel->follower_skel->bss->type = filter_type;
571 
572 	err = bperf_follower_bpf__attach(evsel->follower_skel);
573 
574 out:
575 	if (err && evsel->bperf_leader_link_fd >= 0)
576 		close(evsel->bperf_leader_link_fd);
577 	if (err && evsel->bperf_leader_prog_fd >= 0)
578 		close(evsel->bperf_leader_prog_fd);
579 	if (diff_map_fd >= 0)
580 		close(diff_map_fd);
581 
582 	flock(attr_map_fd, LOCK_UN);
583 	close(attr_map_fd);
584 
585 	return err;
586 }
587 
588 static int bperf__install_pe(struct evsel *evsel, int cpu_map_idx, int fd)
589 {
590 	struct bperf_leader_bpf *skel = evsel->leader_skel;
591 
592 	return bpf_map_update_elem(bpf_map__fd(skel->maps.events),
593 				   &cpu_map_idx, &fd, BPF_ANY);
594 }
595 
596 /*
597  * Trigger the leader prog on each cpu, so the accum_readings map gets
598  * the latest readings.
599  */
600 static int bperf_sync_counters(struct evsel *evsel)
601 {
602 	int num_cpu, i, cpu;
603 
604 	num_cpu = all_cpu_map->nr;
605 	for (i = 0; i < num_cpu; i++) {
606 		cpu = all_cpu_map->map[i].cpu;
607 		bperf_trigger_reading(evsel->bperf_leader_prog_fd, cpu);
608 	}
609 	return 0;
610 }
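
/*
 * bperf_trigger_reading() above comes from <perf/bpf_perf.h>. For
 * illustration, a minimal helper with the same effect could be built on
 * bpf_prog_test_run_opts() with BPF_F_TEST_RUN_ON_CPU (a sketch; the
 * libperf implementation may differ in details):
 *
 *	static int trigger_on_cpu(int prog_fd, int cpu)
 *	{
 *		DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
 *				    .flags = BPF_F_TEST_RUN_ON_CPU,
 *				    .cpu = cpu);
 *
 *		return bpf_prog_test_run_opts(prog_fd, &opts);
 *	}
 */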
611 
612 static int bperf__enable(struct evsel *evsel)
613 {
614 	evsel->follower_skel->bss->enabled = 1;
615 	return 0;
616 }
617 
618 static int bperf__disable(struct evsel *evsel)
619 {
620 	evsel->follower_skel->bss->enabled = 0;
621 	return 0;
622 }
623 
624 static int bperf__read(struct evsel *evsel)
625 {
626 	struct bperf_follower_bpf *skel = evsel->follower_skel;
627 	__u32 num_cpu_bpf = cpu__max_cpu().cpu;
628 	struct bpf_perf_event_value values[num_cpu_bpf];
629 	struct perf_counts_values *counts;
630 	int reading_map_fd, err = 0;
631 	__u32 i;
632 	int j;
633 
634 	bperf_sync_counters(evsel);
635 	reading_map_fd = bpf_map__fd(skel->maps.accum_readings);
636 
637 	for (i = 0; i < bpf_map__max_entries(skel->maps.accum_readings); i++) {
638 		struct perf_cpu entry;
639 		__u32 cpu;
640 
641 		err = bpf_map_lookup_elem(reading_map_fd, &i, values);
642 		if (err)
643 			goto out;
644 		switch (evsel->follower_skel->bss->type) {
645 		case BPERF_FILTER_GLOBAL:
646 			assert(i == 0);
647 
648 			perf_cpu_map__for_each_cpu(entry, j, evsel__cpus(evsel)) {
649 				counts = perf_counts(evsel->counts, j, 0);
650 				counts->val = values[entry.cpu].counter;
651 				counts->ena = values[entry.cpu].enabled;
652 				counts->run = values[entry.cpu].running;
653 			}
654 			break;
655 		case BPERF_FILTER_CPU:
656 			cpu = perf_cpu_map__cpu(evsel__cpus(evsel), i).cpu;
657 			assert((int)cpu >= 0); /* cpu is __u32; a -1 return would wrap */
658 			counts = perf_counts(evsel->counts, i, 0);
659 			counts->val = values[cpu].counter;
660 			counts->ena = values[cpu].enabled;
661 			counts->run = values[cpu].running;
662 			break;
663 		case BPERF_FILTER_PID:
664 		case BPERF_FILTER_TGID:
665 			counts = perf_counts(evsel->counts, 0, i);
666 			counts->val = 0;
667 			counts->ena = 0;
668 			counts->run = 0;
669 
670 			for (cpu = 0; cpu < num_cpu_bpf; cpu++) {
671 				counts->val += values[cpu].counter;
672 				counts->ena += values[cpu].enabled;
673 				counts->run += values[cpu].running;
674 			}
675 			break;
676 		default:
677 			break;
678 		}
679 	}
680 out:
681 	return err;
682 }
683 
684 static int bperf__destroy(struct evsel *evsel)
685 {
686 	bperf_follower_bpf__destroy(evsel->follower_skel);
687 	close(evsel->bperf_leader_prog_fd);
688 	close(evsel->bperf_leader_link_fd);
689 	return 0;
690 }
691 
692 /*
693  * bperf: share hardware PMCs with BPF
694  *
695  * perf uses performance monitoring counters (PMC) to monitor system
696  * performance. The PMCs are limited hardware resources. For example,
697  * Intel CPUs have 3x fixed PMCs and 4x programmable PMCs per cpu.
698  *
699  * Modern data center systems use these PMCs in many different ways:
700  * system level monitoring, (maybe nested) container level monitoring, per
701  * process monitoring, profiling (in sample mode), etc. In some cases,
702  * there are more active perf_events than available hardware PMCs. To allow
703  * all perf_events to have a chance to run, it is necessary to do expensive
704  * time multiplexing of events.
705  *
706  * On the other hand, many monitoring tools count the common metrics
707  * (cycles, instructions). It is a waste to have multiple tools create
708  * multiple perf_events of "cycles" and occupy multiple PMCs.
709  *
710  * bperf tries to reduce such waste by allowing multiple perf_events of
711  * "cycles" or "instructions" (at different scopes) to share PMCs. Instead
712  * of having each perf-stat session read its own perf_events, bperf uses
713  * BPF programs to read the perf_events and aggregate readings to BPF maps.
714  * Then, the perf-stat session(s) reads the values from these BPF maps.
715  *
716  *                                ||
717  *       shared progs and maps <- || -> per session progs and maps
718  *                                ||
719  *   ---------------              ||
720  *   | perf_events |              ||
721  *   ---------------       fexit  ||      -----------------
722  *          |             --------||----> | follower prog |
723  *       --------------- /        || ---  -----------------
724  * cs -> | leader prog |/         ||/        |         |
725  *   --> ---------------         /||  --------------  ------------------
726  *  /       |         |         / ||  | filter map |  | accum_readings |
727  * /  ------------  ------------  ||  --------------  ------------------
728  * |  | prev map |  | diff map |  ||                        |
729  * |  ------------  ------------  ||                        |
730  *  \                             ||                        |
731  * = \ ==================================================== | ============
732  *    \                                                    /   user space
733  *     \                                                  /
734  *      \                                                /
735  *    BPF_PROG_TEST_RUN                    BPF_MAP_LOOKUP_ELEM
736  *        \                                            /
737  *         \                                          /
738  *          \------  perf-stat ----------------------/
739  *
740  * The figure above shows the architecture of bperf. Note that the figure
741  * is divided into 3 regions: shared progs and maps (top left), per session
742  * progs and maps (top right), and user space (bottom).
743  *
744  * The leader prog is triggered on each context switch (cs). The leader
745  * prog reads perf_events and stores the difference (current_reading -
746  * previous_reading) to the diff map. For the same metric, e.g. "cycles",
747  * multiple perf-stat sessions share the same leader prog.
748  *
749  * Each perf-stat session creates a follower prog as a fexit program to
750  * the leader prog. It is possible to attach up to BPF_MAX_TRAMP_PROGS
751  * (38) follower progs to the same leader prog. The follower prog checks
752  * the current task and processor ID to decide whether to add the value
753  * from the diff map to its accumulated reading map (accum_readings).
754  *
755  * Finally, perf-stat user space reads the value from the accum_readings map.
756  *
757  * Besides context switches, it is also necessary to trigger the leader prog
758  * before perf-stat reads the value. Otherwise, the accum_readings map may
759  * not have the latest reading from the perf_events. This is achieved by
760  * triggering the leader prog on each CPU via sys_bpf(BPF_PROG_TEST_RUN).
761  *
762  * Comment before the definition of struct perf_event_attr_map_entry
763  * describes how different sessions of perf-stat share information about
764  * the leader prog.
765  */
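
/*
 * For illustration, the sharing handshake that bperf__load() implements
 * on top of the pinned attr map boils down to the following (error
 * handling omitted; a sketch of the logic above, not a separate API):
 *
 *	flock(attr_map_fd, LOCK_EX);
 *	if (!bpf_map_lookup_elem(attr_map_fd, &attr, &entry) &&
 *	    (link_fd = bpf_link_get_fd_by_id(entry.link_id)) >= 0) {
 *		// leader already exists: hold fds on its link and prog
 *	} else {
 *		// first session (or leader died): load + attach a new
 *		// leader and publish its link_id/diff_map_id in entry
 *	}
 *	flock(attr_map_fd, LOCK_UN);
 */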
766 
767 struct bpf_counter_ops bperf_ops = {
768 	.load       = bperf__load,
769 	.enable     = bperf__enable,
770 	.disable    = bperf__disable,
771 	.read       = bperf__read,
772 	.install_pe = bperf__install_pe,
773 	.destroy    = bperf__destroy,
774 };
775 
776 extern struct bpf_counter_ops bperf_cgrp_ops;
777 
778 static inline bool bpf_counter_skip(struct evsel *evsel)
779 {
780 	return list_empty(&evsel->bpf_counter_list) &&
781 		evsel->follower_skel == NULL;
782 }
783 
784 int bpf_counter__install_pe(struct evsel *evsel, int cpu_map_idx, int fd)
785 {
786 	if (bpf_counter_skip(evsel))
787 		return 0;
788 	return evsel->bpf_counter_ops->install_pe(evsel, cpu_map_idx, fd);
789 }
790 
791 int bpf_counter__load(struct evsel *evsel, struct target *target)
792 {
793 	if (target->bpf_str)
794 		evsel->bpf_counter_ops = &bpf_program_profiler_ops;
795 	else if (cgrp_event_expanded && target->use_bpf)
796 		evsel->bpf_counter_ops = &bperf_cgrp_ops;
797 	else if (target->use_bpf || evsel->bpf_counter ||
798 		 evsel__match_bpf_counter_events(evsel->name))
799 		evsel->bpf_counter_ops = &bperf_ops;
800 
801 	if (evsel->bpf_counter_ops)
802 		return evsel->bpf_counter_ops->load(evsel, target);
803 	return 0;
804 }
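
/*
 * The ops selection above corresponds to perf-stat usage roughly as
 * follows (a guide, not exhaustive):
 *
 *	perf stat --bpf-prog <prog-id> ...      -> bpf_program_profiler_ops
 *	perf stat --bpf-counters ...            -> bperf_ops
 *	perf stat --bpf-counters \
 *		  --for-each-cgroup A,B ...     -> bperf_cgrp_ops
 */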
805 
806 int bpf_counter__enable(struct evsel *evsel)
807 {
808 	if (bpf_counter_skip(evsel))
809 		return 0;
810 	return evsel->bpf_counter_ops->enable(evsel);
811 }
812 
813 int bpf_counter__disable(struct evsel *evsel)
814 {
815 	if (bpf_counter_skip(evsel))
816 		return 0;
817 	return evsel->bpf_counter_ops->disable(evsel);
818 }
819 
820 int bpf_counter__read(struct evsel *evsel)
821 {
822 	if (bpf_counter_skip(evsel))
823 		return -EAGAIN;
824 	return evsel->bpf_counter_ops->read(evsel);
825 }
826 
827 void bpf_counter__destroy(struct evsel *evsel)
828 {
829 	if (bpf_counter_skip(evsel))
830 		return;
831 	evsel->bpf_counter_ops->destroy(evsel);
832 	evsel->bpf_counter_ops = NULL;
833 }
834