xref: /openbmc/linux/tools/perf/util/stat.c (revision e5f4afbe)
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <linux/err.h>
#include <inttypes.h>
#include <math.h>
#include <string.h>
#include "counts.h"
#include "cpumap.h"
#include "debug.h"
#include "header.h"
#include "stat.h"
#include "session.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#ifdef HAVE_LIBBPF_SUPPORT
#include <bpf/hashmap.h>
#else
#include "util/hashmap.h"
#endif
#include <linux/zalloc.h>

void update_stats(struct stats *stats, u64 val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta*(val - stats->mean);

	if (val > stats->max)
		stats->max = val;

	if (val < stats->min)
		stats->min = val;
}

double avg_stats(struct stats *stats)
{
	return stats->mean;
}

/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
double stddev_stats(struct stats *stats)
{
	double variance, variance_mean;

	if (stats->n < 2)
		return 0.0;

	variance = stats->M2 / (stats->n - 1);
	variance_mean = variance / stats->n;

	return sqrt(variance_mean);
}

double rel_stddev_stats(double stddev, double avg)
{
	double pct = 0.0;

	if (avg)
		pct = 100.0 * stddev/avg;

	return pct;
}
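
/*
 * Worked example for the helpers above (illustrative numbers only): feeding
 * the samples 10, 20 and 30 through update_stats() leaves
 *
 *   n = 3, mean = 20, M2 = 200
 *
 * so stddev_stats() returns sqrt((200 / (3 - 1)) / 3) ~= 5.77, i.e. the
 * standard deviation of the mean, and rel_stddev_stats(5.77, 20) reports it
 * as roughly 28.9% of the average.
 */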

bool __perf_stat_evsel__is(struct evsel *evsel, enum perf_stat_evsel_id id)
{
	struct perf_stat_evsel *ps = evsel->stats;

	return ps->id == id;
}

#define ID(id, name) [PERF_STAT_EVSEL_ID__##id] = #name
static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
	ID(NONE,		x),
	ID(CYCLES_IN_TX,	cpu/cycles-t/),
	ID(TRANSACTION_START,	cpu/tx-start/),
	ID(ELISION_START,	cpu/el-start/),
	ID(CYCLES_IN_TX_CP,	cpu/cycles-ct/),
	ID(TOPDOWN_TOTAL_SLOTS, topdown-total-slots),
	ID(TOPDOWN_SLOTS_ISSUED, topdown-slots-issued),
	ID(TOPDOWN_SLOTS_RETIRED, topdown-slots-retired),
	ID(TOPDOWN_FETCH_BUBBLES, topdown-fetch-bubbles),
	ID(TOPDOWN_RECOVERY_BUBBLES, topdown-recovery-bubbles),
	ID(TOPDOWN_RETIRING, topdown-retiring),
	ID(TOPDOWN_BAD_SPEC, topdown-bad-spec),
	ID(TOPDOWN_FE_BOUND, topdown-fe-bound),
	ID(TOPDOWN_BE_BOUND, topdown-be-bound),
	ID(TOPDOWN_HEAVY_OPS, topdown-heavy-ops),
	ID(TOPDOWN_BR_MISPREDICT, topdown-br-mispredict),
	ID(TOPDOWN_FETCH_LAT, topdown-fetch-lat),
	ID(TOPDOWN_MEM_BOUND, topdown-mem-bound),
	ID(SMI_NUM, msr/smi/),
	ID(APERF, msr/aperf/),
};
#undef ID

static void perf_stat_evsel_id_init(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	int i;

	/* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */

	for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
		if (!strcmp(evsel__name(evsel), id_str[i]) ||
		    (strstr(evsel__name(evsel), id_str[i]) && evsel->pmu_name
		     && strstr(evsel__name(evsel), evsel->pmu_name))) {
			ps->id = i;
			break;
		}
	}
}

static void evsel__reset_aggr_stats(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	struct perf_stat_aggr *aggr = ps->aggr;

	if (aggr)
		memset(aggr, 0, sizeof(*aggr) * ps->nr_aggr);
}

static void evsel__reset_stat_priv(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;

	init_stats(&ps->res_stats);
	evsel__reset_aggr_stats(evsel);
}

static int evsel__alloc_aggr_stats(struct evsel *evsel, int nr_aggr)
{
	struct perf_stat_evsel *ps = evsel->stats;

	if (ps == NULL)
		return 0;

	ps->nr_aggr = nr_aggr;
	ps->aggr = calloc(nr_aggr, sizeof(*ps->aggr));
	if (ps->aggr == NULL)
		return -ENOMEM;

	return 0;
}

int evlist__alloc_aggr_stats(struct evlist *evlist, int nr_aggr)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__alloc_aggr_stats(evsel, nr_aggr) < 0)
			return -1;
	}
	return 0;
}

static int evsel__alloc_stat_priv(struct evsel *evsel, int nr_aggr)
{
	struct perf_stat_evsel *ps;

	ps = zalloc(sizeof(*ps));
	if (ps == NULL)
		return -ENOMEM;

	evsel->stats = ps;

	if (nr_aggr && evsel__alloc_aggr_stats(evsel, nr_aggr) < 0) {
		evsel->stats = NULL;
		free(ps);
		return -ENOMEM;
	}

	perf_stat_evsel_id_init(evsel);
	evsel__reset_stat_priv(evsel);
	return 0;
}

static void evsel__free_stat_priv(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;

	if (ps) {
		zfree(&ps->aggr);
		zfree(&ps->group_data);
	}
	zfree(&evsel->stats);
}

static int evsel__alloc_prev_raw_counts(struct evsel *evsel)
{
	int cpu_map_nr = evsel__nr_cpus(evsel);
	int nthreads = perf_thread_map__nr(evsel->core.threads);
	struct perf_counts *counts;

	counts = perf_counts__new(cpu_map_nr, nthreads);
	if (counts)
		evsel->prev_raw_counts = counts;

	return counts ? 0 : -ENOMEM;
}

static void evsel__free_prev_raw_counts(struct evsel *evsel)
{
	perf_counts__delete(evsel->prev_raw_counts);
	evsel->prev_raw_counts = NULL;
}

static void evsel__reset_prev_raw_counts(struct evsel *evsel)
{
	if (evsel->prev_raw_counts)
		perf_counts__reset(evsel->prev_raw_counts);
}

static int evsel__alloc_stats(struct evsel *evsel, int nr_aggr, bool alloc_raw)
{
	if (evsel__alloc_stat_priv(evsel, nr_aggr) < 0 ||
	    evsel__alloc_counts(evsel) < 0 ||
	    (alloc_raw && evsel__alloc_prev_raw_counts(evsel) < 0))
		return -ENOMEM;

	return 0;
}

int evlist__alloc_stats(struct perf_stat_config *config,
			struct evlist *evlist, bool alloc_raw)
{
	struct evsel *evsel;
	int nr_aggr = 0;

	if (config && config->aggr_map)
		nr_aggr = config->aggr_map->nr;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__alloc_stats(evsel, nr_aggr, alloc_raw))
			goto out_free;
	}

	return 0;

out_free:
	evlist__free_stats(evlist);
	return -1;
}

void evlist__free_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		evsel__free_stat_priv(evsel);
		evsel__free_counts(evsel);
		evsel__free_prev_raw_counts(evsel);
	}
}

void evlist__reset_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		evsel__reset_stat_priv(evsel);
		evsel__reset_counts(evsel);
	}
}

void evlist__reset_aggr_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__reset_aggr_stats(evsel);
}

void evlist__reset_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__reset_prev_raw_counts(evsel);
}

static void evsel__copy_prev_raw_counts(struct evsel *evsel)
{
	int idx, nthreads = perf_thread_map__nr(evsel->core.threads);

	for (int thread = 0; thread < nthreads; thread++) {
		perf_cpu_map__for_each_idx(idx, evsel__cpus(evsel)) {
			*perf_counts(evsel->counts, idx, thread) =
				*perf_counts(evsel->prev_raw_counts, idx, thread);
		}
	}
}

void evlist__copy_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__copy_prev_raw_counts(evsel);
}

static size_t pkg_id_hash(const void *__key, void *ctx __maybe_unused)
{
	uint64_t *key = (uint64_t *) __key;

	return *key & 0xffffffff;
}

static bool pkg_id_equal(const void *__key1, const void *__key2,
			 void *ctx __maybe_unused)
{
	uint64_t *key1 = (uint64_t *) __key1;
	uint64_t *key2 = (uint64_t *) __key2;

	return *key1 == *key2;
}

static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals,
			 int cpu_map_idx, bool *skip)
{
	struct hashmap *mask = counter->per_pkg_mask;
	struct perf_cpu_map *cpus = evsel__cpus(counter);
	struct perf_cpu cpu = perf_cpu_map__cpu(cpus, cpu_map_idx);
	int s, d, ret = 0;
	uint64_t *key;

	*skip = false;

	if (!counter->per_pkg)
		return 0;

	if (perf_cpu_map__empty(cpus))
		return 0;

	if (!mask) {
		mask = hashmap__new(pkg_id_hash, pkg_id_equal, NULL);
		if (IS_ERR(mask))
			return -ENOMEM;

		counter->per_pkg_mask = mask;
	}

	/*
	 * We do not consider an event that has not run to be a good
	 * instance for marking a package as used (skip=1). Otherwise
	 * we may run into a situation where the first CPU in a package
	 * is not running anything, yet the second is, and this function
	 * would mark the package as used after the first CPU and would
	 * not read the values from the second CPU.
	 */
	if (!(vals->run && vals->ena))
		return 0;

	s = cpu__get_socket_id(cpu);
	if (s < 0)
		return -1;

	/*
	 * On a multi-die system, die_id > 0; on a system without dies,
	 * die_id = 0.  We use hashmap(socket, die) to track which
	 * socket+die pairs have already been counted.
	 */
	d = cpu__get_die_id(cpu);
	if (d < 0)
		return -1;

	key = malloc(sizeof(*key));
	if (!key)
		return -ENOMEM;

	*key = (uint64_t)d << 32 | s;
	if (hashmap__find(mask, (void *)key, NULL)) {
		*skip = true;
		free(key);
	} else
		ret = hashmap__add(mask, (void *)key, (void *)1);

	return ret;
}
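
/*
 * Illustration of the key packing above (example values only): the die id
 * goes into the upper 32 bits and the socket id into the lower 32 bits, so
 * socket 1 / die 2 hashes as
 *
 *   key = (uint64_t)2 << 32 | 1 = 0x0000000200000001
 *
 * which lets the hashmap record each socket+die pair exactly once per read.
 */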

static bool evsel__count_has_error(struct evsel *evsel,
				   struct perf_counts_values *count,
				   struct perf_stat_config *config)
{
	/* the evsel has already failed */
	if (evsel->err || evsel->counts->scaled == -1)
		return true;

	/* this is meaningful for CPU aggregation modes only */
	if (config->aggr_mode == AGGR_GLOBAL)
		return false;

	/* it's considered ok when it actually ran */
	if (count->ena != 0 && count->run != 0)
		return false;

	return true;
}

static int
process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
		       int cpu_map_idx, int thread,
		       struct perf_counts_values *count)
{
	struct perf_stat_evsel *ps = evsel->stats;
	static struct perf_counts_values zero;
	bool skip = false;

	if (check_per_pkg(evsel, count, cpu_map_idx, &skip)) {
		pr_err("failed to read per-pkg counter\n");
		return -1;
	}

	if (skip)
		count = &zero;

	if (!evsel->snapshot)
		evsel__compute_deltas(evsel, cpu_map_idx, thread, count);
	perf_counts_values__scale(count, config->scale, NULL);

	if (config->aggr_mode == AGGR_THREAD) {
		struct perf_counts_values *aggr_counts = &ps->aggr[thread].counts;

		/*
		 * Skip zero values when --per-thread is enabled globally;
		 * otherwise the output is flooded with zero counts.
		 */
		if (count->val == 0 && config->system_wide)
			return 0;

		ps->aggr[thread].nr++;

		aggr_counts->val += count->val;
		aggr_counts->ena += count->ena;
		aggr_counts->run += count->run;
		return 0;
	}

	if (ps->aggr) {
		struct perf_cpu cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx);
		struct aggr_cpu_id aggr_id = config->aggr_get_id(config, cpu);
		struct perf_stat_aggr *ps_aggr;
		int i;

		for (i = 0; i < ps->nr_aggr; i++) {
			if (!aggr_cpu_id__equal(&aggr_id, &config->aggr_map->map[i]))
				continue;

			ps_aggr = &ps->aggr[i];
			ps_aggr->nr++;

			/*
			 * When any result is bad, zero them all to give
			 * consistent output in interval mode.  But per-task
			 * counters can have 0 enabled time when some tasks
			 * are idle.
			 */
			if (evsel__count_has_error(evsel, count, config) && !ps_aggr->failed) {
				ps_aggr->counts.val = 0;
				ps_aggr->counts.ena = 0;
				ps_aggr->counts.run = 0;
				ps_aggr->failed = true;
			}

			if (!ps_aggr->failed) {
				ps_aggr->counts.val += count->val;
				ps_aggr->counts.ena += count->ena;
				ps_aggr->counts.run += count->run;
			}
			break;
		}
	}

	return 0;
}
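
/*
 * Sketch of the scaling step above (illustrative numbers only): when an
 * event was multiplexed, perf_counts_values__scale() (in libperf)
 * extrapolates the raw value by the enabled/running ratio, e.g.
 *
 *   val = 200, ena = 1000000, run = 500000  =>  val ~= 200 * 1000000 / 500000 = 400
 *
 * while a counter that never ran (run == 0) has its value zeroed instead.
 */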

static int process_counter_maps(struct perf_stat_config *config,
				struct evsel *counter)
{
	int nthreads = perf_thread_map__nr(counter->core.threads);
	int ncpus = evsel__nr_cpus(counter);
	int idx, thread;

	for (thread = 0; thread < nthreads; thread++) {
		for (idx = 0; idx < ncpus; idx++) {
			if (process_counter_values(config, counter, idx, thread,
						   perf_counts(counter->counts, idx, thread)))
				return -1;
		}
	}

	return 0;
}

int perf_stat_process_counter(struct perf_stat_config *config,
			      struct evsel *counter)
{
	struct perf_stat_evsel *ps = counter->stats;
	u64 *count;
	int ret;

	if (counter->per_pkg)
		evsel__zero_per_pkg(counter);

	ret = process_counter_maps(config, counter);
	if (ret)
		return ret;

	if (config->aggr_mode != AGGR_GLOBAL)
		return 0;

	/*
	 * GLOBAL aggregation mode has only a single set of aggregated counts,
	 * so we can use ps->aggr[0] as the actual output.
	 */
	count = ps->aggr[0].counts.values;
	update_stats(&ps->res_stats, *count);

	if (verbose > 0) {
		fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			evsel__name(counter), count[0], count[1], count[2]);
	}

	return 0;
}
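
/*
 * Note: the three values dumped in verbose mode above are the raw
 * { value, time_enabled, time_running } triplet of the globally aggregated
 * count (perf_counts_values lays them out as values[0..2]).
 */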

static int evsel__merge_aggr_counters(struct evsel *evsel, struct evsel *alias)
{
	struct perf_stat_evsel *ps_a = evsel->stats;
	struct perf_stat_evsel *ps_b = alias->stats;
	int i;

	if (ps_a->aggr == NULL && ps_b->aggr == NULL)
		return 0;

	if (ps_a->nr_aggr != ps_b->nr_aggr) {
		pr_err("Unmatched aggregation mode between aliases\n");
		return -1;
	}

	for (i = 0; i < ps_a->nr_aggr; i++) {
		struct perf_counts_values *aggr_counts_a = &ps_a->aggr[i].counts;
		struct perf_counts_values *aggr_counts_b = &ps_b->aggr[i].counts;

		/* NB: don't increase aggr.nr for aliases */

		aggr_counts_a->val += aggr_counts_b->val;
		aggr_counts_a->ena += aggr_counts_b->ena;
		aggr_counts_a->run += aggr_counts_b->run;
	}

	return 0;
}

/* events should have the same name, scale, unit, cgroup but on different PMUs */
static bool evsel__is_alias(struct evsel *evsel_a, struct evsel *evsel_b)
{
	if (strcmp(evsel__name(evsel_a), evsel__name(evsel_b)))
		return false;

	if (evsel_a->scale != evsel_b->scale)
		return false;

	if (evsel_a->cgrp != evsel_b->cgrp)
		return false;

	if (strcmp(evsel_a->unit, evsel_b->unit))
		return false;

	if (evsel__is_clock(evsel_a) != evsel__is_clock(evsel_b))
		return false;

	return !!strcmp(evsel_a->pmu_name, evsel_b->pmu_name);
}

static void evsel__merge_aliases(struct evsel *evsel)
{
	struct evlist *evlist = evsel->evlist;
	struct evsel *alias;

	alias = list_prepare_entry(evsel, &(evlist->core.entries), core.node);
	list_for_each_entry_continue(alias, &evlist->core.entries, core.node) {
		/* Merge the same events on different PMUs. */
		if (evsel__is_alias(evsel, alias)) {
			evsel__merge_aggr_counters(evsel, alias);
			alias->merged_stat = true;
		}
	}
}

static bool evsel__should_merge_hybrid(const struct evsel *evsel,
				       const struct perf_stat_config *config)
{
	return config->hybrid_merge && evsel__is_hybrid(evsel);
}

static void evsel__merge_stats(struct evsel *evsel, struct perf_stat_config *config)
{
	/* this evsel is already merged */
	if (evsel->merged_stat)
		return;

	if (evsel->auto_merge_stats || evsel__should_merge_hybrid(evsel, config))
		evsel__merge_aliases(evsel);
}

/* merge the same uncore and hybrid events if requested */
void perf_stat_merge_counters(struct perf_stat_config *config, struct evlist *evlist)
{
	struct evsel *evsel;

	if (config->no_merge)
		return;

	evlist__for_each_entry(evlist, evsel)
		evsel__merge_stats(evsel, config);
}
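
/*
 * Example of the merging above (hypothetical event names): an uncore event
 * such as uncore_imc/cas_count_read/ is typically opened once per IMC PMU
 * instance (uncore_imc_0, uncore_imc_1, ...).  Those per-PMU evsels are
 * treated as aliases of the first one, their aggregated counts are summed
 * into it, and normally only that merged result is reported unless
 * --no-merge is used.
 */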

static void evsel__update_percore_stats(struct evsel *evsel, struct aggr_cpu_id *core_id)
{
	struct perf_stat_evsel *ps = evsel->stats;
	struct perf_counts_values counts = { 0, };
	struct aggr_cpu_id id;
	struct perf_cpu cpu;
	int idx;

	/* collect per-core counts */
	perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
		struct perf_stat_aggr *aggr = &ps->aggr[idx];

		id = aggr_cpu_id__core(cpu, NULL);
		if (!aggr_cpu_id__equal(core_id, &id))
			continue;

		counts.val += aggr->counts.val;
		counts.ena += aggr->counts.ena;
		counts.run += aggr->counts.run;
	}

	/* update aggregated per-core counts for each CPU */
	perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
		struct perf_stat_aggr *aggr = &ps->aggr[idx];

		id = aggr_cpu_id__core(cpu, NULL);
		if (!aggr_cpu_id__equal(core_id, &id))
			continue;

		aggr->counts.val = counts.val;
		aggr->counts.ena = counts.ena;
		aggr->counts.run = counts.run;

		aggr->used = true;
	}
}

/* we have an aggr_map for cpu, but want to aggregate the counters per-core */
static void evsel__process_percore(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	struct aggr_cpu_id core_id;
	struct perf_cpu cpu;
	int idx;

	if (!evsel->percore)
		return;

	perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
		struct perf_stat_aggr *aggr = &ps->aggr[idx];

		if (aggr->used)
			continue;

		core_id = aggr_cpu_id__core(cpu, NULL);
		evsel__update_percore_stats(evsel, &core_id);
	}
}

/* process cpu stats on per-core events */
void perf_stat_process_percore(struct perf_stat_config *config, struct evlist *evlist)
{
	struct evsel *evsel;

	if (config->aggr_mode != AGGR_NONE)
		return;

	evlist__for_each_entry(evlist, evsel)
		evsel__process_percore(evsel);
}
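
/*
 * Illustration of the per-core processing above (illustrative numbers): for
 * an event marked percore while counting per CPU (AGGR_NONE), if CPU0 and
 * CPU1 are SMT siblings on the same core and counted 100 and 150 events,
 * both per-CPU slots end up holding the core total of 250, so every CPU of
 * that core reports the same aggregated value.
 */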

static void evsel__update_shadow_stats(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	int i;

	if (ps->aggr == NULL)
		return;

	for (i = 0; i < ps->nr_aggr; i++) {
		struct perf_counts_values *aggr_counts = &ps->aggr[i].counts;

		perf_stat__update_shadow_stats(evsel, aggr_counts->val, i, &rt_stat);
	}
}

void perf_stat_process_shadow_stats(struct perf_stat_config *config __maybe_unused,
				    struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__update_shadow_stats(evsel);
}

int perf_event__process_stat_event(struct perf_session *session,
				   union perf_event *event)
{
	struct perf_counts_values count, *ptr;
	struct perf_record_stat *st = &event->stat;
	struct evsel *counter;
	int cpu_map_idx;

	count.val = st->val;
	count.ena = st->ena;
	count.run = st->run;

	counter = evlist__id2evsel(session->evlist, st->id);
	if (!counter) {
		pr_err("Failed to resolve counter for stat event.\n");
		return -EINVAL;
	}
	cpu_map_idx = perf_cpu_map__idx(evsel__cpus(counter), (struct perf_cpu){.cpu = st->cpu});
	if (cpu_map_idx == -1) {
		pr_err("Invalid CPU %d for event %s.\n", st->cpu, evsel__name(counter));
		return -EINVAL;
	}
	ptr = perf_counts(counter->counts, cpu_map_idx, st->thread);
	if (ptr == NULL) {
		pr_err("Failed to find perf count for CPU %d thread %d on event %s.\n",
			st->cpu, st->thread, evsel__name(counter));
		return -EINVAL;
	}
	*ptr = count;
	counter->supported = true;
	return 0;
}

size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp)
{
	struct perf_record_stat *st = (struct perf_record_stat *)event;
	size_t ret;

	ret  = fprintf(fp, "\n... id %" PRI_lu64 ", cpu %d, thread %d\n",
		       st->id, st->cpu, st->thread);
	ret += fprintf(fp, "... value %" PRI_lu64 ", enabled %" PRI_lu64 ", running %" PRI_lu64 "\n",
		       st->val, st->ena, st->run);

	return ret;
}

size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
{
	struct perf_record_stat_round *rd = (struct perf_record_stat_round *)event;
	size_t ret;

	ret = fprintf(fp, "\n... time %" PRI_lu64 ", type %s\n", rd->time,
		      rd->type == PERF_STAT_ROUND_TYPE__FINAL ? "FINAL" : "INTERVAL");

	return ret;
}

size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
{
	struct perf_stat_config sc;
	size_t ret;

	perf_event__read_stat_config(&sc, &event->stat_config);

	ret  = fprintf(fp, "\n");
	ret += fprintf(fp, "... aggr_mode %d\n", sc.aggr_mode);
	ret += fprintf(fp, "... scale     %d\n", sc.scale);
	ret += fprintf(fp, "... interval  %u\n", sc.interval);

	return ret;
}

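
/*
 * For reference (see perf_event_open(2)): with the read_format set up below,
 * a read of a group leader returns roughly
 *
 *   struct {
 *           u64 nr;                                 // number of members
 *           u64 time_enabled;                       // PERF_FORMAT_TOTAL_TIME_ENABLED
 *           u64 time_running;                       // PERF_FORMAT_TOTAL_TIME_RUNNING
 *           struct { u64 value; u64 id; } cnt[nr];  // PERF_FORMAT_GROUP | PERF_FORMAT_ID
 *   };
 *
 * while a lone counter just returns { value, time_enabled, time_running }.
 */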
int create_perf_stat_counter(struct evsel *evsel,
			     struct perf_stat_config *config,
			     struct target *target,
			     int cpu_map_idx)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	struct evsel *leader = evsel__leader(evsel);

	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			    PERF_FORMAT_TOTAL_TIME_RUNNING;

	/*
	 * The event is part of a non-trivial group; enable the group read
	 * (for the leader) and ID retrieval for all members.
	 */
	if (leader->core.nr_members > 1)
		attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;

	attr->inherit = !config->no_inherit && list_empty(&evsel->bpf_counter_list);

	/*
	 * Some events get initialized with sample_(period/type) set,
	 * like tracepoints.  Clear them for counting.
	 */
	attr->sample_period = 0;

	if (config->identifier)
		attr->sample_type = PERF_SAMPLE_IDENTIFIER;

	if (config->all_user) {
		attr->exclude_kernel = 1;
		attr->exclude_user   = 0;
	}

	if (config->all_kernel) {
		attr->exclude_kernel = 0;
		attr->exclude_user   = 1;
	}

	/*
	 * Disable all counters initially; they will be enabled either
	 * manually by us or by the kernel via enable_on_exec set later.
	 */
	if (evsel__is_group_leader(evsel)) {
		attr->disabled = 1;

		/*
		 * In case of initial_delay, we enable tracee
		 * events manually.
		 */
		if (target__none(target) && !config->initial_delay)
			attr->enable_on_exec = 1;
	}

	if (target__has_cpu(target) && !target__has_per_thread(target))
		return evsel__open_per_cpu(evsel, evsel__cpus(evsel), cpu_map_idx);

	return evsel__open_per_thread(evsel, evsel->core.threads);
}
863