// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <linux/err.h>
#include <inttypes.h>
#include <math.h>
#include <string.h>
#include "counts.h"
#include "cpumap.h"
#include "debug.h"
#include "header.h"
#include "stat.h"
#include "session.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "util/hashmap.h"
#include <linux/zalloc.h>
19
void update_stats(struct stats *stats, u64 val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta*(val - stats->mean);

	if (val > stats->max)
		stats->max = val;

	if (val < stats->min)
		stats->min = val;
}
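
/*
 * Note: update_stats() implements Welford's online algorithm: the mean and
 * M2 (the sum of squared distances from the current mean) are maintained
 * incrementally, so no per-sample history has to be stored:
 *
 *	mean_n = mean_{n-1} + (x_n - mean_{n-1}) / n
 *	M2_n   = M2_{n-1} + (x_n - mean_{n-1}) * (x_n - mean_n)
 */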

double avg_stats(struct stats *stats)
{
	return stats->mean;
}

/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
double stddev_stats(struct stats *stats)
{
	double variance, variance_mean;

	if (stats->n < 2)
		return 0.0;

	variance = stats->M2 / (stats->n - 1);
	variance_mean = variance / stats->n;

	return sqrt(variance_mean);
}
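
/*
 * A minimal (hypothetical) usage sketch of the stats helpers above, e.g.
 * to average a measurement over repeated runs:
 *
 *	struct stats st;
 *
 *	init_stats(&st);
 *	for (i = 0; i < nr_runs; i++)
 *		update_stats(&st, val[i]);
 *	printf("%f +- %f\n", avg_stats(&st), stddev_stats(&st));
 *
 * "nr_runs" and "val" are illustrative names only.
 */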

double rel_stddev_stats(double stddev, double avg)
{
	double pct = 0.0;

	if (avg)
		pct = 100.0 * stddev/avg;

	return pct;
}
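
/* E.g. stddev = 10.0 with avg = 2000.0 gives a relative std dev of 0.5%. */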

static void evsel__reset_aggr_stats(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	struct perf_stat_aggr *aggr = ps->aggr;

	if (aggr)
		memset(aggr, 0, sizeof(*aggr) * ps->nr_aggr);
}

static void evsel__reset_stat_priv(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;

	init_stats(&ps->res_stats);
	evsel__reset_aggr_stats(evsel);
}

static int evsel__alloc_aggr_stats(struct evsel *evsel, int nr_aggr)
{
	struct perf_stat_evsel *ps = evsel->stats;

	if (ps == NULL)
		return 0;

	ps->nr_aggr = nr_aggr;
	ps->aggr = calloc(nr_aggr, sizeof(*ps->aggr));
	if (ps->aggr == NULL)
		return -ENOMEM;

	return 0;
}

int evlist__alloc_aggr_stats(struct evlist *evlist, int nr_aggr)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__alloc_aggr_stats(evsel, nr_aggr) < 0)
			return -1;
	}
	return 0;
}

static int evsel__alloc_stat_priv(struct evsel *evsel, int nr_aggr)
{
	struct perf_stat_evsel *ps;

	ps = zalloc(sizeof(*ps));
	if (ps == NULL)
		return -ENOMEM;

	evsel->stats = ps;

	if (nr_aggr && evsel__alloc_aggr_stats(evsel, nr_aggr) < 0) {
		evsel->stats = NULL;
		free(ps);
		return -ENOMEM;
	}

	evsel__reset_stat_priv(evsel);
	return 0;
}

static void evsel__free_stat_priv(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;

	if (ps) {
		zfree(&ps->aggr);
		zfree(&ps->group_data);
	}
	zfree(&evsel->stats);
}

static int evsel__alloc_prev_raw_counts(struct evsel *evsel)
{
	int cpu_map_nr = evsel__nr_cpus(evsel);
	int nthreads = perf_thread_map__nr(evsel->core.threads);
	struct perf_counts *counts;

	counts = perf_counts__new(cpu_map_nr, nthreads);
	if (counts)
		evsel->prev_raw_counts = counts;

	return counts ? 0 : -ENOMEM;
}

static void evsel__free_prev_raw_counts(struct evsel *evsel)
{
	perf_counts__delete(evsel->prev_raw_counts);
	evsel->prev_raw_counts = NULL;
}

static void evsel__reset_prev_raw_counts(struct evsel *evsel)
{
	if (evsel->prev_raw_counts)
		perf_counts__reset(evsel->prev_raw_counts);
}

static int evsel__alloc_stats(struct evsel *evsel, int nr_aggr, bool alloc_raw)
{
	if (evsel__alloc_stat_priv(evsel, nr_aggr) < 0 ||
	    evsel__alloc_counts(evsel) < 0 ||
	    (alloc_raw && evsel__alloc_prev_raw_counts(evsel) < 0))
		return -ENOMEM;

	return 0;
}
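
/*
 * Summary of the per-evsel storage set up above: evsel->stats (with
 * nr_aggr aggregation buckets when an aggr_map is in use), evsel->counts
 * for the current readings, and optionally evsel->prev_raw_counts so
 * that evsel__compute_deltas() can report differences between reads.
 */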

int evlist__alloc_stats(struct perf_stat_config *config,
			struct evlist *evlist, bool alloc_raw)
{
	struct evsel *evsel;
	int nr_aggr = 0;

	if (config && config->aggr_map)
		nr_aggr = config->aggr_map->nr;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__alloc_stats(evsel, nr_aggr, alloc_raw))
			goto out_free;
	}

	return 0;

out_free:
	evlist__free_stats(evlist);
	return -1;
}

void evlist__free_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		evsel__free_stat_priv(evsel);
		evsel__free_counts(evsel);
		evsel__free_prev_raw_counts(evsel);
	}
}

void evlist__reset_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		evsel__reset_stat_priv(evsel);
		evsel__reset_counts(evsel);
	}
}

void evlist__reset_aggr_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__reset_aggr_stats(evsel);
}

void evlist__reset_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__reset_prev_raw_counts(evsel);
}

static void evsel__copy_prev_raw_counts(struct evsel *evsel)
{
	int idx, nthreads = perf_thread_map__nr(evsel->core.threads);

	for (int thread = 0; thread < nthreads; thread++) {
		perf_cpu_map__for_each_idx(idx, evsel__cpus(evsel)) {
			*perf_counts(evsel->counts, idx, thread) =
				*perf_counts(evsel->prev_raw_counts, idx, thread);
		}
	}
}

void evlist__copy_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__copy_prev_raw_counts(evsel);
}

static void evsel__copy_res_stats(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;

	/*
	 * For GLOBAL aggregation mode, the counts of each run are
	 * accumulated in evsel->stats.res_stats. See
	 * perf_stat_process_counter().
	 */
	*ps->aggr[0].counts.values = avg_stats(&ps->res_stats);
}

void evlist__copy_res_stats(struct perf_stat_config *config, struct evlist *evlist)
{
	struct evsel *evsel;

	if (config->aggr_mode != AGGR_GLOBAL)
		return;

	evlist__for_each_entry(evlist, evsel)
		evsel__copy_res_stats(evsel);
}

static size_t pkg_id_hash(long __key, void *ctx __maybe_unused)
{
	uint64_t *key = (uint64_t *) __key;

	return *key & 0xffffffff;
}

static bool pkg_id_equal(long __key1, long __key2, void *ctx __maybe_unused)
{
	uint64_t *key1 = (uint64_t *) __key1;
	uint64_t *key2 = (uint64_t *) __key2;

	return *key1 == *key2;
}
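
/*
 * The hashmap key is a packed (die, socket) pair, built in check_per_pkg()
 * below as:
 *
 *	*key = (uint64_t)die_id << 32 | socket_id;
 *
 * so pkg_id_hash() simply folds it down to the low 32 bits.
 */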

static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals,
			 int cpu_map_idx, bool *skip)
{
	struct hashmap *mask = counter->per_pkg_mask;
	struct perf_cpu_map *cpus = evsel__cpus(counter);
	struct perf_cpu cpu = perf_cpu_map__cpu(cpus, cpu_map_idx);
	int s, d, ret = 0;
	uint64_t *key;

	*skip = false;

	if (!counter->per_pkg)
		return 0;

	if (perf_cpu_map__empty(cpus))
		return 0;

	if (!mask) {
		mask = hashmap__new(pkg_id_hash, pkg_id_equal, NULL);
		if (IS_ERR(mask))
			return -ENOMEM;

		counter->per_pkg_mask = mask;
	}

	/*
	 * We do not consider an event that has not run as a good
	 * instance to mark a package as used (skip = true). Otherwise
	 * we may run into a situation where the first CPU in a package
	 * is not running anything, yet the second is, and this function
	 * would mark the package as used after the first CPU and would
	 * not read the values from the second CPU.
	 */
	if (!(vals->run && vals->ena))
		return 0;

	s = cpu__get_socket_id(cpu);
	if (s < 0)
		return -1;

	/*
	 * On a multi-die system, die_id > 0; on a single-die system,
	 * die_id = 0. Use a hashmap keyed on (socket, die) to track
	 * which socket+die pairs have already been counted.
	 */
	d = cpu__get_die_id(cpu);
	if (d < 0)
		return -1;

	key = malloc(sizeof(*key));
	if (!key)
		return -ENOMEM;

	*key = (uint64_t)d << 32 | s;
	if (hashmap__find(mask, key, NULL)) {
		*skip = true;
		free(key);
	} else
		ret = hashmap__add(mask, key, 1);

	return ret;
}
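
/*
 * Example: for a per-pkg event (e.g. an uncore-style counter that counts
 * once per package) on a two-socket system, only the first counted CPU
 * of each socket/die pair contributes a value; check_per_pkg() sets
 * *skip for every further CPU so the package count is not added twice.
 */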

static bool evsel__count_has_error(struct evsel *evsel,
				   struct perf_counts_values *count,
				   struct perf_stat_config *config)
{
	/* the evsel has already failed */
	if (evsel->err || evsel->counts->scaled == -1)
		return true;

	/* this is meaningful for CPU aggregation modes only */
	if (config->aggr_mode == AGGR_GLOBAL)
		return false;

	/* it's considered ok when it actually ran */
	if (count->ena != 0 && count->run != 0)
		return false;

	return true;
}

static int
process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
		       int cpu_map_idx, int thread,
		       struct perf_counts_values *count)
{
	struct perf_stat_evsel *ps = evsel->stats;
	static struct perf_counts_values zero;
	bool skip = false;

	if (check_per_pkg(evsel, count, cpu_map_idx, &skip)) {
		pr_err("failed to read per-pkg counter\n");
		return -1;
	}

	if (skip)
		count = &zero;

	if (!evsel->snapshot)
		evsel__compute_deltas(evsel, cpu_map_idx, thread, count);
	perf_counts_values__scale(count, config->scale, NULL);

	if (config->aggr_mode == AGGR_THREAD) {
		struct perf_counts_values *aggr_counts = &ps->aggr[thread].counts;

		/*
		 * Skip zero counts when --per-thread is enabled globally,
		 * otherwise the output is flooded with zero lines.
		 */
		if (count->val == 0 && config->system_wide)
			return 0;

		ps->aggr[thread].nr++;

		aggr_counts->val += count->val;
		aggr_counts->ena += count->ena;
		aggr_counts->run += count->run;
		return 0;
	}

	if (ps->aggr) {
		struct perf_cpu cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx);
		struct aggr_cpu_id aggr_id = config->aggr_get_id(config, cpu);
		struct perf_stat_aggr *ps_aggr;
		int i;

		for (i = 0; i < ps->nr_aggr; i++) {
			if (!aggr_cpu_id__equal(&aggr_id, &config->aggr_map->map[i]))
				continue;

			ps_aggr = &ps->aggr[i];
			ps_aggr->nr++;

			/*
			 * When any result is bad, mark them all failed to
			 * give consistent output in interval mode. But note
			 * that per-task counters can have 0 enabled time
			 * when some tasks are idle.
			 */
			if (evsel__count_has_error(evsel, count, config) && !ps_aggr->failed) {
				ps_aggr->counts.val = 0;
				ps_aggr->counts.ena = 0;
				ps_aggr->counts.run = 0;
				ps_aggr->failed = true;
			}

			if (!ps_aggr->failed) {
				ps_aggr->counts.val += count->val;
				ps_aggr->counts.ena += count->ena;
				ps_aggr->counts.run += count->run;
			}
			break;
		}
	}

	return 0;
}
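
/*
 * In short: each raw (val, ena, run) reading is turned into a delta
 * against the previous read (unless evsel->snapshot), scaled, and then
 * accumulated into the matching bucket, either per thread (AGGR_THREAD)
 * or per aggr_cpu_id.
 */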

static int process_counter_maps(struct perf_stat_config *config,
				struct evsel *counter)
{
	int nthreads = perf_thread_map__nr(counter->core.threads);
	int ncpus = evsel__nr_cpus(counter);
	int idx, thread;

	for (thread = 0; thread < nthreads; thread++) {
		for (idx = 0; idx < ncpus; idx++) {
			if (process_counter_values(config, counter, idx, thread,
						   perf_counts(counter->counts, idx, thread)))
				return -1;
		}
	}

	return 0;
}

int perf_stat_process_counter(struct perf_stat_config *config,
			      struct evsel *counter)
{
	struct perf_stat_evsel *ps = counter->stats;
	u64 *count;
	int ret;

	if (counter->per_pkg)
		evsel__zero_per_pkg(counter);

	ret = process_counter_maps(config, counter);
	if (ret)
		return ret;

	if (config->aggr_mode != AGGR_GLOBAL)
		return 0;

	/*
	 * GLOBAL aggregation mode has only a single set of aggregated
	 * counts, so ps->aggr[0] can be used as the actual output.
	 */
	count = ps->aggr[0].counts.values;
	update_stats(&ps->res_stats, *count);

	if (verbose > 0) {
		fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			evsel__name(counter), count[0], count[1], count[2]);
	}

	return 0;
}
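
/*
 * The verbose line above dumps the three members of ps->aggr[0].counts,
 * i.e. aggregated value, enabled time and running time, e.g.:
 *
 *	cycles: 1234567 2000000 2000000
 *
 * (values are illustrative only)
 */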

static int evsel__merge_aggr_counters(struct evsel *evsel, struct evsel *alias)
{
	struct perf_stat_evsel *ps_a = evsel->stats;
	struct perf_stat_evsel *ps_b = alias->stats;
	int i;

	if (ps_a->aggr == NULL && ps_b->aggr == NULL)
		return 0;

	if (ps_a->nr_aggr != ps_b->nr_aggr) {
		pr_err("Unmatched aggregation mode between aliases\n");
		return -1;
	}

	for (i = 0; i < ps_a->nr_aggr; i++) {
		struct perf_counts_values *aggr_counts_a = &ps_a->aggr[i].counts;
		struct perf_counts_values *aggr_counts_b = &ps_b->aggr[i].counts;

		/* NB: don't increase aggr.nr for aliases */

		aggr_counts_a->val += aggr_counts_b->val;
		aggr_counts_a->ena += aggr_counts_b->ena;
		aggr_counts_a->run += aggr_counts_b->run;
	}

	return 0;
}

/* events should have the same name, scale, unit and cgroup, but be on different PMUs */
static bool evsel__is_alias(struct evsel *evsel_a, struct evsel *evsel_b)
{
	if (strcmp(evsel__name(evsel_a), evsel__name(evsel_b)))
		return false;

	if (evsel_a->scale != evsel_b->scale)
		return false;

	if (evsel_a->cgrp != evsel_b->cgrp)
		return false;

	if (strcmp(evsel_a->unit, evsel_b->unit))
		return false;

	if (evsel__is_clock(evsel_a) != evsel__is_clock(evsel_b))
		return false;

	return !!strcmp(evsel_a->pmu_name, evsel_b->pmu_name);
}
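
/*
 * E.g. (hypothetical) the same "cas_count_read" event opened on the
 * uncore_imc_0 and uncore_imc_1 PMUs: identical name, scale, unit and
 * cgroup, differing only in pmu_name, so the two evsels alias each other.
 */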

static void evsel__merge_aliases(struct evsel *evsel)
{
	struct evlist *evlist = evsel->evlist;
	struct evsel *alias;

	alias = list_prepare_entry(evsel, &(evlist->core.entries), core.node);
	list_for_each_entry_continue(alias, &evlist->core.entries, core.node) {
		/* Merge the same events on different PMUs. */
		if (evsel__is_alias(evsel, alias)) {
			evsel__merge_aggr_counters(evsel, alias);
			alias->merged_stat = true;
		}
	}
}

static bool evsel__should_merge_hybrid(const struct evsel *evsel,
				       const struct perf_stat_config *config)
{
	return config->hybrid_merge && evsel__is_hybrid(evsel);
}

static void evsel__merge_stats(struct evsel *evsel, struct perf_stat_config *config)
{
	/* this evsel is already merged */
	if (evsel->merged_stat)
		return;

	if (evsel->auto_merge_stats || evsel__should_merge_hybrid(evsel, config))
		evsel__merge_aliases(evsel);
}

/* merge the same uncore and hybrid events if requested */
void perf_stat_merge_counters(struct perf_stat_config *config, struct evlist *evlist)
{
	struct evsel *evsel;

	if (config->no_merge)
		return;

	evlist__for_each_entry(evlist, evsel)
		evsel__merge_stats(evsel, config);
}

static void evsel__update_percore_stats(struct evsel *evsel, struct aggr_cpu_id *core_id)
{
	struct perf_stat_evsel *ps = evsel->stats;
	struct perf_counts_values counts = { 0, };
	struct aggr_cpu_id id;
	struct perf_cpu cpu;
	int idx;

	/* collect per-core counts */
	perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
		struct perf_stat_aggr *aggr = &ps->aggr[idx];

		id = aggr_cpu_id__core(cpu, NULL);
		if (!aggr_cpu_id__equal(core_id, &id))
			continue;

		counts.val += aggr->counts.val;
		counts.ena += aggr->counts.ena;
		counts.run += aggr->counts.run;
	}

	/* update aggregated per-core counts for each CPU */
	perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
		struct perf_stat_aggr *aggr = &ps->aggr[idx];

		id = aggr_cpu_id__core(cpu, NULL);
		if (!aggr_cpu_id__equal(core_id, &id))
			continue;

		aggr->counts.val = counts.val;
		aggr->counts.ena = counts.ena;
		aggr->counts.run = counts.run;

		aggr->used = true;
	}
}

/* we have an aggr_map for cpu, but want to aggregate the counters per-core */
static void evsel__process_percore(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	struct aggr_cpu_id core_id;
	struct perf_cpu cpu;
	int idx;

	if (!evsel->percore)
		return;

	perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
		struct perf_stat_aggr *aggr = &ps->aggr[idx];

		if (aggr->used)
			continue;

		core_id = aggr_cpu_id__core(cpu, NULL);
		evsel__update_percore_stats(evsel, &core_id);
	}
}

/* process cpu stats on per-core events */
void perf_stat_process_percore(struct perf_stat_config *config, struct evlist *evlist)
{
	struct evsel *evsel;

	if (config->aggr_mode != AGGR_NONE)
		return;

	evlist__for_each_entry(evlist, evsel)
		evsel__process_percore(evsel);
}

int perf_event__process_stat_event(struct perf_session *session,
				   union perf_event *event)
{
	struct perf_counts_values count, *ptr;
	struct perf_record_stat *st = &event->stat;
	struct evsel *counter;
	int cpu_map_idx;

	count.val = st->val;
	count.ena = st->ena;
	count.run = st->run;

	counter = evlist__id2evsel(session->evlist, st->id);
	if (!counter) {
		pr_err("Failed to resolve counter for stat event.\n");
		return -EINVAL;
	}
	cpu_map_idx = perf_cpu_map__idx(evsel__cpus(counter), (struct perf_cpu){.cpu = st->cpu});
	if (cpu_map_idx == -1) {
		pr_err("Invalid CPU %d for event %s.\n", st->cpu, evsel__name(counter));
		return -EINVAL;
	}
	ptr = perf_counts(counter->counts, cpu_map_idx, st->thread);
	if (ptr == NULL) {
		pr_err("Failed to find perf count for CPU %d thread %d on event %s.\n",
		       st->cpu, st->thread, evsel__name(counter));
		return -EINVAL;
	}
	*ptr = count;
	counter->supported = true;
	return 0;
}
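
/*
 * This is the consumer side of recorded stat data (e.g. a
 * `perf stat record`/`report` session): each PERF_RECORD_STAT event
 * carries one (val, ena, run) triple for an (id, cpu, thread) tuple,
 * which is stored back into counter->counts as if it had been read()
 * locally.
 */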

size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp)
{
	struct perf_record_stat *st = (struct perf_record_stat *)event;
	size_t ret;

	ret = fprintf(fp, "\n... id %" PRI_lu64 ", cpu %d, thread %d\n",
		      st->id, st->cpu, st->thread);
	ret += fprintf(fp, "... value %" PRI_lu64 ", enabled %" PRI_lu64 ", running %" PRI_lu64 "\n",
		       st->val, st->ena, st->run);

	return ret;
}

size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
{
	struct perf_record_stat_round *rd = (struct perf_record_stat_round *)event;
	size_t ret;

	ret = fprintf(fp, "\n... time %" PRI_lu64 ", type %s\n", rd->time,
		      rd->type == PERF_STAT_ROUND_TYPE__FINAL ? "FINAL" : "INTERVAL");

	return ret;
}

size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
{
	struct perf_stat_config sc = {};
	size_t ret;

	perf_event__read_stat_config(&sc, &event->stat_config);

	ret = fprintf(fp, "\n");
	ret += fprintf(fp, "... aggr_mode %d\n", sc.aggr_mode);
	ret += fprintf(fp, "... scale %d\n", sc.scale);
	ret += fprintf(fp, "... interval %u\n", sc.interval);

	return ret;
}

int create_perf_stat_counter(struct evsel *evsel,
			     struct perf_stat_config *config,
			     struct target *target,
			     int cpu_map_idx)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	struct evsel *leader = evsel__leader(evsel);

	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			    PERF_FORMAT_TOTAL_TIME_RUNNING;

	/*
	 * The event is part of a non-trivial group; enable group reads
	 * (for the leader) and ID retrieval for all members.
	 */
	if (leader->core.nr_members > 1)
		attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
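
	/*
	 * When the group-read flags are set, a read() on the leader
	 * returns all members at once, laid out as described in
	 * perf_event_open(2):
	 *
	 *	struct read_format {
	 *		u64 nr;		   // number of events in the group
	 *		u64 time_enabled;  // PERF_FORMAT_TOTAL_TIME_ENABLED
	 *		u64 time_running;  // PERF_FORMAT_TOTAL_TIME_RUNNING
	 *		struct {
	 *			u64 value;
	 *			u64 id;	   // PERF_FORMAT_ID
	 *		} values[nr];
	 *	};
	 */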

	attr->inherit = !config->no_inherit && list_empty(&evsel->bpf_counter_list);

	/*
	 * Some events get initialized with sample_(period/type) set,
	 * like tracepoints. Clear them for counting.
	 */
	attr->sample_period = 0;

	if (config->identifier)
		attr->sample_type = PERF_SAMPLE_IDENTIFIER;

	if (config->all_user) {
		attr->exclude_kernel = 1;
		attr->exclude_user = 0;
	}

	if (config->all_kernel) {
		attr->exclude_kernel = 0;
		attr->exclude_user = 1;
	}

	/*
	 * Disable all counters initially; they will be enabled either
	 * manually by us or by the kernel via enable_on_exec, set later.
	 */
	if (evsel__is_group_leader(evsel)) {
		attr->disabled = 1;

		if (target__enable_on_exec(target))
			attr->enable_on_exec = 1;
	}

	if (target__has_cpu(target) && !target__has_per_thread(target))
		return evsel__open_per_cpu(evsel, evsel__cpus(evsel), cpu_map_idx);

	return evsel__open_per_thread(evsel, evsel->core.threads);
}