xref: /openbmc/linux/tools/perf/util/stat.c (revision 91f88a0a)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2a43783aeSArnaldo Carvalho de Melo #include <errno.h>
3d73f5d14SLv Ruyi #include <linux/err.h>
4fd20e811SArnaldo Carvalho de Melo #include <inttypes.h>
50007eceaSXiao Guangrong #include <math.h>
6f2a39fe8SArnaldo Carvalho de Melo #include <string.h>
7bfc49182SArnaldo Carvalho de Melo #include "counts.h"
887ffb6c6SArnaldo Carvalho de Melo #include "cpumap.h"
9b4209025SArnaldo Carvalho de Melo #include "debug.h"
10f2a39fe8SArnaldo Carvalho de Melo #include "header.h"
110007eceaSXiao Guangrong #include "stat.h"
12f2a39fe8SArnaldo Carvalho de Melo #include "session.h"
13aeb00b1aSArnaldo Carvalho de Melo #include "target.h"
1424e34f68SJiri Olsa #include "evlist.h"
15e2f56da1SJiri Olsa #include "evsel.h"
1624e34f68SJiri Olsa #include "thread_map.h"
17bdf45725SIan Rogers #include "util/hashmap.h"
187f7c536fSArnaldo Carvalho de Melo #include <linux/zalloc.h>
190007eceaSXiao Guangrong 
/*
 * Fold one sample into the running statistics using Welford's online
 * algorithm: mean and M2 (sum of squared deviations) are updated in a
 * single numerically-stable pass; max/min track the observed range.
 */
void update_stats(struct stats *stats, u64 val)
{
	double diff;

	stats->n++;
	diff = val - stats->mean;
	stats->mean += diff / stats->n;
	/* uses the *updated* mean, as Welford's recurrence requires */
	stats->M2 += diff * (val - stats->mean);

	if (stats->max < val)
		stats->max = val;

	if (stats->min > val)
		stats->min = val;
}
350007eceaSXiao Guangrong 
avg_stats(struct stats * stats)360007eceaSXiao Guangrong double avg_stats(struct stats *stats)
370007eceaSXiao Guangrong {
380007eceaSXiao Guangrong 	return stats->mean;
390007eceaSXiao Guangrong }
400007eceaSXiao Guangrong 
/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
stddev_stats(struct stats * stats)570007eceaSXiao Guangrong double stddev_stats(struct stats *stats)
580007eceaSXiao Guangrong {
590007eceaSXiao Guangrong 	double variance, variance_mean;
600007eceaSXiao Guangrong 
6145528f7cSDavid Ahern 	if (stats->n < 2)
620007eceaSXiao Guangrong 		return 0.0;
630007eceaSXiao Guangrong 
640007eceaSXiao Guangrong 	variance = stats->M2 / (stats->n - 1);
650007eceaSXiao Guangrong 	variance_mean = variance / stats->n;
660007eceaSXiao Guangrong 
670007eceaSXiao Guangrong 	return sqrt(variance_mean);
680007eceaSXiao Guangrong }
690007eceaSXiao Guangrong 
/*
 * Relative standard deviation: stddev expressed as a percentage of the
 * average.  Returns 0.0 when avg is zero to avoid dividing by zero.
 */
double rel_stddev_stats(double stddev, double avg)
{
	if (!avg)
		return 0.0;

	return 100.0 * stddev / avg;
}
79e2f56da1SJiri Olsa 
evsel__reset_aggr_stats(struct evsel * evsel)808f97963eSNamhyung Kim static void evsel__reset_aggr_stats(struct evsel *evsel)
819689edfaSJiri Olsa {
82e669e833SArnaldo Carvalho de Melo 	struct perf_stat_evsel *ps = evsel->stats;
83ca68b374SNamhyung Kim 	struct perf_stat_aggr *aggr = ps->aggr;
849689edfaSJiri Olsa 
85ca68b374SNamhyung Kim 	if (aggr)
86ca68b374SNamhyung Kim 		memset(aggr, 0, sizeof(*aggr) * ps->nr_aggr);
879689edfaSJiri Olsa }
889689edfaSJiri Olsa 
/* Reset both the multi-run res_stats and the per-aggregation counts. */
static void evsel__reset_stat_priv(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;

	init_stats(&ps->res_stats);
	evsel__reset_aggr_stats(evsel);
}
968f97963eSNamhyung Kim 
evsel__alloc_aggr_stats(struct evsel * evsel,int nr_aggr)97ae7e6492SNamhyung Kim static int evsel__alloc_aggr_stats(struct evsel *evsel, int nr_aggr)
98ae7e6492SNamhyung Kim {
99ae7e6492SNamhyung Kim 	struct perf_stat_evsel *ps = evsel->stats;
100ae7e6492SNamhyung Kim 
101ae7e6492SNamhyung Kim 	if (ps == NULL)
102ae7e6492SNamhyung Kim 		return 0;
103ae7e6492SNamhyung Kim 
104ae7e6492SNamhyung Kim 	ps->nr_aggr = nr_aggr;
105ae7e6492SNamhyung Kim 	ps->aggr = calloc(nr_aggr, sizeof(*ps->aggr));
106ae7e6492SNamhyung Kim 	if (ps->aggr == NULL)
107ae7e6492SNamhyung Kim 		return -ENOMEM;
108ae7e6492SNamhyung Kim 
109ae7e6492SNamhyung Kim 	return 0;
110ae7e6492SNamhyung Kim }
111ae7e6492SNamhyung Kim 
evlist__alloc_aggr_stats(struct evlist * evlist,int nr_aggr)112ae7e6492SNamhyung Kim int evlist__alloc_aggr_stats(struct evlist *evlist, int nr_aggr)
113ae7e6492SNamhyung Kim {
114ae7e6492SNamhyung Kim 	struct evsel *evsel;
115ae7e6492SNamhyung Kim 
116ae7e6492SNamhyung Kim 	evlist__for_each_entry(evlist, evsel) {
117ae7e6492SNamhyung Kim 		if (evsel__alloc_aggr_stats(evsel, nr_aggr) < 0)
118ae7e6492SNamhyung Kim 			return -1;
119ae7e6492SNamhyung Kim 	}
120ae7e6492SNamhyung Kim 	return 0;
121ae7e6492SNamhyung Kim }
122ca68b374SNamhyung Kim 
evsel__alloc_stat_priv(struct evsel * evsel,int nr_aggr)123ca68b374SNamhyung Kim static int evsel__alloc_stat_priv(struct evsel *evsel, int nr_aggr)
1249689edfaSJiri Olsa {
125ca68b374SNamhyung Kim 	struct perf_stat_evsel *ps;
126ca68b374SNamhyung Kim 
127ca68b374SNamhyung Kim 	ps = zalloc(sizeof(*ps));
128ca68b374SNamhyung Kim 	if (ps == NULL)
1299689edfaSJiri Olsa 		return -ENOMEM;
130ca68b374SNamhyung Kim 
131ae7e6492SNamhyung Kim 	evsel->stats = ps;
132ae7e6492SNamhyung Kim 
133ae7e6492SNamhyung Kim 	if (nr_aggr && evsel__alloc_aggr_stats(evsel, nr_aggr) < 0) {
134ae7e6492SNamhyung Kim 		evsel->stats = NULL;
135ca68b374SNamhyung Kim 		free(ps);
136ca68b374SNamhyung Kim 		return -ENOMEM;
137ca68b374SNamhyung Kim 	}
138ca68b374SNamhyung Kim 
1397d1e239eSArnaldo Carvalho de Melo 	evsel__reset_stat_priv(evsel);
1409689edfaSJiri Olsa 	return 0;
1419689edfaSJiri Olsa }
1429689edfaSJiri Olsa 
evsel__free_stat_priv(struct evsel * evsel)1437d1e239eSArnaldo Carvalho de Melo static void evsel__free_stat_priv(struct evsel *evsel)
1449689edfaSJiri Olsa {
145e669e833SArnaldo Carvalho de Melo 	struct perf_stat_evsel *ps = evsel->stats;
146f7794d52SJiri Olsa 
147ca68b374SNamhyung Kim 	if (ps) {
148ca68b374SNamhyung Kim 		zfree(&ps->aggr);
149d8f9da24SArnaldo Carvalho de Melo 		zfree(&ps->group_data);
150ca68b374SNamhyung Kim 	}
151e669e833SArnaldo Carvalho de Melo 	zfree(&evsel->stats);
1529689edfaSJiri Olsa }
153a939512dSJiri Olsa 
evsel__alloc_prev_raw_counts(struct evsel * evsel)1542ca0a371SIan Rogers static int evsel__alloc_prev_raw_counts(struct evsel *evsel)
155a939512dSJiri Olsa {
1562ca0a371SIan Rogers 	int cpu_map_nr = evsel__nr_cpus(evsel);
1572ca0a371SIan Rogers 	int nthreads = perf_thread_map__nr(evsel->core.threads);
158a939512dSJiri Olsa 	struct perf_counts *counts;
159a939512dSJiri Olsa 
1602ca0a371SIan Rogers 	counts = perf_counts__new(cpu_map_nr, nthreads);
161a939512dSJiri Olsa 	if (counts)
162a939512dSJiri Olsa 		evsel->prev_raw_counts = counts;
163a939512dSJiri Olsa 
164a939512dSJiri Olsa 	return counts ? 0 : -ENOMEM;
165a939512dSJiri Olsa }
166a939512dSJiri Olsa 
/* Release prev_raw_counts and clear the pointer so a re-free is safe. */
static void evsel__free_prev_raw_counts(struct evsel *evsel)
{
	perf_counts__delete(evsel->prev_raw_counts);
	evsel->prev_raw_counts = NULL;
}
17224e34f68SJiri Olsa 
evsel__reset_prev_raw_counts(struct evsel * evsel)1737d1e239eSArnaldo Carvalho de Melo static void evsel__reset_prev_raw_counts(struct evsel *evsel)
174b63fd11cSSrikar Dronamraju {
175cf4d9bd6SJin Yao 	if (evsel->prev_raw_counts)
176cf4d9bd6SJin Yao 		perf_counts__reset(evsel->prev_raw_counts);
177b63fd11cSSrikar Dronamraju }
178b63fd11cSSrikar Dronamraju 
/*
 * Allocate all stat-related buffers for one evsel: private stats,
 * counts, and (optionally) the previous raw counts.  Partial
 * allocations are cleaned up by the caller via evlist__free_stats().
 */
static int evsel__alloc_stats(struct evsel *evsel, int nr_aggr, bool alloc_raw)
{
	if (evsel__alloc_stat_priv(evsel, nr_aggr) < 0)
		return -ENOMEM;

	if (evsel__alloc_counts(evsel) < 0)
		return -ENOMEM;

	if (alloc_raw && evsel__alloc_prev_raw_counts(evsel) < 0)
		return -ENOMEM;

	return 0;
}
188a7d0a102SJiri Olsa 
evlist__alloc_stats(struct perf_stat_config * config,struct evlist * evlist,bool alloc_raw)1891f297a6eSNamhyung Kim int evlist__alloc_stats(struct perf_stat_config *config,
1901f297a6eSNamhyung Kim 			struct evlist *evlist, bool alloc_raw)
191a7d0a102SJiri Olsa {
19232dcd021SJiri Olsa 	struct evsel *evsel;
1931f297a6eSNamhyung Kim 	int nr_aggr = 0;
1941f297a6eSNamhyung Kim 
1951f297a6eSNamhyung Kim 	if (config && config->aggr_map)
1961f297a6eSNamhyung Kim 		nr_aggr = config->aggr_map->nr;
197a7d0a102SJiri Olsa 
198e5cadb93SArnaldo Carvalho de Melo 	evlist__for_each_entry(evlist, evsel) {
1991f297a6eSNamhyung Kim 		if (evsel__alloc_stats(evsel, nr_aggr, alloc_raw))
20024e34f68SJiri Olsa 			goto out_free;
20124e34f68SJiri Olsa 	}
20224e34f68SJiri Olsa 
20324e34f68SJiri Olsa 	return 0;
20424e34f68SJiri Olsa 
20524e34f68SJiri Olsa out_free:
20653f5e908SArnaldo Carvalho de Melo 	evlist__free_stats(evlist);
20724e34f68SJiri Olsa 	return -1;
20824e34f68SJiri Olsa }
20924e34f68SJiri Olsa 
evlist__free_stats(struct evlist * evlist)21053f5e908SArnaldo Carvalho de Melo void evlist__free_stats(struct evlist *evlist)
21124e34f68SJiri Olsa {
21232dcd021SJiri Olsa 	struct evsel *evsel;
21324e34f68SJiri Olsa 
214e5cadb93SArnaldo Carvalho de Melo 	evlist__for_each_entry(evlist, evsel) {
2157d1e239eSArnaldo Carvalho de Melo 		evsel__free_stat_priv(evsel);
2167d1e239eSArnaldo Carvalho de Melo 		evsel__free_counts(evsel);
2177d1e239eSArnaldo Carvalho de Melo 		evsel__free_prev_raw_counts(evsel);
21824e34f68SJiri Olsa 	}
21924e34f68SJiri Olsa }
22024e34f68SJiri Olsa 
evlist__reset_stats(struct evlist * evlist)22153f5e908SArnaldo Carvalho de Melo void evlist__reset_stats(struct evlist *evlist)
22224e34f68SJiri Olsa {
22332dcd021SJiri Olsa 	struct evsel *evsel;
22424e34f68SJiri Olsa 
225e5cadb93SArnaldo Carvalho de Melo 	evlist__for_each_entry(evlist, evsel) {
2267d1e239eSArnaldo Carvalho de Melo 		evsel__reset_stat_priv(evsel);
2277d1e239eSArnaldo Carvalho de Melo 		evsel__reset_counts(evsel);
22824e34f68SJiri Olsa 	}
22924e34f68SJiri Olsa }
230f80010ebSJiri Olsa 
evlist__reset_aggr_stats(struct evlist * evlist)2318f97963eSNamhyung Kim void evlist__reset_aggr_stats(struct evlist *evlist)
2328f97963eSNamhyung Kim {
2338f97963eSNamhyung Kim 	struct evsel *evsel;
2348f97963eSNamhyung Kim 
2358f97963eSNamhyung Kim 	evlist__for_each_entry(evlist, evsel)
2368f97963eSNamhyung Kim 		evsel__reset_aggr_stats(evsel);
2378f97963eSNamhyung Kim }
2388f97963eSNamhyung Kim 
evlist__reset_prev_raw_counts(struct evlist * evlist)23953f5e908SArnaldo Carvalho de Melo void evlist__reset_prev_raw_counts(struct evlist *evlist)
240b63fd11cSSrikar Dronamraju {
241b63fd11cSSrikar Dronamraju 	struct evsel *evsel;
242b63fd11cSSrikar Dronamraju 
243b63fd11cSSrikar Dronamraju 	evlist__for_each_entry(evlist, evsel)
2447d1e239eSArnaldo Carvalho de Melo 		evsel__reset_prev_raw_counts(evsel);
245b63fd11cSSrikar Dronamraju }
246b63fd11cSSrikar Dronamraju 
evsel__copy_prev_raw_counts(struct evsel * evsel)24756933029SArnaldo Carvalho de Melo static void evsel__copy_prev_raw_counts(struct evsel *evsel)
248297767acSJin Yao {
2490b9462d0SIan Rogers 	int idx, nthreads = perf_thread_map__nr(evsel->core.threads);
250297767acSJin Yao 
251297767acSJin Yao 	for (int thread = 0; thread < nthreads; thread++) {
2520b9462d0SIan Rogers 		perf_cpu_map__for_each_idx(idx, evsel__cpus(evsel)) {
2530b9462d0SIan Rogers 			*perf_counts(evsel->counts, idx, thread) =
2540b9462d0SIan Rogers 				*perf_counts(evsel->prev_raw_counts, idx, thread);
255297767acSJin Yao 		}
256297767acSJin Yao 	}
257297767acSJin Yao }
258297767acSJin Yao 
evlist__copy_prev_raw_counts(struct evlist * evlist)25953f5e908SArnaldo Carvalho de Melo void evlist__copy_prev_raw_counts(struct evlist *evlist)
260297767acSJin Yao {
261297767acSJin Yao 	struct evsel *evsel;
262297767acSJin Yao 
263297767acSJin Yao 	evlist__for_each_entry(evlist, evsel)
26456933029SArnaldo Carvalho de Melo 		evsel__copy_prev_raw_counts(evsel);
265297767acSJin Yao }
266297767acSJin Yao 
/* Write the multi-run average back into the single GLOBAL output slot. */
static void evsel__copy_res_stats(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;

	/*
	 * For GLOBAL aggregation mode, it updates the counts for each run
	 * in the evsel->stats.res_stats.  See perf_stat_process_counter().
	 */
	/* values[0] aliases the first count; the double average is truncated to u64 */
	*ps->aggr[0].counts.values = avg_stats(&ps->res_stats);
}
277dada1a1fSNamhyung Kim 
evlist__copy_res_stats(struct perf_stat_config * config,struct evlist * evlist)278dada1a1fSNamhyung Kim void evlist__copy_res_stats(struct perf_stat_config *config, struct evlist *evlist)
279dada1a1fSNamhyung Kim {
280dada1a1fSNamhyung Kim 	struct evsel *evsel;
281dada1a1fSNamhyung Kim 
282dada1a1fSNamhyung Kim 	if (config->aggr_mode != AGGR_GLOBAL)
283dada1a1fSNamhyung Kim 		return;
284dada1a1fSNamhyung Kim 
285dada1a1fSNamhyung Kim 	evlist__for_each_entry(evlist, evsel)
286dada1a1fSNamhyung Kim 		evsel__copy_res_stats(evsel);
287dada1a1fSNamhyung Kim }
288dada1a1fSNamhyung Kim 
/* hashmap callback: hash is the low 32 bits of the u64 (die, socket) key. */
static size_t pkg_id_hash(long __key, void *ctx __maybe_unused)
{
	const uint64_t *pkg_key = (const uint64_t *) __key;

	return (size_t)(*pkg_key & 0xffffffff);
}
295034f7ee1SJin Yao 
/* hashmap callback: keys match when the pointed-to u64 values are equal. */
static bool pkg_id_equal(long __key1, long __key2, void *ctx __maybe_unused)
{
	const uint64_t *a = (const uint64_t *) __key1;
	const uint64_t *b = (const uint64_t *) __key2;

	return *a == *b;
}
303f80010ebSJiri Olsa 
/*
 * Decide whether this reading of a per-package event should be skipped.
 *
 * Per-package events are read on every CPU but must be counted once per
 * (socket, die) pair; only the first *running* CPU of each pair
 * contributes.  Sets *skip = true for duplicates.  Returns 0 on
 * success, -ENOMEM / -1 on allocation or topology-lookup failure.
 */
static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals,
			 int cpu_map_idx, bool *skip)
{
	struct hashmap *mask = counter->per_pkg_mask;
	struct perf_cpu_map *cpus = evsel__cpus(counter);
	struct perf_cpu cpu = perf_cpu_map__cpu(cpus, cpu_map_idx);
	int s, d, ret = 0;
	uint64_t *key;

	*skip = false;

	if (!counter->per_pkg)
		return 0;

	if (perf_cpu_map__empty(cpus))
		return 0;

	if (!mask) {
		/* lazily create the "(socket, die) already seen" map */
		mask = hashmap__new(pkg_id_hash, pkg_id_equal, NULL);
		if (IS_ERR(mask))
			return -ENOMEM;

		counter->per_pkg_mask = mask;
	}

	/*
	 * we do not consider an event that has not run as a good
	 * instance to mark a package as used (skip=1). Otherwise
	 * we may run into a situation where the first CPU in a package
	 * is not running anything, yet the second is, and this function
	 * would mark the package as used after the first CPU and would
	 * not read the values from the second CPU.
	 */
	if (!(vals->run && vals->ena))
		return 0;

	s = cpu__get_socket_id(cpu);
	if (s < 0)
		return -1;

	/*
	 * On multi-die system, die_id > 0. On no-die system, die_id = 0.
	 * We use hashmap(socket, die) to check the used socket+die pair.
	 */
	d = cpu__get_die_id(cpu);
	if (d < 0)
		return -1;

	/* key ownership transfers to the hashmap on a successful add */
	key = malloc(sizeof(*key));
	if (!key)
		return -ENOMEM;

	*key = (uint64_t)d << 32 | s;
	if (hashmap__find(mask, key, NULL)) {
		/* duplicate pair: skip this reading and drop the spare key */
		*skip = true;
		free(key);
	} else
		ret = hashmap__add(mask, key, 1);

	return ret;
}
365f80010ebSJiri Olsa 
evsel__count_has_error(struct evsel * evsel,struct perf_counts_values * count,struct perf_stat_config * config)366049aba09SNamhyung Kim static bool evsel__count_has_error(struct evsel *evsel,
367049aba09SNamhyung Kim 				   struct perf_counts_values *count,
368049aba09SNamhyung Kim 				   struct perf_stat_config *config)
369049aba09SNamhyung Kim {
370049aba09SNamhyung Kim 	/* the evsel was failed already */
371049aba09SNamhyung Kim 	if (evsel->err || evsel->counts->scaled == -1)
372049aba09SNamhyung Kim 		return true;
373049aba09SNamhyung Kim 
374049aba09SNamhyung Kim 	/* this is meaningful for CPU aggregation modes only */
375049aba09SNamhyung Kim 	if (config->aggr_mode == AGGR_GLOBAL)
376049aba09SNamhyung Kim 		return false;
377049aba09SNamhyung Kim 
378049aba09SNamhyung Kim 	/* it's considered ok when it actually ran */
379049aba09SNamhyung Kim 	if (count->ena != 0 && count->run != 0)
380049aba09SNamhyung Kim 		return false;
381049aba09SNamhyung Kim 
382049aba09SNamhyung Kim 	return true;
383049aba09SNamhyung Kim }
384049aba09SNamhyung Kim 
385f80010ebSJiri Olsa static int
process_counter_values(struct perf_stat_config * config,struct evsel * evsel,int cpu_map_idx,int thread,struct perf_counts_values * count)38632dcd021SJiri Olsa process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
3875b1af93dSIan Rogers 		       int cpu_map_idx, int thread,
388f80010ebSJiri Olsa 		       struct perf_counts_values *count)
389f80010ebSJiri Olsa {
390f976bc6bSNamhyung Kim 	struct perf_stat_evsel *ps = evsel->stats;
391f80010ebSJiri Olsa 	static struct perf_counts_values zero;
392f80010ebSJiri Olsa 	bool skip = false;
393f80010ebSJiri Olsa 
3945b1af93dSIan Rogers 	if (check_per_pkg(evsel, count, cpu_map_idx, &skip)) {
395f80010ebSJiri Olsa 		pr_err("failed to read per-pkg counter\n");
396f80010ebSJiri Olsa 		return -1;
397f80010ebSJiri Olsa 	}
398f80010ebSJiri Olsa 
399f80010ebSJiri Olsa 	if (skip)
400f80010ebSJiri Olsa 		count = &zero;
401f80010ebSJiri Olsa 
402f976bc6bSNamhyung Kim 	if (!evsel->snapshot)
403f976bc6bSNamhyung Kim 		evsel__compute_deltas(evsel, cpu_map_idx, thread, count);
404f976bc6bSNamhyung Kim 	perf_counts_values__scale(count, config->scale, NULL);
405f976bc6bSNamhyung Kim 
406050059e1SNamhyung Kim 	if (config->aggr_mode == AGGR_THREAD) {
407050059e1SNamhyung Kim 		struct perf_counts_values *aggr_counts = &ps->aggr[thread].counts;
408050059e1SNamhyung Kim 
409050059e1SNamhyung Kim 		/*
410050059e1SNamhyung Kim 		 * Skip value 0 when enabling --per-thread globally,
411050059e1SNamhyung Kim 		 * otherwise too many 0 output.
412050059e1SNamhyung Kim 		 */
413050059e1SNamhyung Kim 		if (count->val == 0 && config->system_wide)
414050059e1SNamhyung Kim 			return 0;
415050059e1SNamhyung Kim 
416050059e1SNamhyung Kim 		ps->aggr[thread].nr++;
417050059e1SNamhyung Kim 
418050059e1SNamhyung Kim 		aggr_counts->val += count->val;
419050059e1SNamhyung Kim 		aggr_counts->ena += count->ena;
420050059e1SNamhyung Kim 		aggr_counts->run += count->run;
42188f1d351SNamhyung Kim 		return 0;
422050059e1SNamhyung Kim 	}
423050059e1SNamhyung Kim 
424f976bc6bSNamhyung Kim 	if (ps->aggr) {
425f976bc6bSNamhyung Kim 		struct perf_cpu cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx);
426f976bc6bSNamhyung Kim 		struct aggr_cpu_id aggr_id = config->aggr_get_id(config, cpu);
427f976bc6bSNamhyung Kim 		struct perf_stat_aggr *ps_aggr;
428f976bc6bSNamhyung Kim 		int i;
429f976bc6bSNamhyung Kim 
430f976bc6bSNamhyung Kim 		for (i = 0; i < ps->nr_aggr; i++) {
431f976bc6bSNamhyung Kim 			if (!aggr_cpu_id__equal(&aggr_id, &config->aggr_map->map[i]))
432f976bc6bSNamhyung Kim 				continue;
433f976bc6bSNamhyung Kim 
434f976bc6bSNamhyung Kim 			ps_aggr = &ps->aggr[i];
435f976bc6bSNamhyung Kim 			ps_aggr->nr++;
436f976bc6bSNamhyung Kim 
437f976bc6bSNamhyung Kim 			/*
438050059e1SNamhyung Kim 			 * When any result is bad, make them all to give consistent output
439050059e1SNamhyung Kim 			 * in interval mode.  But per-task counters can have 0 enabled time
440050059e1SNamhyung Kim 			 * when some tasks are idle.
441f976bc6bSNamhyung Kim 			 */
442049aba09SNamhyung Kim 			if (evsel__count_has_error(evsel, count, config) && !ps_aggr->failed) {
443f976bc6bSNamhyung Kim 				ps_aggr->counts.val = 0;
444f976bc6bSNamhyung Kim 				ps_aggr->counts.ena = 0;
445f976bc6bSNamhyung Kim 				ps_aggr->counts.run = 0;
446f976bc6bSNamhyung Kim 				ps_aggr->failed = true;
447f976bc6bSNamhyung Kim 			}
448f976bc6bSNamhyung Kim 
449f976bc6bSNamhyung Kim 			if (!ps_aggr->failed) {
450f976bc6bSNamhyung Kim 				ps_aggr->counts.val += count->val;
451f976bc6bSNamhyung Kim 				ps_aggr->counts.ena += count->ena;
452f976bc6bSNamhyung Kim 				ps_aggr->counts.run += count->run;
453f976bc6bSNamhyung Kim 			}
454f976bc6bSNamhyung Kim 			break;
455f976bc6bSNamhyung Kim 		}
456f976bc6bSNamhyung Kim 	}
457f976bc6bSNamhyung Kim 
458f80010ebSJiri Olsa 	return 0;
459f80010ebSJiri Olsa }
460f80010ebSJiri Olsa 
process_counter_maps(struct perf_stat_config * config,struct evsel * counter)461f80010ebSJiri Olsa static int process_counter_maps(struct perf_stat_config *config,
46232dcd021SJiri Olsa 				struct evsel *counter)
463f80010ebSJiri Olsa {
464a2f354e3SJiri Olsa 	int nthreads = perf_thread_map__nr(counter->core.threads);
4655eb88f04SArnaldo Carvalho de Melo 	int ncpus = evsel__nr_cpus(counter);
4665b1af93dSIan Rogers 	int idx, thread;
467f80010ebSJiri Olsa 
468f80010ebSJiri Olsa 	for (thread = 0; thread < nthreads; thread++) {
4695b1af93dSIan Rogers 		for (idx = 0; idx < ncpus; idx++) {
4705b1af93dSIan Rogers 			if (process_counter_values(config, counter, idx, thread,
4715b1af93dSIan Rogers 						   perf_counts(counter->counts, idx, thread)))
472f80010ebSJiri Olsa 				return -1;
473f80010ebSJiri Olsa 		}
474f80010ebSJiri Olsa 	}
475f80010ebSJiri Olsa 
476f80010ebSJiri Olsa 	return 0;
477f80010ebSJiri Olsa }
478f80010ebSJiri Olsa 
/*
 * Aggregate all raw readings of @counter for this interval/run.
 * For AGGR_GLOBAL the single aggregated value is additionally folded
 * into ps->res_stats (used for multi-run averaging).
 * Returns 0 on success, negative on error.
 */
int perf_stat_process_counter(struct perf_stat_config *config,
			      struct evsel *counter)
{
	struct perf_stat_evsel *ps = counter->stats;
	u64 *count;
	int ret;

	/* forget which (socket, die) pairs were seen in the previous round */
	if (counter->per_pkg)
		evsel__zero_per_pkg(counter);

	ret = process_counter_maps(config, counter);
	if (ret)
		return ret;

	if (config->aggr_mode != AGGR_GLOBAL)
		return 0;

	/*
	 * GLOBAL aggregation mode only has a single aggr counts,
	 * so we can use ps->aggr[0] as the actual output.
	 */
	count = ps->aggr[0].counts.values;
	update_stats(&ps->res_stats, *count);

	if (verbose > 0) {
		/* count[0..2] = val, ena, run */
		fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			evsel__name(counter), count[0], count[1], count[2]);
	}

	return 0;
}
5100ea0e355SJiri Olsa 
evsel__merge_aggr_counters(struct evsel * evsel,struct evsel * alias)511942c5593SNamhyung Kim static int evsel__merge_aggr_counters(struct evsel *evsel, struct evsel *alias)
512942c5593SNamhyung Kim {
513942c5593SNamhyung Kim 	struct perf_stat_evsel *ps_a = evsel->stats;
514942c5593SNamhyung Kim 	struct perf_stat_evsel *ps_b = alias->stats;
515942c5593SNamhyung Kim 	int i;
516942c5593SNamhyung Kim 
517942c5593SNamhyung Kim 	if (ps_a->aggr == NULL && ps_b->aggr == NULL)
518942c5593SNamhyung Kim 		return 0;
519942c5593SNamhyung Kim 
520942c5593SNamhyung Kim 	if (ps_a->nr_aggr != ps_b->nr_aggr) {
521942c5593SNamhyung Kim 		pr_err("Unmatched aggregation mode between aliases\n");
522942c5593SNamhyung Kim 		return -1;
523942c5593SNamhyung Kim 	}
524942c5593SNamhyung Kim 
525942c5593SNamhyung Kim 	for (i = 0; i < ps_a->nr_aggr; i++) {
526942c5593SNamhyung Kim 		struct perf_counts_values *aggr_counts_a = &ps_a->aggr[i].counts;
527942c5593SNamhyung Kim 		struct perf_counts_values *aggr_counts_b = &ps_b->aggr[i].counts;
528942c5593SNamhyung Kim 
529942c5593SNamhyung Kim 		/* NB: don't increase aggr.nr for aliases */
530942c5593SNamhyung Kim 
531942c5593SNamhyung Kim 		aggr_counts_a->val += aggr_counts_b->val;
532942c5593SNamhyung Kim 		aggr_counts_a->ena += aggr_counts_b->ena;
533942c5593SNamhyung Kim 		aggr_counts_a->run += aggr_counts_b->run;
534942c5593SNamhyung Kim 	}
535942c5593SNamhyung Kim 
536942c5593SNamhyung Kim 	return 0;
537942c5593SNamhyung Kim }
/* events should have the same name, scale, unit, cgroup but on different PMUs */
static bool evsel__is_alias(struct evsel *evsel_a, struct evsel *evsel_b)
{
	if (strcmp(evsel__name(evsel_a), evsel__name(evsel_b)))
		return false;

	if (evsel_a->scale != evsel_b->scale)
		return false;

	if (evsel_a->cgrp != evsel_b->cgrp)
		return false;

	if (strcmp(evsel_a->unit, evsel_b->unit))
		return false;

	if (evsel__is_clock(evsel_a) != evsel__is_clock(evsel_b))
		return false;

	/*
	 * Deliberately inverted: aliases must live on *different* PMUs, so a
	 * non-zero strcmp() (differing pmu_name) means "is an alias".
	 * NOTE(review): assumes both pmu_name pointers are non-NULL — confirm
	 * for events opened without an explicit PMU.
	 */
	return !!strcmp(evsel_a->pmu_name, evsel_b->pmu_name);
}
558942c5593SNamhyung Kim 
/*
 * Fold the aggregated counts of every later alias of @evsel (the same
 * event on a different PMU) into @evsel, and mark each alias as merged
 * so it is not reported separately.  Only events *after* @evsel in the
 * evlist are scanned, so the first instance absorbs the rest.
 */
static void evsel__merge_aliases(struct evsel *evsel)
{
	struct evlist *evlist = evsel->evlist;
	struct evsel *alias;

	/* start iteration at the entry after @evsel */
	alias = list_prepare_entry(evsel, &(evlist->core.entries), core.node);
	list_for_each_entry_continue(alias, &evlist->core.entries, core.node) {
		/* Merge the same events on different PMUs. */
		if (evsel__is_alias(evsel, alias)) {
			evsel__merge_aggr_counters(evsel, alias);
			alias->merged_stat = true;
		}
	}
}
573942c5593SNamhyung Kim 
evsel__should_merge_hybrid(const struct evsel * evsel,const struct perf_stat_config * config)574e5f4afbeSIan Rogers static bool evsel__should_merge_hybrid(const struct evsel *evsel,
575e5f4afbeSIan Rogers 				       const struct perf_stat_config *config)
576942c5593SNamhyung Kim {
577e5f4afbeSIan Rogers 	return config->hybrid_merge && evsel__is_hybrid(evsel);
578942c5593SNamhyung Kim }
579942c5593SNamhyung Kim 
evsel__merge_stats(struct evsel * evsel,struct perf_stat_config * config)580942c5593SNamhyung Kim static void evsel__merge_stats(struct evsel *evsel, struct perf_stat_config *config)
581942c5593SNamhyung Kim {
582942c5593SNamhyung Kim 	/* this evsel is already merged */
583942c5593SNamhyung Kim 	if (evsel->merged_stat)
584942c5593SNamhyung Kim 		return;
585942c5593SNamhyung Kim 
586942c5593SNamhyung Kim 	if (evsel->auto_merge_stats || evsel__should_merge_hybrid(evsel, config))
587942c5593SNamhyung Kim 		evsel__merge_aliases(evsel);
588942c5593SNamhyung Kim }
589942c5593SNamhyung Kim 
590942c5593SNamhyung Kim /* merge the same uncore and hybrid events if requested */
perf_stat_merge_counters(struct perf_stat_config * config,struct evlist * evlist)591942c5593SNamhyung Kim void perf_stat_merge_counters(struct perf_stat_config *config, struct evlist *evlist)
592942c5593SNamhyung Kim {
593942c5593SNamhyung Kim 	struct evsel *evsel;
594942c5593SNamhyung Kim 
595942c5593SNamhyung Kim 	if (config->no_merge)
596942c5593SNamhyung Kim 		return;
597942c5593SNamhyung Kim 
598942c5593SNamhyung Kim 	evlist__for_each_entry(evlist, evsel)
599942c5593SNamhyung Kim 		evsel__merge_stats(evsel, config);
600942c5593SNamhyung Kim }
601942c5593SNamhyung Kim 
/*
 * Sum the per-CPU counts of all CPUs belonging to @core_id, then write
 * that core-wide total back into every one of those CPUs' slots and
 * mark them used.  Two passes over the CPU map: first collect, then
 * distribute.
 */
static void evsel__update_percore_stats(struct evsel *evsel, struct aggr_cpu_id *core_id)
{
	struct perf_stat_evsel *ps = evsel->stats;
	struct perf_counts_values counts = { 0, };
	struct aggr_cpu_id id;
	struct perf_cpu cpu;
	int idx;

	/* collect per-core counts */
	perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
		struct perf_stat_aggr *aggr = &ps->aggr[idx];

		id = aggr_cpu_id__core(cpu, NULL);
		if (!aggr_cpu_id__equal(core_id, &id))
			continue;

		counts.val += aggr->counts.val;
		counts.ena += aggr->counts.ena;
		counts.run += aggr->counts.run;
	}

	/* update aggregated per-core counts for each CPU */
	perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
		struct perf_stat_aggr *aggr = &ps->aggr[idx];

		id = aggr_cpu_id__core(cpu, NULL);
		if (!aggr_cpu_id__equal(core_id, &id))
			continue;

		aggr->counts.val = counts.val;
		aggr->counts.ena = counts.ena;
		aggr->counts.run = counts.run;

		/* prevents evsel__process_percore() re-summing this CPU */
		aggr->used = true;
	}
}
6381d6d2beaSNamhyung Kim 
6391d6d2beaSNamhyung Kim /* we have an aggr_map for cpu, but want to aggregate the counters per-core */
evsel__process_percore(struct evsel * evsel)6401d6d2beaSNamhyung Kim static void evsel__process_percore(struct evsel *evsel)
6411d6d2beaSNamhyung Kim {
6421d6d2beaSNamhyung Kim 	struct perf_stat_evsel *ps = evsel->stats;
6431d6d2beaSNamhyung Kim 	struct aggr_cpu_id core_id;
6441d6d2beaSNamhyung Kim 	struct perf_cpu cpu;
6451d6d2beaSNamhyung Kim 	int idx;
6461d6d2beaSNamhyung Kim 
6471d6d2beaSNamhyung Kim 	if (!evsel->percore)
6481d6d2beaSNamhyung Kim 		return;
6491d6d2beaSNamhyung Kim 
6501d6d2beaSNamhyung Kim 	perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
6511d6d2beaSNamhyung Kim 		struct perf_stat_aggr *aggr = &ps->aggr[idx];
6521d6d2beaSNamhyung Kim 
6531d6d2beaSNamhyung Kim 		if (aggr->used)
6541d6d2beaSNamhyung Kim 			continue;
6551d6d2beaSNamhyung Kim 
6561d6d2beaSNamhyung Kim 		core_id = aggr_cpu_id__core(cpu, NULL);
6571d6d2beaSNamhyung Kim 		evsel__update_percore_stats(evsel, &core_id);
6581d6d2beaSNamhyung Kim 	}
6591d6d2beaSNamhyung Kim }
6601d6d2beaSNamhyung Kim 
6611d6d2beaSNamhyung Kim /* process cpu stats on per-core events */
perf_stat_process_percore(struct perf_stat_config * config,struct evlist * evlist)6621d6d2beaSNamhyung Kim void perf_stat_process_percore(struct perf_stat_config *config, struct evlist *evlist)
6631d6d2beaSNamhyung Kim {
6641d6d2beaSNamhyung Kim 	struct evsel *evsel;
6651d6d2beaSNamhyung Kim 
6661d6d2beaSNamhyung Kim 	if (config->aggr_mode != AGGR_NONE)
6671d6d2beaSNamhyung Kim 		return;
6681d6d2beaSNamhyung Kim 
6691d6d2beaSNamhyung Kim 	evlist__for_each_entry(evlist, evsel)
6701d6d2beaSNamhyung Kim 		evsel__process_percore(evsel);
6711d6d2beaSNamhyung Kim }
6721d6d2beaSNamhyung Kim 
perf_event__process_stat_event(struct perf_session * session,union perf_event * event)67389f1688aSJiri Olsa int perf_event__process_stat_event(struct perf_session *session,
67489f1688aSJiri Olsa 				   union perf_event *event)
6750ea0e355SJiri Olsa {
67692d579eaSIan Rogers 	struct perf_counts_values count, *ptr;
67772932371SJiri Olsa 	struct perf_record_stat *st = &event->stat;
67832dcd021SJiri Olsa 	struct evsel *counter;
67992d579eaSIan Rogers 	int cpu_map_idx;
6800ea0e355SJiri Olsa 
6810ea0e355SJiri Olsa 	count.val = st->val;
6820ea0e355SJiri Olsa 	count.ena = st->ena;
6830ea0e355SJiri Olsa 	count.run = st->run;
6840ea0e355SJiri Olsa 
6853ccf8a7bSArnaldo Carvalho de Melo 	counter = evlist__id2evsel(session->evlist, st->id);
6860ea0e355SJiri Olsa 	if (!counter) {
6870ea0e355SJiri Olsa 		pr_err("Failed to resolve counter for stat event.\n");
6880ea0e355SJiri Olsa 		return -EINVAL;
6890ea0e355SJiri Olsa 	}
69092d579eaSIan Rogers 	cpu_map_idx = perf_cpu_map__idx(evsel__cpus(counter), (struct perf_cpu){.cpu = st->cpu});
69192d579eaSIan Rogers 	if (cpu_map_idx == -1) {
69292d579eaSIan Rogers 		pr_err("Invalid CPU %d for event %s.\n", st->cpu, evsel__name(counter));
69392d579eaSIan Rogers 		return -EINVAL;
69492d579eaSIan Rogers 	}
69592d579eaSIan Rogers 	ptr = perf_counts(counter->counts, cpu_map_idx, st->thread);
69692d579eaSIan Rogers 	if (ptr == NULL) {
69792d579eaSIan Rogers 		pr_err("Failed to find perf count for CPU %d thread %d on event %s.\n",
69892d579eaSIan Rogers 			st->cpu, st->thread, evsel__name(counter));
69992d579eaSIan Rogers 		return -EINVAL;
70092d579eaSIan Rogers 	}
70192d579eaSIan Rogers 	*ptr = count;
7020ea0e355SJiri Olsa 	counter->supported = true;
7030ea0e355SJiri Olsa 	return 0;
7040ea0e355SJiri Olsa }
705e08a4564SJiri Olsa 
perf_event__fprintf_stat(union perf_event * event,FILE * fp)706e08a4564SJiri Olsa size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp)
707e08a4564SJiri Olsa {
70872932371SJiri Olsa 	struct perf_record_stat *st = (struct perf_record_stat *)event;
709e08a4564SJiri Olsa 	size_t ret;
710e08a4564SJiri Olsa 
71118a13a60SJiri Olsa 	ret  = fprintf(fp, "\n... id %" PRI_lu64 ", cpu %d, thread %d\n",
712e08a4564SJiri Olsa 		       st->id, st->cpu, st->thread);
71318a13a60SJiri Olsa 	ret += fprintf(fp, "... value %" PRI_lu64 ", enabled %" PRI_lu64 ", running %" PRI_lu64 "\n",
714e08a4564SJiri Olsa 		       st->val, st->ena, st->run);
715e08a4564SJiri Olsa 
716e08a4564SJiri Olsa 	return ret;
717e08a4564SJiri Olsa }
718e08a4564SJiri Olsa 
perf_event__fprintf_stat_round(union perf_event * event,FILE * fp)719e08a4564SJiri Olsa size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
720e08a4564SJiri Olsa {
72172932371SJiri Olsa 	struct perf_record_stat_round *rd = (struct perf_record_stat_round *)event;
722e08a4564SJiri Olsa 	size_t ret;
723e08a4564SJiri Olsa 
724782adbe2SJiri Olsa 	ret = fprintf(fp, "\n... time %" PRI_lu64 ", type %s\n", rd->time,
725e08a4564SJiri Olsa 		      rd->type == PERF_STAT_ROUND_TYPE__FINAL ? "FINAL" : "INTERVAL");
726e08a4564SJiri Olsa 
727e08a4564SJiri Olsa 	return ret;
728e08a4564SJiri Olsa }
729e08a4564SJiri Olsa 
perf_event__fprintf_stat_config(union perf_event * event,FILE * fp)730e08a4564SJiri Olsa size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
731e08a4564SJiri Olsa {
732*91f88a0aSIan Rogers 	struct perf_stat_config sc = {};
733e08a4564SJiri Olsa 	size_t ret;
734e08a4564SJiri Olsa 
735e08a4564SJiri Olsa 	perf_event__read_stat_config(&sc, &event->stat_config);
736e08a4564SJiri Olsa 
737e08a4564SJiri Olsa 	ret  = fprintf(fp, "\n");
738e08a4564SJiri Olsa 	ret += fprintf(fp, "... aggr_mode %d\n", sc.aggr_mode);
739e08a4564SJiri Olsa 	ret += fprintf(fp, "... scale     %d\n", sc.scale);
740e08a4564SJiri Olsa 	ret += fprintf(fp, "... interval  %u\n", sc.interval);
741e08a4564SJiri Olsa 
742e08a4564SJiri Olsa 	return ret;
743e08a4564SJiri Olsa }
744d09cefd2SJiri Olsa 
/*
 * Set up @evsel's perf_event_attr for counting (as opposed to sampling)
 * according to @config and @target, then open it either per-cpu or
 * per-thread.  Returns the result of evsel__open_per_cpu()/
 * evsel__open_per_thread() — 0 on success, negative on error.
 */
int create_perf_stat_counter(struct evsel *evsel,
			     struct perf_stat_config *config,
			     struct target *target,
			     int cpu_map_idx)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	struct evsel *leader = evsel__leader(evsel);

	/* Always read enabled/running times so counts can be scaled later. */
	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			    PERF_FORMAT_TOTAL_TIME_RUNNING;

	/*
	 * The event is part of non trivial group, let's enable
	 * the group read (for leader) and ID retrieval for all
	 * members.
	 */
	if (leader->core.nr_members > 1)
		attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;

	/* Events backed by a BPF counter (non-empty list) are never inherited. */
	attr->inherit = !config->no_inherit && list_empty(&evsel->bpf_counter_list);

	/*
	 * Some events get initialized with sample_(period/type) set,
	 * like tracepoints. Clear it up for counting.
	 */
	attr->sample_period = 0;

	if (config->identifier)
		attr->sample_type = PERF_SAMPLE_IDENTIFIER;

	/* Restrict counting to user space only when all_user is set. */
	if (config->all_user) {
		attr->exclude_kernel = 1;
		attr->exclude_user   = 0;
	}

	/*
	 * Restrict counting to kernel space only.  Note: applied after
	 * all_user, so all_kernel wins if both are somehow set.
	 */
	if (config->all_kernel) {
		attr->exclude_kernel = 0;
		attr->exclude_user   = 1;
	}

	/*
	 * Disabling all counters initially, they will be enabled
	 * either manually by us or by kernel via enable_on_exec
	 * set later.
	 */
	if (evsel__is_group_leader(evsel)) {
		attr->disabled = 1;

		if (target__enable_on_exec(target))
			attr->enable_on_exec = 1;
	}

	/* CPU-oriented targets open per-cpu; everything else per-thread. */
	if (target__has_cpu(target) && !target__has_per_thread(target))
		return evsel__open_per_cpu(evsel, evsel__cpus(evsel), cpu_map_idx);

	return evsel__open_per_thread(evsel, evsel->core.threads);
}
802