xref: /openbmc/linux/tools/perf/builtin-stat.c (revision ac95df46)
191007045SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
286470930SIngo Molnar /*
386470930SIngo Molnar  * builtin-stat.c
486470930SIngo Molnar  *
586470930SIngo Molnar  * Builtin stat command: Give a precise performance counters summary
686470930SIngo Molnar  * overview about any workload, CPU or specific PID.
786470930SIngo Molnar  *
886470930SIngo Molnar  * Sample output:
986470930SIngo Molnar 
102cba3ffbSIngo Molnar    $ perf stat ./hackbench 10
1186470930SIngo Molnar 
122cba3ffbSIngo Molnar   Time: 0.118
1386470930SIngo Molnar 
142cba3ffbSIngo Molnar   Performance counter stats for './hackbench 10':
1586470930SIngo Molnar 
162cba3ffbSIngo Molnar        1708.761321 task-clock                #   11.037 CPUs utilized
172cba3ffbSIngo Molnar             41,190 context-switches          #    0.024 M/sec
182cba3ffbSIngo Molnar              6,735 CPU-migrations            #    0.004 M/sec
192cba3ffbSIngo Molnar             17,318 page-faults               #    0.010 M/sec
202cba3ffbSIngo Molnar      5,205,202,243 cycles                    #    3.046 GHz
212cba3ffbSIngo Molnar      3,856,436,920 stalled-cycles-frontend   #   74.09% frontend cycles idle
222cba3ffbSIngo Molnar      1,600,790,871 stalled-cycles-backend    #   30.75% backend  cycles idle
232cba3ffbSIngo Molnar      2,603,501,247 instructions              #    0.50  insns per cycle
242cba3ffbSIngo Molnar                                              #    1.48  stalled cycles per insn
252cba3ffbSIngo Molnar        484,357,498 branches                  #  283.455 M/sec
262cba3ffbSIngo Molnar          6,388,934 branch-misses             #    1.32% of all branches
272cba3ffbSIngo Molnar 
282cba3ffbSIngo Molnar         0.154822978  seconds time elapsed
2986470930SIngo Molnar 
3086470930SIngo Molnar  *
312cba3ffbSIngo Molnar  * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
3286470930SIngo Molnar  *
3386470930SIngo Molnar  * Improvements and fixes by:
3486470930SIngo Molnar  *
3586470930SIngo Molnar  *   Arjan van de Ven <arjan@linux.intel.com>
3686470930SIngo Molnar  *   Yanmin Zhang <yanmin.zhang@intel.com>
3786470930SIngo Molnar  *   Wu Fengguang <fengguang.wu@intel.com>
3886470930SIngo Molnar  *   Mike Galbraith <efault@gmx.de>
3986470930SIngo Molnar  *   Paul Mackerras <paulus@samba.org>
406e750a8fSJaswinder Singh Rajput  *   Jaswinder Singh Rajput <jaswinder@kernel.org>
4186470930SIngo Molnar  */
4286470930SIngo Molnar 
4386470930SIngo Molnar #include "builtin.h"
44f14d5707SArnaldo Carvalho de Melo #include "util/cgroup.h"
454b6ab94eSJosh Poimboeuf #include <subcmd/parse-options.h>
4686470930SIngo Molnar #include "util/parse-events.h"
47003be8c4SIan Rogers #include "util/pmus.h"
484cabc3d1SAndi Kleen #include "util/pmu.h"
498f28827aSFrederic Weisbecker #include "util/event.h"
50361c99a6SArnaldo Carvalho de Melo #include "util/evlist.h"
5169aad6f1SArnaldo Carvalho de Melo #include "util/evsel.h"
528f28827aSFrederic Weisbecker #include "util/debug.h"
53a5d243d0SIngo Molnar #include "util/color.h"
540007eceaSXiao Guangrong #include "util/stat.h"
5560666c63SLiming Wang #include "util/header.h"
56a12b51c4SPaul Mackerras #include "util/cpumap.h"
57fd78260bSArnaldo Carvalho de Melo #include "util/thread_map.h"
58d809560bSJiri Olsa #include "util/counts.h"
59687986bbSKan Liang #include "util/topdown.h"
604979d0c7SJiri Olsa #include "util/session.h"
61ba6039b6SJiri Olsa #include "util/tool.h"
62a067558eSArnaldo Carvalho de Melo #include "util/string2.h"
63b18f3e36SAndi Kleen #include "util/metricgroup.h"
64ea49e01cSArnaldo Carvalho de Melo #include "util/synthetic-events.h"
65aeb00b1aSArnaldo Carvalho de Melo #include "util/target.h"
66f3711020SArnaldo Carvalho de Melo #include "util/time-utils.h"
679660e08eSJiri Olsa #include "util/top.h"
684804e011SAndi Kleen #include "util/affinity.h"
6970943490SStephane Eranian #include "util/pfm.h"
70fa853c4bSSong Liu #include "util/bpf_counter.h"
71f07952b1SAlexander Antonov #include "util/iostat.h"
72f12ad272SIan Rogers #include "util/util.h"
73ba6039b6SJiri Olsa #include "asm/bug.h"
7486470930SIngo Molnar 
75bd48c63eSArnaldo Carvalho de Melo #include <linux/time64.h>
767f7c536fSArnaldo Carvalho de Melo #include <linux/zalloc.h>
7744b1e60aSAndi Kleen #include <api/fs/fs.h>
78a43783aeSArnaldo Carvalho de Melo #include <errno.h>
799607ad3aSArnaldo Carvalho de Melo #include <signal.h>
801f16c575SPeter Zijlstra #include <stdlib.h>
8186470930SIngo Molnar #include <sys/prctl.h>
82fd20e811SArnaldo Carvalho de Melo #include <inttypes.h>
835af52b51SStephane Eranian #include <locale.h>
84e3b03b6cSAndi Kleen #include <math.h>
857a8ef4c4SArnaldo Carvalho de Melo #include <sys/types.h>
867a8ef4c4SArnaldo Carvalho de Melo #include <sys/stat.h>
874208735dSArnaldo Carvalho de Melo #include <sys/wait.h>
887a8ef4c4SArnaldo Carvalho de Melo #include <unistd.h>
890ce2da14SJiri Olsa #include <sys/time.h>
900ce2da14SJiri Olsa #include <sys/resource.h>
916ef81c55SMamatha Inamdar #include <linux/err.h>
9286470930SIngo Molnar 
933052ba56SArnaldo Carvalho de Melo #include <linux/ctype.h>
94453fa030SJiri Olsa #include <perf/evlist.h>
95fd3f518fSIan Rogers #include <internal/threadmap.h>
963d689ed6SArnaldo Carvalho de Melo 
97d7470b6aSStephane Eranian #define DEFAULT_SEPARATOR	" "
98daefd0bcSKan Liang #define FREEZE_ON_SMI_PATH	"devices/cpu/freeze_on_smi"
99d7470b6aSStephane Eranian 
100d4f63a47SJiri Olsa static void print_counters(struct timespec *ts, int argc, const char **argv);
10113370a9bSStephane Eranian 
10263503dbaSJiri Olsa static struct evlist	*evsel_list;
103411ad22eSIan Rogers static struct parse_events_option_args parse_events_option_args = {
104411ad22eSIan Rogers 	.evlistp = &evsel_list,
105411ad22eSIan Rogers };
106411ad22eSIan Rogers 
107112cb561SSong Liu static bool all_counters_use_bpf = true;
108361c99a6SArnaldo Carvalho de Melo 
109602ad878SArnaldo Carvalho de Melo static struct target target = {
11077a6f014SNamhyung Kim 	.uid	= UINT_MAX,
11177a6f014SNamhyung Kim };
11242202dd5SIngo Molnar 
113c1a1f5d9SJiri Olsa #define METRIC_ONLY_LEN 20
114c1a1f5d9SJiri Olsa 
11501513fdcSIan Rogers static volatile sig_atomic_t	child_pid			= -1;
1162cba3ffbSIngo Molnar static int			detailed_run			=  0;
1174cabc3d1SAndi Kleen static bool			transaction_run;
11844b1e60aSAndi Kleen static bool			topdown_run			= false;
119daefd0bcSKan Liang static bool			smi_cost			= false;
120daefd0bcSKan Liang static bool			smi_reset			= false;
121d7470b6aSStephane Eranian static int			big_num_opt			=  -1;
1221f16c575SPeter Zijlstra static const char		*pre_cmd			= NULL;
1231f16c575SPeter Zijlstra static const char		*post_cmd			= NULL;
1241f16c575SPeter Zijlstra static bool			sync_run			= false;
125a7e191c3SFrederik Deweerdt static bool			forever				= false;
12644b1e60aSAndi Kleen static bool			force_metric_only		= false;
12713370a9bSStephane Eranian static struct timespec		ref_time;
128e0547311SJiri Olsa static bool			append_file;
129db06a269Syuzhoujian static bool			interval_count;
130e0547311SJiri Olsa static const char		*output_name;
131e0547311SJiri Olsa static int			output_fd;
132a4b8cfcaSIan Rogers static char			*metrics;
1335af52b51SStephane Eranian 
1344979d0c7SJiri Olsa struct perf_stat {
1354979d0c7SJiri Olsa 	bool			 record;
1368ceb41d7SJiri Olsa 	struct perf_data	 data;
1374979d0c7SJiri Olsa 	struct perf_session	*session;
1384979d0c7SJiri Olsa 	u64			 bytes_written;
139ba6039b6SJiri Olsa 	struct perf_tool	 tool;
1401975d36eSJiri Olsa 	bool			 maps_allocated;
141f854839bSJiri Olsa 	struct perf_cpu_map	*cpus;
1429749b90eSJiri Olsa 	struct perf_thread_map *threads;
14389af4e05SJiri Olsa 	enum aggr_mode		 aggr_mode;
144995ed074SK Prateek Nayak 	u32			 aggr_level;
1454979d0c7SJiri Olsa };
1464979d0c7SJiri Olsa 
1474979d0c7SJiri Olsa static struct perf_stat		perf_stat;
1484979d0c7SJiri Olsa #define STAT_RECORD		perf_stat.record
1494979d0c7SJiri Olsa 
15001513fdcSIan Rogers static volatile sig_atomic_t done = 0;
15160666c63SLiming Wang 
152421a50f3SJiri Olsa static struct perf_stat_config stat_config = {
153421a50f3SJiri Olsa 	.aggr_mode		= AGGR_GLOBAL,
154995ed074SK Prateek Nayak 	.aggr_level		= MAX_CACHE_LVL + 1,
155711a572eSJiri Olsa 	.scale			= true,
156df4f7b4dSJiri Olsa 	.unit_width		= 4, /* strlen("unit") */
157d97ae04bSJiri Olsa 	.run_count		= 1,
158ee1760e2SJiri Olsa 	.metric_only_len	= METRIC_ONLY_LEN,
15926893a60SJiri Olsa 	.walltime_nsecs_stats	= &walltime_nsecs_stats,
160c735b0a5SFlorian Fischer 	.ru_stats		= &ru_stats,
16134ff0866SJiri Olsa 	.big_num		= true,
16227e9769aSAlexey Budankov 	.ctl_fd			= -1,
163f07952b1SAlexander Antonov 	.ctl_fd_ack		= -1,
164f07952b1SAlexander Antonov 	.iostat_run		= false,
165421a50f3SJiri Olsa };
166421a50f3SJiri Olsa 
cpus_map_matched(struct evsel * a,struct evsel * b)167a9a17902SJiri Olsa static bool cpus_map_matched(struct evsel *a, struct evsel *b)
168a9a17902SJiri Olsa {
169a9a17902SJiri Olsa 	if (!a->core.cpus && !b->core.cpus)
170a9a17902SJiri Olsa 		return true;
171a9a17902SJiri Olsa 
172a9a17902SJiri Olsa 	if (!a->core.cpus || !b->core.cpus)
173a9a17902SJiri Olsa 		return false;
174a9a17902SJiri Olsa 
17544028699SIan Rogers 	if (perf_cpu_map__nr(a->core.cpus) != perf_cpu_map__nr(b->core.cpus))
176a9a17902SJiri Olsa 		return false;
177a9a17902SJiri Olsa 
17844028699SIan Rogers 	for (int i = 0; i < perf_cpu_map__nr(a->core.cpus); i++) {
17944028699SIan Rogers 		if (perf_cpu_map__cpu(a->core.cpus, i).cpu !=
18044028699SIan Rogers 		    perf_cpu_map__cpu(b->core.cpus, i).cpu)
181a9a17902SJiri Olsa 			return false;
182a9a17902SJiri Olsa 	}
183a9a17902SJiri Olsa 
184a9a17902SJiri Olsa 	return true;
185a9a17902SJiri Olsa }
186a9a17902SJiri Olsa 
evlist__check_cpu_maps(struct evlist * evlist)187a9a17902SJiri Olsa static void evlist__check_cpu_maps(struct evlist *evlist)
188a9a17902SJiri Olsa {
189bc6c6cdcSIan Rogers 	struct evsel *evsel, *warned_leader = NULL;
190a9a17902SJiri Olsa 
191a9a17902SJiri Olsa 	evlist__for_each_entry(evlist, evsel) {
192bc6c6cdcSIan Rogers 		struct evsel *leader = evsel__leader(evsel);
193a9a17902SJiri Olsa 
194a9a17902SJiri Olsa 		/* Check that leader matches cpus with each member. */
195a9a17902SJiri Olsa 		if (leader == evsel)
196a9a17902SJiri Olsa 			continue;
197a9a17902SJiri Olsa 		if (cpus_map_matched(leader, evsel))
198a9a17902SJiri Olsa 			continue;
199a9a17902SJiri Olsa 
200a9a17902SJiri Olsa 		/* If there's mismatch disable the group and warn user. */
201bc6c6cdcSIan Rogers 		if (warned_leader != leader) {
202bc6c6cdcSIan Rogers 			char buf[200];
203bc6c6cdcSIan Rogers 
204bc6c6cdcSIan Rogers 			pr_warning("WARNING: grouped events cpus do not match.\n"
205bc6c6cdcSIan Rogers 				"Events with CPUs not matching the leader will "
206bc6c6cdcSIan Rogers 				"be removed from the group.\n");
207a9a17902SJiri Olsa 			evsel__group_desc(leader, buf, sizeof(buf));
208a9a17902SJiri Olsa 			pr_warning("  %s\n", buf);
209bc6c6cdcSIan Rogers 			warned_leader = leader;
210bc6c6cdcSIan Rogers 		}
2117c0a6144SYang Jihong 		if (verbose > 0) {
212bc6c6cdcSIan Rogers 			char buf[200];
213bc6c6cdcSIan Rogers 
214a9a17902SJiri Olsa 			cpu_map__snprint(leader->core.cpus, buf, sizeof(buf));
215a9a17902SJiri Olsa 			pr_warning("     %s: %s\n", leader->name, buf);
216a9a17902SJiri Olsa 			cpu_map__snprint(evsel->core.cpus, buf, sizeof(buf));
217a9a17902SJiri Olsa 			pr_warning("     %s: %s\n", evsel->name, buf);
218a9a17902SJiri Olsa 		}
219a9a17902SJiri Olsa 
220bc6c6cdcSIan Rogers 		evsel__remove_from_group(evsel, leader);
221a9a17902SJiri Olsa 	}
222a9a17902SJiri Olsa }
223a9a17902SJiri Olsa 
diff_timespec(struct timespec * r,struct timespec * a,struct timespec * b)22413370a9bSStephane Eranian static inline void diff_timespec(struct timespec *r, struct timespec *a,
22513370a9bSStephane Eranian 				 struct timespec *b)
22613370a9bSStephane Eranian {
22713370a9bSStephane Eranian 	r->tv_sec = a->tv_sec - b->tv_sec;
22813370a9bSStephane Eranian 	if (a->tv_nsec < b->tv_nsec) {
229310ebb93SArnaldo Carvalho de Melo 		r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec;
23013370a9bSStephane Eranian 		r->tv_sec--;
23113370a9bSStephane Eranian 	} else {
23213370a9bSStephane Eranian 		r->tv_nsec = a->tv_nsec - b->tv_nsec ;
23313370a9bSStephane Eranian 	}
23413370a9bSStephane Eranian }
23513370a9bSStephane Eranian 
perf_stat__reset_stats(void)236254ecbc7SJiri Olsa static void perf_stat__reset_stats(void)
237254ecbc7SJiri Olsa {
23853f5e908SArnaldo Carvalho de Melo 	evlist__reset_stats(evsel_list);
239f87027b9SJiri Olsa 	perf_stat__reset_shadow_stats();
2401eda3b21SJiri Olsa }
2411eda3b21SJiri Olsa 
process_synthesized_event(struct perf_tool * tool __maybe_unused,union perf_event * event,struct perf_sample * sample __maybe_unused,struct machine * machine __maybe_unused)2428b99b1a4SJiri Olsa static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
2434979d0c7SJiri Olsa 				     union perf_event *event,
2444979d0c7SJiri Olsa 				     struct perf_sample *sample __maybe_unused,
2454979d0c7SJiri Olsa 				     struct machine *machine __maybe_unused)
2464979d0c7SJiri Olsa {
2478ceb41d7SJiri Olsa 	if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) {
2488b99b1a4SJiri Olsa 		pr_err("failed to write perf data, error: %m\n");
2498b99b1a4SJiri Olsa 		return -1;
2508b99b1a4SJiri Olsa 	}
2518b99b1a4SJiri Olsa 
2528b99b1a4SJiri Olsa 	perf_stat.bytes_written += event->header.size;
2538b99b1a4SJiri Olsa 	return 0;
2544979d0c7SJiri Olsa }
2554979d0c7SJiri Olsa 
write_stat_round_event(u64 tm,u64 type)2561975d36eSJiri Olsa static int write_stat_round_event(u64 tm, u64 type)
2577aad0c32SJiri Olsa {
2581975d36eSJiri Olsa 	return perf_event__synthesize_stat_round(NULL, tm, type,
2597aad0c32SJiri Olsa 						 process_synthesized_event,
2607aad0c32SJiri Olsa 						 NULL);
2617aad0c32SJiri Olsa }
2627aad0c32SJiri Olsa 
2637aad0c32SJiri Olsa #define WRITE_STAT_ROUND_EVENT(time, interval) \
2647aad0c32SJiri Olsa 	write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)
2657aad0c32SJiri Olsa 
2668cd36f3eSJiri Olsa #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
2675a6ea81bSJiri Olsa 
evsel__write_stat_event(struct evsel * counter,int cpu_map_idx,u32 thread,struct perf_counts_values * count)2687ac0089dSIan Rogers static int evsel__write_stat_event(struct evsel *counter, int cpu_map_idx, u32 thread,
2695a6ea81bSJiri Olsa 				   struct perf_counts_values *count)
2705a6ea81bSJiri Olsa {
2717ac0089dSIan Rogers 	struct perf_sample_id *sid = SID(counter, cpu_map_idx, thread);
2726d18804bSIan Rogers 	struct perf_cpu cpu = perf_cpu_map__cpu(evsel__cpus(counter), cpu_map_idx);
2735a6ea81bSJiri Olsa 
2745a6ea81bSJiri Olsa 	return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
2755a6ea81bSJiri Olsa 					   process_synthesized_event, NULL);
2765a6ea81bSJiri Olsa }
2775a6ea81bSJiri Olsa 
read_single_counter(struct evsel * counter,int cpu_map_idx,int thread,struct timespec * rs)278da8c94c0SIan Rogers static int read_single_counter(struct evsel *counter, int cpu_map_idx,
279f0fbb114SAndi Kleen 			       int thread, struct timespec *rs)
280f0fbb114SAndi Kleen {
281b03b89b3SFlorian Fischer 	switch(counter->tool_event) {
282b03b89b3SFlorian Fischer 		case PERF_TOOL_DURATION_TIME: {
283f0fbb114SAndi Kleen 			u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL;
284f0fbb114SAndi Kleen 			struct perf_counts_values *count =
285da8c94c0SIan Rogers 				perf_counts(counter->counts, cpu_map_idx, thread);
286f0fbb114SAndi Kleen 			count->ena = count->run = val;
287f0fbb114SAndi Kleen 			count->val = val;
288f0fbb114SAndi Kleen 			return 0;
289f0fbb114SAndi Kleen 		}
290b03b89b3SFlorian Fischer 		case PERF_TOOL_USER_TIME:
291b03b89b3SFlorian Fischer 		case PERF_TOOL_SYSTEM_TIME: {
292b03b89b3SFlorian Fischer 			u64 val;
293b03b89b3SFlorian Fischer 			struct perf_counts_values *count =
294b03b89b3SFlorian Fischer 				perf_counts(counter->counts, cpu_map_idx, thread);
295b03b89b3SFlorian Fischer 			if (counter->tool_event == PERF_TOOL_USER_TIME)
296b03b89b3SFlorian Fischer 				val = ru_stats.ru_utime_usec_stat.mean;
297b03b89b3SFlorian Fischer 			else
298b03b89b3SFlorian Fischer 				val = ru_stats.ru_stime_usec_stat.mean;
299b03b89b3SFlorian Fischer 			count->ena = count->run = val;
300b03b89b3SFlorian Fischer 			count->val = val;
301b03b89b3SFlorian Fischer 			return 0;
302b03b89b3SFlorian Fischer 		}
303b03b89b3SFlorian Fischer 		default:
304b03b89b3SFlorian Fischer 		case PERF_TOOL_NONE:
305da8c94c0SIan Rogers 			return evsel__read_counter(counter, cpu_map_idx, thread);
306b03b89b3SFlorian Fischer 		case PERF_TOOL_MAX:
307b03b89b3SFlorian Fischer 			/* This should never be reached */
308b03b89b3SFlorian Fischer 			return 0;
309b03b89b3SFlorian Fischer 	}
310f0fbb114SAndi Kleen }
311f0fbb114SAndi Kleen 
312f5b4a9c3SStephane Eranian /*
313f5b4a9c3SStephane Eranian  * Read out the results of a single counter:
314f5b4a9c3SStephane Eranian  * do not aggregate counts across CPUs in system-wide mode
315f5b4a9c3SStephane Eranian  */
read_counter_cpu(struct evsel * counter,struct timespec * rs,int cpu_map_idx)316da8c94c0SIan Rogers static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu_map_idx)
317f5b4a9c3SStephane Eranian {
318a2f354e3SJiri Olsa 	int nthreads = perf_thread_map__nr(evsel_list->core.threads);
3194b49ab70SAndi Kleen 	int thread;
320f5b4a9c3SStephane Eranian 
3213b4331d9SSuzuki K. Poulose 	if (!counter->supported)
3223b4331d9SSuzuki K. Poulose 		return -ENOENT;
3233b4331d9SSuzuki K. Poulose 
3249bf1a529SJiri Olsa 	for (thread = 0; thread < nthreads; thread++) {
3253b3eb044SJiri Olsa 		struct perf_counts_values *count;
3263b3eb044SJiri Olsa 
327da8c94c0SIan Rogers 		count = perf_counts(counter->counts, cpu_map_idx, thread);
32882bf311eSJiri Olsa 
32982bf311eSJiri Olsa 		/*
33082bf311eSJiri Olsa 		 * The leader's group read loads data into its group members
331ea089692SArnaldo Carvalho de Melo 		 * (via evsel__read_counter()) and sets their count->loaded.
33282bf311eSJiri Olsa 		 */
333da8c94c0SIan Rogers 		if (!perf_counts__is_loaded(counter->counts, cpu_map_idx, thread) &&
334da8c94c0SIan Rogers 		    read_single_counter(counter, cpu_map_idx, thread, rs)) {
335db49a717SStephane Eranian 			counter->counts->scaled = -1;
336da8c94c0SIan Rogers 			perf_counts(counter->counts, cpu_map_idx, thread)->ena = 0;
337da8c94c0SIan Rogers 			perf_counts(counter->counts, cpu_map_idx, thread)->run = 0;
338c52b12edSArnaldo Carvalho de Melo 			return -1;
339db49a717SStephane Eranian 		}
3405a6ea81bSJiri Olsa 
341da8c94c0SIan Rogers 		perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, false);
34282bf311eSJiri Olsa 
3435a6ea81bSJiri Olsa 		if (STAT_RECORD) {
344da8c94c0SIan Rogers 			if (evsel__write_stat_event(counter, cpu_map_idx, thread, count)) {
3455a6ea81bSJiri Olsa 				pr_err("failed to write stat event\n");
3465a6ea81bSJiri Olsa 				return -1;
3475a6ea81bSJiri Olsa 			}
3485a6ea81bSJiri Olsa 		}
3490b1abbf4SAndi Kleen 
3500b1abbf4SAndi Kleen 		if (verbose > 1) {
3510b1abbf4SAndi Kleen 			fprintf(stat_config.output,
3520b1abbf4SAndi Kleen 				"%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
3538ab2e96dSArnaldo Carvalho de Melo 					evsel__name(counter),
3546d18804bSIan Rogers 					perf_cpu_map__cpu(evsel__cpus(counter),
3556d18804bSIan Rogers 							  cpu_map_idx).cpu,
3560b1abbf4SAndi Kleen 					count->val, count->ena, count->run);
3570b1abbf4SAndi Kleen 		}
358f5b4a9c3SStephane Eranian 	}
359c52b12edSArnaldo Carvalho de Melo 
360c52b12edSArnaldo Carvalho de Melo 	return 0;
36186470930SIngo Molnar }
36286470930SIngo Molnar 
read_affinity_counters(struct timespec * rs)363c7e5b328SJin Yao static int read_affinity_counters(struct timespec *rs)
364106a94a0SJiri Olsa {
365472832d2SIan Rogers 	struct evlist_cpu_iterator evlist_cpu_itr;
366472832d2SIan Rogers 	struct affinity saved_affinity, *affinity;
3674b49ab70SAndi Kleen 
368112cb561SSong Liu 	if (all_counters_use_bpf)
369112cb561SSong Liu 		return 0;
370112cb561SSong Liu 
3714b49ab70SAndi Kleen 	if (!target__has_cpu(&target) || target__has_per_thread(&target))
372472832d2SIan Rogers 		affinity = NULL;
373472832d2SIan Rogers 	else if (affinity__setup(&saved_affinity) < 0)
374472832d2SIan Rogers 		return -1;
375472832d2SIan Rogers 	else
376472832d2SIan Rogers 		affinity = &saved_affinity;
377106a94a0SJiri Olsa 
378472832d2SIan Rogers 	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
379472832d2SIan Rogers 		struct evsel *counter = evlist_cpu_itr.evsel;
380472832d2SIan Rogers 
381112cb561SSong Liu 		if (evsel__is_bpf(counter))
382112cb561SSong Liu 			continue;
383472832d2SIan Rogers 
3844b49ab70SAndi Kleen 		if (!counter->err) {
3854b49ab70SAndi Kleen 			counter->err = read_counter_cpu(counter, rs,
386472832d2SIan Rogers 							evlist_cpu_itr.cpu_map_idx);
3874b49ab70SAndi Kleen 		}
3884b49ab70SAndi Kleen 	}
389472832d2SIan Rogers 	if (affinity)
390472832d2SIan Rogers 		affinity__cleanup(&saved_affinity);
391472832d2SIan Rogers 
392c7e5b328SJin Yao 	return 0;
393c7e5b328SJin Yao }
394c7e5b328SJin Yao 
read_bpf_map_counters(void)395fa853c4bSSong Liu static int read_bpf_map_counters(void)
396fa853c4bSSong Liu {
397fa853c4bSSong Liu 	struct evsel *counter;
398fa853c4bSSong Liu 	int err;
399fa853c4bSSong Liu 
400fa853c4bSSong Liu 	evlist__for_each_entry(evsel_list, counter) {
401112cb561SSong Liu 		if (!evsel__is_bpf(counter))
402112cb561SSong Liu 			continue;
403112cb561SSong Liu 
404fa853c4bSSong Liu 		err = bpf_counter__read(counter);
405fa853c4bSSong Liu 		if (err)
406fa853c4bSSong Liu 			return err;
407fa853c4bSSong Liu 	}
408fa853c4bSSong Liu 	return 0;
409fa853c4bSSong Liu }
410fa853c4bSSong Liu 
read_counters(struct timespec * rs)4118962cbecSNamhyung Kim static int read_counters(struct timespec *rs)
412c7e5b328SJin Yao {
413fa853c4bSSong Liu 	if (!stat_config.stop_read_counter) {
414112cb561SSong Liu 		if (read_bpf_map_counters() ||
415112cb561SSong Liu 		    read_affinity_counters(rs))
4168962cbecSNamhyung Kim 			return -1;
417fa853c4bSSong Liu 	}
4188962cbecSNamhyung Kim 	return 0;
4198962cbecSNamhyung Kim }
4208962cbecSNamhyung Kim 
process_counters(void)4218962cbecSNamhyung Kim static void process_counters(void)
4228962cbecSNamhyung Kim {
4238962cbecSNamhyung Kim 	struct evsel *counter;
4243b3eb044SJiri Olsa 
4254b49ab70SAndi Kleen 	evlist__for_each_entry(evsel_list, counter) {
4264b49ab70SAndi Kleen 		if (counter->err)
4274b49ab70SAndi Kleen 			pr_debug("failed to read counter %s\n", counter->name);
4284b49ab70SAndi Kleen 		if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter))
4293b3eb044SJiri Olsa 			pr_warning("failed to process counter %s\n", counter->name);
4304b49ab70SAndi Kleen 		counter->err = 0;
431106a94a0SJiri Olsa 	}
432942c5593SNamhyung Kim 
433942c5593SNamhyung Kim 	perf_stat_merge_counters(&stat_config, evsel_list);
4341d6d2beaSNamhyung Kim 	perf_stat_process_percore(&stat_config, evsel_list);
435106a94a0SJiri Olsa }
436106a94a0SJiri Olsa 
process_interval(void)437ba411a95SJiri Olsa static void process_interval(void)
43813370a9bSStephane Eranian {
43913370a9bSStephane Eranian 	struct timespec ts, rs;
44013370a9bSStephane Eranian 
44113370a9bSStephane Eranian 	clock_gettime(CLOCK_MONOTONIC, &ts);
44213370a9bSStephane Eranian 	diff_timespec(&rs, &ts, &ref_time);
44313370a9bSStephane Eranian 
4448f97963eSNamhyung Kim 	evlist__reset_aggr_stats(evsel_list);
4458f97963eSNamhyung Kim 
4468962cbecSNamhyung Kim 	if (read_counters(&rs) == 0)
4478962cbecSNamhyung Kim 		process_counters();
448f0fbb114SAndi Kleen 
4497aad0c32SJiri Olsa 	if (STAT_RECORD) {
450bd48c63eSArnaldo Carvalho de Melo 		if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL))
4517aad0c32SJiri Olsa 			pr_err("failed to write stat round event\n");
4527aad0c32SJiri Olsa 	}
4537aad0c32SJiri Olsa 
454b90f1333SAndi Kleen 	init_stats(&walltime_nsecs_stats);
455ea9eb1f4SJiri Olsa 	update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL);
456d4f63a47SJiri Olsa 	print_counters(&rs, 0, NULL);
45713370a9bSStephane Eranian }
45813370a9bSStephane Eranian 
handle_interval(unsigned int interval,int * times)459dece3a4dSAlexey Budankov static bool handle_interval(unsigned int interval, int *times)
460dece3a4dSAlexey Budankov {
461dece3a4dSAlexey Budankov 	if (interval) {
462dece3a4dSAlexey Budankov 		process_interval();
463dece3a4dSAlexey Budankov 		if (interval_count && !(--(*times)))
464dece3a4dSAlexey Budankov 			return true;
465dece3a4dSAlexey Budankov 	}
466dece3a4dSAlexey Budankov 	return false;
467dece3a4dSAlexey Budankov }
468dece3a4dSAlexey Budankov 
enable_counters(void)469fa853c4bSSong Liu static int enable_counters(void)
47041191688SAndi Kleen {
471fa853c4bSSong Liu 	struct evsel *evsel;
472fa853c4bSSong Liu 	int err;
473fa853c4bSSong Liu 
474fa853c4bSSong Liu 	evlist__for_each_entry(evsel_list, evsel) {
475112cb561SSong Liu 		if (!evsel__is_bpf(evsel))
476112cb561SSong Liu 			continue;
477112cb561SSong Liu 
478fa853c4bSSong Liu 		err = bpf_counter__enable(evsel);
479fa853c4bSSong Liu 		if (err)
480fa853c4bSSong Liu 			return err;
481fa853c4bSSong Liu 	}
482fa853c4bSSong Liu 
48325f69c69SChangbin Du 	if (!target__enable_on_exec(&target)) {
484f8b61bd2SSong Liu 		if (!all_counters_use_bpf)
4851c87f165SJiri Olsa 			evlist__enable(evsel_list);
4862162b9c6SAlexey Budankov 	}
487fa853c4bSSong Liu 	return 0;
48841191688SAndi Kleen }
48941191688SAndi Kleen 
disable_counters(void)4903df33effSMark Rutland static void disable_counters(void)
4913df33effSMark Rutland {
492f8b61bd2SSong Liu 	struct evsel *counter;
493f8b61bd2SSong Liu 
4943df33effSMark Rutland 	/*
4953df33effSMark Rutland 	 * If we don't have tracee (attaching to task or cpu), counters may
4963df33effSMark Rutland 	 * still be running. To get accurate group ratios, we must stop groups
4973df33effSMark Rutland 	 * from counting before reading their constituent counters.
4983df33effSMark Rutland 	 */
499f8b61bd2SSong Liu 	if (!target__none(&target)) {
500f8b61bd2SSong Liu 		evlist__for_each_entry(evsel_list, counter)
501f8b61bd2SSong Liu 			bpf_counter__disable(counter);
502f8b61bd2SSong Liu 		if (!all_counters_use_bpf)
503e74676deSJiri Olsa 			evlist__disable(evsel_list);
5043df33effSMark Rutland 	}
505f8b61bd2SSong Liu }
5063df33effSMark Rutland 
50701513fdcSIan Rogers static volatile sig_atomic_t workload_exec_errno;
5086af206fdSArnaldo Carvalho de Melo 
5096af206fdSArnaldo Carvalho de Melo /*
5107b392ef0SArnaldo Carvalho de Melo  * evlist__prepare_workload will send a SIGUSR1
5116af206fdSArnaldo Carvalho de Melo  * if the fork fails, since we asked by setting its
5126af206fdSArnaldo Carvalho de Melo  * want_signal to true.
5136af206fdSArnaldo Carvalho de Melo  */
workload_exec_failed_signal(int signo __maybe_unused,siginfo_t * info,void * ucontext __maybe_unused)514f33cbe72SArnaldo Carvalho de Melo static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info,
515f33cbe72SArnaldo Carvalho de Melo 					void *ucontext __maybe_unused)
5166af206fdSArnaldo Carvalho de Melo {
517f33cbe72SArnaldo Carvalho de Melo 	workload_exec_errno = info->si_value.sival_int;
5186af206fdSArnaldo Carvalho de Melo }
5196af206fdSArnaldo Carvalho de Melo 
evsel__should_store_id(struct evsel * counter)520ddc6999eSArnaldo Carvalho de Melo static bool evsel__should_store_id(struct evsel *counter)
52182bf311eSJiri Olsa {
5221fc632ceSJiri Olsa 	return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID;
52382bf311eSJiri Olsa }
52482bf311eSJiri Olsa 
is_target_alive(struct target * _target,struct perf_thread_map * threads)525cbb5df7eSJiri Olsa static bool is_target_alive(struct target *_target,
5269749b90eSJiri Olsa 			    struct perf_thread_map *threads)
527cbb5df7eSJiri Olsa {
528cbb5df7eSJiri Olsa 	struct stat st;
529cbb5df7eSJiri Olsa 	int i;
530cbb5df7eSJiri Olsa 
531cbb5df7eSJiri Olsa 	if (!target__has_task(_target))
532cbb5df7eSJiri Olsa 		return true;
533cbb5df7eSJiri Olsa 
534cbb5df7eSJiri Olsa 	for (i = 0; i < threads->nr; i++) {
535cbb5df7eSJiri Olsa 		char path[PATH_MAX];
536cbb5df7eSJiri Olsa 
537cbb5df7eSJiri Olsa 		scnprintf(path, PATH_MAX, "%s/%d", procfs__mountpoint(),
538cbb5df7eSJiri Olsa 			  threads->map[i].pid);
539cbb5df7eSJiri Olsa 
540cbb5df7eSJiri Olsa 		if (!stat(path, &st))
541cbb5df7eSJiri Olsa 			return true;
542cbb5df7eSJiri Olsa 	}
543cbb5df7eSJiri Olsa 
544cbb5df7eSJiri Olsa 	return false;
545cbb5df7eSJiri Olsa }
546cbb5df7eSJiri Olsa 
process_evlist(struct evlist * evlist,unsigned int interval)547bee328cbSAlexey Budankov static void process_evlist(struct evlist *evlist, unsigned int interval)
548bee328cbSAlexey Budankov {
549bee328cbSAlexey Budankov 	enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;
550bee328cbSAlexey Budankov 
551bee328cbSAlexey Budankov 	if (evlist__ctlfd_process(evlist, &cmd) > 0) {
552bee328cbSAlexey Budankov 		switch (cmd) {
553bee328cbSAlexey Budankov 		case EVLIST_CTL_CMD_ENABLE:
554f7a858bfSLiam Howlett 			fallthrough;
555bee328cbSAlexey Budankov 		case EVLIST_CTL_CMD_DISABLE:
556bee328cbSAlexey Budankov 			if (interval)
557bee328cbSAlexey Budankov 				process_interval();
558bee328cbSAlexey Budankov 			break;
559d20aff15SAdrian Hunter 		case EVLIST_CTL_CMD_SNAPSHOT:
560bee328cbSAlexey Budankov 		case EVLIST_CTL_CMD_ACK:
561bee328cbSAlexey Budankov 		case EVLIST_CTL_CMD_UNSUPPORTED:
562142544a9SJiri Olsa 		case EVLIST_CTL_CMD_EVLIST:
563f186cd61SJiri Olsa 		case EVLIST_CTL_CMD_STOP:
56447fddcb4SJiri Olsa 		case EVLIST_CTL_CMD_PING:
565bee328cbSAlexey Budankov 		default:
566bee328cbSAlexey Budankov 			break;
567bee328cbSAlexey Budankov 		}
568bee328cbSAlexey Budankov 	}
569bee328cbSAlexey Budankov }
570bee328cbSAlexey Budankov 
compute_tts(struct timespec * time_start,struct timespec * time_stop,int * time_to_sleep)571bee328cbSAlexey Budankov static void compute_tts(struct timespec *time_start, struct timespec *time_stop,
572bee328cbSAlexey Budankov 			int *time_to_sleep)
573bee328cbSAlexey Budankov {
574bee328cbSAlexey Budankov 	int tts = *time_to_sleep;
575bee328cbSAlexey Budankov 	struct timespec time_diff;
576bee328cbSAlexey Budankov 
577bee328cbSAlexey Budankov 	diff_timespec(&time_diff, time_stop, time_start);
578bee328cbSAlexey Budankov 
579bee328cbSAlexey Budankov 	tts -= time_diff.tv_sec * MSEC_PER_SEC +
580bee328cbSAlexey Budankov 	       time_diff.tv_nsec / NSEC_PER_MSEC;
581bee328cbSAlexey Budankov 
582bee328cbSAlexey Budankov 	if (tts < 0)
583bee328cbSAlexey Budankov 		tts = 0;
584bee328cbSAlexey Budankov 
585bee328cbSAlexey Budankov 	*time_to_sleep = tts;
586bee328cbSAlexey Budankov }
587bee328cbSAlexey Budankov 
dispatch_events(bool forks,int timeout,int interval,int * times)588bee328cbSAlexey Budankov static int dispatch_events(bool forks, int timeout, int interval, int *times)
589987b8238SAlexey Budankov {
590987b8238SAlexey Budankov 	int child_exited = 0, status = 0;
591bee328cbSAlexey Budankov 	int time_to_sleep, sleep_time;
592bee328cbSAlexey Budankov 	struct timespec time_start, time_stop;
593bee328cbSAlexey Budankov 
594bee328cbSAlexey Budankov 	if (interval)
595bee328cbSAlexey Budankov 		sleep_time = interval;
596bee328cbSAlexey Budankov 	else if (timeout)
597bee328cbSAlexey Budankov 		sleep_time = timeout;
598bee328cbSAlexey Budankov 	else
599bee328cbSAlexey Budankov 		sleep_time = 1000;
600bee328cbSAlexey Budankov 
601bee328cbSAlexey Budankov 	time_to_sleep = sleep_time;
602987b8238SAlexey Budankov 
603987b8238SAlexey Budankov 	while (!done) {
604987b8238SAlexey Budankov 		if (forks)
605987b8238SAlexey Budankov 			child_exited = waitpid(child_pid, &status, WNOHANG);
606987b8238SAlexey Budankov 		else
607987b8238SAlexey Budankov 			child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0;
608987b8238SAlexey Budankov 
609987b8238SAlexey Budankov 		if (child_exited)
610987b8238SAlexey Budankov 			break;
611987b8238SAlexey Budankov 
612bee328cbSAlexey Budankov 		clock_gettime(CLOCK_MONOTONIC, &time_start);
613bee328cbSAlexey Budankov 		if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) { /* poll timeout or EINTR */
614987b8238SAlexey Budankov 			if (timeout || handle_interval(interval, times))
615987b8238SAlexey Budankov 				break;
616bee328cbSAlexey Budankov 			time_to_sleep = sleep_time;
617bee328cbSAlexey Budankov 		} else { /* fd revent */
618bee328cbSAlexey Budankov 			process_evlist(evsel_list, interval);
619bee328cbSAlexey Budankov 			clock_gettime(CLOCK_MONOTONIC, &time_stop);
620bee328cbSAlexey Budankov 			compute_tts(&time_start, &time_stop, &time_to_sleep);
621bee328cbSAlexey Budankov 		}
622987b8238SAlexey Budankov 	}
623987b8238SAlexey Budankov 
624987b8238SAlexey Budankov 	return status;
625987b8238SAlexey Budankov }
626987b8238SAlexey Budankov 
627e0e6a6caSAndi Kleen enum counter_recovery {
628e0e6a6caSAndi Kleen 	COUNTER_SKIP,
629e0e6a6caSAndi Kleen 	COUNTER_RETRY,
630e0e6a6caSAndi Kleen 	COUNTER_FATAL,
631e0e6a6caSAndi Kleen };
632e0e6a6caSAndi Kleen 
stat_handle_error(struct evsel * counter)633e0e6a6caSAndi Kleen static enum counter_recovery stat_handle_error(struct evsel *counter)
634e0e6a6caSAndi Kleen {
635e0e6a6caSAndi Kleen 	char msg[BUFSIZ];
636e0e6a6caSAndi Kleen 	/*
637e0e6a6caSAndi Kleen 	 * PPC returns ENXIO for HW counters until 2.6.37
638e0e6a6caSAndi Kleen 	 * (behavior changed with commit b0a873e).
639e0e6a6caSAndi Kleen 	 */
640e0e6a6caSAndi Kleen 	if (errno == EINVAL || errno == ENOSYS ||
641e0e6a6caSAndi Kleen 	    errno == ENOENT || errno == EOPNOTSUPP ||
642e0e6a6caSAndi Kleen 	    errno == ENXIO) {
643e0e6a6caSAndi Kleen 		if (verbose > 0)
644e0e6a6caSAndi Kleen 			ui__warning("%s event is not supported by the kernel.\n",
6458ab2e96dSArnaldo Carvalho de Melo 				    evsel__name(counter));
646e0e6a6caSAndi Kleen 		counter->supported = false;
6474804e011SAndi Kleen 		/*
6484804e011SAndi Kleen 		 * errored is a sticky flag that means one of the counter's
6494804e011SAndi Kleen 		 * cpu event had a problem and needs to be reexamined.
6504804e011SAndi Kleen 		 */
6514804e011SAndi Kleen 		counter->errored = true;
652e0e6a6caSAndi Kleen 
653fba7c866SJiri Olsa 		if ((evsel__leader(counter) != counter) ||
654fba7c866SJiri Olsa 		    !(counter->core.leader->nr_members > 1))
655e0e6a6caSAndi Kleen 			return COUNTER_SKIP;
656ae430892SArnaldo Carvalho de Melo 	} else if (evsel__fallback(counter, errno, msg, sizeof(msg))) {
657e0e6a6caSAndi Kleen 		if (verbose > 0)
658e0e6a6caSAndi Kleen 			ui__warning("%s\n", msg);
659e0e6a6caSAndi Kleen 		return COUNTER_RETRY;
660e0e6a6caSAndi Kleen 	} else if (target__has_per_thread(&target) &&
661e0e6a6caSAndi Kleen 		   evsel_list->core.threads &&
662e0e6a6caSAndi Kleen 		   evsel_list->core.threads->err_thread != -1) {
663e0e6a6caSAndi Kleen 		/*
664e0e6a6caSAndi Kleen 		 * For global --per-thread case, skip current
665e0e6a6caSAndi Kleen 		 * error thread.
666e0e6a6caSAndi Kleen 		 */
667e0e6a6caSAndi Kleen 		if (!thread_map__remove(evsel_list->core.threads,
668e0e6a6caSAndi Kleen 					evsel_list->core.threads->err_thread)) {
669e0e6a6caSAndi Kleen 			evsel_list->core.threads->err_thread = -1;
670e0e6a6caSAndi Kleen 			return COUNTER_RETRY;
671e0e6a6caSAndi Kleen 		}
6721b114824SIan Rogers 	} else if (counter->skippable) {
6731b114824SIan Rogers 		if (verbose > 0)
6741b114824SIan Rogers 			ui__warning("skipping event %s that kernel failed to open .\n",
6751b114824SIan Rogers 				    evsel__name(counter));
6761b114824SIan Rogers 		counter->supported = false;
6771b114824SIan Rogers 		counter->errored = true;
6781b114824SIan Rogers 		return COUNTER_SKIP;
679e0e6a6caSAndi Kleen 	}
680e0e6a6caSAndi Kleen 
6812bb72dbbSArnaldo Carvalho de Melo 	evsel__open_strerror(counter, &target, errno, msg, sizeof(msg));
682e0e6a6caSAndi Kleen 	ui__error("%s\n", msg);
683e0e6a6caSAndi Kleen 
684e0e6a6caSAndi Kleen 	if (child_pid != -1)
685e0e6a6caSAndi Kleen 		kill(child_pid, SIGTERM);
686e0e6a6caSAndi Kleen 	return COUNTER_FATAL;
687e0e6a6caSAndi Kleen }
688e0e6a6caSAndi Kleen 
__run_perf_stat(int argc,const char ** argv,int run_idx)689e55c14afSJiri Olsa static int __run_perf_stat(int argc, const char **argv, int run_idx)
69086470930SIngo Molnar {
691ec0d3d1fSJiri Olsa 	int interval = stat_config.interval;
692db06a269Syuzhoujian 	int times = stat_config.times;
693f1f8ad52Syuzhoujian 	int timeout = stat_config.timeout;
694d6195a6aSArnaldo Carvalho de Melo 	char msg[BUFSIZ];
69586470930SIngo Molnar 	unsigned long long t0, t1;
69632dcd021SJiri Olsa 	struct evsel *counter;
697410136f5SStephane Eranian 	size_t l;
69842202dd5SIngo Molnar 	int status = 0;
6996be2850eSZhang, Yanmin 	const bool forks = (argc > 0);
7008ceb41d7SJiri Olsa 	bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
701472832d2SIan Rogers 	struct evlist_cpu_iterator evlist_cpu_itr;
70249de1795SArnaldo Carvalho de Melo 	struct affinity saved_affinity, *affinity = NULL;
703472832d2SIan Rogers 	int err;
7044804e011SAndi Kleen 	bool second_pass = false;
70586470930SIngo Molnar 
706acf28922SNamhyung Kim 	if (forks) {
7077b392ef0SArnaldo Carvalho de Melo 		if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) {
708acf28922SNamhyung Kim 			perror("failed to prepare workload");
709fceda7feSDavid Ahern 			return -1;
710051ae7f7SPaul Mackerras 		}
711d20a47e7SNamhyung Kim 		child_pid = evsel_list->workload.pid;
71260666c63SLiming Wang 	}
713051ae7f7SPaul Mackerras 
7140df6ade7SIan Rogers 	if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) {
71549de1795SArnaldo Carvalho de Melo 		if (affinity__setup(&saved_affinity) < 0)
7164804e011SAndi Kleen 			return -1;
71749de1795SArnaldo Carvalho de Melo 		affinity = &saved_affinity;
71849de1795SArnaldo Carvalho de Melo 	}
7195a5dfe4bSAndi Kleen 
720fa853c4bSSong Liu 	evlist__for_each_entry(evsel_list, counter) {
721bf515f02SIan Rogers 		counter->reset_group = false;
722fa853c4bSSong Liu 		if (bpf_counter__load(counter, &target))
723fa853c4bSSong Liu 			return -1;
724ecc68ee2SDmitrii Dolgov 		if (!(evsel__is_bperf(counter)))
725112cb561SSong Liu 			all_counters_use_bpf = false;
726fa853c4bSSong Liu 	}
727fa853c4bSSong Liu 
728ed4090a2SNamhyung Kim 	evlist__reset_aggr_stats(evsel_list);
729ed4090a2SNamhyung Kim 
73049de1795SArnaldo Carvalho de Melo 	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
731472832d2SIan Rogers 		counter = evlist_cpu_itr.evsel;
732472832d2SIan Rogers 
7337fac83aaSSong Liu 		/*
7347fac83aaSSong Liu 		 * bperf calls evsel__open_per_cpu() in bperf__load(), so
7357fac83aaSSong Liu 		 * no need to call it again here.
7367fac83aaSSong Liu 		 */
7377fac83aaSSong Liu 		if (target.use_bpf)
7387fac83aaSSong Liu 			break;
7394804e011SAndi Kleen 
7404804e011SAndi Kleen 		if (counter->reset_group || counter->errored)
7414804e011SAndi Kleen 			continue;
742ecc68ee2SDmitrii Dolgov 		if (evsel__is_bperf(counter))
743112cb561SSong Liu 			continue;
7444804e011SAndi Kleen try_again:
7454804e011SAndi Kleen 		if (create_perf_stat_counter(counter, &stat_config, &target,
746472832d2SIan Rogers 					     evlist_cpu_itr.cpu_map_idx) < 0) {
7474804e011SAndi Kleen 
7484804e011SAndi Kleen 			/*
7494804e011SAndi Kleen 			 * Weak group failed. We cannot just undo this here
7504804e011SAndi Kleen 			 * because earlier CPUs might be in group mode, and the kernel
7514804e011SAndi Kleen 			 * doesn't support mixing group and non group reads. Defer
7524804e011SAndi Kleen 			 * it to later.
7534804e011SAndi Kleen 			 * Don't close here because we're in the wrong affinity.
7544804e011SAndi Kleen 			 */
75535c1980eSAndi Kleen 			if ((errno == EINVAL || errno == EBADF) &&
756fba7c866SJiri Olsa 				evsel__leader(counter) != counter &&
7575a5dfe4bSAndi Kleen 				counter->weak_group) {
75864b4778bSArnaldo Carvalho de Melo 				evlist__reset_weak_group(evsel_list, counter, false);
7594804e011SAndi Kleen 				assert(counter->reset_group);
7604804e011SAndi Kleen 				second_pass = true;
7614804e011SAndi Kleen 				continue;
7625a5dfe4bSAndi Kleen 			}
7635a5dfe4bSAndi Kleen 
764e0e6a6caSAndi Kleen 			switch (stat_handle_error(counter)) {
765e0e6a6caSAndi Kleen 			case COUNTER_FATAL:
766084ab9f8SArnaldo Carvalho de Melo 				return -1;
767e0e6a6caSAndi Kleen 			case COUNTER_RETRY:
768e0e6a6caSAndi Kleen 				goto try_again;
769e0e6a6caSAndi Kleen 			case COUNTER_SKIP:
770e0e6a6caSAndi Kleen 				continue;
771e0e6a6caSAndi Kleen 			default:
772e0e6a6caSAndi Kleen 				break;
773e0e6a6caSAndi Kleen 			}
7744804e011SAndi Kleen 
775084ab9f8SArnaldo Carvalho de Melo 		}
7762cee77c4SDavid Ahern 		counter->supported = true;
7774804e011SAndi Kleen 	}
7784804e011SAndi Kleen 
7794804e011SAndi Kleen 	if (second_pass) {
7804804e011SAndi Kleen 		/*
7814804e011SAndi Kleen 		 * Now redo all the weak group after closing them,
7824804e011SAndi Kleen 		 * and also close errored counters.
7834804e011SAndi Kleen 		 */
7844804e011SAndi Kleen 
7854804e011SAndi Kleen 		/* First close errored or weak retry */
78649de1795SArnaldo Carvalho de Melo 		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
787472832d2SIan Rogers 			counter = evlist_cpu_itr.evsel;
788472832d2SIan Rogers 
7894804e011SAndi Kleen 			if (!counter->reset_group && !counter->errored)
7904804e011SAndi Kleen 				continue;
791472832d2SIan Rogers 
792472832d2SIan Rogers 			perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx);
7934804e011SAndi Kleen 		}
7944804e011SAndi Kleen 		/* Now reopen weak */
79549de1795SArnaldo Carvalho de Melo 		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
796472832d2SIan Rogers 			counter = evlist_cpu_itr.evsel;
797472832d2SIan Rogers 
7984804e011SAndi Kleen 			if (!counter->reset_group)
7994804e011SAndi Kleen 				continue;
8004804e011SAndi Kleen try_again_reset:
8018ab2e96dSArnaldo Carvalho de Melo 			pr_debug2("reopening weak %s\n", evsel__name(counter));
8024804e011SAndi Kleen 			if (create_perf_stat_counter(counter, &stat_config, &target,
803472832d2SIan Rogers 						     evlist_cpu_itr.cpu_map_idx) < 0) {
8044804e011SAndi Kleen 
8054804e011SAndi Kleen 				switch (stat_handle_error(counter)) {
8064804e011SAndi Kleen 				case COUNTER_FATAL:
8074804e011SAndi Kleen 					return -1;
8084804e011SAndi Kleen 				case COUNTER_RETRY:
8094804e011SAndi Kleen 					goto try_again_reset;
8104804e011SAndi Kleen 				case COUNTER_SKIP:
8114804e011SAndi Kleen 					continue;
8124804e011SAndi Kleen 				default:
8134804e011SAndi Kleen 					break;
8144804e011SAndi Kleen 				}
8154804e011SAndi Kleen 			}
8164804e011SAndi Kleen 			counter->supported = true;
8174804e011SAndi Kleen 		}
8184804e011SAndi Kleen 	}
81949de1795SArnaldo Carvalho de Melo 	affinity__cleanup(affinity);
8204804e011SAndi Kleen 
8214804e011SAndi Kleen 	evlist__for_each_entry(evsel_list, counter) {
8224804e011SAndi Kleen 		if (!counter->supported) {
8234804e011SAndi Kleen 			perf_evsel__free_fd(&counter->core);
8244804e011SAndi Kleen 			continue;
8254804e011SAndi Kleen 		}
826410136f5SStephane Eranian 
827410136f5SStephane Eranian 		l = strlen(counter->unit);
828df4f7b4dSJiri Olsa 		if (l > stat_config.unit_width)
829df4f7b4dSJiri Olsa 			stat_config.unit_width = l;
8302af4646dSJiri Olsa 
831ddc6999eSArnaldo Carvalho de Melo 		if (evsel__should_store_id(counter) &&
83234397753SArnaldo Carvalho de Melo 		    evsel__store_ids(counter, evsel_list))
8332af4646dSJiri Olsa 			return -1;
83448290609SArnaldo Carvalho de Melo 	}
83586470930SIngo Molnar 
83624bf91a7SArnaldo Carvalho de Melo 	if (evlist__apply_filters(evsel_list, &counter)) {
83762d94b00SArnaldo Carvalho de Melo 		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
8388ab2e96dSArnaldo Carvalho de Melo 			counter->filter, evsel__name(counter), errno,
839c8b5f2c9SArnaldo Carvalho de Melo 			str_error_r(errno, msg, sizeof(msg)));
840cfd748aeSFrederic Weisbecker 		return -1;
841cfd748aeSFrederic Weisbecker 	}
842cfd748aeSFrederic Weisbecker 
8434979d0c7SJiri Olsa 	if (STAT_RECORD) {
844fa853c4bSSong Liu 		int fd = perf_data__fd(&perf_stat.data);
8454979d0c7SJiri Olsa 
846664c98d4SJiri Olsa 		if (is_pipe) {
8478ceb41d7SJiri Olsa 			err = perf_header__write_pipe(perf_data__fd(&perf_stat.data));
848664c98d4SJiri Olsa 		} else {
8494979d0c7SJiri Olsa 			err = perf_session__write_header(perf_stat.session, evsel_list,
8504979d0c7SJiri Olsa 							 fd, false);
851664c98d4SJiri Olsa 		}
852664c98d4SJiri Olsa 
8534979d0c7SJiri Olsa 		if (err < 0)
8544979d0c7SJiri Olsa 			return err;
8558b99b1a4SJiri Olsa 
856b251892dSArnaldo Carvalho de Melo 		err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list,
857c2c247f2SJiri Olsa 							 process_synthesized_event, is_pipe);
8588b99b1a4SJiri Olsa 		if (err < 0)
8598b99b1a4SJiri Olsa 			return err;
8604979d0c7SJiri Olsa 	}
8614979d0c7SJiri Olsa 
86225f69c69SChangbin Du 	if (target.initial_delay) {
863c587e77eSNamhyung Kim 		pr_info(EVLIST_DISABLED_MSG);
864c587e77eSNamhyung Kim 	} else {
865fa853c4bSSong Liu 		err = enable_counters();
866fa853c4bSSong Liu 		if (err)
867fa853c4bSSong Liu 			return -1;
868c587e77eSNamhyung Kim 	}
869bb8bc52eSAdrián Herrera Arcila 
870bb8bc52eSAdrián Herrera Arcila 	/* Exec the command, if any */
871bb8bc52eSAdrián Herrera Arcila 	if (forks)
872d0a0a511SThomas Richter 		evlist__start_workload(evsel_list);
873acf28922SNamhyung Kim 
87425f69c69SChangbin Du 	if (target.initial_delay > 0) {
87525f69c69SChangbin Du 		usleep(target.initial_delay * USEC_PER_MSEC);
876c587e77eSNamhyung Kim 		err = enable_counters();
877c587e77eSNamhyung Kim 		if (err)
878c587e77eSNamhyung Kim 			return -1;
879c587e77eSNamhyung Kim 
880c587e77eSNamhyung Kim 		pr_info(EVLIST_ENABLED_MSG);
881c587e77eSNamhyung Kim 	}
882c587e77eSNamhyung Kim 
883435b46efSSong Liu 	t0 = rdclock();
884435b46efSSong Liu 	clock_gettime(CLOCK_MONOTONIC, &ref_time);
885435b46efSSong Liu 
886bb8bc52eSAdrián Herrera Arcila 	if (forks) {
88727e9769aSAlexey Budankov 		if (interval || timeout || evlist__ctlfd_initialized(evsel_list))
888bee328cbSAlexey Budankov 			status = dispatch_events(forks, timeout, interval, &times);
889cfbd41b7SArnaldo Carvalho de Melo 		if (child_pid != -1) {
890cfbd41b7SArnaldo Carvalho de Melo 			if (timeout)
891cfbd41b7SArnaldo Carvalho de Melo 				kill(child_pid, SIGTERM);
8928897a891SJiri Olsa 			wait4(child_pid, &status, 0, &stat_config.ru_data);
893cfbd41b7SArnaldo Carvalho de Melo 		}
8946af206fdSArnaldo Carvalho de Melo 
895f33cbe72SArnaldo Carvalho de Melo 		if (workload_exec_errno) {
896c8b5f2c9SArnaldo Carvalho de Melo 			const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
897f33cbe72SArnaldo Carvalho de Melo 			pr_err("Workload failed: %s\n", emsg);
8986af206fdSArnaldo Carvalho de Melo 			return -1;
899f33cbe72SArnaldo Carvalho de Melo 		}
9006af206fdSArnaldo Carvalho de Melo 
90133e49ea7SAndi Kleen 		if (WIFSIGNALED(status))
90233e49ea7SAndi Kleen 			psignal(WTERMSIG(status), argv[0]);
90360666c63SLiming Wang 	} else {
904bee328cbSAlexey Budankov 		status = dispatch_events(forks, timeout, interval, &times);
90560666c63SLiming Wang 	}
90686470930SIngo Molnar 
9073df33effSMark Rutland 	disable_counters();
9083df33effSMark Rutland 
90986470930SIngo Molnar 	t1 = rdclock();
91086470930SIngo Molnar 
91154ac0b1bSJiri Olsa 	if (stat_config.walltime_run_table)
91254ac0b1bSJiri Olsa 		stat_config.walltime_run[run_idx] = t1 - t0;
913e55c14afSJiri Olsa 
914ee6a9614SJin Yao 	if (interval && stat_config.summary) {
915c7e5b328SJin Yao 		stat_config.interval = 0;
916ee6a9614SJin Yao 		stat_config.stop_read_counter = true;
917c7e5b328SJin Yao 		init_stats(&walltime_nsecs_stats);
918c7e5b328SJin Yao 		update_stats(&walltime_nsecs_stats, t1 - t0);
919c7e5b328SJin Yao 
92053f5e908SArnaldo Carvalho de Melo 		evlist__copy_prev_raw_counts(evsel_list);
92153f5e908SArnaldo Carvalho de Melo 		evlist__reset_prev_raw_counts(evsel_list);
9228f97963eSNamhyung Kim 		evlist__reset_aggr_stats(evsel_list);
923c735b0a5SFlorian Fischer 	} else {
9249e9772c4SPeter Zijlstra 		update_stats(&walltime_nsecs_stats, t1 - t0);
925c735b0a5SFlorian Fischer 		update_rusage_stats(&ru_stats, &stat_config.ru_data);
926c735b0a5SFlorian Fischer 	}
92742202dd5SIngo Molnar 
9283df33effSMark Rutland 	/*
9293df33effSMark Rutland 	 * Closing a group leader splits the group, and as we only disable
9303df33effSMark Rutland 	 * group leaders, results in remaining events becoming enabled. To
9313df33effSMark Rutland 	 * avoid arbitrary skew, we must read all counters before closing any
9323df33effSMark Rutland 	 * group leaders.
9333df33effSMark Rutland 	 */
9348962cbecSNamhyung Kim 	if (read_counters(&(struct timespec) { .tv_nsec = t1-t0 }) == 0)
9358962cbecSNamhyung Kim 		process_counters();
93608ef3af1SJiri Olsa 
93708ef3af1SJiri Olsa 	/*
93808ef3af1SJiri Olsa 	 * We need to keep evsel_list alive, because it's processed
93908ef3af1SJiri Olsa 	 * later the evsel_list will be closed after.
94008ef3af1SJiri Olsa 	 */
94108ef3af1SJiri Olsa 	if (!STAT_RECORD)
942750b4edeSJiri Olsa 		evlist__close(evsel_list);
943c52b12edSArnaldo Carvalho de Melo 
94442202dd5SIngo Molnar 	return WEXITSTATUS(status);
94542202dd5SIngo Molnar }
94642202dd5SIngo Molnar 
run_perf_stat(int argc,const char ** argv,int run_idx)947e55c14afSJiri Olsa static int run_perf_stat(int argc, const char **argv, int run_idx)
9481f16c575SPeter Zijlstra {
9491f16c575SPeter Zijlstra 	int ret;
9501f16c575SPeter Zijlstra 
9511f16c575SPeter Zijlstra 	if (pre_cmd) {
9521f16c575SPeter Zijlstra 		ret = system(pre_cmd);
9531f16c575SPeter Zijlstra 		if (ret)
9541f16c575SPeter Zijlstra 			return ret;
9551f16c575SPeter Zijlstra 	}
9561f16c575SPeter Zijlstra 
9571f16c575SPeter Zijlstra 	if (sync_run)
9581f16c575SPeter Zijlstra 		sync();
9591f16c575SPeter Zijlstra 
960e55c14afSJiri Olsa 	ret = __run_perf_stat(argc, argv, run_idx);
9611f16c575SPeter Zijlstra 	if (ret)
9621f16c575SPeter Zijlstra 		return ret;
9631f16c575SPeter Zijlstra 
9641f16c575SPeter Zijlstra 	if (post_cmd) {
9651f16c575SPeter Zijlstra 		ret = system(post_cmd);
9661f16c575SPeter Zijlstra 		if (ret)
9671f16c575SPeter Zijlstra 			return ret;
9681f16c575SPeter Zijlstra 	}
9691f16c575SPeter Zijlstra 
9701f16c575SPeter Zijlstra 	return ret;
9711f16c575SPeter Zijlstra }
9721f16c575SPeter Zijlstra 
print_counters(struct timespec * ts,int argc,const char ** argv)973a5a9eac1SJiri Olsa static void print_counters(struct timespec *ts, int argc, const char **argv)
974a5a9eac1SJiri Olsa {
9750174820aSJiri Olsa 	/* Do not print anything if we record to the pipe. */
9760174820aSJiri Olsa 	if (STAT_RECORD && perf_stat.data.is_pipe)
9770174820aSJiri Olsa 		return;
978a527c2c1SJames Clark 	if (quiet)
97955a4de94SAndi Kleen 		return;
9800174820aSJiri Olsa 
98171273724SArnaldo Carvalho de Melo 	evlist__print_counters(evsel_list, &stat_config, &target, ts, argc, argv);
982a5a9eac1SJiri Olsa }
983a5a9eac1SJiri Olsa 
98401513fdcSIan Rogers static volatile sig_atomic_t signr = -1;
985f7b7c26eSPeter Zijlstra 
skip_signal(int signo)98686470930SIngo Molnar static void skip_signal(int signo)
98786470930SIngo Molnar {
988ec0d3d1fSJiri Olsa 	if ((child_pid == -1) || stat_config.interval)
98960666c63SLiming Wang 		done = 1;
99060666c63SLiming Wang 
991f7b7c26eSPeter Zijlstra 	signr = signo;
992d07f0b12SStephane Eranian 	/*
993d07f0b12SStephane Eranian 	 * render child_pid harmless
994d07f0b12SStephane Eranian 	 * won't send SIGTERM to a random
995d07f0b12SStephane Eranian 	 * process in case of race condition
996d07f0b12SStephane Eranian 	 * and fast PID recycling
997d07f0b12SStephane Eranian 	 */
998d07f0b12SStephane Eranian 	child_pid = -1;
999f7b7c26eSPeter Zijlstra }
1000f7b7c26eSPeter Zijlstra 
sig_atexit(void)1001f7b7c26eSPeter Zijlstra static void sig_atexit(void)
1002f7b7c26eSPeter Zijlstra {
1003d07f0b12SStephane Eranian 	sigset_t set, oset;
1004d07f0b12SStephane Eranian 
1005d07f0b12SStephane Eranian 	/*
1006d07f0b12SStephane Eranian 	 * avoid race condition with SIGCHLD handler
1007d07f0b12SStephane Eranian 	 * in skip_signal() which is modifying child_pid
1008d07f0b12SStephane Eranian 	 * goal is to avoid send SIGTERM to a random
1009d07f0b12SStephane Eranian 	 * process
1010d07f0b12SStephane Eranian 	 */
1011d07f0b12SStephane Eranian 	sigemptyset(&set);
1012d07f0b12SStephane Eranian 	sigaddset(&set, SIGCHLD);
1013d07f0b12SStephane Eranian 	sigprocmask(SIG_BLOCK, &set, &oset);
1014d07f0b12SStephane Eranian 
1015933da83aSChris Wilson 	if (child_pid != -1)
1016933da83aSChris Wilson 		kill(child_pid, SIGTERM);
1017933da83aSChris Wilson 
1018d07f0b12SStephane Eranian 	sigprocmask(SIG_SETMASK, &oset, NULL);
1019d07f0b12SStephane Eranian 
1020f7b7c26eSPeter Zijlstra 	if (signr == -1)
1021f7b7c26eSPeter Zijlstra 		return;
1022f7b7c26eSPeter Zijlstra 
1023f7b7c26eSPeter Zijlstra 	signal(signr, SIG_DFL);
1024f7b7c26eSPeter Zijlstra 	kill(getpid(), signr);
102586470930SIngo Molnar }
102686470930SIngo Molnar 
perf_stat__set_big_num(int set)1027d778a778SPaul A. Clarke void perf_stat__set_big_num(int set)
1028d778a778SPaul A. Clarke {
1029d778a778SPaul A. Clarke 	stat_config.big_num = (set != 0);
1030d778a778SPaul A. Clarke }
1031d778a778SPaul A. Clarke 
perf_stat__set_no_csv_summary(int set)10320bdad978SJin Yao void perf_stat__set_no_csv_summary(int set)
10330bdad978SJin Yao {
10340bdad978SJin Yao 	stat_config.no_csv_summary = (set != 0);
10350bdad978SJin Yao }
10360bdad978SJin Yao 
stat__set_big_num(const struct option * opt __maybe_unused,const char * s __maybe_unused,int unset)10371d037ca1SIrina Tirdea static int stat__set_big_num(const struct option *opt __maybe_unused,
10381d037ca1SIrina Tirdea 			     const char *s __maybe_unused, int unset)
1039d7470b6aSStephane Eranian {
1040d7470b6aSStephane Eranian 	big_num_opt = unset ? 0 : 1;
1041d778a778SPaul A. Clarke 	perf_stat__set_big_num(!unset);
1042d7470b6aSStephane Eranian 	return 0;
1043d7470b6aSStephane Eranian }
1044d7470b6aSStephane Eranian 
enable_metric_only(const struct option * opt __maybe_unused,const char * s __maybe_unused,int unset)104544b1e60aSAndi Kleen static int enable_metric_only(const struct option *opt __maybe_unused,
104644b1e60aSAndi Kleen 			      const char *s __maybe_unused, int unset)
104744b1e60aSAndi Kleen {
104844b1e60aSAndi Kleen 	force_metric_only = true;
10490ce5aa02SJiri Olsa 	stat_config.metric_only = !unset;
105044b1e60aSAndi Kleen 	return 0;
105144b1e60aSAndi Kleen }
105244b1e60aSAndi Kleen 
append_metric_groups(const struct option * opt __maybe_unused,const char * str,int unset __maybe_unused)1053a4b8cfcaSIan Rogers static int append_metric_groups(const struct option *opt __maybe_unused,
1054b18f3e36SAndi Kleen 			       const char *str,
1055b18f3e36SAndi Kleen 			       int unset __maybe_unused)
1056b18f3e36SAndi Kleen {
1057a4b8cfcaSIan Rogers 	if (metrics) {
1058a4b8cfcaSIan Rogers 		char *tmp;
1059a4b8cfcaSIan Rogers 
1060a4b8cfcaSIan Rogers 		if (asprintf(&tmp, "%s,%s", metrics, str) < 0)
1061a4b8cfcaSIan Rogers 			return -ENOMEM;
1062a4b8cfcaSIan Rogers 		free(metrics);
1063a4b8cfcaSIan Rogers 		metrics = tmp;
1064a4b8cfcaSIan Rogers 	} else {
1065a4b8cfcaSIan Rogers 		metrics = strdup(str);
1066a4b8cfcaSIan Rogers 		if (!metrics)
1067a4b8cfcaSIan Rogers 			return -ENOMEM;
1068a4b8cfcaSIan Rogers 	}
1069a4b8cfcaSIan Rogers 	return 0;
1070b18f3e36SAndi Kleen }
1071b18f3e36SAndi Kleen 
parse_control_option(const struct option * opt,const char * str,int unset __maybe_unused)107227e9769aSAlexey Budankov static int parse_control_option(const struct option *opt,
107327e9769aSAlexey Budankov 				const char *str,
107427e9769aSAlexey Budankov 				int unset __maybe_unused)
107527e9769aSAlexey Budankov {
10769864a66dSAdrian Hunter 	struct perf_stat_config *config = opt->value;
107727e9769aSAlexey Budankov 
1078a8fcbd26SAdrian Hunter 	return evlist__parse_control(str, &config->ctl_fd, &config->ctl_fd_ack, &config->ctl_fd_close);
1079a8fcbd26SAdrian Hunter }
1080a8fcbd26SAdrian Hunter 
parse_stat_cgroups(const struct option * opt,const char * str,int unset)1081d1c5a0e8SNamhyung Kim static int parse_stat_cgroups(const struct option *opt,
1082d1c5a0e8SNamhyung Kim 			      const char *str, int unset)
1083d1c5a0e8SNamhyung Kim {
1084d1c5a0e8SNamhyung Kim 	if (stat_config.cgroup_list) {
1085d1c5a0e8SNamhyung Kim 		pr_err("--cgroup and --for-each-cgroup cannot be used together\n");
1086d1c5a0e8SNamhyung Kim 		return -1;
1087d1c5a0e8SNamhyung Kim 	}
1088d1c5a0e8SNamhyung Kim 
1089d1c5a0e8SNamhyung Kim 	return parse_cgroups(opt, str, unset);
1090d1c5a0e8SNamhyung Kim }
1091d1c5a0e8SNamhyung Kim 
parse_cputype(const struct option * opt,const char * str,int unset __maybe_unused)1092003be8c4SIan Rogers static int parse_cputype(const struct option *opt,
1093e69dc842SJin Yao 			     const char *str,
1094e69dc842SJin Yao 			     int unset __maybe_unused)
1095e69dc842SJin Yao {
1096003be8c4SIan Rogers 	const struct perf_pmu *pmu;
1097e69dc842SJin Yao 	struct evlist *evlist = *(struct evlist **)opt->value;
1098e69dc842SJin Yao 
1099e69dc842SJin Yao 	if (!list_empty(&evlist->core.entries)) {
1100e69dc842SJin Yao 		fprintf(stderr, "Must define cputype before events/metrics\n");
1101e69dc842SJin Yao 		return -1;
1102e69dc842SJin Yao 	}
1103e69dc842SJin Yao 
1104003be8c4SIan Rogers 	pmu = perf_pmus__pmu_for_pmu_filter(str);
1105003be8c4SIan Rogers 	if (!pmu) {
1106e69dc842SJin Yao 		fprintf(stderr, "--cputype %s is not supported!\n", str);
1107e69dc842SJin Yao 		return -1;
1108e69dc842SJin Yao 	}
1109003be8c4SIan Rogers 	parse_events_option_args.pmu_filter = pmu->name;
1110e69dc842SJin Yao 
1111e69dc842SJin Yao 	return 0;
1112e69dc842SJin Yao }
1113e69dc842SJin Yao 
parse_cache_level(const struct option * opt,const char * str,int unset __maybe_unused)1114aab667caSK Prateek Nayak static int parse_cache_level(const struct option *opt,
1115aab667caSK Prateek Nayak 			     const char *str,
1116aab667caSK Prateek Nayak 			     int unset __maybe_unused)
1117aab667caSK Prateek Nayak {
1118aab667caSK Prateek Nayak 	int level;
1119aab667caSK Prateek Nayak 	u32 *aggr_mode = (u32 *)opt->value;
1120aab667caSK Prateek Nayak 	u32 *aggr_level = (u32 *)opt->data;
1121aab667caSK Prateek Nayak 
1122aab667caSK Prateek Nayak 	/*
1123aab667caSK Prateek Nayak 	 * If no string is specified, aggregate based on the topology of
1124aab667caSK Prateek Nayak 	 * Last Level Cache (LLC). Since the LLC level can change from
1125aab667caSK Prateek Nayak 	 * architecture to architecture, set level greater than
1126aab667caSK Prateek Nayak 	 * MAX_CACHE_LVL which will be interpreted as LLC.
1127aab667caSK Prateek Nayak 	 */
1128aab667caSK Prateek Nayak 	if (str == NULL) {
1129aab667caSK Prateek Nayak 		level = MAX_CACHE_LVL + 1;
1130aab667caSK Prateek Nayak 		goto out;
1131aab667caSK Prateek Nayak 	}
1132aab667caSK Prateek Nayak 
1133aab667caSK Prateek Nayak 	/*
1134aab667caSK Prateek Nayak 	 * The format to specify cache level is LX or lX where X is the
1135aab667caSK Prateek Nayak 	 * cache level.
1136aab667caSK Prateek Nayak 	 */
1137aab667caSK Prateek Nayak 	if (strlen(str) != 2 || (str[0] != 'l' && str[0] != 'L')) {
1138aab667caSK Prateek Nayak 		pr_err("Cache level must be of form L[1-%d], or l[1-%d]\n",
1139aab667caSK Prateek Nayak 		       MAX_CACHE_LVL,
1140aab667caSK Prateek Nayak 		       MAX_CACHE_LVL);
1141aab667caSK Prateek Nayak 		return -EINVAL;
1142aab667caSK Prateek Nayak 	}
1143aab667caSK Prateek Nayak 
1144aab667caSK Prateek Nayak 	level = atoi(&str[1]);
1145aab667caSK Prateek Nayak 	if (level < 1) {
1146aab667caSK Prateek Nayak 		pr_err("Cache level must be of form L[1-%d], or l[1-%d]\n",
1147aab667caSK Prateek Nayak 		       MAX_CACHE_LVL,
1148aab667caSK Prateek Nayak 		       MAX_CACHE_LVL);
1149aab667caSK Prateek Nayak 		return -EINVAL;
1150aab667caSK Prateek Nayak 	}
1151aab667caSK Prateek Nayak 
1152aab667caSK Prateek Nayak 	if (level > MAX_CACHE_LVL) {
1153aab667caSK Prateek Nayak 		pr_err("perf only supports max cache level of %d.\n"
1154aab667caSK Prateek Nayak 		       "Consider increasing MAX_CACHE_LVL\n", MAX_CACHE_LVL);
1155aab667caSK Prateek Nayak 		return -EINVAL;
1156aab667caSK Prateek Nayak 	}
1157aab667caSK Prateek Nayak out:
1158aab667caSK Prateek Nayak 	*aggr_mode = AGGR_CACHE;
1159aab667caSK Prateek Nayak 	*aggr_level = level;
1160aab667caSK Prateek Nayak 	return 0;
1161aab667caSK Prateek Nayak }
1162aab667caSK Prateek Nayak 
116351433eadSMichael Petlan static struct option stat_options[] = {
1164e0547311SJiri Olsa 	OPT_BOOLEAN('T', "transaction", &transaction_run,
1165e0547311SJiri Olsa 		    "hardware transaction statistics"),
1166411ad22eSIan Rogers 	OPT_CALLBACK('e', "event", &parse_events_option_args, "event",
1167e0547311SJiri Olsa 		     "event selector. use 'perf list' to list available events",
1168e0547311SJiri Olsa 		     parse_events_option),
1169e0547311SJiri Olsa 	OPT_CALLBACK(0, "filter", &evsel_list, "filter",
1170e0547311SJiri Olsa 		     "event filter", parse_filter),
11715698f26bSJiri Olsa 	OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit,
1172e0547311SJiri Olsa 		    "child tasks do not inherit counters"),
1173e0547311SJiri Olsa 	OPT_STRING('p', "pid", &target.pid, "pid",
1174e0547311SJiri Olsa 		   "stat events on existing process id"),
1175e0547311SJiri Olsa 	OPT_STRING('t', "tid", &target.tid, "tid",
1176e0547311SJiri Olsa 		   "stat events on existing thread id"),
1177fa853c4bSSong Liu #ifdef HAVE_BPF_SKEL
1178fa853c4bSSong Liu 	OPT_STRING('b', "bpf-prog", &target.bpf_str, "bpf-prog-id",
1179fa853c4bSSong Liu 		   "stat events on existing bpf program id"),
11807fac83aaSSong Liu 	OPT_BOOLEAN(0, "bpf-counters", &target.use_bpf,
11817fac83aaSSong Liu 		    "use bpf program to count events"),
11827fac83aaSSong Liu 	OPT_STRING(0, "bpf-attr-map", &target.attr_map, "attr-map-path",
11837fac83aaSSong Liu 		   "path to perf_event_attr map"),
1184fa853c4bSSong Liu #endif
1185e0547311SJiri Olsa 	OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
1186e0547311SJiri Olsa 		    "system-wide collection from all CPUs"),
118775998bb2SAndi Kleen 	OPT_BOOLEAN(0, "scale", &stat_config.scale,
118875998bb2SAndi Kleen 		    "Use --no-scale to disable counter scaling for multiplexing"),
1189e0547311SJiri Olsa 	OPT_INCR('v', "verbose", &verbose,
1190e0547311SJiri Olsa 		    "be more verbose (show counter open errors, etc)"),
1191d97ae04bSJiri Olsa 	OPT_INTEGER('r', "repeat", &stat_config.run_count,
1192e0547311SJiri Olsa 		    "repeat command and print average + stddev (max: 100, forever: 0)"),
119354ac0b1bSJiri Olsa 	OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table,
1194e55c14afSJiri Olsa 		    "display details about each run (only with -r option)"),
1195aea0dca1SJiri Olsa 	OPT_BOOLEAN('n', "null", &stat_config.null_run,
1196e0547311SJiri Olsa 		    "null run - dont start any counters"),
1197e0547311SJiri Olsa 	OPT_INCR('d', "detailed", &detailed_run,
1198e0547311SJiri Olsa 		    "detailed run - start a lot of events"),
1199e0547311SJiri Olsa 	OPT_BOOLEAN('S', "sync", &sync_run,
1200e0547311SJiri Olsa 		    "call sync() before starting a run"),
1201e0547311SJiri Olsa 	OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
1202e0547311SJiri Olsa 			   "print large numbers with thousands\' separators",
1203e0547311SJiri Olsa 			   stat__set_big_num),
1204e0547311SJiri Olsa 	OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
1205e0547311SJiri Olsa 		    "list of cpus to monitor in system-wide"),
1206e0547311SJiri Olsa 	OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
1207e0547311SJiri Olsa 		    "disable CPU count aggregation", AGGR_NONE),
1208fdee335bSJiri Olsa 	OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"),
12092c8e6451SZhengjun Xing 	OPT_BOOLEAN(0, "hybrid-merge", &stat_config.hybrid_merge,
12102c8e6451SZhengjun Xing 		    "Merge identical named hybrid events"),
1211fa7070a3SJiri Olsa 	OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator",
1212e0547311SJiri Olsa 		   "print counts with custom separator"),
1213df936cadSClaire Jensen 	OPT_BOOLEAN('j', "json-output", &stat_config.json_output,
1214df936cadSClaire Jensen 		   "print counts in JSON format"),
1215e0547311SJiri Olsa 	OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
1216d1c5a0e8SNamhyung Kim 		     "monitor event in cgroup name only", parse_stat_cgroups),
1217d1c5a0e8SNamhyung Kim 	OPT_STRING(0, "for-each-cgroup", &stat_config.cgroup_list, "name",
1218d1c5a0e8SNamhyung Kim 		    "expand events for each cgroup"),
1219e0547311SJiri Olsa 	OPT_STRING('o', "output", &output_name, "file", "output file name"),
1220e0547311SJiri Olsa 	OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
1221e0547311SJiri Olsa 	OPT_INTEGER(0, "log-fd", &output_fd,
1222e0547311SJiri Olsa 		    "log output to fd, instead of stderr"),
1223e0547311SJiri Olsa 	OPT_STRING(0, "pre", &pre_cmd, "command",
1224e0547311SJiri Olsa 			"command to run prior to the measured command"),
1225e0547311SJiri Olsa 	OPT_STRING(0, "post", &post_cmd, "command",
1226e0547311SJiri Olsa 			"command to run after to the measured command"),
1227e0547311SJiri Olsa 	OPT_UINTEGER('I', "interval-print", &stat_config.interval,
12289dc9a95fSAlexey Budankov 		    "print counts at regular interval in ms "
12299dc9a95fSAlexey Budankov 		    "(overhead is possible for values <= 100ms)"),
1230db06a269Syuzhoujian 	OPT_INTEGER(0, "interval-count", &stat_config.times,
1231db06a269Syuzhoujian 		    "print counts for fixed number of times"),
1232132c6ba3SJiri Olsa 	OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear,
12339660e08eSJiri Olsa 		    "clear screen in between new interval"),
1234f1f8ad52Syuzhoujian 	OPT_UINTEGER(0, "timeout", &stat_config.timeout,
1235f1f8ad52Syuzhoujian 		    "stop workload and print counts after a timeout period in ms (>= 10ms)"),
1236e0547311SJiri Olsa 	OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
1237e0547311SJiri Olsa 		     "aggregate counts per processor socket", AGGR_SOCKET),
1238db5742b6SKan Liang 	OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode,
1239db5742b6SKan Liang 		     "aggregate counts per processor die", AGGR_DIE),
1240aab667caSK Prateek Nayak 	OPT_CALLBACK_OPTARG(0, "per-cache", &stat_config.aggr_mode, &stat_config.aggr_level,
1241aab667caSK Prateek Nayak 			    "cache level", "aggregate count at this cache level (Default: LLC)",
1242aab667caSK Prateek Nayak 			    parse_cache_level),
1243e0547311SJiri Olsa 	OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
1244e0547311SJiri Olsa 		     "aggregate counts per physical processor core", AGGR_CORE),
1245e0547311SJiri Olsa 	OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
1246e0547311SJiri Olsa 		     "aggregate counts per thread", AGGR_THREAD),
124786895b48SJiri Olsa 	OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode,
124886895b48SJiri Olsa 		     "aggregate counts per numa node", AGGR_NODE),
124925f69c69SChangbin Du 	OPT_INTEGER('D', "delay", &target.initial_delay,
12502162b9c6SAlexey Budankov 		    "ms to wait before starting measurement after program start (-1: start with events disabled)"),
12510ce5aa02SJiri Olsa 	OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
125244b1e60aSAndi Kleen 			"Only print computed metrics. No raw values", enable_metric_only),
125305530a79SIan Rogers 	OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group,
125405530a79SIan Rogers 		       "don't group metric events, impacts multiplexing"),
125505530a79SIan Rogers 	OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge,
125605530a79SIan Rogers 		       "don't try to share events between metrics in a group"),
12571fd09e29SIan Rogers 	OPT_BOOLEAN(0, "metric-no-threshold", &stat_config.metric_no_threshold,
12581fd09e29SIan Rogers 		       "don't try to share events between metrics in a group  "),
125944b1e60aSAndi Kleen 	OPT_BOOLEAN(0, "topdown", &topdown_run,
126063e39aa6SKan Liang 			"measure top-down statistics"),
126163e39aa6SKan Liang 	OPT_UINTEGER(0, "td-level", &stat_config.topdown_level,
126263e39aa6SKan Liang 			"Set the metrics level for the top-down statistics (0: max level)"),
1263daefd0bcSKan Liang 	OPT_BOOLEAN(0, "smi-cost", &smi_cost,
1264daefd0bcSKan Liang 			"measure SMI cost"),
1265b18f3e36SAndi Kleen 	OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
1266b18f3e36SAndi Kleen 		     "monitor specified metrics or metric groups (separated by ,)",
1267a4b8cfcaSIan Rogers 		     append_metric_groups),
1268dd071024SJin Yao 	OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel,
1269dd071024SJin Yao 			 "Configure all used events to run in kernel space.",
1270dd071024SJin Yao 			 PARSE_OPT_EXCLUSIVE),
1271dd071024SJin Yao 	OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user,
1272dd071024SJin Yao 			 "Configure all used events to run in user space.",
1273dd071024SJin Yao 			 PARSE_OPT_EXCLUSIVE),
12741af62ce6SJin Yao 	OPT_BOOLEAN(0, "percore-show-thread", &stat_config.percore_show_thread,
12751af62ce6SJin Yao 		    "Use with 'percore' event qualifier to show the event "
12761af62ce6SJin Yao 		    "counts of one hardware thread by sum up total hardware "
12771af62ce6SJin Yao 		    "threads of same physical core"),
1278ee6a9614SJin Yao 	OPT_BOOLEAN(0, "summary", &stat_config.summary,
1279ee6a9614SJin Yao 		       "print summary for interval mode"),
12800bdad978SJin Yao 	OPT_BOOLEAN(0, "no-csv-summary", &stat_config.no_csv_summary,
12810bdad978SJin Yao 		       "don't print 'summary' for CSV summary output"),
1282a527c2c1SJames Clark 	OPT_BOOLEAN(0, "quiet", &quiet,
1283a527c2c1SJames Clark 			"don't print any output, messages or warnings (useful with record)"),
1284e69dc842SJin Yao 	OPT_CALLBACK(0, "cputype", &evsel_list, "hybrid cpu type",
1285e69dc842SJin Yao 		     "Only enable events on applying cpu with this type "
1286e69dc842SJin Yao 		     "for hybrid platform (e.g. core or atom)",
1287003be8c4SIan Rogers 		     parse_cputype),
128870943490SStephane Eranian #ifdef HAVE_LIBPFM
128970943490SStephane Eranian 	OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
129070943490SStephane Eranian 		"libpfm4 event selector. use 'perf list' to list available events",
129170943490SStephane Eranian 		parse_libpfm_events_option),
129270943490SStephane Eranian #endif
1293a8fcbd26SAdrian Hunter 	OPT_CALLBACK(0, "control", &stat_config, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
129427e9769aSAlexey Budankov 		     "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events).\n"
1295a8fcbd26SAdrian Hunter 		     "\t\t\t  Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
1296a8fcbd26SAdrian Hunter 		     "\t\t\t  Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
129727e9769aSAlexey Budankov 		      parse_control_option),
1298f07952b1SAlexander Antonov 	OPT_CALLBACK_OPTARG(0, "iostat", &evsel_list, &stat_config, "default",
1299f07952b1SAlexander Antonov 			    "measure I/O performance metrics provided by arch/platform",
1300f07952b1SAlexander Antonov 			    iostat_parse),
1301e0547311SJiri Olsa 	OPT_END()
1302e0547311SJiri Olsa };
1303e0547311SJiri Olsa 
1304995ed074SK Prateek Nayak /**
1305995ed074SK Prateek Nayak  * Calculate the cache instance ID from the map in
1306995ed074SK Prateek Nayak  * /sys/devices/system/cpu/cpuX/cache/indexY/shared_cpu_list
1307995ed074SK Prateek Nayak  * Cache instance ID is the first CPU reported in the shared_cpu_list file.
1308995ed074SK Prateek Nayak  */
cpu__get_cache_id_from_map(struct perf_cpu cpu,char * map)1309995ed074SK Prateek Nayak static int cpu__get_cache_id_from_map(struct perf_cpu cpu, char *map)
1310995ed074SK Prateek Nayak {
1311995ed074SK Prateek Nayak 	int id;
1312995ed074SK Prateek Nayak 	struct perf_cpu_map *cpu_map = perf_cpu_map__new(map);
1313995ed074SK Prateek Nayak 
1314995ed074SK Prateek Nayak 	/*
1315995ed074SK Prateek Nayak 	 * If the map contains no CPU, consider the current CPU to
1316995ed074SK Prateek Nayak 	 * be the first online CPU in the cache domain else use the
1317995ed074SK Prateek Nayak 	 * first online CPU of the cache domain as the ID.
1318995ed074SK Prateek Nayak 	 */
1319995ed074SK Prateek Nayak 	if (perf_cpu_map__empty(cpu_map))
1320995ed074SK Prateek Nayak 		id = cpu.cpu;
1321995ed074SK Prateek Nayak 	else
1322995ed074SK Prateek Nayak 		id = perf_cpu_map__cpu(cpu_map, 0).cpu;
1323995ed074SK Prateek Nayak 
1324995ed074SK Prateek Nayak 	/* Free the perf_cpu_map used to find the cache ID */
1325995ed074SK Prateek Nayak 	perf_cpu_map__put(cpu_map);
1326995ed074SK Prateek Nayak 
1327995ed074SK Prateek Nayak 	return id;
1328995ed074SK Prateek Nayak }
1329995ed074SK Prateek Nayak 
1330995ed074SK Prateek Nayak /**
1331995ed074SK Prateek Nayak  * cpu__get_cache_id - Returns 0 if successful in populating the
1332995ed074SK Prateek Nayak  * cache level and cache id. Cache level is read from
1333995ed074SK Prateek Nayak  * /sys/devices/system/cpu/cpuX/cache/indexY/level where as cache instance ID
1334995ed074SK Prateek Nayak  * is the first CPU reported by
1335995ed074SK Prateek Nayak  * /sys/devices/system/cpu/cpuX/cache/indexY/shared_cpu_list
1336995ed074SK Prateek Nayak  */
cpu__get_cache_details(struct perf_cpu cpu,struct perf_cache * cache)1337995ed074SK Prateek Nayak static int cpu__get_cache_details(struct perf_cpu cpu, struct perf_cache *cache)
1338995ed074SK Prateek Nayak {
1339995ed074SK Prateek Nayak 	int ret = 0;
1340995ed074SK Prateek Nayak 	u32 cache_level = stat_config.aggr_level;
1341995ed074SK Prateek Nayak 	struct cpu_cache_level caches[MAX_CACHE_LVL];
1342995ed074SK Prateek Nayak 	u32 i = 0, caches_cnt = 0;
1343995ed074SK Prateek Nayak 
1344995ed074SK Prateek Nayak 	cache->cache_lvl = (cache_level > MAX_CACHE_LVL) ? 0 : cache_level;
1345995ed074SK Prateek Nayak 	cache->cache = -1;
1346995ed074SK Prateek Nayak 
1347995ed074SK Prateek Nayak 	ret = build_caches_for_cpu(cpu.cpu, caches, &caches_cnt);
1348995ed074SK Prateek Nayak 	if (ret) {
1349995ed074SK Prateek Nayak 		/*
1350995ed074SK Prateek Nayak 		 * If caches_cnt is not 0, cpu_cache_level data
1351995ed074SK Prateek Nayak 		 * was allocated when building the topology.
1352995ed074SK Prateek Nayak 		 * Free the allocated data before returning.
1353995ed074SK Prateek Nayak 		 */
1354995ed074SK Prateek Nayak 		if (caches_cnt)
1355995ed074SK Prateek Nayak 			goto free_caches;
1356995ed074SK Prateek Nayak 
1357995ed074SK Prateek Nayak 		return ret;
1358995ed074SK Prateek Nayak 	}
1359995ed074SK Prateek Nayak 
1360995ed074SK Prateek Nayak 	if (!caches_cnt)
1361995ed074SK Prateek Nayak 		return -1;
1362995ed074SK Prateek Nayak 
1363995ed074SK Prateek Nayak 	/*
1364995ed074SK Prateek Nayak 	 * Save the data for the highest level if no
1365995ed074SK Prateek Nayak 	 * level was specified by the user.
1366995ed074SK Prateek Nayak 	 */
1367995ed074SK Prateek Nayak 	if (cache_level > MAX_CACHE_LVL) {
1368995ed074SK Prateek Nayak 		int max_level_index = 0;
1369995ed074SK Prateek Nayak 
1370995ed074SK Prateek Nayak 		for (i = 1; i < caches_cnt; ++i) {
1371995ed074SK Prateek Nayak 			if (caches[i].level > caches[max_level_index].level)
1372995ed074SK Prateek Nayak 				max_level_index = i;
1373995ed074SK Prateek Nayak 		}
1374995ed074SK Prateek Nayak 
1375995ed074SK Prateek Nayak 		cache->cache_lvl = caches[max_level_index].level;
1376995ed074SK Prateek Nayak 		cache->cache = cpu__get_cache_id_from_map(cpu, caches[max_level_index].map);
1377995ed074SK Prateek Nayak 
1378995ed074SK Prateek Nayak 		/* Reset i to 0 to free entire caches[] */
1379995ed074SK Prateek Nayak 		i = 0;
1380995ed074SK Prateek Nayak 		goto free_caches;
1381995ed074SK Prateek Nayak 	}
1382995ed074SK Prateek Nayak 
1383995ed074SK Prateek Nayak 	for (i = 0; i < caches_cnt; ++i) {
1384995ed074SK Prateek Nayak 		if (caches[i].level == cache_level) {
1385995ed074SK Prateek Nayak 			cache->cache_lvl = cache_level;
1386995ed074SK Prateek Nayak 			cache->cache = cpu__get_cache_id_from_map(cpu, caches[i].map);
1387995ed074SK Prateek Nayak 		}
1388995ed074SK Prateek Nayak 
1389995ed074SK Prateek Nayak 		cpu_cache_level__free(&caches[i]);
1390995ed074SK Prateek Nayak 	}
1391995ed074SK Prateek Nayak 
1392995ed074SK Prateek Nayak free_caches:
1393995ed074SK Prateek Nayak 	/*
1394995ed074SK Prateek Nayak 	 * Free all the allocated cpu_cache_level data.
1395995ed074SK Prateek Nayak 	 */
1396995ed074SK Prateek Nayak 	while (i < caches_cnt)
1397995ed074SK Prateek Nayak 		cpu_cache_level__free(&caches[i++]);
1398995ed074SK Prateek Nayak 
1399995ed074SK Prateek Nayak 	return ret;
1400995ed074SK Prateek Nayak }
1401995ed074SK Prateek Nayak 
1402995ed074SK Prateek Nayak /**
1403995ed074SK Prateek Nayak  * aggr_cpu_id__cache - Create an aggr_cpu_id with cache instache ID, cache
1404995ed074SK Prateek Nayak  * level, die and socket populated with the cache instache ID, cache level,
1405995ed074SK Prateek Nayak  * die and socket for cpu. The function signature is compatible with
1406995ed074SK Prateek Nayak  * aggr_cpu_id_get_t.
1407995ed074SK Prateek Nayak  */
aggr_cpu_id__cache(struct perf_cpu cpu,void * data)1408995ed074SK Prateek Nayak static struct aggr_cpu_id aggr_cpu_id__cache(struct perf_cpu cpu, void *data)
1409995ed074SK Prateek Nayak {
1410995ed074SK Prateek Nayak 	int ret;
1411995ed074SK Prateek Nayak 	struct aggr_cpu_id id;
1412995ed074SK Prateek Nayak 	struct perf_cache cache;
1413995ed074SK Prateek Nayak 
1414995ed074SK Prateek Nayak 	id = aggr_cpu_id__die(cpu, data);
1415995ed074SK Prateek Nayak 	if (aggr_cpu_id__is_empty(&id))
1416995ed074SK Prateek Nayak 		return id;
1417995ed074SK Prateek Nayak 
1418995ed074SK Prateek Nayak 	ret = cpu__get_cache_details(cpu, &cache);
1419995ed074SK Prateek Nayak 	if (ret)
1420995ed074SK Prateek Nayak 		return id;
1421995ed074SK Prateek Nayak 
1422995ed074SK Prateek Nayak 	id.cache_lvl = cache.cache_lvl;
1423995ed074SK Prateek Nayak 	id.cache = cache.cache;
1424995ed074SK Prateek Nayak 	return id;
1425995ed074SK Prateek Nayak }
1426995ed074SK Prateek Nayak 
14275f50e15cSIan Rogers static const char *const aggr_mode__string[] = {
14285f50e15cSIan Rogers 	[AGGR_CORE] = "core",
1429995ed074SK Prateek Nayak 	[AGGR_CACHE] = "cache",
14305f50e15cSIan Rogers 	[AGGR_DIE] = "die",
14315f50e15cSIan Rogers 	[AGGR_GLOBAL] = "global",
14325f50e15cSIan Rogers 	[AGGR_NODE] = "node",
14335f50e15cSIan Rogers 	[AGGR_NONE] = "none",
14345f50e15cSIan Rogers 	[AGGR_SOCKET] = "socket",
14355f50e15cSIan Rogers 	[AGGR_THREAD] = "thread",
14365f50e15cSIan Rogers 	[AGGR_UNSET] = "unset",
14375f50e15cSIan Rogers };
14385f50e15cSIan Rogers 
perf_stat__get_socket(struct perf_stat_config * config __maybe_unused,struct perf_cpu cpu)14392760f5a1SJames Clark static struct aggr_cpu_id perf_stat__get_socket(struct perf_stat_config *config __maybe_unused,
14406d18804bSIan Rogers 						struct perf_cpu cpu)
14411fe7a300SJiri Olsa {
1442973aeb3cSIan Rogers 	return aggr_cpu_id__socket(cpu, /*data=*/NULL);
14431fe7a300SJiri Olsa }
14441fe7a300SJiri Olsa 
perf_stat__get_die(struct perf_stat_config * config __maybe_unused,struct perf_cpu cpu)14452760f5a1SJames Clark static struct aggr_cpu_id perf_stat__get_die(struct perf_stat_config *config __maybe_unused,
14466d18804bSIan Rogers 					     struct perf_cpu cpu)
1447db5742b6SKan Liang {
1448973aeb3cSIan Rogers 	return aggr_cpu_id__die(cpu, /*data=*/NULL);
1449db5742b6SKan Liang }
1450db5742b6SKan Liang 
perf_stat__get_cache_id(struct perf_stat_config * config __maybe_unused,struct perf_cpu cpu)1451995ed074SK Prateek Nayak static struct aggr_cpu_id perf_stat__get_cache_id(struct perf_stat_config *config __maybe_unused,
1452995ed074SK Prateek Nayak 						  struct perf_cpu cpu)
1453995ed074SK Prateek Nayak {
1454995ed074SK Prateek Nayak 	return aggr_cpu_id__cache(cpu, /*data=*/NULL);
1455995ed074SK Prateek Nayak }
1456995ed074SK Prateek Nayak 
perf_stat__get_core(struct perf_stat_config * config __maybe_unused,struct perf_cpu cpu)14572760f5a1SJames Clark static struct aggr_cpu_id perf_stat__get_core(struct perf_stat_config *config __maybe_unused,
14586d18804bSIan Rogers 					      struct perf_cpu cpu)
14591fe7a300SJiri Olsa {
1460973aeb3cSIan Rogers 	return aggr_cpu_id__core(cpu, /*data=*/NULL);
14611fe7a300SJiri Olsa }
14621fe7a300SJiri Olsa 
perf_stat__get_node(struct perf_stat_config * config __maybe_unused,struct perf_cpu cpu)14632760f5a1SJames Clark static struct aggr_cpu_id perf_stat__get_node(struct perf_stat_config *config __maybe_unused,
14646d18804bSIan Rogers 					      struct perf_cpu cpu)
146586895b48SJiri Olsa {
1466973aeb3cSIan Rogers 	return aggr_cpu_id__node(cpu, /*data=*/NULL);
146786895b48SJiri Olsa }
146886895b48SJiri Olsa 
perf_stat__get_global(struct perf_stat_config * config __maybe_unused,struct perf_cpu cpu)1469375369abSNamhyung Kim static struct aggr_cpu_id perf_stat__get_global(struct perf_stat_config *config __maybe_unused,
1470375369abSNamhyung Kim 						struct perf_cpu cpu)
1471375369abSNamhyung Kim {
1472375369abSNamhyung Kim 	return aggr_cpu_id__global(cpu, /*data=*/NULL);
1473375369abSNamhyung Kim }
1474375369abSNamhyung Kim 
perf_stat__get_cpu(struct perf_stat_config * config __maybe_unused,struct perf_cpu cpu)14758938cfa7SNamhyung Kim static struct aggr_cpu_id perf_stat__get_cpu(struct perf_stat_config *config __maybe_unused,
14768938cfa7SNamhyung Kim 					     struct perf_cpu cpu)
14778938cfa7SNamhyung Kim {
14788938cfa7SNamhyung Kim 	return aggr_cpu_id__cpu(cpu, /*data=*/NULL);
14798938cfa7SNamhyung Kim }
14808938cfa7SNamhyung Kim 
perf_stat__get_aggr(struct perf_stat_config * config,aggr_get_id_t get_id,struct perf_cpu cpu)14812760f5a1SJames Clark static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config,
14826d18804bSIan Rogers 					      aggr_get_id_t get_id, struct perf_cpu cpu)
14831e5a2931SJiri Olsa {
1484ae7e6492SNamhyung Kim 	struct aggr_cpu_id id;
1485ae7e6492SNamhyung Kim 
1486ae7e6492SNamhyung Kim 	/* per-process mode - should use global aggr mode */
1487ae7e6492SNamhyung Kim 	if (cpu.cpu == -1)
1488ae7e6492SNamhyung Kim 		return get_id(config, cpu);
14891e5a2931SJiri Olsa 
14906d18804bSIan Rogers 	if (aggr_cpu_id__is_empty(&config->cpus_aggr_map->map[cpu.cpu]))
14916d18804bSIan Rogers 		config->cpus_aggr_map->map[cpu.cpu] = get_id(config, cpu);
14921e5a2931SJiri Olsa 
14936d18804bSIan Rogers 	id = config->cpus_aggr_map->map[cpu.cpu];
14942760f5a1SJames Clark 	return id;
14951e5a2931SJiri Olsa }
14961e5a2931SJiri Olsa 
perf_stat__get_socket_cached(struct perf_stat_config * config,struct perf_cpu cpu)14972760f5a1SJames Clark static struct aggr_cpu_id perf_stat__get_socket_cached(struct perf_stat_config *config,
14986d18804bSIan Rogers 						       struct perf_cpu cpu)
14991e5a2931SJiri Olsa {
150088031a0dSIan Rogers 	return perf_stat__get_aggr(config, perf_stat__get_socket, cpu);
15011e5a2931SJiri Olsa }
15021e5a2931SJiri Olsa 
perf_stat__get_die_cached(struct perf_stat_config * config,struct perf_cpu cpu)15032760f5a1SJames Clark static struct aggr_cpu_id perf_stat__get_die_cached(struct perf_stat_config *config,
15046d18804bSIan Rogers 						    struct perf_cpu cpu)
1505db5742b6SKan Liang {
150688031a0dSIan Rogers 	return perf_stat__get_aggr(config, perf_stat__get_die, cpu);
1507db5742b6SKan Liang }
1508db5742b6SKan Liang 
perf_stat__get_cache_id_cached(struct perf_stat_config * config,struct perf_cpu cpu)1509995ed074SK Prateek Nayak static struct aggr_cpu_id perf_stat__get_cache_id_cached(struct perf_stat_config *config,
1510995ed074SK Prateek Nayak 							 struct perf_cpu cpu)
1511995ed074SK Prateek Nayak {
1512995ed074SK Prateek Nayak 	return perf_stat__get_aggr(config, perf_stat__get_cache_id, cpu);
1513995ed074SK Prateek Nayak }
1514995ed074SK Prateek Nayak 
perf_stat__get_core_cached(struct perf_stat_config * config,struct perf_cpu cpu)15152760f5a1SJames Clark static struct aggr_cpu_id perf_stat__get_core_cached(struct perf_stat_config *config,
15166d18804bSIan Rogers 						     struct perf_cpu cpu)
15171e5a2931SJiri Olsa {
151888031a0dSIan Rogers 	return perf_stat__get_aggr(config, perf_stat__get_core, cpu);
15191e5a2931SJiri Olsa }
15201e5a2931SJiri Olsa 
perf_stat__get_node_cached(struct perf_stat_config * config,struct perf_cpu cpu)15212760f5a1SJames Clark static struct aggr_cpu_id perf_stat__get_node_cached(struct perf_stat_config *config,
15226d18804bSIan Rogers 						     struct perf_cpu cpu)
152386895b48SJiri Olsa {
152488031a0dSIan Rogers 	return perf_stat__get_aggr(config, perf_stat__get_node, cpu);
152586895b48SJiri Olsa }
152686895b48SJiri Olsa 
perf_stat__get_global_cached(struct perf_stat_config * config,struct perf_cpu cpu)1527375369abSNamhyung Kim static struct aggr_cpu_id perf_stat__get_global_cached(struct perf_stat_config *config,
1528375369abSNamhyung Kim 						       struct perf_cpu cpu)
1529375369abSNamhyung Kim {
1530375369abSNamhyung Kim 	return perf_stat__get_aggr(config, perf_stat__get_global, cpu);
1531375369abSNamhyung Kim }
1532375369abSNamhyung Kim 
perf_stat__get_cpu_cached(struct perf_stat_config * config,struct perf_cpu cpu)15338938cfa7SNamhyung Kim static struct aggr_cpu_id perf_stat__get_cpu_cached(struct perf_stat_config *config,
15348938cfa7SNamhyung Kim 						    struct perf_cpu cpu)
15358938cfa7SNamhyung Kim {
15368938cfa7SNamhyung Kim 	return perf_stat__get_aggr(config, perf_stat__get_cpu, cpu);
15378938cfa7SNamhyung Kim }
15388938cfa7SNamhyung Kim 
aggr_mode__get_aggr(enum aggr_mode aggr_mode)15395f50e15cSIan Rogers static aggr_cpu_id_get_t aggr_mode__get_aggr(enum aggr_mode aggr_mode)
154086ee6e18SStephane Eranian {
15415f50e15cSIan Rogers 	switch (aggr_mode) {
154286ee6e18SStephane Eranian 	case AGGR_SOCKET:
1543973aeb3cSIan Rogers 		return aggr_cpu_id__socket;
1544db5742b6SKan Liang 	case AGGR_DIE:
1545973aeb3cSIan Rogers 		return aggr_cpu_id__die;
1546995ed074SK Prateek Nayak 	case AGGR_CACHE:
1547995ed074SK Prateek Nayak 		return aggr_cpu_id__cache;
154812c08a9fSStephane Eranian 	case AGGR_CORE:
1549973aeb3cSIan Rogers 		return aggr_cpu_id__core;
155086895b48SJiri Olsa 	case AGGR_NODE:
1551973aeb3cSIan Rogers 		return aggr_cpu_id__node;
155286ee6e18SStephane Eranian 	case AGGR_NONE:
15538938cfa7SNamhyung Kim 		return aggr_cpu_id__cpu;
155486ee6e18SStephane Eranian 	case AGGR_GLOBAL:
1555375369abSNamhyung Kim 		return aggr_cpu_id__global;
155632b8af82SJiri Olsa 	case AGGR_THREAD:
1557208df99eSJiri Olsa 	case AGGR_UNSET:
1558df936cadSClaire Jensen 	case AGGR_MAX:
155986ee6e18SStephane Eranian 	default:
15605f50e15cSIan Rogers 		return NULL;
15615f50e15cSIan Rogers 	}
15625f50e15cSIan Rogers }
15635f50e15cSIan Rogers 
aggr_mode__get_id(enum aggr_mode aggr_mode)15645f50e15cSIan Rogers static aggr_get_id_t aggr_mode__get_id(enum aggr_mode aggr_mode)
15655f50e15cSIan Rogers {
15665f50e15cSIan Rogers 	switch (aggr_mode) {
15675f50e15cSIan Rogers 	case AGGR_SOCKET:
15685f50e15cSIan Rogers 		return perf_stat__get_socket_cached;
15695f50e15cSIan Rogers 	case AGGR_DIE:
15705f50e15cSIan Rogers 		return perf_stat__get_die_cached;
1571995ed074SK Prateek Nayak 	case AGGR_CACHE:
1572995ed074SK Prateek Nayak 		return perf_stat__get_cache_id_cached;
15735f50e15cSIan Rogers 	case AGGR_CORE:
15745f50e15cSIan Rogers 		return perf_stat__get_core_cached;
15755f50e15cSIan Rogers 	case AGGR_NODE:
15765f50e15cSIan Rogers 		return perf_stat__get_node_cached;
15775f50e15cSIan Rogers 	case AGGR_NONE:
15788938cfa7SNamhyung Kim 		return perf_stat__get_cpu_cached;
15795f50e15cSIan Rogers 	case AGGR_GLOBAL:
1580375369abSNamhyung Kim 		return perf_stat__get_global_cached;
15815f50e15cSIan Rogers 	case AGGR_THREAD:
15825f50e15cSIan Rogers 	case AGGR_UNSET:
1583df936cadSClaire Jensen 	case AGGR_MAX:
15845f50e15cSIan Rogers 	default:
15855f50e15cSIan Rogers 		return NULL;
15865f50e15cSIan Rogers 	}
15875f50e15cSIan Rogers }
15885f50e15cSIan Rogers 
perf_stat_init_aggr_mode(void)15895f50e15cSIan Rogers static int perf_stat_init_aggr_mode(void)
15905f50e15cSIan Rogers {
15915f50e15cSIan Rogers 	int nr;
15925f50e15cSIan Rogers 	aggr_cpu_id_get_t get_id = aggr_mode__get_aggr(stat_config.aggr_mode);
15935f50e15cSIan Rogers 
15945f50e15cSIan Rogers 	if (get_id) {
1595505ac48bSNamhyung Kim 		bool needs_sort = stat_config.aggr_mode != AGGR_NONE;
15960df6ade7SIan Rogers 		stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
1597505ac48bSNamhyung Kim 							 get_id, /*data=*/NULL, needs_sort);
15985f50e15cSIan Rogers 		if (!stat_config.aggr_map) {
1599db1f5f10SYang Jihong 			pr_err("cannot build %s map\n", aggr_mode__string[stat_config.aggr_mode]);
16005f50e15cSIan Rogers 			return -1;
16015f50e15cSIan Rogers 		}
16025f50e15cSIan Rogers 		stat_config.aggr_get_id = aggr_mode__get_id(stat_config.aggr_mode);
160386ee6e18SStephane Eranian 	}
16041e5a2931SJiri Olsa 
1605050059e1SNamhyung Kim 	if (stat_config.aggr_mode == AGGR_THREAD) {
1606050059e1SNamhyung Kim 		nr = perf_thread_map__nr(evsel_list->core.threads);
1607050059e1SNamhyung Kim 		stat_config.aggr_map = cpu_aggr_map__empty_new(nr);
1608050059e1SNamhyung Kim 		if (stat_config.aggr_map == NULL)
1609050059e1SNamhyung Kim 			return -ENOMEM;
1610050059e1SNamhyung Kim 
1611050059e1SNamhyung Kim 		for (int s = 0; s < nr; s++) {
1612050059e1SNamhyung Kim 			struct aggr_cpu_id id = aggr_cpu_id__empty();
1613050059e1SNamhyung Kim 
1614050059e1SNamhyung Kim 			id.thread_idx = s;
1615050059e1SNamhyung Kim 			stat_config.aggr_map->map[s] = id;
1616050059e1SNamhyung Kim 		}
1617050059e1SNamhyung Kim 		return 0;
1618050059e1SNamhyung Kim 	}
1619050059e1SNamhyung Kim 
16201e5a2931SJiri Olsa 	/*
16211e5a2931SJiri Olsa 	 * The evsel_list->cpus is the base we operate on,
16221e5a2931SJiri Olsa 	 * taking the highest cpu number to be the size of
16231e5a2931SJiri Olsa 	 * the aggregation translate cpumap.
16241e5a2931SJiri Olsa 	 */
16254b9d563dSIan Rogers 	if (!perf_cpu_map__empty(evsel_list->core.user_requested_cpus))
16260df6ade7SIan Rogers 		nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
16278a96f454SIan Rogers 	else
16288a96f454SIan Rogers 		nr = 0;
1629d526e1a0SJames Clark 	stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1);
16306f6b6594SJiri Olsa 	return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
163186ee6e18SStephane Eranian }
163286ee6e18SStephane Eranian 
cpu_aggr_map__delete(struct cpu_aggr_map * map)1633d526e1a0SJames Clark static void cpu_aggr_map__delete(struct cpu_aggr_map *map)
1634d526e1a0SJames Clark {
1635d526e1a0SJames Clark 	if (map) {
1636d526e1a0SJames Clark 		WARN_ONCE(refcount_read(&map->refcnt) != 0,
1637d526e1a0SJames Clark 			  "cpu_aggr_map refcnt unbalanced\n");
1638d526e1a0SJames Clark 		free(map);
1639d526e1a0SJames Clark 	}
1640d526e1a0SJames Clark }
1641d526e1a0SJames Clark 
cpu_aggr_map__put(struct cpu_aggr_map * map)1642d526e1a0SJames Clark static void cpu_aggr_map__put(struct cpu_aggr_map *map)
1643d526e1a0SJames Clark {
1644d526e1a0SJames Clark 	if (map && refcount_dec_and_test(&map->refcnt))
1645d526e1a0SJames Clark 		cpu_aggr_map__delete(map);
1646d526e1a0SJames Clark }
1647d526e1a0SJames Clark 
perf_stat__exit_aggr_mode(void)1648544c2ae7SMasami Hiramatsu static void perf_stat__exit_aggr_mode(void)
1649544c2ae7SMasami Hiramatsu {
1650d526e1a0SJames Clark 	cpu_aggr_map__put(stat_config.aggr_map);
1651d526e1a0SJames Clark 	cpu_aggr_map__put(stat_config.cpus_aggr_map);
16526f6b6594SJiri Olsa 	stat_config.aggr_map = NULL;
16536f6b6594SJiri Olsa 	stat_config.cpus_aggr_map = NULL;
1654544c2ae7SMasami Hiramatsu }
1655544c2ae7SMasami Hiramatsu 
perf_env__get_socket_aggr_by_cpu(struct perf_cpu cpu,void * data)16566d18804bSIan Rogers static struct aggr_cpu_id perf_env__get_socket_aggr_by_cpu(struct perf_cpu cpu, void *data)
165768d702f7SJiri Olsa {
165868d702f7SJiri Olsa 	struct perf_env *env = data;
165951b826faSIan Rogers 	struct aggr_cpu_id id = aggr_cpu_id__empty();
166068d702f7SJiri Olsa 
16616d18804bSIan Rogers 	if (cpu.cpu != -1)
16626d18804bSIan Rogers 		id.socket = env->cpu[cpu.cpu].socket_id;
16632760f5a1SJames Clark 
16642760f5a1SJames Clark 	return id;
166568d702f7SJiri Olsa }
166668d702f7SJiri Olsa 
perf_env__get_die_aggr_by_cpu(struct perf_cpu cpu,void * data)16676d18804bSIan Rogers static struct aggr_cpu_id perf_env__get_die_aggr_by_cpu(struct perf_cpu cpu, void *data)
1668db5742b6SKan Liang {
1669db5742b6SKan Liang 	struct perf_env *env = data;
167051b826faSIan Rogers 	struct aggr_cpu_id id = aggr_cpu_id__empty();
1671db5742b6SKan Liang 
16726d18804bSIan Rogers 	if (cpu.cpu != -1) {
1673db5742b6SKan Liang 		/*
16741a270cb6SJames Clark 		 * die_id is relative to socket, so start
16751a270cb6SJames Clark 		 * with the socket ID and then add die to
16761a270cb6SJames Clark 		 * make a unique ID.
1677db5742b6SKan Liang 		 */
16786d18804bSIan Rogers 		id.socket = env->cpu[cpu.cpu].socket_id;
16796d18804bSIan Rogers 		id.die = env->cpu[cpu.cpu].die_id;
1680db5742b6SKan Liang 	}
1681db5742b6SKan Liang 
16822760f5a1SJames Clark 	return id;
1683db5742b6SKan Liang }
1684db5742b6SKan Liang 
perf_env__get_cache_id_for_cpu(struct perf_cpu cpu,struct perf_env * env,u32 cache_level,struct aggr_cpu_id * id)1685995ed074SK Prateek Nayak static void perf_env__get_cache_id_for_cpu(struct perf_cpu cpu, struct perf_env *env,
1686995ed074SK Prateek Nayak 					   u32 cache_level, struct aggr_cpu_id *id)
1687995ed074SK Prateek Nayak {
1688995ed074SK Prateek Nayak 	int i;
1689995ed074SK Prateek Nayak 	int caches_cnt = env->caches_cnt;
1690995ed074SK Prateek Nayak 	struct cpu_cache_level *caches = env->caches;
1691995ed074SK Prateek Nayak 
1692995ed074SK Prateek Nayak 	id->cache_lvl = (cache_level > MAX_CACHE_LVL) ? 0 : cache_level;
1693995ed074SK Prateek Nayak 	id->cache = -1;
1694995ed074SK Prateek Nayak 
1695995ed074SK Prateek Nayak 	if (!caches_cnt)
1696995ed074SK Prateek Nayak 		return;
1697995ed074SK Prateek Nayak 
1698995ed074SK Prateek Nayak 	for (i = caches_cnt - 1; i > -1; --i) {
1699995ed074SK Prateek Nayak 		struct perf_cpu_map *cpu_map;
1700995ed074SK Prateek Nayak 		int map_contains_cpu;
1701995ed074SK Prateek Nayak 
1702995ed074SK Prateek Nayak 		/*
1703995ed074SK Prateek Nayak 		 * If user has not specified a level, find the fist level with
1704995ed074SK Prateek Nayak 		 * the cpu in the map. Since building the map is expensive, do
1705995ed074SK Prateek Nayak 		 * this only if levels match.
1706995ed074SK Prateek Nayak 		 */
1707995ed074SK Prateek Nayak 		if (cache_level <= MAX_CACHE_LVL && caches[i].level != cache_level)
1708995ed074SK Prateek Nayak 			continue;
1709995ed074SK Prateek Nayak 
1710995ed074SK Prateek Nayak 		cpu_map = perf_cpu_map__new(caches[i].map);
1711995ed074SK Prateek Nayak 		map_contains_cpu = perf_cpu_map__idx(cpu_map, cpu);
1712995ed074SK Prateek Nayak 		perf_cpu_map__put(cpu_map);
1713995ed074SK Prateek Nayak 
1714995ed074SK Prateek Nayak 		if (map_contains_cpu != -1) {
1715995ed074SK Prateek Nayak 			id->cache_lvl = caches[i].level;
1716995ed074SK Prateek Nayak 			id->cache = cpu__get_cache_id_from_map(cpu, caches[i].map);
1717995ed074SK Prateek Nayak 			return;
1718995ed074SK Prateek Nayak 		}
1719995ed074SK Prateek Nayak 	}
1720995ed074SK Prateek Nayak }
1721995ed074SK Prateek Nayak 
perf_env__get_cache_aggr_by_cpu(struct perf_cpu cpu,void * data)1722995ed074SK Prateek Nayak static struct aggr_cpu_id perf_env__get_cache_aggr_by_cpu(struct perf_cpu cpu,
1723995ed074SK Prateek Nayak 							  void *data)
1724995ed074SK Prateek Nayak {
1725995ed074SK Prateek Nayak 	struct perf_env *env = data;
1726995ed074SK Prateek Nayak 	struct aggr_cpu_id id = aggr_cpu_id__empty();
1727995ed074SK Prateek Nayak 
1728995ed074SK Prateek Nayak 	if (cpu.cpu != -1) {
1729995ed074SK Prateek Nayak 		u32 cache_level = (perf_stat.aggr_level) ?: stat_config.aggr_level;
1730995ed074SK Prateek Nayak 
1731995ed074SK Prateek Nayak 		id.socket = env->cpu[cpu.cpu].socket_id;
1732995ed074SK Prateek Nayak 		id.die = env->cpu[cpu.cpu].die_id;
1733995ed074SK Prateek Nayak 		perf_env__get_cache_id_for_cpu(cpu, env, cache_level, &id);
1734995ed074SK Prateek Nayak 	}
1735995ed074SK Prateek Nayak 
1736995ed074SK Prateek Nayak 	return id;
1737995ed074SK Prateek Nayak }
1738995ed074SK Prateek Nayak 
perf_env__get_core_aggr_by_cpu(struct perf_cpu cpu,void * data)17396d18804bSIan Rogers static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(struct perf_cpu cpu, void *data)
174068d702f7SJiri Olsa {
174168d702f7SJiri Olsa 	struct perf_env *env = data;
174251b826faSIan Rogers 	struct aggr_cpu_id id = aggr_cpu_id__empty();
174368d702f7SJiri Olsa 
17446d18804bSIan Rogers 	if (cpu.cpu != -1) {
174568d702f7SJiri Olsa 		/*
1746db5742b6SKan Liang 		 * core_id is relative to socket and die,
1747b9933817SJames Clark 		 * we need a global id. So we set
1748b9933817SJames Clark 		 * socket, die id and core id
174968d702f7SJiri Olsa 		 */
17506d18804bSIan Rogers 		id.socket = env->cpu[cpu.cpu].socket_id;
17516d18804bSIan Rogers 		id.die = env->cpu[cpu.cpu].die_id;
17526d18804bSIan Rogers 		id.core = env->cpu[cpu.cpu].core_id;
175368d702f7SJiri Olsa 	}
175468d702f7SJiri Olsa 
17552760f5a1SJames Clark 	return id;
175668d702f7SJiri Olsa }
175768d702f7SJiri Olsa 
perf_env__get_cpu_aggr_by_cpu(struct perf_cpu cpu,void * data)17588938cfa7SNamhyung Kim static struct aggr_cpu_id perf_env__get_cpu_aggr_by_cpu(struct perf_cpu cpu, void *data)
17598938cfa7SNamhyung Kim {
17608938cfa7SNamhyung Kim 	struct perf_env *env = data;
17618938cfa7SNamhyung Kim 	struct aggr_cpu_id id = aggr_cpu_id__empty();
17628938cfa7SNamhyung Kim 
17638938cfa7SNamhyung Kim 	if (cpu.cpu != -1) {
17648938cfa7SNamhyung Kim 		/*
17658938cfa7SNamhyung Kim 		 * core_id is relative to socket and die,
17668938cfa7SNamhyung Kim 		 * we need a global id. So we set
17678938cfa7SNamhyung Kim 		 * socket, die id and core id
17688938cfa7SNamhyung Kim 		 */
17698938cfa7SNamhyung Kim 		id.socket = env->cpu[cpu.cpu].socket_id;
17708938cfa7SNamhyung Kim 		id.die = env->cpu[cpu.cpu].die_id;
17718938cfa7SNamhyung Kim 		id.core = env->cpu[cpu.cpu].core_id;
17728938cfa7SNamhyung Kim 		id.cpu = cpu;
17738938cfa7SNamhyung Kim 	}
17748938cfa7SNamhyung Kim 
17758938cfa7SNamhyung Kim 	return id;
17768938cfa7SNamhyung Kim }
17778938cfa7SNamhyung Kim 
perf_env__get_node_aggr_by_cpu(struct perf_cpu cpu,void * data)17786d18804bSIan Rogers static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(struct perf_cpu cpu, void *data)
177988031a0dSIan Rogers {
178051b826faSIan Rogers 	struct aggr_cpu_id id = aggr_cpu_id__empty();
178186895b48SJiri Olsa 
1782fcd83a35SJames Clark 	id.node = perf_env__numa_node(data, cpu);
17832760f5a1SJames Clark 	return id;
178486895b48SJiri Olsa }
178586895b48SJiri Olsa 
perf_env__get_global_aggr_by_cpu(struct perf_cpu cpu __maybe_unused,void * data __maybe_unused)1786375369abSNamhyung Kim static struct aggr_cpu_id perf_env__get_global_aggr_by_cpu(struct perf_cpu cpu __maybe_unused,
1787375369abSNamhyung Kim 							   void *data __maybe_unused)
1788375369abSNamhyung Kim {
1789375369abSNamhyung Kim 	struct aggr_cpu_id id = aggr_cpu_id__empty();
1790375369abSNamhyung Kim 
1791375369abSNamhyung Kim 	/* it always aggregates to the cpu 0 */
1792375369abSNamhyung Kim 	id.cpu = (struct perf_cpu){ .cpu = 0 };
1793375369abSNamhyung Kim 	return id;
1794375369abSNamhyung Kim }
1795375369abSNamhyung Kim 
perf_stat__get_socket_file(struct perf_stat_config * config __maybe_unused,struct perf_cpu cpu)17962760f5a1SJames Clark static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused,
17976d18804bSIan Rogers 						     struct perf_cpu cpu)
179868d702f7SJiri Olsa {
179988031a0dSIan Rogers 	return perf_env__get_socket_aggr_by_cpu(cpu, &perf_stat.session->header.env);
180068d702f7SJiri Olsa }
perf_stat__get_die_file(struct perf_stat_config * config __maybe_unused,struct perf_cpu cpu)18012760f5a1SJames Clark static struct aggr_cpu_id perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused,
18026d18804bSIan Rogers 						  struct perf_cpu cpu)
1803db5742b6SKan Liang {
180488031a0dSIan Rogers 	return perf_env__get_die_aggr_by_cpu(cpu, &perf_stat.session->header.env);
1805db5742b6SKan Liang }
180668d702f7SJiri Olsa 
perf_stat__get_cache_file(struct perf_stat_config * config __maybe_unused,struct perf_cpu cpu)1807995ed074SK Prateek Nayak static struct aggr_cpu_id perf_stat__get_cache_file(struct perf_stat_config *config __maybe_unused,
1808995ed074SK Prateek Nayak 						    struct perf_cpu cpu)
1809995ed074SK Prateek Nayak {
1810995ed074SK Prateek Nayak 	return perf_env__get_cache_aggr_by_cpu(cpu, &perf_stat.session->header.env);
1811995ed074SK Prateek Nayak }
1812995ed074SK Prateek Nayak 
perf_stat__get_core_file(struct perf_stat_config * config __maybe_unused,struct perf_cpu cpu)18132760f5a1SJames Clark static struct aggr_cpu_id perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused,
18146d18804bSIan Rogers 						   struct perf_cpu cpu)
181568d702f7SJiri Olsa {
181688031a0dSIan Rogers 	return perf_env__get_core_aggr_by_cpu(cpu, &perf_stat.session->header.env);
181768d702f7SJiri Olsa }
181868d702f7SJiri Olsa 
perf_stat__get_cpu_file(struct perf_stat_config * config __maybe_unused,struct perf_cpu cpu)18198938cfa7SNamhyung Kim static struct aggr_cpu_id perf_stat__get_cpu_file(struct perf_stat_config *config __maybe_unused,
18208938cfa7SNamhyung Kim 						  struct perf_cpu cpu)
18218938cfa7SNamhyung Kim {
18228938cfa7SNamhyung Kim 	return perf_env__get_cpu_aggr_by_cpu(cpu, &perf_stat.session->header.env);
18238938cfa7SNamhyung Kim }
18248938cfa7SNamhyung Kim 
perf_stat__get_node_file(struct perf_stat_config * config __maybe_unused,struct perf_cpu cpu)18252760f5a1SJames Clark static struct aggr_cpu_id perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused,
18266d18804bSIan Rogers 						   struct perf_cpu cpu)
182786895b48SJiri Olsa {
182888031a0dSIan Rogers 	return perf_env__get_node_aggr_by_cpu(cpu, &perf_stat.session->header.env);
182986895b48SJiri Olsa }
183086895b48SJiri Olsa 
perf_stat__get_global_file(struct perf_stat_config * config __maybe_unused,struct perf_cpu cpu)1831375369abSNamhyung Kim static struct aggr_cpu_id perf_stat__get_global_file(struct perf_stat_config *config __maybe_unused,
1832375369abSNamhyung Kim 						     struct perf_cpu cpu)
1833375369abSNamhyung Kim {
1834375369abSNamhyung Kim 	return perf_env__get_global_aggr_by_cpu(cpu, &perf_stat.session->header.env);
1835375369abSNamhyung Kim }
1836375369abSNamhyung Kim 
aggr_mode__get_aggr_file(enum aggr_mode aggr_mode)18375f50e15cSIan Rogers static aggr_cpu_id_get_t aggr_mode__get_aggr_file(enum aggr_mode aggr_mode)
183868d702f7SJiri Olsa {
18395f50e15cSIan Rogers 	switch (aggr_mode) {
184068d702f7SJiri Olsa 	case AGGR_SOCKET:
18415f50e15cSIan Rogers 		return perf_env__get_socket_aggr_by_cpu;
1842db5742b6SKan Liang 	case AGGR_DIE:
18435f50e15cSIan Rogers 		return perf_env__get_die_aggr_by_cpu;
1844995ed074SK Prateek Nayak 	case AGGR_CACHE:
1845995ed074SK Prateek Nayak 		return perf_env__get_cache_aggr_by_cpu;
184668d702f7SJiri Olsa 	case AGGR_CORE:
18475f50e15cSIan Rogers 		return perf_env__get_core_aggr_by_cpu;
184886895b48SJiri Olsa 	case AGGR_NODE:
18495f50e15cSIan Rogers 		return perf_env__get_node_aggr_by_cpu;
185068d702f7SJiri Olsa 	case AGGR_GLOBAL:
1851375369abSNamhyung Kim 		return perf_env__get_global_aggr_by_cpu;
1852375369abSNamhyung Kim 	case AGGR_NONE:
18538938cfa7SNamhyung Kim 		return perf_env__get_cpu_aggr_by_cpu;
185468d702f7SJiri Olsa 	case AGGR_THREAD:
185568d702f7SJiri Olsa 	case AGGR_UNSET:
1856df936cadSClaire Jensen 	case AGGR_MAX:
185768d702f7SJiri Olsa 	default:
18585f50e15cSIan Rogers 		return NULL;
18595f50e15cSIan Rogers 	}
186068d702f7SJiri Olsa }
186168d702f7SJiri Olsa 
aggr_mode__get_id_file(enum aggr_mode aggr_mode)18625f50e15cSIan Rogers static aggr_get_id_t aggr_mode__get_id_file(enum aggr_mode aggr_mode)
18635f50e15cSIan Rogers {
18645f50e15cSIan Rogers 	switch (aggr_mode) {
18655f50e15cSIan Rogers 	case AGGR_SOCKET:
18665f50e15cSIan Rogers 		return perf_stat__get_socket_file;
18675f50e15cSIan Rogers 	case AGGR_DIE:
18685f50e15cSIan Rogers 		return perf_stat__get_die_file;
1869995ed074SK Prateek Nayak 	case AGGR_CACHE:
1870995ed074SK Prateek Nayak 		return perf_stat__get_cache_file;
18715f50e15cSIan Rogers 	case AGGR_CORE:
18725f50e15cSIan Rogers 		return perf_stat__get_core_file;
18735f50e15cSIan Rogers 	case AGGR_NODE:
18745f50e15cSIan Rogers 		return perf_stat__get_node_file;
18755f50e15cSIan Rogers 	case AGGR_GLOBAL:
1876375369abSNamhyung Kim 		return perf_stat__get_global_file;
1877375369abSNamhyung Kim 	case AGGR_NONE:
18788938cfa7SNamhyung Kim 		return perf_stat__get_cpu_file;
18795f50e15cSIan Rogers 	case AGGR_THREAD:
18805f50e15cSIan Rogers 	case AGGR_UNSET:
1881df936cadSClaire Jensen 	case AGGR_MAX:
18825f50e15cSIan Rogers 	default:
18835f50e15cSIan Rogers 		return NULL;
18845f50e15cSIan Rogers 	}
18855f50e15cSIan Rogers }
18865f50e15cSIan Rogers 
perf_stat_init_aggr_mode_file(struct perf_stat * st)18875f50e15cSIan Rogers static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
18885f50e15cSIan Rogers {
18895f50e15cSIan Rogers 	struct perf_env *env = &st->session->header.env;
18905f50e15cSIan Rogers 	aggr_cpu_id_get_t get_id = aggr_mode__get_aggr_file(stat_config.aggr_mode);
1891505ac48bSNamhyung Kim 	bool needs_sort = stat_config.aggr_mode != AGGR_NONE;
18925f50e15cSIan Rogers 
1893050059e1SNamhyung Kim 	if (stat_config.aggr_mode == AGGR_THREAD) {
1894050059e1SNamhyung Kim 		int nr = perf_thread_map__nr(evsel_list->core.threads);
1895050059e1SNamhyung Kim 
1896050059e1SNamhyung Kim 		stat_config.aggr_map = cpu_aggr_map__empty_new(nr);
1897050059e1SNamhyung Kim 		if (stat_config.aggr_map == NULL)
1898050059e1SNamhyung Kim 			return -ENOMEM;
1899050059e1SNamhyung Kim 
1900050059e1SNamhyung Kim 		for (int s = 0; s < nr; s++) {
1901050059e1SNamhyung Kim 			struct aggr_cpu_id id = aggr_cpu_id__empty();
1902050059e1SNamhyung Kim 
1903050059e1SNamhyung Kim 			id.thread_idx = s;
1904050059e1SNamhyung Kim 			stat_config.aggr_map->map[s] = id;
1905050059e1SNamhyung Kim 		}
1906050059e1SNamhyung Kim 		return 0;
1907050059e1SNamhyung Kim 	}
1908050059e1SNamhyung Kim 
19095f50e15cSIan Rogers 	if (!get_id)
19105f50e15cSIan Rogers 		return 0;
19115f50e15cSIan Rogers 
1912505ac48bSNamhyung Kim 	stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
1913505ac48bSNamhyung Kim 						 get_id, env, needs_sort);
19145f50e15cSIan Rogers 	if (!stat_config.aggr_map) {
1915db1f5f10SYang Jihong 		pr_err("cannot build %s map\n", aggr_mode__string[stat_config.aggr_mode]);
19165f50e15cSIan Rogers 		return -1;
19175f50e15cSIan Rogers 	}
19185f50e15cSIan Rogers 	stat_config.aggr_get_id = aggr_mode__get_id_file(stat_config.aggr_mode);
191968d702f7SJiri Olsa 	return 0;
192068d702f7SJiri Olsa }
192168d702f7SJiri Olsa 
19222cba3ffbSIngo Molnar /*
19232cba3ffbSIngo Molnar  * Add default attributes, if there were no attributes specified or
19242cba3ffbSIngo Molnar  * if -d/--detailed, -d -d or -d -d -d is used:
19252cba3ffbSIngo Molnar  */
add_default_attributes(void)19262cba3ffbSIngo Molnar static int add_default_attributes(void)
19272cba3ffbSIngo Molnar {
19289dec4473SAndi Kleen 	struct perf_event_attr default_attrs0[] = {
1929b070a547SArnaldo Carvalho de Melo 
1930b070a547SArnaldo Carvalho de Melo   { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK		},
1931b070a547SArnaldo Carvalho de Melo   { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES	},
1932b070a547SArnaldo Carvalho de Melo   { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS		},
1933b070a547SArnaldo Carvalho de Melo   { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS		},
1934b070a547SArnaldo Carvalho de Melo 
1935b070a547SArnaldo Carvalho de Melo   { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES		},
19369dec4473SAndi Kleen };
19379dec4473SAndi Kleen 	struct perf_event_attr frontend_attrs[] = {
1938b070a547SArnaldo Carvalho de Melo   { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND	},
19399dec4473SAndi Kleen };
19409dec4473SAndi Kleen 	struct perf_event_attr backend_attrs[] = {
1941b070a547SArnaldo Carvalho de Melo   { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND	},
19429dec4473SAndi Kleen };
19439dec4473SAndi Kleen 	struct perf_event_attr default_attrs1[] = {
1944b070a547SArnaldo Carvalho de Melo   { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS		},
1945b070a547SArnaldo Carvalho de Melo   { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS	},
1946b070a547SArnaldo Carvalho de Melo   { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES		},
1947b070a547SArnaldo Carvalho de Melo 
1948b070a547SArnaldo Carvalho de Melo };
1949b070a547SArnaldo Carvalho de Melo 
1950b070a547SArnaldo Carvalho de Melo /*
1951b070a547SArnaldo Carvalho de Melo  * Detailed stats (-d), covering the L1 and last level data caches:
1952b070a547SArnaldo Carvalho de Melo  */
1953b070a547SArnaldo Carvalho de Melo 	struct perf_event_attr detailed_attrs[] = {
1954b070a547SArnaldo Carvalho de Melo 
1955b070a547SArnaldo Carvalho de Melo   { .type = PERF_TYPE_HW_CACHE,
1956b070a547SArnaldo Carvalho de Melo     .config =
1957b070a547SArnaldo Carvalho de Melo 	 PERF_COUNT_HW_CACHE_L1D		<<  0  |
1958b070a547SArnaldo Carvalho de Melo 	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
1959b070a547SArnaldo Carvalho de Melo 	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},
1960b070a547SArnaldo Carvalho de Melo 
1961b070a547SArnaldo Carvalho de Melo   { .type = PERF_TYPE_HW_CACHE,
1962b070a547SArnaldo Carvalho de Melo     .config =
1963b070a547SArnaldo Carvalho de Melo 	 PERF_COUNT_HW_CACHE_L1D		<<  0  |
1964b070a547SArnaldo Carvalho de Melo 	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
1965b070a547SArnaldo Carvalho de Melo 	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},
1966b070a547SArnaldo Carvalho de Melo 
1967b070a547SArnaldo Carvalho de Melo   { .type = PERF_TYPE_HW_CACHE,
1968b070a547SArnaldo Carvalho de Melo     .config =
1969b070a547SArnaldo Carvalho de Melo 	 PERF_COUNT_HW_CACHE_LL			<<  0  |
1970b070a547SArnaldo Carvalho de Melo 	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
1971b070a547SArnaldo Carvalho de Melo 	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},
1972b070a547SArnaldo Carvalho de Melo 
1973b070a547SArnaldo Carvalho de Melo   { .type = PERF_TYPE_HW_CACHE,
1974b070a547SArnaldo Carvalho de Melo     .config =
1975b070a547SArnaldo Carvalho de Melo 	 PERF_COUNT_HW_CACHE_LL			<<  0  |
1976b070a547SArnaldo Carvalho de Melo 	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
1977b070a547SArnaldo Carvalho de Melo 	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},
1978b070a547SArnaldo Carvalho de Melo };
1979b070a547SArnaldo Carvalho de Melo 
1980b070a547SArnaldo Carvalho de Melo /*
1981b070a547SArnaldo Carvalho de Melo  * Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
1982b070a547SArnaldo Carvalho de Melo  */
1983b070a547SArnaldo Carvalho de Melo 	struct perf_event_attr very_detailed_attrs[] = {
1984b070a547SArnaldo Carvalho de Melo 
1985b070a547SArnaldo Carvalho de Melo   { .type = PERF_TYPE_HW_CACHE,
1986b070a547SArnaldo Carvalho de Melo     .config =
1987b070a547SArnaldo Carvalho de Melo 	 PERF_COUNT_HW_CACHE_L1I		<<  0  |
1988b070a547SArnaldo Carvalho de Melo 	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
1989b070a547SArnaldo Carvalho de Melo 	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},
1990b070a547SArnaldo Carvalho de Melo 
1991b070a547SArnaldo Carvalho de Melo   { .type = PERF_TYPE_HW_CACHE,
1992b070a547SArnaldo Carvalho de Melo     .config =
1993b070a547SArnaldo Carvalho de Melo 	 PERF_COUNT_HW_CACHE_L1I		<<  0  |
1994b070a547SArnaldo Carvalho de Melo 	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
1995b070a547SArnaldo Carvalho de Melo 	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},
1996b070a547SArnaldo Carvalho de Melo 
1997b070a547SArnaldo Carvalho de Melo   { .type = PERF_TYPE_HW_CACHE,
1998b070a547SArnaldo Carvalho de Melo     .config =
1999b070a547SArnaldo Carvalho de Melo 	 PERF_COUNT_HW_CACHE_DTLB		<<  0  |
2000b070a547SArnaldo Carvalho de Melo 	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
2001b070a547SArnaldo Carvalho de Melo 	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},
2002b070a547SArnaldo Carvalho de Melo 
2003b070a547SArnaldo Carvalho de Melo   { .type = PERF_TYPE_HW_CACHE,
2004b070a547SArnaldo Carvalho de Melo     .config =
2005b070a547SArnaldo Carvalho de Melo 	 PERF_COUNT_HW_CACHE_DTLB		<<  0  |
2006b070a547SArnaldo Carvalho de Melo 	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
2007b070a547SArnaldo Carvalho de Melo 	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},
2008b070a547SArnaldo Carvalho de Melo 
2009b070a547SArnaldo Carvalho de Melo   { .type = PERF_TYPE_HW_CACHE,
2010b070a547SArnaldo Carvalho de Melo     .config =
2011b070a547SArnaldo Carvalho de Melo 	 PERF_COUNT_HW_CACHE_ITLB		<<  0  |
2012b070a547SArnaldo Carvalho de Melo 	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
2013b070a547SArnaldo Carvalho de Melo 	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},
2014b070a547SArnaldo Carvalho de Melo 
2015b070a547SArnaldo Carvalho de Melo   { .type = PERF_TYPE_HW_CACHE,
2016b070a547SArnaldo Carvalho de Melo     .config =
2017b070a547SArnaldo Carvalho de Melo 	 PERF_COUNT_HW_CACHE_ITLB		<<  0  |
2018b070a547SArnaldo Carvalho de Melo 	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
2019b070a547SArnaldo Carvalho de Melo 	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},
2020b070a547SArnaldo Carvalho de Melo 
2021b070a547SArnaldo Carvalho de Melo };
2022b070a547SArnaldo Carvalho de Melo 
2023b070a547SArnaldo Carvalho de Melo /*
2024b070a547SArnaldo Carvalho de Melo  * Very, very detailed stats (-d -d -d), adding prefetch events:
2025b070a547SArnaldo Carvalho de Melo  */
2026b070a547SArnaldo Carvalho de Melo 	struct perf_event_attr very_very_detailed_attrs[] = {
2027b070a547SArnaldo Carvalho de Melo 
2028b070a547SArnaldo Carvalho de Melo   { .type = PERF_TYPE_HW_CACHE,
2029b070a547SArnaldo Carvalho de Melo     .config =
2030b070a547SArnaldo Carvalho de Melo 	 PERF_COUNT_HW_CACHE_L1D		<<  0  |
2031b070a547SArnaldo Carvalho de Melo 	(PERF_COUNT_HW_CACHE_OP_PREFETCH	<<  8) |
2032b070a547SArnaldo Carvalho de Melo 	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},
2033b070a547SArnaldo Carvalho de Melo 
2034b070a547SArnaldo Carvalho de Melo   { .type = PERF_TYPE_HW_CACHE,
2035b070a547SArnaldo Carvalho de Melo     .config =
2036b070a547SArnaldo Carvalho de Melo 	 PERF_COUNT_HW_CACHE_L1D		<<  0  |
2037b070a547SArnaldo Carvalho de Melo 	(PERF_COUNT_HW_CACHE_OP_PREFETCH	<<  8) |
2038b070a547SArnaldo Carvalho de Melo 	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},
2039b070a547SArnaldo Carvalho de Melo };
2040a9c1ecdaSKan Liang 
2041a9c1ecdaSKan Liang 	struct perf_event_attr default_null_attrs[] = {};
2042dae47d39SIan Rogers 	const char *pmu = parse_events_option_args.pmu_filter ?: "all";
2043a9c1ecdaSKan Liang 
20442cba3ffbSIngo Molnar 	/* Set attrs if no event is selected and !null_run: */
2045aea0dca1SJiri Olsa 	if (stat_config.null_run)
20462cba3ffbSIngo Molnar 		return 0;
20472cba3ffbSIngo Molnar 
20484cabc3d1SAndi Kleen 	if (transaction_run) {
2049742d92ffSThomas Richter 		/* Handle -T as -M transaction. Once platform specific metrics
20504d39c89fSIngo Molnar 		 * support has been added to the json files, all architectures
2051742d92ffSThomas Richter 		 * will use this approach. To determine transaction support
2052742d92ffSThomas Richter 		 * on an architecture test for such a metric name.
2053742d92ffSThomas Richter 		 */
2054dae47d39SIan Rogers 		if (!metricgroup__has_metric(pmu, "transaction")) {
2055db1f5f10SYang Jihong 			pr_err("Missing transaction metrics\n");
2056d6964c5bSIan Rogers 			return -1;
2057d6964c5bSIan Rogers 		}
2058dae47d39SIan Rogers 		return metricgroup__parse_groups(evsel_list, pmu, "transaction",
205905530a79SIan Rogers 						stat_config.metric_no_group,
206005530a79SIan Rogers 						stat_config.metric_no_merge,
20611fd09e29SIan Rogers 						stat_config.metric_no_threshold,
20621725e9cdSIan Rogers 						stat_config.user_requested_cpu_list,
20631725e9cdSIan Rogers 						stat_config.system_wide,
2064d0192fdbSJiri Olsa 						&stat_config.metric_events);
2065742d92ffSThomas Richter 	}
2066742d92ffSThomas Richter 
2067daefd0bcSKan Liang 	if (smi_cost) {
2068daefd0bcSKan Liang 		int smi;
2069daefd0bcSKan Liang 
2070daefd0bcSKan Liang 		if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) {
2071db1f5f10SYang Jihong 			pr_err("freeze_on_smi is not supported.\n");
2072daefd0bcSKan Liang 			return -1;
2073daefd0bcSKan Liang 		}
2074daefd0bcSKan Liang 
2075daefd0bcSKan Liang 		if (!smi) {
2076daefd0bcSKan Liang 			if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) {
2077daefd0bcSKan Liang 				fprintf(stderr, "Failed to set freeze_on_smi.\n");
2078daefd0bcSKan Liang 				return -1;
2079daefd0bcSKan Liang 			}
2080daefd0bcSKan Liang 			smi_reset = true;
2081daefd0bcSKan Liang 		}
2082daefd0bcSKan Liang 
2083dae47d39SIan Rogers 		if (!metricgroup__has_metric(pmu, "smi")) {
2084db1f5f10SYang Jihong 			pr_err("Missing smi metrics\n");
2085daefd0bcSKan Liang 			return -1;
2086daefd0bcSKan Liang 		}
2087c23f5cc0SIan Rogers 
208807eafd4eSIan Rogers 		if (!force_metric_only)
208907eafd4eSIan Rogers 			stat_config.metric_only = true;
209007eafd4eSIan Rogers 
2091dae47d39SIan Rogers 		return metricgroup__parse_groups(evsel_list, pmu, "smi",
2092c23f5cc0SIan Rogers 						stat_config.metric_no_group,
2093c23f5cc0SIan Rogers 						stat_config.metric_no_merge,
2094c23f5cc0SIan Rogers 						stat_config.metric_no_threshold,
2095c23f5cc0SIan Rogers 						stat_config.user_requested_cpu_list,
2096c23f5cc0SIan Rogers 						stat_config.system_wide,
2097c23f5cc0SIan Rogers 						&stat_config.metric_events);
2098daefd0bcSKan Liang 	}
2099daefd0bcSKan Liang 
210044b1e60aSAndi Kleen 	if (topdown_run) {
21011647cd5bSIan Rogers 		unsigned int max_level = metricgroups__topdown_max_level();
21021647cd5bSIan Rogers 		char str[] = "TopdownL1";
210344b1e60aSAndi Kleen 
210455c36a9fSAndi Kleen 		if (!force_metric_only)
210555c36a9fSAndi Kleen 			stat_config.metric_only = true;
210655c36a9fSAndi Kleen 
21071647cd5bSIan Rogers 		if (!max_level) {
21081647cd5bSIan Rogers 			pr_err("Topdown requested but the topdown metric groups aren't present.\n"
2109db1f5f10SYang Jihong 				"(See perf list the metric groups have names like TopdownL1)\n");
21101647cd5bSIan Rogers 			return -1;
211163e39aa6SKan Liang 		}
211263e39aa6SKan Liang 		if (stat_config.topdown_level > max_level) {
211363e39aa6SKan Liang 			pr_err("Invalid top-down metrics level. The max level is %u.\n", max_level);
211463e39aa6SKan Liang 			return -1;
211563e39aa6SKan Liang 		} else if (!stat_config.topdown_level)
21161647cd5bSIan Rogers 			stat_config.topdown_level = 1;
211763e39aa6SKan Liang 
211855c36a9fSAndi Kleen 		if (!stat_config.interval && !stat_config.metric_only) {
211955c36a9fSAndi Kleen 			fprintf(stat_config.output,
212055c36a9fSAndi Kleen 				"Topdown accuracy may decrease when measuring long periods.\n"
212155c36a9fSAndi Kleen 				"Please print the result regularly, e.g. -I1000\n");
212255c36a9fSAndi Kleen 		}
21231647cd5bSIan Rogers 		str[8] = stat_config.topdown_level + '0';
2124dae47d39SIan Rogers 		if (metricgroup__parse_groups(evsel_list,
2125dae47d39SIan Rogers 						pmu, str,
21261647cd5bSIan Rogers 						/*metric_no_group=*/false,
21271647cd5bSIan Rogers 						/*metric_no_merge=*/false,
21281647cd5bSIan Rogers 						/*metric_no_threshold=*/true,
21291647cd5bSIan Rogers 						stat_config.user_requested_cpu_list,
21301647cd5bSIan Rogers 						stat_config.system_wide,
21311647cd5bSIan Rogers 						&stat_config.metric_events) < 0)
213244b1e60aSAndi Kleen 			return -1;
213344b1e60aSAndi Kleen 	}
213444b1e60aSAndi Kleen 
2135f0c86a2bSZhengjun Xing 	if (!stat_config.topdown_level)
21361647cd5bSIan Rogers 		stat_config.topdown_level = 1;
2137f0c86a2bSZhengjun Xing 
21386484d2f9SJiri Olsa 	if (!evsel_list->core.nr_entries) {
213994b1a603SIan Rogers 		/* No events so add defaults. */
2140a1f3d567SNamhyung Kim 		if (target__has_cpu(&target))
2141a1f3d567SNamhyung Kim 			default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
2142a1f3d567SNamhyung Kim 
2143e251abeeSArnaldo Carvalho de Melo 		if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
21449dec4473SAndi Kleen 			return -1;
21451eaf496eSIan Rogers 		if (perf_pmus__have_event("cpu", "stalled-cycles-frontend")) {
2146e251abeeSArnaldo Carvalho de Melo 			if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0)
21479dec4473SAndi Kleen 				return -1;
21489dec4473SAndi Kleen 		}
21491eaf496eSIan Rogers 		if (perf_pmus__have_event("cpu", "stalled-cycles-backend")) {
2150e251abeeSArnaldo Carvalho de Melo 			if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0)
21519dec4473SAndi Kleen 				return -1;
21529dec4473SAndi Kleen 		}
2153e251abeeSArnaldo Carvalho de Melo 		if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
21542cba3ffbSIngo Molnar 			return -1;
215594b1a603SIan Rogers 		/*
215694b1a603SIan Rogers 		 * Add TopdownL1 metrics if they exist. To minimize
215794b1a603SIan Rogers 		 * multiplexing, don't request threshold computation.
215894b1a603SIan Rogers 		 */
2159b0a9e8f8SKan Liang 		if (metricgroup__has_metric(pmu, "Default")) {
21601b114824SIan Rogers 			struct evlist *metric_evlist = evlist__new();
21611b114824SIan Rogers 			struct evsel *metric_evsel;
21621b114824SIan Rogers 
21631b114824SIan Rogers 			if (!metric_evlist)
21641b114824SIan Rogers 				return -1;
21651b114824SIan Rogers 
2166b0a9e8f8SKan Liang 			if (metricgroup__parse_groups(metric_evlist, pmu, "Default",
216794b1a603SIan Rogers 							/*metric_no_group=*/false,
216894b1a603SIan Rogers 							/*metric_no_merge=*/false,
216994b1a603SIan Rogers 							/*metric_no_threshold=*/true,
217094b1a603SIan Rogers 							stat_config.user_requested_cpu_list,
217194b1a603SIan Rogers 							stat_config.system_wide,
217294b1a603SIan Rogers 							&stat_config.metric_events) < 0)
217394b1a603SIan Rogers 				return -1;
217406bff3d9SIan Rogers 
21751b114824SIan Rogers 			evlist__for_each_entry(metric_evlist, metric_evsel) {
21761b114824SIan Rogers 				metric_evsel->skippable = true;
21776a80d794SKan Liang 				metric_evsel->default_metricgroup = true;
21781b114824SIan Rogers 			}
21791b114824SIan Rogers 			evlist__splice_list_tail(evsel_list, &metric_evlist->core.entries);
21801b114824SIan Rogers 			evlist__delete(metric_evlist);
21811b114824SIan Rogers 		}
21821b114824SIan Rogers 
2183a9c1ecdaSKan Liang 		/* Platform specific attrs */
2184a9c1ecdaSKan Liang 		if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0)
218542641d6fSKan Liang 			return -1;
21862cba3ffbSIngo Molnar 	}
21872cba3ffbSIngo Molnar 
21882cba3ffbSIngo Molnar 	/* Detailed events get appended to the event list: */
21892cba3ffbSIngo Molnar 
21902cba3ffbSIngo Molnar 	if (detailed_run <  1)
21912cba3ffbSIngo Molnar 		return 0;
21922cba3ffbSIngo Molnar 
21932cba3ffbSIngo Molnar 	/* Append detailed run extra attributes: */
2194e251abeeSArnaldo Carvalho de Melo 	if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
21952cba3ffbSIngo Molnar 		return -1;
21962cba3ffbSIngo Molnar 
21972cba3ffbSIngo Molnar 	if (detailed_run < 2)
21982cba3ffbSIngo Molnar 		return 0;
21992cba3ffbSIngo Molnar 
22002cba3ffbSIngo Molnar 	/* Append very detailed run extra attributes: */
2201e251abeeSArnaldo Carvalho de Melo 	if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
22022cba3ffbSIngo Molnar 		return -1;
22032cba3ffbSIngo Molnar 
22042cba3ffbSIngo Molnar 	if (detailed_run < 3)
22052cba3ffbSIngo Molnar 		return 0;
22062cba3ffbSIngo Molnar 
22072cba3ffbSIngo Molnar 	/* Append very, very detailed run extra attributes: */
2208e251abeeSArnaldo Carvalho de Melo 	return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
22092cba3ffbSIngo Molnar }
22102cba3ffbSIngo Molnar 
22118a59f3ccSJiri Olsa static const char * const stat_record_usage[] = {
22124979d0c7SJiri Olsa 	"perf stat record [<options>]",
22134979d0c7SJiri Olsa 	NULL,
22144979d0c7SJiri Olsa };
22154979d0c7SJiri Olsa 
init_features(struct perf_session * session)22163ba78bd0SJiri Olsa static void init_features(struct perf_session *session)
22173ba78bd0SJiri Olsa {
22183ba78bd0SJiri Olsa 	int feat;
22193ba78bd0SJiri Olsa 
22203ba78bd0SJiri Olsa 	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
22213ba78bd0SJiri Olsa 		perf_header__set_feat(&session->header, feat);
22223ba78bd0SJiri Olsa 
22238002a63fSJiri Olsa 	perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
22243ba78bd0SJiri Olsa 	perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
22253ba78bd0SJiri Olsa 	perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
22263ba78bd0SJiri Olsa 	perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
22273ba78bd0SJiri Olsa 	perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
22283ba78bd0SJiri Olsa }
22293ba78bd0SJiri Olsa 
__cmd_record(int argc,const char ** argv)22304979d0c7SJiri Olsa static int __cmd_record(int argc, const char **argv)
22314979d0c7SJiri Olsa {
22324979d0c7SJiri Olsa 	struct perf_session *session;
22338ceb41d7SJiri Olsa 	struct perf_data *data = &perf_stat.data;
22344979d0c7SJiri Olsa 
22358a59f3ccSJiri Olsa 	argc = parse_options(argc, argv, stat_options, stat_record_usage,
22364979d0c7SJiri Olsa 			     PARSE_OPT_STOP_AT_NON_OPTION);
22374979d0c7SJiri Olsa 
22384979d0c7SJiri Olsa 	if (output_name)
22392d4f2799SJiri Olsa 		data->path = output_name;
22404979d0c7SJiri Olsa 
2241d97ae04bSJiri Olsa 	if (stat_config.run_count != 1 || forever) {
2242e9d6db8eSJiri Olsa 		pr_err("Cannot use -r option with perf stat record.\n");
2243e9d6db8eSJiri Olsa 		return -1;
2244e9d6db8eSJiri Olsa 	}
2245e9d6db8eSJiri Olsa 
22462681bd85SNamhyung Kim 	session = perf_session__new(data, NULL);
22476ef81c55SMamatha Inamdar 	if (IS_ERR(session)) {
22486ef81c55SMamatha Inamdar 		pr_err("Perf session creation failed\n");
22496ef81c55SMamatha Inamdar 		return PTR_ERR(session);
22504979d0c7SJiri Olsa 	}
22514979d0c7SJiri Olsa 
22523ba78bd0SJiri Olsa 	init_features(session);
22533ba78bd0SJiri Olsa 
22544979d0c7SJiri Olsa 	session->evlist   = evsel_list;
22554979d0c7SJiri Olsa 	perf_stat.session = session;
22564979d0c7SJiri Olsa 	perf_stat.record  = true;
22574979d0c7SJiri Olsa 	return argc;
22584979d0c7SJiri Olsa }
22594979d0c7SJiri Olsa 
process_stat_round_event(struct perf_session * session,union perf_event * event)226089f1688aSJiri Olsa static int process_stat_round_event(struct perf_session *session,
226189f1688aSJiri Olsa 				    union perf_event *event)
2262a56f9390SJiri Olsa {
226372932371SJiri Olsa 	struct perf_record_stat_round *stat_round = &event->stat_round;
2264a56f9390SJiri Olsa 	struct timespec tsh, *ts = NULL;
2265a56f9390SJiri Olsa 	const char **argv = session->header.env.cmdline_argv;
2266a56f9390SJiri Olsa 	int argc = session->header.env.nr_cmdline;
2267a56f9390SJiri Olsa 
22688962cbecSNamhyung Kim 	process_counters();
2269a56f9390SJiri Olsa 
2270e3b03b6cSAndi Kleen 	if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
2271e3b03b6cSAndi Kleen 		update_stats(&walltime_nsecs_stats, stat_round->time);
2272a56f9390SJiri Olsa 
2273e3b03b6cSAndi Kleen 	if (stat_config.interval && stat_round->time) {
2274bd48c63eSArnaldo Carvalho de Melo 		tsh.tv_sec  = stat_round->time / NSEC_PER_SEC;
2275bd48c63eSArnaldo Carvalho de Melo 		tsh.tv_nsec = stat_round->time % NSEC_PER_SEC;
2276a56f9390SJiri Olsa 		ts = &tsh;
2277a56f9390SJiri Olsa 	}
2278a56f9390SJiri Olsa 
2279a56f9390SJiri Olsa 	print_counters(ts, argc, argv);
2280a56f9390SJiri Olsa 	return 0;
2281a56f9390SJiri Olsa }
2282a56f9390SJiri Olsa 
228362ba18baSJiri Olsa static
process_stat_config_event(struct perf_session * session,union perf_event * event)228489f1688aSJiri Olsa int process_stat_config_event(struct perf_session *session,
228589f1688aSJiri Olsa 			      union perf_event *event)
228662ba18baSJiri Olsa {
228789f1688aSJiri Olsa 	struct perf_tool *tool = session->tool;
228868d702f7SJiri Olsa 	struct perf_stat *st = container_of(tool, struct perf_stat, tool);
228968d702f7SJiri Olsa 
229062ba18baSJiri Olsa 	perf_event__read_stat_config(&stat_config, &event->stat_config);
229168d702f7SJiri Olsa 
2292315c0a1fSJiri Olsa 	if (perf_cpu_map__empty(st->cpus)) {
229389af4e05SJiri Olsa 		if (st->aggr_mode != AGGR_UNSET)
229489af4e05SJiri Olsa 			pr_warning("warning: processing task data, aggregation mode not set\n");
2295ae7e6492SNamhyung Kim 	} else if (st->aggr_mode != AGGR_UNSET) {
229689af4e05SJiri Olsa 		stat_config.aggr_mode = st->aggr_mode;
2297ae7e6492SNamhyung Kim 	}
229889af4e05SJiri Olsa 
22998ceb41d7SJiri Olsa 	if (perf_stat.data.is_pipe)
230068d702f7SJiri Olsa 		perf_stat_init_aggr_mode();
230168d702f7SJiri Olsa 	else
230268d702f7SJiri Olsa 		perf_stat_init_aggr_mode_file(st);
230368d702f7SJiri Olsa 
2304ae7e6492SNamhyung Kim 	if (stat_config.aggr_map) {
2305ae7e6492SNamhyung Kim 		int nr_aggr = stat_config.aggr_map->nr;
2306ae7e6492SNamhyung Kim 
2307ae7e6492SNamhyung Kim 		if (evlist__alloc_aggr_stats(session->evlist, nr_aggr) < 0) {
2308ae7e6492SNamhyung Kim 			pr_err("cannot allocate aggr counts\n");
2309ae7e6492SNamhyung Kim 			return -1;
2310ae7e6492SNamhyung Kim 		}
2311ae7e6492SNamhyung Kim 	}
231262ba18baSJiri Olsa 	return 0;
231362ba18baSJiri Olsa }
231462ba18baSJiri Olsa 
set_maps(struct perf_stat * st)23151975d36eSJiri Olsa static int set_maps(struct perf_stat *st)
23161975d36eSJiri Olsa {
23171975d36eSJiri Olsa 	if (!st->cpus || !st->threads)
23181975d36eSJiri Olsa 		return 0;
23191975d36eSJiri Olsa 
23201975d36eSJiri Olsa 	if (WARN_ONCE(st->maps_allocated, "stats double allocation\n"))
23211975d36eSJiri Olsa 		return -EINVAL;
23221975d36eSJiri Olsa 
2323453fa030SJiri Olsa 	perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads);
23241975d36eSJiri Olsa 
23251f297a6eSNamhyung Kim 	if (evlist__alloc_stats(&stat_config, evsel_list, /*alloc_raw=*/true))
23261975d36eSJiri Olsa 		return -ENOMEM;
23271975d36eSJiri Olsa 
23281975d36eSJiri Olsa 	st->maps_allocated = true;
23291975d36eSJiri Olsa 	return 0;
23301975d36eSJiri Olsa }
23311975d36eSJiri Olsa 
23321975d36eSJiri Olsa static
process_thread_map_event(struct perf_session * session,union perf_event * event)233389f1688aSJiri Olsa int process_thread_map_event(struct perf_session *session,
233489f1688aSJiri Olsa 			     union perf_event *event)
23351975d36eSJiri Olsa {
233689f1688aSJiri Olsa 	struct perf_tool *tool = session->tool;
23371975d36eSJiri Olsa 	struct perf_stat *st = container_of(tool, struct perf_stat, tool);
23381975d36eSJiri Olsa 
23391975d36eSJiri Olsa 	if (st->threads) {
23401975d36eSJiri Olsa 		pr_warning("Extra thread map event, ignoring.\n");
23411975d36eSJiri Olsa 		return 0;
23421975d36eSJiri Olsa 	}
23431975d36eSJiri Olsa 
23441975d36eSJiri Olsa 	st->threads = thread_map__new_event(&event->thread_map);
23451975d36eSJiri Olsa 	if (!st->threads)
23461975d36eSJiri Olsa 		return -ENOMEM;
23471975d36eSJiri Olsa 
23481975d36eSJiri Olsa 	return set_maps(st);
23491975d36eSJiri Olsa }
23501975d36eSJiri Olsa 
23511975d36eSJiri Olsa static
process_cpu_map_event(struct perf_session * session,union perf_event * event)235289f1688aSJiri Olsa int process_cpu_map_event(struct perf_session *session,
235389f1688aSJiri Olsa 			  union perf_event *event)
23541975d36eSJiri Olsa {
235589f1688aSJiri Olsa 	struct perf_tool *tool = session->tool;
23561975d36eSJiri Olsa 	struct perf_stat *st = container_of(tool, struct perf_stat, tool);
2357f854839bSJiri Olsa 	struct perf_cpu_map *cpus;
23581975d36eSJiri Olsa 
23591975d36eSJiri Olsa 	if (st->cpus) {
23601975d36eSJiri Olsa 		pr_warning("Extra cpu map event, ignoring.\n");
23611975d36eSJiri Olsa 		return 0;
23621975d36eSJiri Olsa 	}
23631975d36eSJiri Olsa 
23641975d36eSJiri Olsa 	cpus = cpu_map__new_data(&event->cpu_map.data);
23651975d36eSJiri Olsa 	if (!cpus)
23661975d36eSJiri Olsa 		return -ENOMEM;
23671975d36eSJiri Olsa 
23681975d36eSJiri Olsa 	st->cpus = cpus;
23691975d36eSJiri Olsa 	return set_maps(st);
23701975d36eSJiri Olsa }
23711975d36eSJiri Olsa 
23728a59f3ccSJiri Olsa static const char * const stat_report_usage[] = {
2373ba6039b6SJiri Olsa 	"perf stat report [<options>]",
2374ba6039b6SJiri Olsa 	NULL,
2375ba6039b6SJiri Olsa };
2376ba6039b6SJiri Olsa 
2377ba6039b6SJiri Olsa static struct perf_stat perf_stat = {
2378ba6039b6SJiri Olsa 	.tool = {
2379ba6039b6SJiri Olsa 		.attr		= perf_event__process_attr,
2380fa6ea781SJiri Olsa 		.event_update	= perf_event__process_event_update,
23811975d36eSJiri Olsa 		.thread_map	= process_thread_map_event,
23821975d36eSJiri Olsa 		.cpu_map	= process_cpu_map_event,
238362ba18baSJiri Olsa 		.stat_config	= process_stat_config_event,
2384a56f9390SJiri Olsa 		.stat		= perf_event__process_stat_event,
2385a56f9390SJiri Olsa 		.stat_round	= process_stat_round_event,
2386ba6039b6SJiri Olsa 	},
238789af4e05SJiri Olsa 	.aggr_mode	= AGGR_UNSET,
2388995ed074SK Prateek Nayak 	.aggr_level	= 0,
2389ba6039b6SJiri Olsa };
2390ba6039b6SJiri Olsa 
__cmd_report(int argc,const char ** argv)2391ba6039b6SJiri Olsa static int __cmd_report(int argc, const char **argv)
2392ba6039b6SJiri Olsa {
2393ba6039b6SJiri Olsa 	struct perf_session *session;
2394ba6039b6SJiri Olsa 	const struct option options[] = {
2395ba6039b6SJiri Olsa 	OPT_STRING('i', "input", &input_name, "file", "input file name"),
239689af4e05SJiri Olsa 	OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode,
239789af4e05SJiri Olsa 		     "aggregate counts per processor socket", AGGR_SOCKET),
2398db5742b6SKan Liang 	OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode,
2399db5742b6SKan Liang 		     "aggregate counts per processor die", AGGR_DIE),
2400aab667caSK Prateek Nayak 	OPT_CALLBACK_OPTARG(0, "per-cache", &perf_stat.aggr_mode, &perf_stat.aggr_level,
2401aab667caSK Prateek Nayak 			    "cache level",
2402aab667caSK Prateek Nayak 			    "aggregate count at this cache level (Default: LLC)",
2403aab667caSK Prateek Nayak 			    parse_cache_level),
240489af4e05SJiri Olsa 	OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode,
240589af4e05SJiri Olsa 		     "aggregate counts per physical processor core", AGGR_CORE),
240686895b48SJiri Olsa 	OPT_SET_UINT(0, "per-node", &perf_stat.aggr_mode,
240786895b48SJiri Olsa 		     "aggregate counts per numa node", AGGR_NODE),
240889af4e05SJiri Olsa 	OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode,
240989af4e05SJiri Olsa 		     "disable CPU count aggregation", AGGR_NONE),
2410ba6039b6SJiri Olsa 	OPT_END()
2411ba6039b6SJiri Olsa 	};
2412ba6039b6SJiri Olsa 	struct stat st;
2413ba6039b6SJiri Olsa 	int ret;
2414ba6039b6SJiri Olsa 
24158a59f3ccSJiri Olsa 	argc = parse_options(argc, argv, options, stat_report_usage, 0);
2416ba6039b6SJiri Olsa 
2417ba6039b6SJiri Olsa 	if (!input_name || !strlen(input_name)) {
2418ba6039b6SJiri Olsa 		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
2419ba6039b6SJiri Olsa 			input_name = "-";
2420ba6039b6SJiri Olsa 		else
2421ba6039b6SJiri Olsa 			input_name = "perf.data";
2422ba6039b6SJiri Olsa 	}
2423ba6039b6SJiri Olsa 
24242d4f2799SJiri Olsa 	perf_stat.data.path = input_name;
24258ceb41d7SJiri Olsa 	perf_stat.data.mode = PERF_DATA_MODE_READ;
2426ba6039b6SJiri Olsa 
24272681bd85SNamhyung Kim 	session = perf_session__new(&perf_stat.data, &perf_stat.tool);
24286ef81c55SMamatha Inamdar 	if (IS_ERR(session))
24296ef81c55SMamatha Inamdar 		return PTR_ERR(session);
2430ba6039b6SJiri Olsa 
2431ba6039b6SJiri Olsa 	perf_stat.session  = session;
2432ba6039b6SJiri Olsa 	stat_config.output = stderr;
24332b87be18SIan Rogers 	evlist__delete(evsel_list);
2434ba6039b6SJiri Olsa 	evsel_list         = session->evlist;
2435ba6039b6SJiri Olsa 
2436ba6039b6SJiri Olsa 	ret = perf_session__process_events(session);
2437ba6039b6SJiri Olsa 	if (ret)
2438ba6039b6SJiri Olsa 		return ret;
2439ba6039b6SJiri Olsa 
2440ba6039b6SJiri Olsa 	perf_session__delete(session);
2441ba6039b6SJiri Olsa 	return 0;
2442ba6039b6SJiri Olsa }
2443ba6039b6SJiri Olsa 
setup_system_wide(int forks)2444e3ba76deSJiri Olsa static void setup_system_wide(int forks)
2445e3ba76deSJiri Olsa {
2446e3ba76deSJiri Olsa 	/*
2447e3ba76deSJiri Olsa 	 * Make system wide (-a) the default target if
2448e3ba76deSJiri Olsa 	 * no target was specified and one of following
2449e3ba76deSJiri Olsa 	 * conditions is met:
2450e3ba76deSJiri Olsa 	 *
2451e3ba76deSJiri Olsa 	 *   - there's no workload specified
2452e3ba76deSJiri Olsa 	 *   - there is workload specified but all requested
2453e3ba76deSJiri Olsa 	 *     events are system wide events
2454e3ba76deSJiri Olsa 	 */
2455e3ba76deSJiri Olsa 	if (!target__none(&target))
2456e3ba76deSJiri Olsa 		return;
2457e3ba76deSJiri Olsa 
2458e3ba76deSJiri Olsa 	if (!forks)
2459e3ba76deSJiri Olsa 		target.system_wide = true;
2460e3ba76deSJiri Olsa 	else {
246132dcd021SJiri Olsa 		struct evsel *counter;
2462e3ba76deSJiri Olsa 
2463e3ba76deSJiri Olsa 		evlist__for_each_entry(evsel_list, counter) {
2464d3345fecSAdrian Hunter 			if (!counter->core.requires_cpu &&
2465ce1d3bc2SArnaldo Carvalho de Melo 			    !evsel__name_is(counter, "duration_time")) {
2466e3ba76deSJiri Olsa 				return;
2467e3ba76deSJiri Olsa 			}
2468002a3d69SJin Yao 		}
2469e3ba76deSJiri Olsa 
24706484d2f9SJiri Olsa 		if (evsel_list->core.nr_entries)
2471e3ba76deSJiri Olsa 			target.system_wide = true;
2472e3ba76deSJiri Olsa 	}
2473e3ba76deSJiri Olsa }
2474e3ba76deSJiri Olsa 
cmd_stat(int argc,const char ** argv)2475b0ad8ea6SArnaldo Carvalho de Melo int cmd_stat(int argc, const char **argv)
247686470930SIngo Molnar {
2477b070a547SArnaldo Carvalho de Melo 	const char * const stat_usage[] = {
2478b070a547SArnaldo Carvalho de Melo 		"perf stat [<options>] [<command>]",
2479b070a547SArnaldo Carvalho de Melo 		NULL
2480b070a547SArnaldo Carvalho de Melo 	};
2481fa853c4bSSong Liu 	int status = -EINVAL, run_idx, err;
24824aa9015fSStephane Eranian 	const char *mode;
24835821522eSJiri Olsa 	FILE *output = stderr;
2484f1f8ad52Syuzhoujian 	unsigned int interval, timeout;
2485ba6039b6SJiri Olsa 	const char * const stat_subcommands[] = { "record", "report" };
2486fa853c4bSSong Liu 	char errbuf[BUFSIZ];
248742202dd5SIngo Molnar 
24885af52b51SStephane Eranian 	setlocale(LC_ALL, "");
24895af52b51SStephane Eranian 
24900f98b11cSJiri Olsa 	evsel_list = evlist__new();
2491361c99a6SArnaldo Carvalho de Melo 	if (evsel_list == NULL)
2492361c99a6SArnaldo Carvalho de Melo 		return -ENOMEM;
2493361c99a6SArnaldo Carvalho de Melo 
24941669e509SWang Nan 	parse_events__shrink_config_terms();
249551433eadSMichael Petlan 
249651433eadSMichael Petlan 	/* String-parsing callback-based options would segfault when negated */
249751433eadSMichael Petlan 	set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG);
249851433eadSMichael Petlan 	set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG);
249951433eadSMichael Petlan 	set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG);
250051433eadSMichael Petlan 
25014979d0c7SJiri Olsa 	argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
25024979d0c7SJiri Olsa 					(const char **) stat_usage,
2503a0541234SAnton Blanchard 					PARSE_OPT_STOP_AT_NON_OPTION);
2504d7470b6aSStephane Eranian 
2505fa7070a3SJiri Olsa 	if (stat_config.csv_sep) {
2506fa7070a3SJiri Olsa 		stat_config.csv_output = true;
2507fa7070a3SJiri Olsa 		if (!strcmp(stat_config.csv_sep, "\\t"))
2508fa7070a3SJiri Olsa 			stat_config.csv_sep = "\t";
25096edb78a2SJiri Olsa 	} else
2510fa7070a3SJiri Olsa 		stat_config.csv_sep = DEFAULT_SEPARATOR;
25116edb78a2SJiri Olsa 
2512ae0f4eb3SWei Li 	if (argc && strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
25134979d0c7SJiri Olsa 		argc = __cmd_record(argc, argv);
25144979d0c7SJiri Olsa 		if (argc < 0)
25154979d0c7SJiri Olsa 			return -1;
2516ae0f4eb3SWei Li 	} else if (argc && strlen(argv[0]) > 2 && strstarts("report", argv[0]))
2517ba6039b6SJiri Olsa 		return __cmd_report(argc, argv);
25184979d0c7SJiri Olsa 
2519ec0d3d1fSJiri Olsa 	interval = stat_config.interval;
2520f1f8ad52Syuzhoujian 	timeout = stat_config.timeout;
2521ec0d3d1fSJiri Olsa 
25224979d0c7SJiri Olsa 	/*
25234979d0c7SJiri Olsa 	 * For record command the -o is already taken care of.
25244979d0c7SJiri Olsa 	 */
25254979d0c7SJiri Olsa 	if (!STAT_RECORD && output_name && strcmp(output_name, "-"))
25264aa9015fSStephane Eranian 		output = NULL;
25274aa9015fSStephane Eranian 
252856f3bae7SJim Cromie 	if (output_name && output_fd) {
252956f3bae7SJim Cromie 		fprintf(stderr, "cannot use both --output and --log-fd\n");
2530e0547311SJiri Olsa 		parse_options_usage(stat_usage, stat_options, "o", 1);
2531e0547311SJiri Olsa 		parse_options_usage(NULL, stat_options, "log-fd", 0);
2532cc03c542SNamhyung Kim 		goto out;
253356f3bae7SJim Cromie 	}
2534fc3e4d07SStephane Eranian 
25350ce5aa02SJiri Olsa 	if (stat_config.metric_only && stat_config.aggr_mode == AGGR_THREAD) {
253654b50916SAndi Kleen 		fprintf(stderr, "--metric-only is not supported with --per-thread\n");
253754b50916SAndi Kleen 		goto out;
253854b50916SAndi Kleen 	}
253954b50916SAndi Kleen 
2540d97ae04bSJiri Olsa 	if (stat_config.metric_only && stat_config.run_count > 1) {
254154b50916SAndi Kleen 		fprintf(stderr, "--metric-only is not supported with -r\n");
254254b50916SAndi Kleen 		goto out;
254354b50916SAndi Kleen 	}
254454b50916SAndi Kleen 
254554ac0b1bSJiri Olsa 	if (stat_config.walltime_run_table && stat_config.run_count <= 1) {
2546e55c14afSJiri Olsa 		fprintf(stderr, "--table is only supported with -r\n");
2547e55c14afSJiri Olsa 		parse_options_usage(stat_usage, stat_options, "r", 1);
2548e55c14afSJiri Olsa 		parse_options_usage(NULL, stat_options, "table", 0);
2549e55c14afSJiri Olsa 		goto out;
2550e55c14afSJiri Olsa 	}
2551e55c14afSJiri Olsa 
2552fc3e4d07SStephane Eranian 	if (output_fd < 0) {
2553fc3e4d07SStephane Eranian 		fprintf(stderr, "argument to --log-fd must be a > 0\n");
2554e0547311SJiri Olsa 		parse_options_usage(stat_usage, stat_options, "log-fd", 0);
2555cc03c542SNamhyung Kim 		goto out;
2556fc3e4d07SStephane Eranian 	}
2557fc3e4d07SStephane Eranian 
2558a527c2c1SJames Clark 	if (!output && !quiet) {
25594aa9015fSStephane Eranian 		struct timespec tm;
25604aa9015fSStephane Eranian 		mode = append_file ? "a" : "w";
25614aa9015fSStephane Eranian 
25624aa9015fSStephane Eranian 		output = fopen(output_name, mode);
25634aa9015fSStephane Eranian 		if (!output) {
25644aa9015fSStephane Eranian 			perror("failed to create output file");
2565fceda7feSDavid Ahern 			return -1;
25664aa9015fSStephane Eranian 		}
25674228df84SIan Rogers 		if (!stat_config.json_output) {
25684aa9015fSStephane Eranian 			clock_gettime(CLOCK_REALTIME, &tm);
25694aa9015fSStephane Eranian 			fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
25704228df84SIan Rogers 		}
2571fc3e4d07SStephane Eranian 	} else if (output_fd > 0) {
257256f3bae7SJim Cromie 		mode = append_file ? "a" : "w";
257356f3bae7SJim Cromie 		output = fdopen(output_fd, mode);
257456f3bae7SJim Cromie 		if (!output) {
257556f3bae7SJim Cromie 			perror("Failed opening logfd");
257656f3bae7SJim Cromie 			return -errno;
257756f3bae7SJim Cromie 		}
25784aa9015fSStephane Eranian 	}
25794aa9015fSStephane Eranian 
2580f5bc4428SNamhyung Kim 	if (stat_config.interval_clear && !isatty(fileno(output))) {
2581f5bc4428SNamhyung Kim 		fprintf(stderr, "--interval-clear does not work with output\n");
2582f5bc4428SNamhyung Kim 		parse_options_usage(stat_usage, stat_options, "o", 1);
2583f5bc4428SNamhyung Kim 		parse_options_usage(NULL, stat_options, "log-fd", 0);
2584f5bc4428SNamhyung Kim 		parse_options_usage(NULL, stat_options, "interval-clear", 0);
2585f5bc4428SNamhyung Kim 		return -1;
2586f5bc4428SNamhyung Kim 	}
2587f5bc4428SNamhyung Kim 
25885821522eSJiri Olsa 	stat_config.output = output;
25895821522eSJiri Olsa 
2590d7470b6aSStephane Eranian 	/*
2591d7470b6aSStephane Eranian 	 * let the spreadsheet do the pretty-printing
2592d7470b6aSStephane Eranian 	 */
2593fa7070a3SJiri Olsa 	if (stat_config.csv_output) {
259461a9f324SJim Cromie 		/* User explicitly passed -B? */
2595d7470b6aSStephane Eranian 		if (big_num_opt == 1) {
2596d7470b6aSStephane Eranian 			fprintf(stderr, "-B option not supported with -x\n");
2597e0547311SJiri Olsa 			parse_options_usage(stat_usage, stat_options, "B", 1);
2598e0547311SJiri Olsa 			parse_options_usage(NULL, stat_options, "x", 1);
2599cc03c542SNamhyung Kim 			goto out;
2600d7470b6aSStephane Eranian 		} else /* Nope, so disable big number formatting */
260134ff0866SJiri Olsa 			stat_config.big_num = false;
2602d7470b6aSStephane Eranian 	} else if (big_num_opt == 0) /* User passed --no-big-num */
260334ff0866SJiri Olsa 		stat_config.big_num = false;
2604d7470b6aSStephane Eranian 
2605fa853c4bSSong Liu 	err = target__validate(&target);
2606fa853c4bSSong Liu 	if (err) {
2607fa853c4bSSong Liu 		target__strerror(&target, err, errbuf, BUFSIZ);
2608fa853c4bSSong Liu 		pr_warning("%s\n", errbuf);
2609fa853c4bSSong Liu 	}
2610fa853c4bSSong Liu 
2611e3ba76deSJiri Olsa 	setup_system_wide(argc);
2612ac3063bdSDavid Ahern 
26130ce2da14SJiri Olsa 	/*
26140ce2da14SJiri Olsa 	 * Display user/system times only for single
26150ce2da14SJiri Olsa 	 * run and when there's specified tracee.
26160ce2da14SJiri Olsa 	 */
2617d97ae04bSJiri Olsa 	if ((stat_config.run_count == 1) && target__none(&target))
26188897a891SJiri Olsa 		stat_config.ru_display = true;
26190ce2da14SJiri Olsa 
2620d97ae04bSJiri Olsa 	if (stat_config.run_count < 0) {
2621cc03c542SNamhyung Kim 		pr_err("Run count must be a positive number\n");
2622e0547311SJiri Olsa 		parse_options_usage(stat_usage, stat_options, "r", 1);
2623cc03c542SNamhyung Kim 		goto out;
2624d97ae04bSJiri Olsa 	} else if (stat_config.run_count == 0) {
2625a7e191c3SFrederik Deweerdt 		forever = true;
2626d97ae04bSJiri Olsa 		stat_config.run_count = 1;
2627a7e191c3SFrederik Deweerdt 	}
262886470930SIngo Molnar 
262954ac0b1bSJiri Olsa 	if (stat_config.walltime_run_table) {
263054ac0b1bSJiri Olsa 		stat_config.walltime_run = zalloc(stat_config.run_count * sizeof(stat_config.walltime_run[0]));
263154ac0b1bSJiri Olsa 		if (!stat_config.walltime_run) {
2632e55c14afSJiri Olsa 			pr_err("failed to setup -r option");
2633e55c14afSJiri Olsa 			goto out;
2634e55c14afSJiri Olsa 		}
2635e55c14afSJiri Olsa 	}
2636e55c14afSJiri Olsa 
26371d9f8d1bSJin Yao 	if ((stat_config.aggr_mode == AGGR_THREAD) &&
26381d9f8d1bSJin Yao 		!target__has_task(&target)) {
26391d9f8d1bSJin Yao 		if (!target.system_wide || target.cpu_list) {
26401d9f8d1bSJin Yao 			fprintf(stderr, "The --per-thread option is only "
26411d9f8d1bSJin Yao 				"available when monitoring via -p -t -a "
26421d9f8d1bSJin Yao 				"options or only --per-thread.\n");
2643e0547311SJiri Olsa 			parse_options_usage(NULL, stat_options, "p", 1);
2644e0547311SJiri Olsa 			parse_options_usage(NULL, stat_options, "t", 1);
264532b8af82SJiri Olsa 			goto out;
264632b8af82SJiri Olsa 		}
26471d9f8d1bSJin Yao 	}
264832b8af82SJiri Olsa 
264932b8af82SJiri Olsa 	/*
265032b8af82SJiri Olsa 	 * no_aggr, cgroup are for system-wide only
265132b8af82SJiri Olsa 	 * --per-thread is aggregated per thread, we dont mix it with cpu mode
265232b8af82SJiri Olsa 	 */
2653421a50f3SJiri Olsa 	if (((stat_config.aggr_mode != AGGR_GLOBAL &&
26541c02f6c9SNamhyung Kim 	      stat_config.aggr_mode != AGGR_THREAD) ||
26551c02f6c9SNamhyung Kim 	     (nr_cgroups || stat_config.cgroup_list)) &&
2656602ad878SArnaldo Carvalho de Melo 	    !target__has_cpu(&target)) {
2657023695d9SStephane Eranian 		fprintf(stderr, "both cgroup and no-aggregation "
2658023695d9SStephane Eranian 			"modes only available in system-wide mode\n");
2659023695d9SStephane Eranian 
2660e0547311SJiri Olsa 		parse_options_usage(stat_usage, stat_options, "G", 1);
2661e0547311SJiri Olsa 		parse_options_usage(NULL, stat_options, "A", 1);
2662e0547311SJiri Olsa 		parse_options_usage(NULL, stat_options, "a", 1);
26631c02f6c9SNamhyung Kim 		parse_options_usage(NULL, stat_options, "for-each-cgroup", 0);
2664cc03c542SNamhyung Kim 		goto out;
2665d7e7a451SStephane Eranian 	}
2666d7e7a451SStephane Eranian 
2667f07952b1SAlexander Antonov 	if (stat_config.iostat_run) {
2668f07952b1SAlexander Antonov 		status = iostat_prepare(evsel_list, &stat_config);
2669f07952b1SAlexander Antonov 		if (status)
2670f07952b1SAlexander Antonov 			goto out;
2671f07952b1SAlexander Antonov 		if (iostat_mode == IOSTAT_LIST) {
2672f07952b1SAlexander Antonov 			iostat_list(evsel_list, &stat_config);
2673f07952b1SAlexander Antonov 			goto out;
26747c0a6144SYang Jihong 		} else if (verbose > 0)
2675f07952b1SAlexander Antonov 			iostat_list(evsel_list, &stat_config);
2676e4fe5d73SLike Xu 		if (iostat_mode == IOSTAT_RUN && !target__has_cpu(&target))
2677e4fe5d73SLike Xu 			target.system_wide = true;
2678f07952b1SAlexander Antonov 	}
2679f07952b1SAlexander Antonov 
2680a4b8cfcaSIan Rogers 	if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide))
2681a4b8cfcaSIan Rogers 		target.per_thread = true;
2682a4b8cfcaSIan Rogers 
26831725e9cdSIan Rogers 	stat_config.system_wide = target.system_wide;
26841725e9cdSIan Rogers 	if (target.cpu_list) {
26851725e9cdSIan Rogers 		stat_config.user_requested_cpu_list = strdup(target.cpu_list);
26861725e9cdSIan Rogers 		if (!stat_config.user_requested_cpu_list) {
26871725e9cdSIan Rogers 			status = -ENOMEM;
26881725e9cdSIan Rogers 			goto out;
26891725e9cdSIan Rogers 		}
26901725e9cdSIan Rogers 	}
26911725e9cdSIan Rogers 
2692a4b8cfcaSIan Rogers 	/*
2693a4b8cfcaSIan Rogers 	 * Metric parsing needs to be delayed as metrics may optimize events
2694a4b8cfcaSIan Rogers 	 * knowing the target is system-wide.
2695a4b8cfcaSIan Rogers 	 */
2696a4b8cfcaSIan Rogers 	if (metrics) {
2697dae47d39SIan Rogers 		const char *pmu = parse_events_option_args.pmu_filter ?: "all";
2698*ac95df46SIan Rogers 		int ret = metricgroup__parse_groups(evsel_list, pmu, metrics,
2699a4b8cfcaSIan Rogers 						stat_config.metric_no_group,
2700a4b8cfcaSIan Rogers 						stat_config.metric_no_merge,
27011fd09e29SIan Rogers 						stat_config.metric_no_threshold,
27021725e9cdSIan Rogers 						stat_config.user_requested_cpu_list,
27031725e9cdSIan Rogers 						stat_config.system_wide,
2704a4b8cfcaSIan Rogers 						&stat_config.metric_events);
2705*ac95df46SIan Rogers 
2706a4b8cfcaSIan Rogers 		zfree(&metrics);
2707*ac95df46SIan Rogers 		if (ret) {
2708*ac95df46SIan Rogers 			status = ret;
2709*ac95df46SIan Rogers 			goto out;
2710*ac95df46SIan Rogers 		}
2711a4b8cfcaSIan Rogers 	}
2712a4b8cfcaSIan Rogers 
27132cba3ffbSIngo Molnar 	if (add_default_attributes())
2714c6264defSIngo Molnar 		goto out;
271586470930SIngo Molnar 
2716d1c5a0e8SNamhyung Kim 	if (stat_config.cgroup_list) {
2717d1c5a0e8SNamhyung Kim 		if (nr_cgroups > 0) {
2718d1c5a0e8SNamhyung Kim 			pr_err("--cgroup and --for-each-cgroup cannot be used together\n");
2719d1c5a0e8SNamhyung Kim 			parse_options_usage(stat_usage, stat_options, "G", 1);
2720d1c5a0e8SNamhyung Kim 			parse_options_usage(NULL, stat_options, "for-each-cgroup", 0);
2721d1c5a0e8SNamhyung Kim 			goto out;
2722d1c5a0e8SNamhyung Kim 		}
2723d1c5a0e8SNamhyung Kim 
2724b214ba8cSNamhyung Kim 		if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list,
2725bb1c15b6SNamhyung Kim 					  &stat_config.metric_events, true) < 0) {
2726bb1c15b6SNamhyung Kim 			parse_options_usage(stat_usage, stat_options,
2727bb1c15b6SNamhyung Kim 					    "for-each-cgroup", 0);
2728d1c5a0e8SNamhyung Kim 			goto out;
2729d1c5a0e8SNamhyung Kim 		}
2730bb1c15b6SNamhyung Kim 	}
2731d1c5a0e8SNamhyung Kim 
27325ac72634SIan Rogers 	evlist__warn_user_requested_cpus(evsel_list, target.cpu_list);
27331d3351e6SJin Yao 
27347748bb71SArnaldo Carvalho de Melo 	if (evlist__create_maps(evsel_list, &target) < 0) {
2735602ad878SArnaldo Carvalho de Melo 		if (target__has_task(&target)) {
27365c98d466SArnaldo Carvalho de Melo 			pr_err("Problems finding threads of monitor\n");
2737e0547311SJiri Olsa 			parse_options_usage(stat_usage, stat_options, "p", 1);
2738e0547311SJiri Olsa 			parse_options_usage(NULL, stat_options, "t", 1);
2739602ad878SArnaldo Carvalho de Melo 		} else if (target__has_cpu(&target)) {
274060d567e2SArnaldo Carvalho de Melo 			perror("failed to parse CPUs map");
2741e0547311SJiri Olsa 			parse_options_usage(stat_usage, stat_options, "C", 1);
2742e0547311SJiri Olsa 			parse_options_usage(NULL, stat_options, "a", 1);
2743cc03c542SNamhyung Kim 		}
2744cc03c542SNamhyung Kim 		goto out;
274560d567e2SArnaldo Carvalho de Melo 	}
274632b8af82SJiri Olsa 
2747a9a17902SJiri Olsa 	evlist__check_cpu_maps(evsel_list);
2748a9a17902SJiri Olsa 
274932b8af82SJiri Olsa 	/*
275032b8af82SJiri Olsa 	 * Initialize thread_map with comm names,
275132b8af82SJiri Olsa 	 * so we could print it out on output.
275232b8af82SJiri Olsa 	 */
275356739444SJin Yao 	if (stat_config.aggr_mode == AGGR_THREAD) {
275403617c22SJiri Olsa 		thread_map__read_comms(evsel_list->core.threads);
275556739444SJin Yao 	}
275632b8af82SJiri Olsa 
275786895b48SJiri Olsa 	if (stat_config.aggr_mode == AGGR_NODE)
275886895b48SJiri Olsa 		cpu__setup_cpunode_map();
275986895b48SJiri Olsa 
2760db06a269Syuzhoujian 	if (stat_config.times && interval)
2761db06a269Syuzhoujian 		interval_count = true;
2762db06a269Syuzhoujian 	else if (stat_config.times && !interval) {
2763db06a269Syuzhoujian 		pr_err("interval-count option should be used together with "
2764db06a269Syuzhoujian 				"interval-print.\n");
2765db06a269Syuzhoujian 		parse_options_usage(stat_usage, stat_options, "interval-count", 0);
2766db06a269Syuzhoujian 		parse_options_usage(stat_usage, stat_options, "I", 1);
2767db06a269Syuzhoujian 		goto out;
2768db06a269Syuzhoujian 	}
2769c45c6ea2SStephane Eranian 
2770f1f8ad52Syuzhoujian 	if (timeout && timeout < 100) {
2771f1f8ad52Syuzhoujian 		if (timeout < 10) {
2772f1f8ad52Syuzhoujian 			pr_err("timeout must be >= 10ms.\n");
2773f1f8ad52Syuzhoujian 			parse_options_usage(stat_usage, stat_options, "timeout", 0);
2774f1f8ad52Syuzhoujian 			goto out;
2775f1f8ad52Syuzhoujian 		} else
2776f1f8ad52Syuzhoujian 			pr_warning("timeout < 100ms. "
2777f1f8ad52Syuzhoujian 				   "The overhead percentage could be high in some cases. "
2778f1f8ad52Syuzhoujian 				   "Please proceed with caution.\n");
2779f1f8ad52Syuzhoujian 	}
2780f1f8ad52Syuzhoujian 	if (timeout && interval) {
2781f1f8ad52Syuzhoujian 		pr_err("timeout option is not supported with interval-print.\n");
2782f1f8ad52Syuzhoujian 		parse_options_usage(stat_usage, stat_options, "timeout", 0);
2783f1f8ad52Syuzhoujian 		parse_options_usage(stat_usage, stat_options, "I", 1);
2784f1f8ad52Syuzhoujian 		goto out;
2785f1f8ad52Syuzhoujian 	}
2786f1f8ad52Syuzhoujian 
27871f297a6eSNamhyung Kim 	if (perf_stat_init_aggr_mode())
278803ad9747SArnaldo Carvalho de Melo 		goto out;
2789d6d901c2SZhang, Yanmin 
27901f297a6eSNamhyung Kim 	if (evlist__alloc_stats(&stat_config, evsel_list, interval))
279103ad9747SArnaldo Carvalho de Melo 		goto out;
279286ee6e18SStephane Eranian 
279386470930SIngo Molnar 	/*
27947d9ad16aSJiri Olsa 	 * Set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless
27957d9ad16aSJiri Olsa 	 * while avoiding that older tools show confusing messages.
27967d9ad16aSJiri Olsa 	 *
27977d9ad16aSJiri Olsa 	 * However for pipe sessions we need to keep it zero,
27987d9ad16aSJiri Olsa 	 * because script's perf_evsel__check_attr is triggered
27997d9ad16aSJiri Olsa 	 * by attr->sample_type != 0, and we can't run it on
28007d9ad16aSJiri Olsa 	 * stat sessions.
28017d9ad16aSJiri Olsa 	 */
28027d9ad16aSJiri Olsa 	stat_config.identifier = !(STAT_RECORD && perf_stat.data.is_pipe);
28037d9ad16aSJiri Olsa 
28047d9ad16aSJiri Olsa 	/*
280586470930SIngo Molnar 	 * We dont want to block the signals - that would cause
280686470930SIngo Molnar 	 * child tasks to inherit that and Ctrl-C would not work.
280786470930SIngo Molnar 	 * What we want is for Ctrl-C to work in the exec()-ed
280886470930SIngo Molnar 	 * task, but being ignored by perf stat itself:
280986470930SIngo Molnar 	 */
2810f7b7c26eSPeter Zijlstra 	atexit(sig_atexit);
2811a7e191c3SFrederik Deweerdt 	if (!forever)
281286470930SIngo Molnar 		signal(SIGINT,  skip_signal);
281313370a9bSStephane Eranian 	signal(SIGCHLD, skip_signal);
281486470930SIngo Molnar 	signal(SIGALRM, skip_signal);
281586470930SIngo Molnar 	signal(SIGABRT, skip_signal);
281686470930SIngo Molnar 
281727e9769aSAlexey Budankov 	if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack))
281827e9769aSAlexey Budankov 		goto out;
281927e9769aSAlexey Budankov 
2820448ce0e6SGang Li 	/* Enable ignoring missing threads when -p option is defined. */
2821448ce0e6SGang Li 	evlist__first(evsel_list)->ignore_missing_thread = target.pid;
282242202dd5SIngo Molnar 	status = 0;
2823d97ae04bSJiri Olsa 	for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) {
2824d97ae04bSJiri Olsa 		if (stat_config.run_count != 1 && verbose > 0)
28254aa9015fSStephane Eranian 			fprintf(output, "[ perf stat: executing run #%d ... ]\n",
28264aa9015fSStephane Eranian 				run_idx + 1);
2827f9cef0a9SIngo Molnar 
2828b63fd11cSSrikar Dronamraju 		if (run_idx != 0)
282953f5e908SArnaldo Carvalho de Melo 			evlist__reset_prev_raw_counts(evsel_list);
2830b63fd11cSSrikar Dronamraju 
2831e55c14afSJiri Olsa 		status = run_perf_stat(argc, argv, run_idx);
2832443f2d5bSSrikar Dronamraju 		if (forever && status != -1 && !interval) {
2833d4f63a47SJiri Olsa 			print_counters(NULL, argc, argv);
2834254ecbc7SJiri Olsa 			perf_stat__reset_stats();
2835a7e191c3SFrederik Deweerdt 		}
283642202dd5SIngo Molnar 	}
283742202dd5SIngo Molnar 
2838dada1a1fSNamhyung Kim 	if (!forever && status != -1 && (!interval || stat_config.summary)) {
2839dada1a1fSNamhyung Kim 		if (stat_config.run_count > 1)
2840dada1a1fSNamhyung Kim 			evlist__copy_res_stats(&stat_config, evsel_list);
2841d4f63a47SJiri Olsa 		print_counters(NULL, argc, argv);
2842dada1a1fSNamhyung Kim 	}
2843d134ffb9SArnaldo Carvalho de Melo 
284427e9769aSAlexey Budankov 	evlist__finalize_ctlfd(evsel_list);
284527e9769aSAlexey Budankov 
28464979d0c7SJiri Olsa 	if (STAT_RECORD) {
28474979d0c7SJiri Olsa 		/*
28484979d0c7SJiri Olsa 		 * We synthesize the kernel mmap record just so that older tools
28494979d0c7SJiri Olsa 		 * don't emit warnings about not being able to resolve symbols
28504d39c89fSIngo Molnar 		 * due to /proc/sys/kernel/kptr_restrict settings and instead provide
28514979d0c7SJiri Olsa 		 * a saner message about no samples being in the perf.data file.
28524979d0c7SJiri Olsa 		 *
28534979d0c7SJiri Olsa 		 * This also serves to suppress a warning about f_header.data.size == 0
28548b99b1a4SJiri Olsa 		 * in header.c at the moment 'perf stat record' gets introduced, which
28558b99b1a4SJiri Olsa 		 * is not really needed once we start adding the stat specific PERF_RECORD_
28568b99b1a4SJiri Olsa 		 * records, but the need to suppress the kptr_restrict messages in older
28578b99b1a4SJiri Olsa 		 * tools remain  -acme
28584979d0c7SJiri Olsa 		 */
28598ceb41d7SJiri Olsa 		int fd = perf_data__fd(&perf_stat.data);
2860fa853c4bSSong Liu 
2861fa853c4bSSong Liu 		err = perf_event__synthesize_kernel_mmap((void *)&perf_stat,
28624979d0c7SJiri Olsa 							 process_synthesized_event,
28634979d0c7SJiri Olsa 							 &perf_stat.session->machines.host);
28644979d0c7SJiri Olsa 		if (err) {
28654979d0c7SJiri Olsa 			pr_warning("Couldn't synthesize the kernel mmap record, harmless, "
28664979d0c7SJiri Olsa 				   "older tools may produce warnings about this file\n.");
28674979d0c7SJiri Olsa 		}
28684979d0c7SJiri Olsa 
28697aad0c32SJiri Olsa 		if (!interval) {
28707aad0c32SJiri Olsa 			if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL))
28717aad0c32SJiri Olsa 				pr_err("failed to write stat round event\n");
28727aad0c32SJiri Olsa 		}
28737aad0c32SJiri Olsa 
28748ceb41d7SJiri Olsa 		if (!perf_stat.data.is_pipe) {
28754979d0c7SJiri Olsa 			perf_stat.session->header.data_size += perf_stat.bytes_written;
28764979d0c7SJiri Olsa 			perf_session__write_header(perf_stat.session, evsel_list, fd, true);
2877664c98d4SJiri Olsa 		}
28784979d0c7SJiri Olsa 
2879750b4edeSJiri Olsa 		evlist__close(evsel_list);
28804979d0c7SJiri Olsa 		perf_session__delete(perf_stat.session);
28814979d0c7SJiri Olsa 	}
28824979d0c7SJiri Olsa 
2883544c2ae7SMasami Hiramatsu 	perf_stat__exit_aggr_mode();
288453f5e908SArnaldo Carvalho de Melo 	evlist__free_stats(evsel_list);
28850015e2e1SArnaldo Carvalho de Melo out:
2886f07952b1SAlexander Antonov 	if (stat_config.iostat_run)
2887f07952b1SAlexander Antonov 		iostat_release(evsel_list);
2888f07952b1SAlexander Antonov 
2889d8f9da24SArnaldo Carvalho de Melo 	zfree(&stat_config.walltime_run);
28901725e9cdSIan Rogers 	zfree(&stat_config.user_requested_cpu_list);
2891e55c14afSJiri Olsa 
2892daefd0bcSKan Liang 	if (smi_cost && smi_reset)
2893daefd0bcSKan Liang 		sysfs__write_int(FREEZE_ON_SMI_PATH, 0);
2894daefd0bcSKan Liang 
2895c12995a5SJiri Olsa 	evlist__delete(evsel_list);
289656739444SJin Yao 
28979afe5658SJiri Olsa 	metricgroup__rblist_exit(&stat_config.metric_events);
2898ee7fe31eSAdrian Hunter 	evlist__close_control(stat_config.ctl_fd, stat_config.ctl_fd_ack, &stat_config.ctl_fd_close);
289956739444SJin Yao 
290042202dd5SIngo Molnar 	return status;
290186470930SIngo Molnar }
2902