191007045SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
286470930SIngo Molnar /*
386470930SIngo Molnar * builtin-stat.c
486470930SIngo Molnar *
586470930SIngo Molnar * Builtin stat command: Give a precise performance counters summary
686470930SIngo Molnar * overview about any workload, CPU or specific PID.
786470930SIngo Molnar *
886470930SIngo Molnar * Sample output:
986470930SIngo Molnar
102cba3ffbSIngo Molnar $ perf stat ./hackbench 10
1186470930SIngo Molnar
122cba3ffbSIngo Molnar Time: 0.118
1386470930SIngo Molnar
142cba3ffbSIngo Molnar Performance counter stats for './hackbench 10':
1586470930SIngo Molnar
162cba3ffbSIngo Molnar 1708.761321 task-clock # 11.037 CPUs utilized
172cba3ffbSIngo Molnar 41,190 context-switches # 0.024 M/sec
182cba3ffbSIngo Molnar 6,735 CPU-migrations # 0.004 M/sec
192cba3ffbSIngo Molnar 17,318 page-faults # 0.010 M/sec
202cba3ffbSIngo Molnar 5,205,202,243 cycles # 3.046 GHz
212cba3ffbSIngo Molnar 3,856,436,920 stalled-cycles-frontend # 74.09% frontend cycles idle
222cba3ffbSIngo Molnar 1,600,790,871 stalled-cycles-backend # 30.75% backend cycles idle
232cba3ffbSIngo Molnar 2,603,501,247 instructions # 0.50 insns per cycle
242cba3ffbSIngo Molnar # 1.48 stalled cycles per insn
252cba3ffbSIngo Molnar 484,357,498 branches # 283.455 M/sec
262cba3ffbSIngo Molnar 6,388,934 branch-misses # 1.32% of all branches
272cba3ffbSIngo Molnar
282cba3ffbSIngo Molnar 0.154822978 seconds time elapsed
2986470930SIngo Molnar
3086470930SIngo Molnar *
312cba3ffbSIngo Molnar * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
3286470930SIngo Molnar *
3386470930SIngo Molnar * Improvements and fixes by:
3486470930SIngo Molnar *
3586470930SIngo Molnar * Arjan van de Ven <arjan@linux.intel.com>
3686470930SIngo Molnar * Yanmin Zhang <yanmin.zhang@intel.com>
3786470930SIngo Molnar * Wu Fengguang <fengguang.wu@intel.com>
3886470930SIngo Molnar * Mike Galbraith <efault@gmx.de>
3986470930SIngo Molnar * Paul Mackerras <paulus@samba.org>
406e750a8fSJaswinder Singh Rajput * Jaswinder Singh Rajput <jaswinder@kernel.org>
4186470930SIngo Molnar */
4286470930SIngo Molnar
4386470930SIngo Molnar #include "builtin.h"
44f14d5707SArnaldo Carvalho de Melo #include "util/cgroup.h"
454b6ab94eSJosh Poimboeuf #include <subcmd/parse-options.h>
4686470930SIngo Molnar #include "util/parse-events.h"
47003be8c4SIan Rogers #include "util/pmus.h"
484cabc3d1SAndi Kleen #include "util/pmu.h"
498f28827aSFrederic Weisbecker #include "util/event.h"
50361c99a6SArnaldo Carvalho de Melo #include "util/evlist.h"
5169aad6f1SArnaldo Carvalho de Melo #include "util/evsel.h"
528f28827aSFrederic Weisbecker #include "util/debug.h"
53a5d243d0SIngo Molnar #include "util/color.h"
540007eceaSXiao Guangrong #include "util/stat.h"
5560666c63SLiming Wang #include "util/header.h"
56a12b51c4SPaul Mackerras #include "util/cpumap.h"
57fd78260bSArnaldo Carvalho de Melo #include "util/thread_map.h"
58d809560bSJiri Olsa #include "util/counts.h"
59687986bbSKan Liang #include "util/topdown.h"
604979d0c7SJiri Olsa #include "util/session.h"
61ba6039b6SJiri Olsa #include "util/tool.h"
62a067558eSArnaldo Carvalho de Melo #include "util/string2.h"
63b18f3e36SAndi Kleen #include "util/metricgroup.h"
64ea49e01cSArnaldo Carvalho de Melo #include "util/synthetic-events.h"
65aeb00b1aSArnaldo Carvalho de Melo #include "util/target.h"
66f3711020SArnaldo Carvalho de Melo #include "util/time-utils.h"
679660e08eSJiri Olsa #include "util/top.h"
684804e011SAndi Kleen #include "util/affinity.h"
6970943490SStephane Eranian #include "util/pfm.h"
70fa853c4bSSong Liu #include "util/bpf_counter.h"
71f07952b1SAlexander Antonov #include "util/iostat.h"
72f12ad272SIan Rogers #include "util/util.h"
73ba6039b6SJiri Olsa #include "asm/bug.h"
7486470930SIngo Molnar
75bd48c63eSArnaldo Carvalho de Melo #include <linux/time64.h>
767f7c536fSArnaldo Carvalho de Melo #include <linux/zalloc.h>
7744b1e60aSAndi Kleen #include <api/fs/fs.h>
78a43783aeSArnaldo Carvalho de Melo #include <errno.h>
799607ad3aSArnaldo Carvalho de Melo #include <signal.h>
801f16c575SPeter Zijlstra #include <stdlib.h>
8186470930SIngo Molnar #include <sys/prctl.h>
82fd20e811SArnaldo Carvalho de Melo #include <inttypes.h>
835af52b51SStephane Eranian #include <locale.h>
84e3b03b6cSAndi Kleen #include <math.h>
857a8ef4c4SArnaldo Carvalho de Melo #include <sys/types.h>
867a8ef4c4SArnaldo Carvalho de Melo #include <sys/stat.h>
874208735dSArnaldo Carvalho de Melo #include <sys/wait.h>
887a8ef4c4SArnaldo Carvalho de Melo #include <unistd.h>
890ce2da14SJiri Olsa #include <sys/time.h>
900ce2da14SJiri Olsa #include <sys/resource.h>
916ef81c55SMamatha Inamdar #include <linux/err.h>
9286470930SIngo Molnar
933052ba56SArnaldo Carvalho de Melo #include <linux/ctype.h>
94453fa030SJiri Olsa #include <perf/evlist.h>
95fd3f518fSIan Rogers #include <internal/threadmap.h>
963d689ed6SArnaldo Carvalho de Melo
97d7470b6aSStephane Eranian #define DEFAULT_SEPARATOR " "
98daefd0bcSKan Liang #define FREEZE_ON_SMI_PATH "devices/cpu/freeze_on_smi"
99d7470b6aSStephane Eranian
100d4f63a47SJiri Olsa static void print_counters(struct timespec *ts, int argc, const char **argv);
10113370a9bSStephane Eranian
10263503dbaSJiri Olsa static struct evlist *evsel_list;
103411ad22eSIan Rogers static struct parse_events_option_args parse_events_option_args = {
104411ad22eSIan Rogers .evlistp = &evsel_list,
105411ad22eSIan Rogers };
106411ad22eSIan Rogers
107112cb561SSong Liu static bool all_counters_use_bpf = true;
108361c99a6SArnaldo Carvalho de Melo
109602ad878SArnaldo Carvalho de Melo static struct target target = {
11077a6f014SNamhyung Kim .uid = UINT_MAX,
11177a6f014SNamhyung Kim };
11242202dd5SIngo Molnar
113c1a1f5d9SJiri Olsa #define METRIC_ONLY_LEN 20
114c1a1f5d9SJiri Olsa
11501513fdcSIan Rogers static volatile sig_atomic_t child_pid = -1;
1162cba3ffbSIngo Molnar static int detailed_run = 0;
1174cabc3d1SAndi Kleen static bool transaction_run;
11844b1e60aSAndi Kleen static bool topdown_run = false;
119daefd0bcSKan Liang static bool smi_cost = false;
120daefd0bcSKan Liang static bool smi_reset = false;
121d7470b6aSStephane Eranian static int big_num_opt = -1;
1221f16c575SPeter Zijlstra static const char *pre_cmd = NULL;
1231f16c575SPeter Zijlstra static const char *post_cmd = NULL;
1241f16c575SPeter Zijlstra static bool sync_run = false;
125a7e191c3SFrederik Deweerdt static bool forever = false;
12644b1e60aSAndi Kleen static bool force_metric_only = false;
12713370a9bSStephane Eranian static struct timespec ref_time;
128e0547311SJiri Olsa static bool append_file;
129db06a269Syuzhoujian static bool interval_count;
130e0547311SJiri Olsa static const char *output_name;
131e0547311SJiri Olsa static int output_fd;
132a4b8cfcaSIan Rogers static char *metrics;
1335af52b51SStephane Eranian
1344979d0c7SJiri Olsa struct perf_stat {
1354979d0c7SJiri Olsa bool record;
1368ceb41d7SJiri Olsa struct perf_data data;
1374979d0c7SJiri Olsa struct perf_session *session;
1384979d0c7SJiri Olsa u64 bytes_written;
139ba6039b6SJiri Olsa struct perf_tool tool;
1401975d36eSJiri Olsa bool maps_allocated;
141f854839bSJiri Olsa struct perf_cpu_map *cpus;
1429749b90eSJiri Olsa struct perf_thread_map *threads;
14389af4e05SJiri Olsa enum aggr_mode aggr_mode;
144995ed074SK Prateek Nayak u32 aggr_level;
1454979d0c7SJiri Olsa };
1464979d0c7SJiri Olsa
1474979d0c7SJiri Olsa static struct perf_stat perf_stat;
1484979d0c7SJiri Olsa #define STAT_RECORD perf_stat.record
1494979d0c7SJiri Olsa
15001513fdcSIan Rogers static volatile sig_atomic_t done = 0;
15160666c63SLiming Wang
152421a50f3SJiri Olsa static struct perf_stat_config stat_config = {
153421a50f3SJiri Olsa .aggr_mode = AGGR_GLOBAL,
154995ed074SK Prateek Nayak .aggr_level = MAX_CACHE_LVL + 1,
155711a572eSJiri Olsa .scale = true,
156df4f7b4dSJiri Olsa .unit_width = 4, /* strlen("unit") */
157d97ae04bSJiri Olsa .run_count = 1,
158ee1760e2SJiri Olsa .metric_only_len = METRIC_ONLY_LEN,
15926893a60SJiri Olsa .walltime_nsecs_stats = &walltime_nsecs_stats,
160c735b0a5SFlorian Fischer .ru_stats = &ru_stats,
16134ff0866SJiri Olsa .big_num = true,
16227e9769aSAlexey Budankov .ctl_fd = -1,
163f07952b1SAlexander Antonov .ctl_fd_ack = -1,
164f07952b1SAlexander Antonov .iostat_run = false,
165421a50f3SJiri Olsa };
166421a50f3SJiri Olsa
cpus_map_matched(struct evsel * a,struct evsel * b)167a9a17902SJiri Olsa static bool cpus_map_matched(struct evsel *a, struct evsel *b)
168a9a17902SJiri Olsa {
169a9a17902SJiri Olsa if (!a->core.cpus && !b->core.cpus)
170a9a17902SJiri Olsa return true;
171a9a17902SJiri Olsa
172a9a17902SJiri Olsa if (!a->core.cpus || !b->core.cpus)
173a9a17902SJiri Olsa return false;
174a9a17902SJiri Olsa
17544028699SIan Rogers if (perf_cpu_map__nr(a->core.cpus) != perf_cpu_map__nr(b->core.cpus))
176a9a17902SJiri Olsa return false;
177a9a17902SJiri Olsa
17844028699SIan Rogers for (int i = 0; i < perf_cpu_map__nr(a->core.cpus); i++) {
17944028699SIan Rogers if (perf_cpu_map__cpu(a->core.cpus, i).cpu !=
18044028699SIan Rogers perf_cpu_map__cpu(b->core.cpus, i).cpu)
181a9a17902SJiri Olsa return false;
182a9a17902SJiri Olsa }
183a9a17902SJiri Olsa
184a9a17902SJiri Olsa return true;
185a9a17902SJiri Olsa }
186a9a17902SJiri Olsa
/*
 * Verify that every member of an event group counts on the same CPUs as its
 * group leader.  A member whose cpu map differs is removed from the group
 * (it keeps counting on its own); the user is warned once per offending
 * leader, with per-event cpu maps dumped in verbose mode.
 */
static void evlist__check_cpu_maps(struct evlist *evlist)
{
	struct evsel *evsel, *warned_leader = NULL;

	evlist__for_each_entry(evlist, evsel) {
		struct evsel *leader = evsel__leader(evsel);

		/* Check that leader matches cpus with each member. */
		if (leader == evsel)
			continue;
		if (cpus_map_matched(leader, evsel))
			continue;

		/* If there's mismatch disable the group and warn user. */
		if (warned_leader != leader) {
			char buf[200];

			pr_warning("WARNING: grouped events cpus do not match.\n"
				"Events with CPUs not matching the leader will "
				"be removed from the group.\n");
			evsel__group_desc(leader, buf, sizeof(buf));
			pr_warning(" %s\n", buf);
			/* Warn only once per leader, however many members mismatch. */
			warned_leader = leader;
		}
		if (verbose > 0) {
			char buf[200];

			cpu_map__snprint(leader->core.cpus, buf, sizeof(buf));
			pr_warning(" %s: %s\n", leader->name, buf);
			cpu_map__snprint(evsel->core.cpus, buf, sizeof(buf));
			pr_warning(" %s: %s\n", evsel->name, buf);
		}

		evsel__remove_from_group(evsel, leader);
	}
}
223a9a17902SJiri Olsa
diff_timespec(struct timespec * r,struct timespec * a,struct timespec * b)22413370a9bSStephane Eranian static inline void diff_timespec(struct timespec *r, struct timespec *a,
22513370a9bSStephane Eranian struct timespec *b)
22613370a9bSStephane Eranian {
22713370a9bSStephane Eranian r->tv_sec = a->tv_sec - b->tv_sec;
22813370a9bSStephane Eranian if (a->tv_nsec < b->tv_nsec) {
229310ebb93SArnaldo Carvalho de Melo r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec;
23013370a9bSStephane Eranian r->tv_sec--;
23113370a9bSStephane Eranian } else {
23213370a9bSStephane Eranian r->tv_nsec = a->tv_nsec - b->tv_nsec ;
23313370a9bSStephane Eranian }
23413370a9bSStephane Eranian }
23513370a9bSStephane Eranian
/* Clear accumulated counter stats and the shadow-metric state between runs. */
static void perf_stat__reset_stats(void)
{
	evlist__reset_stats(evsel_list);
	perf_stat__reset_shadow_stats();
}
2411eda3b21SJiri Olsa
/*
 * Callback for synthesized events during 'perf stat record': append the
 * event to the output perf.data file and account the bytes written.
 * Returns 0 on success, -1 if the write fails.
 */
static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	const size_t size = event->header.size;

	if (perf_data__write(&perf_stat.data, event, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	perf_stat.bytes_written += size;
	return 0;
}
2554979d0c7SJiri Olsa
/*
 * Synthesize a stat-round record (timestamp @tm, round @type) into the
 * perf.data being recorded, via process_synthesized_event().
 */
static int write_stat_round_event(u64 tm, u64 type)
{
	return perf_event__synthesize_stat_round(NULL, tm, type,
						 process_synthesized_event,
						 NULL);
}
2627aad0c32SJiri Olsa
2637aad0c32SJiri Olsa #define WRITE_STAT_ROUND_EVENT(time, interval) \
2647aad0c32SJiri Olsa write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)
2657aad0c32SJiri Olsa
2668cd36f3eSJiri Olsa #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
2675a6ea81bSJiri Olsa
/*
 * Record one counter value for @counter at (@cpu_map_idx, @thread) as a
 * stat event in the perf.data file, using the sample id stashed for that
 * cpu/thread slot.
 */
static int evsel__write_stat_event(struct evsel *counter, int cpu_map_idx, u32 thread,
				   struct perf_counts_values *count)
{
	struct perf_sample_id *sid = SID(counter, cpu_map_idx, thread);
	struct perf_cpu cpu = perf_cpu_map__cpu(evsel__cpus(counter), cpu_map_idx);

	return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
					   process_synthesized_event, NULL);
}
2775a6ea81bSJiri Olsa
/*
 * Read one value for @counter at (@cpu_map_idx, @thread).  perf's own
 * "tool" events are synthesized here instead of read from the kernel:
 * duration_time comes from the elapsed interval @rs, user/system time
 * from ru_stats.  Real events go through evsel__read_counter().
 */
static int read_single_counter(struct evsel *counter, int cpu_map_idx,
			       int thread, struct timespec *rs)
{
	switch(counter->tool_event) {
	case PERF_TOOL_DURATION_TIME: {
		/* Wall-clock nanoseconds for this interval. */
		u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL;
		struct perf_counts_values *count =
			perf_counts(counter->counts, cpu_map_idx, thread);
		count->ena = count->run = val;
		count->val = val;
		return 0;
	}
	case PERF_TOOL_USER_TIME:
	case PERF_TOOL_SYSTEM_TIME: {
		u64 val;
		struct perf_counts_values *count =
			perf_counts(counter->counts, cpu_map_idx, thread);
		if (counter->tool_event == PERF_TOOL_USER_TIME)
			val = ru_stats.ru_utime_usec_stat.mean;
		else
			val = ru_stats.ru_stime_usec_stat.mean;
		/* Synthesized value: enabled == running == value. */
		count->ena = count->run = val;
		count->val = val;
		return 0;
	}
	default:
	case PERF_TOOL_NONE:
		return evsel__read_counter(counter, cpu_map_idx, thread);
	case PERF_TOOL_MAX:
		/* This should never be reached */
		return 0;
	}
}
311f0fbb114SAndi Kleen
312f5b4a9c3SStephane Eranian /*
313f5b4a9c3SStephane Eranian * Read out the results of a single counter:
314f5b4a9c3SStephane Eranian * do not aggregate counts across CPUs in system-wide mode
315f5b4a9c3SStephane Eranian */
/*
 * Read out the results of a single counter:
 * do not aggregate counts across CPUs in system-wide mode
 *
 * Reads one value per monitored thread for the given cpu map index,
 * records each value as a stat event under 'perf stat record', and
 * returns 0 on success or a negative error on the first failure.
 */
static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu_map_idx)
{
	int nthreads = perf_thread_map__nr(evsel_list->core.threads);
	int thread;

	if (!counter->supported)
		return -ENOENT;

	for (thread = 0; thread < nthreads; thread++) {
		struct perf_counts_values *count;

		count = perf_counts(counter->counts, cpu_map_idx, thread);

		/*
		 * The leader's group read loads data into its group members
		 * (via evsel__read_counter()) and sets their count->loaded.
		 */
		if (!perf_counts__is_loaded(counter->counts, cpu_map_idx, thread) &&
		    read_single_counter(counter, cpu_map_idx, thread, rs)) {
			/* Invalidate this counter: mark unscaled, zero its times. */
			counter->counts->scaled = -1;
			perf_counts(counter->counts, cpu_map_idx, thread)->ena = 0;
			perf_counts(counter->counts, cpu_map_idx, thread)->run = 0;
			return -1;
		}

		/* Consume the loaded flag so the next round re-reads. */
		perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, false);

		if (STAT_RECORD) {
			if (evsel__write_stat_event(counter, cpu_map_idx, thread, count)) {
				pr_err("failed to write stat event\n");
				return -1;
			}
		}

		if (verbose > 1) {
			fprintf(stat_config.output,
				"%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
					evsel__name(counter),
					perf_cpu_map__cpu(evsel__cpus(counter),
							  cpu_map_idx).cpu,
					count->val, count->ena, count->run);
		}
	}

	return 0;
}
36286470930SIngo Molnar
/*
 * Read every non-BPF counter, iterating the evlist CPU-major.  For CPU
 * targets (not per-thread) the iteration pins thread affinity to the CPU
 * being read; otherwise no affinity is used.  Per-counter read errors are
 * stashed in counter->err (first error wins) rather than aborting the
 * sweep; only affinity setup failure returns -1.
 */
static int read_affinity_counters(struct timespec *rs)
{
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity saved_affinity, *affinity;

	/* Nothing to do here when every counter is read from BPF maps. */
	if (all_counters_use_bpf)
		return 0;

	if (!target__has_cpu(&target) || target__has_per_thread(&target))
		affinity = NULL;
	else if (affinity__setup(&saved_affinity) < 0)
		return -1;
	else
		affinity = &saved_affinity;

	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
		struct evsel *counter = evlist_cpu_itr.evsel;

		if (evsel__is_bpf(counter))
			continue;

		/* Keep the first error sticky until process_counters() clears it. */
		if (!counter->err) {
			counter->err = read_counter_cpu(counter, rs,
							evlist_cpu_itr.cpu_map_idx);
		}
	}
	if (affinity)
		affinity__cleanup(&saved_affinity);

	return 0;
}
394c7e5b328SJin Yao
read_bpf_map_counters(void)395fa853c4bSSong Liu static int read_bpf_map_counters(void)
396fa853c4bSSong Liu {
397fa853c4bSSong Liu struct evsel *counter;
398fa853c4bSSong Liu int err;
399fa853c4bSSong Liu
400fa853c4bSSong Liu evlist__for_each_entry(evsel_list, counter) {
401112cb561SSong Liu if (!evsel__is_bpf(counter))
402112cb561SSong Liu continue;
403112cb561SSong Liu
404fa853c4bSSong Liu err = bpf_counter__read(counter);
405fa853c4bSSong Liu if (err)
406fa853c4bSSong Liu return err;
407fa853c4bSSong Liu }
408fa853c4bSSong Liu return 0;
409fa853c4bSSong Liu }
410fa853c4bSSong Liu
read_counters(struct timespec * rs)4118962cbecSNamhyung Kim static int read_counters(struct timespec *rs)
412c7e5b328SJin Yao {
413fa853c4bSSong Liu if (!stat_config.stop_read_counter) {
414112cb561SSong Liu if (read_bpf_map_counters() ||
415112cb561SSong Liu read_affinity_counters(rs))
4168962cbecSNamhyung Kim return -1;
417fa853c4bSSong Liu }
4188962cbecSNamhyung Kim return 0;
4198962cbecSNamhyung Kim }
4208962cbecSNamhyung Kim
/*
 * Turn the raw values gathered by read_counters() into stats: log any
 * counter whose read failed (clearing its sticky error), run the stat
 * processing on each counter, then merge counters and handle per-core
 * aggregation across the evlist.
 */
static void process_counters(void)
{
	struct evsel *counter;

	evlist__for_each_entry(evsel_list, counter) {
		if (counter->err)
			pr_debug("failed to read counter %s\n", counter->name);
		if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter))
			pr_warning("failed to process counter %s\n", counter->name);
		/* Reset the sticky per-counter error for the next round. */
		counter->err = 0;
	}

	perf_stat_merge_counters(&stat_config, evsel_list);
	perf_stat_process_percore(&stat_config, evsel_list);
}
436106a94a0SJiri Olsa
/*
 * Handle one -I/--interval expiry: compute the elapsed time since the
 * reference timestamp, re-read and process all counters, write a
 * stat-round record when recording, and print this interval's counts.
 */
static void process_interval(void)
{
	struct timespec ts, rs;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	diff_timespec(&rs, &ts, &ref_time);

	evlist__reset_aggr_stats(evsel_list);

	if (read_counters(&rs) == 0)
		process_counters();

	if (STAT_RECORD) {
		if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL))
			pr_err("failed to write stat round event\n");
	}

	/* Wall-clock stats restart each interval at the nominal interval length. */
	init_stats(&walltime_nsecs_stats);
	update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL);
	print_counters(&rs, 0, NULL);
}
45813370a9bSStephane Eranian
handle_interval(unsigned int interval,int * times)459dece3a4dSAlexey Budankov static bool handle_interval(unsigned int interval, int *times)
460dece3a4dSAlexey Budankov {
461dece3a4dSAlexey Budankov if (interval) {
462dece3a4dSAlexey Budankov process_interval();
463dece3a4dSAlexey Budankov if (interval_count && !(--(*times)))
464dece3a4dSAlexey Budankov return true;
465dece3a4dSAlexey Budankov }
466dece3a4dSAlexey Budankov return false;
467dece3a4dSAlexey Budankov }
468dece3a4dSAlexey Budankov
/*
 * Start counting: BPF-backed counters are enabled one by one first, then
 * the remaining counters are enabled via the evlist — unless the target is
 * set up to enable them automatically on exec of the workload.  Returns 0
 * on success or the first bpf_counter__enable() error.
 */
static int enable_counters(void)
{
	struct evsel *evsel;
	int err;

	evlist__for_each_entry(evsel_list, evsel) {
		if (!evsel__is_bpf(evsel))
			continue;

		err = bpf_counter__enable(evsel);
		if (err)
			return err;
	}

	if (!target__enable_on_exec(&target)) {
		/* Skip the evlist enable when every counter is BPF-managed. */
		if (!all_counters_use_bpf)
			evlist__enable(evsel_list);
	}
	return 0;
}
48941191688SAndi Kleen
/*
 * Stop counting for attach-style targets: disable each BPF counter, then
 * the evlist (unless all counters are BPF-managed).  For a --none target
 * the counters die with the workload and need no explicit disable.
 */
static void disable_counters(void)
{
	struct evsel *counter;

	/*
	 * If we don't have tracee (attaching to task or cpu), counters may
	 * still be running. To get accurate group ratios, we must stop groups
	 * from counting before reading their constituent counters.
	 */
	if (!target__none(&target)) {
		evlist__for_each_entry(evsel_list, counter)
			bpf_counter__disable(counter);
		if (!all_counters_use_bpf)
			evlist__disable(evsel_list);
	}
}
5063df33effSMark Rutland
50701513fdcSIan Rogers static volatile sig_atomic_t workload_exec_errno;
5086af206fdSArnaldo Carvalho de Melo
5096af206fdSArnaldo Carvalho de Melo /*
5107b392ef0SArnaldo Carvalho de Melo * evlist__prepare_workload will send a SIGUSR1
5116af206fdSArnaldo Carvalho de Melo * if the fork fails, since we asked by setting its
5126af206fdSArnaldo Carvalho de Melo * want_signal to true.
5136af206fdSArnaldo Carvalho de Melo */
static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info,
					void *ucontext __maybe_unused)
{
	/* The forked child's errno comes back in the signal's sival_int. */
	workload_exec_errno = info->si_value.sival_int;
}
5196af206fdSArnaldo Carvalho de Melo
/* Sample IDs are needed when recording, or when the event reads PERF_FORMAT_ID. */
static bool evsel__should_store_id(struct evsel *counter)
{
	return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID;
}
52482bf311eSJiri Olsa
is_target_alive(struct target * _target,struct perf_thread_map * threads)525cbb5df7eSJiri Olsa static bool is_target_alive(struct target *_target,
5269749b90eSJiri Olsa struct perf_thread_map *threads)
527cbb5df7eSJiri Olsa {
528cbb5df7eSJiri Olsa struct stat st;
529cbb5df7eSJiri Olsa int i;
530cbb5df7eSJiri Olsa
531cbb5df7eSJiri Olsa if (!target__has_task(_target))
532cbb5df7eSJiri Olsa return true;
533cbb5df7eSJiri Olsa
534cbb5df7eSJiri Olsa for (i = 0; i < threads->nr; i++) {
535cbb5df7eSJiri Olsa char path[PATH_MAX];
536cbb5df7eSJiri Olsa
537cbb5df7eSJiri Olsa scnprintf(path, PATH_MAX, "%s/%d", procfs__mountpoint(),
538cbb5df7eSJiri Olsa threads->map[i].pid);
539cbb5df7eSJiri Olsa
540cbb5df7eSJiri Olsa if (!stat(path, &st))
541cbb5df7eSJiri Olsa return true;
542cbb5df7eSJiri Olsa }
543cbb5df7eSJiri Olsa
544cbb5df7eSJiri Olsa return false;
545cbb5df7eSJiri Olsa }
546cbb5df7eSJiri Olsa
/*
 * Service a command arriving on the control fd.  Enable and disable
 * commands trigger an immediate interval printout (when -I is active) so
 * the printed counts bracket the state change; all other commands need no
 * extra handling here.
 */
static void process_evlist(struct evlist *evlist, unsigned int interval)
{
	enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;

	if (evlist__ctlfd_process(evlist, &cmd) > 0) {
		switch (cmd) {
		case EVLIST_CTL_CMD_ENABLE:
			fallthrough;
		case EVLIST_CTL_CMD_DISABLE:
			if (interval)
				process_interval();
			break;
		case EVLIST_CTL_CMD_SNAPSHOT:
		case EVLIST_CTL_CMD_ACK:
		case EVLIST_CTL_CMD_UNSUPPORTED:
		case EVLIST_CTL_CMD_EVLIST:
		case EVLIST_CTL_CMD_STOP:
		case EVLIST_CTL_CMD_PING:
		default:
			break;
		}
	}
}
570bee328cbSAlexey Budankov
compute_tts(struct timespec * time_start,struct timespec * time_stop,int * time_to_sleep)571bee328cbSAlexey Budankov static void compute_tts(struct timespec *time_start, struct timespec *time_stop,
572bee328cbSAlexey Budankov int *time_to_sleep)
573bee328cbSAlexey Budankov {
574bee328cbSAlexey Budankov int tts = *time_to_sleep;
575bee328cbSAlexey Budankov struct timespec time_diff;
576bee328cbSAlexey Budankov
577bee328cbSAlexey Budankov diff_timespec(&time_diff, time_stop, time_start);
578bee328cbSAlexey Budankov
579bee328cbSAlexey Budankov tts -= time_diff.tv_sec * MSEC_PER_SEC +
580bee328cbSAlexey Budankov time_diff.tv_nsec / NSEC_PER_MSEC;
581bee328cbSAlexey Budankov
582bee328cbSAlexey Budankov if (tts < 0)
583bee328cbSAlexey Budankov tts = 0;
584bee328cbSAlexey Budankov
585bee328cbSAlexey Budankov *time_to_sleep = tts;
586bee328cbSAlexey Budankov }
587bee328cbSAlexey Budankov
/*
 * Main wait loop while counters run.  Polls the evlist with a timeout
 * derived from -I/--timeout (1000ms default), fires interval handling on
 * poll timeout, services control-fd commands on fd activity (shortening
 * the next sleep by the time spent doing so), and exits when the forked
 * workload exits, the attached target goes away, or 'done' is signalled.
 * Returns the child's wait status (0 when not forking).
 */
static int dispatch_events(bool forks, int timeout, int interval, int *times)
{
	int child_exited = 0, status = 0;
	int time_to_sleep, sleep_time;
	struct timespec time_start, time_stop;

	if (interval)
		sleep_time = interval;
	else if (timeout)
		sleep_time = timeout;
	else
		sleep_time = 1000;

	time_to_sleep = sleep_time;

	while (!done) {
		/* WNOHANG: just probe for exit, don't block in waitpid. */
		if (forks)
			child_exited = waitpid(child_pid, &status, WNOHANG);
		else
			child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0;

		if (child_exited)
			break;

		clock_gettime(CLOCK_MONOTONIC, &time_start);
		if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) { /* poll timeout or EINTR */
			if (timeout || handle_interval(interval, times))
				break;
			time_to_sleep = sleep_time;
		} else { /* fd revent */
			process_evlist(evsel_list, interval);
			clock_gettime(CLOCK_MONOTONIC, &time_stop);
			compute_tts(&time_start, &time_stop, &time_to_sleep);
		}
	}

	return status;
}
626987b8238SAlexey Budankov
/* Outcome of stat_handle_error(): what to do after a counter open failed. */
enum counter_recovery {
	COUNTER_SKIP,	/* give up on this counter, continue with the rest */
	COUNTER_RETRY,	/* retry the open (e.g. after a fallback adjustment) */
	COUNTER_FATAL,	/* unrecoverable — presumably aborts the run; confirm at callers */
};
632e0e6a6caSAndi Kleen
/*
 * Classify the errno left behind by a failed counter open and decide
 * whether the caller should skip the event, retry the open, or abort.
 * May mutate the evsel (supported/errored flags), shrink the thread
 * map in the --per-thread case, and kills the forked workload before
 * reporting COUNTER_FATAL.
 */
static enum counter_recovery stat_handle_error(struct evsel *counter)
{
	char msg[BUFSIZ];
	/*
	 * PPC returns ENXIO for HW counters until 2.6.37
	 * (behavior changed with commit b0a873e).
	 */
	if (errno == EINVAL || errno == ENOSYS ||
	    errno == ENOENT || errno == EOPNOTSUPP ||
	    errno == ENXIO) {
		if (verbose > 0)
			ui__warning("%s event is not supported by the kernel.\n",
				    evsel__name(counter));
		counter->supported = false;
		/*
		 * errored is a sticky flag that means one of the counter's
		 * cpu event had a problem and needs to be reexamined.
		 */
		counter->errored = true;

		/* Plain (non-leader or single-member) events are simply skipped. */
		if ((evsel__leader(counter) != counter) ||
		    !(counter->core.leader->nr_members > 1))
			return COUNTER_SKIP;
	} else if (evsel__fallback(counter, errno, msg, sizeof(msg))) {
		/* evsel__fallback() downgraded the event config; try opening again. */
		if (verbose > 0)
			ui__warning("%s\n", msg);
		return COUNTER_RETRY;
	} else if (target__has_per_thread(&target) &&
		   evsel_list->core.threads &&
		   evsel_list->core.threads->err_thread != -1) {
		/*
		 * For global --per-thread case, skip current
		 * error thread.
		 */
		if (!thread_map__remove(evsel_list->core.threads,
					evsel_list->core.threads->err_thread)) {
			evsel_list->core.threads->err_thread = -1;
			return COUNTER_RETRY;
		}
	} else if (counter->skippable) {
		/* Events marked skippable never abort the run. */
		if (verbose > 0)
			ui__warning("skipping event %s that kernel failed to open .\n",
				    evsel__name(counter));
		counter->supported = false;
		counter->errored = true;
		return COUNTER_SKIP;
	}

	/* No recovery possible: report and tear down the workload, if any. */
	evsel__open_strerror(counter, &target, errno, msg, sizeof(msg));
	ui__error("%s\n", msg);

	if (child_pid != -1)
		kill(child_pid, SIGTERM);
	return COUNTER_FATAL;
}
688e0e6a6caSAndi Kleen
__run_perf_stat(int argc,const char ** argv,int run_idx)689e55c14afSJiri Olsa static int __run_perf_stat(int argc, const char **argv, int run_idx)
69086470930SIngo Molnar {
691ec0d3d1fSJiri Olsa int interval = stat_config.interval;
692db06a269Syuzhoujian int times = stat_config.times;
693f1f8ad52Syuzhoujian int timeout = stat_config.timeout;
694d6195a6aSArnaldo Carvalho de Melo char msg[BUFSIZ];
69586470930SIngo Molnar unsigned long long t0, t1;
69632dcd021SJiri Olsa struct evsel *counter;
697410136f5SStephane Eranian size_t l;
69842202dd5SIngo Molnar int status = 0;
6996be2850eSZhang, Yanmin const bool forks = (argc > 0);
7008ceb41d7SJiri Olsa bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
701472832d2SIan Rogers struct evlist_cpu_iterator evlist_cpu_itr;
70249de1795SArnaldo Carvalho de Melo struct affinity saved_affinity, *affinity = NULL;
703472832d2SIan Rogers int err;
7044804e011SAndi Kleen bool second_pass = false;
70586470930SIngo Molnar
706acf28922SNamhyung Kim if (forks) {
7077b392ef0SArnaldo Carvalho de Melo if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) {
708acf28922SNamhyung Kim perror("failed to prepare workload");
709fceda7feSDavid Ahern return -1;
710051ae7f7SPaul Mackerras }
711d20a47e7SNamhyung Kim child_pid = evsel_list->workload.pid;
71260666c63SLiming Wang }
713051ae7f7SPaul Mackerras
7140df6ade7SIan Rogers if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) {
715035c6b7aSLevi Yun if (affinity__setup(&saved_affinity) < 0) {
716035c6b7aSLevi Yun err = -1;
717035c6b7aSLevi Yun goto err_out;
718035c6b7aSLevi Yun }
71949de1795SArnaldo Carvalho de Melo affinity = &saved_affinity;
72049de1795SArnaldo Carvalho de Melo }
7215a5dfe4bSAndi Kleen
722fa853c4bSSong Liu evlist__for_each_entry(evsel_list, counter) {
723bf515f02SIan Rogers counter->reset_group = false;
724035c6b7aSLevi Yun if (bpf_counter__load(counter, &target)) {
725035c6b7aSLevi Yun err = -1;
726035c6b7aSLevi Yun goto err_out;
727035c6b7aSLevi Yun }
728ecc68ee2SDmitrii Dolgov if (!(evsel__is_bperf(counter)))
729112cb561SSong Liu all_counters_use_bpf = false;
730fa853c4bSSong Liu }
731fa853c4bSSong Liu
732ed4090a2SNamhyung Kim evlist__reset_aggr_stats(evsel_list);
733ed4090a2SNamhyung Kim
73449de1795SArnaldo Carvalho de Melo evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
735472832d2SIan Rogers counter = evlist_cpu_itr.evsel;
736472832d2SIan Rogers
7377fac83aaSSong Liu /*
7387fac83aaSSong Liu * bperf calls evsel__open_per_cpu() in bperf__load(), so
7397fac83aaSSong Liu * no need to call it again here.
7407fac83aaSSong Liu */
7417fac83aaSSong Liu if (target.use_bpf)
7427fac83aaSSong Liu break;
7434804e011SAndi Kleen
7444804e011SAndi Kleen if (counter->reset_group || counter->errored)
7454804e011SAndi Kleen continue;
746ecc68ee2SDmitrii Dolgov if (evsel__is_bperf(counter))
747112cb561SSong Liu continue;
7484804e011SAndi Kleen try_again:
7494804e011SAndi Kleen if (create_perf_stat_counter(counter, &stat_config, &target,
750472832d2SIan Rogers evlist_cpu_itr.cpu_map_idx) < 0) {
7514804e011SAndi Kleen
7524804e011SAndi Kleen /*
7534804e011SAndi Kleen * Weak group failed. We cannot just undo this here
7544804e011SAndi Kleen * because earlier CPUs might be in group mode, and the kernel
7554804e011SAndi Kleen * doesn't support mixing group and non group reads. Defer
7564804e011SAndi Kleen * it to later.
7574804e011SAndi Kleen * Don't close here because we're in the wrong affinity.
7584804e011SAndi Kleen */
75935c1980eSAndi Kleen if ((errno == EINVAL || errno == EBADF) &&
760fba7c866SJiri Olsa evsel__leader(counter) != counter &&
7615a5dfe4bSAndi Kleen counter->weak_group) {
76264b4778bSArnaldo Carvalho de Melo evlist__reset_weak_group(evsel_list, counter, false);
7634804e011SAndi Kleen assert(counter->reset_group);
7644804e011SAndi Kleen second_pass = true;
7654804e011SAndi Kleen continue;
7665a5dfe4bSAndi Kleen }
7675a5dfe4bSAndi Kleen
768e0e6a6caSAndi Kleen switch (stat_handle_error(counter)) {
769e0e6a6caSAndi Kleen case COUNTER_FATAL:
770035c6b7aSLevi Yun err = -1;
771035c6b7aSLevi Yun goto err_out;
772e0e6a6caSAndi Kleen case COUNTER_RETRY:
773e0e6a6caSAndi Kleen goto try_again;
774e0e6a6caSAndi Kleen case COUNTER_SKIP:
775e0e6a6caSAndi Kleen continue;
776e0e6a6caSAndi Kleen default:
777e0e6a6caSAndi Kleen break;
778e0e6a6caSAndi Kleen }
7794804e011SAndi Kleen
780084ab9f8SArnaldo Carvalho de Melo }
7812cee77c4SDavid Ahern counter->supported = true;
7824804e011SAndi Kleen }
7834804e011SAndi Kleen
7844804e011SAndi Kleen if (second_pass) {
7854804e011SAndi Kleen /*
7864804e011SAndi Kleen * Now redo all the weak group after closing them,
7874804e011SAndi Kleen * and also close errored counters.
7884804e011SAndi Kleen */
7894804e011SAndi Kleen
7904804e011SAndi Kleen /* First close errored or weak retry */
79149de1795SArnaldo Carvalho de Melo evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
792472832d2SIan Rogers counter = evlist_cpu_itr.evsel;
793472832d2SIan Rogers
7944804e011SAndi Kleen if (!counter->reset_group && !counter->errored)
7954804e011SAndi Kleen continue;
796472832d2SIan Rogers
797472832d2SIan Rogers perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx);
7984804e011SAndi Kleen }
7994804e011SAndi Kleen /* Now reopen weak */
80049de1795SArnaldo Carvalho de Melo evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
801472832d2SIan Rogers counter = evlist_cpu_itr.evsel;
802472832d2SIan Rogers
8034804e011SAndi Kleen if (!counter->reset_group)
8044804e011SAndi Kleen continue;
8054804e011SAndi Kleen try_again_reset:
8068ab2e96dSArnaldo Carvalho de Melo pr_debug2("reopening weak %s\n", evsel__name(counter));
8074804e011SAndi Kleen if (create_perf_stat_counter(counter, &stat_config, &target,
808472832d2SIan Rogers evlist_cpu_itr.cpu_map_idx) < 0) {
8094804e011SAndi Kleen
8104804e011SAndi Kleen switch (stat_handle_error(counter)) {
8114804e011SAndi Kleen case COUNTER_FATAL:
812035c6b7aSLevi Yun err = -1;
813035c6b7aSLevi Yun goto err_out;
8144804e011SAndi Kleen case COUNTER_RETRY:
8154804e011SAndi Kleen goto try_again_reset;
8164804e011SAndi Kleen case COUNTER_SKIP:
8174804e011SAndi Kleen continue;
8184804e011SAndi Kleen default:
8194804e011SAndi Kleen break;
8204804e011SAndi Kleen }
8214804e011SAndi Kleen }
8224804e011SAndi Kleen counter->supported = true;
8234804e011SAndi Kleen }
8244804e011SAndi Kleen }
82549de1795SArnaldo Carvalho de Melo affinity__cleanup(affinity);
826*380bc5a6SIan Rogers affinity = NULL;
8274804e011SAndi Kleen
8284804e011SAndi Kleen evlist__for_each_entry(evsel_list, counter) {
8294804e011SAndi Kleen if (!counter->supported) {
8304804e011SAndi Kleen perf_evsel__free_fd(&counter->core);
8314804e011SAndi Kleen continue;
8324804e011SAndi Kleen }
833410136f5SStephane Eranian
834410136f5SStephane Eranian l = strlen(counter->unit);
835df4f7b4dSJiri Olsa if (l > stat_config.unit_width)
836df4f7b4dSJiri Olsa stat_config.unit_width = l;
8372af4646dSJiri Olsa
838ddc6999eSArnaldo Carvalho de Melo if (evsel__should_store_id(counter) &&
839035c6b7aSLevi Yun evsel__store_ids(counter, evsel_list)) {
840035c6b7aSLevi Yun err = -1;
841035c6b7aSLevi Yun goto err_out;
842035c6b7aSLevi Yun }
84348290609SArnaldo Carvalho de Melo }
84486470930SIngo Molnar
84524bf91a7SArnaldo Carvalho de Melo if (evlist__apply_filters(evsel_list, &counter)) {
84662d94b00SArnaldo Carvalho de Melo pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
8478ab2e96dSArnaldo Carvalho de Melo counter->filter, evsel__name(counter), errno,
848c8b5f2c9SArnaldo Carvalho de Melo str_error_r(errno, msg, sizeof(msg)));
849cfd748aeSFrederic Weisbecker return -1;
850cfd748aeSFrederic Weisbecker }
851cfd748aeSFrederic Weisbecker
8524979d0c7SJiri Olsa if (STAT_RECORD) {
853fa853c4bSSong Liu int fd = perf_data__fd(&perf_stat.data);
8544979d0c7SJiri Olsa
855664c98d4SJiri Olsa if (is_pipe) {
8568ceb41d7SJiri Olsa err = perf_header__write_pipe(perf_data__fd(&perf_stat.data));
857664c98d4SJiri Olsa } else {
8584979d0c7SJiri Olsa err = perf_session__write_header(perf_stat.session, evsel_list,
8594979d0c7SJiri Olsa fd, false);
860664c98d4SJiri Olsa }
861664c98d4SJiri Olsa
8624979d0c7SJiri Olsa if (err < 0)
863035c6b7aSLevi Yun goto err_out;
8648b99b1a4SJiri Olsa
865b251892dSArnaldo Carvalho de Melo err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list,
866c2c247f2SJiri Olsa process_synthesized_event, is_pipe);
8678b99b1a4SJiri Olsa if (err < 0)
868035c6b7aSLevi Yun goto err_out;
869035c6b7aSLevi Yun
8704979d0c7SJiri Olsa }
8714979d0c7SJiri Olsa
87225f69c69SChangbin Du if (target.initial_delay) {
873c587e77eSNamhyung Kim pr_info(EVLIST_DISABLED_MSG);
874c587e77eSNamhyung Kim } else {
875fa853c4bSSong Liu err = enable_counters();
876035c6b7aSLevi Yun if (err) {
877035c6b7aSLevi Yun err = -1;
878035c6b7aSLevi Yun goto err_out;
879035c6b7aSLevi Yun }
880c587e77eSNamhyung Kim }
881bb8bc52eSAdrián Herrera Arcila
882bb8bc52eSAdrián Herrera Arcila /* Exec the command, if any */
883bb8bc52eSAdrián Herrera Arcila if (forks)
884d0a0a511SThomas Richter evlist__start_workload(evsel_list);
885acf28922SNamhyung Kim
88625f69c69SChangbin Du if (target.initial_delay > 0) {
88725f69c69SChangbin Du usleep(target.initial_delay * USEC_PER_MSEC);
888c587e77eSNamhyung Kim err = enable_counters();
889035c6b7aSLevi Yun if (err) {
890035c6b7aSLevi Yun err = -1;
891035c6b7aSLevi Yun goto err_out;
892035c6b7aSLevi Yun }
893c587e77eSNamhyung Kim
894c587e77eSNamhyung Kim pr_info(EVLIST_ENABLED_MSG);
895c587e77eSNamhyung Kim }
896c587e77eSNamhyung Kim
897435b46efSSong Liu t0 = rdclock();
898435b46efSSong Liu clock_gettime(CLOCK_MONOTONIC, &ref_time);
899435b46efSSong Liu
900bb8bc52eSAdrián Herrera Arcila if (forks) {
90127e9769aSAlexey Budankov if (interval || timeout || evlist__ctlfd_initialized(evsel_list))
902bee328cbSAlexey Budankov status = dispatch_events(forks, timeout, interval, ×);
903cfbd41b7SArnaldo Carvalho de Melo if (child_pid != -1) {
904cfbd41b7SArnaldo Carvalho de Melo if (timeout)
905cfbd41b7SArnaldo Carvalho de Melo kill(child_pid, SIGTERM);
9068897a891SJiri Olsa wait4(child_pid, &status, 0, &stat_config.ru_data);
907cfbd41b7SArnaldo Carvalho de Melo }
9086af206fdSArnaldo Carvalho de Melo
909f33cbe72SArnaldo Carvalho de Melo if (workload_exec_errno) {
910c8b5f2c9SArnaldo Carvalho de Melo const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
911f33cbe72SArnaldo Carvalho de Melo pr_err("Workload failed: %s\n", emsg);
912035c6b7aSLevi Yun err = -1;
913035c6b7aSLevi Yun goto err_out;
914f33cbe72SArnaldo Carvalho de Melo }
9156af206fdSArnaldo Carvalho de Melo
91633e49ea7SAndi Kleen if (WIFSIGNALED(status))
91733e49ea7SAndi Kleen psignal(WTERMSIG(status), argv[0]);
91860666c63SLiming Wang } else {
919bee328cbSAlexey Budankov status = dispatch_events(forks, timeout, interval, ×);
92060666c63SLiming Wang }
92186470930SIngo Molnar
9223df33effSMark Rutland disable_counters();
9233df33effSMark Rutland
92486470930SIngo Molnar t1 = rdclock();
92586470930SIngo Molnar
92654ac0b1bSJiri Olsa if (stat_config.walltime_run_table)
92754ac0b1bSJiri Olsa stat_config.walltime_run[run_idx] = t1 - t0;
928e55c14afSJiri Olsa
929ee6a9614SJin Yao if (interval && stat_config.summary) {
930c7e5b328SJin Yao stat_config.interval = 0;
931ee6a9614SJin Yao stat_config.stop_read_counter = true;
932c7e5b328SJin Yao init_stats(&walltime_nsecs_stats);
933c7e5b328SJin Yao update_stats(&walltime_nsecs_stats, t1 - t0);
934c7e5b328SJin Yao
93553f5e908SArnaldo Carvalho de Melo evlist__copy_prev_raw_counts(evsel_list);
93653f5e908SArnaldo Carvalho de Melo evlist__reset_prev_raw_counts(evsel_list);
9378f97963eSNamhyung Kim evlist__reset_aggr_stats(evsel_list);
938c735b0a5SFlorian Fischer } else {
9399e9772c4SPeter Zijlstra update_stats(&walltime_nsecs_stats, t1 - t0);
940c735b0a5SFlorian Fischer update_rusage_stats(&ru_stats, &stat_config.ru_data);
941c735b0a5SFlorian Fischer }
94242202dd5SIngo Molnar
9433df33effSMark Rutland /*
9443df33effSMark Rutland * Closing a group leader splits the group, and as we only disable
9453df33effSMark Rutland * group leaders, results in remaining events becoming enabled. To
9463df33effSMark Rutland * avoid arbitrary skew, we must read all counters before closing any
9473df33effSMark Rutland * group leaders.
9483df33effSMark Rutland */
9498962cbecSNamhyung Kim if (read_counters(&(struct timespec) { .tv_nsec = t1-t0 }) == 0)
9508962cbecSNamhyung Kim process_counters();
95108ef3af1SJiri Olsa
95208ef3af1SJiri Olsa /*
95308ef3af1SJiri Olsa * We need to keep evsel_list alive, because it's processed
95408ef3af1SJiri Olsa * later the evsel_list will be closed after.
95508ef3af1SJiri Olsa */
95608ef3af1SJiri Olsa if (!STAT_RECORD)
957750b4edeSJiri Olsa evlist__close(evsel_list);
958c52b12edSArnaldo Carvalho de Melo
95942202dd5SIngo Molnar return WEXITSTATUS(status);
960035c6b7aSLevi Yun
961035c6b7aSLevi Yun err_out:
962035c6b7aSLevi Yun if (forks)
963035c6b7aSLevi Yun evlist__cancel_workload(evsel_list);
964035c6b7aSLevi Yun
965*380bc5a6SIan Rogers affinity__cleanup(affinity);
966035c6b7aSLevi Yun return err;
96742202dd5SIngo Molnar }
96842202dd5SIngo Molnar
run_perf_stat(int argc,const char ** argv,int run_idx)969e55c14afSJiri Olsa static int run_perf_stat(int argc, const char **argv, int run_idx)
9701f16c575SPeter Zijlstra {
9711f16c575SPeter Zijlstra int ret;
9721f16c575SPeter Zijlstra
9731f16c575SPeter Zijlstra if (pre_cmd) {
9741f16c575SPeter Zijlstra ret = system(pre_cmd);
9751f16c575SPeter Zijlstra if (ret)
9761f16c575SPeter Zijlstra return ret;
9771f16c575SPeter Zijlstra }
9781f16c575SPeter Zijlstra
9791f16c575SPeter Zijlstra if (sync_run)
9801f16c575SPeter Zijlstra sync();
9811f16c575SPeter Zijlstra
982e55c14afSJiri Olsa ret = __run_perf_stat(argc, argv, run_idx);
9831f16c575SPeter Zijlstra if (ret)
9841f16c575SPeter Zijlstra return ret;
9851f16c575SPeter Zijlstra
9861f16c575SPeter Zijlstra if (post_cmd) {
9871f16c575SPeter Zijlstra ret = system(post_cmd);
9881f16c575SPeter Zijlstra if (ret)
9891f16c575SPeter Zijlstra return ret;
9901f16c575SPeter Zijlstra }
9911f16c575SPeter Zijlstra
9921f16c575SPeter Zijlstra return ret;
9931f16c575SPeter Zijlstra }
9941f16c575SPeter Zijlstra
print_counters(struct timespec * ts,int argc,const char ** argv)995a5a9eac1SJiri Olsa static void print_counters(struct timespec *ts, int argc, const char **argv)
996a5a9eac1SJiri Olsa {
9970174820aSJiri Olsa /* Do not print anything if we record to the pipe. */
9980174820aSJiri Olsa if (STAT_RECORD && perf_stat.data.is_pipe)
9990174820aSJiri Olsa return;
1000a527c2c1SJames Clark if (quiet)
100155a4de94SAndi Kleen return;
10020174820aSJiri Olsa
100371273724SArnaldo Carvalho de Melo evlist__print_counters(evsel_list, &stat_config, &target, ts, argc, argv);
1004a5a9eac1SJiri Olsa }
1005a5a9eac1SJiri Olsa
100601513fdcSIan Rogers static volatile sig_atomic_t signr = -1;
1007f7b7c26eSPeter Zijlstra
/*
 * Signal handler installed for SIGINT & co.  Marks the run as done when
 * there is no child workload (or interval mode is active) and remembers
 * the signal so sig_atexit() can re-raise it with default disposition.
 */
static void skip_signal(int signo)
{
	if ((child_pid == -1) || stat_config.interval)
		done = 1;

	signr = signo;
	/*
	 * render child_pid harmless
	 * won't send SIGTERM to a random
	 * process in case of race condition
	 * and fast PID recycling
	 */
	child_pid = -1;
}
1022f7b7c26eSPeter Zijlstra
/*
 * atexit() hook: terminate a still-running child workload and, if we
 * exited because of a caught signal, re-raise it with the default
 * disposition so the shell sees the true cause of death.
 */
static void sig_atexit(void)
{
	sigset_t set, oset;

	/*
	 * avoid race condition with SIGCHLD handler
	 * in skip_signal() which is modifying child_pid
	 * goal is to avoid send SIGTERM to a random
	 * process
	 */
	sigemptyset(&set);
	sigaddset(&set, SIGCHLD);
	sigprocmask(SIG_BLOCK, &set, &oset);

	if (child_pid != -1)
		kill(child_pid, SIGTERM);

	sigprocmask(SIG_SETMASK, &oset, NULL);

	/* No signal recorded: normal exit, nothing more to do. */
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}
104886470930SIngo Molnar
perf_stat__set_big_num(int set)1049d778a778SPaul A. Clarke void perf_stat__set_big_num(int set)
1050d778a778SPaul A. Clarke {
1051d778a778SPaul A. Clarke stat_config.big_num = (set != 0);
1052d778a778SPaul A. Clarke }
1053d778a778SPaul A. Clarke
perf_stat__set_no_csv_summary(int set)10540bdad978SJin Yao void perf_stat__set_no_csv_summary(int set)
10550bdad978SJin Yao {
10560bdad978SJin Yao stat_config.no_csv_summary = (set != 0);
10570bdad978SJin Yao }
10580bdad978SJin Yao
/*
 * Option callback for -B/--big-num: record that the user made an
 * explicit choice and forward it to the stat config.
 */
static int stat__set_big_num(const struct option *opt __maybe_unused,
			     const char *s __maybe_unused, int unset)
{
	big_num_opt = !unset;
	perf_stat__set_big_num(big_num_opt);
	return 0;
}
1066d7470b6aSStephane Eranian
/*
 * Option callback for --metric-only: remember the user forced the mode
 * so later logic won't override it, then apply (or negate) the setting.
 */
static int enable_metric_only(const struct option *opt __maybe_unused,
			      const char *s __maybe_unused, int unset)
{
	force_metric_only = true;
	stat_config.metric_only = unset ? false : true;
	return 0;
}
107444b1e60aSAndi Kleen
/*
 * Option callback for -M/--metrics: accumulate every occurrence into
 * the file-level `metrics` string as a comma-separated list.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int append_metric_groups(const struct option *opt __maybe_unused,
				const char *str,
				int unset __maybe_unused)
{
	char *joined;

	/* First -M: just take a copy of the argument. */
	if (!metrics) {
		metrics = strdup(str);
		return metrics ? 0 : -ENOMEM;
	}

	/* Subsequent -M: append to the existing comma-separated list. */
	if (asprintf(&joined, "%s,%s", metrics, str) < 0)
		return -ENOMEM;
	free(metrics);
	metrics = joined;
	return 0;
}
1093b18f3e36SAndi Kleen
/*
 * Option callback for --control: parse the fd:/fifo: specification and
 * store the resulting control/ack file descriptors in the stat config.
 */
static int parse_control_option(const struct option *opt,
				const char *str,
				int unset __maybe_unused)
{
	struct perf_stat_config *config = opt->value;

	return evlist__parse_control(str, &config->ctl_fd, &config->ctl_fd_ack, &config->ctl_fd_close);
}
1102a8fcbd26SAdrian Hunter
parse_stat_cgroups(const struct option * opt,const char * str,int unset)1103d1c5a0e8SNamhyung Kim static int parse_stat_cgroups(const struct option *opt,
1104d1c5a0e8SNamhyung Kim const char *str, int unset)
1105d1c5a0e8SNamhyung Kim {
1106d1c5a0e8SNamhyung Kim if (stat_config.cgroup_list) {
1107d1c5a0e8SNamhyung Kim pr_err("--cgroup and --for-each-cgroup cannot be used together\n");
1108d1c5a0e8SNamhyung Kim return -1;
1109d1c5a0e8SNamhyung Kim }
1110d1c5a0e8SNamhyung Kim
1111d1c5a0e8SNamhyung Kim return parse_cgroups(opt, str, unset);
1112d1c5a0e8SNamhyung Kim }
1113d1c5a0e8SNamhyung Kim
/*
 * Option callback for --cputype: restrict event parsing to one PMU.
 * Must appear before any -e/-M option, since already-parsed events
 * cannot be re-filtered.
 */
static int parse_cputype(const struct option *opt,
			 const char *str,
			 int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;
	const struct perf_pmu *pmu;

	/* Too late once events exist in the list. */
	if (!list_empty(&evlist->core.entries)) {
		fprintf(stderr, "Must define cputype before events/metrics\n");
		return -1;
	}

	pmu = perf_pmus__pmu_for_pmu_filter(str);
	if (!pmu) {
		fprintf(stderr, "--cputype %s is not supported!\n", str);
		return -1;
	}
	parse_events_option_args.pmu_filter = pmu->name;

	return 0;
}
1135e69dc842SJin Yao
/*
 * Option callback for --per-cache[=LX]: select cache-level aggregation.
 * Without an argument, a level above MAX_CACHE_LVL is stored, which the
 * aggregation code interprets as the Last Level Cache (LLC).
 * Returns 0 on success, -EINVAL on a malformed level specification.
 */
static int parse_cache_level(const struct option *opt,
			     const char *str,
			     int unset __maybe_unused)
{
	u32 *aggr_mode = (u32 *)opt->value;
	u32 *aggr_level = (u32 *)opt->data;
	int level;

	if (str == NULL) {
		/* No explicit level: aggregate at the LLC. */
		level = MAX_CACHE_LVL + 1;
	} else {
		/* Accept exactly "LX" or "lX" where X is the cache level. */
		if (strlen(str) != 2 || (str[0] != 'l' && str[0] != 'L')) {
			pr_err("Cache level must be of form L[1-%d], or l[1-%d]\n",
			       MAX_CACHE_LVL,
			       MAX_CACHE_LVL);
			return -EINVAL;
		}

		level = atoi(&str[1]);
		if (level < 1) {
			pr_err("Cache level must be of form L[1-%d], or l[1-%d]\n",
			       MAX_CACHE_LVL,
			       MAX_CACHE_LVL);
			return -EINVAL;
		}

		if (level > MAX_CACHE_LVL) {
			pr_err("perf only supports max cache level of %d.\n"
			       "Consider increasing MAX_CACHE_LVL\n", MAX_CACHE_LVL);
			return -EINVAL;
		}
	}

	*aggr_mode = AGGR_CACHE;
	*aggr_level = level;
	return 0;
}
1184aab667caSK Prateek Nayak
118551433eadSMichael Petlan static struct option stat_options[] = {
1186e0547311SJiri Olsa OPT_BOOLEAN('T', "transaction", &transaction_run,
1187e0547311SJiri Olsa "hardware transaction statistics"),
1188411ad22eSIan Rogers OPT_CALLBACK('e', "event", &parse_events_option_args, "event",
1189e0547311SJiri Olsa "event selector. use 'perf list' to list available events",
1190e0547311SJiri Olsa parse_events_option),
1191e0547311SJiri Olsa OPT_CALLBACK(0, "filter", &evsel_list, "filter",
1192e0547311SJiri Olsa "event filter", parse_filter),
11935698f26bSJiri Olsa OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit,
1194e0547311SJiri Olsa "child tasks do not inherit counters"),
1195e0547311SJiri Olsa OPT_STRING('p', "pid", &target.pid, "pid",
1196e0547311SJiri Olsa "stat events on existing process id"),
1197e0547311SJiri Olsa OPT_STRING('t', "tid", &target.tid, "tid",
1198e0547311SJiri Olsa "stat events on existing thread id"),
1199fa853c4bSSong Liu #ifdef HAVE_BPF_SKEL
1200fa853c4bSSong Liu OPT_STRING('b', "bpf-prog", &target.bpf_str, "bpf-prog-id",
1201fa853c4bSSong Liu "stat events on existing bpf program id"),
12027fac83aaSSong Liu OPT_BOOLEAN(0, "bpf-counters", &target.use_bpf,
12037fac83aaSSong Liu "use bpf program to count events"),
12047fac83aaSSong Liu OPT_STRING(0, "bpf-attr-map", &target.attr_map, "attr-map-path",
12057fac83aaSSong Liu "path to perf_event_attr map"),
1206fa853c4bSSong Liu #endif
1207e0547311SJiri Olsa OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
1208e0547311SJiri Olsa "system-wide collection from all CPUs"),
120975998bb2SAndi Kleen OPT_BOOLEAN(0, "scale", &stat_config.scale,
121075998bb2SAndi Kleen "Use --no-scale to disable counter scaling for multiplexing"),
1211e0547311SJiri Olsa OPT_INCR('v', "verbose", &verbose,
1212e0547311SJiri Olsa "be more verbose (show counter open errors, etc)"),
1213d97ae04bSJiri Olsa OPT_INTEGER('r', "repeat", &stat_config.run_count,
1214e0547311SJiri Olsa "repeat command and print average + stddev (max: 100, forever: 0)"),
121554ac0b1bSJiri Olsa OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table,
1216e55c14afSJiri Olsa "display details about each run (only with -r option)"),
1217aea0dca1SJiri Olsa OPT_BOOLEAN('n', "null", &stat_config.null_run,
1218e0547311SJiri Olsa "null run - dont start any counters"),
1219e0547311SJiri Olsa OPT_INCR('d', "detailed", &detailed_run,
1220e0547311SJiri Olsa "detailed run - start a lot of events"),
1221e0547311SJiri Olsa OPT_BOOLEAN('S', "sync", &sync_run,
1222e0547311SJiri Olsa "call sync() before starting a run"),
1223e0547311SJiri Olsa OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
1224e0547311SJiri Olsa "print large numbers with thousands\' separators",
1225e0547311SJiri Olsa stat__set_big_num),
1226e0547311SJiri Olsa OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
1227e0547311SJiri Olsa "list of cpus to monitor in system-wide"),
1228e0547311SJiri Olsa OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
1229e0547311SJiri Olsa "disable CPU count aggregation", AGGR_NONE),
1230fdee335bSJiri Olsa OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"),
12312c8e6451SZhengjun Xing OPT_BOOLEAN(0, "hybrid-merge", &stat_config.hybrid_merge,
12322c8e6451SZhengjun Xing "Merge identical named hybrid events"),
1233fa7070a3SJiri Olsa OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator",
1234e0547311SJiri Olsa "print counts with custom separator"),
1235df936cadSClaire Jensen OPT_BOOLEAN('j', "json-output", &stat_config.json_output,
1236df936cadSClaire Jensen "print counts in JSON format"),
1237e0547311SJiri Olsa OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
1238d1c5a0e8SNamhyung Kim "monitor event in cgroup name only", parse_stat_cgroups),
1239d1c5a0e8SNamhyung Kim OPT_STRING(0, "for-each-cgroup", &stat_config.cgroup_list, "name",
1240d1c5a0e8SNamhyung Kim "expand events for each cgroup"),
1241e0547311SJiri Olsa OPT_STRING('o', "output", &output_name, "file", "output file name"),
1242e0547311SJiri Olsa OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
1243e0547311SJiri Olsa OPT_INTEGER(0, "log-fd", &output_fd,
1244e0547311SJiri Olsa "log output to fd, instead of stderr"),
1245e0547311SJiri Olsa OPT_STRING(0, "pre", &pre_cmd, "command",
1246e0547311SJiri Olsa "command to run prior to the measured command"),
1247e0547311SJiri Olsa OPT_STRING(0, "post", &post_cmd, "command",
1248e0547311SJiri Olsa "command to run after to the measured command"),
1249e0547311SJiri Olsa OPT_UINTEGER('I', "interval-print", &stat_config.interval,
12509dc9a95fSAlexey Budankov "print counts at regular interval in ms "
12519dc9a95fSAlexey Budankov "(overhead is possible for values <= 100ms)"),
1252db06a269Syuzhoujian OPT_INTEGER(0, "interval-count", &stat_config.times,
1253db06a269Syuzhoujian "print counts for fixed number of times"),
1254132c6ba3SJiri Olsa OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear,
12559660e08eSJiri Olsa "clear screen in between new interval"),
1256f1f8ad52Syuzhoujian OPT_UINTEGER(0, "timeout", &stat_config.timeout,
1257f1f8ad52Syuzhoujian "stop workload and print counts after a timeout period in ms (>= 10ms)"),
1258e0547311SJiri Olsa OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
1259e0547311SJiri Olsa "aggregate counts per processor socket", AGGR_SOCKET),
1260db5742b6SKan Liang OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode,
1261db5742b6SKan Liang "aggregate counts per processor die", AGGR_DIE),
1262aab667caSK Prateek Nayak OPT_CALLBACK_OPTARG(0, "per-cache", &stat_config.aggr_mode, &stat_config.aggr_level,
1263aab667caSK Prateek Nayak "cache level", "aggregate count at this cache level (Default: LLC)",
1264aab667caSK Prateek Nayak parse_cache_level),
1265e0547311SJiri Olsa OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
1266e0547311SJiri Olsa "aggregate counts per physical processor core", AGGR_CORE),
1267e0547311SJiri Olsa OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
1268e0547311SJiri Olsa "aggregate counts per thread", AGGR_THREAD),
126986895b48SJiri Olsa OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode,
127086895b48SJiri Olsa "aggregate counts per numa node", AGGR_NODE),
127125f69c69SChangbin Du OPT_INTEGER('D', "delay", &target.initial_delay,
12722162b9c6SAlexey Budankov "ms to wait before starting measurement after program start (-1: start with events disabled)"),
12730ce5aa02SJiri Olsa OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
127444b1e60aSAndi Kleen "Only print computed metrics. No raw values", enable_metric_only),
127505530a79SIan Rogers OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group,
127605530a79SIan Rogers "don't group metric events, impacts multiplexing"),
127705530a79SIan Rogers OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge,
127805530a79SIan Rogers "don't try to share events between metrics in a group"),
12791fd09e29SIan Rogers OPT_BOOLEAN(0, "metric-no-threshold", &stat_config.metric_no_threshold,
12801fd09e29SIan Rogers "don't try to share events between metrics in a group "),
128144b1e60aSAndi Kleen OPT_BOOLEAN(0, "topdown", &topdown_run,
128263e39aa6SKan Liang "measure top-down statistics"),
128363e39aa6SKan Liang OPT_UINTEGER(0, "td-level", &stat_config.topdown_level,
128463e39aa6SKan Liang "Set the metrics level for the top-down statistics (0: max level)"),
1285daefd0bcSKan Liang OPT_BOOLEAN(0, "smi-cost", &smi_cost,
1286daefd0bcSKan Liang "measure SMI cost"),
1287b18f3e36SAndi Kleen OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
1288b18f3e36SAndi Kleen "monitor specified metrics or metric groups (separated by ,)",
1289a4b8cfcaSIan Rogers append_metric_groups),
1290dd071024SJin Yao OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel,
1291dd071024SJin Yao "Configure all used events to run in kernel space.",
1292dd071024SJin Yao PARSE_OPT_EXCLUSIVE),
1293dd071024SJin Yao OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user,
1294dd071024SJin Yao "Configure all used events to run in user space.",
1295dd071024SJin Yao PARSE_OPT_EXCLUSIVE),
12961af62ce6SJin Yao OPT_BOOLEAN(0, "percore-show-thread", &stat_config.percore_show_thread,
12971af62ce6SJin Yao "Use with 'percore' event qualifier to show the event "
12981af62ce6SJin Yao "counts of one hardware thread by sum up total hardware "
12991af62ce6SJin Yao "threads of same physical core"),
1300ee6a9614SJin Yao OPT_BOOLEAN(0, "summary", &stat_config.summary,
1301ee6a9614SJin Yao "print summary for interval mode"),
13020bdad978SJin Yao OPT_BOOLEAN(0, "no-csv-summary", &stat_config.no_csv_summary,
13030bdad978SJin Yao "don't print 'summary' for CSV summary output"),
1304a527c2c1SJames Clark OPT_BOOLEAN(0, "quiet", &quiet,
1305a527c2c1SJames Clark "don't print any output, messages or warnings (useful with record)"),
1306e69dc842SJin Yao OPT_CALLBACK(0, "cputype", &evsel_list, "hybrid cpu type",
1307e69dc842SJin Yao "Only enable events on applying cpu with this type "
1308e69dc842SJin Yao "for hybrid platform (e.g. core or atom)",
1309003be8c4SIan Rogers parse_cputype),
131070943490SStephane Eranian #ifdef HAVE_LIBPFM
131170943490SStephane Eranian OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
131270943490SStephane Eranian "libpfm4 event selector. use 'perf list' to list available events",
131370943490SStephane Eranian parse_libpfm_events_option),
131470943490SStephane Eranian #endif
1315a8fcbd26SAdrian Hunter OPT_CALLBACK(0, "control", &stat_config, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
131627e9769aSAlexey Budankov "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events).\n"
1317a8fcbd26SAdrian Hunter "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
1318a8fcbd26SAdrian Hunter "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
131927e9769aSAlexey Budankov parse_control_option),
1320f07952b1SAlexander Antonov OPT_CALLBACK_OPTARG(0, "iostat", &evsel_list, &stat_config, "default",
1321f07952b1SAlexander Antonov "measure I/O performance metrics provided by arch/platform",
1322f07952b1SAlexander Antonov iostat_parse),
1323e0547311SJiri Olsa OPT_END()
1324e0547311SJiri Olsa };
1325e0547311SJiri Olsa
1326995ed074SK Prateek Nayak /**
1327995ed074SK Prateek Nayak * Calculate the cache instance ID from the map in
1328995ed074SK Prateek Nayak * /sys/devices/system/cpu/cpuX/cache/indexY/shared_cpu_list
1329995ed074SK Prateek Nayak * Cache instance ID is the first CPU reported in the shared_cpu_list file.
1330995ed074SK Prateek Nayak */
cpu__get_cache_id_from_map(struct perf_cpu cpu,char * map)1331995ed074SK Prateek Nayak static int cpu__get_cache_id_from_map(struct perf_cpu cpu, char *map)
1332995ed074SK Prateek Nayak {
1333995ed074SK Prateek Nayak int id;
1334995ed074SK Prateek Nayak struct perf_cpu_map *cpu_map = perf_cpu_map__new(map);
1335995ed074SK Prateek Nayak
1336995ed074SK Prateek Nayak /*
1337995ed074SK Prateek Nayak * If the map contains no CPU, consider the current CPU to
1338995ed074SK Prateek Nayak * be the first online CPU in the cache domain else use the
1339995ed074SK Prateek Nayak * first online CPU of the cache domain as the ID.
1340995ed074SK Prateek Nayak */
1341995ed074SK Prateek Nayak if (perf_cpu_map__empty(cpu_map))
1342995ed074SK Prateek Nayak id = cpu.cpu;
1343995ed074SK Prateek Nayak else
1344995ed074SK Prateek Nayak id = perf_cpu_map__cpu(cpu_map, 0).cpu;
1345995ed074SK Prateek Nayak
1346995ed074SK Prateek Nayak /* Free the perf_cpu_map used to find the cache ID */
1347995ed074SK Prateek Nayak perf_cpu_map__put(cpu_map);
1348995ed074SK Prateek Nayak
1349995ed074SK Prateek Nayak return id;
1350995ed074SK Prateek Nayak }
1351995ed074SK Prateek Nayak
/**
 * cpu__get_cache_details - Returns 0 if successful in populating the
 * cache level and cache id. Cache level is read from
 * /sys/devices/system/cpu/cpuX/cache/indexY/level where as cache instance ID
 * is the first CPU reported by
 * /sys/devices/system/cpu/cpuX/cache/indexY/shared_cpu_list
 */
static int cpu__get_cache_details(struct perf_cpu cpu, struct perf_cache *cache)
{
	int ret = 0;
	u32 cache_level = stat_config.aggr_level;
	struct cpu_cache_level caches[MAX_CACHE_LVL];
	u32 i = 0, caches_cnt = 0;

	/* Defaults: level 0 when the requested level is out of range, and an
	 * invalid (-1) instance ID until one is found below. */
	cache->cache_lvl = (cache_level > MAX_CACHE_LVL) ? 0 : cache_level;
	cache->cache = -1;

	ret = build_caches_for_cpu(cpu.cpu, caches, &caches_cnt);
	if (ret) {
		/*
		 * If caches_cnt is not 0, cpu_cache_level data
		 * was allocated when building the topology.
		 * Free the allocated data before returning.
		 */
		if (caches_cnt)
			goto free_caches;

		return ret;
	}

	if (!caches_cnt)
		return -1;

	/*
	 * Save the data for the highest level if no
	 * level was specified by the user.
	 */
	if (cache_level > MAX_CACHE_LVL) {
		int max_level_index = 0;

		for (i = 1; i < caches_cnt; ++i) {
			if (caches[i].level > caches[max_level_index].level)
				max_level_index = i;
		}

		cache->cache_lvl = caches[max_level_index].level;
		cache->cache = cpu__get_cache_id_from_map(cpu, caches[max_level_index].map);

		/* Reset i to 0 to free entire caches[] */
		i = 0;
		goto free_caches;
	}

	/* A specific level was requested: record it when found, freeing each
	 * entry as the array is walked. */
	for (i = 0; i < caches_cnt; ++i) {
		if (caches[i].level == cache_level) {
			cache->cache_lvl = cache_level;
			cache->cache = cpu__get_cache_id_from_map(cpu, caches[i].map);
		}

		cpu_cache_level__free(&caches[i]);
	}

free_caches:
	/*
	 * Free all the allocated cpu_cache_level data.
	 * i marks the first entry not yet freed.
	 */
	while (i < caches_cnt)
		cpu_cache_level__free(&caches[i++]);

	return ret;
}
1423995ed074SK Prateek Nayak
1424995ed074SK Prateek Nayak /**
1425995ed074SK Prateek Nayak * aggr_cpu_id__cache - Create an aggr_cpu_id with cache instache ID, cache
1426995ed074SK Prateek Nayak * level, die and socket populated with the cache instache ID, cache level,
1427995ed074SK Prateek Nayak * die and socket for cpu. The function signature is compatible with
1428995ed074SK Prateek Nayak * aggr_cpu_id_get_t.
1429995ed074SK Prateek Nayak */
aggr_cpu_id__cache(struct perf_cpu cpu,void * data)1430995ed074SK Prateek Nayak static struct aggr_cpu_id aggr_cpu_id__cache(struct perf_cpu cpu, void *data)
1431995ed074SK Prateek Nayak {
1432995ed074SK Prateek Nayak int ret;
1433995ed074SK Prateek Nayak struct aggr_cpu_id id;
1434995ed074SK Prateek Nayak struct perf_cache cache;
1435995ed074SK Prateek Nayak
1436995ed074SK Prateek Nayak id = aggr_cpu_id__die(cpu, data);
1437995ed074SK Prateek Nayak if (aggr_cpu_id__is_empty(&id))
1438995ed074SK Prateek Nayak return id;
1439995ed074SK Prateek Nayak
1440995ed074SK Prateek Nayak ret = cpu__get_cache_details(cpu, &cache);
1441995ed074SK Prateek Nayak if (ret)
1442995ed074SK Prateek Nayak return id;
1443995ed074SK Prateek Nayak
1444995ed074SK Prateek Nayak id.cache_lvl = cache.cache_lvl;
1445995ed074SK Prateek Nayak id.cache = cache.cache;
1446995ed074SK Prateek Nayak return id;
1447995ed074SK Prateek Nayak }
1448995ed074SK Prateek Nayak
/* Human-readable name for each aggregation mode, indexed by enum aggr_mode;
 * used in error messages (see perf_stat_init_aggr_mode()). */
static const char *const aggr_mode__string[] = {
	[AGGR_CORE] = "core",
	[AGGR_CACHE] = "cache",
	[AGGR_DIE] = "die",
	[AGGR_GLOBAL] = "global",
	[AGGR_NODE] = "node",
	[AGGR_NONE] = "none",
	[AGGR_SOCKET] = "socket",
	[AGGR_THREAD] = "thread",
	[AGGR_UNSET] = "unset",
};
14605f50e15cSIan Rogers
/*
 * aggr_get_id_t adapters for a live session: each forwards to the
 * corresponding aggr_cpu_id__*() constructor with no private data.
 */
static struct aggr_cpu_id perf_stat__get_socket(struct perf_stat_config *config __maybe_unused,
						struct perf_cpu cpu)
{
	return aggr_cpu_id__socket(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_die(struct perf_stat_config *config __maybe_unused,
					     struct perf_cpu cpu)
{
	return aggr_cpu_id__die(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_cache_id(struct perf_stat_config *config __maybe_unused,
						  struct perf_cpu cpu)
{
	return aggr_cpu_id__cache(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_core(struct perf_stat_config *config __maybe_unused,
					      struct perf_cpu cpu)
{
	return aggr_cpu_id__core(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_node(struct perf_stat_config *config __maybe_unused,
					      struct perf_cpu cpu)
{
	return aggr_cpu_id__node(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_global(struct perf_stat_config *config __maybe_unused,
						struct perf_cpu cpu)
{
	return aggr_cpu_id__global(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_cpu(struct perf_stat_config *config __maybe_unused,
					     struct perf_cpu cpu)
{
	return aggr_cpu_id__cpu(cpu, /*data=*/NULL);
}
15028938cfa7SNamhyung Kim
/*
 * Look up the aggregation ID for @cpu, computing it with @get_id on first
 * use and memoizing the result in config->cpus_aggr_map.
 */
static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config,
					      aggr_get_id_t get_id, struct perf_cpu cpu)
{
	struct aggr_cpu_id *cached;

	/* per-process mode - should use global aggr mode */
	if (cpu.cpu == -1)
		return get_id(config, cpu);

	cached = &config->cpus_aggr_map->map[cpu.cpu];

	/* Compute the ID once per CPU and reuse it afterwards. */
	if (aggr_cpu_id__is_empty(cached))
		*cached = get_id(config, cpu);

	return *cached;
}
15181e5a2931SJiri Olsa
/*
 * Memoizing variants of the per-CPU ID lookups: each routes through
 * perf_stat__get_aggr() so the ID for a given CPU is computed once and
 * cached in stat_config.cpus_aggr_map.
 */
static struct aggr_cpu_id perf_stat__get_socket_cached(struct perf_stat_config *config,
						       struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_socket, cpu);
}

static struct aggr_cpu_id perf_stat__get_die_cached(struct perf_stat_config *config,
						    struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_die, cpu);
}

static struct aggr_cpu_id perf_stat__get_cache_id_cached(struct perf_stat_config *config,
							 struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_cache_id, cpu);
}

static struct aggr_cpu_id perf_stat__get_core_cached(struct perf_stat_config *config,
						     struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_core, cpu);
}

static struct aggr_cpu_id perf_stat__get_node_cached(struct perf_stat_config *config,
						     struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_node, cpu);
}

static struct aggr_cpu_id perf_stat__get_global_cached(struct perf_stat_config *config,
						       struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_global, cpu);
}

static struct aggr_cpu_id perf_stat__get_cpu_cached(struct perf_stat_config *config,
						    struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_cpu, cpu);
}
15608938cfa7SNamhyung Kim
/*
 * Map an aggregation mode to the aggr_cpu_id constructor used when
 * building the aggregation map for a live session. Returns NULL for
 * modes with no per-CPU constructor (AGGR_THREAD is handled separately
 * in perf_stat_init_aggr_mode()).
 */
static aggr_cpu_id_get_t aggr_mode__get_aggr(enum aggr_mode aggr_mode)
{
	switch (aggr_mode) {
	case AGGR_SOCKET:
		return aggr_cpu_id__socket;
	case AGGR_DIE:
		return aggr_cpu_id__die;
	case AGGR_CACHE:
		return aggr_cpu_id__cache;
	case AGGR_CORE:
		return aggr_cpu_id__core;
	case AGGR_NODE:
		return aggr_cpu_id__node;
	case AGGR_NONE:
		return aggr_cpu_id__cpu;
	case AGGR_GLOBAL:
		return aggr_cpu_id__global;
	case AGGR_THREAD:
	case AGGR_UNSET:
	case AGGR_MAX:
	default:
		return NULL;
	}
}
15855f50e15cSIan Rogers
/*
 * Map an aggregation mode to the cached (memoizing) ID lookup used at
 * counter read time. Must stay in sync with aggr_mode__get_aggr() above:
 * any mode with a constructor there needs a cached getter here.
 */
static aggr_get_id_t aggr_mode__get_id(enum aggr_mode aggr_mode)
{
	switch (aggr_mode) {
	case AGGR_SOCKET:
		return perf_stat__get_socket_cached;
	case AGGR_DIE:
		return perf_stat__get_die_cached;
	case AGGR_CACHE:
		return perf_stat__get_cache_id_cached;
	case AGGR_CORE:
		return perf_stat__get_core_cached;
	case AGGR_NODE:
		return perf_stat__get_node_cached;
	case AGGR_NONE:
		return perf_stat__get_cpu_cached;
	case AGGR_GLOBAL:
		return perf_stat__get_global_cached;
	case AGGR_THREAD:
	case AGGR_UNSET:
	case AGGR_MAX:
	default:
		return NULL;
	}
}
16105f50e15cSIan Rogers
/*
 * Build stat_config.aggr_map (and, for CPU-based modes, the memoization
 * map cpus_aggr_map) according to stat_config.aggr_mode. Returns 0 on
 * success, a negative value on allocation/build failure.
 */
static int perf_stat_init_aggr_mode(void)
{
	int nr;
	aggr_cpu_id_get_t get_id = aggr_mode__get_aggr(stat_config.aggr_mode);

	if (get_id) {
		/* AGGR_NONE keeps CPUs in user-requested order; other modes sort. */
		bool needs_sort = stat_config.aggr_mode != AGGR_NONE;
		stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
							 get_id, /*data=*/NULL, needs_sort);
		if (!stat_config.aggr_map) {
			pr_err("cannot build %s map\n", aggr_mode__string[stat_config.aggr_mode]);
			return -1;
		}
		stat_config.aggr_get_id = aggr_mode__get_id(stat_config.aggr_mode);
	}

	/* Thread mode has no per-CPU constructor (get_id is NULL above);
	 * build an aggregation map with one entry per monitored thread. */
	if (stat_config.aggr_mode == AGGR_THREAD) {
		nr = perf_thread_map__nr(evsel_list->core.threads);
		stat_config.aggr_map = cpu_aggr_map__empty_new(nr);
		if (stat_config.aggr_map == NULL)
			return -ENOMEM;

		for (int s = 0; s < nr; s++) {
			struct aggr_cpu_id id = aggr_cpu_id__empty();

			id.thread_idx = s;
			stat_config.aggr_map->map[s] = id;
		}
		return 0;
	}

	/*
	 * The evsel_list->cpus is the base we operate on,
	 * taking the highest cpu number to be the size of
	 * the aggregation translate cpumap.
	 */
	if (!perf_cpu_map__empty(evsel_list->core.user_requested_cpus))
		nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
	else
		nr = 0;
	stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1);
	return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
}
165486ee6e18SStephane Eranian
cpu_aggr_map__delete(struct cpu_aggr_map * map)1655d526e1a0SJames Clark static void cpu_aggr_map__delete(struct cpu_aggr_map *map)
1656d526e1a0SJames Clark {
1657d526e1a0SJames Clark if (map) {
1658d526e1a0SJames Clark WARN_ONCE(refcount_read(&map->refcnt) != 0,
1659d526e1a0SJames Clark "cpu_aggr_map refcnt unbalanced\n");
1660d526e1a0SJames Clark free(map);
1661d526e1a0SJames Clark }
1662d526e1a0SJames Clark }
1663d526e1a0SJames Clark
cpu_aggr_map__put(struct cpu_aggr_map * map)1664d526e1a0SJames Clark static void cpu_aggr_map__put(struct cpu_aggr_map *map)
1665d526e1a0SJames Clark {
1666d526e1a0SJames Clark if (map && refcount_dec_and_test(&map->refcnt))
1667d526e1a0SJames Clark cpu_aggr_map__delete(map);
1668d526e1a0SJames Clark }
1669d526e1a0SJames Clark
perf_stat__exit_aggr_mode(void)1670544c2ae7SMasami Hiramatsu static void perf_stat__exit_aggr_mode(void)
1671544c2ae7SMasami Hiramatsu {
1672d526e1a0SJames Clark cpu_aggr_map__put(stat_config.aggr_map);
1673d526e1a0SJames Clark cpu_aggr_map__put(stat_config.cpus_aggr_map);
16746f6b6594SJiri Olsa stat_config.aggr_map = NULL;
16756f6b6594SJiri Olsa stat_config.cpus_aggr_map = NULL;
1676544c2ae7SMasami Hiramatsu }
1677544c2ae7SMasami Hiramatsu
perf_env__get_socket_aggr_by_cpu(struct perf_cpu cpu,void * data)16786d18804bSIan Rogers static struct aggr_cpu_id perf_env__get_socket_aggr_by_cpu(struct perf_cpu cpu, void *data)
167968d702f7SJiri Olsa {
168068d702f7SJiri Olsa struct perf_env *env = data;
168151b826faSIan Rogers struct aggr_cpu_id id = aggr_cpu_id__empty();
168268d702f7SJiri Olsa
16836d18804bSIan Rogers if (cpu.cpu != -1)
16846d18804bSIan Rogers id.socket = env->cpu[cpu.cpu].socket_id;
16852760f5a1SJames Clark
16862760f5a1SJames Clark return id;
168768d702f7SJiri Olsa }
168868d702f7SJiri Olsa
perf_env__get_die_aggr_by_cpu(struct perf_cpu cpu,void * data)16896d18804bSIan Rogers static struct aggr_cpu_id perf_env__get_die_aggr_by_cpu(struct perf_cpu cpu, void *data)
1690db5742b6SKan Liang {
1691db5742b6SKan Liang struct perf_env *env = data;
169251b826faSIan Rogers struct aggr_cpu_id id = aggr_cpu_id__empty();
1693db5742b6SKan Liang
16946d18804bSIan Rogers if (cpu.cpu != -1) {
1695db5742b6SKan Liang /*
16961a270cb6SJames Clark * die_id is relative to socket, so start
16971a270cb6SJames Clark * with the socket ID and then add die to
16981a270cb6SJames Clark * make a unique ID.
1699db5742b6SKan Liang */
17006d18804bSIan Rogers id.socket = env->cpu[cpu.cpu].socket_id;
17016d18804bSIan Rogers id.die = env->cpu[cpu.cpu].die_id;
1702db5742b6SKan Liang }
1703db5742b6SKan Liang
17042760f5a1SJames Clark return id;
1705db5742b6SKan Liang }
1706db5742b6SKan Liang
/*
 * Populate id->cache_lvl and id->cache for @cpu from the cache topology
 * recorded in @env. If @cache_level is out of range (> MAX_CACHE_LVL),
 * the highest level containing the CPU is used (the walk below goes from
 * the last recorded entry backwards). Leaves id->cache == -1 when no
 * matching cache is found.
 */
static void perf_env__get_cache_id_for_cpu(struct perf_cpu cpu, struct perf_env *env,
					   u32 cache_level, struct aggr_cpu_id *id)
{
	int i;
	int caches_cnt = env->caches_cnt;
	struct cpu_cache_level *caches = env->caches;

	id->cache_lvl = (cache_level > MAX_CACHE_LVL) ? 0 : cache_level;
	id->cache = -1;

	if (!caches_cnt)
		return;

	for (i = caches_cnt - 1; i > -1; --i) {
		struct perf_cpu_map *cpu_map;
		int map_contains_cpu;

		/*
		 * If user has not specified a level, find the fist level with
		 * the cpu in the map. Since building the map is expensive, do
		 * this only if levels match.
		 */
		if (cache_level <= MAX_CACHE_LVL && caches[i].level != cache_level)
			continue;

		cpu_map = perf_cpu_map__new(caches[i].map);
		map_contains_cpu = perf_cpu_map__idx(cpu_map, cpu);
		perf_cpu_map__put(cpu_map);

		/* First entry (walking backwards) whose map contains the CPU wins. */
		if (map_contains_cpu != -1) {
			id->cache_lvl = caches[i].level;
			id->cache = cpu__get_cache_id_from_map(cpu, caches[i].map);
			return;
		}
	}
}
1743995ed074SK Prateek Nayak
perf_env__get_cache_aggr_by_cpu(struct perf_cpu cpu,void * data)1744995ed074SK Prateek Nayak static struct aggr_cpu_id perf_env__get_cache_aggr_by_cpu(struct perf_cpu cpu,
1745995ed074SK Prateek Nayak void *data)
1746995ed074SK Prateek Nayak {
1747995ed074SK Prateek Nayak struct perf_env *env = data;
1748995ed074SK Prateek Nayak struct aggr_cpu_id id = aggr_cpu_id__empty();
1749995ed074SK Prateek Nayak
1750995ed074SK Prateek Nayak if (cpu.cpu != -1) {
1751995ed074SK Prateek Nayak u32 cache_level = (perf_stat.aggr_level) ?: stat_config.aggr_level;
1752995ed074SK Prateek Nayak
1753995ed074SK Prateek Nayak id.socket = env->cpu[cpu.cpu].socket_id;
1754995ed074SK Prateek Nayak id.die = env->cpu[cpu.cpu].die_id;
1755995ed074SK Prateek Nayak perf_env__get_cache_id_for_cpu(cpu, env, cache_level, &id);
1756995ed074SK Prateek Nayak }
1757995ed074SK Prateek Nayak
1758995ed074SK Prateek Nayak return id;
1759995ed074SK Prateek Nayak }
1760995ed074SK Prateek Nayak
perf_env__get_core_aggr_by_cpu(struct perf_cpu cpu,void * data)17616d18804bSIan Rogers static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(struct perf_cpu cpu, void *data)
176268d702f7SJiri Olsa {
176368d702f7SJiri Olsa struct perf_env *env = data;
176451b826faSIan Rogers struct aggr_cpu_id id = aggr_cpu_id__empty();
176568d702f7SJiri Olsa
17666d18804bSIan Rogers if (cpu.cpu != -1) {
176768d702f7SJiri Olsa /*
1768db5742b6SKan Liang * core_id is relative to socket and die,
1769b9933817SJames Clark * we need a global id. So we set
1770b9933817SJames Clark * socket, die id and core id
177168d702f7SJiri Olsa */
17726d18804bSIan Rogers id.socket = env->cpu[cpu.cpu].socket_id;
17736d18804bSIan Rogers id.die = env->cpu[cpu.cpu].die_id;
17746d18804bSIan Rogers id.core = env->cpu[cpu.cpu].core_id;
177568d702f7SJiri Olsa }
177668d702f7SJiri Olsa
17772760f5a1SJames Clark return id;
177868d702f7SJiri Olsa }
177968d702f7SJiri Olsa
/* Per-CPU ID for @cpu taken from the recorded perf_env (@data). */
static struct aggr_cpu_id perf_env__get_cpu_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
	struct perf_env *env = data;
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	if (cpu.cpu != -1) {
		/*
		 * Fill in socket, die and core as in the core-level
		 * variant, and additionally record the CPU itself so the
		 * resulting ID is unique per logical CPU.
		 */
		id.socket = env->cpu[cpu.cpu].socket_id;
		id.die = env->cpu[cpu.cpu].die_id;
		id.core = env->cpu[cpu.cpu].core_id;
		id.cpu = cpu;
	}

	return id;
}
17998938cfa7SNamhyung Kim
perf_env__get_node_aggr_by_cpu(struct perf_cpu cpu,void * data)18006d18804bSIan Rogers static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(struct perf_cpu cpu, void *data)
180188031a0dSIan Rogers {
180251b826faSIan Rogers struct aggr_cpu_id id = aggr_cpu_id__empty();
180386895b48SJiri Olsa
1804fcd83a35SJames Clark id.node = perf_env__numa_node(data, cpu);
18052760f5a1SJames Clark return id;
180686895b48SJiri Olsa }
180786895b48SJiri Olsa
/* Global aggregation folds every counter into the slot for CPU 0. */
static struct aggr_cpu_id perf_env__get_global_aggr_by_cpu(struct perf_cpu cpu __maybe_unused,
							   void *data __maybe_unused)
{
	struct aggr_cpu_id id = aggr_cpu_id__empty();
	struct perf_cpu cpu0 = { .cpu = 0 };

	id.cpu = cpu0;

	return id;
}
1817375369abSNamhyung Kim
/* Report-mode wrapper: socket id from the saved session's perf_env. */
static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused,
						     struct perf_cpu cpu)
{
	struct perf_env *env = &perf_stat.session->header.env;

	return perf_env__get_socket_aggr_by_cpu(cpu, env);
}
/* Report-mode wrapper: die id from the saved session's perf_env. */
static struct aggr_cpu_id perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused,
						  struct perf_cpu cpu)
{
	struct perf_env *env = &perf_stat.session->header.env;

	return perf_env__get_die_aggr_by_cpu(cpu, env);
}
182868d702f7SJiri Olsa
/* Report-mode wrapper: cache-level id from the saved session's perf_env. */
static struct aggr_cpu_id perf_stat__get_cache_file(struct perf_stat_config *config __maybe_unused,
						    struct perf_cpu cpu)
{
	struct perf_env *env = &perf_stat.session->header.env;

	return perf_env__get_cache_aggr_by_cpu(cpu, env);
}
1834995ed074SK Prateek Nayak
/* Report-mode wrapper: core id from the saved session's perf_env. */
static struct aggr_cpu_id perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused,
						   struct perf_cpu cpu)
{
	struct perf_env *env = &perf_stat.session->header.env;

	return perf_env__get_core_aggr_by_cpu(cpu, env);
}
184068d702f7SJiri Olsa
/* Report-mode wrapper: per-cpu id from the saved session's perf_env. */
static struct aggr_cpu_id perf_stat__get_cpu_file(struct perf_stat_config *config __maybe_unused,
						  struct perf_cpu cpu)
{
	struct perf_env *env = &perf_stat.session->header.env;

	return perf_env__get_cpu_aggr_by_cpu(cpu, env);
}
18468938cfa7SNamhyung Kim
/* Report-mode wrapper: NUMA node id from the saved session's perf_env. */
static struct aggr_cpu_id perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused,
						   struct perf_cpu cpu)
{
	struct perf_env *env = &perf_stat.session->header.env;

	return perf_env__get_node_aggr_by_cpu(cpu, env);
}
185286895b48SJiri Olsa
/* Report-mode wrapper: global (single bucket) id. */
static struct aggr_cpu_id perf_stat__get_global_file(struct perf_stat_config *config __maybe_unused,
						     struct perf_cpu cpu)
{
	struct perf_env *env = &perf_stat.session->header.env;

	return perf_env__get_global_aggr_by_cpu(cpu, env);
}
1858375369abSNamhyung Kim
aggr_mode__get_aggr_file(enum aggr_mode aggr_mode)18595f50e15cSIan Rogers static aggr_cpu_id_get_t aggr_mode__get_aggr_file(enum aggr_mode aggr_mode)
186068d702f7SJiri Olsa {
18615f50e15cSIan Rogers switch (aggr_mode) {
186268d702f7SJiri Olsa case AGGR_SOCKET:
18635f50e15cSIan Rogers return perf_env__get_socket_aggr_by_cpu;
1864db5742b6SKan Liang case AGGR_DIE:
18655f50e15cSIan Rogers return perf_env__get_die_aggr_by_cpu;
1866995ed074SK Prateek Nayak case AGGR_CACHE:
1867995ed074SK Prateek Nayak return perf_env__get_cache_aggr_by_cpu;
186868d702f7SJiri Olsa case AGGR_CORE:
18695f50e15cSIan Rogers return perf_env__get_core_aggr_by_cpu;
187086895b48SJiri Olsa case AGGR_NODE:
18715f50e15cSIan Rogers return perf_env__get_node_aggr_by_cpu;
187268d702f7SJiri Olsa case AGGR_GLOBAL:
1873375369abSNamhyung Kim return perf_env__get_global_aggr_by_cpu;
1874375369abSNamhyung Kim case AGGR_NONE:
18758938cfa7SNamhyung Kim return perf_env__get_cpu_aggr_by_cpu;
187668d702f7SJiri Olsa case AGGR_THREAD:
187768d702f7SJiri Olsa case AGGR_UNSET:
1878df936cadSClaire Jensen case AGGR_MAX:
187968d702f7SJiri Olsa default:
18805f50e15cSIan Rogers return NULL;
18815f50e15cSIan Rogers }
188268d702f7SJiri Olsa }
188368d702f7SJiri Olsa
aggr_mode__get_id_file(enum aggr_mode aggr_mode)18845f50e15cSIan Rogers static aggr_get_id_t aggr_mode__get_id_file(enum aggr_mode aggr_mode)
18855f50e15cSIan Rogers {
18865f50e15cSIan Rogers switch (aggr_mode) {
18875f50e15cSIan Rogers case AGGR_SOCKET:
18885f50e15cSIan Rogers return perf_stat__get_socket_file;
18895f50e15cSIan Rogers case AGGR_DIE:
18905f50e15cSIan Rogers return perf_stat__get_die_file;
1891995ed074SK Prateek Nayak case AGGR_CACHE:
1892995ed074SK Prateek Nayak return perf_stat__get_cache_file;
18935f50e15cSIan Rogers case AGGR_CORE:
18945f50e15cSIan Rogers return perf_stat__get_core_file;
18955f50e15cSIan Rogers case AGGR_NODE:
18965f50e15cSIan Rogers return perf_stat__get_node_file;
18975f50e15cSIan Rogers case AGGR_GLOBAL:
1898375369abSNamhyung Kim return perf_stat__get_global_file;
1899375369abSNamhyung Kim case AGGR_NONE:
19008938cfa7SNamhyung Kim return perf_stat__get_cpu_file;
19015f50e15cSIan Rogers case AGGR_THREAD:
19025f50e15cSIan Rogers case AGGR_UNSET:
1903df936cadSClaire Jensen case AGGR_MAX:
19045f50e15cSIan Rogers default:
19055f50e15cSIan Rogers return NULL;
19065f50e15cSIan Rogers }
19075f50e15cSIan Rogers }
19085f50e15cSIan Rogers
perf_stat_init_aggr_mode_file(struct perf_stat * st)19095f50e15cSIan Rogers static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
19105f50e15cSIan Rogers {
19115f50e15cSIan Rogers struct perf_env *env = &st->session->header.env;
19125f50e15cSIan Rogers aggr_cpu_id_get_t get_id = aggr_mode__get_aggr_file(stat_config.aggr_mode);
1913505ac48bSNamhyung Kim bool needs_sort = stat_config.aggr_mode != AGGR_NONE;
19145f50e15cSIan Rogers
1915050059e1SNamhyung Kim if (stat_config.aggr_mode == AGGR_THREAD) {
1916050059e1SNamhyung Kim int nr = perf_thread_map__nr(evsel_list->core.threads);
1917050059e1SNamhyung Kim
1918050059e1SNamhyung Kim stat_config.aggr_map = cpu_aggr_map__empty_new(nr);
1919050059e1SNamhyung Kim if (stat_config.aggr_map == NULL)
1920050059e1SNamhyung Kim return -ENOMEM;
1921050059e1SNamhyung Kim
1922050059e1SNamhyung Kim for (int s = 0; s < nr; s++) {
1923050059e1SNamhyung Kim struct aggr_cpu_id id = aggr_cpu_id__empty();
1924050059e1SNamhyung Kim
1925050059e1SNamhyung Kim id.thread_idx = s;
1926050059e1SNamhyung Kim stat_config.aggr_map->map[s] = id;
1927050059e1SNamhyung Kim }
1928050059e1SNamhyung Kim return 0;
1929050059e1SNamhyung Kim }
1930050059e1SNamhyung Kim
19315f50e15cSIan Rogers if (!get_id)
19325f50e15cSIan Rogers return 0;
19335f50e15cSIan Rogers
1934505ac48bSNamhyung Kim stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
1935505ac48bSNamhyung Kim get_id, env, needs_sort);
19365f50e15cSIan Rogers if (!stat_config.aggr_map) {
1937db1f5f10SYang Jihong pr_err("cannot build %s map\n", aggr_mode__string[stat_config.aggr_mode]);
19385f50e15cSIan Rogers return -1;
19395f50e15cSIan Rogers }
19405f50e15cSIan Rogers stat_config.aggr_get_id = aggr_mode__get_id_file(stat_config.aggr_mode);
194168d702f7SJiri Olsa return 0;
194268d702f7SJiri Olsa }
194368d702f7SJiri Olsa
/*
 * Add default attributes, if there were no attributes specified or
 * if -d/--detailed, -d -d or -d -d -d is used:
 */
static int add_default_attributes(void)
{
	/* Baseline software counters plus cycles, always added first. */
	struct perf_event_attr default_attrs0[] = {

	{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
	{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
	{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
	{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },

	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
	};
	/* Added only when the cpu PMU exposes the corresponding event. */
	struct perf_event_attr frontend_attrs[] = {
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	};
	struct perf_event_attr backend_attrs[] = {
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
	};
	struct perf_event_attr default_attrs1[] = {
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES },

	};

	/*
	 * Detailed stats (-d), covering the L1 and last level data caches:
	 */
	struct perf_event_attr detailed_attrs[] = {

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1D << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1D << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_LL << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_LL << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
	};

	/*
	 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
	 */
	struct perf_event_attr very_detailed_attrs[] = {

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1I << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1I << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_DTLB << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_DTLB << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_ITLB << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_ITLB << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

	};

	/*
	 * Very, very detailed stats (-d -d -d), adding prefetch events:
	 */
	struct perf_event_attr very_very_detailed_attrs[] = {

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1D << 0 |
		(PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1D << 0 |
		(PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
	};

	/* Empty list: lets arch code append platform specific attrs via the
	 * same evlist__add_default_attrs() path. */
	struct perf_event_attr default_null_attrs[] = {};
	/* Restrict metric lookup to the user's --pmu filter, or all PMUs. */
	const char *pmu = parse_events_option_args.pmu_filter ?: "all";

	/* Set attrs if no event is selected and !null_run: */
	if (stat_config.null_run)
		return 0;

	if (transaction_run) {
		/* Handle -T as -M transaction. Once platform specific metrics
		 * support has been added to the json files, all architectures
		 * will use this approach. To determine transaction support
		 * on an architecture test for such a metric name.
		 */
		if (!metricgroup__has_metric(pmu, "transaction")) {
			pr_err("Missing transaction metrics\n");
			return -1;
		}
		return metricgroup__parse_groups(evsel_list, pmu, "transaction",
						stat_config.metric_no_group,
						stat_config.metric_no_merge,
						stat_config.metric_no_threshold,
						stat_config.user_requested_cpu_list,
						stat_config.system_wide,
						&stat_config.metric_events);
	}

	if (smi_cost) {
		int smi;

		if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) {
			pr_err("freeze_on_smi is not supported.\n");
			return -1;
		}

		/* Enable freeze-on-SMI if it is off; remember to restore it
		 * on exit (smi_reset). */
		if (!smi) {
			if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) {
				fprintf(stderr, "Failed to set freeze_on_smi.\n");
				return -1;
			}
			smi_reset = true;
		}

		if (!metricgroup__has_metric(pmu, "smi")) {
			pr_err("Missing smi metrics\n");
			return -1;
		}

		if (!force_metric_only)
			stat_config.metric_only = true;

		return metricgroup__parse_groups(evsel_list, pmu, "smi",
						stat_config.metric_no_group,
						stat_config.metric_no_merge,
						stat_config.metric_no_threshold,
						stat_config.user_requested_cpu_list,
						stat_config.system_wide,
						&stat_config.metric_events);
	}

	if (topdown_run) {
		unsigned int max_level = metricgroups__topdown_max_level();
		char str[] = "TopdownL1";

		if (!force_metric_only)
			stat_config.metric_only = true;

		if (!max_level) {
			pr_err("Topdown requested but the topdown metric groups aren't present.\n"
				"(See perf list the metric groups have names like TopdownL1)\n");
			return -1;
		}
		if (stat_config.topdown_level > max_level) {
			pr_err("Invalid top-down metrics level. The max level is %u.\n", max_level);
			return -1;
		} else if (!stat_config.topdown_level)
			stat_config.topdown_level = 1;

		if (!stat_config.interval && !stat_config.metric_only) {
			fprintf(stat_config.output,
				"Topdown accuracy may decrease when measuring long periods.\n"
				"Please print the result regularly, e.g. -I1000\n");
		}
		/* Overwrite the trailing '1' of "TopdownL1" (index 8) with
		 * the requested level digit; levels > 9 are not expected here. */
		str[8] = stat_config.topdown_level + '0';
		if (metricgroup__parse_groups(evsel_list,
				pmu, str,
				/*metric_no_group=*/false,
				/*metric_no_merge=*/false,
				/*metric_no_threshold=*/true,
				stat_config.user_requested_cpu_list,
				stat_config.system_wide,
				&stat_config.metric_events) < 0)
			return -1;
	}

	if (!stat_config.topdown_level)
		stat_config.topdown_level = 1;

	if (!evsel_list->core.nr_entries) {
		/* No events so add defaults. */
		if (target__has_cpu(&target))
			default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;

		if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
			return -1;
		if (perf_pmus__have_event("cpu", "stalled-cycles-frontend")) {
			if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0)
				return -1;
		}
		if (perf_pmus__have_event("cpu", "stalled-cycles-backend")) {
			if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0)
				return -1;
		}
		if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
			return -1;
		/*
		 * Add TopdownL1 metrics if they exist. To minimize
		 * multiplexing, don't request threshold computation.
		 */
		if (metricgroup__has_metric(pmu, "Default")) {
			struct evlist *metric_evlist = evlist__new();
			struct evsel *metric_evsel;

			if (!metric_evlist)
				return -1;

			if (metricgroup__parse_groups(metric_evlist, pmu, "Default",
							/*metric_no_group=*/false,
							/*metric_no_merge=*/false,
							/*metric_no_threshold=*/true,
							stat_config.user_requested_cpu_list,
							stat_config.system_wide,
							&stat_config.metric_events) < 0)
				return -1;

			/* Mark the parsed metric events so they can be
			 * skipped if unsupported and recognized as the
			 * default metricgroup when printed. */
			evlist__for_each_entry(metric_evlist, metric_evsel) {
				metric_evsel->skippable = true;
				metric_evsel->default_metricgroup = true;
			}
			evlist__splice_list_tail(evsel_list, &metric_evlist->core.entries);
			evlist__delete(metric_evlist);
		}

		/* Platform specific attrs */
		if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0)
			return -1;
	}

	/* Detailed events get appended to the event list: */

	if (detailed_run < 1)
		return 0;

	/* Append detailed run extra attributes: */
	if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
		return -1;

	if (detailed_run < 2)
		return 0;

	/* Append very detailed run extra attributes: */
	if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
		return -1;

	if (detailed_run < 3)
		return 0;

	/* Append very, very detailed run extra attributes: */
	return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
}
22322cba3ffbSIngo Molnar
/* Usage string shown for "perf stat record". */
static const char * const stat_record_usage[] = {
	"perf stat record [<options>]",
	NULL,
};
22374979d0c7SJiri Olsa
init_features(struct perf_session * session)22383ba78bd0SJiri Olsa static void init_features(struct perf_session *session)
22393ba78bd0SJiri Olsa {
22403ba78bd0SJiri Olsa int feat;
22413ba78bd0SJiri Olsa
22423ba78bd0SJiri Olsa for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
22433ba78bd0SJiri Olsa perf_header__set_feat(&session->header, feat);
22443ba78bd0SJiri Olsa
22458002a63fSJiri Olsa perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
22463ba78bd0SJiri Olsa perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
22473ba78bd0SJiri Olsa perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
22483ba78bd0SJiri Olsa perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
22493ba78bd0SJiri Olsa perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
22503ba78bd0SJiri Olsa }
22513ba78bd0SJiri Olsa
/*
 * Set up "perf stat record": parse record-specific options, create the
 * output session and flag global perf_stat state so that counting also
 * writes a perf.data file. Returns the remaining argc (the workload
 * command line) on success, negative on error.
 */
static int __cmd_record(int argc, const char **argv)
{
	struct perf_session *session;
	struct perf_data *data = &perf_stat.data;

	argc = parse_options(argc, argv, stat_options, stat_record_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);

	if (output_name)
		data->path = output_name;

	/* Repeated runs (-r) cannot be represented in a single record file. */
	if (stat_config.run_count != 1 || forever) {
		pr_err("Cannot use -r option with perf stat record.\n");
		return -1;
	}

	session = perf_session__new(data, NULL);
	if (IS_ERR(session)) {
		pr_err("Perf session creation failed\n");
		return PTR_ERR(session);
	}

	init_features(session);

	/* Publish the session via the global perf_stat for later stages. */
	session->evlist = evsel_list;
	perf_stat.session = session;
	perf_stat.record = true;
	return argc;
}
22814979d0c7SJiri Olsa
process_stat_round_event(struct perf_session * session,union perf_event * event)228289f1688aSJiri Olsa static int process_stat_round_event(struct perf_session *session,
228389f1688aSJiri Olsa union perf_event *event)
2284a56f9390SJiri Olsa {
228572932371SJiri Olsa struct perf_record_stat_round *stat_round = &event->stat_round;
2286a56f9390SJiri Olsa struct timespec tsh, *ts = NULL;
2287a56f9390SJiri Olsa const char **argv = session->header.env.cmdline_argv;
2288a56f9390SJiri Olsa int argc = session->header.env.nr_cmdline;
2289a56f9390SJiri Olsa
22908962cbecSNamhyung Kim process_counters();
2291a56f9390SJiri Olsa
2292e3b03b6cSAndi Kleen if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
2293e3b03b6cSAndi Kleen update_stats(&walltime_nsecs_stats, stat_round->time);
2294a56f9390SJiri Olsa
2295e3b03b6cSAndi Kleen if (stat_config.interval && stat_round->time) {
2296bd48c63eSArnaldo Carvalho de Melo tsh.tv_sec = stat_round->time / NSEC_PER_SEC;
2297bd48c63eSArnaldo Carvalho de Melo tsh.tv_nsec = stat_round->time % NSEC_PER_SEC;
2298a56f9390SJiri Olsa ts = &tsh;
2299a56f9390SJiri Olsa }
2300a56f9390SJiri Olsa
2301a56f9390SJiri Olsa print_counters(ts, argc, argv);
2302a56f9390SJiri Olsa return 0;
2303a56f9390SJiri Olsa }
2304a56f9390SJiri Olsa
/*
 * Handle a STAT_CONFIG record: adopt the recorded stat configuration,
 * resolve the aggregation mode (command line override vs. recorded data)
 * and build the aggregation map plus per-aggregation counter storage.
 */
static
int process_stat_config_event(struct perf_session *session,
			      union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct perf_stat *st = container_of(tool, struct perf_stat, tool);

	perf_event__read_stat_config(&stat_config, &event->stat_config);

	if (perf_cpu_map__empty(st->cpus)) {
		/* Task-only data: a per-cpu aggregation mode makes no sense. */
		if (st->aggr_mode != AGGR_UNSET)
			pr_warning("warning: processing task data, aggregation mode not set\n");
	} else if (st->aggr_mode != AGGR_UNSET) {
		/* Command-line --per-* option overrides the recorded mode. */
		stat_config.aggr_mode = st->aggr_mode;
	}

	if (perf_stat.data.is_pipe)
		perf_stat_init_aggr_mode();
	else
		perf_stat_init_aggr_mode_file(st);

	if (stat_config.aggr_map) {
		int nr_aggr = stat_config.aggr_map->nr;

		if (evlist__alloc_aggr_stats(session->evlist, nr_aggr) < 0) {
			pr_err("cannot allocate aggr counts\n");
			return -1;
		}
	}
	return 0;
}
233662ba18baSJiri Olsa
set_maps(struct perf_stat * st)23371975d36eSJiri Olsa static int set_maps(struct perf_stat *st)
23381975d36eSJiri Olsa {
23391975d36eSJiri Olsa if (!st->cpus || !st->threads)
23401975d36eSJiri Olsa return 0;
23411975d36eSJiri Olsa
23421975d36eSJiri Olsa if (WARN_ONCE(st->maps_allocated, "stats double allocation\n"))
23431975d36eSJiri Olsa return -EINVAL;
23441975d36eSJiri Olsa
2345453fa030SJiri Olsa perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads);
23461975d36eSJiri Olsa
23471f297a6eSNamhyung Kim if (evlist__alloc_stats(&stat_config, evsel_list, /*alloc_raw=*/true))
23481975d36eSJiri Olsa return -ENOMEM;
23491975d36eSJiri Olsa
23501975d36eSJiri Olsa st->maps_allocated = true;
23511975d36eSJiri Olsa return 0;
23521975d36eSJiri Olsa }
23531975d36eSJiri Olsa
23541975d36eSJiri Olsa static
process_thread_map_event(struct perf_session * session,union perf_event * event)235589f1688aSJiri Olsa int process_thread_map_event(struct perf_session *session,
235689f1688aSJiri Olsa union perf_event *event)
23571975d36eSJiri Olsa {
235889f1688aSJiri Olsa struct perf_tool *tool = session->tool;
23591975d36eSJiri Olsa struct perf_stat *st = container_of(tool, struct perf_stat, tool);
23601975d36eSJiri Olsa
23611975d36eSJiri Olsa if (st->threads) {
23621975d36eSJiri Olsa pr_warning("Extra thread map event, ignoring.\n");
23631975d36eSJiri Olsa return 0;
23641975d36eSJiri Olsa }
23651975d36eSJiri Olsa
23661975d36eSJiri Olsa st->threads = thread_map__new_event(&event->thread_map);
23671975d36eSJiri Olsa if (!st->threads)
23681975d36eSJiri Olsa return -ENOMEM;
23691975d36eSJiri Olsa
23701975d36eSJiri Olsa return set_maps(st);
23711975d36eSJiri Olsa }
23721975d36eSJiri Olsa
23731975d36eSJiri Olsa static
process_cpu_map_event(struct perf_session * session,union perf_event * event)237489f1688aSJiri Olsa int process_cpu_map_event(struct perf_session *session,
237589f1688aSJiri Olsa union perf_event *event)
23761975d36eSJiri Olsa {
237789f1688aSJiri Olsa struct perf_tool *tool = session->tool;
23781975d36eSJiri Olsa struct perf_stat *st = container_of(tool, struct perf_stat, tool);
2379f854839bSJiri Olsa struct perf_cpu_map *cpus;
23801975d36eSJiri Olsa
23811975d36eSJiri Olsa if (st->cpus) {
23821975d36eSJiri Olsa pr_warning("Extra cpu map event, ignoring.\n");
23831975d36eSJiri Olsa return 0;
23841975d36eSJiri Olsa }
23851975d36eSJiri Olsa
23861975d36eSJiri Olsa cpus = cpu_map__new_data(&event->cpu_map.data);
23871975d36eSJiri Olsa if (!cpus)
23881975d36eSJiri Olsa return -ENOMEM;
23891975d36eSJiri Olsa
23901975d36eSJiri Olsa st->cpus = cpus;
23911975d36eSJiri Olsa return set_maps(st);
23921975d36eSJiri Olsa }
23931975d36eSJiri Olsa
/* Usage string shown for "perf stat report". */
static const char * const stat_report_usage[] = {
	"perf stat report [<options>]",
	NULL,
};
2398ba6039b6SJiri Olsa
/*
 * Global state for "perf stat record/report": the tool callbacks that
 * process the stat-related records from a perf.data file, plus the
 * aggregation overrides taken from the report command line.
 * aggr_mode stays AGGR_UNSET unless a --per-* option is given.
 */
static struct perf_stat perf_stat = {
	.tool = {
		.attr = perf_event__process_attr,
		.event_update = perf_event__process_event_update,
		.thread_map = process_thread_map_event,
		.cpu_map = process_cpu_map_event,
		.stat_config = process_stat_config_event,
		.stat = perf_event__process_stat_event,
		.stat_round = process_stat_round_event,
	},
	.aggr_mode = AGGR_UNSET,
	.aggr_level = 0,
};
2412ba6039b6SJiri Olsa
__cmd_report(int argc,const char ** argv)2413ba6039b6SJiri Olsa static int __cmd_report(int argc, const char **argv)
2414ba6039b6SJiri Olsa {
2415ba6039b6SJiri Olsa struct perf_session *session;
2416ba6039b6SJiri Olsa const struct option options[] = {
2417ba6039b6SJiri Olsa OPT_STRING('i', "input", &input_name, "file", "input file name"),
241889af4e05SJiri Olsa OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode,
241989af4e05SJiri Olsa "aggregate counts per processor socket", AGGR_SOCKET),
2420db5742b6SKan Liang OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode,
2421db5742b6SKan Liang "aggregate counts per processor die", AGGR_DIE),
2422aab667caSK Prateek Nayak OPT_CALLBACK_OPTARG(0, "per-cache", &perf_stat.aggr_mode, &perf_stat.aggr_level,
2423aab667caSK Prateek Nayak "cache level",
2424aab667caSK Prateek Nayak "aggregate count at this cache level (Default: LLC)",
2425aab667caSK Prateek Nayak parse_cache_level),
242689af4e05SJiri Olsa OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode,
242789af4e05SJiri Olsa "aggregate counts per physical processor core", AGGR_CORE),
242886895b48SJiri Olsa OPT_SET_UINT(0, "per-node", &perf_stat.aggr_mode,
242986895b48SJiri Olsa "aggregate counts per numa node", AGGR_NODE),
243089af4e05SJiri Olsa OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode,
243189af4e05SJiri Olsa "disable CPU count aggregation", AGGR_NONE),
2432ba6039b6SJiri Olsa OPT_END()
2433ba6039b6SJiri Olsa };
2434ba6039b6SJiri Olsa struct stat st;
2435ba6039b6SJiri Olsa int ret;
2436ba6039b6SJiri Olsa
24378a59f3ccSJiri Olsa argc = parse_options(argc, argv, options, stat_report_usage, 0);
2438ba6039b6SJiri Olsa
2439ba6039b6SJiri Olsa if (!input_name || !strlen(input_name)) {
2440ba6039b6SJiri Olsa if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
2441ba6039b6SJiri Olsa input_name = "-";
2442ba6039b6SJiri Olsa else
2443ba6039b6SJiri Olsa input_name = "perf.data";
2444ba6039b6SJiri Olsa }
2445ba6039b6SJiri Olsa
24462d4f2799SJiri Olsa perf_stat.data.path = input_name;
24478ceb41d7SJiri Olsa perf_stat.data.mode = PERF_DATA_MODE_READ;
2448ba6039b6SJiri Olsa
24492681bd85SNamhyung Kim session = perf_session__new(&perf_stat.data, &perf_stat.tool);
24506ef81c55SMamatha Inamdar if (IS_ERR(session))
24516ef81c55SMamatha Inamdar return PTR_ERR(session);
2452ba6039b6SJiri Olsa
2453ba6039b6SJiri Olsa perf_stat.session = session;
2454ba6039b6SJiri Olsa stat_config.output = stderr;
24552b87be18SIan Rogers evlist__delete(evsel_list);
2456ba6039b6SJiri Olsa evsel_list = session->evlist;
2457ba6039b6SJiri Olsa
2458ba6039b6SJiri Olsa ret = perf_session__process_events(session);
2459ba6039b6SJiri Olsa if (ret)
2460ba6039b6SJiri Olsa return ret;
2461ba6039b6SJiri Olsa
2462ba6039b6SJiri Olsa perf_session__delete(session);
2463ba6039b6SJiri Olsa return 0;
2464ba6039b6SJiri Olsa }
2465ba6039b6SJiri Olsa
setup_system_wide(int forks)2466e3ba76deSJiri Olsa static void setup_system_wide(int forks)
2467e3ba76deSJiri Olsa {
2468e3ba76deSJiri Olsa /*
2469e3ba76deSJiri Olsa * Make system wide (-a) the default target if
2470e3ba76deSJiri Olsa * no target was specified and one of following
2471e3ba76deSJiri Olsa * conditions is met:
2472e3ba76deSJiri Olsa *
2473e3ba76deSJiri Olsa * - there's no workload specified
2474e3ba76deSJiri Olsa * - there is workload specified but all requested
2475e3ba76deSJiri Olsa * events are system wide events
2476e3ba76deSJiri Olsa */
2477e3ba76deSJiri Olsa if (!target__none(&target))
2478e3ba76deSJiri Olsa return;
2479e3ba76deSJiri Olsa
2480e3ba76deSJiri Olsa if (!forks)
2481e3ba76deSJiri Olsa target.system_wide = true;
2482e3ba76deSJiri Olsa else {
248332dcd021SJiri Olsa struct evsel *counter;
2484e3ba76deSJiri Olsa
2485e3ba76deSJiri Olsa evlist__for_each_entry(evsel_list, counter) {
2486d3345fecSAdrian Hunter if (!counter->core.requires_cpu &&
2487ce1d3bc2SArnaldo Carvalho de Melo !evsel__name_is(counter, "duration_time")) {
2488e3ba76deSJiri Olsa return;
2489e3ba76deSJiri Olsa }
2490002a3d69SJin Yao }
2491e3ba76deSJiri Olsa
24926484d2f9SJiri Olsa if (evsel_list->core.nr_entries)
2493e3ba76deSJiri Olsa target.system_wide = true;
2494e3ba76deSJiri Olsa }
2495e3ba76deSJiri Olsa }
2496e3ba76deSJiri Olsa
/*
 * Entry point for 'perf stat'. Dispatches the 'record' and 'report'
 * sub-commands, then validates the (many) option combinations, sets up
 * the output stream, targets, aggregation and counter maps, runs the
 * workload stat_config.run_count times (or forever with -r 0), prints
 * the counters, and finally writes the perf.data header in record mode.
 *
 * Returns 0 on success, a negative errno-style value otherwise.
 */
int cmd_stat(int argc, const char **argv)
{
	const char * const stat_usage[] = {
		"perf stat [<options>] [<command>]",
		NULL
	};
	int status = -EINVAL, run_idx, err;
	const char *mode;
	FILE *output = stderr;
	unsigned int interval, timeout;
	const char * const stat_subcommands[] = { "record", "report" };
	char errbuf[BUFSIZ];

	setlocale(LC_ALL, "");

	evsel_list = evlist__new();
	if (evsel_list == NULL)
		return -ENOMEM;

	parse_events__shrink_config_terms();

	/* String-parsing callback-based options would segfault when negated */
	set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG);
	set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG);
	set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG);

	argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
					(const char **) stat_usage,
					PARSE_OPT_STOP_AT_NON_OPTION);

	/* -x SEP implies CSV output; "\\t" on the command line means a tab. */
	if (stat_config.csv_sep) {
		stat_config.csv_output = true;
		if (!strcmp(stat_config.csv_sep, "\\t"))
			stat_config.csv_sep = "\t";
	} else
		stat_config.csv_sep = DEFAULT_SEPARATOR;

	/*
	 * Sub-command dispatch. strstarts("record", argv[0]) deliberately
	 * checks that argv[0] is a prefix of "record" (min. 3 chars), so
	 * abbreviations like "rec" work.
	 */
	if (argc && strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
		argc = __cmd_record(argc, argv);
		if (argc < 0)
			return -1;
	} else if (argc && strlen(argv[0]) > 2 && strstarts("report", argv[0]))
		return __cmd_report(argc, argv);

	interval = stat_config.interval;
	timeout = stat_config.timeout;

	/*
	 * For record command the -o is already taken care of.
	 */
	if (!STAT_RECORD && output_name && strcmp(output_name, "-"))
		output = NULL;

	/* --- mutually exclusive / invalid option combinations --- */
	if (output_name && output_fd) {
		fprintf(stderr, "cannot use both --output and --log-fd\n");
		parse_options_usage(stat_usage, stat_options, "o", 1);
		parse_options_usage(NULL, stat_options, "log-fd", 0);
		goto out;
	}

	if (stat_config.metric_only && stat_config.aggr_mode == AGGR_THREAD) {
		fprintf(stderr, "--metric-only is not supported with --per-thread\n");
		goto out;
	}

	if (stat_config.metric_only && stat_config.run_count > 1) {
		fprintf(stderr, "--metric-only is not supported with -r\n");
		goto out;
	}

	if (stat_config.walltime_run_table && stat_config.run_count <= 1) {
		fprintf(stderr, "--table is only supported with -r\n");
		parse_options_usage(stat_usage, stat_options, "r", 1);
		parse_options_usage(NULL, stat_options, "table", 0);
		goto out;
	}

	if (output_fd < 0) {
		fprintf(stderr, "argument to --log-fd must be a > 0\n");
		parse_options_usage(stat_usage, stat_options, "log-fd", 0);
		goto out;
	}

	/* --- open the output stream (-o file or --log-fd) --- */
	if (!output && !quiet) {
		struct timespec tm;
		mode = append_file ? "a" : "w";

		output = fopen(output_name, mode);
		if (!output) {
			perror("failed to create output file");
			return -1;
		}
		if (!stat_config.json_output) {
			clock_gettime(CLOCK_REALTIME, &tm);
			fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
		}
	} else if (output_fd > 0) {
		mode = append_file ? "a" : "w";
		output = fdopen(output_fd, mode);
		if (!output) {
			perror("Failed opening logfd");
			return -errno;
		}
	}

	/* --interval-clear needs a tty to rewrite the screen. */
	if (stat_config.interval_clear && !isatty(fileno(output))) {
		fprintf(stderr, "--interval-clear does not work with output\n");
		parse_options_usage(stat_usage, stat_options, "o", 1);
		parse_options_usage(NULL, stat_options, "log-fd", 0);
		parse_options_usage(NULL, stat_options, "interval-clear", 0);
		return -1;
	}

	stat_config.output = output;

	/*
	 * let the spreadsheet do the pretty-printing
	 */
	if (stat_config.csv_output) {
		/* User explicitly passed -B? */
		if (big_num_opt == 1) {
			fprintf(stderr, "-B option not supported with -x\n");
			parse_options_usage(stat_usage, stat_options, "B", 1);
			parse_options_usage(NULL, stat_options, "x", 1);
			goto out;
		} else /* Nope, so disable big number formatting */
			stat_config.big_num = false;
	} else if (big_num_opt == 0) /* User passed --no-big-num */
		stat_config.big_num = false;

	/* Target validation problems are warnings only, not fatal. */
	err = target__validate(&target);
	if (err) {
		target__strerror(&target, err, errbuf, BUFSIZ);
		pr_warning("%s\n", errbuf);
	}

	setup_system_wide(argc);

	/*
	 * Display user/system times only for single
	 * run and when there's specified tracee.
	 */
	if ((stat_config.run_count == 1) && target__none(&target))
		stat_config.ru_display = true;

	/* -r 0 means "run forever"; internally treated as run_count 1. */
	if (stat_config.run_count < 0) {
		pr_err("Run count must be a positive number\n");
		parse_options_usage(stat_usage, stat_options, "r", 1);
		goto out;
	} else if (stat_config.run_count == 0) {
		forever = true;
		stat_config.run_count = 1;
	}

	if (stat_config.walltime_run_table) {
		stat_config.walltime_run = zalloc(stat_config.run_count * sizeof(stat_config.walltime_run[0]));
		if (!stat_config.walltime_run) {
			pr_err("failed to setup -r option");
			goto out;
		}
	}

	if ((stat_config.aggr_mode == AGGR_THREAD) &&
		!target__has_task(&target)) {
		if (!target.system_wide || target.cpu_list) {
			fprintf(stderr, "The --per-thread option is only "
				"available when monitoring via -p -t -a "
				"options or only --per-thread.\n");
			parse_options_usage(NULL, stat_options, "p", 1);
			parse_options_usage(NULL, stat_options, "t", 1);
			goto out;
		}
	}

	/*
	 * no_aggr, cgroup are for system-wide only
	 * --per-thread is aggregated per thread, we dont mix it with cpu mode
	 */
	if (((stat_config.aggr_mode != AGGR_GLOBAL &&
	      stat_config.aggr_mode != AGGR_THREAD) ||
	     (nr_cgroups || stat_config.cgroup_list)) &&
	    !target__has_cpu(&target)) {
		fprintf(stderr, "both cgroup and no-aggregation "
			"modes only available in system-wide mode\n");

		parse_options_usage(stat_usage, stat_options, "G", 1);
		parse_options_usage(NULL, stat_options, "A", 1);
		parse_options_usage(NULL, stat_options, "a", 1);
		parse_options_usage(NULL, stat_options, "for-each-cgroup", 0);
		goto out;
	}

	/* --iostat: I/O PMU monitoring; may force system-wide mode. */
	if (stat_config.iostat_run) {
		status = iostat_prepare(evsel_list, &stat_config);
		if (status)
			goto out;
		if (iostat_mode == IOSTAT_LIST) {
			iostat_list(evsel_list, &stat_config);
			goto out;
		} else if (verbose > 0)
			iostat_list(evsel_list, &stat_config);
		if (iostat_mode == IOSTAT_RUN && !target__has_cpu(&target))
			target.system_wide = true;
	}

	if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide))
		target.per_thread = true;

	stat_config.system_wide = target.system_wide;
	if (target.cpu_list) {
		stat_config.user_requested_cpu_list = strdup(target.cpu_list);
		if (!stat_config.user_requested_cpu_list) {
			status = -ENOMEM;
			goto out;
		}
	}

	/*
	 * Metric parsing needs to be delayed as metrics may optimize events
	 * knowing the target is system-wide.
	 */
	if (metrics) {
		const char *pmu = parse_events_option_args.pmu_filter ?: "all";
		int ret = metricgroup__parse_groups(evsel_list, pmu, metrics,
						stat_config.metric_no_group,
						stat_config.metric_no_merge,
						stat_config.metric_no_threshold,
						stat_config.user_requested_cpu_list,
						stat_config.system_wide,
						&stat_config.metric_events);

		zfree(&metrics);
		if (ret) {
			status = ret;
			goto out;
		}
	}

	if (add_default_attributes())
		goto out;

	/* --for-each-cgroup expands the evlist per cgroup; excludes -G. */
	if (stat_config.cgroup_list) {
		if (nr_cgroups > 0) {
			pr_err("--cgroup and --for-each-cgroup cannot be used together\n");
			parse_options_usage(stat_usage, stat_options, "G", 1);
			parse_options_usage(NULL, stat_options, "for-each-cgroup", 0);
			goto out;
		}

		if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list,
					  &stat_config.metric_events, true) < 0) {
			parse_options_usage(stat_usage, stat_options,
					    "for-each-cgroup", 0);
			goto out;
		}
	}

	evlist__warn_user_requested_cpus(evsel_list, target.cpu_list);

	if (evlist__create_maps(evsel_list, &target) < 0) {
		if (target__has_task(&target)) {
			pr_err("Problems finding threads of monitor\n");
			parse_options_usage(stat_usage, stat_options, "p", 1);
			parse_options_usage(NULL, stat_options, "t", 1);
		} else if (target__has_cpu(&target)) {
			perror("failed to parse CPUs map");
			parse_options_usage(stat_usage, stat_options, "C", 1);
			parse_options_usage(NULL, stat_options, "a", 1);
		}
		goto out;
	}

	evlist__check_cpu_maps(evsel_list);

	/*
	 * Initialize thread_map with comm names,
	 * so we could print it out on output.
	 */
	if (stat_config.aggr_mode == AGGR_THREAD) {
		thread_map__read_comms(evsel_list->core.threads);
	}

	if (stat_config.aggr_mode == AGGR_NODE)
		cpu__setup_cpunode_map();

	/* --interval-count only makes sense together with -I. */
	if (stat_config.times && interval)
		interval_count = true;
	else if (stat_config.times && !interval) {
		pr_err("interval-count option should be used together with "
				"interval-print.\n");
		parse_options_usage(stat_usage, stat_options, "interval-count", 0);
		parse_options_usage(stat_usage, stat_options, "I", 1);
		goto out;
	}

	/* --timeout: hard minimum 10ms, warn below 100ms, excludes -I. */
	if (timeout && timeout < 100) {
		if (timeout < 10) {
			pr_err("timeout must be >= 10ms.\n");
			parse_options_usage(stat_usage, stat_options, "timeout", 0);
			goto out;
		} else
			pr_warning("timeout < 100ms. "
				   "The overhead percentage could be high in some cases. "
				   "Please proceed with caution.\n");
	}
	if (timeout && interval) {
		pr_err("timeout option is not supported with interval-print.\n");
		parse_options_usage(stat_usage, stat_options, "timeout", 0);
		parse_options_usage(stat_usage, stat_options, "I", 1);
		goto out;
	}

	if (perf_stat_init_aggr_mode())
		goto out;

	if (evlist__alloc_stats(&stat_config, evsel_list, interval))
		goto out;

	/*
	 * Set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless
	 * while avoiding that older tools show confusing messages.
	 *
	 * However for pipe sessions we need to keep it zero,
	 * because script's perf_evsel__check_attr is triggered
	 * by attr->sample_type != 0, and we can't run it on
	 * stat sessions.
	 */
	stat_config.identifier = !(STAT_RECORD && perf_stat.data.is_pipe);

	/*
	 * We dont want to block the signals - that would cause
	 * child tasks to inherit that and Ctrl-C would not work.
	 * What we want is for Ctrl-C to work in the exec()-ed
	 * task, but being ignored by perf stat itself:
	 */
	atexit(sig_atexit);
	if (!forever)
		signal(SIGINT, skip_signal);
	signal(SIGCHLD, skip_signal);
	signal(SIGALRM, skip_signal);
	signal(SIGABRT, skip_signal);

	if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack))
		goto out;

	/* Enable ignoring missing threads when -p option is defined. */
	evlist__first(evsel_list)->ignore_missing_thread = target.pid;
	status = 0;

	/* Main measurement loop: one iteration per -r run (or forever). */
	for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) {
		if (stat_config.run_count != 1 && verbose > 0)
			fprintf(output, "[ perf stat: executing run #%d ... ]\n",
				run_idx + 1);

		if (run_idx != 0)
			evlist__reset_prev_raw_counts(evsel_list);

		status = run_perf_stat(argc, argv, run_idx);
		if (forever && status != -1 && !interval) {
			print_counters(NULL, argc, argv);
			perf_stat__reset_stats();
		}
	}

	if (!forever && status != -1 && (!interval || stat_config.summary)) {
		if (stat_config.run_count > 1)
			evlist__copy_res_stats(&stat_config, evsel_list);
		print_counters(NULL, argc, argv);
	}

	evlist__finalize_ctlfd(evsel_list);

	if (STAT_RECORD) {
		/*
		 * We synthesize the kernel mmap record just so that older tools
		 * don't emit warnings about not being able to resolve symbols
		 * due to /proc/sys/kernel/kptr_restrict settings and instead provide
		 * a saner message about no samples being in the perf.data file.
		 *
		 * This also serves to suppress a warning about f_header.data.size == 0
		 * in header.c at the moment 'perf stat record' gets introduced, which
		 * is not really needed once we start adding the stat specific PERF_RECORD_
		 * records, but the need to suppress the kptr_restrict messages in older
		 * tools remain  -acme
		 */
		int fd = perf_data__fd(&perf_stat.data);

		err = perf_event__synthesize_kernel_mmap((void *)&perf_stat,
							 process_synthesized_event,
							 &perf_stat.session->machines.host);
		if (err) {
			pr_warning("Couldn't synthesize the kernel mmap record, harmless, "
				   "older tools may produce warnings about this file\n.");
		}

		/* Emit the FINAL stat round unless -I already rounded per interval. */
		if (!interval) {
			if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL))
				pr_err("failed to write stat round event\n");
		}

		if (!perf_stat.data.is_pipe) {
			perf_stat.session->header.data_size += perf_stat.bytes_written;
			perf_session__write_header(perf_stat.session, evsel_list, fd, true);
		}

		evlist__close(evsel_list);
		perf_session__delete(perf_stat.session);
	}

	perf_stat__exit_aggr_mode();
	evlist__free_stats(evsel_list);
out:
	/* Common cleanup: reached on both success and all error paths above. */
	if (stat_config.iostat_run)
		iostat_release(evsel_list);

	zfree(&stat_config.walltime_run);
	zfree(&stat_config.user_requested_cpu_list);

	if (smi_cost && smi_reset)
		sysfs__write_int(FREEZE_ON_SMI_PATH, 0);

	evlist__delete(evsel_list);

	metricgroup__rblist_exit(&stat_config.metric_events);
	evlist__close_control(stat_config.ctl_fd, stat_config.ctl_fd_ack, &stat_config.ctl_fd_close);

	return status;
}
2924