// SPDX-License-Identifier: GPL-2.0-only
/*
 * builtin-stat.c
 *
 * Builtin stat command: Give a precise performance counters summary
 * overview about any workload, CPU or specific PID.
 *
 * Sample output:

   $ perf stat ./hackbench 10

  Time: 0.118

  Performance counter stats for './hackbench 10':

       1708.761321 task-clock                #   11.037 CPUs utilized
            41,190 context-switches          #    0.024 M/sec
             6,735 CPU-migrations            #    0.004 M/sec
            17,318 page-faults               #    0.010 M/sec
     5,205,202,243 cycles                    #    3.046 GHz
     3,856,436,920 stalled-cycles-frontend   #   74.09% frontend cycles idle
     1,600,790,871 stalled-cycles-backend    #   30.75% backend cycles idle
     2,603,501,247 instructions              #    0.50  insns per cycle
                                             #    1.48  stalled cycles per insn
       484,357,498 branches                  #  283.455 M/sec
         6,388,934 branch-misses             #    1.32% of all branches

        0.154822978  seconds time elapsed

 *
 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *   Jaswinder Singh Rajput <jaswinder@kernel.org>
 */

#include "builtin.h"
#include "util/cgroup.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/pmus.h"
#include "util/pmu.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evlist-hybrid.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/header.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/counts.h"
#include "util/topdown.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/string2.h"
#include "util/metricgroup.h"
#include "util/synthetic-events.h"
#include "util/target.h"
#include "util/time-utils.h"
#include "util/top.h"
#include "util/affinity.h"
#include "util/pfm.h"
#include "util/bpf_counter.h"
#include "util/iostat.h"
#include "util/util.h"
#include "asm/bug.h"

#include <linux/time64.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <sys/prctl.h>
#include <inttypes.h>
#include <locale.h>
#include <math.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <linux/err.h>

#include <linux/ctype.h>
#include <perf/evlist.h>
#include <internal/threadmap.h>

#define DEFAULT_SEPARATOR  " "
#define FREEZE_ON_SMI_PATH "devices/cpu/freeze_on_smi"

static void print_counters(struct timespec *ts, int argc, const char **argv);

static struct evlist *evsel_list;
static struct parse_events_option_args parse_events_option_args = {
    .evlistp = &evsel_list,
};

static bool all_counters_use_bpf = true;

static struct target target = {
    .uid = UINT_MAX,
};

#define METRIC_ONLY_LEN 20

static volatile sig_atomic_t child_pid = -1;
static int detailed_run = 0;
static bool transaction_run;
static bool topdown_run = false;
static bool smi_cost = false;
static bool smi_reset = false;
static int big_num_opt = -1;
static const char *pre_cmd = NULL;
static const char *post_cmd = NULL;
static bool sync_run = false;
static bool forever = false;
static bool force_metric_only = false;
static struct timespec ref_time;
static bool append_file;
static bool interval_count;
static const char *output_name;
static int output_fd;
static char *metrics;

struct perf_stat {
    bool record;
    struct perf_data data;
    struct perf_session *session;
    u64 bytes_written;
    struct perf_tool tool;
    bool maps_allocated;
    struct perf_cpu_map *cpus;
    struct perf_thread_map *threads;
    enum aggr_mode aggr_mode;
    u32 aggr_level;
};

static struct perf_stat perf_stat;
#define STAT_RECORD perf_stat.record

static volatile sig_atomic_t done = 0;

static struct perf_stat_config stat_config = {
    .aggr_mode = AGGR_GLOBAL,
    .aggr_level = MAX_CACHE_LVL + 1,
    .scale = true,
    .unit_width = 4, /* strlen("unit") */
    .run_count = 1,
    .metric_only_len = METRIC_ONLY_LEN,
    .walltime_nsecs_stats = &walltime_nsecs_stats,
    .ru_stats = &ru_stats,
    .big_num = true,
    .ctl_fd = -1,
    .ctl_fd_ack = -1,
    .iostat_run = false,
};

static bool cpus_map_matched(struct evsel *a, struct evsel *b)
{
    if (!a->core.cpus && !b->core.cpus)
        return true;

    if (!a->core.cpus || !b->core.cpus)
        return false;

    if (perf_cpu_map__nr(a->core.cpus) != perf_cpu_map__nr(b->core.cpus))
        return false;

    for (int i = 0; i < perf_cpu_map__nr(a->core.cpus); i++) {
        if (perf_cpu_map__cpu(a->core.cpus, i).cpu !=
            perf_cpu_map__cpu(b->core.cpus, i).cpu)
            return false;
    }

    return true;
}

static void evlist__check_cpu_maps(struct evlist *evlist)
{
    struct evsel *evsel, *warned_leader = NULL;

    if (evlist__has_hybrid(evlist))
        evlist__warn_hybrid_group(evlist);

    evlist__for_each_entry(evlist, evsel) {
        struct evsel *leader = evsel__leader(evsel);

        /* Check that the leader's CPU map matches each member's. */
        if (leader == evsel)
            continue;
        if (cpus_map_matched(leader, evsel))
            continue;

        /* If there's a mismatch, disable the group and warn the user.
*/ 205 if (warned_leader != leader) { 206 char buf[200]; 207 208 pr_warning("WARNING: grouped events cpus do not match.\n" 209 "Events with CPUs not matching the leader will " 210 "be removed from the group.\n"); 211 evsel__group_desc(leader, buf, sizeof(buf)); 212 pr_warning(" %s\n", buf); 213 warned_leader = leader; 214 } 215 if (verbose > 0) { 216 char buf[200]; 217 218 cpu_map__snprint(leader->core.cpus, buf, sizeof(buf)); 219 pr_warning(" %s: %s\n", leader->name, buf); 220 cpu_map__snprint(evsel->core.cpus, buf, sizeof(buf)); 221 pr_warning(" %s: %s\n", evsel->name, buf); 222 } 223 224 evsel__remove_from_group(evsel, leader); 225 } 226 } 227 228 static inline void diff_timespec(struct timespec *r, struct timespec *a, 229 struct timespec *b) 230 { 231 r->tv_sec = a->tv_sec - b->tv_sec; 232 if (a->tv_nsec < b->tv_nsec) { 233 r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec; 234 r->tv_sec--; 235 } else { 236 r->tv_nsec = a->tv_nsec - b->tv_nsec ; 237 } 238 } 239 240 static void perf_stat__reset_stats(void) 241 { 242 evlist__reset_stats(evsel_list); 243 perf_stat__reset_shadow_stats(); 244 } 245 246 static int process_synthesized_event(struct perf_tool *tool __maybe_unused, 247 union perf_event *event, 248 struct perf_sample *sample __maybe_unused, 249 struct machine *machine __maybe_unused) 250 { 251 if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) { 252 pr_err("failed to write perf data, error: %m\n"); 253 return -1; 254 } 255 256 perf_stat.bytes_written += event->header.size; 257 return 0; 258 } 259 260 static int write_stat_round_event(u64 tm, u64 type) 261 { 262 return perf_event__synthesize_stat_round(NULL, tm, type, 263 process_synthesized_event, 264 NULL); 265 } 266 267 #define WRITE_STAT_ROUND_EVENT(time, interval) \ 268 write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval) 269 270 #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y) 271 272 static int evsel__write_stat_event(struct evsel *counter, int cpu_map_idx, u32 thread, 273 struct perf_counts_values *count) 274 { 275 struct perf_sample_id *sid = SID(counter, cpu_map_idx, thread); 276 struct perf_cpu cpu = perf_cpu_map__cpu(evsel__cpus(counter), cpu_map_idx); 277 278 return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count, 279 process_synthesized_event, NULL); 280 } 281 282 static int read_single_counter(struct evsel *counter, int cpu_map_idx, 283 int thread, struct timespec *rs) 284 { 285 switch(counter->tool_event) { 286 case PERF_TOOL_DURATION_TIME: { 287 u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL; 288 struct perf_counts_values *count = 289 perf_counts(counter->counts, cpu_map_idx, thread); 290 count->ena = count->run = val; 291 count->val = val; 292 return 0; 293 } 294 case PERF_TOOL_USER_TIME: 295 case PERF_TOOL_SYSTEM_TIME: { 296 u64 val; 297 struct perf_counts_values *count = 298 perf_counts(counter->counts, cpu_map_idx, thread); 299 if (counter->tool_event == PERF_TOOL_USER_TIME) 300 val = ru_stats.ru_utime_usec_stat.mean; 301 else 302 val = ru_stats.ru_stime_usec_stat.mean; 303 count->ena = count->run = val; 304 count->val = val; 305 return 0; 306 } 307 default: 308 case PERF_TOOL_NONE: 309 return evsel__read_counter(counter, cpu_map_idx, thread); 310 case PERF_TOOL_MAX: 311 /* This should never be reached */ 312 return 0; 313 } 314 } 315 316 /* 317 * Read out the results of a single counter: 318 * do not aggregate counts across CPUs in system-wide mode 319 */ 320 static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu_map_idx) 
321 { 322 int nthreads = perf_thread_map__nr(evsel_list->core.threads); 323 int thread; 324 325 if (!counter->supported) 326 return -ENOENT; 327 328 for (thread = 0; thread < nthreads; thread++) { 329 struct perf_counts_values *count; 330 331 count = perf_counts(counter->counts, cpu_map_idx, thread); 332 333 /* 334 * The leader's group read loads data into its group members 335 * (via evsel__read_counter()) and sets their count->loaded. 336 */ 337 if (!perf_counts__is_loaded(counter->counts, cpu_map_idx, thread) && 338 read_single_counter(counter, cpu_map_idx, thread, rs)) { 339 counter->counts->scaled = -1; 340 perf_counts(counter->counts, cpu_map_idx, thread)->ena = 0; 341 perf_counts(counter->counts, cpu_map_idx, thread)->run = 0; 342 return -1; 343 } 344 345 perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, false); 346 347 if (STAT_RECORD) { 348 if (evsel__write_stat_event(counter, cpu_map_idx, thread, count)) { 349 pr_err("failed to write stat event\n"); 350 return -1; 351 } 352 } 353 354 if (verbose > 1) { 355 fprintf(stat_config.output, 356 "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", 357 evsel__name(counter), 358 perf_cpu_map__cpu(evsel__cpus(counter), 359 cpu_map_idx).cpu, 360 count->val, count->ena, count->run); 361 } 362 } 363 364 return 0; 365 } 366 367 static int read_affinity_counters(struct timespec *rs) 368 { 369 struct evlist_cpu_iterator evlist_cpu_itr; 370 struct affinity saved_affinity, *affinity; 371 372 if (all_counters_use_bpf) 373 return 0; 374 375 if (!target__has_cpu(&target) || target__has_per_thread(&target)) 376 affinity = NULL; 377 else if (affinity__setup(&saved_affinity) < 0) 378 return -1; 379 else 380 affinity = &saved_affinity; 381 382 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { 383 struct evsel *counter = evlist_cpu_itr.evsel; 384 385 if (evsel__is_bpf(counter)) 386 continue; 387 388 if (!counter->err) { 389 counter->err = read_counter_cpu(counter, rs, 390 evlist_cpu_itr.cpu_map_idx); 391 } 392 } 393 if (affinity) 394 affinity__cleanup(&saved_affinity); 395 396 return 0; 397 } 398 399 static int read_bpf_map_counters(void) 400 { 401 struct evsel *counter; 402 int err; 403 404 evlist__for_each_entry(evsel_list, counter) { 405 if (!evsel__is_bpf(counter)) 406 continue; 407 408 err = bpf_counter__read(counter); 409 if (err) 410 return err; 411 } 412 return 0; 413 } 414 415 static int read_counters(struct timespec *rs) 416 { 417 if (!stat_config.stop_read_counter) { 418 if (read_bpf_map_counters() || 419 read_affinity_counters(rs)) 420 return -1; 421 } 422 return 0; 423 } 424 425 static void process_counters(void) 426 { 427 struct evsel *counter; 428 429 evlist__for_each_entry(evsel_list, counter) { 430 if (counter->err) 431 pr_debug("failed to read counter %s\n", counter->name); 432 if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter)) 433 pr_warning("failed to process counter %s\n", counter->name); 434 counter->err = 0; 435 } 436 437 perf_stat_merge_counters(&stat_config, evsel_list); 438 perf_stat_process_percore(&stat_config, evsel_list); 439 } 440 441 static void process_interval(void) 442 { 443 struct timespec ts, rs; 444 445 clock_gettime(CLOCK_MONOTONIC, &ts); 446 diff_timespec(&rs, &ts, &ref_time); 447 448 evlist__reset_aggr_stats(evsel_list); 449 450 if (read_counters(&rs) == 0) 451 process_counters(); 452 453 if (STAT_RECORD) { 454 if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL)) 455 pr_err("failed to write stat round event\n"); 456 } 457 458 
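    /*
     * Each interval reports its own walltime: reset the walltime stats
     * and account exactly one interval length before printing.
     */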
init_stats(&walltime_nsecs_stats); 459 update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL); 460 print_counters(&rs, 0, NULL); 461 } 462 463 static bool handle_interval(unsigned int interval, int *times) 464 { 465 if (interval) { 466 process_interval(); 467 if (interval_count && !(--(*times))) 468 return true; 469 } 470 return false; 471 } 472 473 static int enable_counters(void) 474 { 475 struct evsel *evsel; 476 int err; 477 478 evlist__for_each_entry(evsel_list, evsel) { 479 if (!evsel__is_bpf(evsel)) 480 continue; 481 482 err = bpf_counter__enable(evsel); 483 if (err) 484 return err; 485 } 486 487 if (!target__enable_on_exec(&target)) { 488 if (!all_counters_use_bpf) 489 evlist__enable(evsel_list); 490 } 491 return 0; 492 } 493 494 static void disable_counters(void) 495 { 496 struct evsel *counter; 497 498 /* 499 * If we don't have tracee (attaching to task or cpu), counters may 500 * still be running. To get accurate group ratios, we must stop groups 501 * from counting before reading their constituent counters. 502 */ 503 if (!target__none(&target)) { 504 evlist__for_each_entry(evsel_list, counter) 505 bpf_counter__disable(counter); 506 if (!all_counters_use_bpf) 507 evlist__disable(evsel_list); 508 } 509 } 510 511 static volatile sig_atomic_t workload_exec_errno; 512 513 /* 514 * evlist__prepare_workload will send a SIGUSR1 515 * if the fork fails, since we asked by setting its 516 * want_signal to true. 517 */ 518 static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info, 519 void *ucontext __maybe_unused) 520 { 521 workload_exec_errno = info->si_value.sival_int; 522 } 523 524 static bool evsel__should_store_id(struct evsel *counter) 525 { 526 return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID; 527 } 528 529 static bool is_target_alive(struct target *_target, 530 struct perf_thread_map *threads) 531 { 532 struct stat st; 533 int i; 534 535 if (!target__has_task(_target)) 536 return true; 537 538 for (i = 0; i < threads->nr; i++) { 539 char path[PATH_MAX]; 540 541 scnprintf(path, PATH_MAX, "%s/%d", procfs__mountpoint(), 542 threads->map[i].pid); 543 544 if (!stat(path, &st)) 545 return true; 546 } 547 548 return false; 549 } 550 551 static void process_evlist(struct evlist *evlist, unsigned int interval) 552 { 553 enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED; 554 555 if (evlist__ctlfd_process(evlist, &cmd) > 0) { 556 switch (cmd) { 557 case EVLIST_CTL_CMD_ENABLE: 558 fallthrough; 559 case EVLIST_CTL_CMD_DISABLE: 560 if (interval) 561 process_interval(); 562 break; 563 case EVLIST_CTL_CMD_SNAPSHOT: 564 case EVLIST_CTL_CMD_ACK: 565 case EVLIST_CTL_CMD_UNSUPPORTED: 566 case EVLIST_CTL_CMD_EVLIST: 567 case EVLIST_CTL_CMD_STOP: 568 case EVLIST_CTL_CMD_PING: 569 default: 570 break; 571 } 572 } 573 } 574 575 static void compute_tts(struct timespec *time_start, struct timespec *time_stop, 576 int *time_to_sleep) 577 { 578 int tts = *time_to_sleep; 579 struct timespec time_diff; 580 581 diff_timespec(&time_diff, time_stop, time_start); 582 583 tts -= time_diff.tv_sec * MSEC_PER_SEC + 584 time_diff.tv_nsec / NSEC_PER_MSEC; 585 586 if (tts < 0) 587 tts = 0; 588 589 *time_to_sleep = tts; 590 } 591 592 static int dispatch_events(bool forks, int timeout, int interval, int *times) 593 { 594 int child_exited = 0, status = 0; 595 int time_to_sleep, sleep_time; 596 struct timespec time_start, time_stop; 597 598 if (interval) 599 sleep_time = interval; 600 else if (timeout) 601 sleep_time = timeout; 602 else 603 sleep_time = 1000; 
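    /*
     * Main wait loop: poll the evlist fds for at most time_to_sleep ms.
     * On a poll timeout, handle the interval printout (or stop if a
     * --timeout was given); on fd activity, process control commands and
     * subtract the elapsed time from the next sleep. With neither -I nor
     * --timeout, wake up every 1000 ms to check that the target is still
     * alive.
     */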
604 605 time_to_sleep = sleep_time; 606 607 while (!done) { 608 if (forks) 609 child_exited = waitpid(child_pid, &status, WNOHANG); 610 else 611 child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0; 612 613 if (child_exited) 614 break; 615 616 clock_gettime(CLOCK_MONOTONIC, &time_start); 617 if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) { /* poll timeout or EINTR */ 618 if (timeout || handle_interval(interval, times)) 619 break; 620 time_to_sleep = sleep_time; 621 } else { /* fd revent */ 622 process_evlist(evsel_list, interval); 623 clock_gettime(CLOCK_MONOTONIC, &time_stop); 624 compute_tts(&time_start, &time_stop, &time_to_sleep); 625 } 626 } 627 628 return status; 629 } 630 631 enum counter_recovery { 632 COUNTER_SKIP, 633 COUNTER_RETRY, 634 COUNTER_FATAL, 635 }; 636 637 static enum counter_recovery stat_handle_error(struct evsel *counter) 638 { 639 char msg[BUFSIZ]; 640 /* 641 * PPC returns ENXIO for HW counters until 2.6.37 642 * (behavior changed with commit b0a873e). 643 */ 644 if (errno == EINVAL || errno == ENOSYS || 645 errno == ENOENT || errno == EOPNOTSUPP || 646 errno == ENXIO) { 647 if (verbose > 0) 648 ui__warning("%s event is not supported by the kernel.\n", 649 evsel__name(counter)); 650 counter->supported = false; 651 /* 652 * errored is a sticky flag that means one of the counter's 653 * cpu event had a problem and needs to be reexamined. 654 */ 655 counter->errored = true; 656 657 if ((evsel__leader(counter) != counter) || 658 !(counter->core.leader->nr_members > 1)) 659 return COUNTER_SKIP; 660 } else if (evsel__fallback(counter, errno, msg, sizeof(msg))) { 661 if (verbose > 0) 662 ui__warning("%s\n", msg); 663 return COUNTER_RETRY; 664 } else if (target__has_per_thread(&target) && 665 evsel_list->core.threads && 666 evsel_list->core.threads->err_thread != -1) { 667 /* 668 * For global --per-thread case, skip current 669 * error thread. 670 */ 671 if (!thread_map__remove(evsel_list->core.threads, 672 evsel_list->core.threads->err_thread)) { 673 evsel_list->core.threads->err_thread = -1; 674 return COUNTER_RETRY; 675 } 676 } else if (counter->skippable) { 677 if (verbose > 0) 678 ui__warning("skipping event %s that kernel failed to open .\n", 679 evsel__name(counter)); 680 counter->supported = false; 681 counter->errored = true; 682 return COUNTER_SKIP; 683 } 684 685 evsel__open_strerror(counter, &target, errno, msg, sizeof(msg)); 686 ui__error("%s\n", msg); 687 688 if (child_pid != -1) 689 kill(child_pid, SIGTERM); 690 return COUNTER_FATAL; 691 } 692 693 static int __run_perf_stat(int argc, const char **argv, int run_idx) 694 { 695 int interval = stat_config.interval; 696 int times = stat_config.times; 697 int timeout = stat_config.timeout; 698 char msg[BUFSIZ]; 699 unsigned long long t0, t1; 700 struct evsel *counter; 701 size_t l; 702 int status = 0; 703 const bool forks = (argc > 0); 704 bool is_pipe = STAT_RECORD ? 
perf_stat.data.is_pipe : false; 705 struct evlist_cpu_iterator evlist_cpu_itr; 706 struct affinity saved_affinity, *affinity = NULL; 707 int err; 708 bool second_pass = false; 709 710 if (forks) { 711 if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) { 712 perror("failed to prepare workload"); 713 return -1; 714 } 715 child_pid = evsel_list->workload.pid; 716 } 717 718 if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) { 719 if (affinity__setup(&saved_affinity) < 0) 720 return -1; 721 affinity = &saved_affinity; 722 } 723 724 evlist__for_each_entry(evsel_list, counter) { 725 counter->reset_group = false; 726 if (bpf_counter__load(counter, &target)) 727 return -1; 728 if (!(evsel__is_bperf(counter))) 729 all_counters_use_bpf = false; 730 } 731 732 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { 733 counter = evlist_cpu_itr.evsel; 734 735 /* 736 * bperf calls evsel__open_per_cpu() in bperf__load(), so 737 * no need to call it again here. 738 */ 739 if (target.use_bpf) 740 break; 741 742 if (counter->reset_group || counter->errored) 743 continue; 744 if (evsel__is_bperf(counter)) 745 continue; 746 try_again: 747 if (create_perf_stat_counter(counter, &stat_config, &target, 748 evlist_cpu_itr.cpu_map_idx) < 0) { 749 750 /* 751 * Weak group failed. We cannot just undo this here 752 * because earlier CPUs might be in group mode, and the kernel 753 * doesn't support mixing group and non group reads. Defer 754 * it to later. 755 * Don't close here because we're in the wrong affinity. 756 */ 757 if ((errno == EINVAL || errno == EBADF) && 758 evsel__leader(counter) != counter && 759 counter->weak_group) { 760 evlist__reset_weak_group(evsel_list, counter, false); 761 assert(counter->reset_group); 762 second_pass = true; 763 continue; 764 } 765 766 switch (stat_handle_error(counter)) { 767 case COUNTER_FATAL: 768 return -1; 769 case COUNTER_RETRY: 770 goto try_again; 771 case COUNTER_SKIP: 772 continue; 773 default: 774 break; 775 } 776 777 } 778 counter->supported = true; 779 } 780 781 if (second_pass) { 782 /* 783 * Now redo all the weak group after closing them, 784 * and also close errored counters. 
785 */ 786 787 /* First close errored or weak retry */ 788 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { 789 counter = evlist_cpu_itr.evsel; 790 791 if (!counter->reset_group && !counter->errored) 792 continue; 793 794 perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx); 795 } 796 /* Now reopen weak */ 797 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { 798 counter = evlist_cpu_itr.evsel; 799 800 if (!counter->reset_group) 801 continue; 802 try_again_reset: 803 pr_debug2("reopening weak %s\n", evsel__name(counter)); 804 if (create_perf_stat_counter(counter, &stat_config, &target, 805 evlist_cpu_itr.cpu_map_idx) < 0) { 806 807 switch (stat_handle_error(counter)) { 808 case COUNTER_FATAL: 809 return -1; 810 case COUNTER_RETRY: 811 goto try_again_reset; 812 case COUNTER_SKIP: 813 continue; 814 default: 815 break; 816 } 817 } 818 counter->supported = true; 819 } 820 } 821 affinity__cleanup(affinity); 822 823 evlist__for_each_entry(evsel_list, counter) { 824 if (!counter->supported) { 825 perf_evsel__free_fd(&counter->core); 826 continue; 827 } 828 829 l = strlen(counter->unit); 830 if (l > stat_config.unit_width) 831 stat_config.unit_width = l; 832 833 if (evsel__should_store_id(counter) && 834 evsel__store_ids(counter, evsel_list)) 835 return -1; 836 } 837 838 if (evlist__apply_filters(evsel_list, &counter)) { 839 pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n", 840 counter->filter, evsel__name(counter), errno, 841 str_error_r(errno, msg, sizeof(msg))); 842 return -1; 843 } 844 845 if (STAT_RECORD) { 846 int fd = perf_data__fd(&perf_stat.data); 847 848 if (is_pipe) { 849 err = perf_header__write_pipe(perf_data__fd(&perf_stat.data)); 850 } else { 851 err = perf_session__write_header(perf_stat.session, evsel_list, 852 fd, false); 853 } 854 855 if (err < 0) 856 return err; 857 858 err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list, 859 process_synthesized_event, is_pipe); 860 if (err < 0) 861 return err; 862 } 863 864 if (target.initial_delay) { 865 pr_info(EVLIST_DISABLED_MSG); 866 } else { 867 err = enable_counters(); 868 if (err) 869 return -1; 870 } 871 872 /* Exec the command, if any */ 873 if (forks) 874 evlist__start_workload(evsel_list); 875 876 if (target.initial_delay > 0) { 877 usleep(target.initial_delay * USEC_PER_MSEC); 878 err = enable_counters(); 879 if (err) 880 return -1; 881 882 pr_info(EVLIST_ENABLED_MSG); 883 } 884 885 t0 = rdclock(); 886 clock_gettime(CLOCK_MONOTONIC, &ref_time); 887 888 if (forks) { 889 if (interval || timeout || evlist__ctlfd_initialized(evsel_list)) 890 status = dispatch_events(forks, timeout, interval, ×); 891 if (child_pid != -1) { 892 if (timeout) 893 kill(child_pid, SIGTERM); 894 wait4(child_pid, &status, 0, &stat_config.ru_data); 895 } 896 897 if (workload_exec_errno) { 898 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg)); 899 pr_err("Workload failed: %s\n", emsg); 900 return -1; 901 } 902 903 if (WIFSIGNALED(status)) 904 psignal(WTERMSIG(status), argv[0]); 905 } else { 906 status = dispatch_events(forks, timeout, interval, ×); 907 } 908 909 disable_counters(); 910 911 t1 = rdclock(); 912 913 if (stat_config.walltime_run_table) 914 stat_config.walltime_run[run_idx] = t1 - t0; 915 916 if (interval && stat_config.summary) { 917 stat_config.interval = 0; 918 stat_config.stop_read_counter = true; 919 init_stats(&walltime_nsecs_stats); 920 update_stats(&walltime_nsecs_stats, t1 - t0); 921 922 evlist__copy_prev_raw_counts(evsel_list); 923 
evlist__reset_prev_raw_counts(evsel_list); 924 evlist__reset_aggr_stats(evsel_list); 925 } else { 926 update_stats(&walltime_nsecs_stats, t1 - t0); 927 update_rusage_stats(&ru_stats, &stat_config.ru_data); 928 } 929 930 /* 931 * Closing a group leader splits the group, and as we only disable 932 * group leaders, results in remaining events becoming enabled. To 933 * avoid arbitrary skew, we must read all counters before closing any 934 * group leaders. 935 */ 936 if (read_counters(&(struct timespec) { .tv_nsec = t1-t0 }) == 0) 937 process_counters(); 938 939 /* 940 * We need to keep evsel_list alive, because it's processed 941 * later the evsel_list will be closed after. 942 */ 943 if (!STAT_RECORD) 944 evlist__close(evsel_list); 945 946 return WEXITSTATUS(status); 947 } 948 949 static int run_perf_stat(int argc, const char **argv, int run_idx) 950 { 951 int ret; 952 953 if (pre_cmd) { 954 ret = system(pre_cmd); 955 if (ret) 956 return ret; 957 } 958 959 if (sync_run) 960 sync(); 961 962 ret = __run_perf_stat(argc, argv, run_idx); 963 if (ret) 964 return ret; 965 966 if (post_cmd) { 967 ret = system(post_cmd); 968 if (ret) 969 return ret; 970 } 971 972 return ret; 973 } 974 975 static void print_counters(struct timespec *ts, int argc, const char **argv) 976 { 977 /* Do not print anything if we record to the pipe. */ 978 if (STAT_RECORD && perf_stat.data.is_pipe) 979 return; 980 if (quiet) 981 return; 982 983 evlist__print_counters(evsel_list, &stat_config, &target, ts, argc, argv); 984 } 985 986 static volatile sig_atomic_t signr = -1; 987 988 static void skip_signal(int signo) 989 { 990 if ((child_pid == -1) || stat_config.interval) 991 done = 1; 992 993 signr = signo; 994 /* 995 * render child_pid harmless 996 * won't send SIGTERM to a random 997 * process in case of race condition 998 * and fast PID recycling 999 */ 1000 child_pid = -1; 1001 } 1002 1003 static void sig_atexit(void) 1004 { 1005 sigset_t set, oset; 1006 1007 /* 1008 * avoid race condition with SIGCHLD handler 1009 * in skip_signal() which is modifying child_pid 1010 * goal is to avoid send SIGTERM to a random 1011 * process 1012 */ 1013 sigemptyset(&set); 1014 sigaddset(&set, SIGCHLD); 1015 sigprocmask(SIG_BLOCK, &set, &oset); 1016 1017 if (child_pid != -1) 1018 kill(child_pid, SIGTERM); 1019 1020 sigprocmask(SIG_SETMASK, &oset, NULL); 1021 1022 if (signr == -1) 1023 return; 1024 1025 signal(signr, SIG_DFL); 1026 kill(getpid(), signr); 1027 } 1028 1029 void perf_stat__set_big_num(int set) 1030 { 1031 stat_config.big_num = (set != 0); 1032 } 1033 1034 void perf_stat__set_no_csv_summary(int set) 1035 { 1036 stat_config.no_csv_summary = (set != 0); 1037 } 1038 1039 static int stat__set_big_num(const struct option *opt __maybe_unused, 1040 const char *s __maybe_unused, int unset) 1041 { 1042 big_num_opt = unset ? 
0 : 1; 1043 perf_stat__set_big_num(!unset); 1044 return 0; 1045 } 1046 1047 static int enable_metric_only(const struct option *opt __maybe_unused, 1048 const char *s __maybe_unused, int unset) 1049 { 1050 force_metric_only = true; 1051 stat_config.metric_only = !unset; 1052 return 0; 1053 } 1054 1055 static int append_metric_groups(const struct option *opt __maybe_unused, 1056 const char *str, 1057 int unset __maybe_unused) 1058 { 1059 if (metrics) { 1060 char *tmp; 1061 1062 if (asprintf(&tmp, "%s,%s", metrics, str) < 0) 1063 return -ENOMEM; 1064 free(metrics); 1065 metrics = tmp; 1066 } else { 1067 metrics = strdup(str); 1068 if (!metrics) 1069 return -ENOMEM; 1070 } 1071 return 0; 1072 } 1073 1074 static int parse_control_option(const struct option *opt, 1075 const char *str, 1076 int unset __maybe_unused) 1077 { 1078 struct perf_stat_config *config = opt->value; 1079 1080 return evlist__parse_control(str, &config->ctl_fd, &config->ctl_fd_ack, &config->ctl_fd_close); 1081 } 1082 1083 static int parse_stat_cgroups(const struct option *opt, 1084 const char *str, int unset) 1085 { 1086 if (stat_config.cgroup_list) { 1087 pr_err("--cgroup and --for-each-cgroup cannot be used together\n"); 1088 return -1; 1089 } 1090 1091 return parse_cgroups(opt, str, unset); 1092 } 1093 1094 static int parse_cputype(const struct option *opt, 1095 const char *str, 1096 int unset __maybe_unused) 1097 { 1098 const struct perf_pmu *pmu; 1099 struct evlist *evlist = *(struct evlist **)opt->value; 1100 1101 if (!list_empty(&evlist->core.entries)) { 1102 fprintf(stderr, "Must define cputype before events/metrics\n"); 1103 return -1; 1104 } 1105 1106 pmu = perf_pmus__pmu_for_pmu_filter(str); 1107 if (!pmu) { 1108 fprintf(stderr, "--cputype %s is not supported!\n", str); 1109 return -1; 1110 } 1111 parse_events_option_args.pmu_filter = pmu->name; 1112 1113 return 0; 1114 } 1115 1116 static int parse_cache_level(const struct option *opt, 1117 const char *str, 1118 int unset __maybe_unused) 1119 { 1120 int level; 1121 u32 *aggr_mode = (u32 *)opt->value; 1122 u32 *aggr_level = (u32 *)opt->data; 1123 1124 /* 1125 * If no string is specified, aggregate based on the topology of 1126 * Last Level Cache (LLC). Since the LLC level can change from 1127 * architecture to architecture, set level greater than 1128 * MAX_CACHE_LVL which will be interpreted as LLC. 1129 */ 1130 if (str == NULL) { 1131 level = MAX_CACHE_LVL + 1; 1132 goto out; 1133 } 1134 1135 /* 1136 * The format to specify cache level is LX or lX where X is the 1137 * cache level. 1138 */ 1139 if (strlen(str) != 2 || (str[0] != 'l' && str[0] != 'L')) { 1140 pr_err("Cache level must be of form L[1-%d], or l[1-%d]\n", 1141 MAX_CACHE_LVL, 1142 MAX_CACHE_LVL); 1143 return -EINVAL; 1144 } 1145 1146 level = atoi(&str[1]); 1147 if (level < 1) { 1148 pr_err("Cache level must be of form L[1-%d], or l[1-%d]\n", 1149 MAX_CACHE_LVL, 1150 MAX_CACHE_LVL); 1151 return -EINVAL; 1152 } 1153 1154 if (level > MAX_CACHE_LVL) { 1155 pr_err("perf only supports max cache level of %d.\n" 1156 "Consider increasing MAX_CACHE_LVL\n", MAX_CACHE_LVL); 1157 return -EINVAL; 1158 } 1159 out: 1160 *aggr_mode = AGGR_CACHE; 1161 *aggr_level = level; 1162 return 0; 1163 } 1164 1165 static struct option stat_options[] = { 1166 OPT_BOOLEAN('T', "transaction", &transaction_run, 1167 "hardware transaction statistics"), 1168 OPT_CALLBACK('e', "event", &parse_events_option_args, "event", 1169 "event selector. 
use 'perf list' to list available events",
                 parse_events_option),
    OPT_CALLBACK(0, "filter", &evsel_list, "filter",
                 "event filter", parse_filter),
    OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit,
                "child tasks do not inherit counters"),
    OPT_STRING('p', "pid", &target.pid, "pid",
               "stat events on existing process id"),
    OPT_STRING('t', "tid", &target.tid, "tid",
               "stat events on existing thread id"),
#ifdef HAVE_BPF_SKEL
    OPT_STRING('b', "bpf-prog", &target.bpf_str, "bpf-prog-id",
               "stat events on existing bpf program id"),
    OPT_BOOLEAN(0, "bpf-counters", &target.use_bpf,
                "use bpf program to count events"),
    OPT_STRING(0, "bpf-attr-map", &target.attr_map, "attr-map-path",
               "path to perf_event_attr map"),
#endif
    OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
                "system-wide collection from all CPUs"),
    OPT_BOOLEAN(0, "scale", &stat_config.scale,
                "Use --no-scale to disable counter scaling for multiplexing"),
    OPT_INCR('v', "verbose", &verbose,
             "be more verbose (show counter open errors, etc)"),
    OPT_INTEGER('r', "repeat", &stat_config.run_count,
                "repeat command and print average + stddev (max: 100, forever: 0)"),
    OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table,
                "display details about each run (only with -r option)"),
    OPT_BOOLEAN('n', "null", &stat_config.null_run,
                "null run - don't start any counters"),
    OPT_INCR('d', "detailed", &detailed_run,
             "detailed run - start a lot of events"),
    OPT_BOOLEAN('S', "sync", &sync_run,
                "call sync() before starting a run"),
    OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
                       "print large numbers with thousands\' separators",
                       stat__set_big_num),
    OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
               "list of cpus to monitor in system-wide mode"),
    OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
                 "disable CPU count aggregation", AGGR_NONE),
    OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identically named events"),
    OPT_BOOLEAN(0, "hybrid-merge", &stat_config.hybrid_merge,
                "Merge identically named hybrid events"),
    OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator",
               "print counts with custom separator"),
    OPT_BOOLEAN('j', "json-output", &stat_config.json_output,
                "print counts in JSON format"),
    OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
                 "monitor event in cgroup name only", parse_stat_cgroups),
    OPT_STRING(0, "for-each-cgroup", &stat_config.cgroup_list, "name",
               "expand events for each cgroup"),
    OPT_STRING('o', "output", &output_name, "file", "output file name"),
    OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
    OPT_INTEGER(0, "log-fd", &output_fd,
                "log output to fd, instead of stderr"),
    OPT_STRING(0, "pre", &pre_cmd, "command",
               "command to run prior to the measured command"),
    OPT_STRING(0, "post", &post_cmd, "command",
               "command to run after the measured command"),
    OPT_UINTEGER('I', "interval-print", &stat_config.interval,
                 "print counts at regular interval in ms "
                 "(overhead is possible for values <= 100ms)"),
    OPT_INTEGER(0, "interval-count", &stat_config.times,
                "print counts for a fixed number of intervals"),
    OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear,
                "clear the screen between intervals"),
    OPT_UINTEGER(0, "timeout", &stat_config.timeout,
                 "stop workload and print counts after a timeout period in ms (>= 10ms)"),
    OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
                 "aggregate counts per processor socket", AGGR_SOCKET),
    OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode,
                 "aggregate counts per processor die", AGGR_DIE),
    OPT_CALLBACK_OPTARG(0, "per-cache", &stat_config.aggr_mode, &stat_config.aggr_level,
                        "cache level", "aggregate count at this cache level (Default: LLC)",
                        parse_cache_level),
    OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
                 "aggregate counts per physical processor core", AGGR_CORE),
    OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
                 "aggregate counts per thread", AGGR_THREAD),
    OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode,
                 "aggregate counts per NUMA node", AGGR_NODE),
    OPT_INTEGER('D', "delay", &target.initial_delay,
                "ms to wait before starting measurement after program start (-1: start with events disabled)"),
    OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
                       "Only print computed metrics. No raw values", enable_metric_only),
    OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group,
                "don't group metric events, impacts multiplexing"),
    OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge,
                "don't try to share events between metrics in a group"),
    OPT_BOOLEAN(0, "metric-no-threshold", &stat_config.metric_no_threshold,
                "don't try to add events for the metric threshold calculation"),
    OPT_BOOLEAN(0, "topdown", &topdown_run,
                "measure top-down statistics"),
    OPT_UINTEGER(0, "td-level", &stat_config.topdown_level,
                 "Set the metrics level for the top-down statistics (0: max level)"),
    OPT_BOOLEAN(0, "smi-cost", &smi_cost,
                "measure SMI cost"),
    OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
                 "monitor specified metrics or metric groups (separated by ,)",
                 append_metric_groups),
    OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel,
                     "Configure all used events to run in kernel space.",
                     PARSE_OPT_EXCLUSIVE),
    OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user,
                     "Configure all used events to run in user space.",
                     PARSE_OPT_EXCLUSIVE),
    OPT_BOOLEAN(0, "percore-show-thread", &stat_config.percore_show_thread,
                "Use with the 'percore' event qualifier to show per-thread event "
                "counts summed over all hardware threads of the same physical core"),
    OPT_BOOLEAN(0, "summary", &stat_config.summary,
                "print summary for interval mode"),
    OPT_BOOLEAN(0, "no-csv-summary", &stat_config.no_csv_summary,
                "don't print 'summary' for CSV summary output"),
    OPT_BOOLEAN(0, "quiet", &quiet,
                "don't print any output, messages or warnings (useful with record)"),
    OPT_CALLBACK(0, "cputype", &evsel_list, "hybrid cpu type",
                 "Only enable events on CPUs of this type "
                 "on hybrid platforms (e.g. core or atom)",
                 parse_cputype),
#ifdef HAVE_LIBPFM
    OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
                 "libpfm4 event selector. use 'perf list' to list available events",
                 parse_libpfm_events_option),
#endif
    OPT_CALLBACK(0, "control", &stat_config, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
                 "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events).\n"
                 "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
                 "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
                 parse_control_option),
    OPT_CALLBACK_OPTARG(0, "iostat", &evsel_list, &stat_config, "default",
                        "measure I/O performance metrics provided by arch/platform",
                        iostat_parse),
    OPT_END()
};

/**
 * Calculate the cache instance ID from the map in
 * /sys/devices/system/cpu/cpuX/cache/indexY/shared_cpu_list
 * Cache instance ID is the first CPU reported in the shared_cpu_list file.
 */
static int cpu__get_cache_id_from_map(struct perf_cpu cpu, char *map)
{
    int id;
    struct perf_cpu_map *cpu_map = perf_cpu_map__new(map);

    /*
     * If the map contains no CPU, use the current CPU as the cache
     * instance ID; otherwise use the first online CPU of the cache
     * domain as the ID.
     */
    if (perf_cpu_map__empty(cpu_map))
        id = cpu.cpu;
    else
        id = perf_cpu_map__cpu(cpu_map, 0).cpu;

    /* Free the perf_cpu_map used to find the cache ID */
    perf_cpu_map__put(cpu_map);

    return id;
}

/**
 * cpu__get_cache_details - Returns 0 if successful in populating the
 * cache level and cache id. Cache level is read from
 * /sys/devices/system/cpu/cpuX/cache/indexY/level, whereas the cache instance
 * ID is the first CPU reported by
 * /sys/devices/system/cpu/cpuX/cache/indexY/shared_cpu_list
 */
static int cpu__get_cache_details(struct perf_cpu cpu, struct perf_cache *cache)
{
    int ret = 0;
    u32 cache_level = stat_config.aggr_level;
    struct cpu_cache_level caches[MAX_CACHE_LVL];
    u32 i = 0, caches_cnt = 0;

    cache->cache_lvl = (cache_level > MAX_CACHE_LVL) ? 0 : cache_level;
    cache->cache = -1;

    ret = build_caches_for_cpu(cpu.cpu, caches, &caches_cnt);
    if (ret) {
        /*
         * If caches_cnt is not 0, cpu_cache_level data
         * was allocated when building the topology.
         * Free the allocated data before returning.
         */
        if (caches_cnt)
            goto free_caches;

        return ret;
    }

    if (!caches_cnt)
        return -1;

    /*
     * Save the data for the highest level if no
     * level was specified by the user.
     */
    if (cache_level > MAX_CACHE_LVL) {
        int max_level_index = 0;

        for (i = 1; i < caches_cnt; ++i) {
            if (caches[i].level > caches[max_level_index].level)
                max_level_index = i;
        }

        cache->cache_lvl = caches[max_level_index].level;
        cache->cache = cpu__get_cache_id_from_map(cpu, caches[max_level_index].map);

        /* Reset i to 0 to free entire caches[] */
        i = 0;
        goto free_caches;
    }

    for (i = 0; i < caches_cnt; ++i) {
        if (caches[i].level == cache_level) {
            cache->cache_lvl = cache_level;
            cache->cache = cpu__get_cache_id_from_map(cpu, caches[i].map);
        }

        cpu_cache_level__free(&caches[i]);
    }

free_caches:
    /*
     * Free all the allocated cpu_cache_level data.
1397 */ 1398 while (i < caches_cnt) 1399 cpu_cache_level__free(&caches[i++]); 1400 1401 return ret; 1402 } 1403 1404 /** 1405 * aggr_cpu_id__cache - Create an aggr_cpu_id with cache instache ID, cache 1406 * level, die and socket populated with the cache instache ID, cache level, 1407 * die and socket for cpu. The function signature is compatible with 1408 * aggr_cpu_id_get_t. 1409 */ 1410 static struct aggr_cpu_id aggr_cpu_id__cache(struct perf_cpu cpu, void *data) 1411 { 1412 int ret; 1413 struct aggr_cpu_id id; 1414 struct perf_cache cache; 1415 1416 id = aggr_cpu_id__die(cpu, data); 1417 if (aggr_cpu_id__is_empty(&id)) 1418 return id; 1419 1420 ret = cpu__get_cache_details(cpu, &cache); 1421 if (ret) 1422 return id; 1423 1424 id.cache_lvl = cache.cache_lvl; 1425 id.cache = cache.cache; 1426 return id; 1427 } 1428 1429 static const char *const aggr_mode__string[] = { 1430 [AGGR_CORE] = "core", 1431 [AGGR_CACHE] = "cache", 1432 [AGGR_DIE] = "die", 1433 [AGGR_GLOBAL] = "global", 1434 [AGGR_NODE] = "node", 1435 [AGGR_NONE] = "none", 1436 [AGGR_SOCKET] = "socket", 1437 [AGGR_THREAD] = "thread", 1438 [AGGR_UNSET] = "unset", 1439 }; 1440 1441 static struct aggr_cpu_id perf_stat__get_socket(struct perf_stat_config *config __maybe_unused, 1442 struct perf_cpu cpu) 1443 { 1444 return aggr_cpu_id__socket(cpu, /*data=*/NULL); 1445 } 1446 1447 static struct aggr_cpu_id perf_stat__get_die(struct perf_stat_config *config __maybe_unused, 1448 struct perf_cpu cpu) 1449 { 1450 return aggr_cpu_id__die(cpu, /*data=*/NULL); 1451 } 1452 1453 static struct aggr_cpu_id perf_stat__get_cache_id(struct perf_stat_config *config __maybe_unused, 1454 struct perf_cpu cpu) 1455 { 1456 return aggr_cpu_id__cache(cpu, /*data=*/NULL); 1457 } 1458 1459 static struct aggr_cpu_id perf_stat__get_core(struct perf_stat_config *config __maybe_unused, 1460 struct perf_cpu cpu) 1461 { 1462 return aggr_cpu_id__core(cpu, /*data=*/NULL); 1463 } 1464 1465 static struct aggr_cpu_id perf_stat__get_node(struct perf_stat_config *config __maybe_unused, 1466 struct perf_cpu cpu) 1467 { 1468 return aggr_cpu_id__node(cpu, /*data=*/NULL); 1469 } 1470 1471 static struct aggr_cpu_id perf_stat__get_global(struct perf_stat_config *config __maybe_unused, 1472 struct perf_cpu cpu) 1473 { 1474 return aggr_cpu_id__global(cpu, /*data=*/NULL); 1475 } 1476 1477 static struct aggr_cpu_id perf_stat__get_cpu(struct perf_stat_config *config __maybe_unused, 1478 struct perf_cpu cpu) 1479 { 1480 return aggr_cpu_id__cpu(cpu, /*data=*/NULL); 1481 } 1482 1483 static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config, 1484 aggr_get_id_t get_id, struct perf_cpu cpu) 1485 { 1486 struct aggr_cpu_id id; 1487 1488 /* per-process mode - should use global aggr mode */ 1489 if (cpu.cpu == -1) 1490 return get_id(config, cpu); 1491 1492 if (aggr_cpu_id__is_empty(&config->cpus_aggr_map->map[cpu.cpu])) 1493 config->cpus_aggr_map->map[cpu.cpu] = get_id(config, cpu); 1494 1495 id = config->cpus_aggr_map->map[cpu.cpu]; 1496 return id; 1497 } 1498 1499 static struct aggr_cpu_id perf_stat__get_socket_cached(struct perf_stat_config *config, 1500 struct perf_cpu cpu) 1501 { 1502 return perf_stat__get_aggr(config, perf_stat__get_socket, cpu); 1503 } 1504 1505 static struct aggr_cpu_id perf_stat__get_die_cached(struct perf_stat_config *config, 1506 struct perf_cpu cpu) 1507 { 1508 return perf_stat__get_aggr(config, perf_stat__get_die, cpu); 1509 } 1510 1511 static struct aggr_cpu_id perf_stat__get_cache_id_cached(struct perf_stat_config *config, 1512 struct 
perf_cpu cpu) 1513 { 1514 return perf_stat__get_aggr(config, perf_stat__get_cache_id, cpu); 1515 } 1516 1517 static struct aggr_cpu_id perf_stat__get_core_cached(struct perf_stat_config *config, 1518 struct perf_cpu cpu) 1519 { 1520 return perf_stat__get_aggr(config, perf_stat__get_core, cpu); 1521 } 1522 1523 static struct aggr_cpu_id perf_stat__get_node_cached(struct perf_stat_config *config, 1524 struct perf_cpu cpu) 1525 { 1526 return perf_stat__get_aggr(config, perf_stat__get_node, cpu); 1527 } 1528 1529 static struct aggr_cpu_id perf_stat__get_global_cached(struct perf_stat_config *config, 1530 struct perf_cpu cpu) 1531 { 1532 return perf_stat__get_aggr(config, perf_stat__get_global, cpu); 1533 } 1534 1535 static struct aggr_cpu_id perf_stat__get_cpu_cached(struct perf_stat_config *config, 1536 struct perf_cpu cpu) 1537 { 1538 return perf_stat__get_aggr(config, perf_stat__get_cpu, cpu); 1539 } 1540 1541 static aggr_cpu_id_get_t aggr_mode__get_aggr(enum aggr_mode aggr_mode) 1542 { 1543 switch (aggr_mode) { 1544 case AGGR_SOCKET: 1545 return aggr_cpu_id__socket; 1546 case AGGR_DIE: 1547 return aggr_cpu_id__die; 1548 case AGGR_CACHE: 1549 return aggr_cpu_id__cache; 1550 case AGGR_CORE: 1551 return aggr_cpu_id__core; 1552 case AGGR_NODE: 1553 return aggr_cpu_id__node; 1554 case AGGR_NONE: 1555 return aggr_cpu_id__cpu; 1556 case AGGR_GLOBAL: 1557 return aggr_cpu_id__global; 1558 case AGGR_THREAD: 1559 case AGGR_UNSET: 1560 case AGGR_MAX: 1561 default: 1562 return NULL; 1563 } 1564 } 1565 1566 static aggr_get_id_t aggr_mode__get_id(enum aggr_mode aggr_mode) 1567 { 1568 switch (aggr_mode) { 1569 case AGGR_SOCKET: 1570 return perf_stat__get_socket_cached; 1571 case AGGR_DIE: 1572 return perf_stat__get_die_cached; 1573 case AGGR_CACHE: 1574 return perf_stat__get_cache_id_cached; 1575 case AGGR_CORE: 1576 return perf_stat__get_core_cached; 1577 case AGGR_NODE: 1578 return perf_stat__get_node_cached; 1579 case AGGR_NONE: 1580 return perf_stat__get_cpu_cached; 1581 case AGGR_GLOBAL: 1582 return perf_stat__get_global_cached; 1583 case AGGR_THREAD: 1584 case AGGR_UNSET: 1585 case AGGR_MAX: 1586 default: 1587 return NULL; 1588 } 1589 } 1590 1591 static int perf_stat_init_aggr_mode(void) 1592 { 1593 int nr; 1594 aggr_cpu_id_get_t get_id = aggr_mode__get_aggr(stat_config.aggr_mode); 1595 1596 if (get_id) { 1597 bool needs_sort = stat_config.aggr_mode != AGGR_NONE; 1598 stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus, 1599 get_id, /*data=*/NULL, needs_sort); 1600 if (!stat_config.aggr_map) { 1601 pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]); 1602 return -1; 1603 } 1604 stat_config.aggr_get_id = aggr_mode__get_id(stat_config.aggr_mode); 1605 } 1606 1607 if (stat_config.aggr_mode == AGGR_THREAD) { 1608 nr = perf_thread_map__nr(evsel_list->core.threads); 1609 stat_config.aggr_map = cpu_aggr_map__empty_new(nr); 1610 if (stat_config.aggr_map == NULL) 1611 return -ENOMEM; 1612 1613 for (int s = 0; s < nr; s++) { 1614 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1615 1616 id.thread_idx = s; 1617 stat_config.aggr_map->map[s] = id; 1618 } 1619 return 0; 1620 } 1621 1622 /* 1623 * The evsel_list->cpus is the base we operate on, 1624 * taking the highest cpu number to be the size of 1625 * the aggregation translate cpumap. 
1626 */ 1627 if (evsel_list->core.user_requested_cpus) 1628 nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu; 1629 else 1630 nr = 0; 1631 stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1); 1632 return stat_config.cpus_aggr_map ? 0 : -ENOMEM; 1633 } 1634 1635 static void cpu_aggr_map__delete(struct cpu_aggr_map *map) 1636 { 1637 if (map) { 1638 WARN_ONCE(refcount_read(&map->refcnt) != 0, 1639 "cpu_aggr_map refcnt unbalanced\n"); 1640 free(map); 1641 } 1642 } 1643 1644 static void cpu_aggr_map__put(struct cpu_aggr_map *map) 1645 { 1646 if (map && refcount_dec_and_test(&map->refcnt)) 1647 cpu_aggr_map__delete(map); 1648 } 1649 1650 static void perf_stat__exit_aggr_mode(void) 1651 { 1652 cpu_aggr_map__put(stat_config.aggr_map); 1653 cpu_aggr_map__put(stat_config.cpus_aggr_map); 1654 stat_config.aggr_map = NULL; 1655 stat_config.cpus_aggr_map = NULL; 1656 } 1657 1658 static struct aggr_cpu_id perf_env__get_socket_aggr_by_cpu(struct perf_cpu cpu, void *data) 1659 { 1660 struct perf_env *env = data; 1661 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1662 1663 if (cpu.cpu != -1) 1664 id.socket = env->cpu[cpu.cpu].socket_id; 1665 1666 return id; 1667 } 1668 1669 static struct aggr_cpu_id perf_env__get_die_aggr_by_cpu(struct perf_cpu cpu, void *data) 1670 { 1671 struct perf_env *env = data; 1672 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1673 1674 if (cpu.cpu != -1) { 1675 /* 1676 * die_id is relative to socket, so start 1677 * with the socket ID and then add die to 1678 * make a unique ID. 1679 */ 1680 id.socket = env->cpu[cpu.cpu].socket_id; 1681 id.die = env->cpu[cpu.cpu].die_id; 1682 } 1683 1684 return id; 1685 } 1686 1687 static void perf_env__get_cache_id_for_cpu(struct perf_cpu cpu, struct perf_env *env, 1688 u32 cache_level, struct aggr_cpu_id *id) 1689 { 1690 int i; 1691 int caches_cnt = env->caches_cnt; 1692 struct cpu_cache_level *caches = env->caches; 1693 1694 id->cache_lvl = (cache_level > MAX_CACHE_LVL) ? 0 : cache_level; 1695 id->cache = -1; 1696 1697 if (!caches_cnt) 1698 return; 1699 1700 for (i = caches_cnt - 1; i > -1; --i) { 1701 struct perf_cpu_map *cpu_map; 1702 int map_contains_cpu; 1703 1704 /* 1705 * If user has not specified a level, find the fist level with 1706 * the cpu in the map. Since building the map is expensive, do 1707 * this only if levels match. 
1708 */ 1709 if (cache_level <= MAX_CACHE_LVL && caches[i].level != cache_level) 1710 continue; 1711 1712 cpu_map = perf_cpu_map__new(caches[i].map); 1713 map_contains_cpu = perf_cpu_map__idx(cpu_map, cpu); 1714 perf_cpu_map__put(cpu_map); 1715 1716 if (map_contains_cpu != -1) { 1717 id->cache_lvl = caches[i].level; 1718 id->cache = cpu__get_cache_id_from_map(cpu, caches[i].map); 1719 return; 1720 } 1721 } 1722 } 1723 1724 static struct aggr_cpu_id perf_env__get_cache_aggr_by_cpu(struct perf_cpu cpu, 1725 void *data) 1726 { 1727 struct perf_env *env = data; 1728 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1729 1730 if (cpu.cpu != -1) { 1731 u32 cache_level = (perf_stat.aggr_level) ?: stat_config.aggr_level; 1732 1733 id.socket = env->cpu[cpu.cpu].socket_id; 1734 id.die = env->cpu[cpu.cpu].die_id; 1735 perf_env__get_cache_id_for_cpu(cpu, env, cache_level, &id); 1736 } 1737 1738 return id; 1739 } 1740 1741 static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(struct perf_cpu cpu, void *data) 1742 { 1743 struct perf_env *env = data; 1744 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1745 1746 if (cpu.cpu != -1) { 1747 /* 1748 * core_id is relative to socket and die, 1749 * we need a global id. So we set 1750 * socket, die id and core id 1751 */ 1752 id.socket = env->cpu[cpu.cpu].socket_id; 1753 id.die = env->cpu[cpu.cpu].die_id; 1754 id.core = env->cpu[cpu.cpu].core_id; 1755 } 1756 1757 return id; 1758 } 1759 1760 static struct aggr_cpu_id perf_env__get_cpu_aggr_by_cpu(struct perf_cpu cpu, void *data) 1761 { 1762 struct perf_env *env = data; 1763 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1764 1765 if (cpu.cpu != -1) { 1766 /* 1767 * core_id is relative to socket and die, 1768 * we need a global id. So we set 1769 * socket, die id and core id 1770 */ 1771 id.socket = env->cpu[cpu.cpu].socket_id; 1772 id.die = env->cpu[cpu.cpu].die_id; 1773 id.core = env->cpu[cpu.cpu].core_id; 1774 id.cpu = cpu; 1775 } 1776 1777 return id; 1778 } 1779 1780 static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(struct perf_cpu cpu, void *data) 1781 { 1782 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1783 1784 id.node = perf_env__numa_node(data, cpu); 1785 return id; 1786 } 1787 1788 static struct aggr_cpu_id perf_env__get_global_aggr_by_cpu(struct perf_cpu cpu __maybe_unused, 1789 void *data __maybe_unused) 1790 { 1791 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1792 1793 /* it always aggregates to the cpu 0 */ 1794 id.cpu = (struct perf_cpu){ .cpu = 0 }; 1795 return id; 1796 } 1797 1798 static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused, 1799 struct perf_cpu cpu) 1800 { 1801 return perf_env__get_socket_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1802 } 1803 static struct aggr_cpu_id perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused, 1804 struct perf_cpu cpu) 1805 { 1806 return perf_env__get_die_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1807 } 1808 1809 static struct aggr_cpu_id perf_stat__get_cache_file(struct perf_stat_config *config __maybe_unused, 1810 struct perf_cpu cpu) 1811 { 1812 return perf_env__get_cache_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1813 } 1814 1815 static struct aggr_cpu_id perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused, 1816 struct perf_cpu cpu) 1817 { 1818 return perf_env__get_core_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1819 } 1820 1821 static struct aggr_cpu_id perf_stat__get_cpu_file(struct perf_stat_config *config __maybe_unused, 1822 struct 
perf_cpu cpu) 1823 { 1824 return perf_env__get_cpu_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1825 } 1826 1827 static struct aggr_cpu_id perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused, 1828 struct perf_cpu cpu) 1829 { 1830 return perf_env__get_node_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1831 } 1832 1833 static struct aggr_cpu_id perf_stat__get_global_file(struct perf_stat_config *config __maybe_unused, 1834 struct perf_cpu cpu) 1835 { 1836 return perf_env__get_global_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1837 } 1838 1839 static aggr_cpu_id_get_t aggr_mode__get_aggr_file(enum aggr_mode aggr_mode) 1840 { 1841 switch (aggr_mode) { 1842 case AGGR_SOCKET: 1843 return perf_env__get_socket_aggr_by_cpu; 1844 case AGGR_DIE: 1845 return perf_env__get_die_aggr_by_cpu; 1846 case AGGR_CACHE: 1847 return perf_env__get_cache_aggr_by_cpu; 1848 case AGGR_CORE: 1849 return perf_env__get_core_aggr_by_cpu; 1850 case AGGR_NODE: 1851 return perf_env__get_node_aggr_by_cpu; 1852 case AGGR_GLOBAL: 1853 return perf_env__get_global_aggr_by_cpu; 1854 case AGGR_NONE: 1855 return perf_env__get_cpu_aggr_by_cpu; 1856 case AGGR_THREAD: 1857 case AGGR_UNSET: 1858 case AGGR_MAX: 1859 default: 1860 return NULL; 1861 } 1862 } 1863 1864 static aggr_get_id_t aggr_mode__get_id_file(enum aggr_mode aggr_mode) 1865 { 1866 switch (aggr_mode) { 1867 case AGGR_SOCKET: 1868 return perf_stat__get_socket_file; 1869 case AGGR_DIE: 1870 return perf_stat__get_die_file; 1871 case AGGR_CACHE: 1872 return perf_stat__get_cache_file; 1873 case AGGR_CORE: 1874 return perf_stat__get_core_file; 1875 case AGGR_NODE: 1876 return perf_stat__get_node_file; 1877 case AGGR_GLOBAL: 1878 return perf_stat__get_global_file; 1879 case AGGR_NONE: 1880 return perf_stat__get_cpu_file; 1881 case AGGR_THREAD: 1882 case AGGR_UNSET: 1883 case AGGR_MAX: 1884 default: 1885 return NULL; 1886 } 1887 } 1888 1889 static int perf_stat_init_aggr_mode_file(struct perf_stat *st) 1890 { 1891 struct perf_env *env = &st->session->header.env; 1892 aggr_cpu_id_get_t get_id = aggr_mode__get_aggr_file(stat_config.aggr_mode); 1893 bool needs_sort = stat_config.aggr_mode != AGGR_NONE; 1894 1895 if (stat_config.aggr_mode == AGGR_THREAD) { 1896 int nr = perf_thread_map__nr(evsel_list->core.threads); 1897 1898 stat_config.aggr_map = cpu_aggr_map__empty_new(nr); 1899 if (stat_config.aggr_map == NULL) 1900 return -ENOMEM; 1901 1902 for (int s = 0; s < nr; s++) { 1903 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1904 1905 id.thread_idx = s; 1906 stat_config.aggr_map->map[s] = id; 1907 } 1908 return 0; 1909 } 1910 1911 if (!get_id) 1912 return 0; 1913 1914 stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus, 1915 get_id, env, needs_sort); 1916 if (!stat_config.aggr_map) { 1917 pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]); 1918 return -1; 1919 } 1920 stat_config.aggr_get_id = aggr_mode__get_id_file(stat_config.aggr_mode); 1921 return 0; 1922 } 1923 1924 /* 1925 * Add default attributes, if there were no attributes specified or 1926 * if -d/--detailed, -d -d or -d -d -d is used: 1927 */ 1928 static int add_default_attributes(void) 1929 { 1930 struct perf_event_attr default_attrs0[] = { 1931 1932 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, 1933 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES }, 1934 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, 1935 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS 
static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
{
	struct perf_env *env = &st->session->header.env;
	aggr_cpu_id_get_t get_id = aggr_mode__get_aggr_file(stat_config.aggr_mode);
	bool needs_sort = stat_config.aggr_mode != AGGR_NONE;

	if (stat_config.aggr_mode == AGGR_THREAD) {
		int nr = perf_thread_map__nr(evsel_list->core.threads);

		stat_config.aggr_map = cpu_aggr_map__empty_new(nr);
		if (stat_config.aggr_map == NULL)
			return -ENOMEM;

		for (int s = 0; s < nr; s++) {
			struct aggr_cpu_id id = aggr_cpu_id__empty();

			id.thread_idx = s;
			stat_config.aggr_map->map[s] = id;
		}
		return 0;
	}

	if (!get_id)
		return 0;

	stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
						 get_id, env, needs_sort);
	if (!stat_config.aggr_map) {
		pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]);
		return -1;
	}
	stat_config.aggr_get_id = aggr_mode__get_id_file(stat_config.aggr_mode);
	return 0;
}

/*
 * Add default attributes, if there were no attributes specified or
 * if -d/--detailed, -d -d or -d -d -d is used:
 */
static int add_default_attributes(void)
{
	struct perf_event_attr default_attrs0[] = {

	{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
	{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
	{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
	{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },

	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
	};
	struct perf_event_attr frontend_attrs[] = {
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	};
	struct perf_event_attr backend_attrs[] = {
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
	};
	struct perf_event_attr default_attrs1[] = {
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES },

	};

	/*
	 * Detailed stats (-d), covering the L1 and last level data caches:
	 */
	struct perf_event_attr detailed_attrs[] = {

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1D << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1D << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_LL << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_LL << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
	};

	/*
	 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
	 */
	struct perf_event_attr very_detailed_attrs[] = {

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1I << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1I << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_DTLB << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_DTLB << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_ITLB << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_ITLB << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

	};

	/*
	 * Very, very detailed stats (-d -d -d), adding prefetch events:
	 */
	struct perf_event_attr very_very_detailed_attrs[] = {

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1D << 0 |
		(PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1D << 0 |
		(PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
	};

	struct perf_event_attr default_null_attrs[] = {};
	const char *pmu = parse_events_option_args.pmu_filter ?: "all";

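	/*
	 * Below: -T (--transaction), --smi-cost and --topdown are handled as
	 * metric groups; only when the user asked for no events at all are the
	 * default/detailed attribute tables above appended to the evlist.
	 */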
	/* Set attrs if no event is selected and !null_run: */
	if (stat_config.null_run)
		return 0;

	if (transaction_run) {
		/* Handle -T as -M transaction. Once platform specific metrics
		 * support has been added to the json files, all architectures
		 * will use this approach. To determine transaction support
		 * on an architecture, test for such a metric name.
		 */
		if (!metricgroup__has_metric(pmu, "transaction")) {
			pr_err("Missing transaction metrics");
			return -1;
		}
		return metricgroup__parse_groups(evsel_list, pmu, "transaction",
						stat_config.metric_no_group,
						stat_config.metric_no_merge,
						stat_config.metric_no_threshold,
						stat_config.user_requested_cpu_list,
						stat_config.system_wide,
						&stat_config.metric_events);
	}

	if (smi_cost) {
		int smi;

		if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) {
			pr_err("freeze_on_smi is not supported.");
			return -1;
		}

		if (!smi) {
			if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) {
				fprintf(stderr, "Failed to set freeze_on_smi.\n");
				return -1;
			}
			smi_reset = true;
		}

		if (!metricgroup__has_metric(pmu, "smi")) {
			pr_err("Missing smi metrics");
			return -1;
		}

		if (!force_metric_only)
			stat_config.metric_only = true;

		return metricgroup__parse_groups(evsel_list, pmu, "smi",
						stat_config.metric_no_group,
						stat_config.metric_no_merge,
						stat_config.metric_no_threshold,
						stat_config.user_requested_cpu_list,
						stat_config.system_wide,
						&stat_config.metric_events);
	}

	if (topdown_run) {
		unsigned int max_level = metricgroups__topdown_max_level();
		char str[] = "TopdownL1";

		if (!force_metric_only)
			stat_config.metric_only = true;

		if (!max_level) {
			pr_err("Topdown requested but the topdown metric groups aren't present.\n"
			       "(See perf list; the metric groups have names like TopdownL1)");
			return -1;
		}
		if (stat_config.topdown_level > max_level) {
			pr_err("Invalid top-down metrics level. The max level is %u.\n", max_level);
			return -1;
		} else if (!stat_config.topdown_level)
			stat_config.topdown_level = 1;

		if (!stat_config.interval && !stat_config.metric_only) {
			fprintf(stat_config.output,
				"Topdown accuracy may decrease when measuring long periods.\n"
				"Please print the result regularly, e.g. -I1000\n");
		}
		str[8] = stat_config.topdown_level + '0';
		if (metricgroup__parse_groups(evsel_list,
					      pmu, str,
					      /*metric_no_group=*/false,
					      /*metric_no_merge=*/false,
					      /*metric_no_threshold=*/true,
					      stat_config.user_requested_cpu_list,
					      stat_config.system_wide,
					      &stat_config.metric_events) < 0)
			return -1;
	}

	if (!stat_config.topdown_level)
		stat_config.topdown_level = 1;

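	/*
	 * With no events on the command line this falls through to the classic
	 * default set (task-clock, context-switches, cpu-migrations,
	 * page-faults, cycles, stalled-cycles, instructions, branches,
	 * branch-misses), i.e. the counters shown in the sample output at the
	 * top of this file, plus TopdownL1 metrics when the PMU provides them.
	 */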
	if (!evsel_list->core.nr_entries) {
		/* No events so add defaults. */
		if (target__has_cpu(&target))
			default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;

		if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
			return -1;
		if (pmu_have_event("cpu", "stalled-cycles-frontend")) {
			if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0)
				return -1;
		}
		if (pmu_have_event("cpu", "stalled-cycles-backend")) {
			if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0)
				return -1;
		}
		if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
			return -1;
		/*
		 * Add TopdownL1 metrics if they exist. To minimize
		 * multiplexing, don't request threshold computation.
		 */
		if (metricgroup__has_metric(pmu, "TopdownL1")) {
			struct evlist *metric_evlist = evlist__new();
			struct evsel *metric_evsel;

			if (!metric_evlist)
				return -1;

			if (metricgroup__parse_groups(metric_evlist, pmu, "TopdownL1",
						      /*metric_no_group=*/false,
						      /*metric_no_merge=*/false,
						      /*metric_no_threshold=*/true,
						      stat_config.user_requested_cpu_list,
						      stat_config.system_wide,
						      &stat_config.metric_events) < 0)
				return -1;

			evlist__for_each_entry(metric_evlist, metric_evsel) {
				metric_evsel->skippable = true;
			}
			evlist__splice_list_tail(evsel_list, &metric_evlist->core.entries);
			evlist__delete(metric_evlist);
		}

		/* Platform specific attrs */
		if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0)
			return -1;
	}

	/* Detailed events get appended to the event list: */

	if (detailed_run < 1)
		return 0;

	/* Append detailed run extra attributes: */
	if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
		return -1;

	if (detailed_run < 2)
		return 0;

	/* Append very detailed run extra attributes: */
	if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
		return -1;

	if (detailed_run < 3)
		return 0;

	/* Append very, very detailed run extra attributes: */
	return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
}

static const char * const stat_record_usage[] = {
	"perf stat record [<options>]",
	NULL,
};

static void init_features(struct perf_session *session)
{
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
	perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
	perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
	perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
	perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
}

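/*
 * "perf stat record": parse the record-specific options, honour -o/--output
 * if given, create the output session and leave the remaining argv for the
 * workload; the adjusted argc is handed back to cmd_stat().
 */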
static int __cmd_record(int argc, const char **argv)
{
	struct perf_session *session;
	struct perf_data *data = &perf_stat.data;

	argc = parse_options(argc, argv, stat_options, stat_record_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);

	if (output_name)
		data->path = output_name;

	if (stat_config.run_count != 1 || forever) {
		pr_err("Cannot use -r option with perf stat record.\n");
		return -1;
	}

	session = perf_session__new(data, NULL);
	if (IS_ERR(session)) {
		pr_err("Perf session creation failed\n");
		return PTR_ERR(session);
	}

	init_features(session);

	session->evlist = evsel_list;
	perf_stat.session = session;
	perf_stat.record = true;
	return argc;
}

static int process_stat_round_event(struct perf_session *session,
				    union perf_event *event)
{
	struct perf_record_stat_round *stat_round = &event->stat_round;
	struct timespec tsh, *ts = NULL;
	const char **argv = session->header.env.cmdline_argv;
	int argc = session->header.env.nr_cmdline;

	process_counters();

	if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
		update_stats(&walltime_nsecs_stats, stat_round->time);

	if (stat_config.interval && stat_round->time) {
		tsh.tv_sec = stat_round->time / NSEC_PER_SEC;
		tsh.tv_nsec = stat_round->time % NSEC_PER_SEC;
		ts = &tsh;
	}

	print_counters(ts, argc, argv);
	return 0;
}

static
int process_stat_config_event(struct perf_session *session,
			      union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct perf_stat *st = container_of(tool, struct perf_stat, tool);

	perf_event__read_stat_config(&stat_config, &event->stat_config);

	if (perf_cpu_map__empty(st->cpus)) {
		if (st->aggr_mode != AGGR_UNSET)
			pr_warning("warning: processing task data, aggregation mode not set\n");
	} else if (st->aggr_mode != AGGR_UNSET) {
		stat_config.aggr_mode = st->aggr_mode;
	}

	if (perf_stat.data.is_pipe)
		perf_stat_init_aggr_mode();
	else
		perf_stat_init_aggr_mode_file(st);

	if (stat_config.aggr_map) {
		int nr_aggr = stat_config.aggr_map->nr;

		if (evlist__alloc_aggr_stats(session->evlist, nr_aggr) < 0) {
			pr_err("cannot allocate aggr counts\n");
			return -1;
		}
	}
	return 0;
}

static int set_maps(struct perf_stat *st)
{
	if (!st->cpus || !st->threads)
		return 0;

	if (WARN_ONCE(st->maps_allocated, "stats double allocation\n"))
		return -EINVAL;

	perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads);

	if (evlist__alloc_stats(&stat_config, evsel_list, /*alloc_raw=*/true))
		return -ENOMEM;

	st->maps_allocated = true;
	return 0;
}

static
int process_thread_map_event(struct perf_session *session,
			     union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct perf_stat *st = container_of(tool, struct perf_stat, tool);

	if (st->threads) {
		pr_warning("Extra thread map event, ignoring.\n");
		return 0;
	}

	st->threads = thread_map__new_event(&event->thread_map);
	if (!st->threads)
		return -ENOMEM;

	return set_maps(st);
}

static
int process_cpu_map_event(struct perf_session *session,
			  union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct perf_stat *st = container_of(tool, struct perf_stat, tool);
	struct perf_cpu_map *cpus;

	if (st->cpus) {
		pr_warning("Extra cpu map event, ignoring.\n");
		return 0;
	}

	cpus = cpu_map__new_data(&event->cpu_map.data);
	if (!cpus)
		return -ENOMEM;

	st->cpus = cpus;
	return set_maps(st);
}

static const char * const stat_report_usage[] = {
	"perf stat report [<options>]",
	NULL,
};

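/*
 * Tool callbacks for "perf stat report": the stat-specific PERF_RECORD_
 * events written by "perf stat record" (thread/cpu maps, stat config,
 * counter values and round markers) are replayed through these handlers.
 */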
static struct perf_stat perf_stat = {
	.tool = {
		.attr		= perf_event__process_attr,
		.event_update	= perf_event__process_event_update,
		.thread_map	= process_thread_map_event,
		.cpu_map	= process_cpu_map_event,
		.stat_config	= process_stat_config_event,
		.stat		= perf_event__process_stat_event,
		.stat_round	= process_stat_round_event,
	},
	.aggr_mode	= AGGR_UNSET,
	.aggr_level	= 0,
};

static int __cmd_report(int argc, const char **argv)
{
	struct perf_session *session;
	const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode,
		     "aggregate counts per processor socket", AGGR_SOCKET),
	OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode,
		     "aggregate counts per processor die", AGGR_DIE),
	OPT_CALLBACK_OPTARG(0, "per-cache", &perf_stat.aggr_mode, &perf_stat.aggr_level,
			    "cache level",
			    "aggregate count at this cache level (Default: LLC)",
			    parse_cache_level),
	OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode,
		     "aggregate counts per physical processor core", AGGR_CORE),
	OPT_SET_UINT(0, "per-node", &perf_stat.aggr_mode,
		     "aggregate counts per numa node", AGGR_NODE),
	OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode,
		     "disable CPU count aggregation", AGGR_NONE),
	OPT_END()
	};
	struct stat st;
	int ret;

	argc = parse_options(argc, argv, options, stat_report_usage, 0);

	if (!input_name || !strlen(input_name)) {
		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
			input_name = "-";
		else
			input_name = "perf.data";
	}

	perf_stat.data.path = input_name;
	perf_stat.data.mode = PERF_DATA_MODE_READ;

	session = perf_session__new(&perf_stat.data, &perf_stat.tool);
	if (IS_ERR(session))
		return PTR_ERR(session);

	perf_stat.session = session;
	stat_config.output = stderr;
	evsel_list = session->evlist;

	ret = perf_session__process_events(session);
	if (ret)
		return ret;

	perf_session__delete(session);
	return 0;
}

static void setup_system_wide(int forks)
{
	/*
	 * Make system wide (-a) the default target if
	 * no target was specified and one of the following
	 * conditions is met:
	 *
	 * - there's no workload specified
	 * - there is a workload specified but all requested
	 *   events are system wide events
	 */
	if (!target__none(&target))
		return;

	if (!forks)
		target.system_wide = true;
	else {
		struct evsel *counter;

		evlist__for_each_entry(evsel_list, counter) {
			if (!counter->core.requires_cpu &&
			    !evsel__name_is(counter, "duration_time")) {
				return;
			}
		}

		if (evsel_list->core.nr_entries)
			target.system_wide = true;
	}
}

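/*
 * Entry point: dispatches to the "record"/"report" subcommands, validates the
 * option combinations, sets up the output stream and the measurement target,
 * and then runs the actual counting loop (repeated for -r, or forever with
 * -r 0).
 */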
"cgroup", PARSE_OPT_NONEG); 2500 2501 argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands, 2502 (const char **) stat_usage, 2503 PARSE_OPT_STOP_AT_NON_OPTION); 2504 2505 if (stat_config.csv_sep) { 2506 stat_config.csv_output = true; 2507 if (!strcmp(stat_config.csv_sep, "\\t")) 2508 stat_config.csv_sep = "\t"; 2509 } else 2510 stat_config.csv_sep = DEFAULT_SEPARATOR; 2511 2512 if (argc && strlen(argv[0]) > 2 && strstarts("record", argv[0])) { 2513 argc = __cmd_record(argc, argv); 2514 if (argc < 0) 2515 return -1; 2516 } else if (argc && strlen(argv[0]) > 2 && strstarts("report", argv[0])) 2517 return __cmd_report(argc, argv); 2518 2519 interval = stat_config.interval; 2520 timeout = stat_config.timeout; 2521 2522 /* 2523 * For record command the -o is already taken care of. 2524 */ 2525 if (!STAT_RECORD && output_name && strcmp(output_name, "-")) 2526 output = NULL; 2527 2528 if (output_name && output_fd) { 2529 fprintf(stderr, "cannot use both --output and --log-fd\n"); 2530 parse_options_usage(stat_usage, stat_options, "o", 1); 2531 parse_options_usage(NULL, stat_options, "log-fd", 0); 2532 goto out; 2533 } 2534 2535 if (stat_config.metric_only && stat_config.aggr_mode == AGGR_THREAD) { 2536 fprintf(stderr, "--metric-only is not supported with --per-thread\n"); 2537 goto out; 2538 } 2539 2540 if (stat_config.metric_only && stat_config.run_count > 1) { 2541 fprintf(stderr, "--metric-only is not supported with -r\n"); 2542 goto out; 2543 } 2544 2545 if (stat_config.walltime_run_table && stat_config.run_count <= 1) { 2546 fprintf(stderr, "--table is only supported with -r\n"); 2547 parse_options_usage(stat_usage, stat_options, "r", 1); 2548 parse_options_usage(NULL, stat_options, "table", 0); 2549 goto out; 2550 } 2551 2552 if (output_fd < 0) { 2553 fprintf(stderr, "argument to --log-fd must be a > 0\n"); 2554 parse_options_usage(stat_usage, stat_options, "log-fd", 0); 2555 goto out; 2556 } 2557 2558 if (!output && !quiet) { 2559 struct timespec tm; 2560 mode = append_file ? "a" : "w"; 2561 2562 output = fopen(output_name, mode); 2563 if (!output) { 2564 perror("failed to create output file"); 2565 return -1; 2566 } 2567 if (!stat_config.json_output) { 2568 clock_gettime(CLOCK_REALTIME, &tm); 2569 fprintf(output, "# started on %s\n", ctime(&tm.tv_sec)); 2570 } 2571 } else if (output_fd > 0) { 2572 mode = append_file ? "a" : "w"; 2573 output = fdopen(output_fd, mode); 2574 if (!output) { 2575 perror("Failed opening logfd"); 2576 return -errno; 2577 } 2578 } 2579 2580 if (stat_config.interval_clear && !isatty(fileno(output))) { 2581 fprintf(stderr, "--interval-clear does not work with output\n"); 2582 parse_options_usage(stat_usage, stat_options, "o", 1); 2583 parse_options_usage(NULL, stat_options, "log-fd", 0); 2584 parse_options_usage(NULL, stat_options, "interval-clear", 0); 2585 return -1; 2586 } 2587 2588 stat_config.output = output; 2589 2590 /* 2591 * let the spreadsheet do the pretty-printing 2592 */ 2593 if (stat_config.csv_output) { 2594 /* User explicitly passed -B? 
	/*
	 * let the spreadsheet do the pretty-printing
	 */
	if (stat_config.csv_output) {
		/* User explicitly passed -B? */
		if (big_num_opt == 1) {
			fprintf(stderr, "-B option not supported with -x\n");
			parse_options_usage(stat_usage, stat_options, "B", 1);
			parse_options_usage(NULL, stat_options, "x", 1);
			goto out;
		} else /* Nope, so disable big number formatting */
			stat_config.big_num = false;
	} else if (big_num_opt == 0) /* User passed --no-big-num */
		stat_config.big_num = false;

	err = target__validate(&target);
	if (err) {
		target__strerror(&target, err, errbuf, BUFSIZ);
		pr_warning("%s\n", errbuf);
	}

	setup_system_wide(argc);

	/*
	 * Display user/system times only for a single
	 * run and when there's a specified tracee.
	 */
	if ((stat_config.run_count == 1) && target__none(&target))
		stat_config.ru_display = true;

	if (stat_config.run_count < 0) {
		pr_err("Run count must be a positive number\n");
		parse_options_usage(stat_usage, stat_options, "r", 1);
		goto out;
	} else if (stat_config.run_count == 0) {
		forever = true;
		stat_config.run_count = 1;
	}

	if (stat_config.walltime_run_table) {
		stat_config.walltime_run = zalloc(stat_config.run_count * sizeof(stat_config.walltime_run[0]));
		if (!stat_config.walltime_run) {
			pr_err("failed to setup -r option");
			goto out;
		}
	}

	if ((stat_config.aggr_mode == AGGR_THREAD) &&
	    !target__has_task(&target)) {
		if (!target.system_wide || target.cpu_list) {
			fprintf(stderr, "The --per-thread option is only "
				"available when monitoring via -p -t -a "
				"options or only --per-thread.\n");
			parse_options_usage(NULL, stat_options, "p", 1);
			parse_options_usage(NULL, stat_options, "t", 1);
			goto out;
		}
	}

	/*
	 * no_aggr and cgroup modes are for system-wide only;
	 * --per-thread is aggregated per thread, we don't mix it with cpu mode
	 */
	if (((stat_config.aggr_mode != AGGR_GLOBAL &&
	      stat_config.aggr_mode != AGGR_THREAD) ||
	     (nr_cgroups || stat_config.cgroup_list)) &&
	    !target__has_cpu(&target)) {
		fprintf(stderr, "both cgroup and no-aggregation "
			"modes only available in system-wide mode\n");

		parse_options_usage(stat_usage, stat_options, "G", 1);
		parse_options_usage(NULL, stat_options, "A", 1);
		parse_options_usage(NULL, stat_options, "a", 1);
		parse_options_usage(NULL, stat_options, "for-each-cgroup", 0);
		goto out;
	}

	if (stat_config.iostat_run) {
		status = iostat_prepare(evsel_list, &stat_config);
		if (status)
			goto out;
		if (iostat_mode == IOSTAT_LIST) {
			iostat_list(evsel_list, &stat_config);
			goto out;
		} else if (verbose > 0)
			iostat_list(evsel_list, &stat_config);
		if (iostat_mode == IOSTAT_RUN && !target__has_cpu(&target))
			target.system_wide = true;
	}

	if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide))
		target.per_thread = true;

	stat_config.system_wide = target.system_wide;
	if (target.cpu_list) {
		stat_config.user_requested_cpu_list = strdup(target.cpu_list);
		if (!stat_config.user_requested_cpu_list) {
			status = -ENOMEM;
			goto out;
		}
	}

	/*
	 * Metric parsing needs to be delayed as metrics may optimize events
	 * knowing the target is system-wide.
	 */
	if (metrics) {
		const char *pmu = parse_events_option_args.pmu_filter ?: "all";

		metricgroup__parse_groups(evsel_list, pmu, metrics,
					stat_config.metric_no_group,
					stat_config.metric_no_merge,
					stat_config.metric_no_threshold,
					stat_config.user_requested_cpu_list,
					stat_config.system_wide,
					&stat_config.metric_events);
		zfree(&metrics);
	}

	if (add_default_attributes())
		goto out;

	if (stat_config.cgroup_list) {
		if (nr_cgroups > 0) {
			pr_err("--cgroup and --for-each-cgroup cannot be used together\n");
			parse_options_usage(stat_usage, stat_options, "G", 1);
			parse_options_usage(NULL, stat_options, "for-each-cgroup", 0);
			goto out;
		}

		if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list,
					  &stat_config.metric_events, true) < 0) {
			parse_options_usage(stat_usage, stat_options,
					    "for-each-cgroup", 0);
			goto out;
		}
	}

	if (evlist__fix_hybrid_cpus(evsel_list, target.cpu_list)) {
		pr_err("failed to use cpu list %s\n", target.cpu_list);
		goto out;
	}

	target.hybrid = perf_pmu__has_hybrid();
	if (evlist__create_maps(evsel_list, &target) < 0) {
		if (target__has_task(&target)) {
			pr_err("Problems finding threads of monitor\n");
			parse_options_usage(stat_usage, stat_options, "p", 1);
			parse_options_usage(NULL, stat_options, "t", 1);
		} else if (target__has_cpu(&target)) {
			perror("failed to parse CPUs map");
			parse_options_usage(stat_usage, stat_options, "C", 1);
			parse_options_usage(NULL, stat_options, "a", 1);
		}
		goto out;
	}

	evlist__check_cpu_maps(evsel_list);

	/*
	 * Initialize thread_map with comm names,
	 * so we can print them in the output.
	 */
	if (stat_config.aggr_mode == AGGR_THREAD) {
		thread_map__read_comms(evsel_list->core.threads);
	}

	if (stat_config.aggr_mode == AGGR_NODE)
		cpu__setup_cpunode_map();

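	/*
	 * Interval/timeout sanity checks follow: --interval-count needs -I
	 * (e.g. "perf stat -I 1000 --interval-count 5 -a"), --timeout must be
	 * at least 10ms and cannot be combined with -I.
	 */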
2801 */ 2802 stat_config.identifier = !(STAT_RECORD && perf_stat.data.is_pipe); 2803 2804 /* 2805 * We dont want to block the signals - that would cause 2806 * child tasks to inherit that and Ctrl-C would not work. 2807 * What we want is for Ctrl-C to work in the exec()-ed 2808 * task, but being ignored by perf stat itself: 2809 */ 2810 atexit(sig_atexit); 2811 if (!forever) 2812 signal(SIGINT, skip_signal); 2813 signal(SIGCHLD, skip_signal); 2814 signal(SIGALRM, skip_signal); 2815 signal(SIGABRT, skip_signal); 2816 2817 if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack)) 2818 goto out; 2819 2820 /* Enable ignoring missing threads when -p option is defined. */ 2821 evlist__first(evsel_list)->ignore_missing_thread = target.pid; 2822 status = 0; 2823 for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) { 2824 if (stat_config.run_count != 1 && verbose > 0) 2825 fprintf(output, "[ perf stat: executing run #%d ... ]\n", 2826 run_idx + 1); 2827 2828 if (run_idx != 0) 2829 evlist__reset_prev_raw_counts(evsel_list); 2830 2831 status = run_perf_stat(argc, argv, run_idx); 2832 if (forever && status != -1 && !interval) { 2833 print_counters(NULL, argc, argv); 2834 perf_stat__reset_stats(); 2835 } 2836 } 2837 2838 if (!forever && status != -1 && (!interval || stat_config.summary)) 2839 print_counters(NULL, argc, argv); 2840 2841 evlist__finalize_ctlfd(evsel_list); 2842 2843 if (STAT_RECORD) { 2844 /* 2845 * We synthesize the kernel mmap record just so that older tools 2846 * don't emit warnings about not being able to resolve symbols 2847 * due to /proc/sys/kernel/kptr_restrict settings and instead provide 2848 * a saner message about no samples being in the perf.data file. 2849 * 2850 * This also serves to suppress a warning about f_header.data.size == 0 2851 * in header.c at the moment 'perf stat record' gets introduced, which 2852 * is not really needed once we start adding the stat specific PERF_RECORD_ 2853 * records, but the need to suppress the kptr_restrict messages in older 2854 * tools remain -acme 2855 */ 2856 int fd = perf_data__fd(&perf_stat.data); 2857 2858 err = perf_event__synthesize_kernel_mmap((void *)&perf_stat, 2859 process_synthesized_event, 2860 &perf_stat.session->machines.host); 2861 if (err) { 2862 pr_warning("Couldn't synthesize the kernel mmap record, harmless, " 2863 "older tools may produce warnings about this file\n."); 2864 } 2865 2866 if (!interval) { 2867 if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL)) 2868 pr_err("failed to write stat round event\n"); 2869 } 2870 2871 if (!perf_stat.data.is_pipe) { 2872 perf_stat.session->header.data_size += perf_stat.bytes_written; 2873 perf_session__write_header(perf_stat.session, evsel_list, fd, true); 2874 } 2875 2876 evlist__close(evsel_list); 2877 perf_session__delete(perf_stat.session); 2878 } 2879 2880 perf_stat__exit_aggr_mode(); 2881 evlist__free_stats(evsel_list); 2882 out: 2883 if (stat_config.iostat_run) 2884 iostat_release(evsel_list); 2885 2886 zfree(&stat_config.walltime_run); 2887 zfree(&stat_config.user_requested_cpu_list); 2888 2889 if (smi_cost && smi_reset) 2890 sysfs__write_int(FREEZE_ON_SMI_PATH, 0); 2891 2892 evlist__delete(evsel_list); 2893 2894 metricgroup__rblist_exit(&stat_config.metric_events); 2895 evlist__close_control(stat_config.ctl_fd, stat_config.ctl_fd_ack, &stat_config.ctl_fd_close); 2896 2897 return status; 2898 } 2899