1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * builtin-stat.c 4 * 5 * Builtin stat command: Give a precise performance counters summary 6 * overview about any workload, CPU or specific PID. 7 * 8 * Sample output: 9 10 $ perf stat ./hackbench 10 11 12 Time: 0.118 13 14 Performance counter stats for './hackbench 10': 15 16 1708.761321 task-clock # 11.037 CPUs utilized 17 41,190 context-switches # 0.024 M/sec 18 6,735 CPU-migrations # 0.004 M/sec 19 17,318 page-faults # 0.010 M/sec 20 5,205,202,243 cycles # 3.046 GHz 21 3,856,436,920 stalled-cycles-frontend # 74.09% frontend cycles idle 22 1,600,790,871 stalled-cycles-backend # 30.75% backend cycles idle 23 2,603,501,247 instructions # 0.50 insns per cycle 24 # 1.48 stalled cycles per insn 25 484,357,498 branches # 283.455 M/sec 26 6,388,934 branch-misses # 1.32% of all branches 27 28 0.154822978 seconds time elapsed 29 30 * 31 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com> 32 * 33 * Improvements and fixes by: 34 * 35 * Arjan van de Ven <arjan@linux.intel.com> 36 * Yanmin Zhang <yanmin.zhang@intel.com> 37 * Wu Fengguang <fengguang.wu@intel.com> 38 * Mike Galbraith <efault@gmx.de> 39 * Paul Mackerras <paulus@samba.org> 40 * Jaswinder Singh Rajput <jaswinder@kernel.org> 41 */ 42 43 #include "builtin.h" 44 #include "perf.h" 45 #include "util/cgroup.h" 46 #include <subcmd/parse-options.h> 47 #include "util/parse-events.h" 48 #include "util/pmu.h" 49 #include "util/event.h" 50 #include "util/evlist.h" 51 #include "util/evlist-hybrid.h" 52 #include "util/evsel.h" 53 #include "util/debug.h" 54 #include "util/color.h" 55 #include "util/stat.h" 56 #include "util/header.h" 57 #include "util/cpumap.h" 58 #include "util/thread_map.h" 59 #include "util/counts.h" 60 #include "util/topdown.h" 61 #include "util/session.h" 62 #include "util/tool.h" 63 #include "util/string2.h" 64 #include "util/metricgroup.h" 65 #include "util/synthetic-events.h" 66 #include "util/target.h" 67 #include "util/time-utils.h" 68 #include "util/top.h" 69 #include "util/affinity.h" 70 #include "util/pfm.h" 71 #include "util/bpf_counter.h" 72 #include "util/iostat.h" 73 #include "util/pmu-hybrid.h" 74 #include "asm/bug.h" 75 76 #include <linux/time64.h> 77 #include <linux/zalloc.h> 78 #include <api/fs/fs.h> 79 #include <errno.h> 80 #include <signal.h> 81 #include <stdlib.h> 82 #include <sys/prctl.h> 83 #include <inttypes.h> 84 #include <locale.h> 85 #include <math.h> 86 #include <sys/types.h> 87 #include <sys/stat.h> 88 #include <sys/wait.h> 89 #include <unistd.h> 90 #include <sys/time.h> 91 #include <sys/resource.h> 92 #include <linux/err.h> 93 94 #include <linux/ctype.h> 95 #include <perf/evlist.h> 96 97 #define DEFAULT_SEPARATOR " " 98 #define FREEZE_ON_SMI_PATH "devices/cpu/freeze_on_smi" 99 100 static void print_counters(struct timespec *ts, int argc, const char **argv); 101 102 /* Default events used for perf stat -T */ 103 static const char *transaction_attrs = { 104 "task-clock," 105 "{" 106 "instructions," 107 "cycles," 108 "cpu/cycles-t/," 109 "cpu/tx-start/," 110 "cpu/el-start/," 111 "cpu/cycles-ct/" 112 "}" 113 }; 114 115 /* More limited version when the CPU does not have all events. */ 116 static const char * transaction_limited_attrs = { 117 "task-clock," 118 "{" 119 "instructions," 120 "cycles," 121 "cpu/cycles-t/," 122 "cpu/tx-start/" 123 "}" 124 }; 125 126 static const char * topdown_attrs[] = { 127 "topdown-total-slots", 128 "topdown-slots-retired", 129 "topdown-recovery-bubbles", 130 "topdown-fetch-bubbles", 131 "topdown-slots-issued", 132 NULL, 133 }; 134 135 static const char *topdown_metric_attrs[] = { 136 "slots", 137 "topdown-retiring", 138 "topdown-bad-spec", 139 "topdown-fe-bound", 140 "topdown-be-bound", 141 NULL, 142 }; 143 144 static const char *topdown_metric_L2_attrs[] = { 145 "slots", 146 "topdown-retiring", 147 "topdown-bad-spec", 148 "topdown-fe-bound", 149 "topdown-be-bound", 150 "topdown-heavy-ops", 151 "topdown-br-mispredict", 152 "topdown-fetch-lat", 153 "topdown-mem-bound", 154 NULL, 155 }; 156 157 static const char *smi_cost_attrs = { 158 "{" 159 "msr/aperf/," 160 "msr/smi/," 161 "cycles" 162 "}" 163 }; 164 165 static struct evlist *evsel_list; 166 static bool all_counters_use_bpf = true; 167 168 static struct target target = { 169 .uid = UINT_MAX, 170 }; 171 172 #define METRIC_ONLY_LEN 20 173 174 static volatile pid_t child_pid = -1; 175 static int detailed_run = 0; 176 static bool transaction_run; 177 static bool topdown_run = false; 178 static bool smi_cost = false; 179 static bool smi_reset = false; 180 static int big_num_opt = -1; 181 static bool group = false; 182 static const char *pre_cmd = NULL; 183 static const char *post_cmd = NULL; 184 static bool sync_run = false; 185 static bool forever = false; 186 static bool force_metric_only = false; 187 static struct timespec ref_time; 188 static bool append_file; 189 static bool interval_count; 190 static const char *output_name; 191 static int output_fd; 192 193 struct perf_stat { 194 bool record; 195 struct perf_data data; 196 struct perf_session *session; 197 u64 bytes_written; 198 struct perf_tool tool; 199 bool maps_allocated; 200 struct perf_cpu_map *cpus; 201 struct perf_thread_map *threads; 202 enum aggr_mode aggr_mode; 203 }; 204 205 static struct perf_stat perf_stat; 206 #define STAT_RECORD perf_stat.record 207 208 static volatile int done = 0; 209 210 static struct perf_stat_config stat_config = { 211 .aggr_mode = AGGR_GLOBAL, 212 .scale = true, 213 .unit_width = 4, /* strlen("unit") */ 214 .run_count = 1, 215 .metric_only_len = METRIC_ONLY_LEN, 216 .walltime_nsecs_stats = &walltime_nsecs_stats, 217 .big_num = true, 218 .ctl_fd = -1, 219 .ctl_fd_ack = -1, 220 .iostat_run = false, 221 }; 222 223 static bool cpus_map_matched(struct evsel *a, struct evsel *b) 224 { 225 if (!a->core.cpus && !b->core.cpus) 226 return true; 227 228 if (!a->core.cpus || !b->core.cpus) 229 return false; 230 231 if (a->core.cpus->nr != b->core.cpus->nr) 232 return false; 233 234 for (int i = 0; i < a->core.cpus->nr; i++) { 235 if (a->core.cpus->map[i] != b->core.cpus->map[i]) 236 return false; 237 } 238 239 return true; 240 } 241 242 static void evlist__check_cpu_maps(struct evlist *evlist) 243 { 244 struct evsel *evsel, *pos, *leader; 245 char buf[1024]; 246 247 if (evlist__has_hybrid(evlist)) 248 evlist__warn_hybrid_group(evlist); 249 250 evlist__for_each_entry(evlist, evsel) { 251 leader = evsel->leader; 252 253 /* Check that leader matches cpus with each member. */ 254 if (leader == evsel) 255 continue; 256 if (cpus_map_matched(leader, evsel)) 257 continue; 258 259 /* If there's mismatch disable the group and warn user. */ 260 WARN_ONCE(1, "WARNING: grouped events cpus do not match, disabling group:\n"); 261 evsel__group_desc(leader, buf, sizeof(buf)); 262 pr_warning(" %s\n", buf); 263 264 if (verbose) { 265 cpu_map__snprint(leader->core.cpus, buf, sizeof(buf)); 266 pr_warning(" %s: %s\n", leader->name, buf); 267 cpu_map__snprint(evsel->core.cpus, buf, sizeof(buf)); 268 pr_warning(" %s: %s\n", evsel->name, buf); 269 } 270 271 for_each_group_evsel(pos, leader) { 272 pos->leader = pos; 273 pos->core.nr_members = 0; 274 } 275 evsel->leader->core.nr_members = 0; 276 } 277 } 278 279 static inline void diff_timespec(struct timespec *r, struct timespec *a, 280 struct timespec *b) 281 { 282 r->tv_sec = a->tv_sec - b->tv_sec; 283 if (a->tv_nsec < b->tv_nsec) { 284 r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec; 285 r->tv_sec--; 286 } else { 287 r->tv_nsec = a->tv_nsec - b->tv_nsec ; 288 } 289 } 290 291 static void perf_stat__reset_stats(void) 292 { 293 int i; 294 295 evlist__reset_stats(evsel_list); 296 perf_stat__reset_shadow_stats(); 297 298 for (i = 0; i < stat_config.stats_num; i++) 299 perf_stat__reset_shadow_per_stat(&stat_config.stats[i]); 300 } 301 302 static int process_synthesized_event(struct perf_tool *tool __maybe_unused, 303 union perf_event *event, 304 struct perf_sample *sample __maybe_unused, 305 struct machine *machine __maybe_unused) 306 { 307 if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) { 308 pr_err("failed to write perf data, error: %m\n"); 309 return -1; 310 } 311 312 perf_stat.bytes_written += event->header.size; 313 return 0; 314 } 315 316 static int write_stat_round_event(u64 tm, u64 type) 317 { 318 return perf_event__synthesize_stat_round(NULL, tm, type, 319 process_synthesized_event, 320 NULL); 321 } 322 323 #define WRITE_STAT_ROUND_EVENT(time, interval) \ 324 write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval) 325 326 #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y) 327 328 static int evsel__write_stat_event(struct evsel *counter, u32 cpu, u32 thread, 329 struct perf_counts_values *count) 330 { 331 struct perf_sample_id *sid = SID(counter, cpu, thread); 332 333 return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count, 334 process_synthesized_event, NULL); 335 } 336 337 static int read_single_counter(struct evsel *counter, int cpu, 338 int thread, struct timespec *rs) 339 { 340 if (counter->tool_event == PERF_TOOL_DURATION_TIME) { 341 u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL; 342 struct perf_counts_values *count = 343 perf_counts(counter->counts, cpu, thread); 344 count->ena = count->run = val; 345 count->val = val; 346 return 0; 347 } 348 return evsel__read_counter(counter, cpu, thread); 349 } 350 351 /* 352 * Read out the results of a single counter: 353 * do not aggregate counts across CPUs in system-wide mode 354 */ 355 static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu) 356 { 357 int nthreads = perf_thread_map__nr(evsel_list->core.threads); 358 int thread; 359 360 if (!counter->supported) 361 return -ENOENT; 362 363 if (counter->core.system_wide) 364 nthreads = 1; 365 366 for (thread = 0; thread < nthreads; thread++) { 367 struct perf_counts_values *count; 368 369 count = perf_counts(counter->counts, cpu, thread); 370 371 /* 372 * The leader's group read loads data into its group members 373 * (via evsel__read_counter()) and sets their count->loaded. 374 */ 375 if (!perf_counts__is_loaded(counter->counts, cpu, thread) && 376 read_single_counter(counter, cpu, thread, rs)) { 377 counter->counts->scaled = -1; 378 perf_counts(counter->counts, cpu, thread)->ena = 0; 379 perf_counts(counter->counts, cpu, thread)->run = 0; 380 return -1; 381 } 382 383 perf_counts__set_loaded(counter->counts, cpu, thread, false); 384 385 if (STAT_RECORD) { 386 if (evsel__write_stat_event(counter, cpu, thread, count)) { 387 pr_err("failed to write stat event\n"); 388 return -1; 389 } 390 } 391 392 if (verbose > 1) { 393 fprintf(stat_config.output, 394 "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", 395 evsel__name(counter), 396 cpu, 397 count->val, count->ena, count->run); 398 } 399 } 400 401 return 0; 402 } 403 404 static int read_affinity_counters(struct timespec *rs) 405 { 406 struct evsel *counter; 407 struct affinity affinity; 408 int i, ncpus, cpu; 409 410 if (all_counters_use_bpf) 411 return 0; 412 413 if (affinity__setup(&affinity) < 0) 414 return -1; 415 416 ncpus = perf_cpu_map__nr(evsel_list->core.all_cpus); 417 if (!target__has_cpu(&target) || target__has_per_thread(&target)) 418 ncpus = 1; 419 evlist__for_each_cpu(evsel_list, i, cpu) { 420 if (i >= ncpus) 421 break; 422 affinity__set(&affinity, cpu); 423 424 evlist__for_each_entry(evsel_list, counter) { 425 if (evsel__cpu_iter_skip(counter, cpu)) 426 continue; 427 if (evsel__is_bpf(counter)) 428 continue; 429 if (!counter->err) { 430 counter->err = read_counter_cpu(counter, rs, 431 counter->cpu_iter - 1); 432 } 433 } 434 } 435 affinity__cleanup(&affinity); 436 return 0; 437 } 438 439 static int read_bpf_map_counters(void) 440 { 441 struct evsel *counter; 442 int err; 443 444 evlist__for_each_entry(evsel_list, counter) { 445 if (!evsel__is_bpf(counter)) 446 continue; 447 448 err = bpf_counter__read(counter); 449 if (err) 450 return err; 451 } 452 return 0; 453 } 454 455 static void read_counters(struct timespec *rs) 456 { 457 struct evsel *counter; 458 459 if (!stat_config.stop_read_counter) { 460 if (read_bpf_map_counters() || 461 read_affinity_counters(rs)) 462 return; 463 } 464 465 evlist__for_each_entry(evsel_list, counter) { 466 if (counter->err) 467 pr_debug("failed to read counter %s\n", counter->name); 468 if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter)) 469 pr_warning("failed to process counter %s\n", counter->name); 470 counter->err = 0; 471 } 472 } 473 474 static int runtime_stat_new(struct perf_stat_config *config, int nthreads) 475 { 476 int i; 477 478 config->stats = calloc(nthreads, sizeof(struct runtime_stat)); 479 if (!config->stats) 480 return -1; 481 482 config->stats_num = nthreads; 483 484 for (i = 0; i < nthreads; i++) 485 runtime_stat__init(&config->stats[i]); 486 487 return 0; 488 } 489 490 static void runtime_stat_delete(struct perf_stat_config *config) 491 { 492 int i; 493 494 if (!config->stats) 495 return; 496 497 for (i = 0; i < config->stats_num; i++) 498 runtime_stat__exit(&config->stats[i]); 499 500 zfree(&config->stats); 501 } 502 503 static void runtime_stat_reset(struct perf_stat_config *config) 504 { 505 int i; 506 507 if (!config->stats) 508 return; 509 510 for (i = 0; i < config->stats_num; i++) 511 perf_stat__reset_shadow_per_stat(&config->stats[i]); 512 } 513 514 static void process_interval(void) 515 { 516 struct timespec ts, rs; 517 518 clock_gettime(CLOCK_MONOTONIC, &ts); 519 diff_timespec(&rs, &ts, &ref_time); 520 521 perf_stat__reset_shadow_per_stat(&rt_stat); 522 runtime_stat_reset(&stat_config); 523 read_counters(&rs); 524 525 if (STAT_RECORD) { 526 if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL)) 527 pr_err("failed to write stat round event\n"); 528 } 529 530 init_stats(&walltime_nsecs_stats); 531 update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL); 532 print_counters(&rs, 0, NULL); 533 } 534 535 static bool handle_interval(unsigned int interval, int *times) 536 { 537 if (interval) { 538 process_interval(); 539 if (interval_count && !(--(*times))) 540 return true; 541 } 542 return false; 543 } 544 545 static int enable_counters(void) 546 { 547 struct evsel *evsel; 548 int err; 549 550 evlist__for_each_entry(evsel_list, evsel) { 551 if (!evsel__is_bpf(evsel)) 552 continue; 553 554 err = bpf_counter__enable(evsel); 555 if (err) 556 return err; 557 } 558 559 if (stat_config.initial_delay < 0) { 560 pr_info(EVLIST_DISABLED_MSG); 561 return 0; 562 } 563 564 if (stat_config.initial_delay > 0) { 565 pr_info(EVLIST_DISABLED_MSG); 566 usleep(stat_config.initial_delay * USEC_PER_MSEC); 567 } 568 569 /* 570 * We need to enable counters only if: 571 * - we don't have tracee (attaching to task or cpu) 572 * - we have initial delay configured 573 */ 574 if (!target__none(&target) || stat_config.initial_delay) { 575 evlist__enable(evsel_list); 576 if (stat_config.initial_delay > 0) 577 pr_info(EVLIST_ENABLED_MSG); 578 } 579 return 0; 580 } 581 582 static void disable_counters(void) 583 { 584 /* 585 * If we don't have tracee (attaching to task or cpu), counters may 586 * still be running. To get accurate group ratios, we must stop groups 587 * from counting before reading their constituent counters. 588 */ 589 if (!target__none(&target)) 590 evlist__disable(evsel_list); 591 } 592 593 static volatile int workload_exec_errno; 594 595 /* 596 * evlist__prepare_workload will send a SIGUSR1 597 * if the fork fails, since we asked by setting its 598 * want_signal to true. 599 */ 600 static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info, 601 void *ucontext __maybe_unused) 602 { 603 workload_exec_errno = info->si_value.sival_int; 604 } 605 606 static bool evsel__should_store_id(struct evsel *counter) 607 { 608 return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID; 609 } 610 611 static bool is_target_alive(struct target *_target, 612 struct perf_thread_map *threads) 613 { 614 struct stat st; 615 int i; 616 617 if (!target__has_task(_target)) 618 return true; 619 620 for (i = 0; i < threads->nr; i++) { 621 char path[PATH_MAX]; 622 623 scnprintf(path, PATH_MAX, "%s/%d", procfs__mountpoint(), 624 threads->map[i].pid); 625 626 if (!stat(path, &st)) 627 return true; 628 } 629 630 return false; 631 } 632 633 static void process_evlist(struct evlist *evlist, unsigned int interval) 634 { 635 enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED; 636 637 if (evlist__ctlfd_process(evlist, &cmd) > 0) { 638 switch (cmd) { 639 case EVLIST_CTL_CMD_ENABLE: 640 if (interval) 641 process_interval(); 642 break; 643 case EVLIST_CTL_CMD_DISABLE: 644 if (interval) 645 process_interval(); 646 break; 647 case EVLIST_CTL_CMD_SNAPSHOT: 648 case EVLIST_CTL_CMD_ACK: 649 case EVLIST_CTL_CMD_UNSUPPORTED: 650 case EVLIST_CTL_CMD_EVLIST: 651 case EVLIST_CTL_CMD_STOP: 652 case EVLIST_CTL_CMD_PING: 653 default: 654 break; 655 } 656 } 657 } 658 659 static void compute_tts(struct timespec *time_start, struct timespec *time_stop, 660 int *time_to_sleep) 661 { 662 int tts = *time_to_sleep; 663 struct timespec time_diff; 664 665 diff_timespec(&time_diff, time_stop, time_start); 666 667 tts -= time_diff.tv_sec * MSEC_PER_SEC + 668 time_diff.tv_nsec / NSEC_PER_MSEC; 669 670 if (tts < 0) 671 tts = 0; 672 673 *time_to_sleep = tts; 674 } 675 676 static int dispatch_events(bool forks, int timeout, int interval, int *times) 677 { 678 int child_exited = 0, status = 0; 679 int time_to_sleep, sleep_time; 680 struct timespec time_start, time_stop; 681 682 if (interval) 683 sleep_time = interval; 684 else if (timeout) 685 sleep_time = timeout; 686 else 687 sleep_time = 1000; 688 689 time_to_sleep = sleep_time; 690 691 while (!done) { 692 if (forks) 693 child_exited = waitpid(child_pid, &status, WNOHANG); 694 else 695 child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0; 696 697 if (child_exited) 698 break; 699 700 clock_gettime(CLOCK_MONOTONIC, &time_start); 701 if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) { /* poll timeout or EINTR */ 702 if (timeout || handle_interval(interval, times)) 703 break; 704 time_to_sleep = sleep_time; 705 } else { /* fd revent */ 706 process_evlist(evsel_list, interval); 707 clock_gettime(CLOCK_MONOTONIC, &time_stop); 708 compute_tts(&time_start, &time_stop, &time_to_sleep); 709 } 710 } 711 712 return status; 713 } 714 715 enum counter_recovery { 716 COUNTER_SKIP, 717 COUNTER_RETRY, 718 COUNTER_FATAL, 719 }; 720 721 static enum counter_recovery stat_handle_error(struct evsel *counter) 722 { 723 char msg[BUFSIZ]; 724 /* 725 * PPC returns ENXIO for HW counters until 2.6.37 726 * (behavior changed with commit b0a873e). 727 */ 728 if (errno == EINVAL || errno == ENOSYS || 729 errno == ENOENT || errno == EOPNOTSUPP || 730 errno == ENXIO) { 731 if (verbose > 0) 732 ui__warning("%s event is not supported by the kernel.\n", 733 evsel__name(counter)); 734 counter->supported = false; 735 /* 736 * errored is a sticky flag that means one of the counter's 737 * cpu event had a problem and needs to be reexamined. 738 */ 739 counter->errored = true; 740 741 if ((counter->leader != counter) || 742 !(counter->leader->core.nr_members > 1)) 743 return COUNTER_SKIP; 744 } else if (evsel__fallback(counter, errno, msg, sizeof(msg))) { 745 if (verbose > 0) 746 ui__warning("%s\n", msg); 747 return COUNTER_RETRY; 748 } else if (target__has_per_thread(&target) && 749 evsel_list->core.threads && 750 evsel_list->core.threads->err_thread != -1) { 751 /* 752 * For global --per-thread case, skip current 753 * error thread. 754 */ 755 if (!thread_map__remove(evsel_list->core.threads, 756 evsel_list->core.threads->err_thread)) { 757 evsel_list->core.threads->err_thread = -1; 758 return COUNTER_RETRY; 759 } 760 } 761 762 evsel__open_strerror(counter, &target, errno, msg, sizeof(msg)); 763 ui__error("%s\n", msg); 764 765 if (child_pid != -1) 766 kill(child_pid, SIGTERM); 767 return COUNTER_FATAL; 768 } 769 770 static int __run_perf_stat(int argc, const char **argv, int run_idx) 771 { 772 int interval = stat_config.interval; 773 int times = stat_config.times; 774 int timeout = stat_config.timeout; 775 char msg[BUFSIZ]; 776 unsigned long long t0, t1; 777 struct evsel *counter; 778 size_t l; 779 int status = 0; 780 const bool forks = (argc > 0); 781 bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false; 782 struct affinity affinity; 783 int i, cpu, err; 784 bool second_pass = false; 785 786 if (forks) { 787 if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) { 788 perror("failed to prepare workload"); 789 return -1; 790 } 791 child_pid = evsel_list->workload.pid; 792 } 793 794 if (group) 795 evlist__set_leader(evsel_list); 796 797 if (affinity__setup(&affinity) < 0) 798 return -1; 799 800 evlist__for_each_entry(evsel_list, counter) { 801 if (bpf_counter__load(counter, &target)) 802 return -1; 803 if (!evsel__is_bpf(counter)) 804 all_counters_use_bpf = false; 805 } 806 807 evlist__for_each_cpu (evsel_list, i, cpu) { 808 /* 809 * bperf calls evsel__open_per_cpu() in bperf__load(), so 810 * no need to call it again here. 811 */ 812 if (target.use_bpf) 813 break; 814 affinity__set(&affinity, cpu); 815 816 evlist__for_each_entry(evsel_list, counter) { 817 if (evsel__cpu_iter_skip(counter, cpu)) 818 continue; 819 if (counter->reset_group || counter->errored) 820 continue; 821 if (evsel__is_bpf(counter)) 822 continue; 823 try_again: 824 if (create_perf_stat_counter(counter, &stat_config, &target, 825 counter->cpu_iter - 1) < 0) { 826 827 /* 828 * Weak group failed. We cannot just undo this here 829 * because earlier CPUs might be in group mode, and the kernel 830 * doesn't support mixing group and non group reads. Defer 831 * it to later. 832 * Don't close here because we're in the wrong affinity. 833 */ 834 if ((errno == EINVAL || errno == EBADF) && 835 counter->leader != counter && 836 counter->weak_group) { 837 evlist__reset_weak_group(evsel_list, counter, false); 838 assert(counter->reset_group); 839 second_pass = true; 840 continue; 841 } 842 843 switch (stat_handle_error(counter)) { 844 case COUNTER_FATAL: 845 return -1; 846 case COUNTER_RETRY: 847 goto try_again; 848 case COUNTER_SKIP: 849 continue; 850 default: 851 break; 852 } 853 854 } 855 counter->supported = true; 856 } 857 } 858 859 if (second_pass) { 860 /* 861 * Now redo all the weak group after closing them, 862 * and also close errored counters. 863 */ 864 865 evlist__for_each_cpu(evsel_list, i, cpu) { 866 affinity__set(&affinity, cpu); 867 /* First close errored or weak retry */ 868 evlist__for_each_entry(evsel_list, counter) { 869 if (!counter->reset_group && !counter->errored) 870 continue; 871 if (evsel__cpu_iter_skip_no_inc(counter, cpu)) 872 continue; 873 perf_evsel__close_cpu(&counter->core, counter->cpu_iter); 874 } 875 /* Now reopen weak */ 876 evlist__for_each_entry(evsel_list, counter) { 877 if (!counter->reset_group && !counter->errored) 878 continue; 879 if (evsel__cpu_iter_skip(counter, cpu)) 880 continue; 881 if (!counter->reset_group) 882 continue; 883 try_again_reset: 884 pr_debug2("reopening weak %s\n", evsel__name(counter)); 885 if (create_perf_stat_counter(counter, &stat_config, &target, 886 counter->cpu_iter - 1) < 0) { 887 888 switch (stat_handle_error(counter)) { 889 case COUNTER_FATAL: 890 return -1; 891 case COUNTER_RETRY: 892 goto try_again_reset; 893 case COUNTER_SKIP: 894 continue; 895 default: 896 break; 897 } 898 } 899 counter->supported = true; 900 } 901 } 902 } 903 affinity__cleanup(&affinity); 904 905 evlist__for_each_entry(evsel_list, counter) { 906 if (!counter->supported) { 907 perf_evsel__free_fd(&counter->core); 908 continue; 909 } 910 911 l = strlen(counter->unit); 912 if (l > stat_config.unit_width) 913 stat_config.unit_width = l; 914 915 if (evsel__should_store_id(counter) && 916 evsel__store_ids(counter, evsel_list)) 917 return -1; 918 } 919 920 if (evlist__apply_filters(evsel_list, &counter)) { 921 pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n", 922 counter->filter, evsel__name(counter), errno, 923 str_error_r(errno, msg, sizeof(msg))); 924 return -1; 925 } 926 927 if (STAT_RECORD) { 928 int fd = perf_data__fd(&perf_stat.data); 929 930 if (is_pipe) { 931 err = perf_header__write_pipe(perf_data__fd(&perf_stat.data)); 932 } else { 933 err = perf_session__write_header(perf_stat.session, evsel_list, 934 fd, false); 935 } 936 937 if (err < 0) 938 return err; 939 940 err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list, 941 process_synthesized_event, is_pipe); 942 if (err < 0) 943 return err; 944 } 945 946 /* 947 * Enable counters and exec the command: 948 */ 949 if (forks) { 950 evlist__start_workload(evsel_list); 951 err = enable_counters(); 952 if (err) 953 return -1; 954 955 t0 = rdclock(); 956 clock_gettime(CLOCK_MONOTONIC, &ref_time); 957 958 if (interval || timeout || evlist__ctlfd_initialized(evsel_list)) 959 status = dispatch_events(forks, timeout, interval, ×); 960 if (child_pid != -1) { 961 if (timeout) 962 kill(child_pid, SIGTERM); 963 wait4(child_pid, &status, 0, &stat_config.ru_data); 964 } 965 966 if (workload_exec_errno) { 967 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg)); 968 pr_err("Workload failed: %s\n", emsg); 969 return -1; 970 } 971 972 if (WIFSIGNALED(status)) 973 psignal(WTERMSIG(status), argv[0]); 974 } else { 975 err = enable_counters(); 976 if (err) 977 return -1; 978 979 t0 = rdclock(); 980 clock_gettime(CLOCK_MONOTONIC, &ref_time); 981 982 status = dispatch_events(forks, timeout, interval, ×); 983 } 984 985 disable_counters(); 986 987 t1 = rdclock(); 988 989 if (stat_config.walltime_run_table) 990 stat_config.walltime_run[run_idx] = t1 - t0; 991 992 if (interval && stat_config.summary) { 993 stat_config.interval = 0; 994 stat_config.stop_read_counter = true; 995 init_stats(&walltime_nsecs_stats); 996 update_stats(&walltime_nsecs_stats, t1 - t0); 997 998 if (stat_config.aggr_mode == AGGR_GLOBAL) 999 evlist__save_aggr_prev_raw_counts(evsel_list); 1000 1001 evlist__copy_prev_raw_counts(evsel_list); 1002 evlist__reset_prev_raw_counts(evsel_list); 1003 runtime_stat_reset(&stat_config); 1004 perf_stat__reset_shadow_per_stat(&rt_stat); 1005 } else 1006 update_stats(&walltime_nsecs_stats, t1 - t0); 1007 1008 /* 1009 * Closing a group leader splits the group, and as we only disable 1010 * group leaders, results in remaining events becoming enabled. To 1011 * avoid arbitrary skew, we must read all counters before closing any 1012 * group leaders. 1013 */ 1014 read_counters(&(struct timespec) { .tv_nsec = t1-t0 }); 1015 1016 /* 1017 * We need to keep evsel_list alive, because it's processed 1018 * later the evsel_list will be closed after. 1019 */ 1020 if (!STAT_RECORD) 1021 evlist__close(evsel_list); 1022 1023 return WEXITSTATUS(status); 1024 } 1025 1026 static int run_perf_stat(int argc, const char **argv, int run_idx) 1027 { 1028 int ret; 1029 1030 if (pre_cmd) { 1031 ret = system(pre_cmd); 1032 if (ret) 1033 return ret; 1034 } 1035 1036 if (sync_run) 1037 sync(); 1038 1039 ret = __run_perf_stat(argc, argv, run_idx); 1040 if (ret) 1041 return ret; 1042 1043 if (post_cmd) { 1044 ret = system(post_cmd); 1045 if (ret) 1046 return ret; 1047 } 1048 1049 return ret; 1050 } 1051 1052 static void print_counters(struct timespec *ts, int argc, const char **argv) 1053 { 1054 /* Do not print anything if we record to the pipe. */ 1055 if (STAT_RECORD && perf_stat.data.is_pipe) 1056 return; 1057 if (stat_config.quiet) 1058 return; 1059 1060 evlist__print_counters(evsel_list, &stat_config, &target, ts, argc, argv); 1061 } 1062 1063 static volatile int signr = -1; 1064 1065 static void skip_signal(int signo) 1066 { 1067 if ((child_pid == -1) || stat_config.interval) 1068 done = 1; 1069 1070 signr = signo; 1071 /* 1072 * render child_pid harmless 1073 * won't send SIGTERM to a random 1074 * process in case of race condition 1075 * and fast PID recycling 1076 */ 1077 child_pid = -1; 1078 } 1079 1080 static void sig_atexit(void) 1081 { 1082 sigset_t set, oset; 1083 1084 /* 1085 * avoid race condition with SIGCHLD handler 1086 * in skip_signal() which is modifying child_pid 1087 * goal is to avoid send SIGTERM to a random 1088 * process 1089 */ 1090 sigemptyset(&set); 1091 sigaddset(&set, SIGCHLD); 1092 sigprocmask(SIG_BLOCK, &set, &oset); 1093 1094 if (child_pid != -1) 1095 kill(child_pid, SIGTERM); 1096 1097 sigprocmask(SIG_SETMASK, &oset, NULL); 1098 1099 if (signr == -1) 1100 return; 1101 1102 signal(signr, SIG_DFL); 1103 kill(getpid(), signr); 1104 } 1105 1106 void perf_stat__set_big_num(int set) 1107 { 1108 stat_config.big_num = (set != 0); 1109 } 1110 1111 void perf_stat__set_no_csv_summary(int set) 1112 { 1113 stat_config.no_csv_summary = (set != 0); 1114 } 1115 1116 static int stat__set_big_num(const struct option *opt __maybe_unused, 1117 const char *s __maybe_unused, int unset) 1118 { 1119 big_num_opt = unset ? 0 : 1; 1120 perf_stat__set_big_num(!unset); 1121 return 0; 1122 } 1123 1124 static int enable_metric_only(const struct option *opt __maybe_unused, 1125 const char *s __maybe_unused, int unset) 1126 { 1127 force_metric_only = true; 1128 stat_config.metric_only = !unset; 1129 return 0; 1130 } 1131 1132 static int parse_metric_groups(const struct option *opt, 1133 const char *str, 1134 int unset __maybe_unused) 1135 { 1136 return metricgroup__parse_groups(opt, str, 1137 stat_config.metric_no_group, 1138 stat_config.metric_no_merge, 1139 &stat_config.metric_events); 1140 } 1141 1142 static int parse_control_option(const struct option *opt, 1143 const char *str, 1144 int unset __maybe_unused) 1145 { 1146 struct perf_stat_config *config = opt->value; 1147 1148 return evlist__parse_control(str, &config->ctl_fd, &config->ctl_fd_ack, &config->ctl_fd_close); 1149 } 1150 1151 static int parse_stat_cgroups(const struct option *opt, 1152 const char *str, int unset) 1153 { 1154 if (stat_config.cgroup_list) { 1155 pr_err("--cgroup and --for-each-cgroup cannot be used together\n"); 1156 return -1; 1157 } 1158 1159 return parse_cgroups(opt, str, unset); 1160 } 1161 1162 static struct option stat_options[] = { 1163 OPT_BOOLEAN('T', "transaction", &transaction_run, 1164 "hardware transaction statistics"), 1165 OPT_CALLBACK('e', "event", &evsel_list, "event", 1166 "event selector. use 'perf list' to list available events", 1167 parse_events_option), 1168 OPT_CALLBACK(0, "filter", &evsel_list, "filter", 1169 "event filter", parse_filter), 1170 OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit, 1171 "child tasks do not inherit counters"), 1172 OPT_STRING('p', "pid", &target.pid, "pid", 1173 "stat events on existing process id"), 1174 OPT_STRING('t', "tid", &target.tid, "tid", 1175 "stat events on existing thread id"), 1176 #ifdef HAVE_BPF_SKEL 1177 OPT_STRING('b', "bpf-prog", &target.bpf_str, "bpf-prog-id", 1178 "stat events on existing bpf program id"), 1179 OPT_BOOLEAN(0, "bpf-counters", &target.use_bpf, 1180 "use bpf program to count events"), 1181 OPT_STRING(0, "bpf-attr-map", &target.attr_map, "attr-map-path", 1182 "path to perf_event_attr map"), 1183 #endif 1184 OPT_BOOLEAN('a', "all-cpus", &target.system_wide, 1185 "system-wide collection from all CPUs"), 1186 OPT_BOOLEAN('g', "group", &group, 1187 "put the counters into a counter group"), 1188 OPT_BOOLEAN(0, "scale", &stat_config.scale, 1189 "Use --no-scale to disable counter scaling for multiplexing"), 1190 OPT_INCR('v', "verbose", &verbose, 1191 "be more verbose (show counter open errors, etc)"), 1192 OPT_INTEGER('r', "repeat", &stat_config.run_count, 1193 "repeat command and print average + stddev (max: 100, forever: 0)"), 1194 OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table, 1195 "display details about each run (only with -r option)"), 1196 OPT_BOOLEAN('n', "null", &stat_config.null_run, 1197 "null run - dont start any counters"), 1198 OPT_INCR('d', "detailed", &detailed_run, 1199 "detailed run - start a lot of events"), 1200 OPT_BOOLEAN('S', "sync", &sync_run, 1201 "call sync() before starting a run"), 1202 OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, 1203 "print large numbers with thousands\' separators", 1204 stat__set_big_num), 1205 OPT_STRING('C', "cpu", &target.cpu_list, "cpu", 1206 "list of cpus to monitor in system-wide"), 1207 OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode, 1208 "disable CPU count aggregation", AGGR_NONE), 1209 OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"), 1210 OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator", 1211 "print counts with custom separator"), 1212 OPT_CALLBACK('G', "cgroup", &evsel_list, "name", 1213 "monitor event in cgroup name only", parse_stat_cgroups), 1214 OPT_STRING(0, "for-each-cgroup", &stat_config.cgroup_list, "name", 1215 "expand events for each cgroup"), 1216 OPT_STRING('o', "output", &output_name, "file", "output file name"), 1217 OPT_BOOLEAN(0, "append", &append_file, "append to the output file"), 1218 OPT_INTEGER(0, "log-fd", &output_fd, 1219 "log output to fd, instead of stderr"), 1220 OPT_STRING(0, "pre", &pre_cmd, "command", 1221 "command to run prior to the measured command"), 1222 OPT_STRING(0, "post", &post_cmd, "command", 1223 "command to run after to the measured command"), 1224 OPT_UINTEGER('I', "interval-print", &stat_config.interval, 1225 "print counts at regular interval in ms " 1226 "(overhead is possible for values <= 100ms)"), 1227 OPT_INTEGER(0, "interval-count", &stat_config.times, 1228 "print counts for fixed number of times"), 1229 OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear, 1230 "clear screen in between new interval"), 1231 OPT_UINTEGER(0, "timeout", &stat_config.timeout, 1232 "stop workload and print counts after a timeout period in ms (>= 10ms)"), 1233 OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode, 1234 "aggregate counts per processor socket", AGGR_SOCKET), 1235 OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode, 1236 "aggregate counts per processor die", AGGR_DIE), 1237 OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode, 1238 "aggregate counts per physical processor core", AGGR_CORE), 1239 OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode, 1240 "aggregate counts per thread", AGGR_THREAD), 1241 OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode, 1242 "aggregate counts per numa node", AGGR_NODE), 1243 OPT_INTEGER('D', "delay", &stat_config.initial_delay, 1244 "ms to wait before starting measurement after program start (-1: start with events disabled)"), 1245 OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL, 1246 "Only print computed metrics. No raw values", enable_metric_only), 1247 OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group, 1248 "don't group metric events, impacts multiplexing"), 1249 OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge, 1250 "don't try to share events between metrics in a group"), 1251 OPT_BOOLEAN(0, "topdown", &topdown_run, 1252 "measure top-down statistics"), 1253 OPT_UINTEGER(0, "td-level", &stat_config.topdown_level, 1254 "Set the metrics level for the top-down statistics (0: max level)"), 1255 OPT_BOOLEAN(0, "smi-cost", &smi_cost, 1256 "measure SMI cost"), 1257 OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list", 1258 "monitor specified metrics or metric groups (separated by ,)", 1259 parse_metric_groups), 1260 OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel, 1261 "Configure all used events to run in kernel space.", 1262 PARSE_OPT_EXCLUSIVE), 1263 OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user, 1264 "Configure all used events to run in user space.", 1265 PARSE_OPT_EXCLUSIVE), 1266 OPT_BOOLEAN(0, "percore-show-thread", &stat_config.percore_show_thread, 1267 "Use with 'percore' event qualifier to show the event " 1268 "counts of one hardware thread by sum up total hardware " 1269 "threads of same physical core"), 1270 OPT_BOOLEAN(0, "summary", &stat_config.summary, 1271 "print summary for interval mode"), 1272 OPT_BOOLEAN(0, "no-csv-summary", &stat_config.no_csv_summary, 1273 "don't print 'summary' for CSV summary output"), 1274 OPT_BOOLEAN(0, "quiet", &stat_config.quiet, 1275 "don't print output (useful with record)"), 1276 #ifdef HAVE_LIBPFM 1277 OPT_CALLBACK(0, "pfm-events", &evsel_list, "event", 1278 "libpfm4 event selector. use 'perf list' to list available events", 1279 parse_libpfm_events_option), 1280 #endif 1281 OPT_CALLBACK(0, "control", &stat_config, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]", 1282 "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events).\n" 1283 "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n" 1284 "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.", 1285 parse_control_option), 1286 OPT_CALLBACK_OPTARG(0, "iostat", &evsel_list, &stat_config, "default", 1287 "measure I/O performance metrics provided by arch/platform", 1288 iostat_parse), 1289 OPT_END() 1290 }; 1291 1292 static struct aggr_cpu_id perf_stat__get_socket(struct perf_stat_config *config __maybe_unused, 1293 struct perf_cpu_map *map, int cpu) 1294 { 1295 return cpu_map__get_socket(map, cpu, NULL); 1296 } 1297 1298 static struct aggr_cpu_id perf_stat__get_die(struct perf_stat_config *config __maybe_unused, 1299 struct perf_cpu_map *map, int cpu) 1300 { 1301 return cpu_map__get_die(map, cpu, NULL); 1302 } 1303 1304 static struct aggr_cpu_id perf_stat__get_core(struct perf_stat_config *config __maybe_unused, 1305 struct perf_cpu_map *map, int cpu) 1306 { 1307 return cpu_map__get_core(map, cpu, NULL); 1308 } 1309 1310 static struct aggr_cpu_id perf_stat__get_node(struct perf_stat_config *config __maybe_unused, 1311 struct perf_cpu_map *map, int cpu) 1312 { 1313 return cpu_map__get_node(map, cpu, NULL); 1314 } 1315 1316 static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config, 1317 aggr_get_id_t get_id, struct perf_cpu_map *map, int idx) 1318 { 1319 int cpu; 1320 struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); 1321 1322 if (idx >= map->nr) 1323 return id; 1324 1325 cpu = map->map[idx]; 1326 1327 if (cpu_map__aggr_cpu_id_is_empty(config->cpus_aggr_map->map[cpu])) 1328 config->cpus_aggr_map->map[cpu] = get_id(config, map, idx); 1329 1330 id = config->cpus_aggr_map->map[cpu]; 1331 return id; 1332 } 1333 1334 static struct aggr_cpu_id perf_stat__get_socket_cached(struct perf_stat_config *config, 1335 struct perf_cpu_map *map, int idx) 1336 { 1337 return perf_stat__get_aggr(config, perf_stat__get_socket, map, idx); 1338 } 1339 1340 static struct aggr_cpu_id perf_stat__get_die_cached(struct perf_stat_config *config, 1341 struct perf_cpu_map *map, int idx) 1342 { 1343 return perf_stat__get_aggr(config, perf_stat__get_die, map, idx); 1344 } 1345 1346 static struct aggr_cpu_id perf_stat__get_core_cached(struct perf_stat_config *config, 1347 struct perf_cpu_map *map, int idx) 1348 { 1349 return perf_stat__get_aggr(config, perf_stat__get_core, map, idx); 1350 } 1351 1352 static struct aggr_cpu_id perf_stat__get_node_cached(struct perf_stat_config *config, 1353 struct perf_cpu_map *map, int idx) 1354 { 1355 return perf_stat__get_aggr(config, perf_stat__get_node, map, idx); 1356 } 1357 1358 static bool term_percore_set(void) 1359 { 1360 struct evsel *counter; 1361 1362 evlist__for_each_entry(evsel_list, counter) { 1363 if (counter->percore) 1364 return true; 1365 } 1366 1367 return false; 1368 } 1369 1370 static int perf_stat_init_aggr_mode(void) 1371 { 1372 int nr; 1373 1374 switch (stat_config.aggr_mode) { 1375 case AGGR_SOCKET: 1376 if (cpu_map__build_socket_map(evsel_list->core.cpus, &stat_config.aggr_map)) { 1377 perror("cannot build socket map"); 1378 return -1; 1379 } 1380 stat_config.aggr_get_id = perf_stat__get_socket_cached; 1381 break; 1382 case AGGR_DIE: 1383 if (cpu_map__build_die_map(evsel_list->core.cpus, &stat_config.aggr_map)) { 1384 perror("cannot build die map"); 1385 return -1; 1386 } 1387 stat_config.aggr_get_id = perf_stat__get_die_cached; 1388 break; 1389 case AGGR_CORE: 1390 if (cpu_map__build_core_map(evsel_list->core.cpus, &stat_config.aggr_map)) { 1391 perror("cannot build core map"); 1392 return -1; 1393 } 1394 stat_config.aggr_get_id = perf_stat__get_core_cached; 1395 break; 1396 case AGGR_NODE: 1397 if (cpu_map__build_node_map(evsel_list->core.cpus, &stat_config.aggr_map)) { 1398 perror("cannot build core map"); 1399 return -1; 1400 } 1401 stat_config.aggr_get_id = perf_stat__get_node_cached; 1402 break; 1403 case AGGR_NONE: 1404 if (term_percore_set()) { 1405 if (cpu_map__build_core_map(evsel_list->core.cpus, 1406 &stat_config.aggr_map)) { 1407 perror("cannot build core map"); 1408 return -1; 1409 } 1410 stat_config.aggr_get_id = perf_stat__get_core_cached; 1411 } 1412 break; 1413 case AGGR_GLOBAL: 1414 case AGGR_THREAD: 1415 case AGGR_UNSET: 1416 default: 1417 break; 1418 } 1419 1420 /* 1421 * The evsel_list->cpus is the base we operate on, 1422 * taking the highest cpu number to be the size of 1423 * the aggregation translate cpumap. 1424 */ 1425 nr = perf_cpu_map__max(evsel_list->core.cpus); 1426 stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1); 1427 return stat_config.cpus_aggr_map ? 0 : -ENOMEM; 1428 } 1429 1430 static void cpu_aggr_map__delete(struct cpu_aggr_map *map) 1431 { 1432 if (map) { 1433 WARN_ONCE(refcount_read(&map->refcnt) != 0, 1434 "cpu_aggr_map refcnt unbalanced\n"); 1435 free(map); 1436 } 1437 } 1438 1439 static void cpu_aggr_map__put(struct cpu_aggr_map *map) 1440 { 1441 if (map && refcount_dec_and_test(&map->refcnt)) 1442 cpu_aggr_map__delete(map); 1443 } 1444 1445 static void perf_stat__exit_aggr_mode(void) 1446 { 1447 cpu_aggr_map__put(stat_config.aggr_map); 1448 cpu_aggr_map__put(stat_config.cpus_aggr_map); 1449 stat_config.aggr_map = NULL; 1450 stat_config.cpus_aggr_map = NULL; 1451 } 1452 1453 static inline int perf_env__get_cpu(struct perf_env *env, struct perf_cpu_map *map, int idx) 1454 { 1455 int cpu; 1456 1457 if (idx > map->nr) 1458 return -1; 1459 1460 cpu = map->map[idx]; 1461 1462 if (cpu >= env->nr_cpus_avail) 1463 return -1; 1464 1465 return cpu; 1466 } 1467 1468 static struct aggr_cpu_id perf_env__get_socket(struct perf_cpu_map *map, int idx, void *data) 1469 { 1470 struct perf_env *env = data; 1471 int cpu = perf_env__get_cpu(env, map, idx); 1472 struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); 1473 1474 if (cpu != -1) 1475 id.socket = env->cpu[cpu].socket_id; 1476 1477 return id; 1478 } 1479 1480 static struct aggr_cpu_id perf_env__get_die(struct perf_cpu_map *map, int idx, void *data) 1481 { 1482 struct perf_env *env = data; 1483 struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); 1484 int cpu = perf_env__get_cpu(env, map, idx); 1485 1486 if (cpu != -1) { 1487 /* 1488 * die_id is relative to socket, so start 1489 * with the socket ID and then add die to 1490 * make a unique ID. 1491 */ 1492 id.socket = env->cpu[cpu].socket_id; 1493 id.die = env->cpu[cpu].die_id; 1494 } 1495 1496 return id; 1497 } 1498 1499 static struct aggr_cpu_id perf_env__get_core(struct perf_cpu_map *map, int idx, void *data) 1500 { 1501 struct perf_env *env = data; 1502 struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); 1503 int cpu = perf_env__get_cpu(env, map, idx); 1504 1505 if (cpu != -1) { 1506 /* 1507 * core_id is relative to socket and die, 1508 * we need a global id. So we set 1509 * socket, die id and core id 1510 */ 1511 id.socket = env->cpu[cpu].socket_id; 1512 id.die = env->cpu[cpu].die_id; 1513 id.core = env->cpu[cpu].core_id; 1514 } 1515 1516 return id; 1517 } 1518 1519 static struct aggr_cpu_id perf_env__get_node(struct perf_cpu_map *map, int idx, void *data) 1520 { 1521 int cpu = perf_env__get_cpu(data, map, idx); 1522 struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id(); 1523 1524 id.node = perf_env__numa_node(data, cpu); 1525 return id; 1526 } 1527 1528 static int perf_env__build_socket_map(struct perf_env *env, struct perf_cpu_map *cpus, 1529 struct cpu_aggr_map **sockp) 1530 { 1531 return cpu_map__build_map(cpus, sockp, perf_env__get_socket, env); 1532 } 1533 1534 static int perf_env__build_die_map(struct perf_env *env, struct perf_cpu_map *cpus, 1535 struct cpu_aggr_map **diep) 1536 { 1537 return cpu_map__build_map(cpus, diep, perf_env__get_die, env); 1538 } 1539 1540 static int perf_env__build_core_map(struct perf_env *env, struct perf_cpu_map *cpus, 1541 struct cpu_aggr_map **corep) 1542 { 1543 return cpu_map__build_map(cpus, corep, perf_env__get_core, env); 1544 } 1545 1546 static int perf_env__build_node_map(struct perf_env *env, struct perf_cpu_map *cpus, 1547 struct cpu_aggr_map **nodep) 1548 { 1549 return cpu_map__build_map(cpus, nodep, perf_env__get_node, env); 1550 } 1551 1552 static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused, 1553 struct perf_cpu_map *map, int idx) 1554 { 1555 return perf_env__get_socket(map, idx, &perf_stat.session->header.env); 1556 } 1557 static struct aggr_cpu_id perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused, 1558 struct perf_cpu_map *map, int idx) 1559 { 1560 return perf_env__get_die(map, idx, &perf_stat.session->header.env); 1561 } 1562 1563 static struct aggr_cpu_id perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused, 1564 struct perf_cpu_map *map, int idx) 1565 { 1566 return perf_env__get_core(map, idx, &perf_stat.session->header.env); 1567 } 1568 1569 static struct aggr_cpu_id perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused, 1570 struct perf_cpu_map *map, int idx) 1571 { 1572 return perf_env__get_node(map, idx, &perf_stat.session->header.env); 1573 } 1574 1575 static int perf_stat_init_aggr_mode_file(struct perf_stat *st) 1576 { 1577 struct perf_env *env = &st->session->header.env; 1578 1579 switch (stat_config.aggr_mode) { 1580 case AGGR_SOCKET: 1581 if (perf_env__build_socket_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { 1582 perror("cannot build socket map"); 1583 return -1; 1584 } 1585 stat_config.aggr_get_id = perf_stat__get_socket_file; 1586 break; 1587 case AGGR_DIE: 1588 if (perf_env__build_die_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { 1589 perror("cannot build die map"); 1590 return -1; 1591 } 1592 stat_config.aggr_get_id = perf_stat__get_die_file; 1593 break; 1594 case AGGR_CORE: 1595 if (perf_env__build_core_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { 1596 perror("cannot build core map"); 1597 return -1; 1598 } 1599 stat_config.aggr_get_id = perf_stat__get_core_file; 1600 break; 1601 case AGGR_NODE: 1602 if (perf_env__build_node_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { 1603 perror("cannot build core map"); 1604 return -1; 1605 } 1606 stat_config.aggr_get_id = perf_stat__get_node_file; 1607 break; 1608 case AGGR_NONE: 1609 case AGGR_GLOBAL: 1610 case AGGR_THREAD: 1611 case AGGR_UNSET: 1612 default: 1613 break; 1614 } 1615 1616 return 0; 1617 } 1618 1619 /* 1620 * Add default attributes, if there were no attributes specified or 1621 * if -d/--detailed, -d -d or -d -d -d is used: 1622 */ 1623 static int add_default_attributes(void) 1624 { 1625 int err; 1626 struct perf_event_attr default_attrs0[] = { 1627 1628 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, 1629 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES }, 1630 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, 1631 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, 1632 1633 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, 1634 }; 1635 struct perf_event_attr frontend_attrs[] = { 1636 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, 1637 }; 1638 struct perf_event_attr backend_attrs[] = { 1639 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, 1640 }; 1641 struct perf_event_attr default_attrs1[] = { 1642 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, 1643 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, 1644 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, 1645 1646 }; 1647 struct perf_event_attr default_sw_attrs[] = { 1648 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, 1649 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES }, 1650 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, 1651 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, 1652 }; 1653 1654 /* 1655 * Detailed stats (-d), covering the L1 and last level data caches: 1656 */ 1657 struct perf_event_attr detailed_attrs[] = { 1658 1659 { .type = PERF_TYPE_HW_CACHE, 1660 .config = 1661 PERF_COUNT_HW_CACHE_L1D << 0 | 1662 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1663 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1664 1665 { .type = PERF_TYPE_HW_CACHE, 1666 .config = 1667 PERF_COUNT_HW_CACHE_L1D << 0 | 1668 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1669 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1670 1671 { .type = PERF_TYPE_HW_CACHE, 1672 .config = 1673 PERF_COUNT_HW_CACHE_LL << 0 | 1674 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1675 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1676 1677 { .type = PERF_TYPE_HW_CACHE, 1678 .config = 1679 PERF_COUNT_HW_CACHE_LL << 0 | 1680 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1681 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1682 }; 1683 1684 /* 1685 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches: 1686 */ 1687 struct perf_event_attr very_detailed_attrs[] = { 1688 1689 { .type = PERF_TYPE_HW_CACHE, 1690 .config = 1691 PERF_COUNT_HW_CACHE_L1I << 0 | 1692 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1693 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1694 1695 { .type = PERF_TYPE_HW_CACHE, 1696 .config = 1697 PERF_COUNT_HW_CACHE_L1I << 0 | 1698 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1699 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1700 1701 { .type = PERF_TYPE_HW_CACHE, 1702 .config = 1703 PERF_COUNT_HW_CACHE_DTLB << 0 | 1704 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1705 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1706 1707 { .type = PERF_TYPE_HW_CACHE, 1708 .config = 1709 PERF_COUNT_HW_CACHE_DTLB << 0 | 1710 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1711 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1712 1713 { .type = PERF_TYPE_HW_CACHE, 1714 .config = 1715 PERF_COUNT_HW_CACHE_ITLB << 0 | 1716 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1717 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1718 1719 { .type = PERF_TYPE_HW_CACHE, 1720 .config = 1721 PERF_COUNT_HW_CACHE_ITLB << 0 | 1722 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1723 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1724 1725 }; 1726 1727 /* 1728 * Very, very detailed stats (-d -d -d), adding prefetch events: 1729 */ 1730 struct perf_event_attr very_very_detailed_attrs[] = { 1731 1732 { .type = PERF_TYPE_HW_CACHE, 1733 .config = 1734 PERF_COUNT_HW_CACHE_L1D << 0 | 1735 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | 1736 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1737 1738 { .type = PERF_TYPE_HW_CACHE, 1739 .config = 1740 PERF_COUNT_HW_CACHE_L1D << 0 | 1741 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | 1742 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1743 }; 1744 struct parse_events_error errinfo; 1745 1746 /* Set attrs if no event is selected and !null_run: */ 1747 if (stat_config.null_run) 1748 return 0; 1749 1750 bzero(&errinfo, sizeof(errinfo)); 1751 if (transaction_run) { 1752 /* Handle -T as -M transaction. Once platform specific metrics 1753 * support has been added to the json files, all architectures 1754 * will use this approach. To determine transaction support 1755 * on an architecture test for such a metric name. 1756 */ 1757 if (metricgroup__has_metric("transaction")) { 1758 struct option opt = { .value = &evsel_list }; 1759 1760 return metricgroup__parse_groups(&opt, "transaction", 1761 stat_config.metric_no_group, 1762 stat_config.metric_no_merge, 1763 &stat_config.metric_events); 1764 } 1765 1766 if (pmu_have_event("cpu", "cycles-ct") && 1767 pmu_have_event("cpu", "el-start")) 1768 err = parse_events(evsel_list, transaction_attrs, 1769 &errinfo); 1770 else 1771 err = parse_events(evsel_list, 1772 transaction_limited_attrs, 1773 &errinfo); 1774 if (err) { 1775 fprintf(stderr, "Cannot set up transaction events\n"); 1776 parse_events_print_error(&errinfo, transaction_attrs); 1777 return -1; 1778 } 1779 return 0; 1780 } 1781 1782 if (smi_cost) { 1783 int smi; 1784 1785 if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) { 1786 fprintf(stderr, "freeze_on_smi is not supported.\n"); 1787 return -1; 1788 } 1789 1790 if (!smi) { 1791 if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) { 1792 fprintf(stderr, "Failed to set freeze_on_smi.\n"); 1793 return -1; 1794 } 1795 smi_reset = true; 1796 } 1797 1798 if (pmu_have_event("msr", "aperf") && 1799 pmu_have_event("msr", "smi")) { 1800 if (!force_metric_only) 1801 stat_config.metric_only = true; 1802 err = parse_events(evsel_list, smi_cost_attrs, &errinfo); 1803 } else { 1804 fprintf(stderr, "To measure SMI cost, it needs " 1805 "msr/aperf/, msr/smi/ and cpu/cycles/ support\n"); 1806 parse_events_print_error(&errinfo, smi_cost_attrs); 1807 return -1; 1808 } 1809 if (err) { 1810 parse_events_print_error(&errinfo, smi_cost_attrs); 1811 fprintf(stderr, "Cannot set up SMI cost events\n"); 1812 return -1; 1813 } 1814 return 0; 1815 } 1816 1817 if (topdown_run) { 1818 const char **metric_attrs = topdown_metric_attrs; 1819 unsigned int max_level = 1; 1820 char *str = NULL; 1821 bool warn = false; 1822 1823 if (!force_metric_only) 1824 stat_config.metric_only = true; 1825 1826 if (pmu_have_event("cpu", topdown_metric_L2_attrs[5])) { 1827 metric_attrs = topdown_metric_L2_attrs; 1828 max_level = 2; 1829 } 1830 1831 if (stat_config.topdown_level > max_level) { 1832 pr_err("Invalid top-down metrics level. The max level is %u.\n", max_level); 1833 return -1; 1834 } else if (!stat_config.topdown_level) 1835 stat_config.topdown_level = max_level; 1836 1837 if (topdown_filter_events(metric_attrs, &str, 1) < 0) { 1838 pr_err("Out of memory\n"); 1839 return -1; 1840 } 1841 if (metric_attrs[0] && str) { 1842 if (!stat_config.interval && !stat_config.metric_only) { 1843 fprintf(stat_config.output, 1844 "Topdown accuracy may decrease when measuring long periods.\n" 1845 "Please print the result regularly, e.g. -I1000\n"); 1846 } 1847 goto setup_metrics; 1848 } 1849 1850 zfree(&str); 1851 1852 if (stat_config.aggr_mode != AGGR_GLOBAL && 1853 stat_config.aggr_mode != AGGR_CORE) { 1854 pr_err("top down event configuration requires --per-core mode\n"); 1855 return -1; 1856 } 1857 stat_config.aggr_mode = AGGR_CORE; 1858 if (nr_cgroups || !target__has_cpu(&target)) { 1859 pr_err("top down event configuration requires system-wide mode (-a)\n"); 1860 return -1; 1861 } 1862 1863 if (topdown_filter_events(topdown_attrs, &str, 1864 arch_topdown_check_group(&warn)) < 0) { 1865 pr_err("Out of memory\n"); 1866 return -1; 1867 } 1868 if (topdown_attrs[0] && str) { 1869 if (warn) 1870 arch_topdown_group_warn(); 1871 setup_metrics: 1872 err = parse_events(evsel_list, str, &errinfo); 1873 if (err) { 1874 fprintf(stderr, 1875 "Cannot set up top down events %s: %d\n", 1876 str, err); 1877 parse_events_print_error(&errinfo, str); 1878 free(str); 1879 return -1; 1880 } 1881 } else { 1882 fprintf(stderr, "System does not support topdown\n"); 1883 return -1; 1884 } 1885 free(str); 1886 } 1887 1888 if (!evsel_list->core.nr_entries) { 1889 if (perf_pmu__has_hybrid()) { 1890 const char *hybrid_str = "cycles,instructions,branches,branch-misses"; 1891 1892 if (target__has_cpu(&target)) 1893 default_sw_attrs[0].config = PERF_COUNT_SW_CPU_CLOCK; 1894 1895 if (evlist__add_default_attrs(evsel_list, 1896 default_sw_attrs) < 0) { 1897 return -1; 1898 } 1899 1900 err = parse_events(evsel_list, hybrid_str, &errinfo); 1901 if (err) { 1902 fprintf(stderr, 1903 "Cannot set up hybrid events %s: %d\n", 1904 hybrid_str, err); 1905 parse_events_print_error(&errinfo, hybrid_str); 1906 return -1; 1907 } 1908 return err; 1909 } 1910 1911 if (target__has_cpu(&target)) 1912 default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK; 1913 1914 if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0) 1915 return -1; 1916 if (pmu_have_event("cpu", "stalled-cycles-frontend")) { 1917 if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0) 1918 return -1; 1919 } 1920 if (pmu_have_event("cpu", "stalled-cycles-backend")) { 1921 if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0) 1922 return -1; 1923 } 1924 if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0) 1925 return -1; 1926 1927 if (arch_evlist__add_default_attrs(evsel_list) < 0) 1928 return -1; 1929 } 1930 1931 /* Detailed events get appended to the event list: */ 1932 1933 if (detailed_run < 1) 1934 return 0; 1935 1936 /* Append detailed run extra attributes: */ 1937 if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0) 1938 return -1; 1939 1940 if (detailed_run < 2) 1941 return 0; 1942 1943 /* Append very detailed run extra attributes: */ 1944 if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0) 1945 return -1; 1946 1947 if (detailed_run < 3) 1948 return 0; 1949 1950 /* Append very, very detailed run extra attributes: */ 1951 return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); 1952 } 1953 1954 static const char * const stat_record_usage[] = { 1955 "perf stat record [<options>]", 1956 NULL, 1957 }; 1958 1959 static void init_features(struct perf_session *session) 1960 { 1961 int feat; 1962 1963 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++) 1964 perf_header__set_feat(&session->header, feat); 1965 1966 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT); 1967 perf_header__clear_feat(&session->header, HEADER_BUILD_ID); 1968 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA); 1969 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK); 1970 perf_header__clear_feat(&session->header, HEADER_AUXTRACE); 1971 } 1972 1973 static int __cmd_record(int argc, const char **argv) 1974 { 1975 struct perf_session *session; 1976 struct perf_data *data = &perf_stat.data; 1977 1978 argc = parse_options(argc, argv, stat_options, stat_record_usage, 1979 PARSE_OPT_STOP_AT_NON_OPTION); 1980 1981 if (output_name) 1982 data->path = output_name; 1983 1984 if (stat_config.run_count != 1 || forever) { 1985 pr_err("Cannot use -r option with perf stat record.\n"); 1986 return -1; 1987 } 1988 1989 session = perf_session__new(data, false, NULL); 1990 if (IS_ERR(session)) { 1991 pr_err("Perf session creation failed\n"); 1992 return PTR_ERR(session); 1993 } 1994 1995 init_features(session); 1996 1997 session->evlist = evsel_list; 1998 perf_stat.session = session; 1999 perf_stat.record = true; 2000 return argc; 2001 } 2002 2003 static int process_stat_round_event(struct perf_session *session, 2004 union perf_event *event) 2005 { 2006 struct perf_record_stat_round *stat_round = &event->stat_round; 2007 struct evsel *counter; 2008 struct timespec tsh, *ts = NULL; 2009 const char **argv = session->header.env.cmdline_argv; 2010 int argc = session->header.env.nr_cmdline; 2011 2012 evlist__for_each_entry(evsel_list, counter) 2013 perf_stat_process_counter(&stat_config, counter); 2014 2015 if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL) 2016 update_stats(&walltime_nsecs_stats, stat_round->time); 2017 2018 if (stat_config.interval && stat_round->time) { 2019 tsh.tv_sec = stat_round->time / NSEC_PER_SEC; 2020 tsh.tv_nsec = stat_round->time % NSEC_PER_SEC; 2021 ts = &tsh; 2022 } 2023 2024 print_counters(ts, argc, argv); 2025 return 0; 2026 } 2027 2028 static 2029 int process_stat_config_event(struct perf_session *session, 2030 union perf_event *event) 2031 { 2032 struct perf_tool *tool = session->tool; 2033 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 2034 2035 perf_event__read_stat_config(&stat_config, &event->stat_config); 2036 2037 if (perf_cpu_map__empty(st->cpus)) { 2038 if (st->aggr_mode != AGGR_UNSET) 2039 pr_warning("warning: processing task data, aggregation mode not set\n"); 2040 return 0; 2041 } 2042 2043 if (st->aggr_mode != AGGR_UNSET) 2044 stat_config.aggr_mode = st->aggr_mode; 2045 2046 if (perf_stat.data.is_pipe) 2047 perf_stat_init_aggr_mode(); 2048 else 2049 perf_stat_init_aggr_mode_file(st); 2050 2051 return 0; 2052 } 2053 2054 static int set_maps(struct perf_stat *st) 2055 { 2056 if (!st->cpus || !st->threads) 2057 return 0; 2058 2059 if (WARN_ONCE(st->maps_allocated, "stats double allocation\n")) 2060 return -EINVAL; 2061 2062 perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads); 2063 2064 if (evlist__alloc_stats(evsel_list, true)) 2065 return -ENOMEM; 2066 2067 st->maps_allocated = true; 2068 return 0; 2069 } 2070 2071 static 2072 int process_thread_map_event(struct perf_session *session, 2073 union perf_event *event) 2074 { 2075 struct perf_tool *tool = session->tool; 2076 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 2077 2078 if (st->threads) { 2079 pr_warning("Extra thread map event, ignoring.\n"); 2080 return 0; 2081 } 2082 2083 st->threads = thread_map__new_event(&event->thread_map); 2084 if (!st->threads) 2085 return -ENOMEM; 2086 2087 return set_maps(st); 2088 } 2089 2090 static 2091 int process_cpu_map_event(struct perf_session *session, 2092 union perf_event *event) 2093 { 2094 struct perf_tool *tool = session->tool; 2095 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 2096 struct perf_cpu_map *cpus; 2097 2098 if (st->cpus) { 2099 pr_warning("Extra cpu map event, ignoring.\n"); 2100 return 0; 2101 } 2102 2103 cpus = cpu_map__new_data(&event->cpu_map.data); 2104 if (!cpus) 2105 return -ENOMEM; 2106 2107 st->cpus = cpus; 2108 return set_maps(st); 2109 } 2110 2111 static const char * const stat_report_usage[] = { 2112 "perf stat report [<options>]", 2113 NULL, 2114 }; 2115 2116 static struct perf_stat perf_stat = { 2117 .tool = { 2118 .attr = perf_event__process_attr, 2119 .event_update = perf_event__process_event_update, 2120 .thread_map = process_thread_map_event, 2121 .cpu_map = process_cpu_map_event, 2122 .stat_config = process_stat_config_event, 2123 .stat = perf_event__process_stat_event, 2124 .stat_round = process_stat_round_event, 2125 }, 2126 .aggr_mode = AGGR_UNSET, 2127 }; 2128 2129 static int __cmd_report(int argc, const char **argv) 2130 { 2131 struct perf_session *session; 2132 const struct option options[] = { 2133 OPT_STRING('i', "input", &input_name, "file", "input file name"), 2134 OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode, 2135 "aggregate counts per processor socket", AGGR_SOCKET), 2136 OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode, 2137 "aggregate counts per processor die", AGGR_DIE), 2138 OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode, 2139 "aggregate counts per physical processor core", AGGR_CORE), 2140 OPT_SET_UINT(0, "per-node", &perf_stat.aggr_mode, 2141 "aggregate counts per numa node", AGGR_NODE), 2142 OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode, 2143 "disable CPU count aggregation", AGGR_NONE), 2144 OPT_END() 2145 }; 2146 struct stat st; 2147 int ret; 2148 2149 argc = parse_options(argc, argv, options, stat_report_usage, 0); 2150 2151 if (!input_name || !strlen(input_name)) { 2152 if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode)) 2153 input_name = "-"; 2154 else 2155 input_name = "perf.data"; 2156 } 2157 2158 perf_stat.data.path = input_name; 2159 perf_stat.data.mode = PERF_DATA_MODE_READ; 2160 2161 session = perf_session__new(&perf_stat.data, false, &perf_stat.tool); 2162 if (IS_ERR(session)) 2163 return PTR_ERR(session); 2164 2165 perf_stat.session = session; 2166 stat_config.output = stderr; 2167 evsel_list = session->evlist; 2168 2169 ret = perf_session__process_events(session); 2170 if (ret) 2171 return ret; 2172 2173 perf_session__delete(session); 2174 return 0; 2175 } 2176 2177 static void setup_system_wide(int forks) 2178 { 2179 /* 2180 * Make system wide (-a) the default target if 2181 * no target was specified and one of following 2182 * conditions is met: 2183 * 2184 * - there's no workload specified 2185 * - there is workload specified but all requested 2186 * events are system wide events 2187 */ 2188 if (!target__none(&target)) 2189 return; 2190 2191 if (!forks) 2192 target.system_wide = true; 2193 else { 2194 struct evsel *counter; 2195 2196 evlist__for_each_entry(evsel_list, counter) { 2197 if (!counter->core.system_wide && 2198 strcmp(counter->name, "duration_time")) { 2199 return; 2200 } 2201 } 2202 2203 if (evsel_list->core.nr_entries) 2204 target.system_wide = true; 2205 } 2206 } 2207 2208 int cmd_stat(int argc, const char **argv) 2209 { 2210 const char * const stat_usage[] = { 2211 "perf stat [<options>] [<command>]", 2212 NULL 2213 }; 2214 int status = -EINVAL, run_idx, err; 2215 const char *mode; 2216 FILE *output = stderr; 2217 unsigned int interval, timeout; 2218 const char * const stat_subcommands[] = { "record", "report" }; 2219 char errbuf[BUFSIZ]; 2220 2221 setlocale(LC_ALL, ""); 2222 2223 evsel_list = evlist__new(); 2224 if (evsel_list == NULL) 2225 return -ENOMEM; 2226 2227 parse_events__shrink_config_terms(); 2228 2229 /* String-parsing callback-based options would segfault when negated */ 2230 set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG); 2231 set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG); 2232 set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG); 2233 2234 argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands, 2235 (const char **) stat_usage, 2236 PARSE_OPT_STOP_AT_NON_OPTION); 2237 perf_stat__collect_metric_expr(evsel_list); 2238 perf_stat__init_shadow_stats(); 2239 2240 if (stat_config.csv_sep) { 2241 stat_config.csv_output = true; 2242 if (!strcmp(stat_config.csv_sep, "\\t")) 2243 stat_config.csv_sep = "\t"; 2244 } else 2245 stat_config.csv_sep = DEFAULT_SEPARATOR; 2246 2247 if (argc && !strncmp(argv[0], "rec", 3)) { 2248 argc = __cmd_record(argc, argv); 2249 if (argc < 0) 2250 return -1; 2251 } else if (argc && !strncmp(argv[0], "rep", 3)) 2252 return __cmd_report(argc, argv); 2253 2254 interval = stat_config.interval; 2255 timeout = stat_config.timeout; 2256 2257 /* 2258 * For record command the -o is already taken care of. 2259 */ 2260 if (!STAT_RECORD && output_name && strcmp(output_name, "-")) 2261 output = NULL; 2262 2263 if (output_name && output_fd) { 2264 fprintf(stderr, "cannot use both --output and --log-fd\n"); 2265 parse_options_usage(stat_usage, stat_options, "o", 1); 2266 parse_options_usage(NULL, stat_options, "log-fd", 0); 2267 goto out; 2268 } 2269 2270 if (stat_config.metric_only && stat_config.aggr_mode == AGGR_THREAD) { 2271 fprintf(stderr, "--metric-only is not supported with --per-thread\n"); 2272 goto out; 2273 } 2274 2275 if (stat_config.metric_only && stat_config.run_count > 1) { 2276 fprintf(stderr, "--metric-only is not supported with -r\n"); 2277 goto out; 2278 } 2279 2280 if (stat_config.walltime_run_table && stat_config.run_count <= 1) { 2281 fprintf(stderr, "--table is only supported with -r\n"); 2282 parse_options_usage(stat_usage, stat_options, "r", 1); 2283 parse_options_usage(NULL, stat_options, "table", 0); 2284 goto out; 2285 } 2286 2287 if (output_fd < 0) { 2288 fprintf(stderr, "argument to --log-fd must be a > 0\n"); 2289 parse_options_usage(stat_usage, stat_options, "log-fd", 0); 2290 goto out; 2291 } 2292 2293 if (!output && !stat_config.quiet) { 2294 struct timespec tm; 2295 mode = append_file ? "a" : "w"; 2296 2297 output = fopen(output_name, mode); 2298 if (!output) { 2299 perror("failed to create output file"); 2300 return -1; 2301 } 2302 clock_gettime(CLOCK_REALTIME, &tm); 2303 fprintf(output, "# started on %s\n", ctime(&tm.tv_sec)); 2304 } else if (output_fd > 0) { 2305 mode = append_file ? "a" : "w"; 2306 output = fdopen(output_fd, mode); 2307 if (!output) { 2308 perror("Failed opening logfd"); 2309 return -errno; 2310 } 2311 } 2312 2313 stat_config.output = output; 2314 2315 /* 2316 * let the spreadsheet do the pretty-printing 2317 */ 2318 if (stat_config.csv_output) { 2319 /* User explicitly passed -B? */ 2320 if (big_num_opt == 1) { 2321 fprintf(stderr, "-B option not supported with -x\n"); 2322 parse_options_usage(stat_usage, stat_options, "B", 1); 2323 parse_options_usage(NULL, stat_options, "x", 1); 2324 goto out; 2325 } else /* Nope, so disable big number formatting */ 2326 stat_config.big_num = false; 2327 } else if (big_num_opt == 0) /* User passed --no-big-num */ 2328 stat_config.big_num = false; 2329 2330 err = target__validate(&target); 2331 if (err) { 2332 target__strerror(&target, err, errbuf, BUFSIZ); 2333 pr_warning("%s\n", errbuf); 2334 } 2335 2336 setup_system_wide(argc); 2337 2338 /* 2339 * Display user/system times only for single 2340 * run and when there's specified tracee. 2341 */ 2342 if ((stat_config.run_count == 1) && target__none(&target)) 2343 stat_config.ru_display = true; 2344 2345 if (stat_config.run_count < 0) { 2346 pr_err("Run count must be a positive number\n"); 2347 parse_options_usage(stat_usage, stat_options, "r", 1); 2348 goto out; 2349 } else if (stat_config.run_count == 0) { 2350 forever = true; 2351 stat_config.run_count = 1; 2352 } 2353 2354 if (stat_config.walltime_run_table) { 2355 stat_config.walltime_run = zalloc(stat_config.run_count * sizeof(stat_config.walltime_run[0])); 2356 if (!stat_config.walltime_run) { 2357 pr_err("failed to setup -r option"); 2358 goto out; 2359 } 2360 } 2361 2362 if ((stat_config.aggr_mode == AGGR_THREAD) && 2363 !target__has_task(&target)) { 2364 if (!target.system_wide || target.cpu_list) { 2365 fprintf(stderr, "The --per-thread option is only " 2366 "available when monitoring via -p -t -a " 2367 "options or only --per-thread.\n"); 2368 parse_options_usage(NULL, stat_options, "p", 1); 2369 parse_options_usage(NULL, stat_options, "t", 1); 2370 goto out; 2371 } 2372 } 2373 2374 /* 2375 * no_aggr, cgroup are for system-wide only 2376 * --per-thread is aggregated per thread, we dont mix it with cpu mode 2377 */ 2378 if (((stat_config.aggr_mode != AGGR_GLOBAL && 2379 stat_config.aggr_mode != AGGR_THREAD) || nr_cgroups) && 2380 !target__has_cpu(&target)) { 2381 fprintf(stderr, "both cgroup and no-aggregation " 2382 "modes only available in system-wide mode\n"); 2383 2384 parse_options_usage(stat_usage, stat_options, "G", 1); 2385 parse_options_usage(NULL, stat_options, "A", 1); 2386 parse_options_usage(NULL, stat_options, "a", 1); 2387 goto out; 2388 } 2389 2390 if (stat_config.iostat_run) { 2391 status = iostat_prepare(evsel_list, &stat_config); 2392 if (status) 2393 goto out; 2394 if (iostat_mode == IOSTAT_LIST) { 2395 iostat_list(evsel_list, &stat_config); 2396 goto out; 2397 } else if (verbose) 2398 iostat_list(evsel_list, &stat_config); 2399 } 2400 2401 if (add_default_attributes()) 2402 goto out; 2403 2404 if (stat_config.cgroup_list) { 2405 if (nr_cgroups > 0) { 2406 pr_err("--cgroup and --for-each-cgroup cannot be used together\n"); 2407 parse_options_usage(stat_usage, stat_options, "G", 1); 2408 parse_options_usage(NULL, stat_options, "for-each-cgroup", 0); 2409 goto out; 2410 } 2411 2412 if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list, 2413 &stat_config.metric_events, true) < 0) { 2414 parse_options_usage(stat_usage, stat_options, 2415 "for-each-cgroup", 0); 2416 goto out; 2417 } 2418 } 2419 2420 if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide)) 2421 target.per_thread = true; 2422 2423 if (evlist__create_maps(evsel_list, &target) < 0) { 2424 if (target__has_task(&target)) { 2425 pr_err("Problems finding threads of monitor\n"); 2426 parse_options_usage(stat_usage, stat_options, "p", 1); 2427 parse_options_usage(NULL, stat_options, "t", 1); 2428 } else if (target__has_cpu(&target)) { 2429 perror("failed to parse CPUs map"); 2430 parse_options_usage(stat_usage, stat_options, "C", 1); 2431 parse_options_usage(NULL, stat_options, "a", 1); 2432 } 2433 goto out; 2434 } 2435 2436 evlist__check_cpu_maps(evsel_list); 2437 2438 if (perf_pmu__has_hybrid()) 2439 stat_config.no_merge = true; 2440 2441 /* 2442 * Initialize thread_map with comm names, 2443 * so we could print it out on output. 2444 */ 2445 if (stat_config.aggr_mode == AGGR_THREAD) { 2446 thread_map__read_comms(evsel_list->core.threads); 2447 if (target.system_wide) { 2448 if (runtime_stat_new(&stat_config, 2449 perf_thread_map__nr(evsel_list->core.threads))) { 2450 goto out; 2451 } 2452 } 2453 } 2454 2455 if (stat_config.aggr_mode == AGGR_NODE) 2456 cpu__setup_cpunode_map(); 2457 2458 if (stat_config.times && interval) 2459 interval_count = true; 2460 else if (stat_config.times && !interval) { 2461 pr_err("interval-count option should be used together with " 2462 "interval-print.\n"); 2463 parse_options_usage(stat_usage, stat_options, "interval-count", 0); 2464 parse_options_usage(stat_usage, stat_options, "I", 1); 2465 goto out; 2466 } 2467 2468 if (timeout && timeout < 100) { 2469 if (timeout < 10) { 2470 pr_err("timeout must be >= 10ms.\n"); 2471 parse_options_usage(stat_usage, stat_options, "timeout", 0); 2472 goto out; 2473 } else 2474 pr_warning("timeout < 100ms. " 2475 "The overhead percentage could be high in some cases. " 2476 "Please proceed with caution.\n"); 2477 } 2478 if (timeout && interval) { 2479 pr_err("timeout option is not supported with interval-print.\n"); 2480 parse_options_usage(stat_usage, stat_options, "timeout", 0); 2481 parse_options_usage(stat_usage, stat_options, "I", 1); 2482 goto out; 2483 } 2484 2485 if (evlist__alloc_stats(evsel_list, interval)) 2486 goto out; 2487 2488 if (perf_stat_init_aggr_mode()) 2489 goto out; 2490 2491 /* 2492 * Set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless 2493 * while avoiding that older tools show confusing messages. 2494 * 2495 * However for pipe sessions we need to keep it zero, 2496 * because script's perf_evsel__check_attr is triggered 2497 * by attr->sample_type != 0, and we can't run it on 2498 * stat sessions. 2499 */ 2500 stat_config.identifier = !(STAT_RECORD && perf_stat.data.is_pipe); 2501 2502 /* 2503 * We dont want to block the signals - that would cause 2504 * child tasks to inherit that and Ctrl-C would not work. 2505 * What we want is for Ctrl-C to work in the exec()-ed 2506 * task, but being ignored by perf stat itself: 2507 */ 2508 atexit(sig_atexit); 2509 if (!forever) 2510 signal(SIGINT, skip_signal); 2511 signal(SIGCHLD, skip_signal); 2512 signal(SIGALRM, skip_signal); 2513 signal(SIGABRT, skip_signal); 2514 2515 if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack)) 2516 goto out; 2517 2518 status = 0; 2519 for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) { 2520 if (stat_config.run_count != 1 && verbose > 0) 2521 fprintf(output, "[ perf stat: executing run #%d ... ]\n", 2522 run_idx + 1); 2523 2524 if (run_idx != 0) 2525 evlist__reset_prev_raw_counts(evsel_list); 2526 2527 status = run_perf_stat(argc, argv, run_idx); 2528 if (forever && status != -1 && !interval) { 2529 print_counters(NULL, argc, argv); 2530 perf_stat__reset_stats(); 2531 } 2532 } 2533 2534 if (!forever && status != -1 && (!interval || stat_config.summary)) 2535 print_counters(NULL, argc, argv); 2536 2537 evlist__finalize_ctlfd(evsel_list); 2538 2539 if (STAT_RECORD) { 2540 /* 2541 * We synthesize the kernel mmap record just so that older tools 2542 * don't emit warnings about not being able to resolve symbols 2543 * due to /proc/sys/kernel/kptr_restrict settings and instead provide 2544 * a saner message about no samples being in the perf.data file. 2545 * 2546 * This also serves to suppress a warning about f_header.data.size == 0 2547 * in header.c at the moment 'perf stat record' gets introduced, which 2548 * is not really needed once we start adding the stat specific PERF_RECORD_ 2549 * records, but the need to suppress the kptr_restrict messages in older 2550 * tools remain -acme 2551 */ 2552 int fd = perf_data__fd(&perf_stat.data); 2553 2554 err = perf_event__synthesize_kernel_mmap((void *)&perf_stat, 2555 process_synthesized_event, 2556 &perf_stat.session->machines.host); 2557 if (err) { 2558 pr_warning("Couldn't synthesize the kernel mmap record, harmless, " 2559 "older tools may produce warnings about this file\n."); 2560 } 2561 2562 if (!interval) { 2563 if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL)) 2564 pr_err("failed to write stat round event\n"); 2565 } 2566 2567 if (!perf_stat.data.is_pipe) { 2568 perf_stat.session->header.data_size += perf_stat.bytes_written; 2569 perf_session__write_header(perf_stat.session, evsel_list, fd, true); 2570 } 2571 2572 evlist__close(evsel_list); 2573 perf_session__delete(perf_stat.session); 2574 } 2575 2576 perf_stat__exit_aggr_mode(); 2577 evlist__free_stats(evsel_list); 2578 out: 2579 if (stat_config.iostat_run) 2580 iostat_release(evsel_list); 2581 2582 zfree(&stat_config.walltime_run); 2583 2584 if (smi_cost && smi_reset) 2585 sysfs__write_int(FREEZE_ON_SMI_PATH, 0); 2586 2587 evlist__delete(evsel_list); 2588 2589 metricgroup__rblist_exit(&stat_config.metric_events); 2590 runtime_stat_delete(&stat_config); 2591 evlist__close_control(stat_config.ctl_fd, stat_config.ctl_fd_ack, &stat_config.ctl_fd_close); 2592 2593 return status; 2594 } 2595