1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * builtin-stat.c 4 * 5 * Builtin stat command: Give a precise performance counters summary 6 * overview about any workload, CPU or specific PID. 7 * 8 * Sample output: 9 10 $ perf stat ./hackbench 10 11 12 Time: 0.118 13 14 Performance counter stats for './hackbench 10': 15 16 1708.761321 task-clock # 11.037 CPUs utilized 17 41,190 context-switches # 0.024 M/sec 18 6,735 CPU-migrations # 0.004 M/sec 19 17,318 page-faults # 0.010 M/sec 20 5,205,202,243 cycles # 3.046 GHz 21 3,856,436,920 stalled-cycles-frontend # 74.09% frontend cycles idle 22 1,600,790,871 stalled-cycles-backend # 30.75% backend cycles idle 23 2,603,501,247 instructions # 0.50 insns per cycle 24 # 1.48 stalled cycles per insn 25 484,357,498 branches # 283.455 M/sec 26 6,388,934 branch-misses # 1.32% of all branches 27 28 0.154822978 seconds time elapsed 29 30 * 31 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com> 32 * 33 * Improvements and fixes by: 34 * 35 * Arjan van de Ven <arjan@linux.intel.com> 36 * Yanmin Zhang <yanmin.zhang@intel.com> 37 * Wu Fengguang <fengguang.wu@intel.com> 38 * Mike Galbraith <efault@gmx.de> 39 * Paul Mackerras <paulus@samba.org> 40 * Jaswinder Singh Rajput <jaswinder@kernel.org> 41 */ 42 43 #include "builtin.h" 44 #include "perf.h" 45 #include "util/cgroup.h" 46 #include <subcmd/parse-options.h> 47 #include "util/parse-events.h" 48 #include "util/pmu.h" 49 #include "util/event.h" 50 #include "util/evlist.h" 51 #include "util/evlist-hybrid.h" 52 #include "util/evsel.h" 53 #include "util/debug.h" 54 #include "util/color.h" 55 #include "util/stat.h" 56 #include "util/header.h" 57 #include "util/cpumap.h" 58 #include "util/thread_map.h" 59 #include "util/counts.h" 60 #include "util/topdown.h" 61 #include "util/session.h" 62 #include "util/tool.h" 63 #include "util/string2.h" 64 #include "util/metricgroup.h" 65 #include "util/synthetic-events.h" 66 #include "util/target.h" 67 #include "util/time-utils.h" 68 #include "util/top.h" 69 #include "util/affinity.h" 70 #include "util/pfm.h" 71 #include "util/bpf_counter.h" 72 #include "util/iostat.h" 73 #include "util/pmu-hybrid.h" 74 #include "asm/bug.h" 75 76 #include <linux/time64.h> 77 #include <linux/zalloc.h> 78 #include <api/fs/fs.h> 79 #include <errno.h> 80 #include <signal.h> 81 #include <stdlib.h> 82 #include <sys/prctl.h> 83 #include <inttypes.h> 84 #include <locale.h> 85 #include <math.h> 86 #include <sys/types.h> 87 #include <sys/stat.h> 88 #include <sys/wait.h> 89 #include <unistd.h> 90 #include <sys/time.h> 91 #include <sys/resource.h> 92 #include <linux/err.h> 93 94 #include <linux/ctype.h> 95 #include <perf/evlist.h> 96 97 #define DEFAULT_SEPARATOR " " 98 #define FREEZE_ON_SMI_PATH "devices/cpu/freeze_on_smi" 99 100 static void print_counters(struct timespec *ts, int argc, const char **argv); 101 102 /* Default events used for perf stat -T */ 103 static const char *transaction_attrs = { 104 "task-clock," 105 "{" 106 "instructions," 107 "cycles," 108 "cpu/cycles-t/," 109 "cpu/tx-start/," 110 "cpu/el-start/," 111 "cpu/cycles-ct/" 112 "}" 113 }; 114 115 /* More limited version when the CPU does not have all events. */ 116 static const char * transaction_limited_attrs = { 117 "task-clock," 118 "{" 119 "instructions," 120 "cycles," 121 "cpu/cycles-t/," 122 "cpu/tx-start/" 123 "}" 124 }; 125 126 static const char * topdown_attrs[] = { 127 "topdown-total-slots", 128 "topdown-slots-retired", 129 "topdown-recovery-bubbles", 130 "topdown-fetch-bubbles", 131 "topdown-slots-issued", 132 NULL, 133 }; 134 135 static const char *topdown_metric_attrs[] = { 136 "slots", 137 "topdown-retiring", 138 "topdown-bad-spec", 139 "topdown-fe-bound", 140 "topdown-be-bound", 141 NULL, 142 }; 143 144 static const char *topdown_metric_L2_attrs[] = { 145 "slots", 146 "topdown-retiring", 147 "topdown-bad-spec", 148 "topdown-fe-bound", 149 "topdown-be-bound", 150 "topdown-heavy-ops", 151 "topdown-br-mispredict", 152 "topdown-fetch-lat", 153 "topdown-mem-bound", 154 NULL, 155 }; 156 157 #define TOPDOWN_MAX_LEVEL 2 158 159 static const char *smi_cost_attrs = { 160 "{" 161 "msr/aperf/," 162 "msr/smi/," 163 "cycles" 164 "}" 165 }; 166 167 static struct evlist *evsel_list; 168 static bool all_counters_use_bpf = true; 169 170 static struct target target = { 171 .uid = UINT_MAX, 172 }; 173 174 #define METRIC_ONLY_LEN 20 175 176 static volatile pid_t child_pid = -1; 177 static int detailed_run = 0; 178 static bool transaction_run; 179 static bool topdown_run = false; 180 static bool smi_cost = false; 181 static bool smi_reset = false; 182 static int big_num_opt = -1; 183 static bool group = false; 184 static const char *pre_cmd = NULL; 185 static const char *post_cmd = NULL; 186 static bool sync_run = false; 187 static bool forever = false; 188 static bool force_metric_only = false; 189 static struct timespec ref_time; 190 static bool append_file; 191 static bool interval_count; 192 static const char *output_name; 193 static int output_fd; 194 195 struct perf_stat { 196 bool record; 197 struct perf_data data; 198 struct perf_session *session; 199 u64 bytes_written; 200 struct perf_tool tool; 201 bool maps_allocated; 202 struct perf_cpu_map *cpus; 203 struct perf_thread_map *threads; 204 enum aggr_mode aggr_mode; 205 }; 206 207 static struct perf_stat perf_stat; 208 #define STAT_RECORD perf_stat.record 209 210 static volatile int done = 0; 211 212 static struct perf_stat_config stat_config = { 213 .aggr_mode = AGGR_GLOBAL, 214 .scale = true, 215 .unit_width = 4, /* strlen("unit") */ 216 .run_count = 1, 217 .metric_only_len = METRIC_ONLY_LEN, 218 .walltime_nsecs_stats = &walltime_nsecs_stats, 219 .big_num = true, 220 .ctl_fd = -1, 221 .ctl_fd_ack = -1, 222 .iostat_run = false, 223 }; 224 225 static bool cpus_map_matched(struct evsel *a, struct evsel *b) 226 { 227 if (!a->core.cpus && !b->core.cpus) 228 return true; 229 230 if (!a->core.cpus || !b->core.cpus) 231 return false; 232 233 if (perf_cpu_map__nr(a->core.cpus) != perf_cpu_map__nr(b->core.cpus)) 234 return false; 235 236 for (int i = 0; i < perf_cpu_map__nr(a->core.cpus); i++) { 237 if (perf_cpu_map__cpu(a->core.cpus, i).cpu != 238 perf_cpu_map__cpu(b->core.cpus, i).cpu) 239 return false; 240 } 241 242 return true; 243 } 244 245 static void evlist__check_cpu_maps(struct evlist *evlist) 246 { 247 struct evsel *evsel, *pos, *leader; 248 char buf[1024]; 249 250 if (evlist__has_hybrid(evlist)) 251 evlist__warn_hybrid_group(evlist); 252 253 evlist__for_each_entry(evlist, evsel) { 254 leader = evsel__leader(evsel); 255 256 /* Check that leader matches cpus with each member. */ 257 if (leader == evsel) 258 continue; 259 if (cpus_map_matched(leader, evsel)) 260 continue; 261 262 /* If there's mismatch disable the group and warn user. */ 263 WARN_ONCE(1, "WARNING: grouped events cpus do not match, disabling group:\n"); 264 evsel__group_desc(leader, buf, sizeof(buf)); 265 pr_warning(" %s\n", buf); 266 267 if (verbose) { 268 cpu_map__snprint(leader->core.cpus, buf, sizeof(buf)); 269 pr_warning(" %s: %s\n", leader->name, buf); 270 cpu_map__snprint(evsel->core.cpus, buf, sizeof(buf)); 271 pr_warning(" %s: %s\n", evsel->name, buf); 272 } 273 274 for_each_group_evsel(pos, leader) { 275 evsel__set_leader(pos, pos); 276 pos->core.nr_members = 0; 277 } 278 evsel->core.leader->nr_members = 0; 279 } 280 } 281 282 static inline void diff_timespec(struct timespec *r, struct timespec *a, 283 struct timespec *b) 284 { 285 r->tv_sec = a->tv_sec - b->tv_sec; 286 if (a->tv_nsec < b->tv_nsec) { 287 r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec; 288 r->tv_sec--; 289 } else { 290 r->tv_nsec = a->tv_nsec - b->tv_nsec ; 291 } 292 } 293 294 static void perf_stat__reset_stats(void) 295 { 296 int i; 297 298 evlist__reset_stats(evsel_list); 299 perf_stat__reset_shadow_stats(); 300 301 for (i = 0; i < stat_config.stats_num; i++) 302 perf_stat__reset_shadow_per_stat(&stat_config.stats[i]); 303 } 304 305 static int process_synthesized_event(struct perf_tool *tool __maybe_unused, 306 union perf_event *event, 307 struct perf_sample *sample __maybe_unused, 308 struct machine *machine __maybe_unused) 309 { 310 if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) { 311 pr_err("failed to write perf data, error: %m\n"); 312 return -1; 313 } 314 315 perf_stat.bytes_written += event->header.size; 316 return 0; 317 } 318 319 static int write_stat_round_event(u64 tm, u64 type) 320 { 321 return perf_event__synthesize_stat_round(NULL, tm, type, 322 process_synthesized_event, 323 NULL); 324 } 325 326 #define WRITE_STAT_ROUND_EVENT(time, interval) \ 327 write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval) 328 329 #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y) 330 331 static int evsel__write_stat_event(struct evsel *counter, int cpu_map_idx, u32 thread, 332 struct perf_counts_values *count) 333 { 334 struct perf_sample_id *sid = SID(counter, cpu_map_idx, thread); 335 struct perf_cpu cpu = perf_cpu_map__cpu(evsel__cpus(counter), cpu_map_idx); 336 337 return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count, 338 process_synthesized_event, NULL); 339 } 340 341 static int read_single_counter(struct evsel *counter, int cpu_map_idx, 342 int thread, struct timespec *rs) 343 { 344 if (counter->tool_event == PERF_TOOL_DURATION_TIME) { 345 u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL; 346 struct perf_counts_values *count = 347 perf_counts(counter->counts, cpu_map_idx, thread); 348 count->ena = count->run = val; 349 count->val = val; 350 return 0; 351 } 352 return evsel__read_counter(counter, cpu_map_idx, thread); 353 } 354 355 /* 356 * Read out the results of a single counter: 357 * do not aggregate counts across CPUs in system-wide mode 358 */ 359 static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu_map_idx) 360 { 361 int nthreads = perf_thread_map__nr(evsel_list->core.threads); 362 int thread; 363 364 if (!counter->supported) 365 return -ENOENT; 366 367 if (counter->core.system_wide) 368 nthreads = 1; 369 370 for (thread = 0; thread < nthreads; thread++) { 371 struct perf_counts_values *count; 372 373 count = perf_counts(counter->counts, cpu_map_idx, thread); 374 375 /* 376 * The leader's group read loads data into its group members 377 * (via evsel__read_counter()) and sets their count->loaded. 378 */ 379 if (!perf_counts__is_loaded(counter->counts, cpu_map_idx, thread) && 380 read_single_counter(counter, cpu_map_idx, thread, rs)) { 381 counter->counts->scaled = -1; 382 perf_counts(counter->counts, cpu_map_idx, thread)->ena = 0; 383 perf_counts(counter->counts, cpu_map_idx, thread)->run = 0; 384 return -1; 385 } 386 387 perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, false); 388 389 if (STAT_RECORD) { 390 if (evsel__write_stat_event(counter, cpu_map_idx, thread, count)) { 391 pr_err("failed to write stat event\n"); 392 return -1; 393 } 394 } 395 396 if (verbose > 1) { 397 fprintf(stat_config.output, 398 "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", 399 evsel__name(counter), 400 perf_cpu_map__cpu(evsel__cpus(counter), 401 cpu_map_idx).cpu, 402 count->val, count->ena, count->run); 403 } 404 } 405 406 return 0; 407 } 408 409 static int read_affinity_counters(struct timespec *rs) 410 { 411 struct evlist_cpu_iterator evlist_cpu_itr; 412 struct affinity saved_affinity, *affinity; 413 414 if (all_counters_use_bpf) 415 return 0; 416 417 if (!target__has_cpu(&target) || target__has_per_thread(&target)) 418 affinity = NULL; 419 else if (affinity__setup(&saved_affinity) < 0) 420 return -1; 421 else 422 affinity = &saved_affinity; 423 424 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { 425 struct evsel *counter = evlist_cpu_itr.evsel; 426 427 if (evsel__is_bpf(counter)) 428 continue; 429 430 if (!counter->err) { 431 counter->err = read_counter_cpu(counter, rs, 432 evlist_cpu_itr.cpu_map_idx); 433 } 434 } 435 if (affinity) 436 affinity__cleanup(&saved_affinity); 437 438 return 0; 439 } 440 441 static int read_bpf_map_counters(void) 442 { 443 struct evsel *counter; 444 int err; 445 446 evlist__for_each_entry(evsel_list, counter) { 447 if (!evsel__is_bpf(counter)) 448 continue; 449 450 err = bpf_counter__read(counter); 451 if (err) 452 return err; 453 } 454 return 0; 455 } 456 457 static void read_counters(struct timespec *rs) 458 { 459 struct evsel *counter; 460 461 if (!stat_config.stop_read_counter) { 462 if (read_bpf_map_counters() || 463 read_affinity_counters(rs)) 464 return; 465 } 466 467 evlist__for_each_entry(evsel_list, counter) { 468 if (counter->err) 469 pr_debug("failed to read counter %s\n", counter->name); 470 if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter)) 471 pr_warning("failed to process counter %s\n", counter->name); 472 counter->err = 0; 473 } 474 } 475 476 static int runtime_stat_new(struct perf_stat_config *config, int nthreads) 477 { 478 int i; 479 480 config->stats = calloc(nthreads, sizeof(struct runtime_stat)); 481 if (!config->stats) 482 return -1; 483 484 config->stats_num = nthreads; 485 486 for (i = 0; i < nthreads; i++) 487 runtime_stat__init(&config->stats[i]); 488 489 return 0; 490 } 491 492 static void runtime_stat_delete(struct perf_stat_config *config) 493 { 494 int i; 495 496 if (!config->stats) 497 return; 498 499 for (i = 0; i < config->stats_num; i++) 500 runtime_stat__exit(&config->stats[i]); 501 502 zfree(&config->stats); 503 } 504 505 static void runtime_stat_reset(struct perf_stat_config *config) 506 { 507 int i; 508 509 if (!config->stats) 510 return; 511 512 for (i = 0; i < config->stats_num; i++) 513 perf_stat__reset_shadow_per_stat(&config->stats[i]); 514 } 515 516 static void process_interval(void) 517 { 518 struct timespec ts, rs; 519 520 clock_gettime(CLOCK_MONOTONIC, &ts); 521 diff_timespec(&rs, &ts, &ref_time); 522 523 perf_stat__reset_shadow_per_stat(&rt_stat); 524 runtime_stat_reset(&stat_config); 525 read_counters(&rs); 526 527 if (STAT_RECORD) { 528 if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL)) 529 pr_err("failed to write stat round event\n"); 530 } 531 532 init_stats(&walltime_nsecs_stats); 533 update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL); 534 print_counters(&rs, 0, NULL); 535 } 536 537 static bool handle_interval(unsigned int interval, int *times) 538 { 539 if (interval) { 540 process_interval(); 541 if (interval_count && !(--(*times))) 542 return true; 543 } 544 return false; 545 } 546 547 static int enable_counters(void) 548 { 549 struct evsel *evsel; 550 int err; 551 552 evlist__for_each_entry(evsel_list, evsel) { 553 if (!evsel__is_bpf(evsel)) 554 continue; 555 556 err = bpf_counter__enable(evsel); 557 if (err) 558 return err; 559 } 560 561 if (stat_config.initial_delay < 0) { 562 pr_info(EVLIST_DISABLED_MSG); 563 return 0; 564 } 565 566 if (stat_config.initial_delay > 0) { 567 pr_info(EVLIST_DISABLED_MSG); 568 usleep(stat_config.initial_delay * USEC_PER_MSEC); 569 } 570 571 /* 572 * We need to enable counters only if: 573 * - we don't have tracee (attaching to task or cpu) 574 * - we have initial delay configured 575 */ 576 if (!target__none(&target) || stat_config.initial_delay) { 577 if (!all_counters_use_bpf) 578 evlist__enable(evsel_list); 579 if (stat_config.initial_delay > 0) 580 pr_info(EVLIST_ENABLED_MSG); 581 } 582 return 0; 583 } 584 585 static void disable_counters(void) 586 { 587 struct evsel *counter; 588 589 /* 590 * If we don't have tracee (attaching to task or cpu), counters may 591 * still be running. To get accurate group ratios, we must stop groups 592 * from counting before reading their constituent counters. 593 */ 594 if (!target__none(&target)) { 595 evlist__for_each_entry(evsel_list, counter) 596 bpf_counter__disable(counter); 597 if (!all_counters_use_bpf) 598 evlist__disable(evsel_list); 599 } 600 } 601 602 static volatile int workload_exec_errno; 603 604 /* 605 * evlist__prepare_workload will send a SIGUSR1 606 * if the fork fails, since we asked by setting its 607 * want_signal to true. 608 */ 609 static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info, 610 void *ucontext __maybe_unused) 611 { 612 workload_exec_errno = info->si_value.sival_int; 613 } 614 615 static bool evsel__should_store_id(struct evsel *counter) 616 { 617 return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID; 618 } 619 620 static bool is_target_alive(struct target *_target, 621 struct perf_thread_map *threads) 622 { 623 struct stat st; 624 int i; 625 626 if (!target__has_task(_target)) 627 return true; 628 629 for (i = 0; i < threads->nr; i++) { 630 char path[PATH_MAX]; 631 632 scnprintf(path, PATH_MAX, "%s/%d", procfs__mountpoint(), 633 threads->map[i].pid); 634 635 if (!stat(path, &st)) 636 return true; 637 } 638 639 return false; 640 } 641 642 static void process_evlist(struct evlist *evlist, unsigned int interval) 643 { 644 enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED; 645 646 if (evlist__ctlfd_process(evlist, &cmd) > 0) { 647 switch (cmd) { 648 case EVLIST_CTL_CMD_ENABLE: 649 if (interval) 650 process_interval(); 651 break; 652 case EVLIST_CTL_CMD_DISABLE: 653 if (interval) 654 process_interval(); 655 break; 656 case EVLIST_CTL_CMD_SNAPSHOT: 657 case EVLIST_CTL_CMD_ACK: 658 case EVLIST_CTL_CMD_UNSUPPORTED: 659 case EVLIST_CTL_CMD_EVLIST: 660 case EVLIST_CTL_CMD_STOP: 661 case EVLIST_CTL_CMD_PING: 662 default: 663 break; 664 } 665 } 666 } 667 668 static void compute_tts(struct timespec *time_start, struct timespec *time_stop, 669 int *time_to_sleep) 670 { 671 int tts = *time_to_sleep; 672 struct timespec time_diff; 673 674 diff_timespec(&time_diff, time_stop, time_start); 675 676 tts -= time_diff.tv_sec * MSEC_PER_SEC + 677 time_diff.tv_nsec / NSEC_PER_MSEC; 678 679 if (tts < 0) 680 tts = 0; 681 682 *time_to_sleep = tts; 683 } 684 685 static int dispatch_events(bool forks, int timeout, int interval, int *times) 686 { 687 int child_exited = 0, status = 0; 688 int time_to_sleep, sleep_time; 689 struct timespec time_start, time_stop; 690 691 if (interval) 692 sleep_time = interval; 693 else if (timeout) 694 sleep_time = timeout; 695 else 696 sleep_time = 1000; 697 698 time_to_sleep = sleep_time; 699 700 while (!done) { 701 if (forks) 702 child_exited = waitpid(child_pid, &status, WNOHANG); 703 else 704 child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0; 705 706 if (child_exited) 707 break; 708 709 clock_gettime(CLOCK_MONOTONIC, &time_start); 710 if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) { /* poll timeout or EINTR */ 711 if (timeout || handle_interval(interval, times)) 712 break; 713 time_to_sleep = sleep_time; 714 } else { /* fd revent */ 715 process_evlist(evsel_list, interval); 716 clock_gettime(CLOCK_MONOTONIC, &time_stop); 717 compute_tts(&time_start, &time_stop, &time_to_sleep); 718 } 719 } 720 721 return status; 722 } 723 724 enum counter_recovery { 725 COUNTER_SKIP, 726 COUNTER_RETRY, 727 COUNTER_FATAL, 728 }; 729 730 static enum counter_recovery stat_handle_error(struct evsel *counter) 731 { 732 char msg[BUFSIZ]; 733 /* 734 * PPC returns ENXIO for HW counters until 2.6.37 735 * (behavior changed with commit b0a873e). 736 */ 737 if (errno == EINVAL || errno == ENOSYS || 738 errno == ENOENT || errno == EOPNOTSUPP || 739 errno == ENXIO) { 740 if (verbose > 0) 741 ui__warning("%s event is not supported by the kernel.\n", 742 evsel__name(counter)); 743 counter->supported = false; 744 /* 745 * errored is a sticky flag that means one of the counter's 746 * cpu event had a problem and needs to be reexamined. 747 */ 748 counter->errored = true; 749 750 if ((evsel__leader(counter) != counter) || 751 !(counter->core.leader->nr_members > 1)) 752 return COUNTER_SKIP; 753 } else if (evsel__fallback(counter, errno, msg, sizeof(msg))) { 754 if (verbose > 0) 755 ui__warning("%s\n", msg); 756 return COUNTER_RETRY; 757 } else if (target__has_per_thread(&target) && 758 evsel_list->core.threads && 759 evsel_list->core.threads->err_thread != -1) { 760 /* 761 * For global --per-thread case, skip current 762 * error thread. 763 */ 764 if (!thread_map__remove(evsel_list->core.threads, 765 evsel_list->core.threads->err_thread)) { 766 evsel_list->core.threads->err_thread = -1; 767 return COUNTER_RETRY; 768 } 769 } 770 771 evsel__open_strerror(counter, &target, errno, msg, sizeof(msg)); 772 ui__error("%s\n", msg); 773 774 if (child_pid != -1) 775 kill(child_pid, SIGTERM); 776 return COUNTER_FATAL; 777 } 778 779 static int __run_perf_stat(int argc, const char **argv, int run_idx) 780 { 781 int interval = stat_config.interval; 782 int times = stat_config.times; 783 int timeout = stat_config.timeout; 784 char msg[BUFSIZ]; 785 unsigned long long t0, t1; 786 struct evsel *counter; 787 size_t l; 788 int status = 0; 789 const bool forks = (argc > 0); 790 bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false; 791 struct evlist_cpu_iterator evlist_cpu_itr; 792 struct affinity saved_affinity, *affinity = NULL; 793 int err; 794 bool second_pass = false; 795 796 if (forks) { 797 if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) { 798 perror("failed to prepare workload"); 799 return -1; 800 } 801 child_pid = evsel_list->workload.pid; 802 } 803 804 if (group) 805 evlist__set_leader(evsel_list); 806 807 if (!cpu_map__is_dummy(evsel_list->core.cpus)) { 808 if (affinity__setup(&saved_affinity) < 0) 809 return -1; 810 affinity = &saved_affinity; 811 } 812 813 evlist__for_each_entry(evsel_list, counter) { 814 if (bpf_counter__load(counter, &target)) 815 return -1; 816 if (!evsel__is_bpf(counter)) 817 all_counters_use_bpf = false; 818 } 819 820 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { 821 counter = evlist_cpu_itr.evsel; 822 823 /* 824 * bperf calls evsel__open_per_cpu() in bperf__load(), so 825 * no need to call it again here. 826 */ 827 if (target.use_bpf) 828 break; 829 830 if (counter->reset_group || counter->errored) 831 continue; 832 if (evsel__is_bpf(counter)) 833 continue; 834 try_again: 835 if (create_perf_stat_counter(counter, &stat_config, &target, 836 evlist_cpu_itr.cpu_map_idx) < 0) { 837 838 /* 839 * Weak group failed. We cannot just undo this here 840 * because earlier CPUs might be in group mode, and the kernel 841 * doesn't support mixing group and non group reads. Defer 842 * it to later. 843 * Don't close here because we're in the wrong affinity. 844 */ 845 if ((errno == EINVAL || errno == EBADF) && 846 evsel__leader(counter) != counter && 847 counter->weak_group) { 848 evlist__reset_weak_group(evsel_list, counter, false); 849 assert(counter->reset_group); 850 second_pass = true; 851 continue; 852 } 853 854 switch (stat_handle_error(counter)) { 855 case COUNTER_FATAL: 856 return -1; 857 case COUNTER_RETRY: 858 goto try_again; 859 case COUNTER_SKIP: 860 continue; 861 default: 862 break; 863 } 864 865 } 866 counter->supported = true; 867 } 868 869 if (second_pass) { 870 /* 871 * Now redo all the weak group after closing them, 872 * and also close errored counters. 873 */ 874 875 /* First close errored or weak retry */ 876 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { 877 counter = evlist_cpu_itr.evsel; 878 879 if (!counter->reset_group && !counter->errored) 880 continue; 881 882 perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx); 883 } 884 /* Now reopen weak */ 885 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { 886 counter = evlist_cpu_itr.evsel; 887 888 if (!counter->reset_group && !counter->errored) 889 continue; 890 if (!counter->reset_group) 891 continue; 892 try_again_reset: 893 pr_debug2("reopening weak %s\n", evsel__name(counter)); 894 if (create_perf_stat_counter(counter, &stat_config, &target, 895 evlist_cpu_itr.cpu_map_idx) < 0) { 896 897 switch (stat_handle_error(counter)) { 898 case COUNTER_FATAL: 899 return -1; 900 case COUNTER_RETRY: 901 goto try_again_reset; 902 case COUNTER_SKIP: 903 continue; 904 default: 905 break; 906 } 907 } 908 counter->supported = true; 909 } 910 } 911 affinity__cleanup(affinity); 912 913 evlist__for_each_entry(evsel_list, counter) { 914 if (!counter->supported) { 915 perf_evsel__free_fd(&counter->core); 916 continue; 917 } 918 919 l = strlen(counter->unit); 920 if (l > stat_config.unit_width) 921 stat_config.unit_width = l; 922 923 if (evsel__should_store_id(counter) && 924 evsel__store_ids(counter, evsel_list)) 925 return -1; 926 } 927 928 if (evlist__apply_filters(evsel_list, &counter)) { 929 pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n", 930 counter->filter, evsel__name(counter), errno, 931 str_error_r(errno, msg, sizeof(msg))); 932 return -1; 933 } 934 935 if (STAT_RECORD) { 936 int fd = perf_data__fd(&perf_stat.data); 937 938 if (is_pipe) { 939 err = perf_header__write_pipe(perf_data__fd(&perf_stat.data)); 940 } else { 941 err = perf_session__write_header(perf_stat.session, evsel_list, 942 fd, false); 943 } 944 945 if (err < 0) 946 return err; 947 948 err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list, 949 process_synthesized_event, is_pipe); 950 if (err < 0) 951 return err; 952 } 953 954 /* 955 * Enable counters and exec the command: 956 */ 957 if (forks) { 958 evlist__start_workload(evsel_list); 959 err = enable_counters(); 960 if (err) 961 return -1; 962 963 t0 = rdclock(); 964 clock_gettime(CLOCK_MONOTONIC, &ref_time); 965 966 if (interval || timeout || evlist__ctlfd_initialized(evsel_list)) 967 status = dispatch_events(forks, timeout, interval, ×); 968 if (child_pid != -1) { 969 if (timeout) 970 kill(child_pid, SIGTERM); 971 wait4(child_pid, &status, 0, &stat_config.ru_data); 972 } 973 974 if (workload_exec_errno) { 975 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg)); 976 pr_err("Workload failed: %s\n", emsg); 977 return -1; 978 } 979 980 if (WIFSIGNALED(status)) 981 psignal(WTERMSIG(status), argv[0]); 982 } else { 983 err = enable_counters(); 984 if (err) 985 return -1; 986 987 t0 = rdclock(); 988 clock_gettime(CLOCK_MONOTONIC, &ref_time); 989 990 status = dispatch_events(forks, timeout, interval, ×); 991 } 992 993 disable_counters(); 994 995 t1 = rdclock(); 996 997 if (stat_config.walltime_run_table) 998 stat_config.walltime_run[run_idx] = t1 - t0; 999 1000 if (interval && stat_config.summary) { 1001 stat_config.interval = 0; 1002 stat_config.stop_read_counter = true; 1003 init_stats(&walltime_nsecs_stats); 1004 update_stats(&walltime_nsecs_stats, t1 - t0); 1005 1006 if (stat_config.aggr_mode == AGGR_GLOBAL) 1007 evlist__save_aggr_prev_raw_counts(evsel_list); 1008 1009 evlist__copy_prev_raw_counts(evsel_list); 1010 evlist__reset_prev_raw_counts(evsel_list); 1011 runtime_stat_reset(&stat_config); 1012 perf_stat__reset_shadow_per_stat(&rt_stat); 1013 } else 1014 update_stats(&walltime_nsecs_stats, t1 - t0); 1015 1016 /* 1017 * Closing a group leader splits the group, and as we only disable 1018 * group leaders, results in remaining events becoming enabled. To 1019 * avoid arbitrary skew, we must read all counters before closing any 1020 * group leaders. 1021 */ 1022 read_counters(&(struct timespec) { .tv_nsec = t1-t0 }); 1023 1024 /* 1025 * We need to keep evsel_list alive, because it's processed 1026 * later the evsel_list will be closed after. 1027 */ 1028 if (!STAT_RECORD) 1029 evlist__close(evsel_list); 1030 1031 return WEXITSTATUS(status); 1032 } 1033 1034 static int run_perf_stat(int argc, const char **argv, int run_idx) 1035 { 1036 int ret; 1037 1038 if (pre_cmd) { 1039 ret = system(pre_cmd); 1040 if (ret) 1041 return ret; 1042 } 1043 1044 if (sync_run) 1045 sync(); 1046 1047 ret = __run_perf_stat(argc, argv, run_idx); 1048 if (ret) 1049 return ret; 1050 1051 if (post_cmd) { 1052 ret = system(post_cmd); 1053 if (ret) 1054 return ret; 1055 } 1056 1057 return ret; 1058 } 1059 1060 static void print_counters(struct timespec *ts, int argc, const char **argv) 1061 { 1062 /* Do not print anything if we record to the pipe. */ 1063 if (STAT_RECORD && perf_stat.data.is_pipe) 1064 return; 1065 if (stat_config.quiet) 1066 return; 1067 1068 evlist__print_counters(evsel_list, &stat_config, &target, ts, argc, argv); 1069 } 1070 1071 static volatile int signr = -1; 1072 1073 static void skip_signal(int signo) 1074 { 1075 if ((child_pid == -1) || stat_config.interval) 1076 done = 1; 1077 1078 signr = signo; 1079 /* 1080 * render child_pid harmless 1081 * won't send SIGTERM to a random 1082 * process in case of race condition 1083 * and fast PID recycling 1084 */ 1085 child_pid = -1; 1086 } 1087 1088 static void sig_atexit(void) 1089 { 1090 sigset_t set, oset; 1091 1092 /* 1093 * avoid race condition with SIGCHLD handler 1094 * in skip_signal() which is modifying child_pid 1095 * goal is to avoid send SIGTERM to a random 1096 * process 1097 */ 1098 sigemptyset(&set); 1099 sigaddset(&set, SIGCHLD); 1100 sigprocmask(SIG_BLOCK, &set, &oset); 1101 1102 if (child_pid != -1) 1103 kill(child_pid, SIGTERM); 1104 1105 sigprocmask(SIG_SETMASK, &oset, NULL); 1106 1107 if (signr == -1) 1108 return; 1109 1110 signal(signr, SIG_DFL); 1111 kill(getpid(), signr); 1112 } 1113 1114 void perf_stat__set_big_num(int set) 1115 { 1116 stat_config.big_num = (set != 0); 1117 } 1118 1119 void perf_stat__set_no_csv_summary(int set) 1120 { 1121 stat_config.no_csv_summary = (set != 0); 1122 } 1123 1124 static int stat__set_big_num(const struct option *opt __maybe_unused, 1125 const char *s __maybe_unused, int unset) 1126 { 1127 big_num_opt = unset ? 0 : 1; 1128 perf_stat__set_big_num(!unset); 1129 return 0; 1130 } 1131 1132 static int enable_metric_only(const struct option *opt __maybe_unused, 1133 const char *s __maybe_unused, int unset) 1134 { 1135 force_metric_only = true; 1136 stat_config.metric_only = !unset; 1137 return 0; 1138 } 1139 1140 static int parse_metric_groups(const struct option *opt, 1141 const char *str, 1142 int unset __maybe_unused) 1143 { 1144 return metricgroup__parse_groups(opt, str, 1145 stat_config.metric_no_group, 1146 stat_config.metric_no_merge, 1147 &stat_config.metric_events); 1148 } 1149 1150 static int parse_control_option(const struct option *opt, 1151 const char *str, 1152 int unset __maybe_unused) 1153 { 1154 struct perf_stat_config *config = opt->value; 1155 1156 return evlist__parse_control(str, &config->ctl_fd, &config->ctl_fd_ack, &config->ctl_fd_close); 1157 } 1158 1159 static int parse_stat_cgroups(const struct option *opt, 1160 const char *str, int unset) 1161 { 1162 if (stat_config.cgroup_list) { 1163 pr_err("--cgroup and --for-each-cgroup cannot be used together\n"); 1164 return -1; 1165 } 1166 1167 return parse_cgroups(opt, str, unset); 1168 } 1169 1170 static int parse_hybrid_type(const struct option *opt, 1171 const char *str, 1172 int unset __maybe_unused) 1173 { 1174 struct evlist *evlist = *(struct evlist **)opt->value; 1175 1176 if (!list_empty(&evlist->core.entries)) { 1177 fprintf(stderr, "Must define cputype before events/metrics\n"); 1178 return -1; 1179 } 1180 1181 evlist->hybrid_pmu_name = perf_pmu__hybrid_type_to_pmu(str); 1182 if (!evlist->hybrid_pmu_name) { 1183 fprintf(stderr, "--cputype %s is not supported!\n", str); 1184 return -1; 1185 } 1186 1187 return 0; 1188 } 1189 1190 static struct option stat_options[] = { 1191 OPT_BOOLEAN('T', "transaction", &transaction_run, 1192 "hardware transaction statistics"), 1193 OPT_CALLBACK('e', "event", &evsel_list, "event", 1194 "event selector. use 'perf list' to list available events", 1195 parse_events_option), 1196 OPT_CALLBACK(0, "filter", &evsel_list, "filter", 1197 "event filter", parse_filter), 1198 OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit, 1199 "child tasks do not inherit counters"), 1200 OPT_STRING('p', "pid", &target.pid, "pid", 1201 "stat events on existing process id"), 1202 OPT_STRING('t', "tid", &target.tid, "tid", 1203 "stat events on existing thread id"), 1204 #ifdef HAVE_BPF_SKEL 1205 OPT_STRING('b', "bpf-prog", &target.bpf_str, "bpf-prog-id", 1206 "stat events on existing bpf program id"), 1207 OPT_BOOLEAN(0, "bpf-counters", &target.use_bpf, 1208 "use bpf program to count events"), 1209 OPT_STRING(0, "bpf-attr-map", &target.attr_map, "attr-map-path", 1210 "path to perf_event_attr map"), 1211 #endif 1212 OPT_BOOLEAN('a', "all-cpus", &target.system_wide, 1213 "system-wide collection from all CPUs"), 1214 OPT_BOOLEAN('g', "group", &group, 1215 "put the counters into a counter group"), 1216 OPT_BOOLEAN(0, "scale", &stat_config.scale, 1217 "Use --no-scale to disable counter scaling for multiplexing"), 1218 OPT_INCR('v', "verbose", &verbose, 1219 "be more verbose (show counter open errors, etc)"), 1220 OPT_INTEGER('r', "repeat", &stat_config.run_count, 1221 "repeat command and print average + stddev (max: 100, forever: 0)"), 1222 OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table, 1223 "display details about each run (only with -r option)"), 1224 OPT_BOOLEAN('n', "null", &stat_config.null_run, 1225 "null run - dont start any counters"), 1226 OPT_INCR('d', "detailed", &detailed_run, 1227 "detailed run - start a lot of events"), 1228 OPT_BOOLEAN('S', "sync", &sync_run, 1229 "call sync() before starting a run"), 1230 OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, 1231 "print large numbers with thousands\' separators", 1232 stat__set_big_num), 1233 OPT_STRING('C', "cpu", &target.cpu_list, "cpu", 1234 "list of cpus to monitor in system-wide"), 1235 OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode, 1236 "disable CPU count aggregation", AGGR_NONE), 1237 OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"), 1238 OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator", 1239 "print counts with custom separator"), 1240 OPT_CALLBACK('G', "cgroup", &evsel_list, "name", 1241 "monitor event in cgroup name only", parse_stat_cgroups), 1242 OPT_STRING(0, "for-each-cgroup", &stat_config.cgroup_list, "name", 1243 "expand events for each cgroup"), 1244 OPT_STRING('o', "output", &output_name, "file", "output file name"), 1245 OPT_BOOLEAN(0, "append", &append_file, "append to the output file"), 1246 OPT_INTEGER(0, "log-fd", &output_fd, 1247 "log output to fd, instead of stderr"), 1248 OPT_STRING(0, "pre", &pre_cmd, "command", 1249 "command to run prior to the measured command"), 1250 OPT_STRING(0, "post", &post_cmd, "command", 1251 "command to run after to the measured command"), 1252 OPT_UINTEGER('I', "interval-print", &stat_config.interval, 1253 "print counts at regular interval in ms " 1254 "(overhead is possible for values <= 100ms)"), 1255 OPT_INTEGER(0, "interval-count", &stat_config.times, 1256 "print counts for fixed number of times"), 1257 OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear, 1258 "clear screen in between new interval"), 1259 OPT_UINTEGER(0, "timeout", &stat_config.timeout, 1260 "stop workload and print counts after a timeout period in ms (>= 10ms)"), 1261 OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode, 1262 "aggregate counts per processor socket", AGGR_SOCKET), 1263 OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode, 1264 "aggregate counts per processor die", AGGR_DIE), 1265 OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode, 1266 "aggregate counts per physical processor core", AGGR_CORE), 1267 OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode, 1268 "aggregate counts per thread", AGGR_THREAD), 1269 OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode, 1270 "aggregate counts per numa node", AGGR_NODE), 1271 OPT_INTEGER('D', "delay", &stat_config.initial_delay, 1272 "ms to wait before starting measurement after program start (-1: start with events disabled)"), 1273 OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL, 1274 "Only print computed metrics. No raw values", enable_metric_only), 1275 OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group, 1276 "don't group metric events, impacts multiplexing"), 1277 OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge, 1278 "don't try to share events between metrics in a group"), 1279 OPT_BOOLEAN(0, "topdown", &topdown_run, 1280 "measure top-down statistics"), 1281 OPT_UINTEGER(0, "td-level", &stat_config.topdown_level, 1282 "Set the metrics level for the top-down statistics (0: max level)"), 1283 OPT_BOOLEAN(0, "smi-cost", &smi_cost, 1284 "measure SMI cost"), 1285 OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list", 1286 "monitor specified metrics or metric groups (separated by ,)", 1287 parse_metric_groups), 1288 OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel, 1289 "Configure all used events to run in kernel space.", 1290 PARSE_OPT_EXCLUSIVE), 1291 OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user, 1292 "Configure all used events to run in user space.", 1293 PARSE_OPT_EXCLUSIVE), 1294 OPT_BOOLEAN(0, "percore-show-thread", &stat_config.percore_show_thread, 1295 "Use with 'percore' event qualifier to show the event " 1296 "counts of one hardware thread by sum up total hardware " 1297 "threads of same physical core"), 1298 OPT_BOOLEAN(0, "summary", &stat_config.summary, 1299 "print summary for interval mode"), 1300 OPT_BOOLEAN(0, "no-csv-summary", &stat_config.no_csv_summary, 1301 "don't print 'summary' for CSV summary output"), 1302 OPT_BOOLEAN(0, "quiet", &stat_config.quiet, 1303 "don't print output (useful with record)"), 1304 OPT_CALLBACK(0, "cputype", &evsel_list, "hybrid cpu type", 1305 "Only enable events on applying cpu with this type " 1306 "for hybrid platform (e.g. core or atom)", 1307 parse_hybrid_type), 1308 #ifdef HAVE_LIBPFM 1309 OPT_CALLBACK(0, "pfm-events", &evsel_list, "event", 1310 "libpfm4 event selector. use 'perf list' to list available events", 1311 parse_libpfm_events_option), 1312 #endif 1313 OPT_CALLBACK(0, "control", &stat_config, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]", 1314 "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events).\n" 1315 "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n" 1316 "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.", 1317 parse_control_option), 1318 OPT_CALLBACK_OPTARG(0, "iostat", &evsel_list, &stat_config, "default", 1319 "measure I/O performance metrics provided by arch/platform", 1320 iostat_parse), 1321 OPT_END() 1322 }; 1323 1324 static const char *const aggr_mode__string[] = { 1325 [AGGR_CORE] = "core", 1326 [AGGR_DIE] = "die", 1327 [AGGR_GLOBAL] = "global", 1328 [AGGR_NODE] = "node", 1329 [AGGR_NONE] = "none", 1330 [AGGR_SOCKET] = "socket", 1331 [AGGR_THREAD] = "thread", 1332 [AGGR_UNSET] = "unset", 1333 }; 1334 1335 static struct aggr_cpu_id perf_stat__get_socket(struct perf_stat_config *config __maybe_unused, 1336 struct perf_cpu cpu) 1337 { 1338 return aggr_cpu_id__socket(cpu, /*data=*/NULL); 1339 } 1340 1341 static struct aggr_cpu_id perf_stat__get_die(struct perf_stat_config *config __maybe_unused, 1342 struct perf_cpu cpu) 1343 { 1344 return aggr_cpu_id__die(cpu, /*data=*/NULL); 1345 } 1346 1347 static struct aggr_cpu_id perf_stat__get_core(struct perf_stat_config *config __maybe_unused, 1348 struct perf_cpu cpu) 1349 { 1350 return aggr_cpu_id__core(cpu, /*data=*/NULL); 1351 } 1352 1353 static struct aggr_cpu_id perf_stat__get_node(struct perf_stat_config *config __maybe_unused, 1354 struct perf_cpu cpu) 1355 { 1356 return aggr_cpu_id__node(cpu, /*data=*/NULL); 1357 } 1358 1359 static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config, 1360 aggr_get_id_t get_id, struct perf_cpu cpu) 1361 { 1362 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1363 1364 if (aggr_cpu_id__is_empty(&config->cpus_aggr_map->map[cpu.cpu])) 1365 config->cpus_aggr_map->map[cpu.cpu] = get_id(config, cpu); 1366 1367 id = config->cpus_aggr_map->map[cpu.cpu]; 1368 return id; 1369 } 1370 1371 static struct aggr_cpu_id perf_stat__get_socket_cached(struct perf_stat_config *config, 1372 struct perf_cpu cpu) 1373 { 1374 return perf_stat__get_aggr(config, perf_stat__get_socket, cpu); 1375 } 1376 1377 static struct aggr_cpu_id perf_stat__get_die_cached(struct perf_stat_config *config, 1378 struct perf_cpu cpu) 1379 { 1380 return perf_stat__get_aggr(config, perf_stat__get_die, cpu); 1381 } 1382 1383 static struct aggr_cpu_id perf_stat__get_core_cached(struct perf_stat_config *config, 1384 struct perf_cpu cpu) 1385 { 1386 return perf_stat__get_aggr(config, perf_stat__get_core, cpu); 1387 } 1388 1389 static struct aggr_cpu_id perf_stat__get_node_cached(struct perf_stat_config *config, 1390 struct perf_cpu cpu) 1391 { 1392 return perf_stat__get_aggr(config, perf_stat__get_node, cpu); 1393 } 1394 1395 static bool term_percore_set(void) 1396 { 1397 struct evsel *counter; 1398 1399 evlist__for_each_entry(evsel_list, counter) { 1400 if (counter->percore) 1401 return true; 1402 } 1403 1404 return false; 1405 } 1406 1407 static aggr_cpu_id_get_t aggr_mode__get_aggr(enum aggr_mode aggr_mode) 1408 { 1409 switch (aggr_mode) { 1410 case AGGR_SOCKET: 1411 return aggr_cpu_id__socket; 1412 case AGGR_DIE: 1413 return aggr_cpu_id__die; 1414 case AGGR_CORE: 1415 return aggr_cpu_id__core; 1416 case AGGR_NODE: 1417 return aggr_cpu_id__node; 1418 case AGGR_NONE: 1419 if (term_percore_set()) 1420 return aggr_cpu_id__core; 1421 1422 return NULL; 1423 case AGGR_GLOBAL: 1424 case AGGR_THREAD: 1425 case AGGR_UNSET: 1426 default: 1427 return NULL; 1428 } 1429 } 1430 1431 static aggr_get_id_t aggr_mode__get_id(enum aggr_mode aggr_mode) 1432 { 1433 switch (aggr_mode) { 1434 case AGGR_SOCKET: 1435 return perf_stat__get_socket_cached; 1436 case AGGR_DIE: 1437 return perf_stat__get_die_cached; 1438 case AGGR_CORE: 1439 return perf_stat__get_core_cached; 1440 case AGGR_NODE: 1441 return perf_stat__get_node_cached; 1442 case AGGR_NONE: 1443 if (term_percore_set()) { 1444 return perf_stat__get_core_cached; 1445 } 1446 return NULL; 1447 case AGGR_GLOBAL: 1448 case AGGR_THREAD: 1449 case AGGR_UNSET: 1450 default: 1451 return NULL; 1452 } 1453 } 1454 1455 static int perf_stat_init_aggr_mode(void) 1456 { 1457 int nr; 1458 aggr_cpu_id_get_t get_id = aggr_mode__get_aggr(stat_config.aggr_mode); 1459 1460 if (get_id) { 1461 stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.cpus, 1462 get_id, /*data=*/NULL); 1463 if (!stat_config.aggr_map) { 1464 pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]); 1465 return -1; 1466 } 1467 stat_config.aggr_get_id = aggr_mode__get_id(stat_config.aggr_mode); 1468 } 1469 1470 /* 1471 * The evsel_list->cpus is the base we operate on, 1472 * taking the highest cpu number to be the size of 1473 * the aggregation translate cpumap. 1474 */ 1475 nr = perf_cpu_map__max(evsel_list->core.cpus).cpu; 1476 stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1); 1477 return stat_config.cpus_aggr_map ? 0 : -ENOMEM; 1478 } 1479 1480 static void cpu_aggr_map__delete(struct cpu_aggr_map *map) 1481 { 1482 if (map) { 1483 WARN_ONCE(refcount_read(&map->refcnt) != 0, 1484 "cpu_aggr_map refcnt unbalanced\n"); 1485 free(map); 1486 } 1487 } 1488 1489 static void cpu_aggr_map__put(struct cpu_aggr_map *map) 1490 { 1491 if (map && refcount_dec_and_test(&map->refcnt)) 1492 cpu_aggr_map__delete(map); 1493 } 1494 1495 static void perf_stat__exit_aggr_mode(void) 1496 { 1497 cpu_aggr_map__put(stat_config.aggr_map); 1498 cpu_aggr_map__put(stat_config.cpus_aggr_map); 1499 stat_config.aggr_map = NULL; 1500 stat_config.cpus_aggr_map = NULL; 1501 } 1502 1503 static struct aggr_cpu_id perf_env__get_socket_aggr_by_cpu(struct perf_cpu cpu, void *data) 1504 { 1505 struct perf_env *env = data; 1506 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1507 1508 if (cpu.cpu != -1) 1509 id.socket = env->cpu[cpu.cpu].socket_id; 1510 1511 return id; 1512 } 1513 1514 static struct aggr_cpu_id perf_env__get_die_aggr_by_cpu(struct perf_cpu cpu, void *data) 1515 { 1516 struct perf_env *env = data; 1517 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1518 1519 if (cpu.cpu != -1) { 1520 /* 1521 * die_id is relative to socket, so start 1522 * with the socket ID and then add die to 1523 * make a unique ID. 1524 */ 1525 id.socket = env->cpu[cpu.cpu].socket_id; 1526 id.die = env->cpu[cpu.cpu].die_id; 1527 } 1528 1529 return id; 1530 } 1531 1532 static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(struct perf_cpu cpu, void *data) 1533 { 1534 struct perf_env *env = data; 1535 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1536 1537 if (cpu.cpu != -1) { 1538 /* 1539 * core_id is relative to socket and die, 1540 * we need a global id. So we set 1541 * socket, die id and core id 1542 */ 1543 id.socket = env->cpu[cpu.cpu].socket_id; 1544 id.die = env->cpu[cpu.cpu].die_id; 1545 id.core = env->cpu[cpu.cpu].core_id; 1546 } 1547 1548 return id; 1549 } 1550 1551 static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(struct perf_cpu cpu, void *data) 1552 { 1553 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1554 1555 id.node = perf_env__numa_node(data, cpu); 1556 return id; 1557 } 1558 1559 static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused, 1560 struct perf_cpu cpu) 1561 { 1562 return perf_env__get_socket_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1563 } 1564 static struct aggr_cpu_id perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused, 1565 struct perf_cpu cpu) 1566 { 1567 return perf_env__get_die_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1568 } 1569 1570 static struct aggr_cpu_id perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused, 1571 struct perf_cpu cpu) 1572 { 1573 return perf_env__get_core_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1574 } 1575 1576 static struct aggr_cpu_id perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused, 1577 struct perf_cpu cpu) 1578 { 1579 return perf_env__get_node_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1580 } 1581 1582 static aggr_cpu_id_get_t aggr_mode__get_aggr_file(enum aggr_mode aggr_mode) 1583 { 1584 switch (aggr_mode) { 1585 case AGGR_SOCKET: 1586 return perf_env__get_socket_aggr_by_cpu; 1587 case AGGR_DIE: 1588 return perf_env__get_die_aggr_by_cpu; 1589 case AGGR_CORE: 1590 return perf_env__get_core_aggr_by_cpu; 1591 case AGGR_NODE: 1592 return perf_env__get_node_aggr_by_cpu; 1593 case AGGR_NONE: 1594 case AGGR_GLOBAL: 1595 case AGGR_THREAD: 1596 case AGGR_UNSET: 1597 default: 1598 return NULL; 1599 } 1600 } 1601 1602 static aggr_get_id_t aggr_mode__get_id_file(enum aggr_mode aggr_mode) 1603 { 1604 switch (aggr_mode) { 1605 case AGGR_SOCKET: 1606 return perf_stat__get_socket_file; 1607 case AGGR_DIE: 1608 return perf_stat__get_die_file; 1609 case AGGR_CORE: 1610 return perf_stat__get_core_file; 1611 case AGGR_NODE: 1612 return perf_stat__get_node_file; 1613 case AGGR_NONE: 1614 case AGGR_GLOBAL: 1615 case AGGR_THREAD: 1616 case AGGR_UNSET: 1617 default: 1618 return NULL; 1619 } 1620 } 1621 1622 static int perf_stat_init_aggr_mode_file(struct perf_stat *st) 1623 { 1624 struct perf_env *env = &st->session->header.env; 1625 aggr_cpu_id_get_t get_id = aggr_mode__get_aggr_file(stat_config.aggr_mode); 1626 1627 if (!get_id) 1628 return 0; 1629 1630 stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.cpus, get_id, env); 1631 if (!stat_config.aggr_map) { 1632 pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]); 1633 return -1; 1634 } 1635 stat_config.aggr_get_id = aggr_mode__get_id_file(stat_config.aggr_mode); 1636 return 0; 1637 } 1638 1639 /* 1640 * Add default attributes, if there were no attributes specified or 1641 * if -d/--detailed, -d -d or -d -d -d is used: 1642 */ 1643 static int add_default_attributes(void) 1644 { 1645 int err; 1646 struct perf_event_attr default_attrs0[] = { 1647 1648 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, 1649 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES }, 1650 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, 1651 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, 1652 1653 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, 1654 }; 1655 struct perf_event_attr frontend_attrs[] = { 1656 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, 1657 }; 1658 struct perf_event_attr backend_attrs[] = { 1659 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, 1660 }; 1661 struct perf_event_attr default_attrs1[] = { 1662 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, 1663 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, 1664 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, 1665 1666 }; 1667 struct perf_event_attr default_sw_attrs[] = { 1668 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, 1669 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES }, 1670 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, 1671 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, 1672 }; 1673 1674 /* 1675 * Detailed stats (-d), covering the L1 and last level data caches: 1676 */ 1677 struct perf_event_attr detailed_attrs[] = { 1678 1679 { .type = PERF_TYPE_HW_CACHE, 1680 .config = 1681 PERF_COUNT_HW_CACHE_L1D << 0 | 1682 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1683 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1684 1685 { .type = PERF_TYPE_HW_CACHE, 1686 .config = 1687 PERF_COUNT_HW_CACHE_L1D << 0 | 1688 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1689 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1690 1691 { .type = PERF_TYPE_HW_CACHE, 1692 .config = 1693 PERF_COUNT_HW_CACHE_LL << 0 | 1694 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1695 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1696 1697 { .type = PERF_TYPE_HW_CACHE, 1698 .config = 1699 PERF_COUNT_HW_CACHE_LL << 0 | 1700 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1701 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1702 }; 1703 1704 /* 1705 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches: 1706 */ 1707 struct perf_event_attr very_detailed_attrs[] = { 1708 1709 { .type = PERF_TYPE_HW_CACHE, 1710 .config = 1711 PERF_COUNT_HW_CACHE_L1I << 0 | 1712 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1713 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1714 1715 { .type = PERF_TYPE_HW_CACHE, 1716 .config = 1717 PERF_COUNT_HW_CACHE_L1I << 0 | 1718 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1719 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1720 1721 { .type = PERF_TYPE_HW_CACHE, 1722 .config = 1723 PERF_COUNT_HW_CACHE_DTLB << 0 | 1724 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1725 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1726 1727 { .type = PERF_TYPE_HW_CACHE, 1728 .config = 1729 PERF_COUNT_HW_CACHE_DTLB << 0 | 1730 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1731 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1732 1733 { .type = PERF_TYPE_HW_CACHE, 1734 .config = 1735 PERF_COUNT_HW_CACHE_ITLB << 0 | 1736 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1737 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1738 1739 { .type = PERF_TYPE_HW_CACHE, 1740 .config = 1741 PERF_COUNT_HW_CACHE_ITLB << 0 | 1742 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1743 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1744 1745 }; 1746 1747 /* 1748 * Very, very detailed stats (-d -d -d), adding prefetch events: 1749 */ 1750 struct perf_event_attr very_very_detailed_attrs[] = { 1751 1752 { .type = PERF_TYPE_HW_CACHE, 1753 .config = 1754 PERF_COUNT_HW_CACHE_L1D << 0 | 1755 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | 1756 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1757 1758 { .type = PERF_TYPE_HW_CACHE, 1759 .config = 1760 PERF_COUNT_HW_CACHE_L1D << 0 | 1761 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | 1762 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1763 }; 1764 /* Set attrs if no event is selected and !null_run: */ 1765 if (stat_config.null_run) 1766 return 0; 1767 1768 if (transaction_run) { 1769 struct parse_events_error errinfo; 1770 /* Handle -T as -M transaction. Once platform specific metrics 1771 * support has been added to the json files, all architectures 1772 * will use this approach. To determine transaction support 1773 * on an architecture test for such a metric name. 1774 */ 1775 if (metricgroup__has_metric("transaction")) { 1776 struct option opt = { .value = &evsel_list }; 1777 1778 return metricgroup__parse_groups(&opt, "transaction", 1779 stat_config.metric_no_group, 1780 stat_config.metric_no_merge, 1781 &stat_config.metric_events); 1782 } 1783 1784 parse_events_error__init(&errinfo); 1785 if (pmu_have_event("cpu", "cycles-ct") && 1786 pmu_have_event("cpu", "el-start")) 1787 err = parse_events(evsel_list, transaction_attrs, 1788 &errinfo); 1789 else 1790 err = parse_events(evsel_list, 1791 transaction_limited_attrs, 1792 &errinfo); 1793 if (err) { 1794 fprintf(stderr, "Cannot set up transaction events\n"); 1795 parse_events_error__print(&errinfo, transaction_attrs); 1796 } 1797 parse_events_error__exit(&errinfo); 1798 return err ? -1 : 0; 1799 } 1800 1801 if (smi_cost) { 1802 struct parse_events_error errinfo; 1803 int smi; 1804 1805 if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) { 1806 fprintf(stderr, "freeze_on_smi is not supported.\n"); 1807 return -1; 1808 } 1809 1810 if (!smi) { 1811 if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) { 1812 fprintf(stderr, "Failed to set freeze_on_smi.\n"); 1813 return -1; 1814 } 1815 smi_reset = true; 1816 } 1817 1818 if (!pmu_have_event("msr", "aperf") || 1819 !pmu_have_event("msr", "smi")) { 1820 fprintf(stderr, "To measure SMI cost, it needs " 1821 "msr/aperf/, msr/smi/ and cpu/cycles/ support\n"); 1822 return -1; 1823 } 1824 if (!force_metric_only) 1825 stat_config.metric_only = true; 1826 1827 parse_events_error__init(&errinfo); 1828 err = parse_events(evsel_list, smi_cost_attrs, &errinfo); 1829 if (err) { 1830 parse_events_error__print(&errinfo, smi_cost_attrs); 1831 fprintf(stderr, "Cannot set up SMI cost events\n"); 1832 } 1833 parse_events_error__exit(&errinfo); 1834 return err ? -1 : 0; 1835 } 1836 1837 if (topdown_run) { 1838 const char **metric_attrs = topdown_metric_attrs; 1839 unsigned int max_level = 1; 1840 char *str = NULL; 1841 bool warn = false; 1842 1843 if (!force_metric_only) 1844 stat_config.metric_only = true; 1845 1846 if (pmu_have_event("cpu", topdown_metric_L2_attrs[5])) { 1847 metric_attrs = topdown_metric_L2_attrs; 1848 max_level = 2; 1849 } 1850 1851 if (stat_config.topdown_level > max_level) { 1852 pr_err("Invalid top-down metrics level. The max level is %u.\n", max_level); 1853 return -1; 1854 } else if (!stat_config.topdown_level) 1855 stat_config.topdown_level = max_level; 1856 1857 if (topdown_filter_events(metric_attrs, &str, 1) < 0) { 1858 pr_err("Out of memory\n"); 1859 return -1; 1860 } 1861 if (metric_attrs[0] && str) { 1862 if (!stat_config.interval && !stat_config.metric_only) { 1863 fprintf(stat_config.output, 1864 "Topdown accuracy may decrease when measuring long periods.\n" 1865 "Please print the result regularly, e.g. -I1000\n"); 1866 } 1867 goto setup_metrics; 1868 } 1869 1870 zfree(&str); 1871 1872 if (stat_config.aggr_mode != AGGR_GLOBAL && 1873 stat_config.aggr_mode != AGGR_CORE) { 1874 pr_err("top down event configuration requires --per-core mode\n"); 1875 return -1; 1876 } 1877 stat_config.aggr_mode = AGGR_CORE; 1878 if (nr_cgroups || !target__has_cpu(&target)) { 1879 pr_err("top down event configuration requires system-wide mode (-a)\n"); 1880 return -1; 1881 } 1882 1883 if (topdown_filter_events(topdown_attrs, &str, 1884 arch_topdown_check_group(&warn)) < 0) { 1885 pr_err("Out of memory\n"); 1886 return -1; 1887 } 1888 if (topdown_attrs[0] && str) { 1889 struct parse_events_error errinfo; 1890 if (warn) 1891 arch_topdown_group_warn(); 1892 setup_metrics: 1893 parse_events_error__init(&errinfo); 1894 err = parse_events(evsel_list, str, &errinfo); 1895 if (err) { 1896 fprintf(stderr, 1897 "Cannot set up top down events %s: %d\n", 1898 str, err); 1899 parse_events_error__print(&errinfo, str); 1900 parse_events_error__exit(&errinfo); 1901 free(str); 1902 return -1; 1903 } 1904 parse_events_error__exit(&errinfo); 1905 } else { 1906 fprintf(stderr, "System does not support topdown\n"); 1907 return -1; 1908 } 1909 free(str); 1910 } 1911 1912 if (!evsel_list->core.nr_entries) { 1913 if (perf_pmu__has_hybrid()) { 1914 struct parse_events_error errinfo; 1915 const char *hybrid_str = "cycles,instructions,branches,branch-misses"; 1916 1917 if (target__has_cpu(&target)) 1918 default_sw_attrs[0].config = PERF_COUNT_SW_CPU_CLOCK; 1919 1920 if (evlist__add_default_attrs(evsel_list, 1921 default_sw_attrs) < 0) { 1922 return -1; 1923 } 1924 1925 parse_events_error__init(&errinfo); 1926 err = parse_events(evsel_list, hybrid_str, &errinfo); 1927 if (err) { 1928 fprintf(stderr, 1929 "Cannot set up hybrid events %s: %d\n", 1930 hybrid_str, err); 1931 parse_events_error__print(&errinfo, hybrid_str); 1932 } 1933 parse_events_error__exit(&errinfo); 1934 return err ? -1 : 0; 1935 } 1936 1937 if (target__has_cpu(&target)) 1938 default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK; 1939 1940 if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0) 1941 return -1; 1942 if (pmu_have_event("cpu", "stalled-cycles-frontend")) { 1943 if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0) 1944 return -1; 1945 } 1946 if (pmu_have_event("cpu", "stalled-cycles-backend")) { 1947 if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0) 1948 return -1; 1949 } 1950 if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0) 1951 return -1; 1952 1953 stat_config.topdown_level = TOPDOWN_MAX_LEVEL; 1954 if (arch_evlist__add_default_attrs(evsel_list) < 0) 1955 return -1; 1956 } 1957 1958 /* Detailed events get appended to the event list: */ 1959 1960 if (detailed_run < 1) 1961 return 0; 1962 1963 /* Append detailed run extra attributes: */ 1964 if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0) 1965 return -1; 1966 1967 if (detailed_run < 2) 1968 return 0; 1969 1970 /* Append very detailed run extra attributes: */ 1971 if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0) 1972 return -1; 1973 1974 if (detailed_run < 3) 1975 return 0; 1976 1977 /* Append very, very detailed run extra attributes: */ 1978 return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); 1979 } 1980 1981 static const char * const stat_record_usage[] = { 1982 "perf stat record [<options>]", 1983 NULL, 1984 }; 1985 1986 static void init_features(struct perf_session *session) 1987 { 1988 int feat; 1989 1990 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++) 1991 perf_header__set_feat(&session->header, feat); 1992 1993 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT); 1994 perf_header__clear_feat(&session->header, HEADER_BUILD_ID); 1995 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA); 1996 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK); 1997 perf_header__clear_feat(&session->header, HEADER_AUXTRACE); 1998 } 1999 2000 static int __cmd_record(int argc, const char **argv) 2001 { 2002 struct perf_session *session; 2003 struct perf_data *data = &perf_stat.data; 2004 2005 argc = parse_options(argc, argv, stat_options, stat_record_usage, 2006 PARSE_OPT_STOP_AT_NON_OPTION); 2007 2008 if (output_name) 2009 data->path = output_name; 2010 2011 if (stat_config.run_count != 1 || forever) { 2012 pr_err("Cannot use -r option with perf stat record.\n"); 2013 return -1; 2014 } 2015 2016 session = perf_session__new(data, NULL); 2017 if (IS_ERR(session)) { 2018 pr_err("Perf session creation failed\n"); 2019 return PTR_ERR(session); 2020 } 2021 2022 init_features(session); 2023 2024 session->evlist = evsel_list; 2025 perf_stat.session = session; 2026 perf_stat.record = true; 2027 return argc; 2028 } 2029 2030 static int process_stat_round_event(struct perf_session *session, 2031 union perf_event *event) 2032 { 2033 struct perf_record_stat_round *stat_round = &event->stat_round; 2034 struct evsel *counter; 2035 struct timespec tsh, *ts = NULL; 2036 const char **argv = session->header.env.cmdline_argv; 2037 int argc = session->header.env.nr_cmdline; 2038 2039 evlist__for_each_entry(evsel_list, counter) 2040 perf_stat_process_counter(&stat_config, counter); 2041 2042 if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL) 2043 update_stats(&walltime_nsecs_stats, stat_round->time); 2044 2045 if (stat_config.interval && stat_round->time) { 2046 tsh.tv_sec = stat_round->time / NSEC_PER_SEC; 2047 tsh.tv_nsec = stat_round->time % NSEC_PER_SEC; 2048 ts = &tsh; 2049 } 2050 2051 print_counters(ts, argc, argv); 2052 return 0; 2053 } 2054 2055 static 2056 int process_stat_config_event(struct perf_session *session, 2057 union perf_event *event) 2058 { 2059 struct perf_tool *tool = session->tool; 2060 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 2061 2062 perf_event__read_stat_config(&stat_config, &event->stat_config); 2063 2064 if (perf_cpu_map__empty(st->cpus)) { 2065 if (st->aggr_mode != AGGR_UNSET) 2066 pr_warning("warning: processing task data, aggregation mode not set\n"); 2067 return 0; 2068 } 2069 2070 if (st->aggr_mode != AGGR_UNSET) 2071 stat_config.aggr_mode = st->aggr_mode; 2072 2073 if (perf_stat.data.is_pipe) 2074 perf_stat_init_aggr_mode(); 2075 else 2076 perf_stat_init_aggr_mode_file(st); 2077 2078 return 0; 2079 } 2080 2081 static int set_maps(struct perf_stat *st) 2082 { 2083 if (!st->cpus || !st->threads) 2084 return 0; 2085 2086 if (WARN_ONCE(st->maps_allocated, "stats double allocation\n")) 2087 return -EINVAL; 2088 2089 perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads); 2090 2091 if (evlist__alloc_stats(evsel_list, true)) 2092 return -ENOMEM; 2093 2094 st->maps_allocated = true; 2095 return 0; 2096 } 2097 2098 static 2099 int process_thread_map_event(struct perf_session *session, 2100 union perf_event *event) 2101 { 2102 struct perf_tool *tool = session->tool; 2103 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 2104 2105 if (st->threads) { 2106 pr_warning("Extra thread map event, ignoring.\n"); 2107 return 0; 2108 } 2109 2110 st->threads = thread_map__new_event(&event->thread_map); 2111 if (!st->threads) 2112 return -ENOMEM; 2113 2114 return set_maps(st); 2115 } 2116 2117 static 2118 int process_cpu_map_event(struct perf_session *session, 2119 union perf_event *event) 2120 { 2121 struct perf_tool *tool = session->tool; 2122 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 2123 struct perf_cpu_map *cpus; 2124 2125 if (st->cpus) { 2126 pr_warning("Extra cpu map event, ignoring.\n"); 2127 return 0; 2128 } 2129 2130 cpus = cpu_map__new_data(&event->cpu_map.data); 2131 if (!cpus) 2132 return -ENOMEM; 2133 2134 st->cpus = cpus; 2135 return set_maps(st); 2136 } 2137 2138 static const char * const stat_report_usage[] = { 2139 "perf stat report [<options>]", 2140 NULL, 2141 }; 2142 2143 static struct perf_stat perf_stat = { 2144 .tool = { 2145 .attr = perf_event__process_attr, 2146 .event_update = perf_event__process_event_update, 2147 .thread_map = process_thread_map_event, 2148 .cpu_map = process_cpu_map_event, 2149 .stat_config = process_stat_config_event, 2150 .stat = perf_event__process_stat_event, 2151 .stat_round = process_stat_round_event, 2152 }, 2153 .aggr_mode = AGGR_UNSET, 2154 }; 2155 2156 static int __cmd_report(int argc, const char **argv) 2157 { 2158 struct perf_session *session; 2159 const struct option options[] = { 2160 OPT_STRING('i', "input", &input_name, "file", "input file name"), 2161 OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode, 2162 "aggregate counts per processor socket", AGGR_SOCKET), 2163 OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode, 2164 "aggregate counts per processor die", AGGR_DIE), 2165 OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode, 2166 "aggregate counts per physical processor core", AGGR_CORE), 2167 OPT_SET_UINT(0, "per-node", &perf_stat.aggr_mode, 2168 "aggregate counts per numa node", AGGR_NODE), 2169 OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode, 2170 "disable CPU count aggregation", AGGR_NONE), 2171 OPT_END() 2172 }; 2173 struct stat st; 2174 int ret; 2175 2176 argc = parse_options(argc, argv, options, stat_report_usage, 0); 2177 2178 if (!input_name || !strlen(input_name)) { 2179 if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode)) 2180 input_name = "-"; 2181 else 2182 input_name = "perf.data"; 2183 } 2184 2185 perf_stat.data.path = input_name; 2186 perf_stat.data.mode = PERF_DATA_MODE_READ; 2187 2188 session = perf_session__new(&perf_stat.data, &perf_stat.tool); 2189 if (IS_ERR(session)) 2190 return PTR_ERR(session); 2191 2192 perf_stat.session = session; 2193 stat_config.output = stderr; 2194 evsel_list = session->evlist; 2195 2196 ret = perf_session__process_events(session); 2197 if (ret) 2198 return ret; 2199 2200 perf_session__delete(session); 2201 return 0; 2202 } 2203 2204 static void setup_system_wide(int forks) 2205 { 2206 /* 2207 * Make system wide (-a) the default target if 2208 * no target was specified and one of following 2209 * conditions is met: 2210 * 2211 * - there's no workload specified 2212 * - there is workload specified but all requested 2213 * events are system wide events 2214 */ 2215 if (!target__none(&target)) 2216 return; 2217 2218 if (!forks) 2219 target.system_wide = true; 2220 else { 2221 struct evsel *counter; 2222 2223 evlist__for_each_entry(evsel_list, counter) { 2224 if (!counter->core.system_wide && 2225 strcmp(counter->name, "duration_time")) { 2226 return; 2227 } 2228 } 2229 2230 if (evsel_list->core.nr_entries) 2231 target.system_wide = true; 2232 } 2233 } 2234 2235 int cmd_stat(int argc, const char **argv) 2236 { 2237 const char * const stat_usage[] = { 2238 "perf stat [<options>] [<command>]", 2239 NULL 2240 }; 2241 int status = -EINVAL, run_idx, err; 2242 const char *mode; 2243 FILE *output = stderr; 2244 unsigned int interval, timeout; 2245 const char * const stat_subcommands[] = { "record", "report" }; 2246 char errbuf[BUFSIZ]; 2247 2248 setlocale(LC_ALL, ""); 2249 2250 evsel_list = evlist__new(); 2251 if (evsel_list == NULL) 2252 return -ENOMEM; 2253 2254 parse_events__shrink_config_terms(); 2255 2256 /* String-parsing callback-based options would segfault when negated */ 2257 set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG); 2258 set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG); 2259 set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG); 2260 2261 argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands, 2262 (const char **) stat_usage, 2263 PARSE_OPT_STOP_AT_NON_OPTION); 2264 perf_stat__collect_metric_expr(evsel_list); 2265 perf_stat__init_shadow_stats(); 2266 2267 if (stat_config.csv_sep) { 2268 stat_config.csv_output = true; 2269 if (!strcmp(stat_config.csv_sep, "\\t")) 2270 stat_config.csv_sep = "\t"; 2271 } else 2272 stat_config.csv_sep = DEFAULT_SEPARATOR; 2273 2274 if (argc && !strncmp(argv[0], "rec", 3)) { 2275 argc = __cmd_record(argc, argv); 2276 if (argc < 0) 2277 return -1; 2278 } else if (argc && !strncmp(argv[0], "rep", 3)) 2279 return __cmd_report(argc, argv); 2280 2281 interval = stat_config.interval; 2282 timeout = stat_config.timeout; 2283 2284 /* 2285 * For record command the -o is already taken care of. 2286 */ 2287 if (!STAT_RECORD && output_name && strcmp(output_name, "-")) 2288 output = NULL; 2289 2290 if (output_name && output_fd) { 2291 fprintf(stderr, "cannot use both --output and --log-fd\n"); 2292 parse_options_usage(stat_usage, stat_options, "o", 1); 2293 parse_options_usage(NULL, stat_options, "log-fd", 0); 2294 goto out; 2295 } 2296 2297 if (stat_config.metric_only && stat_config.aggr_mode == AGGR_THREAD) { 2298 fprintf(stderr, "--metric-only is not supported with --per-thread\n"); 2299 goto out; 2300 } 2301 2302 if (stat_config.metric_only && stat_config.run_count > 1) { 2303 fprintf(stderr, "--metric-only is not supported with -r\n"); 2304 goto out; 2305 } 2306 2307 if (stat_config.walltime_run_table && stat_config.run_count <= 1) { 2308 fprintf(stderr, "--table is only supported with -r\n"); 2309 parse_options_usage(stat_usage, stat_options, "r", 1); 2310 parse_options_usage(NULL, stat_options, "table", 0); 2311 goto out; 2312 } 2313 2314 if (output_fd < 0) { 2315 fprintf(stderr, "argument to --log-fd must be a > 0\n"); 2316 parse_options_usage(stat_usage, stat_options, "log-fd", 0); 2317 goto out; 2318 } 2319 2320 if (!output && !stat_config.quiet) { 2321 struct timespec tm; 2322 mode = append_file ? "a" : "w"; 2323 2324 output = fopen(output_name, mode); 2325 if (!output) { 2326 perror("failed to create output file"); 2327 return -1; 2328 } 2329 clock_gettime(CLOCK_REALTIME, &tm); 2330 fprintf(output, "# started on %s\n", ctime(&tm.tv_sec)); 2331 } else if (output_fd > 0) { 2332 mode = append_file ? "a" : "w"; 2333 output = fdopen(output_fd, mode); 2334 if (!output) { 2335 perror("Failed opening logfd"); 2336 return -errno; 2337 } 2338 } 2339 2340 stat_config.output = output; 2341 2342 /* 2343 * let the spreadsheet do the pretty-printing 2344 */ 2345 if (stat_config.csv_output) { 2346 /* User explicitly passed -B? */ 2347 if (big_num_opt == 1) { 2348 fprintf(stderr, "-B option not supported with -x\n"); 2349 parse_options_usage(stat_usage, stat_options, "B", 1); 2350 parse_options_usage(NULL, stat_options, "x", 1); 2351 goto out; 2352 } else /* Nope, so disable big number formatting */ 2353 stat_config.big_num = false; 2354 } else if (big_num_opt == 0) /* User passed --no-big-num */ 2355 stat_config.big_num = false; 2356 2357 err = target__validate(&target); 2358 if (err) { 2359 target__strerror(&target, err, errbuf, BUFSIZ); 2360 pr_warning("%s\n", errbuf); 2361 } 2362 2363 setup_system_wide(argc); 2364 2365 /* 2366 * Display user/system times only for single 2367 * run and when there's specified tracee. 2368 */ 2369 if ((stat_config.run_count == 1) && target__none(&target)) 2370 stat_config.ru_display = true; 2371 2372 if (stat_config.run_count < 0) { 2373 pr_err("Run count must be a positive number\n"); 2374 parse_options_usage(stat_usage, stat_options, "r", 1); 2375 goto out; 2376 } else if (stat_config.run_count == 0) { 2377 forever = true; 2378 stat_config.run_count = 1; 2379 } 2380 2381 if (stat_config.walltime_run_table) { 2382 stat_config.walltime_run = zalloc(stat_config.run_count * sizeof(stat_config.walltime_run[0])); 2383 if (!stat_config.walltime_run) { 2384 pr_err("failed to setup -r option"); 2385 goto out; 2386 } 2387 } 2388 2389 if ((stat_config.aggr_mode == AGGR_THREAD) && 2390 !target__has_task(&target)) { 2391 if (!target.system_wide || target.cpu_list) { 2392 fprintf(stderr, "The --per-thread option is only " 2393 "available when monitoring via -p -t -a " 2394 "options or only --per-thread.\n"); 2395 parse_options_usage(NULL, stat_options, "p", 1); 2396 parse_options_usage(NULL, stat_options, "t", 1); 2397 goto out; 2398 } 2399 } 2400 2401 /* 2402 * no_aggr, cgroup are for system-wide only 2403 * --per-thread is aggregated per thread, we dont mix it with cpu mode 2404 */ 2405 if (((stat_config.aggr_mode != AGGR_GLOBAL && 2406 stat_config.aggr_mode != AGGR_THREAD) || 2407 (nr_cgroups || stat_config.cgroup_list)) && 2408 !target__has_cpu(&target)) { 2409 fprintf(stderr, "both cgroup and no-aggregation " 2410 "modes only available in system-wide mode\n"); 2411 2412 parse_options_usage(stat_usage, stat_options, "G", 1); 2413 parse_options_usage(NULL, stat_options, "A", 1); 2414 parse_options_usage(NULL, stat_options, "a", 1); 2415 parse_options_usage(NULL, stat_options, "for-each-cgroup", 0); 2416 goto out; 2417 } 2418 2419 if (stat_config.iostat_run) { 2420 status = iostat_prepare(evsel_list, &stat_config); 2421 if (status) 2422 goto out; 2423 if (iostat_mode == IOSTAT_LIST) { 2424 iostat_list(evsel_list, &stat_config); 2425 goto out; 2426 } else if (verbose) 2427 iostat_list(evsel_list, &stat_config); 2428 if (iostat_mode == IOSTAT_RUN && !target__has_cpu(&target)) 2429 target.system_wide = true; 2430 } 2431 2432 if (add_default_attributes()) 2433 goto out; 2434 2435 if (stat_config.cgroup_list) { 2436 if (nr_cgroups > 0) { 2437 pr_err("--cgroup and --for-each-cgroup cannot be used together\n"); 2438 parse_options_usage(stat_usage, stat_options, "G", 1); 2439 parse_options_usage(NULL, stat_options, "for-each-cgroup", 0); 2440 goto out; 2441 } 2442 2443 if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list, 2444 &stat_config.metric_events, true) < 0) { 2445 parse_options_usage(stat_usage, stat_options, 2446 "for-each-cgroup", 0); 2447 goto out; 2448 } 2449 } 2450 2451 if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide)) 2452 target.per_thread = true; 2453 2454 if (evlist__fix_hybrid_cpus(evsel_list, target.cpu_list)) { 2455 pr_err("failed to use cpu list %s\n", target.cpu_list); 2456 goto out; 2457 } 2458 2459 target.hybrid = perf_pmu__has_hybrid(); 2460 if (evlist__create_maps(evsel_list, &target) < 0) { 2461 if (target__has_task(&target)) { 2462 pr_err("Problems finding threads of monitor\n"); 2463 parse_options_usage(stat_usage, stat_options, "p", 1); 2464 parse_options_usage(NULL, stat_options, "t", 1); 2465 } else if (target__has_cpu(&target)) { 2466 perror("failed to parse CPUs map"); 2467 parse_options_usage(stat_usage, stat_options, "C", 1); 2468 parse_options_usage(NULL, stat_options, "a", 1); 2469 } 2470 goto out; 2471 } 2472 2473 evlist__check_cpu_maps(evsel_list); 2474 2475 /* 2476 * Initialize thread_map with comm names, 2477 * so we could print it out on output. 2478 */ 2479 if (stat_config.aggr_mode == AGGR_THREAD) { 2480 thread_map__read_comms(evsel_list->core.threads); 2481 if (target.system_wide) { 2482 if (runtime_stat_new(&stat_config, 2483 perf_thread_map__nr(evsel_list->core.threads))) { 2484 goto out; 2485 } 2486 } 2487 } 2488 2489 if (stat_config.aggr_mode == AGGR_NODE) 2490 cpu__setup_cpunode_map(); 2491 2492 if (stat_config.times && interval) 2493 interval_count = true; 2494 else if (stat_config.times && !interval) { 2495 pr_err("interval-count option should be used together with " 2496 "interval-print.\n"); 2497 parse_options_usage(stat_usage, stat_options, "interval-count", 0); 2498 parse_options_usage(stat_usage, stat_options, "I", 1); 2499 goto out; 2500 } 2501 2502 if (timeout && timeout < 100) { 2503 if (timeout < 10) { 2504 pr_err("timeout must be >= 10ms.\n"); 2505 parse_options_usage(stat_usage, stat_options, "timeout", 0); 2506 goto out; 2507 } else 2508 pr_warning("timeout < 100ms. " 2509 "The overhead percentage could be high in some cases. " 2510 "Please proceed with caution.\n"); 2511 } 2512 if (timeout && interval) { 2513 pr_err("timeout option is not supported with interval-print.\n"); 2514 parse_options_usage(stat_usage, stat_options, "timeout", 0); 2515 parse_options_usage(stat_usage, stat_options, "I", 1); 2516 goto out; 2517 } 2518 2519 if (evlist__alloc_stats(evsel_list, interval)) 2520 goto out; 2521 2522 if (perf_stat_init_aggr_mode()) 2523 goto out; 2524 2525 /* 2526 * Set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless 2527 * while avoiding that older tools show confusing messages. 2528 * 2529 * However for pipe sessions we need to keep it zero, 2530 * because script's perf_evsel__check_attr is triggered 2531 * by attr->sample_type != 0, and we can't run it on 2532 * stat sessions. 2533 */ 2534 stat_config.identifier = !(STAT_RECORD && perf_stat.data.is_pipe); 2535 2536 /* 2537 * We dont want to block the signals - that would cause 2538 * child tasks to inherit that and Ctrl-C would not work. 2539 * What we want is for Ctrl-C to work in the exec()-ed 2540 * task, but being ignored by perf stat itself: 2541 */ 2542 atexit(sig_atexit); 2543 if (!forever) 2544 signal(SIGINT, skip_signal); 2545 signal(SIGCHLD, skip_signal); 2546 signal(SIGALRM, skip_signal); 2547 signal(SIGABRT, skip_signal); 2548 2549 if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack)) 2550 goto out; 2551 2552 status = 0; 2553 for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) { 2554 if (stat_config.run_count != 1 && verbose > 0) 2555 fprintf(output, "[ perf stat: executing run #%d ... ]\n", 2556 run_idx + 1); 2557 2558 if (run_idx != 0) 2559 evlist__reset_prev_raw_counts(evsel_list); 2560 2561 status = run_perf_stat(argc, argv, run_idx); 2562 if (forever && status != -1 && !interval) { 2563 print_counters(NULL, argc, argv); 2564 perf_stat__reset_stats(); 2565 } 2566 } 2567 2568 if (!forever && status != -1 && (!interval || stat_config.summary)) 2569 print_counters(NULL, argc, argv); 2570 2571 evlist__finalize_ctlfd(evsel_list); 2572 2573 if (STAT_RECORD) { 2574 /* 2575 * We synthesize the kernel mmap record just so that older tools 2576 * don't emit warnings about not being able to resolve symbols 2577 * due to /proc/sys/kernel/kptr_restrict settings and instead provide 2578 * a saner message about no samples being in the perf.data file. 2579 * 2580 * This also serves to suppress a warning about f_header.data.size == 0 2581 * in header.c at the moment 'perf stat record' gets introduced, which 2582 * is not really needed once we start adding the stat specific PERF_RECORD_ 2583 * records, but the need to suppress the kptr_restrict messages in older 2584 * tools remain -acme 2585 */ 2586 int fd = perf_data__fd(&perf_stat.data); 2587 2588 err = perf_event__synthesize_kernel_mmap((void *)&perf_stat, 2589 process_synthesized_event, 2590 &perf_stat.session->machines.host); 2591 if (err) { 2592 pr_warning("Couldn't synthesize the kernel mmap record, harmless, " 2593 "older tools may produce warnings about this file\n."); 2594 } 2595 2596 if (!interval) { 2597 if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL)) 2598 pr_err("failed to write stat round event\n"); 2599 } 2600 2601 if (!perf_stat.data.is_pipe) { 2602 perf_stat.session->header.data_size += perf_stat.bytes_written; 2603 perf_session__write_header(perf_stat.session, evsel_list, fd, true); 2604 } 2605 2606 evlist__close(evsel_list); 2607 perf_session__delete(perf_stat.session); 2608 } 2609 2610 perf_stat__exit_aggr_mode(); 2611 evlist__free_stats(evsel_list); 2612 out: 2613 if (stat_config.iostat_run) 2614 iostat_release(evsel_list); 2615 2616 zfree(&stat_config.walltime_run); 2617 2618 if (smi_cost && smi_reset) 2619 sysfs__write_int(FREEZE_ON_SMI_PATH, 0); 2620 2621 evlist__delete(evsel_list); 2622 2623 metricgroup__rblist_exit(&stat_config.metric_events); 2624 runtime_stat_delete(&stat_config); 2625 evlist__close_control(stat_config.ctl_fd, stat_config.ctl_fd_ack, &stat_config.ctl_fd_close); 2626 2627 return status; 2628 } 2629