1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * builtin-stat.c 4 * 5 * Builtin stat command: Give a precise performance counters summary 6 * overview about any workload, CPU or specific PID. 7 * 8 * Sample output: 9 10 $ perf stat ./hackbench 10 11 12 Time: 0.118 13 14 Performance counter stats for './hackbench 10': 15 16 1708.761321 task-clock # 11.037 CPUs utilized 17 41,190 context-switches # 0.024 M/sec 18 6,735 CPU-migrations # 0.004 M/sec 19 17,318 page-faults # 0.010 M/sec 20 5,205,202,243 cycles # 3.046 GHz 21 3,856,436,920 stalled-cycles-frontend # 74.09% frontend cycles idle 22 1,600,790,871 stalled-cycles-backend # 30.75% backend cycles idle 23 2,603,501,247 instructions # 0.50 insns per cycle 24 # 1.48 stalled cycles per insn 25 484,357,498 branches # 283.455 M/sec 26 6,388,934 branch-misses # 1.32% of all branches 27 28 0.154822978 seconds time elapsed 29 30 * 31 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com> 32 * 33 * Improvements and fixes by: 34 * 35 * Arjan van de Ven <arjan@linux.intel.com> 36 * Yanmin Zhang <yanmin.zhang@intel.com> 37 * Wu Fengguang <fengguang.wu@intel.com> 38 * Mike Galbraith <efault@gmx.de> 39 * Paul Mackerras <paulus@samba.org> 40 * Jaswinder Singh Rajput <jaswinder@kernel.org> 41 */ 42 43 #include "builtin.h" 44 #include "perf.h" 45 #include "util/cgroup.h" 46 #include <subcmd/parse-options.h> 47 #include "util/parse-events.h" 48 #include "util/pmu.h" 49 #include "util/event.h" 50 #include "util/evlist.h" 51 #include "util/evlist-hybrid.h" 52 #include "util/evsel.h" 53 #include "util/debug.h" 54 #include "util/color.h" 55 #include "util/stat.h" 56 #include "util/header.h" 57 #include "util/cpumap.h" 58 #include "util/thread_map.h" 59 #include "util/counts.h" 60 #include "util/topdown.h" 61 #include "util/session.h" 62 #include "util/tool.h" 63 #include "util/string2.h" 64 #include "util/metricgroup.h" 65 #include "util/synthetic-events.h" 66 #include "util/target.h" 67 #include "util/time-utils.h" 68 #include "util/top.h" 69 #include "util/affinity.h" 70 #include "util/pfm.h" 71 #include "util/bpf_counter.h" 72 #include "util/iostat.h" 73 #include "util/pmu-hybrid.h" 74 #include "asm/bug.h" 75 76 #include <linux/time64.h> 77 #include <linux/zalloc.h> 78 #include <api/fs/fs.h> 79 #include <errno.h> 80 #include <signal.h> 81 #include <stdlib.h> 82 #include <sys/prctl.h> 83 #include <inttypes.h> 84 #include <locale.h> 85 #include <math.h> 86 #include <sys/types.h> 87 #include <sys/stat.h> 88 #include <sys/wait.h> 89 #include <unistd.h> 90 #include <sys/time.h> 91 #include <sys/resource.h> 92 #include <linux/err.h> 93 94 #include <linux/ctype.h> 95 #include <perf/evlist.h> 96 97 #define DEFAULT_SEPARATOR " " 98 #define FREEZE_ON_SMI_PATH "devices/cpu/freeze_on_smi" 99 100 static void print_counters(struct timespec *ts, int argc, const char **argv); 101 102 /* Default events used for perf stat -T */ 103 static const char *transaction_attrs = { 104 "task-clock," 105 "{" 106 "instructions," 107 "cycles," 108 "cpu/cycles-t/," 109 "cpu/tx-start/," 110 "cpu/el-start/," 111 "cpu/cycles-ct/" 112 "}" 113 }; 114 115 /* More limited version when the CPU does not have all events. */ 116 static const char * transaction_limited_attrs = { 117 "task-clock," 118 "{" 119 "instructions," 120 "cycles," 121 "cpu/cycles-t/," 122 "cpu/tx-start/" 123 "}" 124 }; 125 126 static const char * topdown_attrs[] = { 127 "topdown-total-slots", 128 "topdown-slots-retired", 129 "topdown-recovery-bubbles", 130 "topdown-fetch-bubbles", 131 "topdown-slots-issued", 132 NULL, 133 }; 134 135 static const char *topdown_metric_attrs[] = { 136 "slots", 137 "topdown-retiring", 138 "topdown-bad-spec", 139 "topdown-fe-bound", 140 "topdown-be-bound", 141 NULL, 142 }; 143 144 static const char *topdown_metric_L2_attrs[] = { 145 "slots", 146 "topdown-retiring", 147 "topdown-bad-spec", 148 "topdown-fe-bound", 149 "topdown-be-bound", 150 "topdown-heavy-ops", 151 "topdown-br-mispredict", 152 "topdown-fetch-lat", 153 "topdown-mem-bound", 154 NULL, 155 }; 156 157 #define TOPDOWN_MAX_LEVEL 2 158 159 static const char *smi_cost_attrs = { 160 "{" 161 "msr/aperf/," 162 "msr/smi/," 163 "cycles" 164 "}" 165 }; 166 167 static struct evlist *evsel_list; 168 static bool all_counters_use_bpf = true; 169 170 static struct target target = { 171 .uid = UINT_MAX, 172 }; 173 174 #define METRIC_ONLY_LEN 20 175 176 static volatile pid_t child_pid = -1; 177 static int detailed_run = 0; 178 static bool transaction_run; 179 static bool topdown_run = false; 180 static bool smi_cost = false; 181 static bool smi_reset = false; 182 static int big_num_opt = -1; 183 static bool group = false; 184 static const char *pre_cmd = NULL; 185 static const char *post_cmd = NULL; 186 static bool sync_run = false; 187 static bool forever = false; 188 static bool force_metric_only = false; 189 static struct timespec ref_time; 190 static bool append_file; 191 static bool interval_count; 192 static const char *output_name; 193 static int output_fd; 194 195 struct perf_stat { 196 bool record; 197 struct perf_data data; 198 struct perf_session *session; 199 u64 bytes_written; 200 struct perf_tool tool; 201 bool maps_allocated; 202 struct perf_cpu_map *cpus; 203 struct perf_thread_map *threads; 204 enum aggr_mode aggr_mode; 205 }; 206 207 static struct perf_stat perf_stat; 208 #define STAT_RECORD perf_stat.record 209 210 static volatile int done = 0; 211 212 static struct perf_stat_config stat_config = { 213 .aggr_mode = AGGR_GLOBAL, 214 .scale = true, 215 .unit_width = 4, /* strlen("unit") */ 216 .run_count = 1, 217 .metric_only_len = METRIC_ONLY_LEN, 218 .walltime_nsecs_stats = &walltime_nsecs_stats, 219 .big_num = true, 220 .ctl_fd = -1, 221 .ctl_fd_ack = -1, 222 .iostat_run = false, 223 }; 224 225 static bool cpus_map_matched(struct evsel *a, struct evsel *b) 226 { 227 if (!a->core.cpus && !b->core.cpus) 228 return true; 229 230 if (!a->core.cpus || !b->core.cpus) 231 return false; 232 233 if (a->core.cpus->nr != b->core.cpus->nr) 234 return false; 235 236 for (int i = 0; i < a->core.cpus->nr; i++) { 237 if (a->core.cpus->map[i].cpu != b->core.cpus->map[i].cpu) 238 return false; 239 } 240 241 return true; 242 } 243 244 static void evlist__check_cpu_maps(struct evlist *evlist) 245 { 246 struct evsel *evsel, *pos, *leader; 247 char buf[1024]; 248 249 if (evlist__has_hybrid(evlist)) 250 evlist__warn_hybrid_group(evlist); 251 252 evlist__for_each_entry(evlist, evsel) { 253 leader = evsel__leader(evsel); 254 255 /* Check that leader matches cpus with each member. */ 256 if (leader == evsel) 257 continue; 258 if (cpus_map_matched(leader, evsel)) 259 continue; 260 261 /* If there's mismatch disable the group and warn user. */ 262 WARN_ONCE(1, "WARNING: grouped events cpus do not match, disabling group:\n"); 263 evsel__group_desc(leader, buf, sizeof(buf)); 264 pr_warning(" %s\n", buf); 265 266 if (verbose) { 267 cpu_map__snprint(leader->core.cpus, buf, sizeof(buf)); 268 pr_warning(" %s: %s\n", leader->name, buf); 269 cpu_map__snprint(evsel->core.cpus, buf, sizeof(buf)); 270 pr_warning(" %s: %s\n", evsel->name, buf); 271 } 272 273 for_each_group_evsel(pos, leader) { 274 evsel__set_leader(pos, pos); 275 pos->core.nr_members = 0; 276 } 277 evsel->core.leader->nr_members = 0; 278 } 279 } 280 281 static inline void diff_timespec(struct timespec *r, struct timespec *a, 282 struct timespec *b) 283 { 284 r->tv_sec = a->tv_sec - b->tv_sec; 285 if (a->tv_nsec < b->tv_nsec) { 286 r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec; 287 r->tv_sec--; 288 } else { 289 r->tv_nsec = a->tv_nsec - b->tv_nsec ; 290 } 291 } 292 293 static void perf_stat__reset_stats(void) 294 { 295 int i; 296 297 evlist__reset_stats(evsel_list); 298 perf_stat__reset_shadow_stats(); 299 300 for (i = 0; i < stat_config.stats_num; i++) 301 perf_stat__reset_shadow_per_stat(&stat_config.stats[i]); 302 } 303 304 static int process_synthesized_event(struct perf_tool *tool __maybe_unused, 305 union perf_event *event, 306 struct perf_sample *sample __maybe_unused, 307 struct machine *machine __maybe_unused) 308 { 309 if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) { 310 pr_err("failed to write perf data, error: %m\n"); 311 return -1; 312 } 313 314 perf_stat.bytes_written += event->header.size; 315 return 0; 316 } 317 318 static int write_stat_round_event(u64 tm, u64 type) 319 { 320 return perf_event__synthesize_stat_round(NULL, tm, type, 321 process_synthesized_event, 322 NULL); 323 } 324 325 #define WRITE_STAT_ROUND_EVENT(time, interval) \ 326 write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval) 327 328 #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y) 329 330 static int evsel__write_stat_event(struct evsel *counter, int cpu_map_idx, u32 thread, 331 struct perf_counts_values *count) 332 { 333 struct perf_sample_id *sid = SID(counter, cpu_map_idx, thread); 334 struct perf_cpu cpu = perf_cpu_map__cpu(evsel__cpus(counter), cpu_map_idx); 335 336 return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count, 337 process_synthesized_event, NULL); 338 } 339 340 static int read_single_counter(struct evsel *counter, int cpu_map_idx, 341 int thread, struct timespec *rs) 342 { 343 if (counter->tool_event == PERF_TOOL_DURATION_TIME) { 344 u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL; 345 struct perf_counts_values *count = 346 perf_counts(counter->counts, cpu_map_idx, thread); 347 count->ena = count->run = val; 348 count->val = val; 349 return 0; 350 } 351 return evsel__read_counter(counter, cpu_map_idx, thread); 352 } 353 354 /* 355 * Read out the results of a single counter: 356 * do not aggregate counts across CPUs in system-wide mode 357 */ 358 static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu_map_idx) 359 { 360 int nthreads = perf_thread_map__nr(evsel_list->core.threads); 361 int thread; 362 363 if (!counter->supported) 364 return -ENOENT; 365 366 if (counter->core.system_wide) 367 nthreads = 1; 368 369 for (thread = 0; thread < nthreads; thread++) { 370 struct perf_counts_values *count; 371 372 count = perf_counts(counter->counts, cpu_map_idx, thread); 373 374 /* 375 * The leader's group read loads data into its group members 376 * (via evsel__read_counter()) and sets their count->loaded. 377 */ 378 if (!perf_counts__is_loaded(counter->counts, cpu_map_idx, thread) && 379 read_single_counter(counter, cpu_map_idx, thread, rs)) { 380 counter->counts->scaled = -1; 381 perf_counts(counter->counts, cpu_map_idx, thread)->ena = 0; 382 perf_counts(counter->counts, cpu_map_idx, thread)->run = 0; 383 return -1; 384 } 385 386 perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, false); 387 388 if (STAT_RECORD) { 389 if (evsel__write_stat_event(counter, cpu_map_idx, thread, count)) { 390 pr_err("failed to write stat event\n"); 391 return -1; 392 } 393 } 394 395 if (verbose > 1) { 396 fprintf(stat_config.output, 397 "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", 398 evsel__name(counter), 399 perf_cpu_map__cpu(evsel__cpus(counter), 400 cpu_map_idx).cpu, 401 count->val, count->ena, count->run); 402 } 403 } 404 405 return 0; 406 } 407 408 static int read_affinity_counters(struct timespec *rs) 409 { 410 struct evlist_cpu_iterator evlist_cpu_itr; 411 struct affinity saved_affinity, *affinity; 412 413 if (all_counters_use_bpf) 414 return 0; 415 416 if (!target__has_cpu(&target) || target__has_per_thread(&target)) 417 affinity = NULL; 418 else if (affinity__setup(&saved_affinity) < 0) 419 return -1; 420 else 421 affinity = &saved_affinity; 422 423 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { 424 struct evsel *counter = evlist_cpu_itr.evsel; 425 426 if (evsel__is_bpf(counter)) 427 continue; 428 429 if (!counter->err) { 430 counter->err = read_counter_cpu(counter, rs, 431 evlist_cpu_itr.cpu_map_idx); 432 } 433 } 434 if (affinity) 435 affinity__cleanup(&saved_affinity); 436 437 return 0; 438 } 439 440 static int read_bpf_map_counters(void) 441 { 442 struct evsel *counter; 443 int err; 444 445 evlist__for_each_entry(evsel_list, counter) { 446 if (!evsel__is_bpf(counter)) 447 continue; 448 449 err = bpf_counter__read(counter); 450 if (err) 451 return err; 452 } 453 return 0; 454 } 455 456 static void read_counters(struct timespec *rs) 457 { 458 struct evsel *counter; 459 460 if (!stat_config.stop_read_counter) { 461 if (read_bpf_map_counters() || 462 read_affinity_counters(rs)) 463 return; 464 } 465 466 evlist__for_each_entry(evsel_list, counter) { 467 if (counter->err) 468 pr_debug("failed to read counter %s\n", counter->name); 469 if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter)) 470 pr_warning("failed to process counter %s\n", counter->name); 471 counter->err = 0; 472 } 473 } 474 475 static int runtime_stat_new(struct perf_stat_config *config, int nthreads) 476 { 477 int i; 478 479 config->stats = calloc(nthreads, sizeof(struct runtime_stat)); 480 if (!config->stats) 481 return -1; 482 483 config->stats_num = nthreads; 484 485 for (i = 0; i < nthreads; i++) 486 runtime_stat__init(&config->stats[i]); 487 488 return 0; 489 } 490 491 static void runtime_stat_delete(struct perf_stat_config *config) 492 { 493 int i; 494 495 if (!config->stats) 496 return; 497 498 for (i = 0; i < config->stats_num; i++) 499 runtime_stat__exit(&config->stats[i]); 500 501 zfree(&config->stats); 502 } 503 504 static void runtime_stat_reset(struct perf_stat_config *config) 505 { 506 int i; 507 508 if (!config->stats) 509 return; 510 511 for (i = 0; i < config->stats_num; i++) 512 perf_stat__reset_shadow_per_stat(&config->stats[i]); 513 } 514 515 static void process_interval(void) 516 { 517 struct timespec ts, rs; 518 519 clock_gettime(CLOCK_MONOTONIC, &ts); 520 diff_timespec(&rs, &ts, &ref_time); 521 522 perf_stat__reset_shadow_per_stat(&rt_stat); 523 runtime_stat_reset(&stat_config); 524 read_counters(&rs); 525 526 if (STAT_RECORD) { 527 if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL)) 528 pr_err("failed to write stat round event\n"); 529 } 530 531 init_stats(&walltime_nsecs_stats); 532 update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL); 533 print_counters(&rs, 0, NULL); 534 } 535 536 static bool handle_interval(unsigned int interval, int *times) 537 { 538 if (interval) { 539 process_interval(); 540 if (interval_count && !(--(*times))) 541 return true; 542 } 543 return false; 544 } 545 546 static int enable_counters(void) 547 { 548 struct evsel *evsel; 549 int err; 550 551 evlist__for_each_entry(evsel_list, evsel) { 552 if (!evsel__is_bpf(evsel)) 553 continue; 554 555 err = bpf_counter__enable(evsel); 556 if (err) 557 return err; 558 } 559 560 if (stat_config.initial_delay < 0) { 561 pr_info(EVLIST_DISABLED_MSG); 562 return 0; 563 } 564 565 if (stat_config.initial_delay > 0) { 566 pr_info(EVLIST_DISABLED_MSG); 567 usleep(stat_config.initial_delay * USEC_PER_MSEC); 568 } 569 570 /* 571 * We need to enable counters only if: 572 * - we don't have tracee (attaching to task or cpu) 573 * - we have initial delay configured 574 */ 575 if (!target__none(&target) || stat_config.initial_delay) { 576 if (!all_counters_use_bpf) 577 evlist__enable(evsel_list); 578 if (stat_config.initial_delay > 0) 579 pr_info(EVLIST_ENABLED_MSG); 580 } 581 return 0; 582 } 583 584 static void disable_counters(void) 585 { 586 struct evsel *counter; 587 588 /* 589 * If we don't have tracee (attaching to task or cpu), counters may 590 * still be running. To get accurate group ratios, we must stop groups 591 * from counting before reading their constituent counters. 592 */ 593 if (!target__none(&target)) { 594 evlist__for_each_entry(evsel_list, counter) 595 bpf_counter__disable(counter); 596 if (!all_counters_use_bpf) 597 evlist__disable(evsel_list); 598 } 599 } 600 601 static volatile int workload_exec_errno; 602 603 /* 604 * evlist__prepare_workload will send a SIGUSR1 605 * if the fork fails, since we asked by setting its 606 * want_signal to true. 607 */ 608 static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info, 609 void *ucontext __maybe_unused) 610 { 611 workload_exec_errno = info->si_value.sival_int; 612 } 613 614 static bool evsel__should_store_id(struct evsel *counter) 615 { 616 return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID; 617 } 618 619 static bool is_target_alive(struct target *_target, 620 struct perf_thread_map *threads) 621 { 622 struct stat st; 623 int i; 624 625 if (!target__has_task(_target)) 626 return true; 627 628 for (i = 0; i < threads->nr; i++) { 629 char path[PATH_MAX]; 630 631 scnprintf(path, PATH_MAX, "%s/%d", procfs__mountpoint(), 632 threads->map[i].pid); 633 634 if (!stat(path, &st)) 635 return true; 636 } 637 638 return false; 639 } 640 641 static void process_evlist(struct evlist *evlist, unsigned int interval) 642 { 643 enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED; 644 645 if (evlist__ctlfd_process(evlist, &cmd) > 0) { 646 switch (cmd) { 647 case EVLIST_CTL_CMD_ENABLE: 648 if (interval) 649 process_interval(); 650 break; 651 case EVLIST_CTL_CMD_DISABLE: 652 if (interval) 653 process_interval(); 654 break; 655 case EVLIST_CTL_CMD_SNAPSHOT: 656 case EVLIST_CTL_CMD_ACK: 657 case EVLIST_CTL_CMD_UNSUPPORTED: 658 case EVLIST_CTL_CMD_EVLIST: 659 case EVLIST_CTL_CMD_STOP: 660 case EVLIST_CTL_CMD_PING: 661 default: 662 break; 663 } 664 } 665 } 666 667 static void compute_tts(struct timespec *time_start, struct timespec *time_stop, 668 int *time_to_sleep) 669 { 670 int tts = *time_to_sleep; 671 struct timespec time_diff; 672 673 diff_timespec(&time_diff, time_stop, time_start); 674 675 tts -= time_diff.tv_sec * MSEC_PER_SEC + 676 time_diff.tv_nsec / NSEC_PER_MSEC; 677 678 if (tts < 0) 679 tts = 0; 680 681 *time_to_sleep = tts; 682 } 683 684 static int dispatch_events(bool forks, int timeout, int interval, int *times) 685 { 686 int child_exited = 0, status = 0; 687 int time_to_sleep, sleep_time; 688 struct timespec time_start, time_stop; 689 690 if (interval) 691 sleep_time = interval; 692 else if (timeout) 693 sleep_time = timeout; 694 else 695 sleep_time = 1000; 696 697 time_to_sleep = sleep_time; 698 699 while (!done) { 700 if (forks) 701 child_exited = waitpid(child_pid, &status, WNOHANG); 702 else 703 child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0; 704 705 if (child_exited) 706 break; 707 708 clock_gettime(CLOCK_MONOTONIC, &time_start); 709 if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) { /* poll timeout or EINTR */ 710 if (timeout || handle_interval(interval, times)) 711 break; 712 time_to_sleep = sleep_time; 713 } else { /* fd revent */ 714 process_evlist(evsel_list, interval); 715 clock_gettime(CLOCK_MONOTONIC, &time_stop); 716 compute_tts(&time_start, &time_stop, &time_to_sleep); 717 } 718 } 719 720 return status; 721 } 722 723 enum counter_recovery { 724 COUNTER_SKIP, 725 COUNTER_RETRY, 726 COUNTER_FATAL, 727 }; 728 729 static enum counter_recovery stat_handle_error(struct evsel *counter) 730 { 731 char msg[BUFSIZ]; 732 /* 733 * PPC returns ENXIO for HW counters until 2.6.37 734 * (behavior changed with commit b0a873e). 735 */ 736 if (errno == EINVAL || errno == ENOSYS || 737 errno == ENOENT || errno == EOPNOTSUPP || 738 errno == ENXIO) { 739 if (verbose > 0) 740 ui__warning("%s event is not supported by the kernel.\n", 741 evsel__name(counter)); 742 counter->supported = false; 743 /* 744 * errored is a sticky flag that means one of the counter's 745 * cpu event had a problem and needs to be reexamined. 746 */ 747 counter->errored = true; 748 749 if ((evsel__leader(counter) != counter) || 750 !(counter->core.leader->nr_members > 1)) 751 return COUNTER_SKIP; 752 } else if (evsel__fallback(counter, errno, msg, sizeof(msg))) { 753 if (verbose > 0) 754 ui__warning("%s\n", msg); 755 return COUNTER_RETRY; 756 } else if (target__has_per_thread(&target) && 757 evsel_list->core.threads && 758 evsel_list->core.threads->err_thread != -1) { 759 /* 760 * For global --per-thread case, skip current 761 * error thread. 762 */ 763 if (!thread_map__remove(evsel_list->core.threads, 764 evsel_list->core.threads->err_thread)) { 765 evsel_list->core.threads->err_thread = -1; 766 return COUNTER_RETRY; 767 } 768 } 769 770 evsel__open_strerror(counter, &target, errno, msg, sizeof(msg)); 771 ui__error("%s\n", msg); 772 773 if (child_pid != -1) 774 kill(child_pid, SIGTERM); 775 return COUNTER_FATAL; 776 } 777 778 static int __run_perf_stat(int argc, const char **argv, int run_idx) 779 { 780 int interval = stat_config.interval; 781 int times = stat_config.times; 782 int timeout = stat_config.timeout; 783 char msg[BUFSIZ]; 784 unsigned long long t0, t1; 785 struct evsel *counter; 786 size_t l; 787 int status = 0; 788 const bool forks = (argc > 0); 789 bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false; 790 struct evlist_cpu_iterator evlist_cpu_itr; 791 struct affinity saved_affinity, *affinity = NULL; 792 int err; 793 bool second_pass = false; 794 795 if (forks) { 796 if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) { 797 perror("failed to prepare workload"); 798 return -1; 799 } 800 child_pid = evsel_list->workload.pid; 801 } 802 803 if (group) 804 evlist__set_leader(evsel_list); 805 806 if (!cpu_map__is_dummy(evsel_list->core.cpus)) { 807 if (affinity__setup(&saved_affinity) < 0) 808 return -1; 809 affinity = &saved_affinity; 810 } 811 812 evlist__for_each_entry(evsel_list, counter) { 813 if (bpf_counter__load(counter, &target)) 814 return -1; 815 if (!evsel__is_bpf(counter)) 816 all_counters_use_bpf = false; 817 } 818 819 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { 820 counter = evlist_cpu_itr.evsel; 821 822 /* 823 * bperf calls evsel__open_per_cpu() in bperf__load(), so 824 * no need to call it again here. 825 */ 826 if (target.use_bpf) 827 break; 828 829 if (counter->reset_group || counter->errored) 830 continue; 831 if (evsel__is_bpf(counter)) 832 continue; 833 try_again: 834 if (create_perf_stat_counter(counter, &stat_config, &target, 835 evlist_cpu_itr.cpu_map_idx) < 0) { 836 837 /* 838 * Weak group failed. We cannot just undo this here 839 * because earlier CPUs might be in group mode, and the kernel 840 * doesn't support mixing group and non group reads. Defer 841 * it to later. 842 * Don't close here because we're in the wrong affinity. 843 */ 844 if ((errno == EINVAL || errno == EBADF) && 845 evsel__leader(counter) != counter && 846 counter->weak_group) { 847 evlist__reset_weak_group(evsel_list, counter, false); 848 assert(counter->reset_group); 849 second_pass = true; 850 continue; 851 } 852 853 switch (stat_handle_error(counter)) { 854 case COUNTER_FATAL: 855 return -1; 856 case COUNTER_RETRY: 857 goto try_again; 858 case COUNTER_SKIP: 859 continue; 860 default: 861 break; 862 } 863 864 } 865 counter->supported = true; 866 } 867 868 if (second_pass) { 869 /* 870 * Now redo all the weak group after closing them, 871 * and also close errored counters. 872 */ 873 874 /* First close errored or weak retry */ 875 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { 876 counter = evlist_cpu_itr.evsel; 877 878 if (!counter->reset_group && !counter->errored) 879 continue; 880 881 perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx); 882 } 883 /* Now reopen weak */ 884 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { 885 counter = evlist_cpu_itr.evsel; 886 887 if (!counter->reset_group && !counter->errored) 888 continue; 889 if (!counter->reset_group) 890 continue; 891 try_again_reset: 892 pr_debug2("reopening weak %s\n", evsel__name(counter)); 893 if (create_perf_stat_counter(counter, &stat_config, &target, 894 evlist_cpu_itr.cpu_map_idx) < 0) { 895 896 switch (stat_handle_error(counter)) { 897 case COUNTER_FATAL: 898 return -1; 899 case COUNTER_RETRY: 900 goto try_again_reset; 901 case COUNTER_SKIP: 902 continue; 903 default: 904 break; 905 } 906 } 907 counter->supported = true; 908 } 909 } 910 affinity__cleanup(affinity); 911 912 evlist__for_each_entry(evsel_list, counter) { 913 if (!counter->supported) { 914 perf_evsel__free_fd(&counter->core); 915 continue; 916 } 917 918 l = strlen(counter->unit); 919 if (l > stat_config.unit_width) 920 stat_config.unit_width = l; 921 922 if (evsel__should_store_id(counter) && 923 evsel__store_ids(counter, evsel_list)) 924 return -1; 925 } 926 927 if (evlist__apply_filters(evsel_list, &counter)) { 928 pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n", 929 counter->filter, evsel__name(counter), errno, 930 str_error_r(errno, msg, sizeof(msg))); 931 return -1; 932 } 933 934 if (STAT_RECORD) { 935 int fd = perf_data__fd(&perf_stat.data); 936 937 if (is_pipe) { 938 err = perf_header__write_pipe(perf_data__fd(&perf_stat.data)); 939 } else { 940 err = perf_session__write_header(perf_stat.session, evsel_list, 941 fd, false); 942 } 943 944 if (err < 0) 945 return err; 946 947 err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list, 948 process_synthesized_event, is_pipe); 949 if (err < 0) 950 return err; 951 } 952 953 /* 954 * Enable counters and exec the command: 955 */ 956 if (forks) { 957 evlist__start_workload(evsel_list); 958 err = enable_counters(); 959 if (err) 960 return -1; 961 962 t0 = rdclock(); 963 clock_gettime(CLOCK_MONOTONIC, &ref_time); 964 965 if (interval || timeout || evlist__ctlfd_initialized(evsel_list)) 966 status = dispatch_events(forks, timeout, interval, ×); 967 if (child_pid != -1) { 968 if (timeout) 969 kill(child_pid, SIGTERM); 970 wait4(child_pid, &status, 0, &stat_config.ru_data); 971 } 972 973 if (workload_exec_errno) { 974 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg)); 975 pr_err("Workload failed: %s\n", emsg); 976 return -1; 977 } 978 979 if (WIFSIGNALED(status)) 980 psignal(WTERMSIG(status), argv[0]); 981 } else { 982 err = enable_counters(); 983 if (err) 984 return -1; 985 986 t0 = rdclock(); 987 clock_gettime(CLOCK_MONOTONIC, &ref_time); 988 989 status = dispatch_events(forks, timeout, interval, ×); 990 } 991 992 disable_counters(); 993 994 t1 = rdclock(); 995 996 if (stat_config.walltime_run_table) 997 stat_config.walltime_run[run_idx] = t1 - t0; 998 999 if (interval && stat_config.summary) { 1000 stat_config.interval = 0; 1001 stat_config.stop_read_counter = true; 1002 init_stats(&walltime_nsecs_stats); 1003 update_stats(&walltime_nsecs_stats, t1 - t0); 1004 1005 if (stat_config.aggr_mode == AGGR_GLOBAL) 1006 evlist__save_aggr_prev_raw_counts(evsel_list); 1007 1008 evlist__copy_prev_raw_counts(evsel_list); 1009 evlist__reset_prev_raw_counts(evsel_list); 1010 runtime_stat_reset(&stat_config); 1011 perf_stat__reset_shadow_per_stat(&rt_stat); 1012 } else 1013 update_stats(&walltime_nsecs_stats, t1 - t0); 1014 1015 /* 1016 * Closing a group leader splits the group, and as we only disable 1017 * group leaders, results in remaining events becoming enabled. To 1018 * avoid arbitrary skew, we must read all counters before closing any 1019 * group leaders. 1020 */ 1021 read_counters(&(struct timespec) { .tv_nsec = t1-t0 }); 1022 1023 /* 1024 * We need to keep evsel_list alive, because it's processed 1025 * later the evsel_list will be closed after. 1026 */ 1027 if (!STAT_RECORD) 1028 evlist__close(evsel_list); 1029 1030 return WEXITSTATUS(status); 1031 } 1032 1033 static int run_perf_stat(int argc, const char **argv, int run_idx) 1034 { 1035 int ret; 1036 1037 if (pre_cmd) { 1038 ret = system(pre_cmd); 1039 if (ret) 1040 return ret; 1041 } 1042 1043 if (sync_run) 1044 sync(); 1045 1046 ret = __run_perf_stat(argc, argv, run_idx); 1047 if (ret) 1048 return ret; 1049 1050 if (post_cmd) { 1051 ret = system(post_cmd); 1052 if (ret) 1053 return ret; 1054 } 1055 1056 return ret; 1057 } 1058 1059 static void print_counters(struct timespec *ts, int argc, const char **argv) 1060 { 1061 /* Do not print anything if we record to the pipe. */ 1062 if (STAT_RECORD && perf_stat.data.is_pipe) 1063 return; 1064 if (stat_config.quiet) 1065 return; 1066 1067 evlist__print_counters(evsel_list, &stat_config, &target, ts, argc, argv); 1068 } 1069 1070 static volatile int signr = -1; 1071 1072 static void skip_signal(int signo) 1073 { 1074 if ((child_pid == -1) || stat_config.interval) 1075 done = 1; 1076 1077 signr = signo; 1078 /* 1079 * render child_pid harmless 1080 * won't send SIGTERM to a random 1081 * process in case of race condition 1082 * and fast PID recycling 1083 */ 1084 child_pid = -1; 1085 } 1086 1087 static void sig_atexit(void) 1088 { 1089 sigset_t set, oset; 1090 1091 /* 1092 * avoid race condition with SIGCHLD handler 1093 * in skip_signal() which is modifying child_pid 1094 * goal is to avoid send SIGTERM to a random 1095 * process 1096 */ 1097 sigemptyset(&set); 1098 sigaddset(&set, SIGCHLD); 1099 sigprocmask(SIG_BLOCK, &set, &oset); 1100 1101 if (child_pid != -1) 1102 kill(child_pid, SIGTERM); 1103 1104 sigprocmask(SIG_SETMASK, &oset, NULL); 1105 1106 if (signr == -1) 1107 return; 1108 1109 signal(signr, SIG_DFL); 1110 kill(getpid(), signr); 1111 } 1112 1113 void perf_stat__set_big_num(int set) 1114 { 1115 stat_config.big_num = (set != 0); 1116 } 1117 1118 void perf_stat__set_no_csv_summary(int set) 1119 { 1120 stat_config.no_csv_summary = (set != 0); 1121 } 1122 1123 static int stat__set_big_num(const struct option *opt __maybe_unused, 1124 const char *s __maybe_unused, int unset) 1125 { 1126 big_num_opt = unset ? 0 : 1; 1127 perf_stat__set_big_num(!unset); 1128 return 0; 1129 } 1130 1131 static int enable_metric_only(const struct option *opt __maybe_unused, 1132 const char *s __maybe_unused, int unset) 1133 { 1134 force_metric_only = true; 1135 stat_config.metric_only = !unset; 1136 return 0; 1137 } 1138 1139 static int parse_metric_groups(const struct option *opt, 1140 const char *str, 1141 int unset __maybe_unused) 1142 { 1143 return metricgroup__parse_groups(opt, str, 1144 stat_config.metric_no_group, 1145 stat_config.metric_no_merge, 1146 &stat_config.metric_events); 1147 } 1148 1149 static int parse_control_option(const struct option *opt, 1150 const char *str, 1151 int unset __maybe_unused) 1152 { 1153 struct perf_stat_config *config = opt->value; 1154 1155 return evlist__parse_control(str, &config->ctl_fd, &config->ctl_fd_ack, &config->ctl_fd_close); 1156 } 1157 1158 static int parse_stat_cgroups(const struct option *opt, 1159 const char *str, int unset) 1160 { 1161 if (stat_config.cgroup_list) { 1162 pr_err("--cgroup and --for-each-cgroup cannot be used together\n"); 1163 return -1; 1164 } 1165 1166 return parse_cgroups(opt, str, unset); 1167 } 1168 1169 static int parse_hybrid_type(const struct option *opt, 1170 const char *str, 1171 int unset __maybe_unused) 1172 { 1173 struct evlist *evlist = *(struct evlist **)opt->value; 1174 1175 if (!list_empty(&evlist->core.entries)) { 1176 fprintf(stderr, "Must define cputype before events/metrics\n"); 1177 return -1; 1178 } 1179 1180 evlist->hybrid_pmu_name = perf_pmu__hybrid_type_to_pmu(str); 1181 if (!evlist->hybrid_pmu_name) { 1182 fprintf(stderr, "--cputype %s is not supported!\n", str); 1183 return -1; 1184 } 1185 1186 return 0; 1187 } 1188 1189 static struct option stat_options[] = { 1190 OPT_BOOLEAN('T', "transaction", &transaction_run, 1191 "hardware transaction statistics"), 1192 OPT_CALLBACK('e', "event", &evsel_list, "event", 1193 "event selector. use 'perf list' to list available events", 1194 parse_events_option), 1195 OPT_CALLBACK(0, "filter", &evsel_list, "filter", 1196 "event filter", parse_filter), 1197 OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit, 1198 "child tasks do not inherit counters"), 1199 OPT_STRING('p', "pid", &target.pid, "pid", 1200 "stat events on existing process id"), 1201 OPT_STRING('t', "tid", &target.tid, "tid", 1202 "stat events on existing thread id"), 1203 #ifdef HAVE_BPF_SKEL 1204 OPT_STRING('b', "bpf-prog", &target.bpf_str, "bpf-prog-id", 1205 "stat events on existing bpf program id"), 1206 OPT_BOOLEAN(0, "bpf-counters", &target.use_bpf, 1207 "use bpf program to count events"), 1208 OPT_STRING(0, "bpf-attr-map", &target.attr_map, "attr-map-path", 1209 "path to perf_event_attr map"), 1210 #endif 1211 OPT_BOOLEAN('a', "all-cpus", &target.system_wide, 1212 "system-wide collection from all CPUs"), 1213 OPT_BOOLEAN('g', "group", &group, 1214 "put the counters into a counter group"), 1215 OPT_BOOLEAN(0, "scale", &stat_config.scale, 1216 "Use --no-scale to disable counter scaling for multiplexing"), 1217 OPT_INCR('v', "verbose", &verbose, 1218 "be more verbose (show counter open errors, etc)"), 1219 OPT_INTEGER('r', "repeat", &stat_config.run_count, 1220 "repeat command and print average + stddev (max: 100, forever: 0)"), 1221 OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table, 1222 "display details about each run (only with -r option)"), 1223 OPT_BOOLEAN('n', "null", &stat_config.null_run, 1224 "null run - dont start any counters"), 1225 OPT_INCR('d', "detailed", &detailed_run, 1226 "detailed run - start a lot of events"), 1227 OPT_BOOLEAN('S', "sync", &sync_run, 1228 "call sync() before starting a run"), 1229 OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, 1230 "print large numbers with thousands\' separators", 1231 stat__set_big_num), 1232 OPT_STRING('C', "cpu", &target.cpu_list, "cpu", 1233 "list of cpus to monitor in system-wide"), 1234 OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode, 1235 "disable CPU count aggregation", AGGR_NONE), 1236 OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"), 1237 OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator", 1238 "print counts with custom separator"), 1239 OPT_CALLBACK('G', "cgroup", &evsel_list, "name", 1240 "monitor event in cgroup name only", parse_stat_cgroups), 1241 OPT_STRING(0, "for-each-cgroup", &stat_config.cgroup_list, "name", 1242 "expand events for each cgroup"), 1243 OPT_STRING('o', "output", &output_name, "file", "output file name"), 1244 OPT_BOOLEAN(0, "append", &append_file, "append to the output file"), 1245 OPT_INTEGER(0, "log-fd", &output_fd, 1246 "log output to fd, instead of stderr"), 1247 OPT_STRING(0, "pre", &pre_cmd, "command", 1248 "command to run prior to the measured command"), 1249 OPT_STRING(0, "post", &post_cmd, "command", 1250 "command to run after to the measured command"), 1251 OPT_UINTEGER('I', "interval-print", &stat_config.interval, 1252 "print counts at regular interval in ms " 1253 "(overhead is possible for values <= 100ms)"), 1254 OPT_INTEGER(0, "interval-count", &stat_config.times, 1255 "print counts for fixed number of times"), 1256 OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear, 1257 "clear screen in between new interval"), 1258 OPT_UINTEGER(0, "timeout", &stat_config.timeout, 1259 "stop workload and print counts after a timeout period in ms (>= 10ms)"), 1260 OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode, 1261 "aggregate counts per processor socket", AGGR_SOCKET), 1262 OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode, 1263 "aggregate counts per processor die", AGGR_DIE), 1264 OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode, 1265 "aggregate counts per physical processor core", AGGR_CORE), 1266 OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode, 1267 "aggregate counts per thread", AGGR_THREAD), 1268 OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode, 1269 "aggregate counts per numa node", AGGR_NODE), 1270 OPT_INTEGER('D', "delay", &stat_config.initial_delay, 1271 "ms to wait before starting measurement after program start (-1: start with events disabled)"), 1272 OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL, 1273 "Only print computed metrics. No raw values", enable_metric_only), 1274 OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group, 1275 "don't group metric events, impacts multiplexing"), 1276 OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge, 1277 "don't try to share events between metrics in a group"), 1278 OPT_BOOLEAN(0, "topdown", &topdown_run, 1279 "measure top-down statistics"), 1280 OPT_UINTEGER(0, "td-level", &stat_config.topdown_level, 1281 "Set the metrics level for the top-down statistics (0: max level)"), 1282 OPT_BOOLEAN(0, "smi-cost", &smi_cost, 1283 "measure SMI cost"), 1284 OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list", 1285 "monitor specified metrics or metric groups (separated by ,)", 1286 parse_metric_groups), 1287 OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel, 1288 "Configure all used events to run in kernel space.", 1289 PARSE_OPT_EXCLUSIVE), 1290 OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user, 1291 "Configure all used events to run in user space.", 1292 PARSE_OPT_EXCLUSIVE), 1293 OPT_BOOLEAN(0, "percore-show-thread", &stat_config.percore_show_thread, 1294 "Use with 'percore' event qualifier to show the event " 1295 "counts of one hardware thread by sum up total hardware " 1296 "threads of same physical core"), 1297 OPT_BOOLEAN(0, "summary", &stat_config.summary, 1298 "print summary for interval mode"), 1299 OPT_BOOLEAN(0, "no-csv-summary", &stat_config.no_csv_summary, 1300 "don't print 'summary' for CSV summary output"), 1301 OPT_BOOLEAN(0, "quiet", &stat_config.quiet, 1302 "don't print output (useful with record)"), 1303 OPT_CALLBACK(0, "cputype", &evsel_list, "hybrid cpu type", 1304 "Only enable events on applying cpu with this type " 1305 "for hybrid platform (e.g. core or atom)", 1306 parse_hybrid_type), 1307 #ifdef HAVE_LIBPFM 1308 OPT_CALLBACK(0, "pfm-events", &evsel_list, "event", 1309 "libpfm4 event selector. use 'perf list' to list available events", 1310 parse_libpfm_events_option), 1311 #endif 1312 OPT_CALLBACK(0, "control", &stat_config, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]", 1313 "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events).\n" 1314 "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n" 1315 "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.", 1316 parse_control_option), 1317 OPT_CALLBACK_OPTARG(0, "iostat", &evsel_list, &stat_config, "default", 1318 "measure I/O performance metrics provided by arch/platform", 1319 iostat_parse), 1320 OPT_END() 1321 }; 1322 1323 static const char *const aggr_mode__string[] = { 1324 [AGGR_CORE] = "core", 1325 [AGGR_DIE] = "die", 1326 [AGGR_GLOBAL] = "global", 1327 [AGGR_NODE] = "node", 1328 [AGGR_NONE] = "none", 1329 [AGGR_SOCKET] = "socket", 1330 [AGGR_THREAD] = "thread", 1331 [AGGR_UNSET] = "unset", 1332 }; 1333 1334 static struct aggr_cpu_id perf_stat__get_socket(struct perf_stat_config *config __maybe_unused, 1335 struct perf_cpu cpu) 1336 { 1337 return aggr_cpu_id__socket(cpu, /*data=*/NULL); 1338 } 1339 1340 static struct aggr_cpu_id perf_stat__get_die(struct perf_stat_config *config __maybe_unused, 1341 struct perf_cpu cpu) 1342 { 1343 return aggr_cpu_id__die(cpu, /*data=*/NULL); 1344 } 1345 1346 static struct aggr_cpu_id perf_stat__get_core(struct perf_stat_config *config __maybe_unused, 1347 struct perf_cpu cpu) 1348 { 1349 return aggr_cpu_id__core(cpu, /*data=*/NULL); 1350 } 1351 1352 static struct aggr_cpu_id perf_stat__get_node(struct perf_stat_config *config __maybe_unused, 1353 struct perf_cpu cpu) 1354 { 1355 return aggr_cpu_id__node(cpu, /*data=*/NULL); 1356 } 1357 1358 static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config, 1359 aggr_get_id_t get_id, struct perf_cpu cpu) 1360 { 1361 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1362 1363 if (aggr_cpu_id__is_empty(&config->cpus_aggr_map->map[cpu.cpu])) 1364 config->cpus_aggr_map->map[cpu.cpu] = get_id(config, cpu); 1365 1366 id = config->cpus_aggr_map->map[cpu.cpu]; 1367 return id; 1368 } 1369 1370 static struct aggr_cpu_id perf_stat__get_socket_cached(struct perf_stat_config *config, 1371 struct perf_cpu cpu) 1372 { 1373 return perf_stat__get_aggr(config, perf_stat__get_socket, cpu); 1374 } 1375 1376 static struct aggr_cpu_id perf_stat__get_die_cached(struct perf_stat_config *config, 1377 struct perf_cpu cpu) 1378 { 1379 return perf_stat__get_aggr(config, perf_stat__get_die, cpu); 1380 } 1381 1382 static struct aggr_cpu_id perf_stat__get_core_cached(struct perf_stat_config *config, 1383 struct perf_cpu cpu) 1384 { 1385 return perf_stat__get_aggr(config, perf_stat__get_core, cpu); 1386 } 1387 1388 static struct aggr_cpu_id perf_stat__get_node_cached(struct perf_stat_config *config, 1389 struct perf_cpu cpu) 1390 { 1391 return perf_stat__get_aggr(config, perf_stat__get_node, cpu); 1392 } 1393 1394 static bool term_percore_set(void) 1395 { 1396 struct evsel *counter; 1397 1398 evlist__for_each_entry(evsel_list, counter) { 1399 if (counter->percore) 1400 return true; 1401 } 1402 1403 return false; 1404 } 1405 1406 static aggr_cpu_id_get_t aggr_mode__get_aggr(enum aggr_mode aggr_mode) 1407 { 1408 switch (aggr_mode) { 1409 case AGGR_SOCKET: 1410 return aggr_cpu_id__socket; 1411 case AGGR_DIE: 1412 return aggr_cpu_id__die; 1413 case AGGR_CORE: 1414 return aggr_cpu_id__core; 1415 case AGGR_NODE: 1416 return aggr_cpu_id__node; 1417 case AGGR_NONE: 1418 if (term_percore_set()) 1419 return aggr_cpu_id__core; 1420 1421 return NULL; 1422 case AGGR_GLOBAL: 1423 case AGGR_THREAD: 1424 case AGGR_UNSET: 1425 default: 1426 return NULL; 1427 } 1428 } 1429 1430 static aggr_get_id_t aggr_mode__get_id(enum aggr_mode aggr_mode) 1431 { 1432 switch (aggr_mode) { 1433 case AGGR_SOCKET: 1434 return perf_stat__get_socket_cached; 1435 case AGGR_DIE: 1436 return perf_stat__get_die_cached; 1437 case AGGR_CORE: 1438 return perf_stat__get_core_cached; 1439 case AGGR_NODE: 1440 return perf_stat__get_node_cached; 1441 case AGGR_NONE: 1442 if (term_percore_set()) { 1443 return perf_stat__get_core_cached; 1444 } 1445 return NULL; 1446 case AGGR_GLOBAL: 1447 case AGGR_THREAD: 1448 case AGGR_UNSET: 1449 default: 1450 return NULL; 1451 } 1452 } 1453 1454 static int perf_stat_init_aggr_mode(void) 1455 { 1456 int nr; 1457 aggr_cpu_id_get_t get_id = aggr_mode__get_aggr(stat_config.aggr_mode); 1458 1459 if (get_id) { 1460 stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.cpus, 1461 get_id, /*data=*/NULL); 1462 if (!stat_config.aggr_map) { 1463 pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]); 1464 return -1; 1465 } 1466 stat_config.aggr_get_id = aggr_mode__get_id(stat_config.aggr_mode); 1467 } 1468 1469 /* 1470 * The evsel_list->cpus is the base we operate on, 1471 * taking the highest cpu number to be the size of 1472 * the aggregation translate cpumap. 1473 */ 1474 nr = perf_cpu_map__max(evsel_list->core.cpus).cpu; 1475 stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1); 1476 return stat_config.cpus_aggr_map ? 0 : -ENOMEM; 1477 } 1478 1479 static void cpu_aggr_map__delete(struct cpu_aggr_map *map) 1480 { 1481 if (map) { 1482 WARN_ONCE(refcount_read(&map->refcnt) != 0, 1483 "cpu_aggr_map refcnt unbalanced\n"); 1484 free(map); 1485 } 1486 } 1487 1488 static void cpu_aggr_map__put(struct cpu_aggr_map *map) 1489 { 1490 if (map && refcount_dec_and_test(&map->refcnt)) 1491 cpu_aggr_map__delete(map); 1492 } 1493 1494 static void perf_stat__exit_aggr_mode(void) 1495 { 1496 cpu_aggr_map__put(stat_config.aggr_map); 1497 cpu_aggr_map__put(stat_config.cpus_aggr_map); 1498 stat_config.aggr_map = NULL; 1499 stat_config.cpus_aggr_map = NULL; 1500 } 1501 1502 static struct aggr_cpu_id perf_env__get_socket_aggr_by_cpu(struct perf_cpu cpu, void *data) 1503 { 1504 struct perf_env *env = data; 1505 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1506 1507 if (cpu.cpu != -1) 1508 id.socket = env->cpu[cpu.cpu].socket_id; 1509 1510 return id; 1511 } 1512 1513 static struct aggr_cpu_id perf_env__get_die_aggr_by_cpu(struct perf_cpu cpu, void *data) 1514 { 1515 struct perf_env *env = data; 1516 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1517 1518 if (cpu.cpu != -1) { 1519 /* 1520 * die_id is relative to socket, so start 1521 * with the socket ID and then add die to 1522 * make a unique ID. 1523 */ 1524 id.socket = env->cpu[cpu.cpu].socket_id; 1525 id.die = env->cpu[cpu.cpu].die_id; 1526 } 1527 1528 return id; 1529 } 1530 1531 static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(struct perf_cpu cpu, void *data) 1532 { 1533 struct perf_env *env = data; 1534 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1535 1536 if (cpu.cpu != -1) { 1537 /* 1538 * core_id is relative to socket and die, 1539 * we need a global id. So we set 1540 * socket, die id and core id 1541 */ 1542 id.socket = env->cpu[cpu.cpu].socket_id; 1543 id.die = env->cpu[cpu.cpu].die_id; 1544 id.core = env->cpu[cpu.cpu].core_id; 1545 } 1546 1547 return id; 1548 } 1549 1550 static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(struct perf_cpu cpu, void *data) 1551 { 1552 struct aggr_cpu_id id = aggr_cpu_id__empty(); 1553 1554 id.node = perf_env__numa_node(data, cpu); 1555 return id; 1556 } 1557 1558 static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused, 1559 struct perf_cpu cpu) 1560 { 1561 return perf_env__get_socket_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1562 } 1563 static struct aggr_cpu_id perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused, 1564 struct perf_cpu cpu) 1565 { 1566 return perf_env__get_die_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1567 } 1568 1569 static struct aggr_cpu_id perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused, 1570 struct perf_cpu cpu) 1571 { 1572 return perf_env__get_core_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1573 } 1574 1575 static struct aggr_cpu_id perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused, 1576 struct perf_cpu cpu) 1577 { 1578 return perf_env__get_node_aggr_by_cpu(cpu, &perf_stat.session->header.env); 1579 } 1580 1581 static aggr_cpu_id_get_t aggr_mode__get_aggr_file(enum aggr_mode aggr_mode) 1582 { 1583 switch (aggr_mode) { 1584 case AGGR_SOCKET: 1585 return perf_env__get_socket_aggr_by_cpu; 1586 case AGGR_DIE: 1587 return perf_env__get_die_aggr_by_cpu; 1588 case AGGR_CORE: 1589 return perf_env__get_core_aggr_by_cpu; 1590 case AGGR_NODE: 1591 return perf_env__get_node_aggr_by_cpu; 1592 case AGGR_NONE: 1593 case AGGR_GLOBAL: 1594 case AGGR_THREAD: 1595 case AGGR_UNSET: 1596 default: 1597 return NULL; 1598 } 1599 } 1600 1601 static aggr_get_id_t aggr_mode__get_id_file(enum aggr_mode aggr_mode) 1602 { 1603 switch (aggr_mode) { 1604 case AGGR_SOCKET: 1605 return perf_stat__get_socket_file; 1606 case AGGR_DIE: 1607 return perf_stat__get_die_file; 1608 case AGGR_CORE: 1609 return perf_stat__get_core_file; 1610 case AGGR_NODE: 1611 return perf_stat__get_node_file; 1612 case AGGR_NONE: 1613 case AGGR_GLOBAL: 1614 case AGGR_THREAD: 1615 case AGGR_UNSET: 1616 default: 1617 return NULL; 1618 } 1619 } 1620 1621 static int perf_stat_init_aggr_mode_file(struct perf_stat *st) 1622 { 1623 struct perf_env *env = &st->session->header.env; 1624 aggr_cpu_id_get_t get_id = aggr_mode__get_aggr_file(stat_config.aggr_mode); 1625 1626 if (!get_id) 1627 return 0; 1628 1629 stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.cpus, get_id, env); 1630 if (!stat_config.aggr_map) { 1631 pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]); 1632 return -1; 1633 } 1634 stat_config.aggr_get_id = aggr_mode__get_id_file(stat_config.aggr_mode); 1635 return 0; 1636 } 1637 1638 /* 1639 * Add default attributes, if there were no attributes specified or 1640 * if -d/--detailed, -d -d or -d -d -d is used: 1641 */ 1642 static int add_default_attributes(void) 1643 { 1644 int err; 1645 struct perf_event_attr default_attrs0[] = { 1646 1647 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, 1648 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES }, 1649 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, 1650 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, 1651 1652 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, 1653 }; 1654 struct perf_event_attr frontend_attrs[] = { 1655 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, 1656 }; 1657 struct perf_event_attr backend_attrs[] = { 1658 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, 1659 }; 1660 struct perf_event_attr default_attrs1[] = { 1661 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, 1662 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, 1663 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, 1664 1665 }; 1666 struct perf_event_attr default_sw_attrs[] = { 1667 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, 1668 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES }, 1669 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, 1670 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, 1671 }; 1672 1673 /* 1674 * Detailed stats (-d), covering the L1 and last level data caches: 1675 */ 1676 struct perf_event_attr detailed_attrs[] = { 1677 1678 { .type = PERF_TYPE_HW_CACHE, 1679 .config = 1680 PERF_COUNT_HW_CACHE_L1D << 0 | 1681 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1682 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1683 1684 { .type = PERF_TYPE_HW_CACHE, 1685 .config = 1686 PERF_COUNT_HW_CACHE_L1D << 0 | 1687 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1688 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1689 1690 { .type = PERF_TYPE_HW_CACHE, 1691 .config = 1692 PERF_COUNT_HW_CACHE_LL << 0 | 1693 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1694 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1695 1696 { .type = PERF_TYPE_HW_CACHE, 1697 .config = 1698 PERF_COUNT_HW_CACHE_LL << 0 | 1699 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1700 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1701 }; 1702 1703 /* 1704 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches: 1705 */ 1706 struct perf_event_attr very_detailed_attrs[] = { 1707 1708 { .type = PERF_TYPE_HW_CACHE, 1709 .config = 1710 PERF_COUNT_HW_CACHE_L1I << 0 | 1711 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1712 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1713 1714 { .type = PERF_TYPE_HW_CACHE, 1715 .config = 1716 PERF_COUNT_HW_CACHE_L1I << 0 | 1717 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1718 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1719 1720 { .type = PERF_TYPE_HW_CACHE, 1721 .config = 1722 PERF_COUNT_HW_CACHE_DTLB << 0 | 1723 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1724 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1725 1726 { .type = PERF_TYPE_HW_CACHE, 1727 .config = 1728 PERF_COUNT_HW_CACHE_DTLB << 0 | 1729 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1730 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1731 1732 { .type = PERF_TYPE_HW_CACHE, 1733 .config = 1734 PERF_COUNT_HW_CACHE_ITLB << 0 | 1735 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1736 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1737 1738 { .type = PERF_TYPE_HW_CACHE, 1739 .config = 1740 PERF_COUNT_HW_CACHE_ITLB << 0 | 1741 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1742 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1743 1744 }; 1745 1746 /* 1747 * Very, very detailed stats (-d -d -d), adding prefetch events: 1748 */ 1749 struct perf_event_attr very_very_detailed_attrs[] = { 1750 1751 { .type = PERF_TYPE_HW_CACHE, 1752 .config = 1753 PERF_COUNT_HW_CACHE_L1D << 0 | 1754 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | 1755 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1756 1757 { .type = PERF_TYPE_HW_CACHE, 1758 .config = 1759 PERF_COUNT_HW_CACHE_L1D << 0 | 1760 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | 1761 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1762 }; 1763 /* Set attrs if no event is selected and !null_run: */ 1764 if (stat_config.null_run) 1765 return 0; 1766 1767 if (transaction_run) { 1768 struct parse_events_error errinfo; 1769 /* Handle -T as -M transaction. Once platform specific metrics 1770 * support has been added to the json files, all architectures 1771 * will use this approach. To determine transaction support 1772 * on an architecture test for such a metric name. 1773 */ 1774 if (metricgroup__has_metric("transaction")) { 1775 struct option opt = { .value = &evsel_list }; 1776 1777 return metricgroup__parse_groups(&opt, "transaction", 1778 stat_config.metric_no_group, 1779 stat_config.metric_no_merge, 1780 &stat_config.metric_events); 1781 } 1782 1783 parse_events_error__init(&errinfo); 1784 if (pmu_have_event("cpu", "cycles-ct") && 1785 pmu_have_event("cpu", "el-start")) 1786 err = parse_events(evsel_list, transaction_attrs, 1787 &errinfo); 1788 else 1789 err = parse_events(evsel_list, 1790 transaction_limited_attrs, 1791 &errinfo); 1792 if (err) { 1793 fprintf(stderr, "Cannot set up transaction events\n"); 1794 parse_events_error__print(&errinfo, transaction_attrs); 1795 } 1796 parse_events_error__exit(&errinfo); 1797 return err ? -1 : 0; 1798 } 1799 1800 if (smi_cost) { 1801 struct parse_events_error errinfo; 1802 int smi; 1803 1804 if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) { 1805 fprintf(stderr, "freeze_on_smi is not supported.\n"); 1806 return -1; 1807 } 1808 1809 if (!smi) { 1810 if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) { 1811 fprintf(stderr, "Failed to set freeze_on_smi.\n"); 1812 return -1; 1813 } 1814 smi_reset = true; 1815 } 1816 1817 if (!pmu_have_event("msr", "aperf") || 1818 !pmu_have_event("msr", "smi")) { 1819 fprintf(stderr, "To measure SMI cost, it needs " 1820 "msr/aperf/, msr/smi/ and cpu/cycles/ support\n"); 1821 return -1; 1822 } 1823 if (!force_metric_only) 1824 stat_config.metric_only = true; 1825 1826 parse_events_error__init(&errinfo); 1827 err = parse_events(evsel_list, smi_cost_attrs, &errinfo); 1828 if (err) { 1829 parse_events_error__print(&errinfo, smi_cost_attrs); 1830 fprintf(stderr, "Cannot set up SMI cost events\n"); 1831 } 1832 parse_events_error__exit(&errinfo); 1833 return err ? -1 : 0; 1834 } 1835 1836 if (topdown_run) { 1837 const char **metric_attrs = topdown_metric_attrs; 1838 unsigned int max_level = 1; 1839 char *str = NULL; 1840 bool warn = false; 1841 1842 if (!force_metric_only) 1843 stat_config.metric_only = true; 1844 1845 if (pmu_have_event("cpu", topdown_metric_L2_attrs[5])) { 1846 metric_attrs = topdown_metric_L2_attrs; 1847 max_level = 2; 1848 } 1849 1850 if (stat_config.topdown_level > max_level) { 1851 pr_err("Invalid top-down metrics level. The max level is %u.\n", max_level); 1852 return -1; 1853 } else if (!stat_config.topdown_level) 1854 stat_config.topdown_level = max_level; 1855 1856 if (topdown_filter_events(metric_attrs, &str, 1) < 0) { 1857 pr_err("Out of memory\n"); 1858 return -1; 1859 } 1860 if (metric_attrs[0] && str) { 1861 if (!stat_config.interval && !stat_config.metric_only) { 1862 fprintf(stat_config.output, 1863 "Topdown accuracy may decrease when measuring long periods.\n" 1864 "Please print the result regularly, e.g. -I1000\n"); 1865 } 1866 goto setup_metrics; 1867 } 1868 1869 zfree(&str); 1870 1871 if (stat_config.aggr_mode != AGGR_GLOBAL && 1872 stat_config.aggr_mode != AGGR_CORE) { 1873 pr_err("top down event configuration requires --per-core mode\n"); 1874 return -1; 1875 } 1876 stat_config.aggr_mode = AGGR_CORE; 1877 if (nr_cgroups || !target__has_cpu(&target)) { 1878 pr_err("top down event configuration requires system-wide mode (-a)\n"); 1879 return -1; 1880 } 1881 1882 if (topdown_filter_events(topdown_attrs, &str, 1883 arch_topdown_check_group(&warn)) < 0) { 1884 pr_err("Out of memory\n"); 1885 return -1; 1886 } 1887 if (topdown_attrs[0] && str) { 1888 struct parse_events_error errinfo; 1889 if (warn) 1890 arch_topdown_group_warn(); 1891 setup_metrics: 1892 parse_events_error__init(&errinfo); 1893 err = parse_events(evsel_list, str, &errinfo); 1894 if (err) { 1895 fprintf(stderr, 1896 "Cannot set up top down events %s: %d\n", 1897 str, err); 1898 parse_events_error__print(&errinfo, str); 1899 parse_events_error__exit(&errinfo); 1900 free(str); 1901 return -1; 1902 } 1903 parse_events_error__exit(&errinfo); 1904 } else { 1905 fprintf(stderr, "System does not support topdown\n"); 1906 return -1; 1907 } 1908 free(str); 1909 } 1910 1911 if (!evsel_list->core.nr_entries) { 1912 if (perf_pmu__has_hybrid()) { 1913 struct parse_events_error errinfo; 1914 const char *hybrid_str = "cycles,instructions,branches,branch-misses"; 1915 1916 if (target__has_cpu(&target)) 1917 default_sw_attrs[0].config = PERF_COUNT_SW_CPU_CLOCK; 1918 1919 if (evlist__add_default_attrs(evsel_list, 1920 default_sw_attrs) < 0) { 1921 return -1; 1922 } 1923 1924 parse_events_error__init(&errinfo); 1925 err = parse_events(evsel_list, hybrid_str, &errinfo); 1926 if (err) { 1927 fprintf(stderr, 1928 "Cannot set up hybrid events %s: %d\n", 1929 hybrid_str, err); 1930 parse_events_error__print(&errinfo, hybrid_str); 1931 } 1932 parse_events_error__exit(&errinfo); 1933 return err ? -1 : 0; 1934 } 1935 1936 if (target__has_cpu(&target)) 1937 default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK; 1938 1939 if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0) 1940 return -1; 1941 if (pmu_have_event("cpu", "stalled-cycles-frontend")) { 1942 if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0) 1943 return -1; 1944 } 1945 if (pmu_have_event("cpu", "stalled-cycles-backend")) { 1946 if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0) 1947 return -1; 1948 } 1949 if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0) 1950 return -1; 1951 1952 stat_config.topdown_level = TOPDOWN_MAX_LEVEL; 1953 if (arch_evlist__add_default_attrs(evsel_list) < 0) 1954 return -1; 1955 } 1956 1957 /* Detailed events get appended to the event list: */ 1958 1959 if (detailed_run < 1) 1960 return 0; 1961 1962 /* Append detailed run extra attributes: */ 1963 if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0) 1964 return -1; 1965 1966 if (detailed_run < 2) 1967 return 0; 1968 1969 /* Append very detailed run extra attributes: */ 1970 if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0) 1971 return -1; 1972 1973 if (detailed_run < 3) 1974 return 0; 1975 1976 /* Append very, very detailed run extra attributes: */ 1977 return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); 1978 } 1979 1980 static const char * const stat_record_usage[] = { 1981 "perf stat record [<options>]", 1982 NULL, 1983 }; 1984 1985 static void init_features(struct perf_session *session) 1986 { 1987 int feat; 1988 1989 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++) 1990 perf_header__set_feat(&session->header, feat); 1991 1992 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT); 1993 perf_header__clear_feat(&session->header, HEADER_BUILD_ID); 1994 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA); 1995 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK); 1996 perf_header__clear_feat(&session->header, HEADER_AUXTRACE); 1997 } 1998 1999 static int __cmd_record(int argc, const char **argv) 2000 { 2001 struct perf_session *session; 2002 struct perf_data *data = &perf_stat.data; 2003 2004 argc = parse_options(argc, argv, stat_options, stat_record_usage, 2005 PARSE_OPT_STOP_AT_NON_OPTION); 2006 2007 if (output_name) 2008 data->path = output_name; 2009 2010 if (stat_config.run_count != 1 || forever) { 2011 pr_err("Cannot use -r option with perf stat record.\n"); 2012 return -1; 2013 } 2014 2015 session = perf_session__new(data, NULL); 2016 if (IS_ERR(session)) { 2017 pr_err("Perf session creation failed\n"); 2018 return PTR_ERR(session); 2019 } 2020 2021 init_features(session); 2022 2023 session->evlist = evsel_list; 2024 perf_stat.session = session; 2025 perf_stat.record = true; 2026 return argc; 2027 } 2028 2029 static int process_stat_round_event(struct perf_session *session, 2030 union perf_event *event) 2031 { 2032 struct perf_record_stat_round *stat_round = &event->stat_round; 2033 struct evsel *counter; 2034 struct timespec tsh, *ts = NULL; 2035 const char **argv = session->header.env.cmdline_argv; 2036 int argc = session->header.env.nr_cmdline; 2037 2038 evlist__for_each_entry(evsel_list, counter) 2039 perf_stat_process_counter(&stat_config, counter); 2040 2041 if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL) 2042 update_stats(&walltime_nsecs_stats, stat_round->time); 2043 2044 if (stat_config.interval && stat_round->time) { 2045 tsh.tv_sec = stat_round->time / NSEC_PER_SEC; 2046 tsh.tv_nsec = stat_round->time % NSEC_PER_SEC; 2047 ts = &tsh; 2048 } 2049 2050 print_counters(ts, argc, argv); 2051 return 0; 2052 } 2053 2054 static 2055 int process_stat_config_event(struct perf_session *session, 2056 union perf_event *event) 2057 { 2058 struct perf_tool *tool = session->tool; 2059 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 2060 2061 perf_event__read_stat_config(&stat_config, &event->stat_config); 2062 2063 if (perf_cpu_map__empty(st->cpus)) { 2064 if (st->aggr_mode != AGGR_UNSET) 2065 pr_warning("warning: processing task data, aggregation mode not set\n"); 2066 return 0; 2067 } 2068 2069 if (st->aggr_mode != AGGR_UNSET) 2070 stat_config.aggr_mode = st->aggr_mode; 2071 2072 if (perf_stat.data.is_pipe) 2073 perf_stat_init_aggr_mode(); 2074 else 2075 perf_stat_init_aggr_mode_file(st); 2076 2077 return 0; 2078 } 2079 2080 static int set_maps(struct perf_stat *st) 2081 { 2082 if (!st->cpus || !st->threads) 2083 return 0; 2084 2085 if (WARN_ONCE(st->maps_allocated, "stats double allocation\n")) 2086 return -EINVAL; 2087 2088 perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads); 2089 2090 if (evlist__alloc_stats(evsel_list, true)) 2091 return -ENOMEM; 2092 2093 st->maps_allocated = true; 2094 return 0; 2095 } 2096 2097 static 2098 int process_thread_map_event(struct perf_session *session, 2099 union perf_event *event) 2100 { 2101 struct perf_tool *tool = session->tool; 2102 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 2103 2104 if (st->threads) { 2105 pr_warning("Extra thread map event, ignoring.\n"); 2106 return 0; 2107 } 2108 2109 st->threads = thread_map__new_event(&event->thread_map); 2110 if (!st->threads) 2111 return -ENOMEM; 2112 2113 return set_maps(st); 2114 } 2115 2116 static 2117 int process_cpu_map_event(struct perf_session *session, 2118 union perf_event *event) 2119 { 2120 struct perf_tool *tool = session->tool; 2121 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 2122 struct perf_cpu_map *cpus; 2123 2124 if (st->cpus) { 2125 pr_warning("Extra cpu map event, ignoring.\n"); 2126 return 0; 2127 } 2128 2129 cpus = cpu_map__new_data(&event->cpu_map.data); 2130 if (!cpus) 2131 return -ENOMEM; 2132 2133 st->cpus = cpus; 2134 return set_maps(st); 2135 } 2136 2137 static const char * const stat_report_usage[] = { 2138 "perf stat report [<options>]", 2139 NULL, 2140 }; 2141 2142 static struct perf_stat perf_stat = { 2143 .tool = { 2144 .attr = perf_event__process_attr, 2145 .event_update = perf_event__process_event_update, 2146 .thread_map = process_thread_map_event, 2147 .cpu_map = process_cpu_map_event, 2148 .stat_config = process_stat_config_event, 2149 .stat = perf_event__process_stat_event, 2150 .stat_round = process_stat_round_event, 2151 }, 2152 .aggr_mode = AGGR_UNSET, 2153 }; 2154 2155 static int __cmd_report(int argc, const char **argv) 2156 { 2157 struct perf_session *session; 2158 const struct option options[] = { 2159 OPT_STRING('i', "input", &input_name, "file", "input file name"), 2160 OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode, 2161 "aggregate counts per processor socket", AGGR_SOCKET), 2162 OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode, 2163 "aggregate counts per processor die", AGGR_DIE), 2164 OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode, 2165 "aggregate counts per physical processor core", AGGR_CORE), 2166 OPT_SET_UINT(0, "per-node", &perf_stat.aggr_mode, 2167 "aggregate counts per numa node", AGGR_NODE), 2168 OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode, 2169 "disable CPU count aggregation", AGGR_NONE), 2170 OPT_END() 2171 }; 2172 struct stat st; 2173 int ret; 2174 2175 argc = parse_options(argc, argv, options, stat_report_usage, 0); 2176 2177 if (!input_name || !strlen(input_name)) { 2178 if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode)) 2179 input_name = "-"; 2180 else 2181 input_name = "perf.data"; 2182 } 2183 2184 perf_stat.data.path = input_name; 2185 perf_stat.data.mode = PERF_DATA_MODE_READ; 2186 2187 session = perf_session__new(&perf_stat.data, &perf_stat.tool); 2188 if (IS_ERR(session)) 2189 return PTR_ERR(session); 2190 2191 perf_stat.session = session; 2192 stat_config.output = stderr; 2193 evsel_list = session->evlist; 2194 2195 ret = perf_session__process_events(session); 2196 if (ret) 2197 return ret; 2198 2199 perf_session__delete(session); 2200 return 0; 2201 } 2202 2203 static void setup_system_wide(int forks) 2204 { 2205 /* 2206 * Make system wide (-a) the default target if 2207 * no target was specified and one of following 2208 * conditions is met: 2209 * 2210 * - there's no workload specified 2211 * - there is workload specified but all requested 2212 * events are system wide events 2213 */ 2214 if (!target__none(&target)) 2215 return; 2216 2217 if (!forks) 2218 target.system_wide = true; 2219 else { 2220 struct evsel *counter; 2221 2222 evlist__for_each_entry(evsel_list, counter) { 2223 if (!counter->core.system_wide && 2224 strcmp(counter->name, "duration_time")) { 2225 return; 2226 } 2227 } 2228 2229 if (evsel_list->core.nr_entries) 2230 target.system_wide = true; 2231 } 2232 } 2233 2234 int cmd_stat(int argc, const char **argv) 2235 { 2236 const char * const stat_usage[] = { 2237 "perf stat [<options>] [<command>]", 2238 NULL 2239 }; 2240 int status = -EINVAL, run_idx, err; 2241 const char *mode; 2242 FILE *output = stderr; 2243 unsigned int interval, timeout; 2244 const char * const stat_subcommands[] = { "record", "report" }; 2245 char errbuf[BUFSIZ]; 2246 2247 setlocale(LC_ALL, ""); 2248 2249 evsel_list = evlist__new(); 2250 if (evsel_list == NULL) 2251 return -ENOMEM; 2252 2253 parse_events__shrink_config_terms(); 2254 2255 /* String-parsing callback-based options would segfault when negated */ 2256 set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG); 2257 set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG); 2258 set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG); 2259 2260 argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands, 2261 (const char **) stat_usage, 2262 PARSE_OPT_STOP_AT_NON_OPTION); 2263 perf_stat__collect_metric_expr(evsel_list); 2264 perf_stat__init_shadow_stats(); 2265 2266 if (stat_config.csv_sep) { 2267 stat_config.csv_output = true; 2268 if (!strcmp(stat_config.csv_sep, "\\t")) 2269 stat_config.csv_sep = "\t"; 2270 } else 2271 stat_config.csv_sep = DEFAULT_SEPARATOR; 2272 2273 if (argc && !strncmp(argv[0], "rec", 3)) { 2274 argc = __cmd_record(argc, argv); 2275 if (argc < 0) 2276 return -1; 2277 } else if (argc && !strncmp(argv[0], "rep", 3)) 2278 return __cmd_report(argc, argv); 2279 2280 interval = stat_config.interval; 2281 timeout = stat_config.timeout; 2282 2283 /* 2284 * For record command the -o is already taken care of. 2285 */ 2286 if (!STAT_RECORD && output_name && strcmp(output_name, "-")) 2287 output = NULL; 2288 2289 if (output_name && output_fd) { 2290 fprintf(stderr, "cannot use both --output and --log-fd\n"); 2291 parse_options_usage(stat_usage, stat_options, "o", 1); 2292 parse_options_usage(NULL, stat_options, "log-fd", 0); 2293 goto out; 2294 } 2295 2296 if (stat_config.metric_only && stat_config.aggr_mode == AGGR_THREAD) { 2297 fprintf(stderr, "--metric-only is not supported with --per-thread\n"); 2298 goto out; 2299 } 2300 2301 if (stat_config.metric_only && stat_config.run_count > 1) { 2302 fprintf(stderr, "--metric-only is not supported with -r\n"); 2303 goto out; 2304 } 2305 2306 if (stat_config.walltime_run_table && stat_config.run_count <= 1) { 2307 fprintf(stderr, "--table is only supported with -r\n"); 2308 parse_options_usage(stat_usage, stat_options, "r", 1); 2309 parse_options_usage(NULL, stat_options, "table", 0); 2310 goto out; 2311 } 2312 2313 if (output_fd < 0) { 2314 fprintf(stderr, "argument to --log-fd must be a > 0\n"); 2315 parse_options_usage(stat_usage, stat_options, "log-fd", 0); 2316 goto out; 2317 } 2318 2319 if (!output && !stat_config.quiet) { 2320 struct timespec tm; 2321 mode = append_file ? "a" : "w"; 2322 2323 output = fopen(output_name, mode); 2324 if (!output) { 2325 perror("failed to create output file"); 2326 return -1; 2327 } 2328 clock_gettime(CLOCK_REALTIME, &tm); 2329 fprintf(output, "# started on %s\n", ctime(&tm.tv_sec)); 2330 } else if (output_fd > 0) { 2331 mode = append_file ? "a" : "w"; 2332 output = fdopen(output_fd, mode); 2333 if (!output) { 2334 perror("Failed opening logfd"); 2335 return -errno; 2336 } 2337 } 2338 2339 stat_config.output = output; 2340 2341 /* 2342 * let the spreadsheet do the pretty-printing 2343 */ 2344 if (stat_config.csv_output) { 2345 /* User explicitly passed -B? */ 2346 if (big_num_opt == 1) { 2347 fprintf(stderr, "-B option not supported with -x\n"); 2348 parse_options_usage(stat_usage, stat_options, "B", 1); 2349 parse_options_usage(NULL, stat_options, "x", 1); 2350 goto out; 2351 } else /* Nope, so disable big number formatting */ 2352 stat_config.big_num = false; 2353 } else if (big_num_opt == 0) /* User passed --no-big-num */ 2354 stat_config.big_num = false; 2355 2356 err = target__validate(&target); 2357 if (err) { 2358 target__strerror(&target, err, errbuf, BUFSIZ); 2359 pr_warning("%s\n", errbuf); 2360 } 2361 2362 setup_system_wide(argc); 2363 2364 /* 2365 * Display user/system times only for single 2366 * run and when there's specified tracee. 2367 */ 2368 if ((stat_config.run_count == 1) && target__none(&target)) 2369 stat_config.ru_display = true; 2370 2371 if (stat_config.run_count < 0) { 2372 pr_err("Run count must be a positive number\n"); 2373 parse_options_usage(stat_usage, stat_options, "r", 1); 2374 goto out; 2375 } else if (stat_config.run_count == 0) { 2376 forever = true; 2377 stat_config.run_count = 1; 2378 } 2379 2380 if (stat_config.walltime_run_table) { 2381 stat_config.walltime_run = zalloc(stat_config.run_count * sizeof(stat_config.walltime_run[0])); 2382 if (!stat_config.walltime_run) { 2383 pr_err("failed to setup -r option"); 2384 goto out; 2385 } 2386 } 2387 2388 if ((stat_config.aggr_mode == AGGR_THREAD) && 2389 !target__has_task(&target)) { 2390 if (!target.system_wide || target.cpu_list) { 2391 fprintf(stderr, "The --per-thread option is only " 2392 "available when monitoring via -p -t -a " 2393 "options or only --per-thread.\n"); 2394 parse_options_usage(NULL, stat_options, "p", 1); 2395 parse_options_usage(NULL, stat_options, "t", 1); 2396 goto out; 2397 } 2398 } 2399 2400 /* 2401 * no_aggr, cgroup are for system-wide only 2402 * --per-thread is aggregated per thread, we dont mix it with cpu mode 2403 */ 2404 if (((stat_config.aggr_mode != AGGR_GLOBAL && 2405 stat_config.aggr_mode != AGGR_THREAD) || 2406 (nr_cgroups || stat_config.cgroup_list)) && 2407 !target__has_cpu(&target)) { 2408 fprintf(stderr, "both cgroup and no-aggregation " 2409 "modes only available in system-wide mode\n"); 2410 2411 parse_options_usage(stat_usage, stat_options, "G", 1); 2412 parse_options_usage(NULL, stat_options, "A", 1); 2413 parse_options_usage(NULL, stat_options, "a", 1); 2414 parse_options_usage(NULL, stat_options, "for-each-cgroup", 0); 2415 goto out; 2416 } 2417 2418 if (stat_config.iostat_run) { 2419 status = iostat_prepare(evsel_list, &stat_config); 2420 if (status) 2421 goto out; 2422 if (iostat_mode == IOSTAT_LIST) { 2423 iostat_list(evsel_list, &stat_config); 2424 goto out; 2425 } else if (verbose) 2426 iostat_list(evsel_list, &stat_config); 2427 if (iostat_mode == IOSTAT_RUN && !target__has_cpu(&target)) 2428 target.system_wide = true; 2429 } 2430 2431 if (add_default_attributes()) 2432 goto out; 2433 2434 if (stat_config.cgroup_list) { 2435 if (nr_cgroups > 0) { 2436 pr_err("--cgroup and --for-each-cgroup cannot be used together\n"); 2437 parse_options_usage(stat_usage, stat_options, "G", 1); 2438 parse_options_usage(NULL, stat_options, "for-each-cgroup", 0); 2439 goto out; 2440 } 2441 2442 if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list, 2443 &stat_config.metric_events, true) < 0) { 2444 parse_options_usage(stat_usage, stat_options, 2445 "for-each-cgroup", 0); 2446 goto out; 2447 } 2448 } 2449 2450 if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide)) 2451 target.per_thread = true; 2452 2453 if (evlist__fix_hybrid_cpus(evsel_list, target.cpu_list)) { 2454 pr_err("failed to use cpu list %s\n", target.cpu_list); 2455 goto out; 2456 } 2457 2458 target.hybrid = perf_pmu__has_hybrid(); 2459 if (evlist__create_maps(evsel_list, &target) < 0) { 2460 if (target__has_task(&target)) { 2461 pr_err("Problems finding threads of monitor\n"); 2462 parse_options_usage(stat_usage, stat_options, "p", 1); 2463 parse_options_usage(NULL, stat_options, "t", 1); 2464 } else if (target__has_cpu(&target)) { 2465 perror("failed to parse CPUs map"); 2466 parse_options_usage(stat_usage, stat_options, "C", 1); 2467 parse_options_usage(NULL, stat_options, "a", 1); 2468 } 2469 goto out; 2470 } 2471 2472 evlist__check_cpu_maps(evsel_list); 2473 2474 /* 2475 * Initialize thread_map with comm names, 2476 * so we could print it out on output. 2477 */ 2478 if (stat_config.aggr_mode == AGGR_THREAD) { 2479 thread_map__read_comms(evsel_list->core.threads); 2480 if (target.system_wide) { 2481 if (runtime_stat_new(&stat_config, 2482 perf_thread_map__nr(evsel_list->core.threads))) { 2483 goto out; 2484 } 2485 } 2486 } 2487 2488 if (stat_config.aggr_mode == AGGR_NODE) 2489 cpu__setup_cpunode_map(); 2490 2491 if (stat_config.times && interval) 2492 interval_count = true; 2493 else if (stat_config.times && !interval) { 2494 pr_err("interval-count option should be used together with " 2495 "interval-print.\n"); 2496 parse_options_usage(stat_usage, stat_options, "interval-count", 0); 2497 parse_options_usage(stat_usage, stat_options, "I", 1); 2498 goto out; 2499 } 2500 2501 if (timeout && timeout < 100) { 2502 if (timeout < 10) { 2503 pr_err("timeout must be >= 10ms.\n"); 2504 parse_options_usage(stat_usage, stat_options, "timeout", 0); 2505 goto out; 2506 } else 2507 pr_warning("timeout < 100ms. " 2508 "The overhead percentage could be high in some cases. " 2509 "Please proceed with caution.\n"); 2510 } 2511 if (timeout && interval) { 2512 pr_err("timeout option is not supported with interval-print.\n"); 2513 parse_options_usage(stat_usage, stat_options, "timeout", 0); 2514 parse_options_usage(stat_usage, stat_options, "I", 1); 2515 goto out; 2516 } 2517 2518 if (evlist__alloc_stats(evsel_list, interval)) 2519 goto out; 2520 2521 if (perf_stat_init_aggr_mode()) 2522 goto out; 2523 2524 /* 2525 * Set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless 2526 * while avoiding that older tools show confusing messages. 2527 * 2528 * However for pipe sessions we need to keep it zero, 2529 * because script's perf_evsel__check_attr is triggered 2530 * by attr->sample_type != 0, and we can't run it on 2531 * stat sessions. 2532 */ 2533 stat_config.identifier = !(STAT_RECORD && perf_stat.data.is_pipe); 2534 2535 /* 2536 * We dont want to block the signals - that would cause 2537 * child tasks to inherit that and Ctrl-C would not work. 2538 * What we want is for Ctrl-C to work in the exec()-ed 2539 * task, but being ignored by perf stat itself: 2540 */ 2541 atexit(sig_atexit); 2542 if (!forever) 2543 signal(SIGINT, skip_signal); 2544 signal(SIGCHLD, skip_signal); 2545 signal(SIGALRM, skip_signal); 2546 signal(SIGABRT, skip_signal); 2547 2548 if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack)) 2549 goto out; 2550 2551 status = 0; 2552 for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) { 2553 if (stat_config.run_count != 1 && verbose > 0) 2554 fprintf(output, "[ perf stat: executing run #%d ... ]\n", 2555 run_idx + 1); 2556 2557 if (run_idx != 0) 2558 evlist__reset_prev_raw_counts(evsel_list); 2559 2560 status = run_perf_stat(argc, argv, run_idx); 2561 if (forever && status != -1 && !interval) { 2562 print_counters(NULL, argc, argv); 2563 perf_stat__reset_stats(); 2564 } 2565 } 2566 2567 if (!forever && status != -1 && (!interval || stat_config.summary)) 2568 print_counters(NULL, argc, argv); 2569 2570 evlist__finalize_ctlfd(evsel_list); 2571 2572 if (STAT_RECORD) { 2573 /* 2574 * We synthesize the kernel mmap record just so that older tools 2575 * don't emit warnings about not being able to resolve symbols 2576 * due to /proc/sys/kernel/kptr_restrict settings and instead provide 2577 * a saner message about no samples being in the perf.data file. 2578 * 2579 * This also serves to suppress a warning about f_header.data.size == 0 2580 * in header.c at the moment 'perf stat record' gets introduced, which 2581 * is not really needed once we start adding the stat specific PERF_RECORD_ 2582 * records, but the need to suppress the kptr_restrict messages in older 2583 * tools remain -acme 2584 */ 2585 int fd = perf_data__fd(&perf_stat.data); 2586 2587 err = perf_event__synthesize_kernel_mmap((void *)&perf_stat, 2588 process_synthesized_event, 2589 &perf_stat.session->machines.host); 2590 if (err) { 2591 pr_warning("Couldn't synthesize the kernel mmap record, harmless, " 2592 "older tools may produce warnings about this file\n."); 2593 } 2594 2595 if (!interval) { 2596 if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL)) 2597 pr_err("failed to write stat round event\n"); 2598 } 2599 2600 if (!perf_stat.data.is_pipe) { 2601 perf_stat.session->header.data_size += perf_stat.bytes_written; 2602 perf_session__write_header(perf_stat.session, evsel_list, fd, true); 2603 } 2604 2605 evlist__close(evsel_list); 2606 perf_session__delete(perf_stat.session); 2607 } 2608 2609 perf_stat__exit_aggr_mode(); 2610 evlist__free_stats(evsel_list); 2611 out: 2612 if (stat_config.iostat_run) 2613 iostat_release(evsel_list); 2614 2615 zfree(&stat_config.walltime_run); 2616 2617 if (smi_cost && smi_reset) 2618 sysfs__write_int(FREEZE_ON_SMI_PATH, 0); 2619 2620 evlist__delete(evsel_list); 2621 2622 metricgroup__rblist_exit(&stat_config.metric_events); 2623 runtime_stat_delete(&stat_config); 2624 evlist__close_control(stat_config.ctl_fd, stat_config.ctl_fd_ack, &stat_config.ctl_fd_close); 2625 2626 return status; 2627 } 2628