// SPDX-License-Identifier: GPL-2.0-only
/*
 * builtin-stat.c
 *
 * Builtin stat command: Give a precise performance counters summary
 * overview about any workload, CPU or specific PID.
 *
 * Sample output:

   $ perf stat ./hackbench 10

  Time: 0.118

  Performance counter stats for './hackbench 10':

       1708.761321 task-clock                #   11.037 CPUs utilized
            41,190 context-switches          #    0.024 M/sec
             6,735 CPU-migrations            #    0.004 M/sec
            17,318 page-faults               #    0.010 M/sec
     5,205,202,243 cycles                    #    3.046 GHz
     3,856,436,920 stalled-cycles-frontend   #   74.09% frontend cycles idle
     1,600,790,871 stalled-cycles-backend    #   30.75% backend cycles idle
     2,603,501,247 instructions              #    0.50  insns per cycle
                                             #    1.48  stalled cycles per insn
       484,357,498 branches                  #  283.455 M/sec
         6,388,934 branch-misses             #    1.32% of all branches

        0.154822978  seconds time elapsed

 *
 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *   Jaswinder Singh Rajput <jaswinder@kernel.org>
 */

#include "builtin.h"
#include "perf.h"
#include "util/cgroup.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/pmu.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evlist-hybrid.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/header.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/counts.h"
#include "util/topdown.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/string2.h"
#include "util/metricgroup.h"
#include "util/synthetic-events.h"
#include "util/target.h"
#include "util/time-utils.h"
#include "util/top.h"
#include "util/affinity.h"
#include "util/pfm.h"
#include "util/bpf_counter.h"
#include "util/iostat.h"
#include "util/pmu-hybrid.h"
#include "asm/bug.h"

#include <linux/time64.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <sys/prctl.h>
#include <inttypes.h>
#include <locale.h>
#include <math.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <linux/err.h>

#include <linux/ctype.h>
#include <perf/evlist.h>
#include <internal/threadmap.h>

#define DEFAULT_SEPARATOR	" "
#define FREEZE_ON_SMI_PATH	"devices/cpu/freeze_on_smi"

static void print_counters(struct timespec *ts, int argc, const char **argv);

/* Default events used for perf stat -T */
static const char *transaction_attrs = {
	"task-clock,"
	"{"
	"instructions,"
	"cycles,"
	"cpu/cycles-t/,"
	"cpu/tx-start/,"
	"cpu/el-start/,"
	"cpu/cycles-ct/"
	"}"
};
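
/*
 * For reference, the string above uses perf's event-group syntax: events
 * wrapped in "{...}" are scheduled together as one group, and "pmu/event/"
 * names an event on a specific PMU.  A rough command-line equivalent
 * (assuming a TSX-capable "cpu" PMU) would be:
 *
 *   perf stat -e task-clock \
 *       -e '{instructions,cycles,cpu/cycles-t/,cpu/tx-start/,cpu/el-start/,cpu/cycles-ct/}' \
 *       -- ./workload
 */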

/* More limited version when the CPU does not have all events. */
static const char *transaction_limited_attrs = {
	"task-clock,"
	"{"
	"instructions,"
	"cycles,"
	"cpu/cycles-t/,"
	"cpu/tx-start/"
	"}"
};

static const char *topdown_attrs[] = {
	"topdown-total-slots",
	"topdown-slots-retired",
	"topdown-recovery-bubbles",
	"topdown-fetch-bubbles",
	"topdown-slots-issued",
	NULL,
};

static const char *topdown_metric_attrs[] = {
	"slots",
	"topdown-retiring",
	"topdown-bad-spec",
	"topdown-fe-bound",
	"topdown-be-bound",
	NULL,
};

static const char *topdown_metric_L2_attrs[] = {
	"slots",
	"topdown-retiring",
	"topdown-bad-spec",
	"topdown-fe-bound",
	"topdown-be-bound",
	"topdown-heavy-ops",
	"topdown-br-mispredict",
	"topdown-fetch-lat",
	"topdown-mem-bound",
	NULL,
};

#define TOPDOWN_MAX_LEVEL	2
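
/*
 * Level 1 top-down breaks "slots" down into retiring, bad speculation,
 * frontend bound and backend bound; the L2 list above further splits those
 * into heavy-ops, branch-mispredict, fetch-latency and memory-bound.  For
 * example, on a CPU that exposes the L2 events:
 *
 *   perf stat --topdown --td-level=2 -a -- sleep 1
 */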

static const char *smi_cost_attrs = {
	"{"
	"msr/aperf/,"
	"msr/smi/,"
	"cycles"
	"}"
};

static struct evlist *evsel_list;
static bool all_counters_use_bpf = true;

static struct target target = {
	.uid = UINT_MAX,
};

#define METRIC_ONLY_LEN 20

static volatile sig_atomic_t	child_pid	= -1;
static int			detailed_run	= 0;
static bool			transaction_run;
static bool			topdown_run	= false;
static bool			smi_cost	= false;
static bool			smi_reset	= false;
static int			big_num_opt	= -1;
static const char		*pre_cmd	= NULL;
static const char		*post_cmd	= NULL;
static bool			sync_run	= false;
static bool			forever		= false;
static bool			force_metric_only = false;
static struct timespec		ref_time;
static bool			append_file;
static bool			interval_count;
static const char		*output_name;
static int			output_fd;
static char			*metrics;

struct perf_stat {
	bool			 record;
	struct perf_data	 data;
	struct perf_session	*session;
	u64			 bytes_written;
	struct perf_tool	 tool;
	bool			 maps_allocated;
	struct perf_cpu_map	*cpus;
	struct perf_thread_map	*threads;
	enum aggr_mode		 aggr_mode;
};

static struct perf_stat		perf_stat;
#define STAT_RECORD		perf_stat.record

static volatile sig_atomic_t done = 0;

static struct perf_stat_config stat_config = {
	.aggr_mode		= AGGR_GLOBAL,
	.scale			= true,
	.unit_width		= 4, /* strlen("unit") */
	.run_count		= 1,
	.metric_only_len	= METRIC_ONLY_LEN,
	.walltime_nsecs_stats	= &walltime_nsecs_stats,
	.ru_stats		= &ru_stats,
	.big_num		= true,
	.ctl_fd			= -1,
	.ctl_fd_ack		= -1,
	.iostat_run		= false,
};

static bool cpus_map_matched(struct evsel *a, struct evsel *b)
{
	if (!a->core.cpus && !b->core.cpus)
		return true;

	if (!a->core.cpus || !b->core.cpus)
		return false;

	if (perf_cpu_map__nr(a->core.cpus) != perf_cpu_map__nr(b->core.cpus))
		return false;

	for (int i = 0; i < perf_cpu_map__nr(a->core.cpus); i++) {
		if (perf_cpu_map__cpu(a->core.cpus, i).cpu !=
		    perf_cpu_map__cpu(b->core.cpus, i).cpu)
			return false;
	}

	return true;
}

static void evlist__check_cpu_maps(struct evlist *evlist)
{
	struct evsel *evsel, *pos, *leader;
	char buf[1024];

	if (evlist__has_hybrid(evlist))
		evlist__warn_hybrid_group(evlist);

	evlist__for_each_entry(evlist, evsel) {
		leader = evsel__leader(evsel);

		/* Check that leader matches cpus with each member. */
		if (leader == evsel)
			continue;
		if (cpus_map_matched(leader, evsel))
			continue;

		/* If there's a mismatch, disable the group and warn the user. */
		WARN_ONCE(1, "WARNING: grouped events cpus do not match, disabling group:\n");
		evsel__group_desc(leader, buf, sizeof(buf));
		pr_warning("  %s\n", buf);

		if (verbose > 0) {
			cpu_map__snprint(leader->core.cpus, buf, sizeof(buf));
			pr_warning("     %s: %s\n", leader->name, buf);
			cpu_map__snprint(evsel->core.cpus, buf, sizeof(buf));
			pr_warning("     %s: %s\n", evsel->name, buf);
		}

		for_each_group_evsel(pos, leader)
			evsel__remove_from_group(pos, leader);
	}
}

static inline void diff_timespec(struct timespec *r, struct timespec *a,
				 struct timespec *b)
{
	r->tv_sec = a->tv_sec - b->tv_sec;
	if (a->tv_nsec < b->tv_nsec) {
		r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec;
		r->tv_sec--;
	} else {
		r->tv_nsec = a->tv_nsec - b->tv_nsec;
	}
}
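
/*
 * Worked example for the borrow case above: a = {2, 100000000} and
 * b = {1, 900000000}, i.e. 2.1s - 1.9s.  tv_nsec borrows one second from
 * tv_sec, giving r = {0, 200000000} (0.2s).
 */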

static void perf_stat__reset_stats(void)
{
	evlist__reset_stats(evsel_list);
	perf_stat__reset_shadow_stats();
}

static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	perf_stat.bytes_written += event->header.size;
	return 0;
}

static int write_stat_round_event(u64 tm, u64 type)
{
	return perf_event__synthesize_stat_round(NULL, tm, type,
						 process_synthesized_event,
						 NULL);
}

#define WRITE_STAT_ROUND_EVENT(time, interval) \
	write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)

#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)

static int evsel__write_stat_event(struct evsel *counter, int cpu_map_idx, u32 thread,
				   struct perf_counts_values *count)
{
	struct perf_sample_id *sid = SID(counter, cpu_map_idx, thread);
	struct perf_cpu cpu = perf_cpu_map__cpu(evsel__cpus(counter), cpu_map_idx);

	return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
					   process_synthesized_event, NULL);
}

static int read_single_counter(struct evsel *counter, int cpu_map_idx,
			       int thread, struct timespec *rs)
{
	switch (counter->tool_event) {
	case PERF_TOOL_DURATION_TIME: {
		u64 val = rs->tv_nsec + rs->tv_sec * 1000000000ULL;
		struct perf_counts_values *count =
			perf_counts(counter->counts, cpu_map_idx, thread);
		count->ena = count->run = val;
		count->val = val;
		return 0;
	}
	case PERF_TOOL_USER_TIME:
	case PERF_TOOL_SYSTEM_TIME: {
		u64 val;
		struct perf_counts_values *count =
			perf_counts(counter->counts, cpu_map_idx, thread);
		if (counter->tool_event == PERF_TOOL_USER_TIME)
			val = ru_stats.ru_utime_usec_stat.mean;
		else
			val = ru_stats.ru_stime_usec_stat.mean;
		count->ena = count->run = val;
		count->val = val;
		return 0;
	}
	default:
	case PERF_TOOL_NONE:
		return evsel__read_counter(counter, cpu_map_idx, thread);
	case PERF_TOOL_MAX:
		/* This should never be reached */
		return 0;
	}
}
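
/*
 * Tool events are synthesized by perf itself rather than read from the
 * kernel: duration_time is derived from the interval timestamp passed in via
 * @rs, while user_time and system_time are taken from the rusage statistics
 * collected over the run.  For example:
 *
 *   perf stat -e duration_time,user_time,system_time -- ./workload
 */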

/*
 * Read out the results of a single counter:
 * do not aggregate counts across CPUs in system-wide mode
 */
static int read_counter_cpu(struct evsel *counter, struct timespec *rs,
			    int cpu_map_idx)
{
	int nthreads = perf_thread_map__nr(evsel_list->core.threads);
	int thread;

	if (!counter->supported)
		return -ENOENT;

	for (thread = 0; thread < nthreads; thread++) {
		struct perf_counts_values *count;

		count = perf_counts(counter->counts, cpu_map_idx, thread);

		/*
		 * The leader's group read loads data into its group members
		 * (via evsel__read_counter()) and sets their count->loaded.
		 */
		if (!perf_counts__is_loaded(counter->counts, cpu_map_idx, thread) &&
		    read_single_counter(counter, cpu_map_idx, thread, rs)) {
			counter->counts->scaled = -1;
			perf_counts(counter->counts, cpu_map_idx, thread)->ena = 0;
			perf_counts(counter->counts, cpu_map_idx, thread)->run = 0;
			return -1;
		}

		perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, false);

		if (STAT_RECORD) {
			if (evsel__write_stat_event(counter, cpu_map_idx, thread, count)) {
				pr_err("failed to write stat event\n");
				return -1;
			}
		}

		if (verbose > 1) {
			fprintf(stat_config.output,
				"%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
				evsel__name(counter),
				perf_cpu_map__cpu(evsel__cpus(counter),
						  cpu_map_idx).cpu,
				count->val, count->ena, count->run);
		}
	}

	return 0;
}

static int read_affinity_counters(struct timespec *rs)
{
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity saved_affinity, *affinity;

	if (all_counters_use_bpf)
		return 0;

	if (!target__has_cpu(&target) || target__has_per_thread(&target))
		affinity = NULL;
	else if (affinity__setup(&saved_affinity) < 0)
		return -1;
	else
		affinity = &saved_affinity;

	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
		struct evsel *counter = evlist_cpu_itr.evsel;

		if (evsel__is_bpf(counter))
			continue;

		if (!counter->err) {
			counter->err = read_counter_cpu(counter, rs,
							evlist_cpu_itr.cpu_map_idx);
		}
	}
	if (affinity)
		affinity__cleanup(&saved_affinity);

	return 0;
}

static int read_bpf_map_counters(void)
{
	struct evsel *counter;
	int err;

	evlist__for_each_entry(evsel_list, counter) {
		if (!evsel__is_bpf(counter))
			continue;

		err = bpf_counter__read(counter);
		if (err)
			return err;
	}
	return 0;
}

static int read_counters(struct timespec *rs)
{
	if (!stat_config.stop_read_counter) {
		if (read_bpf_map_counters() ||
		    read_affinity_counters(rs))
			return -1;
	}
	return 0;
}

static void process_counters(void)
{
	struct evsel *counter;

	evlist__for_each_entry(evsel_list, counter) {
		if (counter->err)
			pr_debug("failed to read counter %s\n", counter->name);
		if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter))
			pr_warning("failed to process counter %s\n", counter->name);
		counter->err = 0;
	}

	perf_stat_merge_counters(&stat_config, evsel_list);
	perf_stat_process_percore(&stat_config, evsel_list);
	perf_stat_process_shadow_stats(&stat_config, evsel_list);
}

static void process_interval(void)
{
	struct timespec ts, rs;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	diff_timespec(&rs, &ts, &ref_time);

	perf_stat__reset_shadow_per_stat(&rt_stat);
	evlist__reset_aggr_stats(evsel_list);

	if (read_counters(&rs) == 0)
		process_counters();

	if (STAT_RECORD) {
		if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL))
			pr_err("failed to write stat round event\n");
	}

	init_stats(&walltime_nsecs_stats);
	update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL);
	print_counters(&rs, 0, NULL);
}

static bool handle_interval(unsigned int interval, int *times)
{
	if (interval) {
		process_interval();
		if (interval_count && !(--(*times)))
			return true;
	}
	return false;
}

static int enable_counters(void)
{
	struct evsel *evsel;
	int err;

	evlist__for_each_entry(evsel_list, evsel) {
		if (!evsel__is_bpf(evsel))
			continue;

		err = bpf_counter__enable(evsel);
		if (err)
			return err;
	}

	if (!target__enable_on_exec(&target)) {
		if (!all_counters_use_bpf)
			evlist__enable(evsel_list);
	}
	return 0;
}

static void disable_counters(void)
{
	struct evsel *counter;

	/*
	 * If we don't have tracee (attaching to task or cpu), counters may
	 * still be running. To get accurate group ratios, we must stop groups
	 * from counting before reading their constituent counters.
	 */
	if (!target__none(&target)) {
		evlist__for_each_entry(evsel_list, counter)
			bpf_counter__disable(counter);
		if (!all_counters_use_bpf)
			evlist__disable(evsel_list);
	}
}

static volatile sig_atomic_t workload_exec_errno;

/*
 * evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
}

static bool evsel__should_store_id(struct evsel *counter)
{
	return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID;
}

static bool is_target_alive(struct target *_target,
			    struct perf_thread_map *threads)
{
	struct stat st;
	int i;

	if (!target__has_task(_target))
		return true;

	for (i = 0; i < threads->nr; i++) {
		char path[PATH_MAX];

		scnprintf(path, PATH_MAX, "%s/%d", procfs__mountpoint(),
			  threads->map[i].pid);

		if (!stat(path, &st))
			return true;
	}

	return false;
}

static void process_evlist(struct evlist *evlist, unsigned int interval)
{
	enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;

	if (evlist__ctlfd_process(evlist, &cmd) > 0) {
		switch (cmd) {
		case EVLIST_CTL_CMD_ENABLE:
			__fallthrough;
		case EVLIST_CTL_CMD_DISABLE:
			if (interval)
				process_interval();
			break;
		case EVLIST_CTL_CMD_SNAPSHOT:
		case EVLIST_CTL_CMD_ACK:
		case EVLIST_CTL_CMD_UNSUPPORTED:
		case EVLIST_CTL_CMD_EVLIST:
		case EVLIST_CTL_CMD_STOP:
		case EVLIST_CTL_CMD_PING:
		default:
			break;
		}
	}
}

static void compute_tts(struct timespec *time_start, struct timespec *time_stop,
			int *time_to_sleep)
{
	int tts = *time_to_sleep;
	struct timespec time_diff;

	diff_timespec(&time_diff, time_stop, time_start);

	tts -= time_diff.tv_sec * MSEC_PER_SEC +
	       time_diff.tv_nsec / NSEC_PER_MSEC;

	if (tts < 0)
		tts = 0;

	*time_to_sleep = tts;
}
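
/*
 * Example: with sleep_time = 1000ms and 300ms spent handling fd events,
 * compute_tts() trims the next poll timeout to 700ms, keeping interval
 * boundaries roughly aligned.
 */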

static int dispatch_events(bool forks, int timeout, int interval, int *times)
{
	int child_exited = 0, status = 0;
	int time_to_sleep, sleep_time;
	struct timespec time_start, time_stop;

	if (interval)
		sleep_time = interval;
	else if (timeout)
		sleep_time = timeout;
	else
		sleep_time = 1000;

	time_to_sleep = sleep_time;

	while (!done) {
		if (forks)
			child_exited = waitpid(child_pid, &status, WNOHANG);
		else
			child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0;

		if (child_exited)
			break;

		clock_gettime(CLOCK_MONOTONIC, &time_start);
		if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) { /* poll timeout or EINTR */
			if (timeout || handle_interval(interval, times))
				break;
			time_to_sleep = sleep_time;
		} else { /* fd revent */
			process_evlist(evsel_list, interval);
			clock_gettime(CLOCK_MONOTONIC, &time_stop);
			compute_tts(&time_start, &time_stop, &time_to_sleep);
		}
	}

	return status;
}

enum counter_recovery {
	COUNTER_SKIP,
	COUNTER_RETRY,
	COUNTER_FATAL,
};
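
/*
 * stat_handle_error() maps an open failure to one of the above:
 * COUNTER_SKIP leaves the event unsupported and moves on, COUNTER_RETRY
 * re-attempts the open (e.g. after evsel__fallback() picked a substitute
 * event), and COUNTER_FATAL aborts the whole run.
 */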

static enum counter_recovery stat_handle_error(struct evsel *counter)
{
	char msg[BUFSIZ];
	/*
	 * PPC returns ENXIO for HW counters until 2.6.37
	 * (behavior changed with commit b0a873e).
	 */
	if (errno == EINVAL || errno == ENOSYS ||
	    errno == ENOENT || errno == EOPNOTSUPP ||
	    errno == ENXIO) {
		if (verbose > 0)
			ui__warning("%s event is not supported by the kernel.\n",
				    evsel__name(counter));
		counter->supported = false;
		/*
		 * errored is a sticky flag that means one of the counter's
		 * CPU events had a problem and needs to be reexamined.
		 */
		counter->errored = true;

		if ((evsel__leader(counter) != counter) ||
		    !(counter->core.leader->nr_members > 1))
			return COUNTER_SKIP;
	} else if (evsel__fallback(counter, errno, msg, sizeof(msg))) {
		if (verbose > 0)
			ui__warning("%s\n", msg);
		return COUNTER_RETRY;
	} else if (target__has_per_thread(&target) &&
		   evsel_list->core.threads &&
		   evsel_list->core.threads->err_thread != -1) {
		/*
		 * For global --per-thread case, skip current
		 * error thread.
		 */
		if (!thread_map__remove(evsel_list->core.threads,
					evsel_list->core.threads->err_thread)) {
			evsel_list->core.threads->err_thread = -1;
			return COUNTER_RETRY;
		}
	}

	evsel__open_strerror(counter, &target, errno, msg, sizeof(msg));
	ui__error("%s\n", msg);

	if (child_pid != -1)
		kill(child_pid, SIGTERM);
	return COUNTER_FATAL;
}

static int __run_perf_stat(int argc, const char **argv, int run_idx)
{
	int interval = stat_config.interval;
	int times = stat_config.times;
	int timeout = stat_config.timeout;
	char msg[BUFSIZ];
	unsigned long long t0, t1;
	struct evsel *counter;
	size_t l;
	int status = 0;
	const bool forks = (argc > 0);
	bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity saved_affinity, *affinity = NULL;
	int err;
	bool second_pass = false;

	if (forks) {
		if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) {
			perror("failed to prepare workload");
			return -1;
		}
		child_pid = evsel_list->workload.pid;
	}

	if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) {
		if (affinity__setup(&saved_affinity) < 0)
			return -1;
		affinity = &saved_affinity;
	}

	evlist__for_each_entry(evsel_list, counter) {
		counter->reset_group = false;
		if (bpf_counter__load(counter, &target))
			return -1;
		if (!evsel__is_bpf(counter))
			all_counters_use_bpf = false;
	}

	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
		counter = evlist_cpu_itr.evsel;

		/*
		 * bperf calls evsel__open_per_cpu() in bperf__load(), so
		 * no need to call it again here.
		 */
		if (target.use_bpf)
			break;

		if (counter->reset_group || counter->errored)
			continue;
		if (evsel__is_bpf(counter))
			continue;
try_again:
		if (create_perf_stat_counter(counter, &stat_config, &target,
					     evlist_cpu_itr.cpu_map_idx) < 0) {

			/*
			 * Weak group failed. We cannot just undo this here
			 * because earlier CPUs might be in group mode, and the kernel
			 * doesn't support mixing group and non group reads. Defer
			 * it to later.
			 * Don't close here because we're in the wrong affinity.
			 */
			if ((errno == EINVAL || errno == EBADF) &&
			    evsel__leader(counter) != counter &&
			    counter->weak_group) {
				evlist__reset_weak_group(evsel_list, counter, false);
				assert(counter->reset_group);
				second_pass = true;
				continue;
			}

			switch (stat_handle_error(counter)) {
			case COUNTER_FATAL:
				return -1;
			case COUNTER_RETRY:
				goto try_again;
			case COUNTER_SKIP:
				continue;
			default:
				break;
			}

		}
		counter->supported = true;
	}

	if (second_pass) {
		/*
		 * Now redo all the weak group after closing them,
		 * and also close errored counters.
		 */

		/* First close errored or weak retry */
		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
			counter = evlist_cpu_itr.evsel;

			if (!counter->reset_group && !counter->errored)
				continue;

			perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx);
		}
		/* Now reopen weak */
		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
			counter = evlist_cpu_itr.evsel;

			if (!counter->reset_group)
				continue;
try_again_reset:
			pr_debug2("reopening weak %s\n", evsel__name(counter));
			if (create_perf_stat_counter(counter, &stat_config, &target,
						     evlist_cpu_itr.cpu_map_idx) < 0) {

				switch (stat_handle_error(counter)) {
				case COUNTER_FATAL:
					return -1;
				case COUNTER_RETRY:
					goto try_again_reset;
				case COUNTER_SKIP:
					continue;
				default:
					break;
				}
			}
			counter->supported = true;
		}
	}
	affinity__cleanup(affinity);

	evlist__for_each_entry(evsel_list, counter) {
		if (!counter->supported) {
			perf_evsel__free_fd(&counter->core);
			continue;
		}

		l = strlen(counter->unit);
		if (l > stat_config.unit_width)
			stat_config.unit_width = l;

		if (evsel__should_store_id(counter) &&
		    evsel__store_ids(counter, evsel_list))
			return -1;
	}

	if (evlist__apply_filters(evsel_list, &counter)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			counter->filter, evsel__name(counter), errno,
			str_error_r(errno, msg, sizeof(msg)));
		return -1;
	}

	if (STAT_RECORD) {
		int fd = perf_data__fd(&perf_stat.data);

		if (is_pipe) {
			err = perf_header__write_pipe(perf_data__fd(&perf_stat.data));
		} else {
			err = perf_session__write_header(perf_stat.session, evsel_list,
							 fd, false);
		}

		if (err < 0)
			return err;

		err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list,
							 process_synthesized_event, is_pipe);
		if (err < 0)
			return err;
	}

	if (target.initial_delay) {
		pr_info(EVLIST_DISABLED_MSG);
	} else {
		err = enable_counters();
		if (err)
			return -1;
	}

	/* Exec the command, if any */
	if (forks)
		evlist__start_workload(evsel_list);

	if (target.initial_delay > 0) {
		usleep(target.initial_delay * USEC_PER_MSEC);
		err = enable_counters();
		if (err)
			return -1;

		pr_info(EVLIST_ENABLED_MSG);
	}

	t0 = rdclock();
	clock_gettime(CLOCK_MONOTONIC, &ref_time);

	if (forks) {
		if (interval || timeout || evlist__ctlfd_initialized(evsel_list))
			status = dispatch_events(forks, timeout, interval, &times);
		if (child_pid != -1) {
			if (timeout)
				kill(child_pid, SIGTERM);
			wait4(child_pid, &status, 0, &stat_config.ru_data);
		}

		if (workload_exec_errno) {
			const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
			pr_err("Workload failed: %s\n", emsg);
			return -1;
		}

		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), argv[0]);
	} else {
		status = dispatch_events(forks, timeout, interval, &times);
	}

	disable_counters();

	t1 = rdclock();

	if (stat_config.walltime_run_table)
		stat_config.walltime_run[run_idx] = t1 - t0;

	if (interval && stat_config.summary) {
		stat_config.interval = 0;
		stat_config.stop_read_counter = true;
		init_stats(&walltime_nsecs_stats);
		update_stats(&walltime_nsecs_stats, t1 - t0);

		evlist__copy_prev_raw_counts(evsel_list);
		evlist__reset_prev_raw_counts(evsel_list);
		evlist__reset_aggr_stats(evsel_list);
		perf_stat__reset_shadow_per_stat(&rt_stat);
	} else {
		update_stats(&walltime_nsecs_stats, t1 - t0);
		update_rusage_stats(&ru_stats, &stat_config.ru_data);
	}

	/*
	 * Closing a group leader splits the group, and as we only disable
	 * group leaders, results in remaining events becoming enabled. To
	 * avoid arbitrary skew, we must read all counters before closing any
	 * group leaders.
	 */
	if (read_counters(&(struct timespec) { .tv_nsec = t1-t0 }) == 0)
		process_counters();

	/*
	 * We need to keep evsel_list alive, because it's processed
	 * later; the evsel_list will be closed after.
	 */
	if (!STAT_RECORD)
		evlist__close(evsel_list);

	return WEXITSTATUS(status);
}
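
/*
 * Note on the second pass above: a weak group such as
 * "{cycles,instructions,cache-misses}:W" is first opened as a real group;
 * if the PMU cannot schedule it, the group is broken up by
 * evlist__reset_weak_group() and its members are reopened individually once
 * the already-opened CPUs have been closed in the right affinity.
 */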

static int run_perf_stat(int argc, const char **argv, int run_idx)
{
	int ret;

	if (pre_cmd) {
		ret = system(pre_cmd);
		if (ret)
			return ret;
	}

	if (sync_run)
		sync();

	ret = __run_perf_stat(argc, argv, run_idx);
	if (ret)
		return ret;

	if (post_cmd) {
		ret = system(post_cmd);
		if (ret)
			return ret;
	}

	return ret;
}
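
/*
 * Example: "perf stat --pre 'make -s clean' --post 'make -s check' -r 3 make"
 * runs pre_cmd before and post_cmd after each of the three measured builds;
 * a non-zero exit status from either command aborts the run.
 */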

static void print_counters(struct timespec *ts, int argc, const char **argv)
{
	/* Do not print anything if we record to the pipe. */
	if (STAT_RECORD && perf_stat.data.is_pipe)
		return;
	if (quiet)
		return;

	evlist__print_counters(evsel_list, &stat_config, &target, ts, argc, argv);
}

static volatile sig_atomic_t signr = -1;

static void skip_signal(int signo)
{
	if ((child_pid == -1) || stat_config.interval)
		done = 1;

	signr = signo;
	/*
	 * Render child_pid harmless so that we won't send SIGTERM to a
	 * random process in case of a race condition and fast PID recycling.
	 */
	child_pid = -1;
}

static void sig_atexit(void)
{
	sigset_t set, oset;

	/*
	 * Avoid a race condition with the SIGCHLD handler in skip_signal(),
	 * which modifies child_pid; the goal is to avoid sending SIGTERM to
	 * a random process.
	 */
	sigemptyset(&set);
	sigaddset(&set, SIGCHLD);
	sigprocmask(SIG_BLOCK, &set, &oset);

	if (child_pid != -1)
		kill(child_pid, SIGTERM);

	sigprocmask(SIG_SETMASK, &oset, NULL);

	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}

void perf_stat__set_big_num(int set)
{
	stat_config.big_num = (set != 0);
}

void perf_stat__set_no_csv_summary(int set)
{
	stat_config.no_csv_summary = (set != 0);
}

static int stat__set_big_num(const struct option *opt __maybe_unused,
			     const char *s __maybe_unused, int unset)
{
	big_num_opt = unset ? 0 : 1;
	perf_stat__set_big_num(!unset);
	return 0;
}

static int enable_metric_only(const struct option *opt __maybe_unused,
			      const char *s __maybe_unused, int unset)
{
	force_metric_only = true;
	stat_config.metric_only = !unset;
	return 0;
}

static int append_metric_groups(const struct option *opt __maybe_unused,
				const char *str,
				int unset __maybe_unused)
{
	if (metrics) {
		char *tmp;

		if (asprintf(&tmp, "%s,%s", metrics, str) < 0)
			return -ENOMEM;
		free(metrics);
		metrics = tmp;
	} else {
		metrics = strdup(str);
		if (!metrics)
			return -ENOMEM;
	}
	return 0;
}
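
/*
 * Repeated -M options accumulate, so "-M TopdownL1 -M TopdownL2" builds the
 * string "TopdownL1,TopdownL2", which is later handed to the metric group
 * parser.
 */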

static int parse_control_option(const struct option *opt,
				const char *str,
				int unset __maybe_unused)
{
	struct perf_stat_config *config = opt->value;

	return evlist__parse_control(str, &config->ctl_fd, &config->ctl_fd_ack, &config->ctl_fd_close);
}

static int parse_stat_cgroups(const struct option *opt,
			      const char *str, int unset)
{
	if (stat_config.cgroup_list) {
		pr_err("--cgroup and --for-each-cgroup cannot be used together\n");
		return -1;
	}

	return parse_cgroups(opt, str, unset);
}

static int parse_hybrid_type(const struct option *opt,
			     const char *str,
			     int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;

	if (!list_empty(&evlist->core.entries)) {
		fprintf(stderr, "Must define cputype before events/metrics\n");
		return -1;
	}

	evlist->hybrid_pmu_name = perf_pmu__hybrid_type_to_pmu(str);
	if (!evlist->hybrid_pmu_name) {
		fprintf(stderr, "--cputype %s is not supported!\n", str);
		return -1;
	}

	return 0;
}

static struct option stat_options[] = {
	OPT_BOOLEAN('T', "transaction", &transaction_run,
		    "hardware transaction statistics"),
	OPT_CALLBACK('e', "event", &evsel_list, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &evsel_list, "filter",
		     "event filter", parse_filter),
	OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_STRING('p', "pid", &target.pid, "pid",
		   "stat events on existing process id"),
	OPT_STRING('t', "tid", &target.tid, "tid",
		   "stat events on existing thread id"),
#ifdef HAVE_BPF_SKEL
	OPT_STRING('b', "bpf-prog", &target.bpf_str, "bpf-prog-id",
		   "stat events on existing bpf program id"),
	OPT_BOOLEAN(0, "bpf-counters", &target.use_bpf,
		    "use bpf program to count events"),
	OPT_STRING(0, "bpf-attr-map", &target.attr_map, "attr-map-path",
		   "path to perf_event_attr map"),
#endif
	OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_BOOLEAN(0, "scale", &stat_config.scale,
		    "Use --no-scale to disable counter scaling for multiplexing"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_INTEGER('r', "repeat", &stat_config.run_count,
		    "repeat command and print average + stddev (max: 100, forever: 0)"),
	OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table,
		    "display details about each run (only with -r option)"),
	OPT_BOOLEAN('n', "null", &stat_config.null_run,
		    "null run - don't start any counters"),
	OPT_INCR('d', "detailed", &detailed_run,
		 "detailed run - start a lot of events"),
	OPT_BOOLEAN('S', "sync", &sync_run,
		    "call sync() before starting a run"),
	OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
			   "print large numbers with thousands' separators",
			   stat__set_big_num),
	OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
		   "list of cpus to monitor in system-wide"),
	OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
		     "disable CPU count aggregation", AGGR_NONE),
	OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"),
	OPT_BOOLEAN(0, "hybrid-merge", &stat_config.hybrid_merge,
		    "Merge identical named hybrid events"),
	OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator",
		   "print counts with custom separator"),
	OPT_BOOLEAN('j', "json-output", &stat_config.json_output,
		    "print counts in JSON format"),
	OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
		     "monitor event in cgroup name only", parse_stat_cgroups),
	OPT_STRING(0, "for-each-cgroup", &stat_config.cgroup_list, "name",
		   "expand events for each cgroup"),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
	OPT_INTEGER(0, "log-fd", &output_fd,
		    "log output to fd, instead of stderr"),
	OPT_STRING(0, "pre", &pre_cmd, "command",
		   "command to run prior to the measured command"),
	OPT_STRING(0, "post", &post_cmd, "command",
		   "command to run after the measured command"),
	OPT_UINTEGER('I', "interval-print", &stat_config.interval,
		     "print counts at regular intervals in ms "
		     "(overhead is possible for values <= 100ms)"),
	OPT_INTEGER(0, "interval-count", &stat_config.times,
		    "print counts for fixed number of times"),
	OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear,
		    "clear screen between intervals"),
	OPT_UINTEGER(0, "timeout", &stat_config.timeout,
		     "stop workload and print counts after a timeout period in ms (>= 10ms)"),
	OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
		     "aggregate counts per processor socket", AGGR_SOCKET),
	OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode,
		     "aggregate counts per processor die", AGGR_DIE),
	OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
		     "aggregate counts per physical processor core", AGGR_CORE),
	OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
		     "aggregate counts per thread", AGGR_THREAD),
	OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode,
		     "aggregate counts per NUMA node", AGGR_NODE),
	OPT_INTEGER('D', "delay", &target.initial_delay,
		    "ms to wait before starting measurement after program start (-1: start with events disabled)"),
	OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
			   "Only print computed metrics. No raw values", enable_metric_only),
	OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group,
		    "don't group metric events, impacts multiplexing"),
	OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge,
		    "don't try to share events between metrics in a group"),
	OPT_BOOLEAN(0, "topdown", &topdown_run,
		    "measure top-down statistics"),
	OPT_UINTEGER(0, "td-level", &stat_config.topdown_level,
		     "Set the metrics level for the top-down statistics (0: max level)"),
	OPT_BOOLEAN(0, "smi-cost", &smi_cost,
		    "measure SMI cost"),
	OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
		     "monitor specified metrics or metric groups (separated by ,)",
		     append_metric_groups),
	OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel,
			 "Configure all used events to run in kernel space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user,
			 "Configure all used events to run in user space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN(0, "percore-show-thread", &stat_config.percore_show_thread,
		    "Use with 'percore' event qualifier to show the event "
		    "counts of one hardware thread by summing up the counts of "
		    "all hardware threads on the same physical core"),
	OPT_BOOLEAN(0, "summary", &stat_config.summary,
		    "print summary for interval mode"),
	OPT_BOOLEAN(0, "no-csv-summary", &stat_config.no_csv_summary,
		    "don't print 'summary' for CSV summary output"),
	OPT_BOOLEAN(0, "quiet", &quiet,
		    "don't print any output, messages or warnings (useful with record)"),
	OPT_CALLBACK(0, "cputype", &evsel_list, "hybrid cpu type",
		     "Only enable events on CPUs of this type on a hybrid "
		     "platform (e.g. core or atom)",
		     parse_hybrid_type),
#ifdef HAVE_LIBPFM
	OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
		     "libpfm4 event selector. use 'perf list' to list available events",
		     parse_libpfm_events_option),
#endif
	OPT_CALLBACK(0, "control", &stat_config, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
		     "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events).\n"
		     "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
		     "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
		     parse_control_option),
	OPT_CALLBACK_OPTARG(0, "iostat", &evsel_list, &stat_config, "default",
			    "measure I/O performance metrics provided by arch/platform",
			    iostat_parse),
	OPT_END()
};

static const char *const aggr_mode__string[] = {
	[AGGR_CORE] = "core",
	[AGGR_DIE] = "die",
	[AGGR_GLOBAL] = "global",
	[AGGR_NODE] = "node",
	[AGGR_NONE] = "none",
	[AGGR_SOCKET] = "socket",
	[AGGR_THREAD] = "thread",
	[AGGR_UNSET] = "unset",
};

static struct aggr_cpu_id perf_stat__get_socket(struct perf_stat_config *config __maybe_unused,
						struct perf_cpu cpu)
{
	return aggr_cpu_id__socket(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_die(struct perf_stat_config *config __maybe_unused,
					     struct perf_cpu cpu)
{
	return aggr_cpu_id__die(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_core(struct perf_stat_config *config __maybe_unused,
					      struct perf_cpu cpu)
{
	return aggr_cpu_id__core(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_node(struct perf_stat_config *config __maybe_unused,
					      struct perf_cpu cpu)
{
	return aggr_cpu_id__node(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_global(struct perf_stat_config *config __maybe_unused,
						struct perf_cpu cpu)
{
	return aggr_cpu_id__global(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_cpu(struct perf_stat_config *config __maybe_unused,
					     struct perf_cpu cpu)
{
	return aggr_cpu_id__cpu(cpu, /*data=*/NULL);
}

static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config,
					      aggr_get_id_t get_id, struct perf_cpu cpu)
{
	struct aggr_cpu_id id;

	/* per-process mode - should use global aggr mode */
	if (cpu.cpu == -1)
		return get_id(config, cpu);

	if (aggr_cpu_id__is_empty(&config->cpus_aggr_map->map[cpu.cpu]))
		config->cpus_aggr_map->map[cpu.cpu] = get_id(config, cpu);

	id = config->cpus_aggr_map->map[cpu.cpu];
	return id;
}
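
/*
 * The *_cached wrappers below route lookups through cpus_aggr_map so the
 * topology query for a given CPU is performed only once; later calls for the
 * same CPU return the stored aggr_cpu_id.
 */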

static struct aggr_cpu_id perf_stat__get_socket_cached(struct perf_stat_config *config,
						       struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_socket, cpu);
}

static struct aggr_cpu_id perf_stat__get_die_cached(struct perf_stat_config *config,
						    struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_die, cpu);
}

static struct aggr_cpu_id perf_stat__get_core_cached(struct perf_stat_config *config,
						     struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_core, cpu);
}

static struct aggr_cpu_id perf_stat__get_node_cached(struct perf_stat_config *config,
						     struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_node, cpu);
}

static struct aggr_cpu_id perf_stat__get_global_cached(struct perf_stat_config *config,
						       struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_global, cpu);
}

static struct aggr_cpu_id perf_stat__get_cpu_cached(struct perf_stat_config *config,
						    struct perf_cpu cpu)
{
	return perf_stat__get_aggr(config, perf_stat__get_cpu, cpu);
}

static aggr_cpu_id_get_t aggr_mode__get_aggr(enum aggr_mode aggr_mode)
{
	switch (aggr_mode) {
	case AGGR_SOCKET:
		return aggr_cpu_id__socket;
	case AGGR_DIE:
		return aggr_cpu_id__die;
	case AGGR_CORE:
		return aggr_cpu_id__core;
	case AGGR_NODE:
		return aggr_cpu_id__node;
	case AGGR_NONE:
		return aggr_cpu_id__cpu;
	case AGGR_GLOBAL:
		return aggr_cpu_id__global;
	case AGGR_THREAD:
	case AGGR_UNSET:
	case AGGR_MAX:
	default:
		return NULL;
	}
}

static aggr_get_id_t aggr_mode__get_id(enum aggr_mode aggr_mode)
{
	switch (aggr_mode) {
	case AGGR_SOCKET:
		return perf_stat__get_socket_cached;
	case AGGR_DIE:
		return perf_stat__get_die_cached;
	case AGGR_CORE:
		return perf_stat__get_core_cached;
	case AGGR_NODE:
		return perf_stat__get_node_cached;
	case AGGR_NONE:
		return perf_stat__get_cpu_cached;
	case AGGR_GLOBAL:
		return perf_stat__get_global_cached;
	case AGGR_THREAD:
	case AGGR_UNSET:
	case AGGR_MAX:
	default:
		return NULL;
	}
}

static int perf_stat_init_aggr_mode(void)
{
	int nr;
	aggr_cpu_id_get_t get_id = aggr_mode__get_aggr(stat_config.aggr_mode);

	if (get_id) {
		bool needs_sort = stat_config.aggr_mode != AGGR_NONE;

		stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
							 get_id, /*data=*/NULL, needs_sort);
		if (!stat_config.aggr_map) {
			pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]);
			return -1;
		}
		stat_config.aggr_get_id = aggr_mode__get_id(stat_config.aggr_mode);
	}

	if (stat_config.aggr_mode == AGGR_THREAD) {
		nr = perf_thread_map__nr(evsel_list->core.threads);
		stat_config.aggr_map = cpu_aggr_map__empty_new(nr);
		if (stat_config.aggr_map == NULL)
			return -ENOMEM;

		for (int s = 0; s < nr; s++) {
			struct aggr_cpu_id id = aggr_cpu_id__empty();

			id.thread_idx = s;
			stat_config.aggr_map->map[s] = id;
		}
		return 0;
	}

	/*
	 * The evsel_list->cpus is the base we operate on,
	 * taking the highest cpu number to be the size of
	 * the aggregation translate cpumap.
	 */
	if (evsel_list->core.user_requested_cpus)
		nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
	else
		nr = 0;
	stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1);
	return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
}
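
/*
 * Example: with user_requested_cpus "0-7" the highest CPU number is 7, so
 * cpus_aggr_map gets 8 slots, indexed directly by cpu.cpu in
 * perf_stat__get_aggr().
 */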

static void cpu_aggr_map__delete(struct cpu_aggr_map *map)
{
	if (map) {
		WARN_ONCE(refcount_read(&map->refcnt) != 0,
			  "cpu_aggr_map refcnt unbalanced\n");
		free(map);
	}
}

static void cpu_aggr_map__put(struct cpu_aggr_map *map)
{
	if (map && refcount_dec_and_test(&map->refcnt))
		cpu_aggr_map__delete(map);
}

static void perf_stat__exit_aggr_mode(void)
{
	cpu_aggr_map__put(stat_config.aggr_map);
	cpu_aggr_map__put(stat_config.cpus_aggr_map);
	stat_config.aggr_map = NULL;
	stat_config.cpus_aggr_map = NULL;
}

static struct aggr_cpu_id perf_env__get_socket_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
	struct perf_env *env = data;
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	if (cpu.cpu != -1)
		id.socket = env->cpu[cpu.cpu].socket_id;

	return id;
}

static struct aggr_cpu_id perf_env__get_die_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
	struct perf_env *env = data;
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	if (cpu.cpu != -1) {
		/*
		 * die_id is relative to socket, so start
		 * with the socket ID and then add die to
		 * make a unique ID.
		 */
		id.socket = env->cpu[cpu.cpu].socket_id;
		id.die = env->cpu[cpu.cpu].die_id;
	}

	return id;
}

static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
	struct perf_env *env = data;
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	if (cpu.cpu != -1) {
		/*
		 * core_id is relative to socket and die,
		 * we need a global id. So we set
		 * socket, die id and core id
		 */
		id.socket = env->cpu[cpu.cpu].socket_id;
		id.die = env->cpu[cpu.cpu].die_id;
		id.core = env->cpu[cpu.cpu].core_id;
	}

	return id;
}
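
/*
 * Example of the composite IDs built above: two cores that both have
 * core_id 0 but sit on different sockets yield distinct aggregation IDs,
 * (socket=0, die=0, core=0) vs (socket=1, die=0, core=0).
 */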

static struct aggr_cpu_id perf_env__get_cpu_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
	struct perf_env *env = data;
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	if (cpu.cpu != -1) {
		/*
		 * core_id is relative to socket and die,
		 * we need a global id. So we set
		 * socket, die id and core id
		 */
		id.socket = env->cpu[cpu.cpu].socket_id;
		id.die = env->cpu[cpu.cpu].die_id;
		id.core = env->cpu[cpu.cpu].core_id;
		id.cpu = cpu;
	}

	return id;
}

static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	id.node = perf_env__numa_node(data, cpu);
	return id;
}

static struct aggr_cpu_id perf_env__get_global_aggr_by_cpu(struct perf_cpu cpu __maybe_unused,
							   void *data __maybe_unused)
{
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	/* it always aggregates to CPU 0 */
	id.cpu = (struct perf_cpu){ .cpu = 0 };
	return id;
}

static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused,
						     struct perf_cpu cpu)
{
	return perf_env__get_socket_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}

static struct aggr_cpu_id perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused,
						  struct perf_cpu cpu)
{
	return perf_env__get_die_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}

static struct aggr_cpu_id perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused,
						   struct perf_cpu cpu)
{
	return perf_env__get_core_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}

static struct aggr_cpu_id perf_stat__get_cpu_file(struct perf_stat_config *config __maybe_unused,
						  struct perf_cpu cpu)
{
	return perf_env__get_cpu_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}

static struct aggr_cpu_id perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused,
						   struct perf_cpu cpu)
{
	return perf_env__get_node_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}

static struct aggr_cpu_id perf_stat__get_global_file(struct perf_stat_config *config __maybe_unused,
						     struct perf_cpu cpu)
{
	return perf_env__get_global_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}

static aggr_cpu_id_get_t aggr_mode__get_aggr_file(enum aggr_mode aggr_mode)
{
	switch (aggr_mode) {
	case AGGR_SOCKET:
		return perf_env__get_socket_aggr_by_cpu;
	case AGGR_DIE:
		return perf_env__get_die_aggr_by_cpu;
	case AGGR_CORE:
		return perf_env__get_core_aggr_by_cpu;
	case AGGR_NODE:
		return perf_env__get_node_aggr_by_cpu;
	case AGGR_GLOBAL:
		return perf_env__get_global_aggr_by_cpu;
	case AGGR_NONE:
		return perf_env__get_cpu_aggr_by_cpu;
	case AGGR_THREAD:
	case AGGR_UNSET:
	case AGGR_MAX:
	default:
		return NULL;
	}
}

static aggr_get_id_t aggr_mode__get_id_file(enum aggr_mode aggr_mode)
{
	switch (aggr_mode) {
	case AGGR_SOCKET:
		return perf_stat__get_socket_file;
	case AGGR_DIE:
		return perf_stat__get_die_file;
	case AGGR_CORE:
		return perf_stat__get_core_file;
	case AGGR_NODE:
		return perf_stat__get_node_file;
	case AGGR_GLOBAL:
		return perf_stat__get_global_file;
	case AGGR_NONE:
		return perf_stat__get_cpu_file;
	case AGGR_THREAD:
	case AGGR_UNSET:
	case AGGR_MAX:
	default:
		return NULL;
	}
}

static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
{
	struct perf_env *env = &st->session->header.env;
	aggr_cpu_id_get_t get_id = aggr_mode__get_aggr_file(stat_config.aggr_mode);
	bool needs_sort = stat_config.aggr_mode != AGGR_NONE;

	if (stat_config.aggr_mode == AGGR_THREAD) {
		int nr = perf_thread_map__nr(evsel_list->core.threads);

		stat_config.aggr_map = cpu_aggr_map__empty_new(nr);
		if (stat_config.aggr_map == NULL)
			return -ENOMEM;

		for (int s = 0; s < nr; s++) {
			struct aggr_cpu_id id = aggr_cpu_id__empty();

			id.thread_idx = s;
			stat_config.aggr_map->map[s] = id;
		}
		return 0;
	}

	if (!get_id)
		return 0;

	stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
						 get_id, env, needs_sort);
	if (!stat_config.aggr_map) {
		pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]);
		return -1;
	}
	stat_config.aggr_get_id = aggr_mode__get_id_file(stat_config.aggr_mode);
	return 0;
}

/*
 * Add default attributes, if there were no attributes specified or
 * if -d/--detailed, -d -d or -d -d -d is used:
 */
static int add_default_attributes(void)
{
	int err;
	struct perf_event_attr default_attrs0[] = {

  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },

  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
};
	struct perf_event_attr frontend_attrs[] = {
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
};
	struct perf_event_attr backend_attrs[] = {
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};
	struct perf_event_attr default_attrs1[] = {
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES },

};

	/*
	 * Detailed stats (-d), covering the L1 and last level data caches:
	 */
	struct perf_event_attr detailed_attrs[] = {

  { .type = PERF_TYPE_HW_CACHE,
    .config = PERF_COUNT_HW_CACHE_L1D << 0 |
	      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
	      (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

  { .type = PERF_TYPE_HW_CACHE,
    .config = PERF_COUNT_HW_CACHE_L1D << 0 |
	      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
	      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

  { .type = PERF_TYPE_HW_CACHE,
    .config = PERF_COUNT_HW_CACHE_LL << 0 |
	      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
	      (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

  { .type = PERF_TYPE_HW_CACHE,
    .config = PERF_COUNT_HW_CACHE_LL << 0 |
	      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
	      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
};

	/*
	 * Very detailed stats (-d -d), covering the instruction cache and the
	 * TLB caches:
	 */
	struct perf_event_attr very_detailed_attrs[] = {

  { .type = PERF_TYPE_HW_CACHE,
    .config = PERF_COUNT_HW_CACHE_L1I << 0 |
	      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
	      (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

  { .type = PERF_TYPE_HW_CACHE,
    .config = PERF_COUNT_HW_CACHE_L1I << 0 |
	      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
	      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

  { .type = PERF_TYPE_HW_CACHE,
    .config = PERF_COUNT_HW_CACHE_DTLB << 0 |
	      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
	      (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

  { .type = PERF_TYPE_HW_CACHE,
    .config = PERF_COUNT_HW_CACHE_DTLB << 0 |
	      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
	      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

  { .type = PERF_TYPE_HW_CACHE,
    .config = PERF_COUNT_HW_CACHE_ITLB << 0 |
	      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
	      (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

  { .type = PERF_TYPE_HW_CACHE,
    .config = PERF_COUNT_HW_CACHE_ITLB << 0 |
	      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
	      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

};

	/*
	 * Very, very detailed stats (-d -d -d), adding prefetch events:
	 */
	struct perf_event_attr very_very_detailed_attrs[] = {

  { .type = PERF_TYPE_HW_CACHE,
    .config = PERF_COUNT_HW_CACHE_L1D << 0 |
	      (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
	      (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

  { .type = PERF_TYPE_HW_CACHE,
    .config = PERF_COUNT_HW_CACHE_L1D << 0 |
	      (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
	      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
};

	struct perf_event_attr default_null_attrs[] = {};
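
	/*
	 * The PERF_TYPE_HW_CACHE .config values above use the kernel's
	 * encoding: (cache id) | (op << 8) | (result << 16).  For example,
	 * L1D read misses are PERF_COUNT_HW_CACHE_L1D |
	 * (PERF_COUNT_HW_CACHE_OP_READ << 8) |
	 * (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) == 0x10000.
	 */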

	/* Set attrs if no event is selected and !null_run: */
	if (stat_config.null_run)
		return 0;

	if (transaction_run) {
		struct parse_events_error errinfo;
		/*
		 * Handle -T as -M transaction. Once platform specific metrics
		 * support has been added to the json files, all architectures
		 * will use this approach. To determine transaction support
		 * on an architecture, test for such a metric name.
		 */
		if (metricgroup__has_metric("transaction")) {
			return metricgroup__parse_groups(evsel_list, "transaction",
							 stat_config.metric_no_group,
							 stat_config.metric_no_merge,
							 stat_config.user_requested_cpu_list,
							 stat_config.system_wide,
							 &stat_config.metric_events);
		}

		parse_events_error__init(&errinfo);
		if (pmu_have_event("cpu", "cycles-ct") &&
		    pmu_have_event("cpu", "el-start"))
			err = parse_events(evsel_list, transaction_attrs,
					   &errinfo);
		else
			err = parse_events(evsel_list,
					   transaction_limited_attrs,
					   &errinfo);
		if (err) {
			fprintf(stderr, "Cannot set up transaction events\n");
			parse_events_error__print(&errinfo, transaction_attrs);
		}
		parse_events_error__exit(&errinfo);
		return err ? -1 : 0;
	}

	if (smi_cost) {
		struct parse_events_error errinfo;
		int smi;

		if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) {
			fprintf(stderr, "freeze_on_smi is not supported.\n");
			return -1;
		}

		if (!smi) {
			if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) {
				fprintf(stderr, "Failed to set freeze_on_smi.\n");
				return -1;
			}
			smi_reset = true;
		}

		if (!pmu_have_event("msr", "aperf") ||
		    !pmu_have_event("msr", "smi")) {
			fprintf(stderr, "To measure SMI cost, it needs "
				"msr/aperf/, msr/smi/ and cpu/cycles/ support\n");
			return -1;
		}
		if (!force_metric_only)
			stat_config.metric_only = true;

		parse_events_error__init(&errinfo);
		err = parse_events(evsel_list, smi_cost_attrs, &errinfo);
		if (err) {
			parse_events_error__print(&errinfo, smi_cost_attrs);
			fprintf(stderr, "Cannot set up SMI cost events\n");
		}
		parse_events_error__exit(&errinfo);
		return err ? -1 : 0;
	}

	if (topdown_run) {
		const char **metric_attrs = topdown_metric_attrs;
		unsigned int max_level = 1;
		char *str = NULL;
		bool warn = false;
		const char *pmu_name = arch_get_topdown_pmu_name(evsel_list, true);

		if (!force_metric_only)
			stat_config.metric_only = true;

		if (pmu_have_event(pmu_name, topdown_metric_L2_attrs[5])) {
			metric_attrs = topdown_metric_L2_attrs;
			max_level = 2;
		}

		if (stat_config.topdown_level > max_level) {
			pr_err("Invalid top-down metrics level. The max level is %u.\n", max_level);
			return -1;
		} else if (!stat_config.topdown_level)
			stat_config.topdown_level = max_level;

		if (topdown_filter_events(metric_attrs, &str, 1, pmu_name) < 0) {
			pr_err("Out of memory\n");
			return -1;
		}

		if (metric_attrs[0] && str) {
			if (!stat_config.interval && !stat_config.metric_only) {
				fprintf(stat_config.output,
					"Topdown accuracy may decrease when measuring long periods.\n"
					"Please print the result regularly, e.g. -I1000\n");
			}
			goto setup_metrics;
		}

		zfree(&str);

		if (stat_config.aggr_mode != AGGR_GLOBAL &&
		    stat_config.aggr_mode != AGGR_CORE) {
			pr_err("top down event configuration requires --per-core mode\n");
			return -1;
		}
		stat_config.aggr_mode = AGGR_CORE;
		if (nr_cgroups || !target__has_cpu(&target)) {
			pr_err("top down event configuration requires system-wide mode (-a)\n");
			return -1;
		}

		if (topdown_filter_events(topdown_attrs, &str,
					  arch_topdown_check_group(&warn),
					  pmu_name) < 0) {
			pr_err("Out of memory\n");
			return -1;
		}

		if (topdown_attrs[0] && str) {
			struct parse_events_error errinfo;

			if (warn)
				arch_topdown_group_warn();
setup_metrics:
			parse_events_error__init(&errinfo);
			err = parse_events(evsel_list, str, &errinfo);
			if (err) {
				fprintf(stderr,
					"Cannot set up top down events %s: %d\n",
					str, err);
				parse_events_error__print(&errinfo, str);
				parse_events_error__exit(&errinfo);
				free(str);
				return -1;
			}
			parse_events_error__exit(&errinfo);
		} else {
			fprintf(stderr, "System does not support topdown\n");
			return -1;
		}
		free(str);
	}
-I1000\n"); 1939 } 1940 goto setup_metrics; 1941 } 1942 1943 zfree(&str); 1944 1945 if (stat_config.aggr_mode != AGGR_GLOBAL && 1946 stat_config.aggr_mode != AGGR_CORE) { 1947 pr_err("top down event configuration requires --per-core mode\n"); 1948 return -1; 1949 } 1950 stat_config.aggr_mode = AGGR_CORE; 1951 if (nr_cgroups || !target__has_cpu(&target)) { 1952 pr_err("top down event configuration requires system-wide mode (-a)\n"); 1953 return -1; 1954 } 1955 1956 if (topdown_filter_events(topdown_attrs, &str, 1957 arch_topdown_check_group(&warn), 1958 pmu_name) < 0) { 1959 pr_err("Out of memory\n"); 1960 return -1; 1961 } 1962 1963 if (topdown_attrs[0] && str) { 1964 struct parse_events_error errinfo; 1965 if (warn) 1966 arch_topdown_group_warn(); 1967 setup_metrics: 1968 parse_events_error__init(&errinfo); 1969 err = parse_events(evsel_list, str, &errinfo); 1970 if (err) { 1971 fprintf(stderr, 1972 "Cannot set up top down events %s: %d\n", 1973 str, err); 1974 parse_events_error__print(&errinfo, str); 1975 parse_events_error__exit(&errinfo); 1976 free(str); 1977 return -1; 1978 } 1979 parse_events_error__exit(&errinfo); 1980 } else { 1981 fprintf(stderr, "System does not support topdown\n"); 1982 return -1; 1983 } 1984 free(str); 1985 } 1986 1987 if (!stat_config.topdown_level) 1988 stat_config.topdown_level = TOPDOWN_MAX_LEVEL; 1989 1990 if (!evsel_list->core.nr_entries) { 1991 if (target__has_cpu(&target)) 1992 default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK; 1993 1994 if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0) 1995 return -1; 1996 if (pmu_have_event("cpu", "stalled-cycles-frontend")) { 1997 if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0) 1998 return -1; 1999 } 2000 if (pmu_have_event("cpu", "stalled-cycles-backend")) { 2001 if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0) 2002 return -1; 2003 } 2004 if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0) 2005 return -1; 2006 /* Platform specific attrs */ 2007 if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0) 2008 return -1; 2009 } 2010 2011 /* Detailed events get appended to the event list: */ 2012 2013 if (detailed_run < 1) 2014 return 0; 2015 2016 /* Append detailed run extra attributes: */ 2017 if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0) 2018 return -1; 2019 2020 if (detailed_run < 2) 2021 return 0; 2022 2023 /* Append very detailed run extra attributes: */ 2024 if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0) 2025 return -1; 2026 2027 if (detailed_run < 3) 2028 return 0; 2029 2030 /* Append very, very detailed run extra attributes: */ 2031 return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); 2032 } 2033 2034 static const char * const stat_record_usage[] = { 2035 "perf stat record [<options>]", 2036 NULL, 2037 }; 2038 2039 static void init_features(struct perf_session *session) 2040 { 2041 int feat; 2042 2043 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++) 2044 perf_header__set_feat(&session->header, feat); 2045 2046 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT); 2047 perf_header__clear_feat(&session->header, HEADER_BUILD_ID); 2048 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA); 2049 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK); 2050 perf_header__clear_feat(&session->header, HEADER_AUXTRACE); 2051 } 2052 2053 static int __cmd_record(int argc, const char **argv) 2054 { 2055 struct perf_session *session; 2056 struct perf_data *data = 
&perf_stat.data; 2057 2058 argc = parse_options(argc, argv, stat_options, stat_record_usage, 2059 PARSE_OPT_STOP_AT_NON_OPTION); 2060 2061 if (output_name) 2062 data->path = output_name; 2063 2064 if (stat_config.run_count != 1 || forever) { 2065 pr_err("Cannot use -r option with perf stat record.\n"); 2066 return -1; 2067 } 2068 2069 session = perf_session__new(data, NULL); 2070 if (IS_ERR(session)) { 2071 pr_err("Perf session creation failed\n"); 2072 return PTR_ERR(session); 2073 } 2074 2075 init_features(session); 2076 2077 session->evlist = evsel_list; 2078 perf_stat.session = session; 2079 perf_stat.record = true; 2080 return argc; 2081 } 2082 2083 static int process_stat_round_event(struct perf_session *session, 2084 union perf_event *event) 2085 { 2086 struct perf_record_stat_round *stat_round = &event->stat_round; 2087 struct timespec tsh, *ts = NULL; 2088 const char **argv = session->header.env.cmdline_argv; 2089 int argc = session->header.env.nr_cmdline; 2090 2091 process_counters(); 2092 2093 if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL) 2094 update_stats(&walltime_nsecs_stats, stat_round->time); 2095 2096 if (stat_config.interval && stat_round->time) { 2097 tsh.tv_sec = stat_round->time / NSEC_PER_SEC; 2098 tsh.tv_nsec = stat_round->time % NSEC_PER_SEC; 2099 ts = &tsh; 2100 } 2101 2102 print_counters(ts, argc, argv); 2103 return 0; 2104 } 2105 2106 static 2107 int process_stat_config_event(struct perf_session *session, 2108 union perf_event *event) 2109 { 2110 struct perf_tool *tool = session->tool; 2111 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 2112 2113 perf_event__read_stat_config(&stat_config, &event->stat_config); 2114 2115 if (perf_cpu_map__empty(st->cpus)) { 2116 if (st->aggr_mode != AGGR_UNSET) 2117 pr_warning("warning: processing task data, aggregation mode not set\n"); 2118 } else if (st->aggr_mode != AGGR_UNSET) { 2119 stat_config.aggr_mode = st->aggr_mode; 2120 } 2121 2122 if (perf_stat.data.is_pipe) 2123 perf_stat_init_aggr_mode(); 2124 else 2125 perf_stat_init_aggr_mode_file(st); 2126 2127 if (stat_config.aggr_map) { 2128 int nr_aggr = stat_config.aggr_map->nr; 2129 2130 if (evlist__alloc_aggr_stats(session->evlist, nr_aggr) < 0) { 2131 pr_err("cannot allocate aggr counts\n"); 2132 return -1; 2133 } 2134 } 2135 return 0; 2136 } 2137 2138 static int set_maps(struct perf_stat *st) 2139 { 2140 if (!st->cpus || !st->threads) 2141 return 0; 2142 2143 if (WARN_ONCE(st->maps_allocated, "stats double allocation\n")) 2144 return -EINVAL; 2145 2146 perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads); 2147 2148 if (evlist__alloc_stats(&stat_config, evsel_list, /*alloc_raw=*/true)) 2149 return -ENOMEM; 2150 2151 st->maps_allocated = true; 2152 return 0; 2153 } 2154 2155 static 2156 int process_thread_map_event(struct perf_session *session, 2157 union perf_event *event) 2158 { 2159 struct perf_tool *tool = session->tool; 2160 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 2161 2162 if (st->threads) { 2163 pr_warning("Extra thread map event, ignoring.\n"); 2164 return 0; 2165 } 2166 2167 st->threads = thread_map__new_event(&event->thread_map); 2168 if (!st->threads) 2169 return -ENOMEM; 2170 2171 return set_maps(st); 2172 } 2173 2174 static 2175 int process_cpu_map_event(struct perf_session *session, 2176 union perf_event *event) 2177 { 2178 struct perf_tool *tool = session->tool; 2179 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 2180 struct perf_cpu_map *cpus; 2181 2182 if (st->cpus) { 2183 
pr_warning("Extra cpu map event, ignoring.\n"); 2184 return 0; 2185 } 2186 2187 cpus = cpu_map__new_data(&event->cpu_map.data); 2188 if (!cpus) 2189 return -ENOMEM; 2190 2191 st->cpus = cpus; 2192 return set_maps(st); 2193 } 2194 2195 static const char * const stat_report_usage[] = { 2196 "perf stat report [<options>]", 2197 NULL, 2198 }; 2199 2200 static struct perf_stat perf_stat = { 2201 .tool = { 2202 .attr = perf_event__process_attr, 2203 .event_update = perf_event__process_event_update, 2204 .thread_map = process_thread_map_event, 2205 .cpu_map = process_cpu_map_event, 2206 .stat_config = process_stat_config_event, 2207 .stat = perf_event__process_stat_event, 2208 .stat_round = process_stat_round_event, 2209 }, 2210 .aggr_mode = AGGR_UNSET, 2211 }; 2212 2213 static int __cmd_report(int argc, const char **argv) 2214 { 2215 struct perf_session *session; 2216 const struct option options[] = { 2217 OPT_STRING('i', "input", &input_name, "file", "input file name"), 2218 OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode, 2219 "aggregate counts per processor socket", AGGR_SOCKET), 2220 OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode, 2221 "aggregate counts per processor die", AGGR_DIE), 2222 OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode, 2223 "aggregate counts per physical processor core", AGGR_CORE), 2224 OPT_SET_UINT(0, "per-node", &perf_stat.aggr_mode, 2225 "aggregate counts per numa node", AGGR_NODE), 2226 OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode, 2227 "disable CPU count aggregation", AGGR_NONE), 2228 OPT_END() 2229 }; 2230 struct stat st; 2231 int ret; 2232 2233 argc = parse_options(argc, argv, options, stat_report_usage, 0); 2234 2235 if (!input_name || !strlen(input_name)) { 2236 if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode)) 2237 input_name = "-"; 2238 else 2239 input_name = "perf.data"; 2240 } 2241 2242 perf_stat__init_shadow_stats(); 2243 2244 perf_stat.data.path = input_name; 2245 perf_stat.data.mode = PERF_DATA_MODE_READ; 2246 2247 session = perf_session__new(&perf_stat.data, &perf_stat.tool); 2248 if (IS_ERR(session)) 2249 return PTR_ERR(session); 2250 2251 perf_stat.session = session; 2252 stat_config.output = stderr; 2253 evsel_list = session->evlist; 2254 2255 ret = perf_session__process_events(session); 2256 if (ret) 2257 return ret; 2258 2259 perf_session__delete(session); 2260 return 0; 2261 } 2262 2263 static void setup_system_wide(int forks) 2264 { 2265 /* 2266 * Make system wide (-a) the default target if 2267 * no target was specified and one of following 2268 * conditions is met: 2269 * 2270 * - there's no workload specified 2271 * - there is workload specified but all requested 2272 * events are system wide events 2273 */ 2274 if (!target__none(&target)) 2275 return; 2276 2277 if (!forks) 2278 target.system_wide = true; 2279 else { 2280 struct evsel *counter; 2281 2282 evlist__for_each_entry(evsel_list, counter) { 2283 if (!counter->core.requires_cpu && 2284 strcmp(counter->name, "duration_time")) { 2285 return; 2286 } 2287 } 2288 2289 if (evsel_list->core.nr_entries) 2290 target.system_wide = true; 2291 } 2292 } 2293 2294 int cmd_stat(int argc, const char **argv) 2295 { 2296 const char * const stat_usage[] = { 2297 "perf stat [<options>] [<command>]", 2298 NULL 2299 }; 2300 int status = -EINVAL, run_idx, err; 2301 const char *mode; 2302 FILE *output = stderr; 2303 unsigned int interval, timeout; 2304 const char * const stat_subcommands[] = { "record", "report" }; 2305 char errbuf[BUFSIZ]; 2306 2307 setlocale(LC_ALL, ""); 2308 2309 evsel_list = 
int cmd_stat(int argc, const char **argv)
{
	const char * const stat_usage[] = {
		"perf stat [<options>] [<command>]",
		NULL
	};
	int status = -EINVAL, run_idx, err;
	const char *mode;
	FILE *output = stderr;
	unsigned int interval, timeout;
	const char * const stat_subcommands[] = { "record", "report" };
	char errbuf[BUFSIZ];

	setlocale(LC_ALL, "");

	evsel_list = evlist__new();
	if (evsel_list == NULL)
		return -ENOMEM;

	parse_events__shrink_config_terms();

	/* String-parsing callback-based options would segfault when negated */
	set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG);
	set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG);
	set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG);

	argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
					(const char **) stat_usage,
					PARSE_OPT_STOP_AT_NON_OPTION);

	if (stat_config.csv_sep) {
		stat_config.csv_output = true;
		if (!strcmp(stat_config.csv_sep, "\\t"))
			stat_config.csv_sep = "\t";
	} else
		stat_config.csv_sep = DEFAULT_SEPARATOR;

	if (argc && strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
		argc = __cmd_record(argc, argv);
		if (argc < 0)
			return -1;
	} else if (argc && strlen(argv[0]) > 2 && strstarts("report", argv[0]))
		return __cmd_report(argc, argv);

	interval = stat_config.interval;
	timeout = stat_config.timeout;

	/*
	 * For the record command the -o option is already taken care of.
	 */
	if (!STAT_RECORD && output_name && strcmp(output_name, "-"))
		output = NULL;

	if (output_name && output_fd) {
		fprintf(stderr, "cannot use both --output and --log-fd\n");
		parse_options_usage(stat_usage, stat_options, "o", 1);
		parse_options_usage(NULL, stat_options, "log-fd", 0);
		goto out;
	}

	if (stat_config.metric_only && stat_config.aggr_mode == AGGR_THREAD) {
		fprintf(stderr, "--metric-only is not supported with --per-thread\n");
		goto out;
	}

	if (stat_config.metric_only && stat_config.run_count > 1) {
		fprintf(stderr, "--metric-only is not supported with -r\n");
		goto out;
	}

	if (stat_config.walltime_run_table && stat_config.run_count <= 1) {
		fprintf(stderr, "--table is only supported with -r\n");
		parse_options_usage(stat_usage, stat_options, "r", 1);
		parse_options_usage(NULL, stat_options, "table", 0);
		goto out;
	}

	if (output_fd < 0) {
		fprintf(stderr, "argument to --log-fd must be > 0\n");
		parse_options_usage(stat_usage, stat_options, "log-fd", 0);
		goto out;
	}

	if (!output && !quiet) {
		struct timespec tm;
		mode = append_file ? "a" : "w";

		output = fopen(output_name, mode);
		if (!output) {
			perror("failed to create output file");
			return -1;
		}
		clock_gettime(CLOCK_REALTIME, &tm);
		fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
	} else if (output_fd > 0) {
		mode = append_file ? "a" : "w";
		output = fdopen(output_fd, mode);
		if (!output) {
			perror("Failed opening logfd");
			return -errno;
		}
	}

	if (stat_config.interval_clear && !isatty(fileno(output))) {
		fprintf(stderr, "--interval-clear does not work with output\n");
		parse_options_usage(stat_usage, stat_options, "o", 1);
		parse_options_usage(NULL, stat_options, "log-fd", 0);
		parse_options_usage(NULL, stat_options, "interval-clear", 0);
		return -1;
	}

	stat_config.output = output;
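	/*
	 * Example (illustrative): -x enables CSV mode, e.g.
	 *
	 *   $ perf stat -x, -o stats.csv ./cmd
	 *
	 * so the block below disables big-number formatting and rejects
	 * an explicitly passed -B.
	 */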
	/*
	 * Let the spreadsheet do the pretty-printing.
	 */
	if (stat_config.csv_output) {
		/* User explicitly passed -B? */
		if (big_num_opt == 1) {
			fprintf(stderr, "-B option not supported with -x\n");
			parse_options_usage(stat_usage, stat_options, "B", 1);
			parse_options_usage(NULL, stat_options, "x", 1);
			goto out;
		} else /* Nope, so disable big number formatting */
			stat_config.big_num = false;
	} else if (big_num_opt == 0) /* User passed --no-big-num */
		stat_config.big_num = false;

	err = target__validate(&target);
	if (err) {
		target__strerror(&target, err, errbuf, BUFSIZ);
		pr_warning("%s\n", errbuf);
	}

	setup_system_wide(argc);

	/*
	 * Display user/system times only for a single
	 * run and when a tracee is specified.
	 */
	if ((stat_config.run_count == 1) && target__none(&target))
		stat_config.ru_display = true;

	if (stat_config.run_count < 0) {
		pr_err("Run count must be a positive number\n");
		parse_options_usage(stat_usage, stat_options, "r", 1);
		goto out;
	} else if (stat_config.run_count == 0) {
		forever = true;
		stat_config.run_count = 1;
	}

	if (stat_config.walltime_run_table) {
		stat_config.walltime_run = zalloc(stat_config.run_count * sizeof(stat_config.walltime_run[0]));
		if (!stat_config.walltime_run) {
			pr_err("failed to set up -r option\n");
			goto out;
		}
	}

	if ((stat_config.aggr_mode == AGGR_THREAD) &&
		!target__has_task(&target)) {
		if (!target.system_wide || target.cpu_list) {
			fprintf(stderr, "The --per-thread option is only "
				"available when monitoring via the -p, -t or -a "
				"options, or with --per-thread alone.\n");
			parse_options_usage(NULL, stat_options, "p", 1);
			parse_options_usage(NULL, stat_options, "t", 1);
			goto out;
		}
	}

	/*
	 * no_aggr and cgroup are for system-wide only.
	 * --per-thread is aggregated per thread, we don't mix it with cpu mode.
	 */
	if (((stat_config.aggr_mode != AGGR_GLOBAL &&
	      stat_config.aggr_mode != AGGR_THREAD) ||
	     (nr_cgroups || stat_config.cgroup_list)) &&
	    !target__has_cpu(&target)) {
		fprintf(stderr, "both cgroup and no-aggregation "
			"modes are only available in system-wide mode\n");

		parse_options_usage(stat_usage, stat_options, "G", 1);
		parse_options_usage(NULL, stat_options, "A", 1);
		parse_options_usage(NULL, stat_options, "a", 1);
		parse_options_usage(NULL, stat_options, "for-each-cgroup", 0);
		goto out;
	}

	if (stat_config.iostat_run) {
		status = iostat_prepare(evsel_list, &stat_config);
		if (status)
			goto out;
		if (iostat_mode == IOSTAT_LIST) {
			iostat_list(evsel_list, &stat_config);
			goto out;
		} else if (verbose > 0)
			iostat_list(evsel_list, &stat_config);
		if (iostat_mode == IOSTAT_RUN && !target__has_cpu(&target))
			target.system_wide = true;
	}

	if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide))
		target.per_thread = true;

	stat_config.system_wide = target.system_wide;
	if (target.cpu_list) {
		stat_config.user_requested_cpu_list = strdup(target.cpu_list);
		if (!stat_config.user_requested_cpu_list) {
			status = -ENOMEM;
			goto out;
		}
	}
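	/*
	 * Example (illustrative): --per-thread with a task target
	 * aggregates counts per monitored thread:
	 *
	 *   $ perf stat --per-thread -p <pid>
	 */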
	/*
	 * Metric parsing needs to be delayed as metrics may optimize events
	 * knowing the target is system-wide.
	 */
	if (metrics) {
		metricgroup__parse_groups(evsel_list, metrics,
					  stat_config.metric_no_group,
					  stat_config.metric_no_merge,
					  stat_config.user_requested_cpu_list,
					  stat_config.system_wide,
					  &stat_config.metric_events);
		zfree(&metrics);
	}
	perf_stat__init_shadow_stats();

	if (add_default_attributes())
		goto out;

	if (stat_config.cgroup_list) {
		if (nr_cgroups > 0) {
			pr_err("--cgroup and --for-each-cgroup cannot be used together\n");
			parse_options_usage(stat_usage, stat_options, "G", 1);
			parse_options_usage(NULL, stat_options, "for-each-cgroup", 0);
			goto out;
		}

		if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list,
					  &stat_config.metric_events, true) < 0) {
			parse_options_usage(stat_usage, stat_options,
					    "for-each-cgroup", 0);
			goto out;
		}
	}

	if (evlist__fix_hybrid_cpus(evsel_list, target.cpu_list)) {
		pr_err("failed to use cpu list %s\n", target.cpu_list);
		goto out;
	}

	target.hybrid = perf_pmu__has_hybrid();
	if (evlist__create_maps(evsel_list, &target) < 0) {
		if (target__has_task(&target)) {
			pr_err("Problems finding threads to monitor\n");
			parse_options_usage(stat_usage, stat_options, "p", 1);
			parse_options_usage(NULL, stat_options, "t", 1);
		} else if (target__has_cpu(&target)) {
			perror("failed to parse CPUs map");
			parse_options_usage(stat_usage, stat_options, "C", 1);
			parse_options_usage(NULL, stat_options, "a", 1);
		}
		goto out;
	}

	evlist__check_cpu_maps(evsel_list);

	/*
	 * Initialize thread_map with comm names,
	 * so we can print them out in the output.
	 */
	if (stat_config.aggr_mode == AGGR_THREAD)
		thread_map__read_comms(evsel_list->core.threads);

	if (stat_config.aggr_mode == AGGR_NODE)
		cpu__setup_cpunode_map();

	if (stat_config.times && interval)
		interval_count = true;
	else if (stat_config.times && !interval) {
		pr_err("interval-count option should be used together with "
		       "interval-print.\n");
		parse_options_usage(stat_usage, stat_options, "interval-count", 0);
		parse_options_usage(stat_usage, stat_options, "I", 1);
		goto out;
	}

	if (timeout && timeout < 100) {
		if (timeout < 10) {
			pr_err("timeout must be >= 10ms.\n");
			parse_options_usage(stat_usage, stat_options, "timeout", 0);
			goto out;
		} else
			pr_warning("timeout < 100ms. "
				   "The overhead percentage could be high in some cases. "
				   "Please proceed with caution.\n");
	}
	if (timeout && interval) {
		pr_err("timeout option is not supported with interval-print.\n");
		parse_options_usage(stat_usage, stat_options, "timeout", 0);
		parse_options_usage(stat_usage, stat_options, "I", 1);
		goto out;
	}

	if (perf_stat_init_aggr_mode())
		goto out;

	if (evlist__alloc_stats(&stat_config, evsel_list, interval))
		goto out;
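	/*
	 * Example (illustrative): interval printing as validated above:
	 *
	 *   $ perf stat -I 1000 --interval-count 5 -a
	 *
	 * prints counters every 1000ms, five times; timeouts below 10ms
	 * are rejected and sub-100ms timeouts only warn.
	 */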
	/*
	 * Set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless
	 * while avoiding that older tools show confusing messages.
	 *
	 * However for pipe sessions we need to keep it zero,
	 * because script's perf_evsel__check_attr is triggered
	 * by attr->sample_type != 0, and we can't run it on
	 * stat sessions.
	 */
	stat_config.identifier = !(STAT_RECORD && perf_stat.data.is_pipe);

	/*
	 * We don't want to block the signals - that would cause
	 * child tasks to inherit that and Ctrl-C would not work.
	 * What we want is for Ctrl-C to work in the exec()-ed
	 * task, but being ignored by perf stat itself:
	 */
	atexit(sig_atexit);
	if (!forever)
		signal(SIGINT,  skip_signal);
	signal(SIGCHLD, skip_signal);
	signal(SIGALRM, skip_signal);
	signal(SIGABRT, skip_signal);

	if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack))
		goto out;

	/* Enable ignoring missing threads when -p option is defined. */
	evlist__first(evsel_list)->ignore_missing_thread = target.pid;
	status = 0;
	for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) {
		if (stat_config.run_count != 1 && verbose > 0)
			fprintf(output, "[ perf stat: executing run #%d ... ]\n",
				run_idx + 1);

		if (run_idx != 0)
			evlist__reset_prev_raw_counts(evsel_list);

		status = run_perf_stat(argc, argv, run_idx);
		if (forever && status != -1 && !interval) {
			print_counters(NULL, argc, argv);
			perf_stat__reset_stats();
		}
	}

	if (!forever && status != -1 && (!interval || stat_config.summary))
		print_counters(NULL, argc, argv);

	evlist__finalize_ctlfd(evsel_list);

	if (STAT_RECORD) {
		/*
		 * We synthesize the kernel mmap record just so that older tools
		 * don't emit warnings about not being able to resolve symbols
		 * due to /proc/sys/kernel/kptr_restrict settings and instead provide
		 * a saner message about no samples being in the perf.data file.
		 *
		 * This also serves to suppress a warning about f_header.data.size == 0
		 * in header.c at the moment 'perf stat record' gets introduced, which
		 * is not really needed once we start adding the stat specific PERF_RECORD_
		 * records, but the need to suppress the kptr_restrict messages in older
		 * tools remains  -acme
		 */
		int fd = perf_data__fd(&perf_stat.data);

		err = perf_event__synthesize_kernel_mmap((void *)&perf_stat,
							 process_synthesized_event,
							 &perf_stat.session->machines.host);
		if (err) {
			pr_warning("Couldn't synthesize the kernel mmap record, harmless, "
				   "older tools may produce warnings about this file.\n");
		}

		if (!interval) {
			if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL))
				pr_err("failed to write stat round event\n");
		}

		if (!perf_stat.data.is_pipe) {
			perf_stat.session->header.data_size += perf_stat.bytes_written;
			perf_session__write_header(perf_stat.session, evsel_list, fd, true);
		}

		evlist__close(evsel_list);
		perf_session__delete(perf_stat.session);
	}

	perf_stat__exit_aggr_mode();
	evlist__free_stats(evsel_list);
out:
	if (stat_config.iostat_run)
		iostat_release(evsel_list);

	zfree(&stat_config.walltime_run);
	zfree(&stat_config.user_requested_cpu_list);

	if (smi_cost && smi_reset)
		sysfs__write_int(FREEZE_ON_SMI_PATH, 0);

	evlist__delete(evsel_list);

	metricgroup__rblist_exit(&stat_config.metric_events);
	evlist__close_control(stat_config.ctl_fd, stat_config.ctl_fd_ack, &stat_config.ctl_fd_close);

	return status;
}
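/*
 * Example round-trip (illustrative) through the record/report paths
 * implemented in this file:
 *
 *   $ perf stat record -- sleep 1
 *   $ perf stat report
 *
 * record writes counter data to perf.data; report replays it through
 * the process_*_event callbacks above.
 */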