1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * builtin-stat.c 4 * 5 * Builtin stat command: Give a precise performance counters summary 6 * overview about any workload, CPU or specific PID. 7 * 8 * Sample output: 9 10 $ perf stat ./hackbench 10 11 12 Time: 0.118 13 14 Performance counter stats for './hackbench 10': 15 16 1708.761321 task-clock # 11.037 CPUs utilized 17 41,190 context-switches # 0.024 M/sec 18 6,735 CPU-migrations # 0.004 M/sec 19 17,318 page-faults # 0.010 M/sec 20 5,205,202,243 cycles # 3.046 GHz 21 3,856,436,920 stalled-cycles-frontend # 74.09% frontend cycles idle 22 1,600,790,871 stalled-cycles-backend # 30.75% backend cycles idle 23 2,603,501,247 instructions # 0.50 insns per cycle 24 # 1.48 stalled cycles per insn 25 484,357,498 branches # 283.455 M/sec 26 6,388,934 branch-misses # 1.32% of all branches 27 28 0.154822978 seconds time elapsed 29 30 * 31 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com> 32 * 33 * Improvements and fixes by: 34 * 35 * Arjan van de Ven <arjan@linux.intel.com> 36 * Yanmin Zhang <yanmin.zhang@intel.com> 37 * Wu Fengguang <fengguang.wu@intel.com> 38 * Mike Galbraith <efault@gmx.de> 39 * Paul Mackerras <paulus@samba.org> 40 * Jaswinder Singh Rajput <jaswinder@kernel.org> 41 */ 42 43 #include "builtin.h" 44 #include "perf.h" 45 #include "util/cgroup.h" 46 #include <subcmd/parse-options.h> 47 #include "util/parse-events.h" 48 #include "util/pmu.h" 49 #include "util/event.h" 50 #include "util/evlist.h" 51 #include "util/evsel.h" 52 #include "util/debug.h" 53 #include "util/color.h" 54 #include "util/stat.h" 55 #include "util/header.h" 56 #include "util/cpumap.h" 57 #include "util/thread_map.h" 58 #include "util/counts.h" 59 #include "util/group.h" 60 #include "util/session.h" 61 #include "util/tool.h" 62 #include "util/string2.h" 63 #include "util/metricgroup.h" 64 #include "util/synthetic-events.h" 65 #include "util/target.h" 66 #include "util/time-utils.h" 67 #include "util/top.h" 68 #include "util/affinity.h" 69 #include "asm/bug.h" 70 71 #include <linux/time64.h> 72 #include <linux/zalloc.h> 73 #include <api/fs/fs.h> 74 #include <errno.h> 75 #include <signal.h> 76 #include <stdlib.h> 77 #include <sys/prctl.h> 78 #include <inttypes.h> 79 #include <locale.h> 80 #include <math.h> 81 #include <sys/types.h> 82 #include <sys/stat.h> 83 #include <sys/wait.h> 84 #include <unistd.h> 85 #include <sys/time.h> 86 #include <sys/resource.h> 87 #include <linux/err.h> 88 89 #include <linux/ctype.h> 90 #include <perf/evlist.h> 91 92 #define DEFAULT_SEPARATOR " " 93 #define FREEZE_ON_SMI_PATH "devices/cpu/freeze_on_smi" 94 95 static void print_counters(struct timespec *ts, int argc, const char **argv); 96 97 /* Default events used for perf stat -T */ 98 static const char *transaction_attrs = { 99 "task-clock," 100 "{" 101 "instructions," 102 "cycles," 103 "cpu/cycles-t/," 104 "cpu/tx-start/," 105 "cpu/el-start/," 106 "cpu/cycles-ct/" 107 "}" 108 }; 109 110 /* More limited version when the CPU does not have all events. */ 111 static const char * transaction_limited_attrs = { 112 "task-clock," 113 "{" 114 "instructions," 115 "cycles," 116 "cpu/cycles-t/," 117 "cpu/tx-start/" 118 "}" 119 }; 120 121 static const char * topdown_attrs[] = { 122 "topdown-total-slots", 123 "topdown-slots-retired", 124 "topdown-recovery-bubbles", 125 "topdown-fetch-bubbles", 126 "topdown-slots-issued", 127 NULL, 128 }; 129 130 static const char *smi_cost_attrs = { 131 "{" 132 "msr/aperf/," 133 "msr/smi/," 134 "cycles" 135 "}" 136 }; 137 138 static struct evlist *evsel_list; 139 140 static struct target target = { 141 .uid = UINT_MAX, 142 }; 143 144 #define METRIC_ONLY_LEN 20 145 146 static volatile pid_t child_pid = -1; 147 static int detailed_run = 0; 148 static bool transaction_run; 149 static bool topdown_run = false; 150 static bool smi_cost = false; 151 static bool smi_reset = false; 152 static int big_num_opt = -1; 153 static bool group = false; 154 static const char *pre_cmd = NULL; 155 static const char *post_cmd = NULL; 156 static bool sync_run = false; 157 static bool forever = false; 158 static bool force_metric_only = false; 159 static struct timespec ref_time; 160 static bool append_file; 161 static bool interval_count; 162 static const char *output_name; 163 static int output_fd; 164 165 struct perf_stat { 166 bool record; 167 struct perf_data data; 168 struct perf_session *session; 169 u64 bytes_written; 170 struct perf_tool tool; 171 bool maps_allocated; 172 struct perf_cpu_map *cpus; 173 struct perf_thread_map *threads; 174 enum aggr_mode aggr_mode; 175 }; 176 177 static struct perf_stat perf_stat; 178 #define STAT_RECORD perf_stat.record 179 180 static volatile int done = 0; 181 182 static struct perf_stat_config stat_config = { 183 .aggr_mode = AGGR_GLOBAL, 184 .scale = true, 185 .unit_width = 4, /* strlen("unit") */ 186 .run_count = 1, 187 .metric_only_len = METRIC_ONLY_LEN, 188 .walltime_nsecs_stats = &walltime_nsecs_stats, 189 .big_num = true, 190 }; 191 192 static inline void diff_timespec(struct timespec *r, struct timespec *a, 193 struct timespec *b) 194 { 195 r->tv_sec = a->tv_sec - b->tv_sec; 196 if (a->tv_nsec < b->tv_nsec) { 197 r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec; 198 r->tv_sec--; 199 } else { 200 r->tv_nsec = a->tv_nsec - b->tv_nsec ; 201 } 202 } 203 204 static void perf_stat__reset_stats(void) 205 { 206 int i; 207 208 perf_evlist__reset_stats(evsel_list); 209 perf_stat__reset_shadow_stats(); 210 211 for (i = 0; i < stat_config.stats_num; i++) 212 perf_stat__reset_shadow_per_stat(&stat_config.stats[i]); 213 } 214 215 static int process_synthesized_event(struct perf_tool *tool __maybe_unused, 216 union perf_event *event, 217 struct perf_sample *sample __maybe_unused, 218 struct machine *machine __maybe_unused) 219 { 220 if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) { 221 pr_err("failed to write perf data, error: %m\n"); 222 return -1; 223 } 224 225 perf_stat.bytes_written += event->header.size; 226 return 0; 227 } 228 229 static int write_stat_round_event(u64 tm, u64 type) 230 { 231 return perf_event__synthesize_stat_round(NULL, tm, type, 232 process_synthesized_event, 233 NULL); 234 } 235 236 #define WRITE_STAT_ROUND_EVENT(time, interval) \ 237 write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval) 238 239 #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y) 240 241 static int evsel__write_stat_event(struct evsel *counter, u32 cpu, u32 thread, 242 struct perf_counts_values *count) 243 { 244 struct perf_sample_id *sid = SID(counter, cpu, thread); 245 246 return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count, 247 process_synthesized_event, NULL); 248 } 249 250 static int read_single_counter(struct evsel *counter, int cpu, 251 int thread, struct timespec *rs) 252 { 253 if (counter->tool_event == PERF_TOOL_DURATION_TIME) { 254 u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL; 255 struct perf_counts_values *count = 256 perf_counts(counter->counts, cpu, thread); 257 count->ena = count->run = val; 258 count->val = val; 259 return 0; 260 } 261 return evsel__read_counter(counter, cpu, thread); 262 } 263 264 /* 265 * Read out the results of a single counter: 266 * do not aggregate counts across CPUs in system-wide mode 267 */ 268 static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu) 269 { 270 int nthreads = perf_thread_map__nr(evsel_list->core.threads); 271 int thread; 272 273 if (!counter->supported) 274 return -ENOENT; 275 276 if (counter->core.system_wide) 277 nthreads = 1; 278 279 for (thread = 0; thread < nthreads; thread++) { 280 struct perf_counts_values *count; 281 282 count = perf_counts(counter->counts, cpu, thread); 283 284 /* 285 * The leader's group read loads data into its group members 286 * (via evsel__read_counter()) and sets their count->loaded. 287 */ 288 if (!perf_counts__is_loaded(counter->counts, cpu, thread) && 289 read_single_counter(counter, cpu, thread, rs)) { 290 counter->counts->scaled = -1; 291 perf_counts(counter->counts, cpu, thread)->ena = 0; 292 perf_counts(counter->counts, cpu, thread)->run = 0; 293 return -1; 294 } 295 296 perf_counts__set_loaded(counter->counts, cpu, thread, false); 297 298 if (STAT_RECORD) { 299 if (evsel__write_stat_event(counter, cpu, thread, count)) { 300 pr_err("failed to write stat event\n"); 301 return -1; 302 } 303 } 304 305 if (verbose > 1) { 306 fprintf(stat_config.output, 307 "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", 308 evsel__name(counter), 309 cpu, 310 count->val, count->ena, count->run); 311 } 312 } 313 314 return 0; 315 } 316 317 static void read_counters(struct timespec *rs) 318 { 319 struct evsel *counter; 320 struct affinity affinity; 321 int i, ncpus, cpu; 322 323 if (affinity__setup(&affinity) < 0) 324 return; 325 326 ncpus = perf_cpu_map__nr(evsel_list->core.all_cpus); 327 if (!target__has_cpu(&target) || target__has_per_thread(&target)) 328 ncpus = 1; 329 evlist__for_each_cpu(evsel_list, i, cpu) { 330 if (i >= ncpus) 331 break; 332 affinity__set(&affinity, cpu); 333 334 evlist__for_each_entry(evsel_list, counter) { 335 if (evsel__cpu_iter_skip(counter, cpu)) 336 continue; 337 if (!counter->err) { 338 counter->err = read_counter_cpu(counter, rs, 339 counter->cpu_iter - 1); 340 } 341 } 342 } 343 affinity__cleanup(&affinity); 344 345 evlist__for_each_entry(evsel_list, counter) { 346 if (counter->err) 347 pr_debug("failed to read counter %s\n", counter->name); 348 if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter)) 349 pr_warning("failed to process counter %s\n", counter->name); 350 counter->err = 0; 351 } 352 } 353 354 static void process_interval(void) 355 { 356 struct timespec ts, rs; 357 358 clock_gettime(CLOCK_MONOTONIC, &ts); 359 diff_timespec(&rs, &ts, &ref_time); 360 361 perf_stat__reset_shadow_per_stat(&rt_stat); 362 read_counters(&rs); 363 364 if (STAT_RECORD) { 365 if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL)) 366 pr_err("failed to write stat round event\n"); 367 } 368 369 init_stats(&walltime_nsecs_stats); 370 update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000); 371 print_counters(&rs, 0, NULL); 372 } 373 374 static void enable_counters(void) 375 { 376 if (stat_config.initial_delay) 377 usleep(stat_config.initial_delay * USEC_PER_MSEC); 378 379 /* 380 * We need to enable counters only if: 381 * - we don't have tracee (attaching to task or cpu) 382 * - we have initial delay configured 383 */ 384 if (!target__none(&target) || stat_config.initial_delay) 385 evlist__enable(evsel_list); 386 } 387 388 static void disable_counters(void) 389 { 390 /* 391 * If we don't have tracee (attaching to task or cpu), counters may 392 * still be running. To get accurate group ratios, we must stop groups 393 * from counting before reading their constituent counters. 394 */ 395 if (!target__none(&target)) 396 evlist__disable(evsel_list); 397 } 398 399 static volatile int workload_exec_errno; 400 401 /* 402 * perf_evlist__prepare_workload will send a SIGUSR1 403 * if the fork fails, since we asked by setting its 404 * want_signal to true. 405 */ 406 static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info, 407 void *ucontext __maybe_unused) 408 { 409 workload_exec_errno = info->si_value.sival_int; 410 } 411 412 static bool evsel__should_store_id(struct evsel *counter) 413 { 414 return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID; 415 } 416 417 static bool is_target_alive(struct target *_target, 418 struct perf_thread_map *threads) 419 { 420 struct stat st; 421 int i; 422 423 if (!target__has_task(_target)) 424 return true; 425 426 for (i = 0; i < threads->nr; i++) { 427 char path[PATH_MAX]; 428 429 scnprintf(path, PATH_MAX, "%s/%d", procfs__mountpoint(), 430 threads->map[i].pid); 431 432 if (!stat(path, &st)) 433 return true; 434 } 435 436 return false; 437 } 438 439 enum counter_recovery { 440 COUNTER_SKIP, 441 COUNTER_RETRY, 442 COUNTER_FATAL, 443 }; 444 445 static enum counter_recovery stat_handle_error(struct evsel *counter) 446 { 447 char msg[BUFSIZ]; 448 /* 449 * PPC returns ENXIO for HW counters until 2.6.37 450 * (behavior changed with commit b0a873e). 451 */ 452 if (errno == EINVAL || errno == ENOSYS || 453 errno == ENOENT || errno == EOPNOTSUPP || 454 errno == ENXIO) { 455 if (verbose > 0) 456 ui__warning("%s event is not supported by the kernel.\n", 457 evsel__name(counter)); 458 counter->supported = false; 459 /* 460 * errored is a sticky flag that means one of the counter's 461 * cpu event had a problem and needs to be reexamined. 462 */ 463 counter->errored = true; 464 465 if ((counter->leader != counter) || 466 !(counter->leader->core.nr_members > 1)) 467 return COUNTER_SKIP; 468 } else if (evsel__fallback(counter, errno, msg, sizeof(msg))) { 469 if (verbose > 0) 470 ui__warning("%s\n", msg); 471 return COUNTER_RETRY; 472 } else if (target__has_per_thread(&target) && 473 evsel_list->core.threads && 474 evsel_list->core.threads->err_thread != -1) { 475 /* 476 * For global --per-thread case, skip current 477 * error thread. 478 */ 479 if (!thread_map__remove(evsel_list->core.threads, 480 evsel_list->core.threads->err_thread)) { 481 evsel_list->core.threads->err_thread = -1; 482 return COUNTER_RETRY; 483 } 484 } 485 486 evsel__open_strerror(counter, &target, errno, msg, sizeof(msg)); 487 ui__error("%s\n", msg); 488 489 if (child_pid != -1) 490 kill(child_pid, SIGTERM); 491 return COUNTER_FATAL; 492 } 493 494 static int __run_perf_stat(int argc, const char **argv, int run_idx) 495 { 496 int interval = stat_config.interval; 497 int times = stat_config.times; 498 int timeout = stat_config.timeout; 499 char msg[BUFSIZ]; 500 unsigned long long t0, t1; 501 struct evsel *counter; 502 struct timespec ts; 503 size_t l; 504 int status = 0; 505 const bool forks = (argc > 0); 506 bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false; 507 struct affinity affinity; 508 int i, cpu; 509 bool second_pass = false; 510 511 if (interval) { 512 ts.tv_sec = interval / USEC_PER_MSEC; 513 ts.tv_nsec = (interval % USEC_PER_MSEC) * NSEC_PER_MSEC; 514 } else if (timeout) { 515 ts.tv_sec = timeout / USEC_PER_MSEC; 516 ts.tv_nsec = (timeout % USEC_PER_MSEC) * NSEC_PER_MSEC; 517 } else { 518 ts.tv_sec = 1; 519 ts.tv_nsec = 0; 520 } 521 522 if (forks) { 523 if (perf_evlist__prepare_workload(evsel_list, &target, argv, is_pipe, 524 workload_exec_failed_signal) < 0) { 525 perror("failed to prepare workload"); 526 return -1; 527 } 528 child_pid = evsel_list->workload.pid; 529 } 530 531 if (group) 532 perf_evlist__set_leader(evsel_list); 533 534 if (affinity__setup(&affinity) < 0) 535 return -1; 536 537 evlist__for_each_cpu (evsel_list, i, cpu) { 538 affinity__set(&affinity, cpu); 539 540 evlist__for_each_entry(evsel_list, counter) { 541 if (evsel__cpu_iter_skip(counter, cpu)) 542 continue; 543 if (counter->reset_group || counter->errored) 544 continue; 545 try_again: 546 if (create_perf_stat_counter(counter, &stat_config, &target, 547 counter->cpu_iter - 1) < 0) { 548 549 /* 550 * Weak group failed. We cannot just undo this here 551 * because earlier CPUs might be in group mode, and the kernel 552 * doesn't support mixing group and non group reads. Defer 553 * it to later. 554 * Don't close here because we're in the wrong affinity. 555 */ 556 if ((errno == EINVAL || errno == EBADF) && 557 counter->leader != counter && 558 counter->weak_group) { 559 perf_evlist__reset_weak_group(evsel_list, counter, false); 560 assert(counter->reset_group); 561 second_pass = true; 562 continue; 563 } 564 565 switch (stat_handle_error(counter)) { 566 case COUNTER_FATAL: 567 return -1; 568 case COUNTER_RETRY: 569 goto try_again; 570 case COUNTER_SKIP: 571 continue; 572 default: 573 break; 574 } 575 576 } 577 counter->supported = true; 578 } 579 } 580 581 if (second_pass) { 582 /* 583 * Now redo all the weak group after closing them, 584 * and also close errored counters. 585 */ 586 587 evlist__for_each_cpu(evsel_list, i, cpu) { 588 affinity__set(&affinity, cpu); 589 /* First close errored or weak retry */ 590 evlist__for_each_entry(evsel_list, counter) { 591 if (!counter->reset_group && !counter->errored) 592 continue; 593 if (evsel__cpu_iter_skip_no_inc(counter, cpu)) 594 continue; 595 perf_evsel__close_cpu(&counter->core, counter->cpu_iter); 596 } 597 /* Now reopen weak */ 598 evlist__for_each_entry(evsel_list, counter) { 599 if (!counter->reset_group && !counter->errored) 600 continue; 601 if (evsel__cpu_iter_skip(counter, cpu)) 602 continue; 603 if (!counter->reset_group) 604 continue; 605 try_again_reset: 606 pr_debug2("reopening weak %s\n", evsel__name(counter)); 607 if (create_perf_stat_counter(counter, &stat_config, &target, 608 counter->cpu_iter - 1) < 0) { 609 610 switch (stat_handle_error(counter)) { 611 case COUNTER_FATAL: 612 return -1; 613 case COUNTER_RETRY: 614 goto try_again_reset; 615 case COUNTER_SKIP: 616 continue; 617 default: 618 break; 619 } 620 } 621 counter->supported = true; 622 } 623 } 624 } 625 affinity__cleanup(&affinity); 626 627 evlist__for_each_entry(evsel_list, counter) { 628 if (!counter->supported) { 629 perf_evsel__free_fd(&counter->core); 630 continue; 631 } 632 633 l = strlen(counter->unit); 634 if (l > stat_config.unit_width) 635 stat_config.unit_width = l; 636 637 if (evsel__should_store_id(counter) && 638 evsel__store_ids(counter, evsel_list)) 639 return -1; 640 } 641 642 if (perf_evlist__apply_filters(evsel_list, &counter)) { 643 pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n", 644 counter->filter, evsel__name(counter), errno, 645 str_error_r(errno, msg, sizeof(msg))); 646 return -1; 647 } 648 649 if (STAT_RECORD) { 650 int err, fd = perf_data__fd(&perf_stat.data); 651 652 if (is_pipe) { 653 err = perf_header__write_pipe(perf_data__fd(&perf_stat.data)); 654 } else { 655 err = perf_session__write_header(perf_stat.session, evsel_list, 656 fd, false); 657 } 658 659 if (err < 0) 660 return err; 661 662 err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list, 663 process_synthesized_event, is_pipe); 664 if (err < 0) 665 return err; 666 } 667 668 /* 669 * Enable counters and exec the command: 670 */ 671 t0 = rdclock(); 672 clock_gettime(CLOCK_MONOTONIC, &ref_time); 673 674 if (forks) { 675 perf_evlist__start_workload(evsel_list); 676 enable_counters(); 677 678 if (interval || timeout) { 679 while (!waitpid(child_pid, &status, WNOHANG)) { 680 nanosleep(&ts, NULL); 681 if (timeout) 682 break; 683 process_interval(); 684 if (interval_count && !(--times)) 685 break; 686 } 687 } 688 if (child_pid != -1) { 689 if (timeout) 690 kill(child_pid, SIGTERM); 691 wait4(child_pid, &status, 0, &stat_config.ru_data); 692 } 693 694 if (workload_exec_errno) { 695 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg)); 696 pr_err("Workload failed: %s\n", emsg); 697 return -1; 698 } 699 700 if (WIFSIGNALED(status)) 701 psignal(WTERMSIG(status), argv[0]); 702 } else { 703 enable_counters(); 704 while (!done) { 705 nanosleep(&ts, NULL); 706 if (!is_target_alive(&target, evsel_list->core.threads)) 707 break; 708 if (timeout) 709 break; 710 if (interval) { 711 process_interval(); 712 if (interval_count && !(--times)) 713 break; 714 } 715 } 716 } 717 718 disable_counters(); 719 720 t1 = rdclock(); 721 722 if (stat_config.walltime_run_table) 723 stat_config.walltime_run[run_idx] = t1 - t0; 724 725 update_stats(&walltime_nsecs_stats, t1 - t0); 726 727 /* 728 * Closing a group leader splits the group, and as we only disable 729 * group leaders, results in remaining events becoming enabled. To 730 * avoid arbitrary skew, we must read all counters before closing any 731 * group leaders. 732 */ 733 read_counters(&(struct timespec) { .tv_nsec = t1-t0 }); 734 735 /* 736 * We need to keep evsel_list alive, because it's processed 737 * later the evsel_list will be closed after. 738 */ 739 if (!STAT_RECORD) 740 evlist__close(evsel_list); 741 742 return WEXITSTATUS(status); 743 } 744 745 static int run_perf_stat(int argc, const char **argv, int run_idx) 746 { 747 int ret; 748 749 if (pre_cmd) { 750 ret = system(pre_cmd); 751 if (ret) 752 return ret; 753 } 754 755 if (sync_run) 756 sync(); 757 758 ret = __run_perf_stat(argc, argv, run_idx); 759 if (ret) 760 return ret; 761 762 if (post_cmd) { 763 ret = system(post_cmd); 764 if (ret) 765 return ret; 766 } 767 768 return ret; 769 } 770 771 static void print_counters(struct timespec *ts, int argc, const char **argv) 772 { 773 /* Do not print anything if we record to the pipe. */ 774 if (STAT_RECORD && perf_stat.data.is_pipe) 775 return; 776 777 perf_evlist__print_counters(evsel_list, &stat_config, &target, 778 ts, argc, argv); 779 } 780 781 static volatile int signr = -1; 782 783 static void skip_signal(int signo) 784 { 785 if ((child_pid == -1) || stat_config.interval) 786 done = 1; 787 788 signr = signo; 789 /* 790 * render child_pid harmless 791 * won't send SIGTERM to a random 792 * process in case of race condition 793 * and fast PID recycling 794 */ 795 child_pid = -1; 796 } 797 798 static void sig_atexit(void) 799 { 800 sigset_t set, oset; 801 802 /* 803 * avoid race condition with SIGCHLD handler 804 * in skip_signal() which is modifying child_pid 805 * goal is to avoid send SIGTERM to a random 806 * process 807 */ 808 sigemptyset(&set); 809 sigaddset(&set, SIGCHLD); 810 sigprocmask(SIG_BLOCK, &set, &oset); 811 812 if (child_pid != -1) 813 kill(child_pid, SIGTERM); 814 815 sigprocmask(SIG_SETMASK, &oset, NULL); 816 817 if (signr == -1) 818 return; 819 820 signal(signr, SIG_DFL); 821 kill(getpid(), signr); 822 } 823 824 static int stat__set_big_num(const struct option *opt __maybe_unused, 825 const char *s __maybe_unused, int unset) 826 { 827 big_num_opt = unset ? 0 : 1; 828 return 0; 829 } 830 831 static int enable_metric_only(const struct option *opt __maybe_unused, 832 const char *s __maybe_unused, int unset) 833 { 834 force_metric_only = true; 835 stat_config.metric_only = !unset; 836 return 0; 837 } 838 839 static int parse_metric_groups(const struct option *opt, 840 const char *str, 841 int unset __maybe_unused) 842 { 843 return metricgroup__parse_groups(opt, str, &stat_config.metric_events); 844 } 845 846 static struct option stat_options[] = { 847 OPT_BOOLEAN('T', "transaction", &transaction_run, 848 "hardware transaction statistics"), 849 OPT_CALLBACK('e', "event", &evsel_list, "event", 850 "event selector. use 'perf list' to list available events", 851 parse_events_option), 852 OPT_CALLBACK(0, "filter", &evsel_list, "filter", 853 "event filter", parse_filter), 854 OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit, 855 "child tasks do not inherit counters"), 856 OPT_STRING('p', "pid", &target.pid, "pid", 857 "stat events on existing process id"), 858 OPT_STRING('t', "tid", &target.tid, "tid", 859 "stat events on existing thread id"), 860 OPT_BOOLEAN('a', "all-cpus", &target.system_wide, 861 "system-wide collection from all CPUs"), 862 OPT_BOOLEAN('g', "group", &group, 863 "put the counters into a counter group"), 864 OPT_BOOLEAN(0, "scale", &stat_config.scale, 865 "Use --no-scale to disable counter scaling for multiplexing"), 866 OPT_INCR('v', "verbose", &verbose, 867 "be more verbose (show counter open errors, etc)"), 868 OPT_INTEGER('r', "repeat", &stat_config.run_count, 869 "repeat command and print average + stddev (max: 100, forever: 0)"), 870 OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table, 871 "display details about each run (only with -r option)"), 872 OPT_BOOLEAN('n', "null", &stat_config.null_run, 873 "null run - dont start any counters"), 874 OPT_INCR('d', "detailed", &detailed_run, 875 "detailed run - start a lot of events"), 876 OPT_BOOLEAN('S', "sync", &sync_run, 877 "call sync() before starting a run"), 878 OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, 879 "print large numbers with thousands\' separators", 880 stat__set_big_num), 881 OPT_STRING('C', "cpu", &target.cpu_list, "cpu", 882 "list of cpus to monitor in system-wide"), 883 OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode, 884 "disable CPU count aggregation", AGGR_NONE), 885 OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"), 886 OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator", 887 "print counts with custom separator"), 888 OPT_CALLBACK('G', "cgroup", &evsel_list, "name", 889 "monitor event in cgroup name only", parse_cgroups), 890 OPT_STRING('o', "output", &output_name, "file", "output file name"), 891 OPT_BOOLEAN(0, "append", &append_file, "append to the output file"), 892 OPT_INTEGER(0, "log-fd", &output_fd, 893 "log output to fd, instead of stderr"), 894 OPT_STRING(0, "pre", &pre_cmd, "command", 895 "command to run prior to the measured command"), 896 OPT_STRING(0, "post", &post_cmd, "command", 897 "command to run after to the measured command"), 898 OPT_UINTEGER('I', "interval-print", &stat_config.interval, 899 "print counts at regular interval in ms " 900 "(overhead is possible for values <= 100ms)"), 901 OPT_INTEGER(0, "interval-count", &stat_config.times, 902 "print counts for fixed number of times"), 903 OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear, 904 "clear screen in between new interval"), 905 OPT_UINTEGER(0, "timeout", &stat_config.timeout, 906 "stop workload and print counts after a timeout period in ms (>= 10ms)"), 907 OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode, 908 "aggregate counts per processor socket", AGGR_SOCKET), 909 OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode, 910 "aggregate counts per processor die", AGGR_DIE), 911 OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode, 912 "aggregate counts per physical processor core", AGGR_CORE), 913 OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode, 914 "aggregate counts per thread", AGGR_THREAD), 915 OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode, 916 "aggregate counts per numa node", AGGR_NODE), 917 OPT_UINTEGER('D', "delay", &stat_config.initial_delay, 918 "ms to wait before starting measurement after program start"), 919 OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL, 920 "Only print computed metrics. No raw values", enable_metric_only), 921 OPT_BOOLEAN(0, "topdown", &topdown_run, 922 "measure topdown level 1 statistics"), 923 OPT_BOOLEAN(0, "smi-cost", &smi_cost, 924 "measure SMI cost"), 925 OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list", 926 "monitor specified metrics or metric groups (separated by ,)", 927 parse_metric_groups), 928 OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel, 929 "Configure all used events to run in kernel space.", 930 PARSE_OPT_EXCLUSIVE), 931 OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user, 932 "Configure all used events to run in user space.", 933 PARSE_OPT_EXCLUSIVE), 934 OPT_BOOLEAN(0, "percore-show-thread", &stat_config.percore_show_thread, 935 "Use with 'percore' event qualifier to show the event " 936 "counts of one hardware thread by sum up total hardware " 937 "threads of same physical core"), 938 OPT_END() 939 }; 940 941 static int perf_stat__get_socket(struct perf_stat_config *config __maybe_unused, 942 struct perf_cpu_map *map, int cpu) 943 { 944 return cpu_map__get_socket(map, cpu, NULL); 945 } 946 947 static int perf_stat__get_die(struct perf_stat_config *config __maybe_unused, 948 struct perf_cpu_map *map, int cpu) 949 { 950 return cpu_map__get_die(map, cpu, NULL); 951 } 952 953 static int perf_stat__get_core(struct perf_stat_config *config __maybe_unused, 954 struct perf_cpu_map *map, int cpu) 955 { 956 return cpu_map__get_core(map, cpu, NULL); 957 } 958 959 static int perf_stat__get_node(struct perf_stat_config *config __maybe_unused, 960 struct perf_cpu_map *map, int cpu) 961 { 962 return cpu_map__get_node(map, cpu, NULL); 963 } 964 965 static int perf_stat__get_aggr(struct perf_stat_config *config, 966 aggr_get_id_t get_id, struct perf_cpu_map *map, int idx) 967 { 968 int cpu; 969 970 if (idx >= map->nr) 971 return -1; 972 973 cpu = map->map[idx]; 974 975 if (config->cpus_aggr_map->map[cpu] == -1) 976 config->cpus_aggr_map->map[cpu] = get_id(config, map, idx); 977 978 return config->cpus_aggr_map->map[cpu]; 979 } 980 981 static int perf_stat__get_socket_cached(struct perf_stat_config *config, 982 struct perf_cpu_map *map, int idx) 983 { 984 return perf_stat__get_aggr(config, perf_stat__get_socket, map, idx); 985 } 986 987 static int perf_stat__get_die_cached(struct perf_stat_config *config, 988 struct perf_cpu_map *map, int idx) 989 { 990 return perf_stat__get_aggr(config, perf_stat__get_die, map, idx); 991 } 992 993 static int perf_stat__get_core_cached(struct perf_stat_config *config, 994 struct perf_cpu_map *map, int idx) 995 { 996 return perf_stat__get_aggr(config, perf_stat__get_core, map, idx); 997 } 998 999 static int perf_stat__get_node_cached(struct perf_stat_config *config, 1000 struct perf_cpu_map *map, int idx) 1001 { 1002 return perf_stat__get_aggr(config, perf_stat__get_node, map, idx); 1003 } 1004 1005 static bool term_percore_set(void) 1006 { 1007 struct evsel *counter; 1008 1009 evlist__for_each_entry(evsel_list, counter) { 1010 if (counter->percore) 1011 return true; 1012 } 1013 1014 return false; 1015 } 1016 1017 static int perf_stat_init_aggr_mode(void) 1018 { 1019 int nr; 1020 1021 switch (stat_config.aggr_mode) { 1022 case AGGR_SOCKET: 1023 if (cpu_map__build_socket_map(evsel_list->core.cpus, &stat_config.aggr_map)) { 1024 perror("cannot build socket map"); 1025 return -1; 1026 } 1027 stat_config.aggr_get_id = perf_stat__get_socket_cached; 1028 break; 1029 case AGGR_DIE: 1030 if (cpu_map__build_die_map(evsel_list->core.cpus, &stat_config.aggr_map)) { 1031 perror("cannot build die map"); 1032 return -1; 1033 } 1034 stat_config.aggr_get_id = perf_stat__get_die_cached; 1035 break; 1036 case AGGR_CORE: 1037 if (cpu_map__build_core_map(evsel_list->core.cpus, &stat_config.aggr_map)) { 1038 perror("cannot build core map"); 1039 return -1; 1040 } 1041 stat_config.aggr_get_id = perf_stat__get_core_cached; 1042 break; 1043 case AGGR_NODE: 1044 if (cpu_map__build_node_map(evsel_list->core.cpus, &stat_config.aggr_map)) { 1045 perror("cannot build core map"); 1046 return -1; 1047 } 1048 stat_config.aggr_get_id = perf_stat__get_node_cached; 1049 break; 1050 case AGGR_NONE: 1051 if (term_percore_set()) { 1052 if (cpu_map__build_core_map(evsel_list->core.cpus, 1053 &stat_config.aggr_map)) { 1054 perror("cannot build core map"); 1055 return -1; 1056 } 1057 stat_config.aggr_get_id = perf_stat__get_core_cached; 1058 } 1059 break; 1060 case AGGR_GLOBAL: 1061 case AGGR_THREAD: 1062 case AGGR_UNSET: 1063 default: 1064 break; 1065 } 1066 1067 /* 1068 * The evsel_list->cpus is the base we operate on, 1069 * taking the highest cpu number to be the size of 1070 * the aggregation translate cpumap. 1071 */ 1072 nr = perf_cpu_map__max(evsel_list->core.cpus); 1073 stat_config.cpus_aggr_map = perf_cpu_map__empty_new(nr + 1); 1074 return stat_config.cpus_aggr_map ? 0 : -ENOMEM; 1075 } 1076 1077 static void perf_stat__exit_aggr_mode(void) 1078 { 1079 perf_cpu_map__put(stat_config.aggr_map); 1080 perf_cpu_map__put(stat_config.cpus_aggr_map); 1081 stat_config.aggr_map = NULL; 1082 stat_config.cpus_aggr_map = NULL; 1083 } 1084 1085 static inline int perf_env__get_cpu(struct perf_env *env, struct perf_cpu_map *map, int idx) 1086 { 1087 int cpu; 1088 1089 if (idx > map->nr) 1090 return -1; 1091 1092 cpu = map->map[idx]; 1093 1094 if (cpu >= env->nr_cpus_avail) 1095 return -1; 1096 1097 return cpu; 1098 } 1099 1100 static int perf_env__get_socket(struct perf_cpu_map *map, int idx, void *data) 1101 { 1102 struct perf_env *env = data; 1103 int cpu = perf_env__get_cpu(env, map, idx); 1104 1105 return cpu == -1 ? -1 : env->cpu[cpu].socket_id; 1106 } 1107 1108 static int perf_env__get_die(struct perf_cpu_map *map, int idx, void *data) 1109 { 1110 struct perf_env *env = data; 1111 int die_id = -1, cpu = perf_env__get_cpu(env, map, idx); 1112 1113 if (cpu != -1) { 1114 /* 1115 * Encode socket in bit range 15:8 1116 * die_id is relative to socket, 1117 * we need a global id. So we combine 1118 * socket + die id 1119 */ 1120 if (WARN_ONCE(env->cpu[cpu].socket_id >> 8, "The socket id number is too big.\n")) 1121 return -1; 1122 1123 if (WARN_ONCE(env->cpu[cpu].die_id >> 8, "The die id number is too big.\n")) 1124 return -1; 1125 1126 die_id = (env->cpu[cpu].socket_id << 8) | (env->cpu[cpu].die_id & 0xff); 1127 } 1128 1129 return die_id; 1130 } 1131 1132 static int perf_env__get_core(struct perf_cpu_map *map, int idx, void *data) 1133 { 1134 struct perf_env *env = data; 1135 int core = -1, cpu = perf_env__get_cpu(env, map, idx); 1136 1137 if (cpu != -1) { 1138 /* 1139 * Encode socket in bit range 31:24 1140 * encode die id in bit range 23:16 1141 * core_id is relative to socket and die, 1142 * we need a global id. So we combine 1143 * socket + die id + core id 1144 */ 1145 if (WARN_ONCE(env->cpu[cpu].socket_id >> 8, "The socket id number is too big.\n")) 1146 return -1; 1147 1148 if (WARN_ONCE(env->cpu[cpu].die_id >> 8, "The die id number is too big.\n")) 1149 return -1; 1150 1151 if (WARN_ONCE(env->cpu[cpu].core_id >> 16, "The core id number is too big.\n")) 1152 return -1; 1153 1154 core = (env->cpu[cpu].socket_id << 24) | 1155 (env->cpu[cpu].die_id << 16) | 1156 (env->cpu[cpu].core_id & 0xffff); 1157 } 1158 1159 return core; 1160 } 1161 1162 static int perf_env__get_node(struct perf_cpu_map *map, int idx, void *data) 1163 { 1164 int cpu = perf_env__get_cpu(data, map, idx); 1165 1166 return perf_env__numa_node(data, cpu); 1167 } 1168 1169 static int perf_env__build_socket_map(struct perf_env *env, struct perf_cpu_map *cpus, 1170 struct perf_cpu_map **sockp) 1171 { 1172 return cpu_map__build_map(cpus, sockp, perf_env__get_socket, env); 1173 } 1174 1175 static int perf_env__build_die_map(struct perf_env *env, struct perf_cpu_map *cpus, 1176 struct perf_cpu_map **diep) 1177 { 1178 return cpu_map__build_map(cpus, diep, perf_env__get_die, env); 1179 } 1180 1181 static int perf_env__build_core_map(struct perf_env *env, struct perf_cpu_map *cpus, 1182 struct perf_cpu_map **corep) 1183 { 1184 return cpu_map__build_map(cpus, corep, perf_env__get_core, env); 1185 } 1186 1187 static int perf_env__build_node_map(struct perf_env *env, struct perf_cpu_map *cpus, 1188 struct perf_cpu_map **nodep) 1189 { 1190 return cpu_map__build_map(cpus, nodep, perf_env__get_node, env); 1191 } 1192 1193 static int perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused, 1194 struct perf_cpu_map *map, int idx) 1195 { 1196 return perf_env__get_socket(map, idx, &perf_stat.session->header.env); 1197 } 1198 static int perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused, 1199 struct perf_cpu_map *map, int idx) 1200 { 1201 return perf_env__get_die(map, idx, &perf_stat.session->header.env); 1202 } 1203 1204 static int perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused, 1205 struct perf_cpu_map *map, int idx) 1206 { 1207 return perf_env__get_core(map, idx, &perf_stat.session->header.env); 1208 } 1209 1210 static int perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused, 1211 struct perf_cpu_map *map, int idx) 1212 { 1213 return perf_env__get_node(map, idx, &perf_stat.session->header.env); 1214 } 1215 1216 static int perf_stat_init_aggr_mode_file(struct perf_stat *st) 1217 { 1218 struct perf_env *env = &st->session->header.env; 1219 1220 switch (stat_config.aggr_mode) { 1221 case AGGR_SOCKET: 1222 if (perf_env__build_socket_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { 1223 perror("cannot build socket map"); 1224 return -1; 1225 } 1226 stat_config.aggr_get_id = perf_stat__get_socket_file; 1227 break; 1228 case AGGR_DIE: 1229 if (perf_env__build_die_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { 1230 perror("cannot build die map"); 1231 return -1; 1232 } 1233 stat_config.aggr_get_id = perf_stat__get_die_file; 1234 break; 1235 case AGGR_CORE: 1236 if (perf_env__build_core_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { 1237 perror("cannot build core map"); 1238 return -1; 1239 } 1240 stat_config.aggr_get_id = perf_stat__get_core_file; 1241 break; 1242 case AGGR_NODE: 1243 if (perf_env__build_node_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { 1244 perror("cannot build core map"); 1245 return -1; 1246 } 1247 stat_config.aggr_get_id = perf_stat__get_node_file; 1248 break; 1249 case AGGR_NONE: 1250 case AGGR_GLOBAL: 1251 case AGGR_THREAD: 1252 case AGGR_UNSET: 1253 default: 1254 break; 1255 } 1256 1257 return 0; 1258 } 1259 1260 static int topdown_filter_events(const char **attr, char **str, bool use_group) 1261 { 1262 int off = 0; 1263 int i; 1264 int len = 0; 1265 char *s; 1266 1267 for (i = 0; attr[i]; i++) { 1268 if (pmu_have_event("cpu", attr[i])) { 1269 len += strlen(attr[i]) + 1; 1270 attr[i - off] = attr[i]; 1271 } else 1272 off++; 1273 } 1274 attr[i - off] = NULL; 1275 1276 *str = malloc(len + 1 + 2); 1277 if (!*str) 1278 return -1; 1279 s = *str; 1280 if (i - off == 0) { 1281 *s = 0; 1282 return 0; 1283 } 1284 if (use_group) 1285 *s++ = '{'; 1286 for (i = 0; attr[i]; i++) { 1287 strcpy(s, attr[i]); 1288 s += strlen(s); 1289 *s++ = ','; 1290 } 1291 if (use_group) { 1292 s[-1] = '}'; 1293 *s = 0; 1294 } else 1295 s[-1] = 0; 1296 return 0; 1297 } 1298 1299 __weak bool arch_topdown_check_group(bool *warn) 1300 { 1301 *warn = false; 1302 return false; 1303 } 1304 1305 __weak void arch_topdown_group_warn(void) 1306 { 1307 } 1308 1309 /* 1310 * Add default attributes, if there were no attributes specified or 1311 * if -d/--detailed, -d -d or -d -d -d is used: 1312 */ 1313 static int add_default_attributes(void) 1314 { 1315 int err; 1316 struct perf_event_attr default_attrs0[] = { 1317 1318 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, 1319 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES }, 1320 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, 1321 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, 1322 1323 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, 1324 }; 1325 struct perf_event_attr frontend_attrs[] = { 1326 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, 1327 }; 1328 struct perf_event_attr backend_attrs[] = { 1329 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, 1330 }; 1331 struct perf_event_attr default_attrs1[] = { 1332 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, 1333 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, 1334 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, 1335 1336 }; 1337 1338 /* 1339 * Detailed stats (-d), covering the L1 and last level data caches: 1340 */ 1341 struct perf_event_attr detailed_attrs[] = { 1342 1343 { .type = PERF_TYPE_HW_CACHE, 1344 .config = 1345 PERF_COUNT_HW_CACHE_L1D << 0 | 1346 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1347 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1348 1349 { .type = PERF_TYPE_HW_CACHE, 1350 .config = 1351 PERF_COUNT_HW_CACHE_L1D << 0 | 1352 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1353 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1354 1355 { .type = PERF_TYPE_HW_CACHE, 1356 .config = 1357 PERF_COUNT_HW_CACHE_LL << 0 | 1358 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1359 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1360 1361 { .type = PERF_TYPE_HW_CACHE, 1362 .config = 1363 PERF_COUNT_HW_CACHE_LL << 0 | 1364 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1365 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1366 }; 1367 1368 /* 1369 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches: 1370 */ 1371 struct perf_event_attr very_detailed_attrs[] = { 1372 1373 { .type = PERF_TYPE_HW_CACHE, 1374 .config = 1375 PERF_COUNT_HW_CACHE_L1I << 0 | 1376 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1377 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1378 1379 { .type = PERF_TYPE_HW_CACHE, 1380 .config = 1381 PERF_COUNT_HW_CACHE_L1I << 0 | 1382 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1383 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1384 1385 { .type = PERF_TYPE_HW_CACHE, 1386 .config = 1387 PERF_COUNT_HW_CACHE_DTLB << 0 | 1388 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1389 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1390 1391 { .type = PERF_TYPE_HW_CACHE, 1392 .config = 1393 PERF_COUNT_HW_CACHE_DTLB << 0 | 1394 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1395 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1396 1397 { .type = PERF_TYPE_HW_CACHE, 1398 .config = 1399 PERF_COUNT_HW_CACHE_ITLB << 0 | 1400 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1401 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1402 1403 { .type = PERF_TYPE_HW_CACHE, 1404 .config = 1405 PERF_COUNT_HW_CACHE_ITLB << 0 | 1406 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1407 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1408 1409 }; 1410 1411 /* 1412 * Very, very detailed stats (-d -d -d), adding prefetch events: 1413 */ 1414 struct perf_event_attr very_very_detailed_attrs[] = { 1415 1416 { .type = PERF_TYPE_HW_CACHE, 1417 .config = 1418 PERF_COUNT_HW_CACHE_L1D << 0 | 1419 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | 1420 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1421 1422 { .type = PERF_TYPE_HW_CACHE, 1423 .config = 1424 PERF_COUNT_HW_CACHE_L1D << 0 | 1425 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | 1426 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1427 }; 1428 struct parse_events_error errinfo; 1429 1430 /* Set attrs if no event is selected and !null_run: */ 1431 if (stat_config.null_run) 1432 return 0; 1433 1434 bzero(&errinfo, sizeof(errinfo)); 1435 if (transaction_run) { 1436 /* Handle -T as -M transaction. Once platform specific metrics 1437 * support has been added to the json files, all archictures 1438 * will use this approach. To determine transaction support 1439 * on an architecture test for such a metric name. 1440 */ 1441 if (metricgroup__has_metric("transaction")) { 1442 struct option opt = { .value = &evsel_list }; 1443 1444 return metricgroup__parse_groups(&opt, "transaction", 1445 &stat_config.metric_events); 1446 } 1447 1448 if (pmu_have_event("cpu", "cycles-ct") && 1449 pmu_have_event("cpu", "el-start")) 1450 err = parse_events(evsel_list, transaction_attrs, 1451 &errinfo); 1452 else 1453 err = parse_events(evsel_list, 1454 transaction_limited_attrs, 1455 &errinfo); 1456 if (err) { 1457 fprintf(stderr, "Cannot set up transaction events\n"); 1458 parse_events_print_error(&errinfo, transaction_attrs); 1459 return -1; 1460 } 1461 return 0; 1462 } 1463 1464 if (smi_cost) { 1465 int smi; 1466 1467 if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) { 1468 fprintf(stderr, "freeze_on_smi is not supported.\n"); 1469 return -1; 1470 } 1471 1472 if (!smi) { 1473 if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) { 1474 fprintf(stderr, "Failed to set freeze_on_smi.\n"); 1475 return -1; 1476 } 1477 smi_reset = true; 1478 } 1479 1480 if (pmu_have_event("msr", "aperf") && 1481 pmu_have_event("msr", "smi")) { 1482 if (!force_metric_only) 1483 stat_config.metric_only = true; 1484 err = parse_events(evsel_list, smi_cost_attrs, &errinfo); 1485 } else { 1486 fprintf(stderr, "To measure SMI cost, it needs " 1487 "msr/aperf/, msr/smi/ and cpu/cycles/ support\n"); 1488 parse_events_print_error(&errinfo, smi_cost_attrs); 1489 return -1; 1490 } 1491 if (err) { 1492 parse_events_print_error(&errinfo, smi_cost_attrs); 1493 fprintf(stderr, "Cannot set up SMI cost events\n"); 1494 return -1; 1495 } 1496 return 0; 1497 } 1498 1499 if (topdown_run) { 1500 char *str = NULL; 1501 bool warn = false; 1502 1503 if (stat_config.aggr_mode != AGGR_GLOBAL && 1504 stat_config.aggr_mode != AGGR_CORE) { 1505 pr_err("top down event configuration requires --per-core mode\n"); 1506 return -1; 1507 } 1508 stat_config.aggr_mode = AGGR_CORE; 1509 if (nr_cgroups || !target__has_cpu(&target)) { 1510 pr_err("top down event configuration requires system-wide mode (-a)\n"); 1511 return -1; 1512 } 1513 1514 if (!force_metric_only) 1515 stat_config.metric_only = true; 1516 if (topdown_filter_events(topdown_attrs, &str, 1517 arch_topdown_check_group(&warn)) < 0) { 1518 pr_err("Out of memory\n"); 1519 return -1; 1520 } 1521 if (topdown_attrs[0] && str) { 1522 if (warn) 1523 arch_topdown_group_warn(); 1524 err = parse_events(evsel_list, str, &errinfo); 1525 if (err) { 1526 fprintf(stderr, 1527 "Cannot set up top down events %s: %d\n", 1528 str, err); 1529 parse_events_print_error(&errinfo, str); 1530 free(str); 1531 return -1; 1532 } 1533 } else { 1534 fprintf(stderr, "System does not support topdown\n"); 1535 return -1; 1536 } 1537 free(str); 1538 } 1539 1540 if (!evsel_list->core.nr_entries) { 1541 if (target__has_cpu(&target)) 1542 default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK; 1543 1544 if (perf_evlist__add_default_attrs(evsel_list, default_attrs0) < 0) 1545 return -1; 1546 if (pmu_have_event("cpu", "stalled-cycles-frontend")) { 1547 if (perf_evlist__add_default_attrs(evsel_list, 1548 frontend_attrs) < 0) 1549 return -1; 1550 } 1551 if (pmu_have_event("cpu", "stalled-cycles-backend")) { 1552 if (perf_evlist__add_default_attrs(evsel_list, 1553 backend_attrs) < 0) 1554 return -1; 1555 } 1556 if (perf_evlist__add_default_attrs(evsel_list, default_attrs1) < 0) 1557 return -1; 1558 } 1559 1560 /* Detailed events get appended to the event list: */ 1561 1562 if (detailed_run < 1) 1563 return 0; 1564 1565 /* Append detailed run extra attributes: */ 1566 if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0) 1567 return -1; 1568 1569 if (detailed_run < 2) 1570 return 0; 1571 1572 /* Append very detailed run extra attributes: */ 1573 if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0) 1574 return -1; 1575 1576 if (detailed_run < 3) 1577 return 0; 1578 1579 /* Append very, very detailed run extra attributes: */ 1580 return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); 1581 } 1582 1583 static const char * const stat_record_usage[] = { 1584 "perf stat record [<options>]", 1585 NULL, 1586 }; 1587 1588 static void init_features(struct perf_session *session) 1589 { 1590 int feat; 1591 1592 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++) 1593 perf_header__set_feat(&session->header, feat); 1594 1595 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT); 1596 perf_header__clear_feat(&session->header, HEADER_BUILD_ID); 1597 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA); 1598 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK); 1599 perf_header__clear_feat(&session->header, HEADER_AUXTRACE); 1600 } 1601 1602 static int __cmd_record(int argc, const char **argv) 1603 { 1604 struct perf_session *session; 1605 struct perf_data *data = &perf_stat.data; 1606 1607 argc = parse_options(argc, argv, stat_options, stat_record_usage, 1608 PARSE_OPT_STOP_AT_NON_OPTION); 1609 1610 if (output_name) 1611 data->path = output_name; 1612 1613 if (stat_config.run_count != 1 || forever) { 1614 pr_err("Cannot use -r option with perf stat record.\n"); 1615 return -1; 1616 } 1617 1618 session = perf_session__new(data, false, NULL); 1619 if (IS_ERR(session)) { 1620 pr_err("Perf session creation failed\n"); 1621 return PTR_ERR(session); 1622 } 1623 1624 init_features(session); 1625 1626 session->evlist = evsel_list; 1627 perf_stat.session = session; 1628 perf_stat.record = true; 1629 return argc; 1630 } 1631 1632 static int process_stat_round_event(struct perf_session *session, 1633 union perf_event *event) 1634 { 1635 struct perf_record_stat_round *stat_round = &event->stat_round; 1636 struct evsel *counter; 1637 struct timespec tsh, *ts = NULL; 1638 const char **argv = session->header.env.cmdline_argv; 1639 int argc = session->header.env.nr_cmdline; 1640 1641 evlist__for_each_entry(evsel_list, counter) 1642 perf_stat_process_counter(&stat_config, counter); 1643 1644 if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL) 1645 update_stats(&walltime_nsecs_stats, stat_round->time); 1646 1647 if (stat_config.interval && stat_round->time) { 1648 tsh.tv_sec = stat_round->time / NSEC_PER_SEC; 1649 tsh.tv_nsec = stat_round->time % NSEC_PER_SEC; 1650 ts = &tsh; 1651 } 1652 1653 print_counters(ts, argc, argv); 1654 return 0; 1655 } 1656 1657 static 1658 int process_stat_config_event(struct perf_session *session, 1659 union perf_event *event) 1660 { 1661 struct perf_tool *tool = session->tool; 1662 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 1663 1664 perf_event__read_stat_config(&stat_config, &event->stat_config); 1665 1666 if (perf_cpu_map__empty(st->cpus)) { 1667 if (st->aggr_mode != AGGR_UNSET) 1668 pr_warning("warning: processing task data, aggregation mode not set\n"); 1669 return 0; 1670 } 1671 1672 if (st->aggr_mode != AGGR_UNSET) 1673 stat_config.aggr_mode = st->aggr_mode; 1674 1675 if (perf_stat.data.is_pipe) 1676 perf_stat_init_aggr_mode(); 1677 else 1678 perf_stat_init_aggr_mode_file(st); 1679 1680 return 0; 1681 } 1682 1683 static int set_maps(struct perf_stat *st) 1684 { 1685 if (!st->cpus || !st->threads) 1686 return 0; 1687 1688 if (WARN_ONCE(st->maps_allocated, "stats double allocation\n")) 1689 return -EINVAL; 1690 1691 perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads); 1692 1693 if (perf_evlist__alloc_stats(evsel_list, true)) 1694 return -ENOMEM; 1695 1696 st->maps_allocated = true; 1697 return 0; 1698 } 1699 1700 static 1701 int process_thread_map_event(struct perf_session *session, 1702 union perf_event *event) 1703 { 1704 struct perf_tool *tool = session->tool; 1705 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 1706 1707 if (st->threads) { 1708 pr_warning("Extra thread map event, ignoring.\n"); 1709 return 0; 1710 } 1711 1712 st->threads = thread_map__new_event(&event->thread_map); 1713 if (!st->threads) 1714 return -ENOMEM; 1715 1716 return set_maps(st); 1717 } 1718 1719 static 1720 int process_cpu_map_event(struct perf_session *session, 1721 union perf_event *event) 1722 { 1723 struct perf_tool *tool = session->tool; 1724 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 1725 struct perf_cpu_map *cpus; 1726 1727 if (st->cpus) { 1728 pr_warning("Extra cpu map event, ignoring.\n"); 1729 return 0; 1730 } 1731 1732 cpus = cpu_map__new_data(&event->cpu_map.data); 1733 if (!cpus) 1734 return -ENOMEM; 1735 1736 st->cpus = cpus; 1737 return set_maps(st); 1738 } 1739 1740 static int runtime_stat_new(struct perf_stat_config *config, int nthreads) 1741 { 1742 int i; 1743 1744 config->stats = calloc(nthreads, sizeof(struct runtime_stat)); 1745 if (!config->stats) 1746 return -1; 1747 1748 config->stats_num = nthreads; 1749 1750 for (i = 0; i < nthreads; i++) 1751 runtime_stat__init(&config->stats[i]); 1752 1753 return 0; 1754 } 1755 1756 static void runtime_stat_delete(struct perf_stat_config *config) 1757 { 1758 int i; 1759 1760 if (!config->stats) 1761 return; 1762 1763 for (i = 0; i < config->stats_num; i++) 1764 runtime_stat__exit(&config->stats[i]); 1765 1766 zfree(&config->stats); 1767 } 1768 1769 static const char * const stat_report_usage[] = { 1770 "perf stat report [<options>]", 1771 NULL, 1772 }; 1773 1774 static struct perf_stat perf_stat = { 1775 .tool = { 1776 .attr = perf_event__process_attr, 1777 .event_update = perf_event__process_event_update, 1778 .thread_map = process_thread_map_event, 1779 .cpu_map = process_cpu_map_event, 1780 .stat_config = process_stat_config_event, 1781 .stat = perf_event__process_stat_event, 1782 .stat_round = process_stat_round_event, 1783 }, 1784 .aggr_mode = AGGR_UNSET, 1785 }; 1786 1787 static int __cmd_report(int argc, const char **argv) 1788 { 1789 struct perf_session *session; 1790 const struct option options[] = { 1791 OPT_STRING('i', "input", &input_name, "file", "input file name"), 1792 OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode, 1793 "aggregate counts per processor socket", AGGR_SOCKET), 1794 OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode, 1795 "aggregate counts per processor die", AGGR_DIE), 1796 OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode, 1797 "aggregate counts per physical processor core", AGGR_CORE), 1798 OPT_SET_UINT(0, "per-node", &perf_stat.aggr_mode, 1799 "aggregate counts per numa node", AGGR_NODE), 1800 OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode, 1801 "disable CPU count aggregation", AGGR_NONE), 1802 OPT_END() 1803 }; 1804 struct stat st; 1805 int ret; 1806 1807 argc = parse_options(argc, argv, options, stat_report_usage, 0); 1808 1809 if (!input_name || !strlen(input_name)) { 1810 if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode)) 1811 input_name = "-"; 1812 else 1813 input_name = "perf.data"; 1814 } 1815 1816 perf_stat.data.path = input_name; 1817 perf_stat.data.mode = PERF_DATA_MODE_READ; 1818 1819 session = perf_session__new(&perf_stat.data, false, &perf_stat.tool); 1820 if (IS_ERR(session)) 1821 return PTR_ERR(session); 1822 1823 perf_stat.session = session; 1824 stat_config.output = stderr; 1825 evsel_list = session->evlist; 1826 1827 ret = perf_session__process_events(session); 1828 if (ret) 1829 return ret; 1830 1831 perf_session__delete(session); 1832 return 0; 1833 } 1834 1835 static void setup_system_wide(int forks) 1836 { 1837 /* 1838 * Make system wide (-a) the default target if 1839 * no target was specified and one of following 1840 * conditions is met: 1841 * 1842 * - there's no workload specified 1843 * - there is workload specified but all requested 1844 * events are system wide events 1845 */ 1846 if (!target__none(&target)) 1847 return; 1848 1849 if (!forks) 1850 target.system_wide = true; 1851 else { 1852 struct evsel *counter; 1853 1854 evlist__for_each_entry(evsel_list, counter) { 1855 if (!counter->core.system_wide) 1856 return; 1857 } 1858 1859 if (evsel_list->core.nr_entries) 1860 target.system_wide = true; 1861 } 1862 } 1863 1864 int cmd_stat(int argc, const char **argv) 1865 { 1866 const char * const stat_usage[] = { 1867 "perf stat [<options>] [<command>]", 1868 NULL 1869 }; 1870 int status = -EINVAL, run_idx; 1871 const char *mode; 1872 FILE *output = stderr; 1873 unsigned int interval, timeout; 1874 const char * const stat_subcommands[] = { "record", "report" }; 1875 1876 setlocale(LC_ALL, ""); 1877 1878 evsel_list = evlist__new(); 1879 if (evsel_list == NULL) 1880 return -ENOMEM; 1881 1882 parse_events__shrink_config_terms(); 1883 1884 /* String-parsing callback-based options would segfault when negated */ 1885 set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG); 1886 set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG); 1887 set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG); 1888 1889 argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands, 1890 (const char **) stat_usage, 1891 PARSE_OPT_STOP_AT_NON_OPTION); 1892 perf_stat__collect_metric_expr(evsel_list); 1893 perf_stat__init_shadow_stats(); 1894 1895 if (stat_config.csv_sep) { 1896 stat_config.csv_output = true; 1897 if (!strcmp(stat_config.csv_sep, "\\t")) 1898 stat_config.csv_sep = "\t"; 1899 } else 1900 stat_config.csv_sep = DEFAULT_SEPARATOR; 1901 1902 if (argc && !strncmp(argv[0], "rec", 3)) { 1903 argc = __cmd_record(argc, argv); 1904 if (argc < 0) 1905 return -1; 1906 } else if (argc && !strncmp(argv[0], "rep", 3)) 1907 return __cmd_report(argc, argv); 1908 1909 interval = stat_config.interval; 1910 timeout = stat_config.timeout; 1911 1912 /* 1913 * For record command the -o is already taken care of. 1914 */ 1915 if (!STAT_RECORD && output_name && strcmp(output_name, "-")) 1916 output = NULL; 1917 1918 if (output_name && output_fd) { 1919 fprintf(stderr, "cannot use both --output and --log-fd\n"); 1920 parse_options_usage(stat_usage, stat_options, "o", 1); 1921 parse_options_usage(NULL, stat_options, "log-fd", 0); 1922 goto out; 1923 } 1924 1925 if (stat_config.metric_only && stat_config.aggr_mode == AGGR_THREAD) { 1926 fprintf(stderr, "--metric-only is not supported with --per-thread\n"); 1927 goto out; 1928 } 1929 1930 if (stat_config.metric_only && stat_config.run_count > 1) { 1931 fprintf(stderr, "--metric-only is not supported with -r\n"); 1932 goto out; 1933 } 1934 1935 if (stat_config.walltime_run_table && stat_config.run_count <= 1) { 1936 fprintf(stderr, "--table is only supported with -r\n"); 1937 parse_options_usage(stat_usage, stat_options, "r", 1); 1938 parse_options_usage(NULL, stat_options, "table", 0); 1939 goto out; 1940 } 1941 1942 if (output_fd < 0) { 1943 fprintf(stderr, "argument to --log-fd must be a > 0\n"); 1944 parse_options_usage(stat_usage, stat_options, "log-fd", 0); 1945 goto out; 1946 } 1947 1948 if (!output) { 1949 struct timespec tm; 1950 mode = append_file ? "a" : "w"; 1951 1952 output = fopen(output_name, mode); 1953 if (!output) { 1954 perror("failed to create output file"); 1955 return -1; 1956 } 1957 clock_gettime(CLOCK_REALTIME, &tm); 1958 fprintf(output, "# started on %s\n", ctime(&tm.tv_sec)); 1959 } else if (output_fd > 0) { 1960 mode = append_file ? "a" : "w"; 1961 output = fdopen(output_fd, mode); 1962 if (!output) { 1963 perror("Failed opening logfd"); 1964 return -errno; 1965 } 1966 } 1967 1968 stat_config.output = output; 1969 1970 /* 1971 * let the spreadsheet do the pretty-printing 1972 */ 1973 if (stat_config.csv_output) { 1974 /* User explicitly passed -B? */ 1975 if (big_num_opt == 1) { 1976 fprintf(stderr, "-B option not supported with -x\n"); 1977 parse_options_usage(stat_usage, stat_options, "B", 1); 1978 parse_options_usage(NULL, stat_options, "x", 1); 1979 goto out; 1980 } else /* Nope, so disable big number formatting */ 1981 stat_config.big_num = false; 1982 } else if (big_num_opt == 0) /* User passed --no-big-num */ 1983 stat_config.big_num = false; 1984 1985 setup_system_wide(argc); 1986 1987 /* 1988 * Display user/system times only for single 1989 * run and when there's specified tracee. 1990 */ 1991 if ((stat_config.run_count == 1) && target__none(&target)) 1992 stat_config.ru_display = true; 1993 1994 if (stat_config.run_count < 0) { 1995 pr_err("Run count must be a positive number\n"); 1996 parse_options_usage(stat_usage, stat_options, "r", 1); 1997 goto out; 1998 } else if (stat_config.run_count == 0) { 1999 forever = true; 2000 stat_config.run_count = 1; 2001 } 2002 2003 if (stat_config.walltime_run_table) { 2004 stat_config.walltime_run = zalloc(stat_config.run_count * sizeof(stat_config.walltime_run[0])); 2005 if (!stat_config.walltime_run) { 2006 pr_err("failed to setup -r option"); 2007 goto out; 2008 } 2009 } 2010 2011 if ((stat_config.aggr_mode == AGGR_THREAD) && 2012 !target__has_task(&target)) { 2013 if (!target.system_wide || target.cpu_list) { 2014 fprintf(stderr, "The --per-thread option is only " 2015 "available when monitoring via -p -t -a " 2016 "options or only --per-thread.\n"); 2017 parse_options_usage(NULL, stat_options, "p", 1); 2018 parse_options_usage(NULL, stat_options, "t", 1); 2019 goto out; 2020 } 2021 } 2022 2023 /* 2024 * no_aggr, cgroup are for system-wide only 2025 * --per-thread is aggregated per thread, we dont mix it with cpu mode 2026 */ 2027 if (((stat_config.aggr_mode != AGGR_GLOBAL && 2028 stat_config.aggr_mode != AGGR_THREAD) || nr_cgroups) && 2029 !target__has_cpu(&target)) { 2030 fprintf(stderr, "both cgroup and no-aggregation " 2031 "modes only available in system-wide mode\n"); 2032 2033 parse_options_usage(stat_usage, stat_options, "G", 1); 2034 parse_options_usage(NULL, stat_options, "A", 1); 2035 parse_options_usage(NULL, stat_options, "a", 1); 2036 goto out; 2037 } 2038 2039 if (add_default_attributes()) 2040 goto out; 2041 2042 target__validate(&target); 2043 2044 if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide)) 2045 target.per_thread = true; 2046 2047 if (perf_evlist__create_maps(evsel_list, &target) < 0) { 2048 if (target__has_task(&target)) { 2049 pr_err("Problems finding threads of monitor\n"); 2050 parse_options_usage(stat_usage, stat_options, "p", 1); 2051 parse_options_usage(NULL, stat_options, "t", 1); 2052 } else if (target__has_cpu(&target)) { 2053 perror("failed to parse CPUs map"); 2054 parse_options_usage(stat_usage, stat_options, "C", 1); 2055 parse_options_usage(NULL, stat_options, "a", 1); 2056 } 2057 goto out; 2058 } 2059 2060 /* 2061 * Initialize thread_map with comm names, 2062 * so we could print it out on output. 2063 */ 2064 if (stat_config.aggr_mode == AGGR_THREAD) { 2065 thread_map__read_comms(evsel_list->core.threads); 2066 if (target.system_wide) { 2067 if (runtime_stat_new(&stat_config, 2068 perf_thread_map__nr(evsel_list->core.threads))) { 2069 goto out; 2070 } 2071 } 2072 } 2073 2074 if (stat_config.aggr_mode == AGGR_NODE) 2075 cpu__setup_cpunode_map(); 2076 2077 if (stat_config.times && interval) 2078 interval_count = true; 2079 else if (stat_config.times && !interval) { 2080 pr_err("interval-count option should be used together with " 2081 "interval-print.\n"); 2082 parse_options_usage(stat_usage, stat_options, "interval-count", 0); 2083 parse_options_usage(stat_usage, stat_options, "I", 1); 2084 goto out; 2085 } 2086 2087 if (timeout && timeout < 100) { 2088 if (timeout < 10) { 2089 pr_err("timeout must be >= 10ms.\n"); 2090 parse_options_usage(stat_usage, stat_options, "timeout", 0); 2091 goto out; 2092 } else 2093 pr_warning("timeout < 100ms. " 2094 "The overhead percentage could be high in some cases. " 2095 "Please proceed with caution.\n"); 2096 } 2097 if (timeout && interval) { 2098 pr_err("timeout option is not supported with interval-print.\n"); 2099 parse_options_usage(stat_usage, stat_options, "timeout", 0); 2100 parse_options_usage(stat_usage, stat_options, "I", 1); 2101 goto out; 2102 } 2103 2104 if (perf_evlist__alloc_stats(evsel_list, interval)) 2105 goto out; 2106 2107 if (perf_stat_init_aggr_mode()) 2108 goto out; 2109 2110 /* 2111 * Set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless 2112 * while avoiding that older tools show confusing messages. 2113 * 2114 * However for pipe sessions we need to keep it zero, 2115 * because script's perf_evsel__check_attr is triggered 2116 * by attr->sample_type != 0, and we can't run it on 2117 * stat sessions. 2118 */ 2119 stat_config.identifier = !(STAT_RECORD && perf_stat.data.is_pipe); 2120 2121 /* 2122 * We dont want to block the signals - that would cause 2123 * child tasks to inherit that and Ctrl-C would not work. 2124 * What we want is for Ctrl-C to work in the exec()-ed 2125 * task, but being ignored by perf stat itself: 2126 */ 2127 atexit(sig_atexit); 2128 if (!forever) 2129 signal(SIGINT, skip_signal); 2130 signal(SIGCHLD, skip_signal); 2131 signal(SIGALRM, skip_signal); 2132 signal(SIGABRT, skip_signal); 2133 2134 status = 0; 2135 for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) { 2136 if (stat_config.run_count != 1 && verbose > 0) 2137 fprintf(output, "[ perf stat: executing run #%d ... ]\n", 2138 run_idx + 1); 2139 2140 if (run_idx != 0) 2141 perf_evlist__reset_prev_raw_counts(evsel_list); 2142 2143 status = run_perf_stat(argc, argv, run_idx); 2144 if (forever && status != -1 && !interval) { 2145 print_counters(NULL, argc, argv); 2146 perf_stat__reset_stats(); 2147 } 2148 } 2149 2150 if (!forever && status != -1 && !interval) 2151 print_counters(NULL, argc, argv); 2152 2153 if (STAT_RECORD) { 2154 /* 2155 * We synthesize the kernel mmap record just so that older tools 2156 * don't emit warnings about not being able to resolve symbols 2157 * due to /proc/sys/kernel/kptr_restrict settings and instear provide 2158 * a saner message about no samples being in the perf.data file. 2159 * 2160 * This also serves to suppress a warning about f_header.data.size == 0 2161 * in header.c at the moment 'perf stat record' gets introduced, which 2162 * is not really needed once we start adding the stat specific PERF_RECORD_ 2163 * records, but the need to suppress the kptr_restrict messages in older 2164 * tools remain -acme 2165 */ 2166 int fd = perf_data__fd(&perf_stat.data); 2167 int err = perf_event__synthesize_kernel_mmap((void *)&perf_stat, 2168 process_synthesized_event, 2169 &perf_stat.session->machines.host); 2170 if (err) { 2171 pr_warning("Couldn't synthesize the kernel mmap record, harmless, " 2172 "older tools may produce warnings about this file\n."); 2173 } 2174 2175 if (!interval) { 2176 if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL)) 2177 pr_err("failed to write stat round event\n"); 2178 } 2179 2180 if (!perf_stat.data.is_pipe) { 2181 perf_stat.session->header.data_size += perf_stat.bytes_written; 2182 perf_session__write_header(perf_stat.session, evsel_list, fd, true); 2183 } 2184 2185 evlist__close(evsel_list); 2186 perf_session__delete(perf_stat.session); 2187 } 2188 2189 perf_stat__exit_aggr_mode(); 2190 perf_evlist__free_stats(evsel_list); 2191 out: 2192 zfree(&stat_config.walltime_run); 2193 2194 if (smi_cost && smi_reset) 2195 sysfs__write_int(FREEZE_ON_SMI_PATH, 0); 2196 2197 evlist__delete(evsel_list); 2198 2199 runtime_stat_delete(&stat_config); 2200 2201 return status; 2202 } 2203