1 /* 2 * builtin-stat.c 3 * 4 * Builtin stat command: Give a precise performance counters summary 5 * overview about any workload, CPU or specific PID. 6 * 7 * Sample output: 8 9 $ perf stat ./hackbench 10 10 11 Time: 0.118 12 13 Performance counter stats for './hackbench 10': 14 15 1708.761321 task-clock # 11.037 CPUs utilized 16 41,190 context-switches # 0.024 M/sec 17 6,735 CPU-migrations # 0.004 M/sec 18 17,318 page-faults # 0.010 M/sec 19 5,205,202,243 cycles # 3.046 GHz 20 3,856,436,920 stalled-cycles-frontend # 74.09% frontend cycles idle 21 1,600,790,871 stalled-cycles-backend # 30.75% backend cycles idle 22 2,603,501,247 instructions # 0.50 insns per cycle 23 # 1.48 stalled cycles per insn 24 484,357,498 branches # 283.455 M/sec 25 6,388,934 branch-misses # 1.32% of all branches 26 27 0.154822978 seconds time elapsed 28 29 * 30 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com> 31 * 32 * Improvements and fixes by: 33 * 34 * Arjan van de Ven <arjan@linux.intel.com> 35 * Yanmin Zhang <yanmin.zhang@intel.com> 36 * Wu Fengguang <fengguang.wu@intel.com> 37 * Mike Galbraith <efault@gmx.de> 38 * Paul Mackerras <paulus@samba.org> 39 * Jaswinder Singh Rajput <jaswinder@kernel.org> 40 * 41 * Released under the GPL v2. (and only v2, not any later version) 42 */ 43 44 #include "perf.h" 45 #include "builtin.h" 46 #include "util/util.h" 47 #include "util/parse-options.h" 48 #include "util/parse-events.h" 49 #include "util/event.h" 50 #include "util/evlist.h" 51 #include "util/evsel.h" 52 #include "util/debug.h" 53 #include "util/color.h" 54 #include "util/stat.h" 55 #include "util/header.h" 56 #include "util/cpumap.h" 57 #include "util/thread.h" 58 #include "util/thread_map.h" 59 60 #include <stdlib.h> 61 #include <sys/prctl.h> 62 #include <locale.h> 63 64 #define DEFAULT_SEPARATOR " " 65 #define CNTR_NOT_SUPPORTED "<not supported>" 66 #define CNTR_NOT_COUNTED "<not counted>" 67 68 static void print_stat(int argc, const char **argv); 69 static void print_counter_aggr(struct perf_evsel *counter, char *prefix); 70 static void print_counter(struct perf_evsel *counter, char *prefix); 71 static void print_aggr(char *prefix); 72 73 static struct perf_evlist *evsel_list; 74 75 static struct perf_target target = { 76 .uid = UINT_MAX, 77 }; 78 79 enum aggr_mode { 80 AGGR_NONE, 81 AGGR_GLOBAL, 82 AGGR_SOCKET, 83 AGGR_CORE, 84 }; 85 86 static int run_count = 1; 87 static bool no_inherit = false; 88 static bool scale = true; 89 static enum aggr_mode aggr_mode = AGGR_GLOBAL; 90 static pid_t child_pid = -1; 91 static bool null_run = false; 92 static int detailed_run = 0; 93 static bool big_num = true; 94 static int big_num_opt = -1; 95 static const char *csv_sep = NULL; 96 static bool csv_output = false; 97 static bool group = false; 98 static FILE *output = NULL; 99 static const char *pre_cmd = NULL; 100 static const char *post_cmd = NULL; 101 static bool sync_run = false; 102 static unsigned int interval = 0; 103 static bool forever = false; 104 static struct timespec ref_time; 105 static struct cpu_map *aggr_map; 106 static int (*aggr_get_id)(struct cpu_map *m, int cpu); 107 108 static volatile int done = 0; 109 110 struct perf_stat { 111 struct stats res_stats[3]; 112 }; 113 114 static inline void diff_timespec(struct timespec *r, struct timespec *a, 115 struct timespec *b) 116 { 117 r->tv_sec = a->tv_sec - b->tv_sec; 118 if (a->tv_nsec < b->tv_nsec) { 119 r->tv_nsec = a->tv_nsec + 1000000000L - b->tv_nsec; 120 r->tv_sec--; 121 } else { 122 r->tv_nsec = a->tv_nsec - b->tv_nsec ; 123 } 124 } 125 126 static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel) 127 { 128 return (evsel->cpus && !target.cpu_list) ? evsel->cpus : evsel_list->cpus; 129 } 130 131 static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel) 132 { 133 return perf_evsel__cpus(evsel)->nr; 134 } 135 136 static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel) 137 { 138 memset(evsel->priv, 0, sizeof(struct perf_stat)); 139 } 140 141 static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel) 142 { 143 evsel->priv = zalloc(sizeof(struct perf_stat)); 144 return evsel->priv == NULL ? -ENOMEM : 0; 145 } 146 147 static void perf_evsel__free_stat_priv(struct perf_evsel *evsel) 148 { 149 free(evsel->priv); 150 evsel->priv = NULL; 151 } 152 153 static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel) 154 { 155 void *addr; 156 size_t sz; 157 158 sz = sizeof(*evsel->counts) + 159 (perf_evsel__nr_cpus(evsel) * sizeof(struct perf_counts_values)); 160 161 addr = zalloc(sz); 162 if (!addr) 163 return -ENOMEM; 164 165 evsel->prev_raw_counts = addr; 166 167 return 0; 168 } 169 170 static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel) 171 { 172 free(evsel->prev_raw_counts); 173 evsel->prev_raw_counts = NULL; 174 } 175 176 static void perf_evlist__free_stats(struct perf_evlist *evlist) 177 { 178 struct perf_evsel *evsel; 179 180 list_for_each_entry(evsel, &evlist->entries, node) { 181 perf_evsel__free_stat_priv(evsel); 182 perf_evsel__free_counts(evsel); 183 perf_evsel__free_prev_raw_counts(evsel); 184 } 185 } 186 187 static int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw) 188 { 189 struct perf_evsel *evsel; 190 191 list_for_each_entry(evsel, &evlist->entries, node) { 192 if (perf_evsel__alloc_stat_priv(evsel) < 0 || 193 perf_evsel__alloc_counts(evsel, perf_evsel__nr_cpus(evsel)) < 0 || 194 (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel) < 0)) 195 goto out_free; 196 } 197 198 return 0; 199 200 out_free: 201 perf_evlist__free_stats(evlist); 202 return -1; 203 } 204 205 static struct stats runtime_nsecs_stats[MAX_NR_CPUS]; 206 static struct stats runtime_cycles_stats[MAX_NR_CPUS]; 207 static struct stats runtime_stalled_cycles_front_stats[MAX_NR_CPUS]; 208 static struct stats runtime_stalled_cycles_back_stats[MAX_NR_CPUS]; 209 static struct stats runtime_branches_stats[MAX_NR_CPUS]; 210 static struct stats runtime_cacherefs_stats[MAX_NR_CPUS]; 211 static struct stats runtime_l1_dcache_stats[MAX_NR_CPUS]; 212 static struct stats runtime_l1_icache_stats[MAX_NR_CPUS]; 213 static struct stats runtime_ll_cache_stats[MAX_NR_CPUS]; 214 static struct stats runtime_itlb_cache_stats[MAX_NR_CPUS]; 215 static struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS]; 216 static struct stats walltime_nsecs_stats; 217 218 static void perf_stat__reset_stats(struct perf_evlist *evlist) 219 { 220 struct perf_evsel *evsel; 221 222 list_for_each_entry(evsel, &evlist->entries, node) { 223 perf_evsel__reset_stat_priv(evsel); 224 perf_evsel__reset_counts(evsel, perf_evsel__nr_cpus(evsel)); 225 } 226 227 memset(runtime_nsecs_stats, 0, sizeof(runtime_nsecs_stats)); 228 memset(runtime_cycles_stats, 0, sizeof(runtime_cycles_stats)); 229 memset(runtime_stalled_cycles_front_stats, 0, sizeof(runtime_stalled_cycles_front_stats)); 230 memset(runtime_stalled_cycles_back_stats, 0, sizeof(runtime_stalled_cycles_back_stats)); 231 memset(runtime_branches_stats, 0, sizeof(runtime_branches_stats)); 232 memset(runtime_cacherefs_stats, 0, sizeof(runtime_cacherefs_stats)); 233 memset(runtime_l1_dcache_stats, 0, sizeof(runtime_l1_dcache_stats)); 234 memset(runtime_l1_icache_stats, 0, sizeof(runtime_l1_icache_stats)); 235 memset(runtime_ll_cache_stats, 0, sizeof(runtime_ll_cache_stats)); 236 memset(runtime_itlb_cache_stats, 0, sizeof(runtime_itlb_cache_stats)); 237 memset(runtime_dtlb_cache_stats, 0, sizeof(runtime_dtlb_cache_stats)); 238 memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats)); 239 } 240 241 static int create_perf_stat_counter(struct perf_evsel *evsel) 242 { 243 struct perf_event_attr *attr = &evsel->attr; 244 245 if (scale) 246 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | 247 PERF_FORMAT_TOTAL_TIME_RUNNING; 248 249 attr->inherit = !no_inherit; 250 251 if (perf_target__has_cpu(&target)) 252 return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel)); 253 254 if (!perf_target__has_task(&target) && 255 perf_evsel__is_group_leader(evsel)) { 256 attr->disabled = 1; 257 attr->enable_on_exec = 1; 258 } 259 260 return perf_evsel__open_per_thread(evsel, evsel_list->threads); 261 } 262 263 /* 264 * Does the counter have nsecs as a unit? 265 */ 266 static inline int nsec_counter(struct perf_evsel *evsel) 267 { 268 if (perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) || 269 perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) 270 return 1; 271 272 return 0; 273 } 274 275 /* 276 * Update various tracking values we maintain to print 277 * more semantic information such as miss/hit ratios, 278 * instruction rates, etc: 279 */ 280 static void update_shadow_stats(struct perf_evsel *counter, u64 *count) 281 { 282 if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK)) 283 update_stats(&runtime_nsecs_stats[0], count[0]); 284 else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) 285 update_stats(&runtime_cycles_stats[0], count[0]); 286 else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) 287 update_stats(&runtime_stalled_cycles_front_stats[0], count[0]); 288 else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND)) 289 update_stats(&runtime_stalled_cycles_back_stats[0], count[0]); 290 else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS)) 291 update_stats(&runtime_branches_stats[0], count[0]); 292 else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES)) 293 update_stats(&runtime_cacherefs_stats[0], count[0]); 294 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D)) 295 update_stats(&runtime_l1_dcache_stats[0], count[0]); 296 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I)) 297 update_stats(&runtime_l1_icache_stats[0], count[0]); 298 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL)) 299 update_stats(&runtime_ll_cache_stats[0], count[0]); 300 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB)) 301 update_stats(&runtime_dtlb_cache_stats[0], count[0]); 302 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB)) 303 update_stats(&runtime_itlb_cache_stats[0], count[0]); 304 } 305 306 /* 307 * Read out the results of a single counter: 308 * aggregate counts across CPUs in system-wide mode 309 */ 310 static int read_counter_aggr(struct perf_evsel *counter) 311 { 312 struct perf_stat *ps = counter->priv; 313 u64 *count = counter->counts->aggr.values; 314 int i; 315 316 if (__perf_evsel__read(counter, perf_evsel__nr_cpus(counter), 317 thread_map__nr(evsel_list->threads), scale) < 0) 318 return -1; 319 320 for (i = 0; i < 3; i++) 321 update_stats(&ps->res_stats[i], count[i]); 322 323 if (verbose) { 324 fprintf(output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", 325 perf_evsel__name(counter), count[0], count[1], count[2]); 326 } 327 328 /* 329 * Save the full runtime - to allow normalization during printout: 330 */ 331 update_shadow_stats(counter, count); 332 333 return 0; 334 } 335 336 /* 337 * Read out the results of a single counter: 338 * do not aggregate counts across CPUs in system-wide mode 339 */ 340 static int read_counter(struct perf_evsel *counter) 341 { 342 u64 *count; 343 int cpu; 344 345 for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) { 346 if (__perf_evsel__read_on_cpu(counter, cpu, 0, scale) < 0) 347 return -1; 348 349 count = counter->counts->cpu[cpu].values; 350 351 update_shadow_stats(counter, count); 352 } 353 354 return 0; 355 } 356 357 static void print_interval(void) 358 { 359 static int num_print_interval; 360 struct perf_evsel *counter; 361 struct perf_stat *ps; 362 struct timespec ts, rs; 363 char prefix[64]; 364 365 if (aggr_mode == AGGR_GLOBAL) { 366 list_for_each_entry(counter, &evsel_list->entries, node) { 367 ps = counter->priv; 368 memset(ps->res_stats, 0, sizeof(ps->res_stats)); 369 read_counter_aggr(counter); 370 } 371 } else { 372 list_for_each_entry(counter, &evsel_list->entries, node) { 373 ps = counter->priv; 374 memset(ps->res_stats, 0, sizeof(ps->res_stats)); 375 read_counter(counter); 376 } 377 } 378 379 clock_gettime(CLOCK_MONOTONIC, &ts); 380 diff_timespec(&rs, &ts, &ref_time); 381 sprintf(prefix, "%6lu.%09lu%s", rs.tv_sec, rs.tv_nsec, csv_sep); 382 383 if (num_print_interval == 0 && !csv_output) { 384 switch (aggr_mode) { 385 case AGGR_SOCKET: 386 fprintf(output, "# time socket cpus counts events\n"); 387 break; 388 case AGGR_CORE: 389 fprintf(output, "# time core cpus counts events\n"); 390 break; 391 case AGGR_NONE: 392 fprintf(output, "# time CPU counts events\n"); 393 break; 394 case AGGR_GLOBAL: 395 default: 396 fprintf(output, "# time counts events\n"); 397 } 398 } 399 400 if (++num_print_interval == 25) 401 num_print_interval = 0; 402 403 switch (aggr_mode) { 404 case AGGR_CORE: 405 case AGGR_SOCKET: 406 print_aggr(prefix); 407 break; 408 case AGGR_NONE: 409 list_for_each_entry(counter, &evsel_list->entries, node) 410 print_counter(counter, prefix); 411 break; 412 case AGGR_GLOBAL: 413 default: 414 list_for_each_entry(counter, &evsel_list->entries, node) 415 print_counter_aggr(counter, prefix); 416 } 417 } 418 419 static int __run_perf_stat(int argc, const char **argv) 420 { 421 char msg[512]; 422 unsigned long long t0, t1; 423 struct perf_evsel *counter; 424 struct timespec ts; 425 int status = 0; 426 const bool forks = (argc > 0); 427 428 if (interval) { 429 ts.tv_sec = interval / 1000; 430 ts.tv_nsec = (interval % 1000) * 1000000; 431 } else { 432 ts.tv_sec = 1; 433 ts.tv_nsec = 0; 434 } 435 436 if (forks) { 437 if (perf_evlist__prepare_workload(evsel_list, &target, argv, 438 false, false) < 0) { 439 perror("failed to prepare workload"); 440 return -1; 441 } 442 } 443 444 if (group) 445 perf_evlist__set_leader(evsel_list); 446 447 list_for_each_entry(counter, &evsel_list->entries, node) { 448 if (create_perf_stat_counter(counter) < 0) { 449 /* 450 * PPC returns ENXIO for HW counters until 2.6.37 451 * (behavior changed with commit b0a873e). 452 */ 453 if (errno == EINVAL || errno == ENOSYS || 454 errno == ENOENT || errno == EOPNOTSUPP || 455 errno == ENXIO) { 456 if (verbose) 457 ui__warning("%s event is not supported by the kernel.\n", 458 perf_evsel__name(counter)); 459 counter->supported = false; 460 continue; 461 } 462 463 perf_evsel__open_strerror(counter, &target, 464 errno, msg, sizeof(msg)); 465 ui__error("%s\n", msg); 466 467 if (child_pid != -1) 468 kill(child_pid, SIGTERM); 469 470 return -1; 471 } 472 counter->supported = true; 473 } 474 475 if (perf_evlist__apply_filters(evsel_list)) { 476 error("failed to set filter with %d (%s)\n", errno, 477 strerror(errno)); 478 return -1; 479 } 480 481 /* 482 * Enable counters and exec the command: 483 */ 484 t0 = rdclock(); 485 clock_gettime(CLOCK_MONOTONIC, &ref_time); 486 487 if (forks) { 488 perf_evlist__start_workload(evsel_list); 489 490 if (interval) { 491 while (!waitpid(child_pid, &status, WNOHANG)) { 492 nanosleep(&ts, NULL); 493 print_interval(); 494 } 495 } 496 wait(&status); 497 if (WIFSIGNALED(status)) 498 psignal(WTERMSIG(status), argv[0]); 499 } else { 500 while (!done) { 501 nanosleep(&ts, NULL); 502 if (interval) 503 print_interval(); 504 } 505 } 506 507 t1 = rdclock(); 508 509 update_stats(&walltime_nsecs_stats, t1 - t0); 510 511 if (aggr_mode == AGGR_GLOBAL) { 512 list_for_each_entry(counter, &evsel_list->entries, node) { 513 read_counter_aggr(counter); 514 perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter), 515 thread_map__nr(evsel_list->threads)); 516 } 517 } else { 518 list_for_each_entry(counter, &evsel_list->entries, node) { 519 read_counter(counter); 520 perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter), 1); 521 } 522 } 523 524 return WEXITSTATUS(status); 525 } 526 527 static int run_perf_stat(int argc __maybe_unused, const char **argv) 528 { 529 int ret; 530 531 if (pre_cmd) { 532 ret = system(pre_cmd); 533 if (ret) 534 return ret; 535 } 536 537 if (sync_run) 538 sync(); 539 540 ret = __run_perf_stat(argc, argv); 541 if (ret) 542 return ret; 543 544 if (post_cmd) { 545 ret = system(post_cmd); 546 if (ret) 547 return ret; 548 } 549 550 return ret; 551 } 552 553 static void print_noise_pct(double total, double avg) 554 { 555 double pct = rel_stddev_stats(total, avg); 556 557 if (csv_output) 558 fprintf(output, "%s%.2f%%", csv_sep, pct); 559 else if (pct) 560 fprintf(output, " ( +-%6.2f%% )", pct); 561 } 562 563 static void print_noise(struct perf_evsel *evsel, double avg) 564 { 565 struct perf_stat *ps; 566 567 if (run_count == 1) 568 return; 569 570 ps = evsel->priv; 571 print_noise_pct(stddev_stats(&ps->res_stats[0]), avg); 572 } 573 574 static void aggr_printout(struct perf_evsel *evsel, int id, int nr) 575 { 576 switch (aggr_mode) { 577 case AGGR_CORE: 578 fprintf(output, "S%d-C%*d%s%*d%s", 579 cpu_map__id_to_socket(id), 580 csv_output ? 0 : -8, 581 cpu_map__id_to_cpu(id), 582 csv_sep, 583 csv_output ? 0 : 4, 584 nr, 585 csv_sep); 586 break; 587 case AGGR_SOCKET: 588 fprintf(output, "S%*d%s%*d%s", 589 csv_output ? 0 : -5, 590 id, 591 csv_sep, 592 csv_output ? 0 : 4, 593 nr, 594 csv_sep); 595 break; 596 case AGGR_NONE: 597 fprintf(output, "CPU%*d%s", 598 csv_output ? 0 : -4, 599 perf_evsel__cpus(evsel)->map[id], csv_sep); 600 break; 601 case AGGR_GLOBAL: 602 default: 603 break; 604 } 605 } 606 607 static void nsec_printout(int cpu, int nr, struct perf_evsel *evsel, double avg) 608 { 609 double msecs = avg / 1e6; 610 const char *fmt = csv_output ? "%.6f%s%s" : "%18.6f%s%-25s"; 611 612 aggr_printout(evsel, cpu, nr); 613 614 fprintf(output, fmt, msecs, csv_sep, perf_evsel__name(evsel)); 615 616 if (evsel->cgrp) 617 fprintf(output, "%s%s", csv_sep, evsel->cgrp->name); 618 619 if (csv_output || interval) 620 return; 621 622 if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) 623 fprintf(output, " # %8.3f CPUs utilized ", 624 avg / avg_stats(&walltime_nsecs_stats)); 625 else 626 fprintf(output, " "); 627 } 628 629 /* used for get_ratio_color() */ 630 enum grc_type { 631 GRC_STALLED_CYCLES_FE, 632 GRC_STALLED_CYCLES_BE, 633 GRC_CACHE_MISSES, 634 GRC_MAX_NR 635 }; 636 637 static const char *get_ratio_color(enum grc_type type, double ratio) 638 { 639 static const double grc_table[GRC_MAX_NR][3] = { 640 [GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 }, 641 [GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 }, 642 [GRC_CACHE_MISSES] = { 20.0, 10.0, 5.0 }, 643 }; 644 const char *color = PERF_COLOR_NORMAL; 645 646 if (ratio > grc_table[type][0]) 647 color = PERF_COLOR_RED; 648 else if (ratio > grc_table[type][1]) 649 color = PERF_COLOR_MAGENTA; 650 else if (ratio > grc_table[type][2]) 651 color = PERF_COLOR_YELLOW; 652 653 return color; 654 } 655 656 static void print_stalled_cycles_frontend(int cpu, 657 struct perf_evsel *evsel 658 __maybe_unused, double avg) 659 { 660 double total, ratio = 0.0; 661 const char *color; 662 663 total = avg_stats(&runtime_cycles_stats[cpu]); 664 665 if (total) 666 ratio = avg / total * 100.0; 667 668 color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio); 669 670 fprintf(output, " # "); 671 color_fprintf(output, color, "%6.2f%%", ratio); 672 fprintf(output, " frontend cycles idle "); 673 } 674 675 static void print_stalled_cycles_backend(int cpu, 676 struct perf_evsel *evsel 677 __maybe_unused, double avg) 678 { 679 double total, ratio = 0.0; 680 const char *color; 681 682 total = avg_stats(&runtime_cycles_stats[cpu]); 683 684 if (total) 685 ratio = avg / total * 100.0; 686 687 color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio); 688 689 fprintf(output, " # "); 690 color_fprintf(output, color, "%6.2f%%", ratio); 691 fprintf(output, " backend cycles idle "); 692 } 693 694 static void print_branch_misses(int cpu, 695 struct perf_evsel *evsel __maybe_unused, 696 double avg) 697 { 698 double total, ratio = 0.0; 699 const char *color; 700 701 total = avg_stats(&runtime_branches_stats[cpu]); 702 703 if (total) 704 ratio = avg / total * 100.0; 705 706 color = get_ratio_color(GRC_CACHE_MISSES, ratio); 707 708 fprintf(output, " # "); 709 color_fprintf(output, color, "%6.2f%%", ratio); 710 fprintf(output, " of all branches "); 711 } 712 713 static void print_l1_dcache_misses(int cpu, 714 struct perf_evsel *evsel __maybe_unused, 715 double avg) 716 { 717 double total, ratio = 0.0; 718 const char *color; 719 720 total = avg_stats(&runtime_l1_dcache_stats[cpu]); 721 722 if (total) 723 ratio = avg / total * 100.0; 724 725 color = get_ratio_color(GRC_CACHE_MISSES, ratio); 726 727 fprintf(output, " # "); 728 color_fprintf(output, color, "%6.2f%%", ratio); 729 fprintf(output, " of all L1-dcache hits "); 730 } 731 732 static void print_l1_icache_misses(int cpu, 733 struct perf_evsel *evsel __maybe_unused, 734 double avg) 735 { 736 double total, ratio = 0.0; 737 const char *color; 738 739 total = avg_stats(&runtime_l1_icache_stats[cpu]); 740 741 if (total) 742 ratio = avg / total * 100.0; 743 744 color = get_ratio_color(GRC_CACHE_MISSES, ratio); 745 746 fprintf(output, " # "); 747 color_fprintf(output, color, "%6.2f%%", ratio); 748 fprintf(output, " of all L1-icache hits "); 749 } 750 751 static void print_dtlb_cache_misses(int cpu, 752 struct perf_evsel *evsel __maybe_unused, 753 double avg) 754 { 755 double total, ratio = 0.0; 756 const char *color; 757 758 total = avg_stats(&runtime_dtlb_cache_stats[cpu]); 759 760 if (total) 761 ratio = avg / total * 100.0; 762 763 color = get_ratio_color(GRC_CACHE_MISSES, ratio); 764 765 fprintf(output, " # "); 766 color_fprintf(output, color, "%6.2f%%", ratio); 767 fprintf(output, " of all dTLB cache hits "); 768 } 769 770 static void print_itlb_cache_misses(int cpu, 771 struct perf_evsel *evsel __maybe_unused, 772 double avg) 773 { 774 double total, ratio = 0.0; 775 const char *color; 776 777 total = avg_stats(&runtime_itlb_cache_stats[cpu]); 778 779 if (total) 780 ratio = avg / total * 100.0; 781 782 color = get_ratio_color(GRC_CACHE_MISSES, ratio); 783 784 fprintf(output, " # "); 785 color_fprintf(output, color, "%6.2f%%", ratio); 786 fprintf(output, " of all iTLB cache hits "); 787 } 788 789 static void print_ll_cache_misses(int cpu, 790 struct perf_evsel *evsel __maybe_unused, 791 double avg) 792 { 793 double total, ratio = 0.0; 794 const char *color; 795 796 total = avg_stats(&runtime_ll_cache_stats[cpu]); 797 798 if (total) 799 ratio = avg / total * 100.0; 800 801 color = get_ratio_color(GRC_CACHE_MISSES, ratio); 802 803 fprintf(output, " # "); 804 color_fprintf(output, color, "%6.2f%%", ratio); 805 fprintf(output, " of all LL-cache hits "); 806 } 807 808 static void abs_printout(int cpu, int nr, struct perf_evsel *evsel, double avg) 809 { 810 double total, ratio = 0.0; 811 const char *fmt; 812 813 if (csv_output) 814 fmt = "%.0f%s%s"; 815 else if (big_num) 816 fmt = "%'18.0f%s%-25s"; 817 else 818 fmt = "%18.0f%s%-25s"; 819 820 aggr_printout(evsel, cpu, nr); 821 822 if (aggr_mode == AGGR_GLOBAL) 823 cpu = 0; 824 825 fprintf(output, fmt, avg, csv_sep, perf_evsel__name(evsel)); 826 827 if (evsel->cgrp) 828 fprintf(output, "%s%s", csv_sep, evsel->cgrp->name); 829 830 if (csv_output || interval) 831 return; 832 833 if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) { 834 total = avg_stats(&runtime_cycles_stats[cpu]); 835 if (total) 836 ratio = avg / total; 837 838 fprintf(output, " # %5.2f insns per cycle ", ratio); 839 840 total = avg_stats(&runtime_stalled_cycles_front_stats[cpu]); 841 total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[cpu])); 842 843 if (total && avg) { 844 ratio = total / avg; 845 fprintf(output, "\n # %5.2f stalled cycles per insn", ratio); 846 } 847 848 } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) && 849 runtime_branches_stats[cpu].n != 0) { 850 print_branch_misses(cpu, evsel, avg); 851 } else if ( 852 evsel->attr.type == PERF_TYPE_HW_CACHE && 853 evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1D | 854 ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | 855 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && 856 runtime_l1_dcache_stats[cpu].n != 0) { 857 print_l1_dcache_misses(cpu, evsel, avg); 858 } else if ( 859 evsel->attr.type == PERF_TYPE_HW_CACHE && 860 evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1I | 861 ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | 862 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && 863 runtime_l1_icache_stats[cpu].n != 0) { 864 print_l1_icache_misses(cpu, evsel, avg); 865 } else if ( 866 evsel->attr.type == PERF_TYPE_HW_CACHE && 867 evsel->attr.config == ( PERF_COUNT_HW_CACHE_DTLB | 868 ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | 869 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && 870 runtime_dtlb_cache_stats[cpu].n != 0) { 871 print_dtlb_cache_misses(cpu, evsel, avg); 872 } else if ( 873 evsel->attr.type == PERF_TYPE_HW_CACHE && 874 evsel->attr.config == ( PERF_COUNT_HW_CACHE_ITLB | 875 ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | 876 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && 877 runtime_itlb_cache_stats[cpu].n != 0) { 878 print_itlb_cache_misses(cpu, evsel, avg); 879 } else if ( 880 evsel->attr.type == PERF_TYPE_HW_CACHE && 881 evsel->attr.config == ( PERF_COUNT_HW_CACHE_LL | 882 ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | 883 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && 884 runtime_ll_cache_stats[cpu].n != 0) { 885 print_ll_cache_misses(cpu, evsel, avg); 886 } else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES) && 887 runtime_cacherefs_stats[cpu].n != 0) { 888 total = avg_stats(&runtime_cacherefs_stats[cpu]); 889 890 if (total) 891 ratio = avg * 100 / total; 892 893 fprintf(output, " # %8.3f %% of all cache refs ", ratio); 894 895 } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) { 896 print_stalled_cycles_frontend(cpu, evsel, avg); 897 } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) { 898 print_stalled_cycles_backend(cpu, evsel, avg); 899 } else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) { 900 total = avg_stats(&runtime_nsecs_stats[cpu]); 901 902 if (total) 903 ratio = 1.0 * avg / total; 904 905 fprintf(output, " # %8.3f GHz ", ratio); 906 } else if (runtime_nsecs_stats[cpu].n != 0) { 907 char unit = 'M'; 908 909 total = avg_stats(&runtime_nsecs_stats[cpu]); 910 911 if (total) 912 ratio = 1000.0 * avg / total; 913 if (ratio < 0.001) { 914 ratio *= 1000; 915 unit = 'K'; 916 } 917 918 fprintf(output, " # %8.3f %c/sec ", ratio, unit); 919 } else { 920 fprintf(output, " "); 921 } 922 } 923 924 static void print_aggr(char *prefix) 925 { 926 struct perf_evsel *counter; 927 int cpu, s, s2, id, nr; 928 u64 ena, run, val; 929 930 if (!(aggr_map || aggr_get_id)) 931 return; 932 933 for (s = 0; s < aggr_map->nr; s++) { 934 id = aggr_map->map[s]; 935 list_for_each_entry(counter, &evsel_list->entries, node) { 936 val = ena = run = 0; 937 nr = 0; 938 for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) { 939 s2 = aggr_get_id(evsel_list->cpus, cpu); 940 if (s2 != id) 941 continue; 942 val += counter->counts->cpu[cpu].val; 943 ena += counter->counts->cpu[cpu].ena; 944 run += counter->counts->cpu[cpu].run; 945 nr++; 946 } 947 if (prefix) 948 fprintf(output, "%s", prefix); 949 950 if (run == 0 || ena == 0) { 951 aggr_printout(counter, cpu, nr); 952 953 fprintf(output, "%*s%s%*s", 954 csv_output ? 0 : 18, 955 counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED, 956 csv_sep, 957 csv_output ? 0 : -24, 958 perf_evsel__name(counter)); 959 960 if (counter->cgrp) 961 fprintf(output, "%s%s", 962 csv_sep, counter->cgrp->name); 963 964 fputc('\n', output); 965 continue; 966 } 967 968 if (nsec_counter(counter)) 969 nsec_printout(id, nr, counter, val); 970 else 971 abs_printout(id, nr, counter, val); 972 973 if (!csv_output) { 974 print_noise(counter, 1.0); 975 976 if (run != ena) 977 fprintf(output, " (%.2f%%)", 978 100.0 * run / ena); 979 } 980 fputc('\n', output); 981 } 982 } 983 } 984 985 /* 986 * Print out the results of a single counter: 987 * aggregated counts in system-wide mode 988 */ 989 static void print_counter_aggr(struct perf_evsel *counter, char *prefix) 990 { 991 struct perf_stat *ps = counter->priv; 992 double avg = avg_stats(&ps->res_stats[0]); 993 int scaled = counter->counts->scaled; 994 995 if (prefix) 996 fprintf(output, "%s", prefix); 997 998 if (scaled == -1) { 999 fprintf(output, "%*s%s%*s", 1000 csv_output ? 0 : 18, 1001 counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED, 1002 csv_sep, 1003 csv_output ? 0 : -24, 1004 perf_evsel__name(counter)); 1005 1006 if (counter->cgrp) 1007 fprintf(output, "%s%s", csv_sep, counter->cgrp->name); 1008 1009 fputc('\n', output); 1010 return; 1011 } 1012 1013 if (nsec_counter(counter)) 1014 nsec_printout(-1, 0, counter, avg); 1015 else 1016 abs_printout(-1, 0, counter, avg); 1017 1018 print_noise(counter, avg); 1019 1020 if (csv_output) { 1021 fputc('\n', output); 1022 return; 1023 } 1024 1025 if (scaled) { 1026 double avg_enabled, avg_running; 1027 1028 avg_enabled = avg_stats(&ps->res_stats[1]); 1029 avg_running = avg_stats(&ps->res_stats[2]); 1030 1031 fprintf(output, " [%5.2f%%]", 100 * avg_running / avg_enabled); 1032 } 1033 fprintf(output, "\n"); 1034 } 1035 1036 /* 1037 * Print out the results of a single counter: 1038 * does not use aggregated count in system-wide 1039 */ 1040 static void print_counter(struct perf_evsel *counter, char *prefix) 1041 { 1042 u64 ena, run, val; 1043 int cpu; 1044 1045 for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) { 1046 val = counter->counts->cpu[cpu].val; 1047 ena = counter->counts->cpu[cpu].ena; 1048 run = counter->counts->cpu[cpu].run; 1049 1050 if (prefix) 1051 fprintf(output, "%s", prefix); 1052 1053 if (run == 0 || ena == 0) { 1054 fprintf(output, "CPU%*d%s%*s%s%*s", 1055 csv_output ? 0 : -4, 1056 perf_evsel__cpus(counter)->map[cpu], csv_sep, 1057 csv_output ? 0 : 18, 1058 counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED, 1059 csv_sep, 1060 csv_output ? 0 : -24, 1061 perf_evsel__name(counter)); 1062 1063 if (counter->cgrp) 1064 fprintf(output, "%s%s", 1065 csv_sep, counter->cgrp->name); 1066 1067 fputc('\n', output); 1068 continue; 1069 } 1070 1071 if (nsec_counter(counter)) 1072 nsec_printout(cpu, 0, counter, val); 1073 else 1074 abs_printout(cpu, 0, counter, val); 1075 1076 if (!csv_output) { 1077 print_noise(counter, 1.0); 1078 1079 if (run != ena) 1080 fprintf(output, " (%.2f%%)", 1081 100.0 * run / ena); 1082 } 1083 fputc('\n', output); 1084 } 1085 } 1086 1087 static void print_stat(int argc, const char **argv) 1088 { 1089 struct perf_evsel *counter; 1090 int i; 1091 1092 fflush(stdout); 1093 1094 if (!csv_output) { 1095 fprintf(output, "\n"); 1096 fprintf(output, " Performance counter stats for "); 1097 if (!perf_target__has_task(&target)) { 1098 fprintf(output, "\'%s", argv[0]); 1099 for (i = 1; i < argc; i++) 1100 fprintf(output, " %s", argv[i]); 1101 } else if (target.pid) 1102 fprintf(output, "process id \'%s", target.pid); 1103 else 1104 fprintf(output, "thread id \'%s", target.tid); 1105 1106 fprintf(output, "\'"); 1107 if (run_count > 1) 1108 fprintf(output, " (%d runs)", run_count); 1109 fprintf(output, ":\n\n"); 1110 } 1111 1112 switch (aggr_mode) { 1113 case AGGR_CORE: 1114 case AGGR_SOCKET: 1115 print_aggr(NULL); 1116 break; 1117 case AGGR_GLOBAL: 1118 list_for_each_entry(counter, &evsel_list->entries, node) 1119 print_counter_aggr(counter, NULL); 1120 break; 1121 case AGGR_NONE: 1122 list_for_each_entry(counter, &evsel_list->entries, node) 1123 print_counter(counter, NULL); 1124 break; 1125 default: 1126 break; 1127 } 1128 1129 if (!csv_output) { 1130 if (!null_run) 1131 fprintf(output, "\n"); 1132 fprintf(output, " %17.9f seconds time elapsed", 1133 avg_stats(&walltime_nsecs_stats)/1e9); 1134 if (run_count > 1) { 1135 fprintf(output, " "); 1136 print_noise_pct(stddev_stats(&walltime_nsecs_stats), 1137 avg_stats(&walltime_nsecs_stats)); 1138 } 1139 fprintf(output, "\n\n"); 1140 } 1141 } 1142 1143 static volatile int signr = -1; 1144 1145 static void skip_signal(int signo) 1146 { 1147 if ((child_pid == -1) || interval) 1148 done = 1; 1149 1150 signr = signo; 1151 } 1152 1153 static void sig_atexit(void) 1154 { 1155 if (child_pid != -1) 1156 kill(child_pid, SIGTERM); 1157 1158 if (signr == -1) 1159 return; 1160 1161 signal(signr, SIG_DFL); 1162 kill(getpid(), signr); 1163 } 1164 1165 static int stat__set_big_num(const struct option *opt __maybe_unused, 1166 const char *s __maybe_unused, int unset) 1167 { 1168 big_num_opt = unset ? 0 : 1; 1169 return 0; 1170 } 1171 1172 static int perf_stat_init_aggr_mode(void) 1173 { 1174 switch (aggr_mode) { 1175 case AGGR_SOCKET: 1176 if (cpu_map__build_socket_map(evsel_list->cpus, &aggr_map)) { 1177 perror("cannot build socket map"); 1178 return -1; 1179 } 1180 aggr_get_id = cpu_map__get_socket; 1181 break; 1182 case AGGR_CORE: 1183 if (cpu_map__build_core_map(evsel_list->cpus, &aggr_map)) { 1184 perror("cannot build core map"); 1185 return -1; 1186 } 1187 aggr_get_id = cpu_map__get_core; 1188 break; 1189 case AGGR_NONE: 1190 case AGGR_GLOBAL: 1191 default: 1192 break; 1193 } 1194 return 0; 1195 } 1196 1197 1198 /* 1199 * Add default attributes, if there were no attributes specified or 1200 * if -d/--detailed, -d -d or -d -d -d is used: 1201 */ 1202 static int add_default_attributes(void) 1203 { 1204 struct perf_event_attr default_attrs[] = { 1205 1206 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, 1207 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES }, 1208 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, 1209 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, 1210 1211 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, 1212 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, 1213 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, 1214 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, 1215 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, 1216 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, 1217 1218 }; 1219 1220 /* 1221 * Detailed stats (-d), covering the L1 and last level data caches: 1222 */ 1223 struct perf_event_attr detailed_attrs[] = { 1224 1225 { .type = PERF_TYPE_HW_CACHE, 1226 .config = 1227 PERF_COUNT_HW_CACHE_L1D << 0 | 1228 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1229 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1230 1231 { .type = PERF_TYPE_HW_CACHE, 1232 .config = 1233 PERF_COUNT_HW_CACHE_L1D << 0 | 1234 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1235 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1236 1237 { .type = PERF_TYPE_HW_CACHE, 1238 .config = 1239 PERF_COUNT_HW_CACHE_LL << 0 | 1240 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1241 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1242 1243 { .type = PERF_TYPE_HW_CACHE, 1244 .config = 1245 PERF_COUNT_HW_CACHE_LL << 0 | 1246 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1247 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1248 }; 1249 1250 /* 1251 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches: 1252 */ 1253 struct perf_event_attr very_detailed_attrs[] = { 1254 1255 { .type = PERF_TYPE_HW_CACHE, 1256 .config = 1257 PERF_COUNT_HW_CACHE_L1I << 0 | 1258 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1259 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1260 1261 { .type = PERF_TYPE_HW_CACHE, 1262 .config = 1263 PERF_COUNT_HW_CACHE_L1I << 0 | 1264 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1265 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1266 1267 { .type = PERF_TYPE_HW_CACHE, 1268 .config = 1269 PERF_COUNT_HW_CACHE_DTLB << 0 | 1270 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1271 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1272 1273 { .type = PERF_TYPE_HW_CACHE, 1274 .config = 1275 PERF_COUNT_HW_CACHE_DTLB << 0 | 1276 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1277 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1278 1279 { .type = PERF_TYPE_HW_CACHE, 1280 .config = 1281 PERF_COUNT_HW_CACHE_ITLB << 0 | 1282 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1283 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1284 1285 { .type = PERF_TYPE_HW_CACHE, 1286 .config = 1287 PERF_COUNT_HW_CACHE_ITLB << 0 | 1288 (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1289 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1290 1291 }; 1292 1293 /* 1294 * Very, very detailed stats (-d -d -d), adding prefetch events: 1295 */ 1296 struct perf_event_attr very_very_detailed_attrs[] = { 1297 1298 { .type = PERF_TYPE_HW_CACHE, 1299 .config = 1300 PERF_COUNT_HW_CACHE_L1D << 0 | 1301 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | 1302 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1303 1304 { .type = PERF_TYPE_HW_CACHE, 1305 .config = 1306 PERF_COUNT_HW_CACHE_L1D << 0 | 1307 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | 1308 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1309 }; 1310 1311 /* Set attrs if no event is selected and !null_run: */ 1312 if (null_run) 1313 return 0; 1314 1315 if (!evsel_list->nr_entries) { 1316 if (perf_evlist__add_default_attrs(evsel_list, default_attrs) < 0) 1317 return -1; 1318 } 1319 1320 /* Detailed events get appended to the event list: */ 1321 1322 if (detailed_run < 1) 1323 return 0; 1324 1325 /* Append detailed run extra attributes: */ 1326 if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0) 1327 return -1; 1328 1329 if (detailed_run < 2) 1330 return 0; 1331 1332 /* Append very detailed run extra attributes: */ 1333 if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0) 1334 return -1; 1335 1336 if (detailed_run < 3) 1337 return 0; 1338 1339 /* Append very, very detailed run extra attributes: */ 1340 return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); 1341 } 1342 1343 int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) 1344 { 1345 bool append_file = false; 1346 int output_fd = 0; 1347 const char *output_name = NULL; 1348 const struct option options[] = { 1349 OPT_CALLBACK('e', "event", &evsel_list, "event", 1350 "event selector. use 'perf list' to list available events", 1351 parse_events_option), 1352 OPT_CALLBACK(0, "filter", &evsel_list, "filter", 1353 "event filter", parse_filter), 1354 OPT_BOOLEAN('i', "no-inherit", &no_inherit, 1355 "child tasks do not inherit counters"), 1356 OPT_STRING('p', "pid", &target.pid, "pid", 1357 "stat events on existing process id"), 1358 OPT_STRING('t', "tid", &target.tid, "tid", 1359 "stat events on existing thread id"), 1360 OPT_BOOLEAN('a', "all-cpus", &target.system_wide, 1361 "system-wide collection from all CPUs"), 1362 OPT_BOOLEAN('g', "group", &group, 1363 "put the counters into a counter group"), 1364 OPT_BOOLEAN('c', "scale", &scale, "scale/normalize counters"), 1365 OPT_INCR('v', "verbose", &verbose, 1366 "be more verbose (show counter open errors, etc)"), 1367 OPT_INTEGER('r', "repeat", &run_count, 1368 "repeat command and print average + stddev (max: 100, forever: 0)"), 1369 OPT_BOOLEAN('n', "null", &null_run, 1370 "null run - dont start any counters"), 1371 OPT_INCR('d', "detailed", &detailed_run, 1372 "detailed run - start a lot of events"), 1373 OPT_BOOLEAN('S', "sync", &sync_run, 1374 "call sync() before starting a run"), 1375 OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, 1376 "print large numbers with thousands\' separators", 1377 stat__set_big_num), 1378 OPT_STRING('C', "cpu", &target.cpu_list, "cpu", 1379 "list of cpus to monitor in system-wide"), 1380 OPT_SET_UINT('A', "no-aggr", &aggr_mode, 1381 "disable CPU count aggregation", AGGR_NONE), 1382 OPT_STRING('x', "field-separator", &csv_sep, "separator", 1383 "print counts with custom separator"), 1384 OPT_CALLBACK('G', "cgroup", &evsel_list, "name", 1385 "monitor event in cgroup name only", parse_cgroups), 1386 OPT_STRING('o', "output", &output_name, "file", "output file name"), 1387 OPT_BOOLEAN(0, "append", &append_file, "append to the output file"), 1388 OPT_INTEGER(0, "log-fd", &output_fd, 1389 "log output to fd, instead of stderr"), 1390 OPT_STRING(0, "pre", &pre_cmd, "command", 1391 "command to run prior to the measured command"), 1392 OPT_STRING(0, "post", &post_cmd, "command", 1393 "command to run after to the measured command"), 1394 OPT_UINTEGER('I', "interval-print", &interval, 1395 "print counts at regular interval in ms (>= 100)"), 1396 OPT_SET_UINT(0, "per-socket", &aggr_mode, 1397 "aggregate counts per processor socket", AGGR_SOCKET), 1398 OPT_SET_UINT(0, "per-core", &aggr_mode, 1399 "aggregate counts per physical processor core", AGGR_CORE), 1400 OPT_END() 1401 }; 1402 const char * const stat_usage[] = { 1403 "perf stat [<options>] [<command>]", 1404 NULL 1405 }; 1406 int status = -ENOMEM, run_idx; 1407 const char *mode; 1408 1409 setlocale(LC_ALL, ""); 1410 1411 evsel_list = perf_evlist__new(); 1412 if (evsel_list == NULL) 1413 return -ENOMEM; 1414 1415 argc = parse_options(argc, argv, options, stat_usage, 1416 PARSE_OPT_STOP_AT_NON_OPTION); 1417 1418 output = stderr; 1419 if (output_name && strcmp(output_name, "-")) 1420 output = NULL; 1421 1422 if (output_name && output_fd) { 1423 fprintf(stderr, "cannot use both --output and --log-fd\n"); 1424 usage_with_options(stat_usage, options); 1425 } 1426 1427 if (output_fd < 0) { 1428 fprintf(stderr, "argument to --log-fd must be a > 0\n"); 1429 usage_with_options(stat_usage, options); 1430 } 1431 1432 if (!output) { 1433 struct timespec tm; 1434 mode = append_file ? "a" : "w"; 1435 1436 output = fopen(output_name, mode); 1437 if (!output) { 1438 perror("failed to create output file"); 1439 return -1; 1440 } 1441 clock_gettime(CLOCK_REALTIME, &tm); 1442 fprintf(output, "# started on %s\n", ctime(&tm.tv_sec)); 1443 } else if (output_fd > 0) { 1444 mode = append_file ? "a" : "w"; 1445 output = fdopen(output_fd, mode); 1446 if (!output) { 1447 perror("Failed opening logfd"); 1448 return -errno; 1449 } 1450 } 1451 1452 if (csv_sep) { 1453 csv_output = true; 1454 if (!strcmp(csv_sep, "\\t")) 1455 csv_sep = "\t"; 1456 } else 1457 csv_sep = DEFAULT_SEPARATOR; 1458 1459 /* 1460 * let the spreadsheet do the pretty-printing 1461 */ 1462 if (csv_output) { 1463 /* User explicitly passed -B? */ 1464 if (big_num_opt == 1) { 1465 fprintf(stderr, "-B option not supported with -x\n"); 1466 usage_with_options(stat_usage, options); 1467 } else /* Nope, so disable big number formatting */ 1468 big_num = false; 1469 } else if (big_num_opt == 0) /* User passed --no-big-num */ 1470 big_num = false; 1471 1472 if (!argc && !perf_target__has_task(&target)) 1473 usage_with_options(stat_usage, options); 1474 if (run_count < 0) { 1475 usage_with_options(stat_usage, options); 1476 } else if (run_count == 0) { 1477 forever = true; 1478 run_count = 1; 1479 } 1480 1481 /* no_aggr, cgroup are for system-wide only */ 1482 if ((aggr_mode != AGGR_GLOBAL || nr_cgroups) 1483 && !perf_target__has_cpu(&target)) { 1484 fprintf(stderr, "both cgroup and no-aggregation " 1485 "modes only available in system-wide mode\n"); 1486 1487 usage_with_options(stat_usage, options); 1488 return -1; 1489 } 1490 1491 if (add_default_attributes()) 1492 goto out; 1493 1494 perf_target__validate(&target); 1495 1496 if (perf_evlist__create_maps(evsel_list, &target) < 0) { 1497 if (perf_target__has_task(&target)) 1498 pr_err("Problems finding threads of monitor\n"); 1499 if (perf_target__has_cpu(&target)) 1500 perror("failed to parse CPUs map"); 1501 1502 usage_with_options(stat_usage, options); 1503 return -1; 1504 } 1505 if (interval && interval < 100) { 1506 pr_err("print interval must be >= 100ms\n"); 1507 usage_with_options(stat_usage, options); 1508 return -1; 1509 } 1510 1511 if (perf_evlist__alloc_stats(evsel_list, interval)) 1512 goto out_free_maps; 1513 1514 if (perf_stat_init_aggr_mode()) 1515 goto out; 1516 1517 /* 1518 * We dont want to block the signals - that would cause 1519 * child tasks to inherit that and Ctrl-C would not work. 1520 * What we want is for Ctrl-C to work in the exec()-ed 1521 * task, but being ignored by perf stat itself: 1522 */ 1523 atexit(sig_atexit); 1524 if (!forever) 1525 signal(SIGINT, skip_signal); 1526 signal(SIGCHLD, skip_signal); 1527 signal(SIGALRM, skip_signal); 1528 signal(SIGABRT, skip_signal); 1529 1530 status = 0; 1531 for (run_idx = 0; forever || run_idx < run_count; run_idx++) { 1532 if (run_count != 1 && verbose) 1533 fprintf(output, "[ perf stat: executing run #%d ... ]\n", 1534 run_idx + 1); 1535 1536 status = run_perf_stat(argc, argv); 1537 if (forever && status != -1) { 1538 print_stat(argc, argv); 1539 perf_stat__reset_stats(evsel_list); 1540 } 1541 } 1542 1543 if (!forever && status != -1 && !interval) 1544 print_stat(argc, argv); 1545 1546 perf_evlist__free_stats(evsel_list); 1547 out_free_maps: 1548 perf_evlist__delete_maps(evsel_list); 1549 out: 1550 perf_evlist__delete(evsel_list); 1551 return status; 1552 } 1553