/*
 * builtin-stat.c
 *
 * Builtin stat command: Give a precise performance counter summary
 * of any workload, CPU or specific PID.
 *
 * Sample output:

   $ perf stat ./hackbench 10

  Time: 0.118

  Performance counter stats for './hackbench 10':

       1708.761321 task-clock                #   11.037 CPUs utilized
            41,190 context-switches          #    0.024 M/sec
             6,735 CPU-migrations            #    0.004 M/sec
            17,318 page-faults               #    0.010 M/sec
     5,205,202,243 cycles                    #    3.046 GHz
     3,856,436,920 stalled-cycles-frontend   #   74.09% frontend cycles idle
     1,600,790,871 stalled-cycles-backend    #   30.75% backend  cycles idle
     2,603,501,247 instructions              #    0.50  insns per cycle
                                             #    1.48  stalled cycles per insn
       484,357,498 branches                  #  283.455 M/sec
         6,388,934 branch-misses             #    1.32% of all branches

        0.154822978  seconds time elapsed

 *
 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *   Jaswinder Singh Rajput <jaswinder@kernel.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include "perf.h"
#include "builtin.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/pmu.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/header.h"
#include "util/cpumap.h"
#include "util/thread.h"
#include "util/thread_map.h"

#include <stdlib.h>
#include <sys/prctl.h>
#include <locale.h>

#define DEFAULT_SEPARATOR	" "
#define CNTR_NOT_SUPPORTED	"<not supported>"
#define CNTR_NOT_COUNTED	"<not counted>"

static void print_stat(int argc, const char **argv);
static void print_counter_aggr(struct perf_evsel *counter, char *prefix);
static void print_counter(struct perf_evsel *counter, char *prefix);
static void print_aggr(char *prefix);

/* Default events used for perf stat -T */
static const char * const transaction_attrs[] = {
	"task-clock",
	"{"
	"instructions,"
	"cycles,"
	"cpu/cycles-t/,"
	"cpu/tx-start/,"
	"cpu/el-start/,"
	"cpu/cycles-ct/"
	"}"
};
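/*
 * Note on the table above: adjacent string literals are concatenated by
 * the compiler, so the "{...}" entry is a single event spec.  In perf's
 * event syntax the braces form an event group (the transaction counters
 * get scheduled on the PMU together), and "cpu/.../" entries name
 * events exported by the core PMU via sysfs.
 */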
/* More limited version when the CPU does not have all events. */
static const char * const transaction_limited_attrs[] = {
	"task-clock",
	"{"
	"instructions,"
	"cycles,"
	"cpu/cycles-t/,"
	"cpu/tx-start/"
	"}"
};

/* must match transaction_attrs and the beginning of transaction_limited_attrs */
enum {
	T_TASK_CLOCK,
	T_INSTRUCTIONS,
	T_CYCLES,
	T_CYCLES_IN_TX,
	T_TRANSACTION_START,
	T_ELISION_START,
	T_CYCLES_IN_TX_CP,
};

static struct perf_evlist *evsel_list;

static struct target target = {
	.uid = UINT_MAX,
};

enum aggr_mode {
	AGGR_NONE,
	AGGR_GLOBAL,
	AGGR_SOCKET,
	AGGR_CORE,
};

static int			run_count	= 1;
static bool			no_inherit	= false;
static bool			scale		= true;
static enum aggr_mode		aggr_mode	= AGGR_GLOBAL;
static volatile pid_t		child_pid	= -1;
static bool			null_run	= false;
static int			detailed_run	= 0;
static bool			transaction_run;
static bool			big_num		= true;
static int			big_num_opt	= -1;
static const char		*csv_sep	= NULL;
static bool			csv_output	= false;
static bool			group		= false;
static FILE			*output		= NULL;
static const char		*pre_cmd	= NULL;
static const char		*post_cmd	= NULL;
static bool			sync_run	= false;
static unsigned int		interval	= 0;
static unsigned int		initial_delay	= 0;
static bool			forever		= false;
static struct timespec		ref_time;
static struct cpu_map		*aggr_map;
static int			(*aggr_get_id)(struct cpu_map *m, int cpu);

static volatile int done = 0;

struct perf_stat {
	struct stats	res_stats[3];
};

static inline void diff_timespec(struct timespec *r, struct timespec *a,
				 struct timespec *b)
{
	r->tv_sec = a->tv_sec - b->tv_sec;
	if (a->tv_nsec < b->tv_nsec) {
		r->tv_nsec = a->tv_nsec + 1000000000L - b->tv_nsec;
		r->tv_sec--;
	} else {
		r->tv_nsec = a->tv_nsec - b->tv_nsec;
	}
}
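/*
 * Worked example for diff_timespec(): a = {2s, 100ns} minus
 * b = {1s, 900ns} borrows from the seconds field, giving
 * tv_nsec = 100 + 1000000000 - 900 = 999999200 and
 * tv_sec = 2 - 1 - 1 = 0.  Callers are expected to pass timestamps
 * from the same monotonic clock with a >= b.
 */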
static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
{
	return (evsel->cpus && !target.cpu_list) ? evsel->cpus : evsel_list->cpus;
}

static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel)
{
	return perf_evsel__cpus(evsel)->nr;
}

static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
{
	memset(evsel->priv, 0, sizeof(struct perf_stat));
}

static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
{
	evsel->priv = zalloc(sizeof(struct perf_stat));
	return evsel->priv == NULL ? -ENOMEM : 0;
}

static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
{
	free(evsel->priv);
	evsel->priv = NULL;
}

static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel)
{
	void *addr;
	size_t sz;

	sz = sizeof(*evsel->counts) +
	     (perf_evsel__nr_cpus(evsel) * sizeof(struct perf_counts_values));

	addr = zalloc(sz);
	if (!addr)
		return -ENOMEM;

	evsel->prev_raw_counts = addr;

	return 0;
}

static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
{
	free(evsel->prev_raw_counts);
	evsel->prev_raw_counts = NULL;
}

static void perf_evlist__free_stats(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__free_stat_priv(evsel);
		perf_evsel__free_counts(evsel);
		perf_evsel__free_prev_raw_counts(evsel);
	}
}

static int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (perf_evsel__alloc_stat_priv(evsel) < 0 ||
		    perf_evsel__alloc_counts(evsel, perf_evsel__nr_cpus(evsel)) < 0 ||
		    (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel) < 0))
			goto out_free;
	}

	return 0;

out_free:
	perf_evlist__free_stats(evlist);
	return -1;
}
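/*
 * The "shadow" stats below remember the raw counts of reference events
 * (cycles, task-clock, branches, ...) so that derived metrics such as
 * IPC, GHz and miss ratios can be printed next to other counters.  The
 * arrays are sized MAX_NR_CPUS, but note that update_shadow_stats()
 * below only ever fills slot 0, so the derived ratios are really only
 * meaningful for globally aggregated counts.
 */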
static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
static struct stats runtime_cycles_stats[MAX_NR_CPUS];
static struct stats runtime_stalled_cycles_front_stats[MAX_NR_CPUS];
static struct stats runtime_stalled_cycles_back_stats[MAX_NR_CPUS];
static struct stats runtime_branches_stats[MAX_NR_CPUS];
static struct stats runtime_cacherefs_stats[MAX_NR_CPUS];
static struct stats runtime_l1_dcache_stats[MAX_NR_CPUS];
static struct stats runtime_l1_icache_stats[MAX_NR_CPUS];
static struct stats runtime_ll_cache_stats[MAX_NR_CPUS];
static struct stats runtime_itlb_cache_stats[MAX_NR_CPUS];
static struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS];
static struct stats runtime_cycles_in_tx_stats[MAX_NR_CPUS];
static struct stats walltime_nsecs_stats;
static struct stats runtime_transaction_stats[MAX_NR_CPUS];
static struct stats runtime_elision_stats[MAX_NR_CPUS];

static void perf_stat__reset_stats(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__reset_stat_priv(evsel);
		perf_evsel__reset_counts(evsel, perf_evsel__nr_cpus(evsel));
	}

	memset(runtime_nsecs_stats, 0, sizeof(runtime_nsecs_stats));
	memset(runtime_cycles_stats, 0, sizeof(runtime_cycles_stats));
	memset(runtime_stalled_cycles_front_stats, 0,
	       sizeof(runtime_stalled_cycles_front_stats));
	memset(runtime_stalled_cycles_back_stats, 0,
	       sizeof(runtime_stalled_cycles_back_stats));
	memset(runtime_branches_stats, 0, sizeof(runtime_branches_stats));
	memset(runtime_cacherefs_stats, 0, sizeof(runtime_cacherefs_stats));
	memset(runtime_l1_dcache_stats, 0, sizeof(runtime_l1_dcache_stats));
	memset(runtime_l1_icache_stats, 0, sizeof(runtime_l1_icache_stats));
	memset(runtime_ll_cache_stats, 0, sizeof(runtime_ll_cache_stats));
	memset(runtime_itlb_cache_stats, 0, sizeof(runtime_itlb_cache_stats));
	memset(runtime_dtlb_cache_stats, 0, sizeof(runtime_dtlb_cache_stats));
	memset(runtime_cycles_in_tx_stats, 0,
	       sizeof(runtime_cycles_in_tx_stats));
	memset(runtime_transaction_stats, 0,
	       sizeof(runtime_transaction_stats));
	memset(runtime_elision_stats, 0, sizeof(runtime_elision_stats));
	memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
}

static int create_perf_stat_counter(struct perf_evsel *evsel)
{
	struct perf_event_attr *attr = &evsel->attr;

	if (scale)
		attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
				    PERF_FORMAT_TOTAL_TIME_RUNNING;

	attr->inherit = !no_inherit;

	if (target__has_cpu(&target))
		return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));

	if (!target__has_task(&target) && perf_evsel__is_group_leader(evsel)) {
		attr->disabled = 1;
		if (!initial_delay)
			attr->enable_on_exec = 1;
	}

	return perf_evsel__open_per_thread(evsel, evsel_list->threads);
}
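/*
 * Two details of create_perf_stat_counter() worth spelling out:
 * PERF_FORMAT_TOTAL_TIME_ENABLED/_RUNNING make the kernel report how
 * long a counter was enabled vs. actually scheduled on the PMU, which
 * is what makes scaling of multiplexed counters possible.  And for a
 * forked workload, group leaders start disabled with enable_on_exec
 * set, so counting begins exactly at the workload's exec() - unless a
 * --delay was given, in which case handle_initial_delay() enables the
 * counters manually after the delay.
 */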
/*
 * Does the counter have nsecs as a unit?
 */
static inline int nsec_counter(struct perf_evsel *evsel)
{
	if (perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
	    perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
		return 1;

	return 0;
}

static struct perf_evsel *nth_evsel(int n)
{
	static struct perf_evsel **array;
	static int array_len;
	struct perf_evsel *ev;
	int j;

	/* Assumes this is only called when evsel_list does not change anymore. */
	if (!array) {
		list_for_each_entry(ev, &evsel_list->entries, node)
			array_len++;
		array = malloc(array_len * sizeof(void *));
		if (!array)
			exit(ENOMEM);
		j = 0;
		list_for_each_entry(ev, &evsel_list->entries, node)
			array[j++] = ev;
	}
	if (n < array_len)
		return array[n];
	return NULL;
}

/*
 * Update various tracking values we maintain to print
 * more semantic information such as miss/hit ratios,
 * instruction rates, etc:
 */
static void update_shadow_stats(struct perf_evsel *counter, u64 *count)
{
	if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
		update_stats(&runtime_nsecs_stats[0], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
		update_stats(&runtime_cycles_stats[0], count[0]);
	else if (transaction_run &&
		 perf_evsel__cmp(counter, nth_evsel(T_CYCLES_IN_TX)))
		update_stats(&runtime_cycles_in_tx_stats[0], count[0]);
	else if (transaction_run &&
		 perf_evsel__cmp(counter, nth_evsel(T_TRANSACTION_START)))
		update_stats(&runtime_transaction_stats[0], count[0]);
	else if (transaction_run &&
		 perf_evsel__cmp(counter, nth_evsel(T_ELISION_START)))
		update_stats(&runtime_elision_stats[0], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
		update_stats(&runtime_stalled_cycles_front_stats[0], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
		update_stats(&runtime_stalled_cycles_back_stats[0], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
		update_stats(&runtime_branches_stats[0], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
		update_stats(&runtime_cacherefs_stats[0], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
		update_stats(&runtime_l1_dcache_stats[0], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
		update_stats(&runtime_l1_icache_stats[0], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
		update_stats(&runtime_ll_cache_stats[0], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
		update_stats(&runtime_dtlb_cache_stats[0], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
		update_stats(&runtime_itlb_cache_stats[0], count[0]);
}

/*
 * Read out the results of a single counter:
 * aggregate counts across CPUs in system-wide mode
 */
static int read_counter_aggr(struct perf_evsel *counter)
{
	struct perf_stat *ps = counter->priv;
	u64 *count = counter->counts->aggr.values;
	int i;

	if (__perf_evsel__read(counter, perf_evsel__nr_cpus(counter),
			       thread_map__nr(evsel_list->threads), scale) < 0)
		return -1;

	for (i = 0; i < 3; i++)
		update_stats(&ps->res_stats[i], count[i]);

	if (verbose) {
		fprintf(output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			perf_evsel__name(counter), count[0], count[1], count[2]);
	}

	/*
	 * Save the full runtime - to allow normalization during printout:
	 */
	update_shadow_stats(counter, count);

	return 0;
}

/*
 * Read out the results of a single counter:
 * do not aggregate counts across CPUs in system-wide mode
 */
static int read_counter(struct perf_evsel *counter)
{
	u64 *count;
	int cpu;

	for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
		if (__perf_evsel__read_on_cpu(counter, cpu, 0, scale) < 0)
			return -1;

		count = counter->counts->cpu[cpu].values;

		update_shadow_stats(counter, count);
	}

	return 0;
}
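/*
 * In both read paths above, count[0] is the (possibly scaled) counter
 * value and count[1]/count[2] are the time-enabled and time-running
 * values from read_format.  read_counter_aggr() accumulates all three
 * into res_stats[], which is what lets print_noise() show run-to-run
 * stddev for -r and print_counter_aggr() show the multiplexing ratio.
 */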
static void print_interval(void)
{
	static int num_print_interval;
	struct perf_evsel *counter;
	struct perf_stat *ps;
	struct timespec ts, rs;
	char prefix[64];

	if (aggr_mode == AGGR_GLOBAL) {
		list_for_each_entry(counter, &evsel_list->entries, node) {
			ps = counter->priv;
			memset(ps->res_stats, 0, sizeof(ps->res_stats));
			read_counter_aggr(counter);
		}
	} else {
		list_for_each_entry(counter, &evsel_list->entries, node) {
			ps = counter->priv;
			memset(ps->res_stats, 0, sizeof(ps->res_stats));
			read_counter(counter);
		}
	}

	clock_gettime(CLOCK_MONOTONIC, &ts);
	diff_timespec(&rs, &ts, &ref_time);
	sprintf(prefix, "%6lu.%09lu%s", rs.tv_sec, rs.tv_nsec, csv_sep);

	if (num_print_interval == 0 && !csv_output) {
		switch (aggr_mode) {
		case AGGR_SOCKET:
			fprintf(output, "#           time socket cpus             counts events\n");
			break;
		case AGGR_CORE:
			fprintf(output, "#           time core         cpus             counts events\n");
			break;
		case AGGR_NONE:
			fprintf(output, "#           time CPU                counts events\n");
			break;
		case AGGR_GLOBAL:
		default:
			fprintf(output, "#           time             counts events\n");
		}
	}

	if (++num_print_interval == 25)
		num_print_interval = 0;

	switch (aggr_mode) {
	case AGGR_CORE:
	case AGGR_SOCKET:
		print_aggr(prefix);
		break;
	case AGGR_NONE:
		list_for_each_entry(counter, &evsel_list->entries, node)
			print_counter(counter, prefix);
		break;
	case AGGR_GLOBAL:
	default:
		list_for_each_entry(counter, &evsel_list->entries, node)
			print_counter_aggr(counter, prefix);
	}

	fflush(output);
}

static void handle_initial_delay(void)
{
	struct perf_evsel *counter;

	if (initial_delay) {
		const int ncpus = cpu_map__nr(evsel_list->cpus),
			nthreads = thread_map__nr(evsel_list->threads);

		usleep(initial_delay * 1000);
		list_for_each_entry(counter, &evsel_list->entries, node)
			perf_evsel__enable(counter, ncpus, nthreads);
	}
}
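/*
 * Illustrative -I output (values made up), AGGR_GLOBAL mode: each line
 * is prefixed with the wall-clock offset computed above via
 * diff_timespec() against ref_time:
 *
 *   #           time             counts events
 *        1.000105618          1,234,567 cycles
 *        2.000267204          1,198,321 cycles
 */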
static int __run_perf_stat(int argc, const char **argv)
{
	char msg[512];
	unsigned long long t0, t1;
	struct perf_evsel *counter;
	struct timespec ts;
	int status = 0;
	const bool forks = (argc > 0);

	if (interval) {
		ts.tv_sec = interval / 1000;
		ts.tv_nsec = (interval % 1000) * 1000000;
	} else {
		ts.tv_sec = 1;
		ts.tv_nsec = 0;
	}

	if (forks) {
		if (perf_evlist__prepare_workload(evsel_list, &target, argv,
						  false, false) < 0) {
			perror("failed to prepare workload");
			return -1;
		}
		child_pid = evsel_list->workload.pid;
	}

	if (group)
		perf_evlist__set_leader(evsel_list);

	list_for_each_entry(counter, &evsel_list->entries, node) {
		if (create_perf_stat_counter(counter) < 0) {
			/*
			 * PPC returns ENXIO for HW counters until 2.6.37
			 * (behavior changed with commit b0a873e).
			 */
			if (errno == EINVAL || errno == ENOSYS ||
			    errno == ENOENT || errno == EOPNOTSUPP ||
			    errno == ENXIO) {
				if (verbose)
					ui__warning("%s event is not supported by the kernel.\n",
						    perf_evsel__name(counter));
				counter->supported = false;
				continue;
			}

			perf_evsel__open_strerror(counter, &target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);

			if (child_pid != -1)
				kill(child_pid, SIGTERM);

			return -1;
		}
		counter->supported = true;
	}

	if (perf_evlist__apply_filters(evsel_list)) {
		error("failed to set filter with %d (%s)\n", errno,
		      strerror(errno));
		return -1;
	}

	/*
	 * Enable counters and exec the command:
	 */
	t0 = rdclock();
	clock_gettime(CLOCK_MONOTONIC, &ref_time);

	if (forks) {
		perf_evlist__start_workload(evsel_list);
		handle_initial_delay();

		if (interval) {
			while (!waitpid(child_pid, &status, WNOHANG)) {
				nanosleep(&ts, NULL);
				print_interval();
			}
		}
		wait(&status);
		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), argv[0]);
	} else {
		handle_initial_delay();
		while (!done) {
			nanosleep(&ts, NULL);
			if (interval)
				print_interval();
		}
	}

	t1 = rdclock();

	update_stats(&walltime_nsecs_stats, t1 - t0);

	if (aggr_mode == AGGR_GLOBAL) {
		list_for_each_entry(counter, &evsel_list->entries, node) {
			read_counter_aggr(counter);
			perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter),
					     thread_map__nr(evsel_list->threads));
		}
	} else {
		list_for_each_entry(counter, &evsel_list->entries, node) {
			read_counter(counter);
			perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter), 1);
		}
	}

	return WEXITSTATUS(status);
}

static int run_perf_stat(int argc __maybe_unused, const char **argv)
{
	int ret;

	if (pre_cmd) {
		ret = system(pre_cmd);
		if (ret)
			return ret;
	}

	if (sync_run)
		sync();

	ret = __run_perf_stat(argc, argv);
	if (ret)
		return ret;

	if (post_cmd) {
		ret = system(post_cmd);
		if (ret)
			return ret;
	}

	return ret;
}
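/*
 * run_perf_stat() brackets the measurement: an optional --pre command,
 * an optional sync() (-S) so that queued writeback from earlier work
 * does not leak into the measured run, the measurement itself, and an
 * optional --post command.  Any non-zero exit status short-circuits
 * the remaining steps.
 */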
static void print_noise_pct(double total, double avg)
{
	double pct = rel_stddev_stats(total, avg);

	if (csv_output)
		fprintf(output, "%s%.2f%%", csv_sep, pct);
	else if (pct)
		fprintf(output, "  ( +-%6.2f%% )", pct);
}

static void print_noise(struct perf_evsel *evsel, double avg)
{
	struct perf_stat *ps;

	if (run_count == 1)
		return;

	ps = evsel->priv;
	print_noise_pct(stddev_stats(&ps->res_stats[0]), avg);
}

static void aggr_printout(struct perf_evsel *evsel, int id, int nr)
{
	switch (aggr_mode) {
	case AGGR_CORE:
		fprintf(output, "S%d-C%*d%s%*d%s",
			cpu_map__id_to_socket(id),
			csv_output ? 0 : -8,
			cpu_map__id_to_cpu(id),
			csv_sep,
			csv_output ? 0 : 4,
			nr,
			csv_sep);
		break;
	case AGGR_SOCKET:
		fprintf(output, "S%*d%s%*d%s",
			csv_output ? 0 : -5,
			id,
			csv_sep,
			csv_output ? 0 : 4,
			nr,
			csv_sep);
		break;
	case AGGR_NONE:
		fprintf(output, "CPU%*d%s",
			csv_output ? 0 : -4,
			perf_evsel__cpus(evsel)->map[id], csv_sep);
		break;
	case AGGR_GLOBAL:
	default:
		break;
	}
}

static void nsec_printout(int cpu, int nr, struct perf_evsel *evsel, double avg)
{
	double msecs = avg / 1e6;
	const char *fmt = csv_output ? "%.6f%s%s" : "%18.6f%s%-25s";
	char name[25];

	aggr_printout(evsel, cpu, nr);

	scnprintf(name, sizeof(name), "%s%s",
		  perf_evsel__name(evsel), csv_output ? "" : " (msec)");
	fprintf(output, fmt, msecs, csv_sep, name);

	if (evsel->cgrp)
		fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);

	if (csv_output || interval)
		return;

	if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
		fprintf(output, " # %8.3f CPUs utilized          ",
			avg / avg_stats(&walltime_nsecs_stats));
	else
		fprintf(output, "                                   ");
}
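/*
 * nsec_printout() reports nanosecond counters in milliseconds, e.g.
 * (illustrative values):
 *
 *   pretty:        1708.761321 task-clock (msec)  #  11.037 CPUs utilized
 *   with -x,:      1708.761321,task-clock
 *
 * The "CPUs utilized" comment is task-clock time divided by wall-clock
 * time, i.e. the average number of busy CPUs.
 */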
/* used for get_ratio_color() */
enum grc_type {
	GRC_STALLED_CYCLES_FE,
	GRC_STALLED_CYCLES_BE,
	GRC_CACHE_MISSES,
	GRC_MAX_NR
};

static const char *get_ratio_color(enum grc_type type, double ratio)
{
	static const double grc_table[GRC_MAX_NR][3] = {
		[GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 },
		[GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 },
		[GRC_CACHE_MISSES]	= { 20.0, 10.0, 5.0 },
	};
	const char *color = PERF_COLOR_NORMAL;

	if (ratio > grc_table[type][0])
		color = PERF_COLOR_RED;
	else if (ratio > grc_table[type][1])
		color = PERF_COLOR_MAGENTA;
	else if (ratio > grc_table[type][2])
		color = PERF_COLOR_YELLOW;

	return color;
}

static void print_stalled_cycles_frontend(int cpu,
					  struct perf_evsel *evsel
					  __maybe_unused, double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_cycles_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " frontend cycles idle   ");
}

static void print_stalled_cycles_backend(int cpu,
					 struct perf_evsel *evsel
					 __maybe_unused, double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_cycles_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " backend  cycles idle   ");
}

static void print_branch_misses(int cpu,
				struct perf_evsel *evsel __maybe_unused,
				double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_branches_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " of all branches        ");
}

static void print_l1_dcache_misses(int cpu,
				   struct perf_evsel *evsel __maybe_unused,
				   double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_l1_dcache_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " of all L1-dcache hits  ");
}

static void print_l1_icache_misses(int cpu,
				   struct perf_evsel *evsel __maybe_unused,
				   double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_l1_icache_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " of all L1-icache hits  ");
}

static void print_dtlb_cache_misses(int cpu,
				    struct perf_evsel *evsel __maybe_unused,
				    double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_dtlb_cache_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " of all dTLB cache hits ");
}

static void print_itlb_cache_misses(int cpu,
				    struct perf_evsel *evsel __maybe_unused,
				    double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_itlb_cache_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " of all iTLB cache hits ");
}

static void print_ll_cache_misses(int cpu,
				  struct perf_evsel *evsel __maybe_unused,
				  double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_ll_cache_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " of all LL-cache hits   ");
}
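/*
 * All the miss-ratio printers above share the GRC_CACHE_MISSES
 * thresholds: ratios above 5%, 10% and 20% are colored yellow, magenta
 * and red respectively, so outliers stand out at a glance.  The
 * stalled-cycles printers use higher cutoffs of their own, since a
 * certain amount of stall is normal.
 */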
static void abs_printout(int cpu, int nr, struct perf_evsel *evsel, double avg)
{
	double total, ratio = 0.0, total2;
	const char *fmt;

	if (csv_output)
		fmt = "%.0f%s%s";
	else if (big_num)
		fmt = "%'18.0f%s%-25s";
	else
		fmt = "%18.0f%s%-25s";

	aggr_printout(evsel, cpu, nr);

	if (aggr_mode == AGGR_GLOBAL)
		cpu = 0;

	fprintf(output, fmt, avg, csv_sep, perf_evsel__name(evsel));

	if (evsel->cgrp)
		fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);

	if (csv_output || interval)
		return;

	if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
		total = avg_stats(&runtime_cycles_stats[cpu]);
		if (total) {
			ratio = avg / total;
			fprintf(output, " #   %5.2f  insns per cycle        ", ratio);
		}
		total = avg_stats(&runtime_stalled_cycles_front_stats[cpu]);
		total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[cpu]));

		if (total && avg) {
			ratio = total / avg;
			fprintf(output, "\n                                             #   %5.2f  stalled cycles per insn", ratio);
		}

	} else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) &&
			runtime_branches_stats[cpu].n != 0) {
		print_branch_misses(cpu, evsel, avg);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1D |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
			runtime_l1_dcache_stats[cpu].n != 0) {
		print_l1_dcache_misses(cpu, evsel, avg);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1I |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
			runtime_l1_icache_stats[cpu].n != 0) {
		print_l1_icache_misses(cpu, evsel, avg);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == ( PERF_COUNT_HW_CACHE_DTLB |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
			runtime_dtlb_cache_stats[cpu].n != 0) {
		print_dtlb_cache_misses(cpu, evsel, avg);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == ( PERF_COUNT_HW_CACHE_ITLB |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
			runtime_itlb_cache_stats[cpu].n != 0) {
		print_itlb_cache_misses(cpu, evsel, avg);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == ( PERF_COUNT_HW_CACHE_LL |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
			runtime_ll_cache_stats[cpu].n != 0) {
		print_ll_cache_misses(cpu, evsel, avg);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES) &&
			runtime_cacherefs_stats[cpu].n != 0) {
		total = avg_stats(&runtime_cacherefs_stats[cpu]);

		if (total)
			ratio = avg * 100 / total;

		fprintf(output, " # %8.3f %% of all cache refs    ", ratio);

	} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
		print_stalled_cycles_frontend(cpu, evsel, avg);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
		print_stalled_cycles_backend(cpu, evsel, avg);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
		total = avg_stats(&runtime_nsecs_stats[cpu]);

		if (total) {
			ratio = avg / total;
			fprintf(output, " # %8.3f GHz                    ", ratio);
		}
	} else if (transaction_run &&
		   perf_evsel__cmp(evsel, nth_evsel(T_CYCLES_IN_TX))) {
		total = avg_stats(&runtime_cycles_stats[cpu]);
		if (total)
			fprintf(output,
				" #   %5.2f%% transactional cycles   ",
				100.0 * (avg / total));
	} else if (transaction_run &&
		   perf_evsel__cmp(evsel, nth_evsel(T_CYCLES_IN_TX_CP))) {
		total = avg_stats(&runtime_cycles_stats[cpu]);
		total2 = avg_stats(&runtime_cycles_in_tx_stats[cpu]);
		if (total2 < avg)
			total2 = avg;
		if (total)
			fprintf(output,
				" #   %5.2f%% aborted cycles         ",
				100.0 * ((total2-avg) / total));
	} else if (transaction_run &&
		   perf_evsel__cmp(evsel, nth_evsel(T_TRANSACTION_START)) &&
		   avg > 0 &&
		   runtime_cycles_in_tx_stats[cpu].n != 0) {
		total = avg_stats(&runtime_cycles_in_tx_stats[cpu]);

		if (total)
			ratio = total / avg;

		fprintf(output, " # %8.0f cycles / transaction   ", ratio);
	} else if (transaction_run &&
		   perf_evsel__cmp(evsel, nth_evsel(T_ELISION_START)) &&
		   avg > 0 &&
		   runtime_cycles_in_tx_stats[cpu].n != 0) {
		total = avg_stats(&runtime_cycles_in_tx_stats[cpu]);

		if (total)
			ratio = total / avg;

		fprintf(output, " # %8.0f cycles / elision       ", ratio);
	} else if (runtime_nsecs_stats[cpu].n != 0) {
		char unit = 'M';

		total = avg_stats(&runtime_nsecs_stats[cpu]);

		if (total)
			ratio = 1000.0 * avg / total;
		if (ratio < 0.001) {
			ratio *= 1000;
			unit = 'K';
		}

		fprintf(output, " # %8.3f %c/sec                  ", ratio, unit);
	} else {
		fprintf(output, "                                   ");
	}
}
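/*
 * The PERF_TYPE_HW_CACHE comparisons in abs_printout() decode the
 * generic cache event encoding from the perf ABI: bits 0-7 select the
 * cache (L1D, L1I, LL, DTLB, ITLB, ...), bits 8-15 the operation
 * (read/write/prefetch) and bits 16-23 the result (access/miss).
 * L1-dcache read misses, for example, are
 * PERF_COUNT_HW_CACHE_L1D | (OP_READ << 8) | (RESULT_MISS << 16).
 */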
static void print_aggr(char *prefix)
{
	struct perf_evsel *counter;
	int cpu, cpu2, s, s2, id, nr;
	u64 ena, run, val;

	if (!(aggr_map || aggr_get_id))
		return;

	for (s = 0; s < aggr_map->nr; s++) {
		id = aggr_map->map[s];
		list_for_each_entry(counter, &evsel_list->entries, node) {
			val = ena = run = 0;
			nr = 0;
			for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
				cpu2 = perf_evsel__cpus(counter)->map[cpu];
				s2 = aggr_get_id(evsel_list->cpus, cpu2);
				if (s2 != id)
					continue;
				val += counter->counts->cpu[cpu].val;
				ena += counter->counts->cpu[cpu].ena;
				run += counter->counts->cpu[cpu].run;
				nr++;
			}
			if (prefix)
				fprintf(output, "%s", prefix);

			if (run == 0 || ena == 0) {
				aggr_printout(counter, id, nr);

				fprintf(output, "%*s%s%*s",
					csv_output ? 0 : 18,
					counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
					csv_sep,
					csv_output ? 0 : -24,
					perf_evsel__name(counter));

				if (counter->cgrp)
					fprintf(output, "%s%s",
						csv_sep, counter->cgrp->name);

				fputc('\n', output);
				continue;
			}

			if (nsec_counter(counter))
				nsec_printout(id, nr, counter, val);
			else
				abs_printout(id, nr, counter, val);

			if (!csv_output) {
				print_noise(counter, 1.0);

				if (run != ena)
					fprintf(output, "  (%.2f%%)",
						100.0 * run / ena);
			}
			fputc('\n', output);
		}
	}
}
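/*
 * The trailing "(xx.xx%)" emitted when run != ena is the fraction of
 * enabled time the counter actually spent on the PMU.  Anything below
 * 100% means the kernel had to multiplex counters, so the printed
 * counts are scaled estimates rather than exact values.
 */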
/*
 * Print out the results of a single counter:
 * aggregated counts in system-wide mode
 */
static void print_counter_aggr(struct perf_evsel *counter, char *prefix)
{
	struct perf_stat *ps = counter->priv;
	double avg = avg_stats(&ps->res_stats[0]);
	int scaled = counter->counts->scaled;

	if (prefix)
		fprintf(output, "%s", prefix);

	if (scaled == -1) {
		fprintf(output, "%*s%s%*s",
			csv_output ? 0 : 18,
			counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
			csv_sep,
			csv_output ? 0 : -24,
			perf_evsel__name(counter));

		if (counter->cgrp)
			fprintf(output, "%s%s", csv_sep, counter->cgrp->name);

		fputc('\n', output);
		return;
	}

	if (nsec_counter(counter))
		nsec_printout(-1, 0, counter, avg);
	else
		abs_printout(-1, 0, counter, avg);

	print_noise(counter, avg);

	if (csv_output) {
		fputc('\n', output);
		return;
	}

	if (scaled) {
		double avg_enabled, avg_running;

		avg_enabled = avg_stats(&ps->res_stats[1]);
		avg_running = avg_stats(&ps->res_stats[2]);

		fprintf(output, " [%5.2f%%]", 100 * avg_running / avg_enabled);
	}
	fprintf(output, "\n");
}

/*
 * Print out the results of a single counter:
 * does not use aggregated count in system-wide mode
 */
static void print_counter(struct perf_evsel *counter, char *prefix)
{
	u64 ena, run, val;
	int cpu;

	for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
		val = counter->counts->cpu[cpu].val;
		ena = counter->counts->cpu[cpu].ena;
		run = counter->counts->cpu[cpu].run;

		if (prefix)
			fprintf(output, "%s", prefix);

		if (run == 0 || ena == 0) {
			fprintf(output, "CPU%*d%s%*s%s%*s",
				csv_output ? 0 : -4,
				perf_evsel__cpus(counter)->map[cpu], csv_sep,
				csv_output ? 0 : 18,
				counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
				csv_sep,
				csv_output ? 0 : -24,
				perf_evsel__name(counter));

			if (counter->cgrp)
				fprintf(output, "%s%s",
					csv_sep, counter->cgrp->name);

			fputc('\n', output);
			continue;
		}

		if (nsec_counter(counter))
			nsec_printout(cpu, 0, counter, val);
		else
			abs_printout(cpu, 0, counter, val);

		if (!csv_output) {
			print_noise(counter, 1.0);

			if (run != ena)
				fprintf(output, "  (%.2f%%)",
					100.0 * run / ena);
		}
		fputc('\n', output);
	}
}
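/*
 * Note the two failure strings used above: "<not counted>" means the
 * counter was opened but never scheduled (counter->supported is true),
 * while "<not supported>" means opening the event failed outright.
 * Printing these instead of a zero keeps broken counters from being
 * mistaken for idle ones.
 */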
static void print_stat(int argc, const char **argv)
{
	struct perf_evsel *counter;
	int i;

	fflush(stdout);

	if (!csv_output) {
		fprintf(output, "\n");
		fprintf(output, " Performance counter stats for ");
		if (target.system_wide)
			fprintf(output, "\'system wide");
		else if (target.cpu_list)
			fprintf(output, "\'CPU(s) %s", target.cpu_list);
		else if (!target__has_task(&target)) {
			fprintf(output, "\'%s", argv[0]);
			for (i = 1; i < argc; i++)
				fprintf(output, " %s", argv[i]);
		} else if (target.pid)
			fprintf(output, "process id \'%s", target.pid);
		else
			fprintf(output, "thread id \'%s", target.tid);

		fprintf(output, "\'");
		if (run_count > 1)
			fprintf(output, " (%d runs)", run_count);
		fprintf(output, ":\n\n");
	}

	switch (aggr_mode) {
	case AGGR_CORE:
	case AGGR_SOCKET:
		print_aggr(NULL);
		break;
	case AGGR_GLOBAL:
		list_for_each_entry(counter, &evsel_list->entries, node)
			print_counter_aggr(counter, NULL);
		break;
	case AGGR_NONE:
		list_for_each_entry(counter, &evsel_list->entries, node)
			print_counter(counter, NULL);
		break;
	default:
		break;
	}

	if (!csv_output) {
		if (!null_run)
			fprintf(output, "\n");
		fprintf(output, " %17.9f seconds time elapsed",
			avg_stats(&walltime_nsecs_stats)/1e9);
		if (run_count > 1) {
			fprintf(output, "                                        ");
			print_noise_pct(stddev_stats(&walltime_nsecs_stats),
					avg_stats(&walltime_nsecs_stats));
		}
		fprintf(output, "\n\n");
	}
}

static volatile int signr = -1;

static void skip_signal(int signo)
{
	if ((child_pid == -1) || interval)
		done = 1;

	signr = signo;
	/*
	 * render child_pid harmless:
	 * we won't send SIGTERM to a random
	 * process in case of race condition
	 * and fast PID recycling
	 */
	child_pid = -1;
}

static void sig_atexit(void)
{
	sigset_t set, oset;

	/*
	 * avoid a race condition with the SIGCHLD handler
	 * in skip_signal() which modifies child_pid;
	 * the goal is to avoid sending SIGTERM to a random
	 * process
	 */
	sigemptyset(&set);
	sigaddset(&set, SIGCHLD);
	sigprocmask(SIG_BLOCK, &set, &oset);

	if (child_pid != -1)
		kill(child_pid, SIGTERM);

	sigprocmask(SIG_SETMASK, &oset, NULL);

	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}

static int stat__set_big_num(const struct option *opt __maybe_unused,
			     const char *s __maybe_unused, int unset)
{
	big_num_opt = unset ? 0 : 1;
	return 0;
}

static int perf_stat_init_aggr_mode(void)
{
	switch (aggr_mode) {
	case AGGR_SOCKET:
		if (cpu_map__build_socket_map(evsel_list->cpus, &aggr_map)) {
			perror("cannot build socket map");
			return -1;
		}
		aggr_get_id = cpu_map__get_socket;
		break;
	case AGGR_CORE:
		if (cpu_map__build_core_map(evsel_list->cpus, &aggr_map)) {
			perror("cannot build core map");
			return -1;
		}
		aggr_get_id = cpu_map__get_core;
		break;
	case AGGR_NONE:
	case AGGR_GLOBAL:
	default:
		break;
	}
	return 0;
}

static int setup_events(const char * const *attrs, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		if (parse_events(evsel_list, attrs[i]))
			return -1;
	}
	return 0;
}
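/*
 * For --per-socket and --per-core, cpu_map__build_*_map() precomputes
 * aggr_map (the set of socket/core ids present) and aggr_get_id
 * translates a CPU number into its socket or core id; print_aggr()
 * then sums per-CPU counts that share an id.
 */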
/*
 * Add default attributes, if there were no attributes specified or
 * if -d/--detailed, -d -d or -d -d -d is used:
 */
static int add_default_attributes(void)
{
	struct perf_event_attr default_attrs[] = {

	{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
	{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
	{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
	{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },

	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES },

	};

	/*
	 * Detailed stats (-d), covering the L1 and last level data caches:
	 */
	struct perf_event_attr detailed_attrs[] = {

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1D << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1D << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_LL << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_LL << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
	};

	/*
	 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
	 */
	struct perf_event_attr very_detailed_attrs[] = {

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1I << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1I << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_DTLB << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_DTLB << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_ITLB << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_ITLB << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

	};

	/*
	 * Very, very detailed stats (-d -d -d), adding prefetch events:
	 */
	struct perf_event_attr very_very_detailed_attrs[] = {

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1D << 0 |
		(PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1D << 0 |
		(PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
	};

	/* Set attrs if no event is selected and !null_run: */
	if (null_run)
		return 0;

	if (transaction_run) {
		int err;
		if (pmu_have_event("cpu", "cycles-ct") &&
		    pmu_have_event("cpu", "el-start"))
			err = setup_events(transaction_attrs,
					   ARRAY_SIZE(transaction_attrs));
		else
			err = setup_events(transaction_limited_attrs,
					   ARRAY_SIZE(transaction_limited_attrs));
		if (err < 0) {
			fprintf(stderr, "Cannot set up transaction events\n");
			return -1;
		}
		return 0;
	}

	if (!evsel_list->nr_entries) {
		if (perf_evlist__add_default_attrs(evsel_list, default_attrs) < 0)
			return -1;
	}

	/* Detailed events get appended to the event list: */

	if (detailed_run < 1)
		return 0;

	/* Append detailed run extra attributes: */
	if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
		return -1;

	if (detailed_run < 2)
		return 0;

	/* Append very detailed run extra attributes: */
	if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
		return -1;

	if (detailed_run < 3)
		return 0;

	/* Append very, very detailed run extra attributes: */
	return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
}
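/*
 * To summarize the -d levels implemented above, each level appends to
 * the previous one: -d adds L1-dcache and LLC read accesses/misses,
 * -d -d adds the L1-icache, dTLB and iTLB equivalents, and -d -d -d
 * adds L1-dcache prefetch accesses and misses.
 */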
int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
{
	bool append_file = false;
	int output_fd = 0;
	const char *output_name	= NULL;
	const struct option options[] = {
	OPT_BOOLEAN('T', "transaction", &transaction_run,
		    "hardware transaction statistics"),
	OPT_CALLBACK('e', "event", &evsel_list, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &evsel_list, "filter",
		     "event filter", parse_filter),
	OPT_BOOLEAN('i', "no-inherit", &no_inherit,
		    "child tasks do not inherit counters"),
	OPT_STRING('p', "pid", &target.pid, "pid",
		   "stat events on existing process id"),
	OPT_STRING('t', "tid", &target.tid, "tid",
		   "stat events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_BOOLEAN('g', "group", &group,
		    "put the counters into a counter group"),
	OPT_BOOLEAN('c', "scale", &scale, "scale/normalize counters"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_INTEGER('r', "repeat", &run_count,
		    "repeat command and print average + stddev (max: 100, forever: 0)"),
	OPT_BOOLEAN('n', "null", &null_run,
		    "null run - don't start any counters"),
	OPT_INCR('d', "detailed", &detailed_run,
		 "detailed run - start a lot of events"),
	OPT_BOOLEAN('S', "sync", &sync_run,
		    "call sync() before starting a run"),
	OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
			   "print large numbers with thousands\' separators",
			   stat__set_big_num),
	OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
		   "list of cpus to monitor in system-wide"),
	OPT_SET_UINT('A', "no-aggr", &aggr_mode,
		     "disable CPU count aggregation", AGGR_NONE),
	OPT_STRING('x', "field-separator", &csv_sep, "separator",
		   "print counts with custom separator"),
	OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
		     "monitor event in cgroup name only", parse_cgroups),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
	OPT_INTEGER(0, "log-fd", &output_fd,
		    "log output to fd, instead of stderr"),
	OPT_STRING(0, "pre", &pre_cmd, "command",
		   "command to run prior to the measured command"),
	OPT_STRING(0, "post", &post_cmd, "command",
		   "command to run after the measured command"),
	OPT_UINTEGER('I', "interval-print", &interval,
		     "print counts at regular interval in ms (>= 100)"),
	OPT_SET_UINT(0, "per-socket", &aggr_mode,
		     "aggregate counts per processor socket", AGGR_SOCKET),
	OPT_SET_UINT(0, "per-core", &aggr_mode,
		     "aggregate counts per physical processor core", AGGR_CORE),
	OPT_UINTEGER('D', "delay", &initial_delay,
		     "ms to wait before starting measurement after program start"),
	OPT_END()
	};
	const char * const stat_usage[] = {
		"perf stat [<options>] [<command>]",
		NULL
	};
	int status = -EINVAL, run_idx;
	const char *mode;

	setlocale(LC_ALL, "");

	evsel_list = perf_evlist__new();
	if (evsel_list == NULL)
		return -ENOMEM;

	argc = parse_options(argc, argv, options, stat_usage,
		PARSE_OPT_STOP_AT_NON_OPTION);

	output = stderr;
	if (output_name && strcmp(output_name, "-"))
		output = NULL;

	if (output_name && output_fd) {
		fprintf(stderr, "cannot use both --output and --log-fd\n");
		parse_options_usage(stat_usage, options, "o", 1);
		parse_options_usage(NULL, options, "log-fd", 0);
		goto out;
	}

	if (output_fd < 0) {
		fprintf(stderr, "argument to --log-fd must be > 0\n");
		parse_options_usage(stat_usage, options, "log-fd", 0);
		goto out;
	}

	if (!output) {
		struct timespec tm;
		mode = append_file ? "a" : "w";

		output = fopen(output_name, mode);
		if (!output) {
			perror("failed to create output file");
			return -1;
		}
		clock_gettime(CLOCK_REALTIME, &tm);
		fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
	} else if (output_fd > 0) {
		mode = append_file ? "a" : "w";
		output = fdopen(output_fd, mode);
		if (!output) {
			perror("Failed opening logfd");
			return -errno;
		}
	}

	if (csv_sep) {
		csv_output = true;
		if (!strcmp(csv_sep, "\\t"))
			csv_sep = "\t";
	} else
		csv_sep = DEFAULT_SEPARATOR;

	/*
	 * let the spreadsheet do the pretty-printing
	 */
	if (csv_output) {
		/* User explicitly passed -B? */
		if (big_num_opt == 1) {
			fprintf(stderr, "-B option not supported with -x\n");
			parse_options_usage(stat_usage, options, "B", 1);
			parse_options_usage(NULL, options, "x", 1);
			goto out;
		} else /* Nope, so disable big number formatting */
			big_num = false;
	} else if (big_num_opt == 0) /* User passed --no-big-num */
		big_num = false;

	if (!argc && target__none(&target))
		usage_with_options(stat_usage, options);

	if (run_count < 0) {
		pr_err("Run count must be a positive number\n");
		parse_options_usage(stat_usage, options, "r", 1);
		goto out;
	} else if (run_count == 0) {
		forever = true;
		run_count = 1;
	}

	/* no_aggr, cgroup are for system-wide only */
	if ((aggr_mode != AGGR_GLOBAL || nr_cgroups) &&
	    !target__has_cpu(&target)) {
		fprintf(stderr, "both cgroup and no-aggregation "
			"modes only available in system-wide mode\n");

		parse_options_usage(stat_usage, options, "G", 1);
		parse_options_usage(NULL, options, "A", 1);
		parse_options_usage(NULL, options, "a", 1);
		goto out;
	}

	if (add_default_attributes())
		goto out;

	target__validate(&target);

	if (perf_evlist__create_maps(evsel_list, &target) < 0) {
		if (target__has_task(&target)) {
			pr_err("Problems finding threads to monitor\n");
			parse_options_usage(stat_usage, options, "p", 1);
			parse_options_usage(NULL, options, "t", 1);
		} else if (target__has_cpu(&target)) {
			perror("failed to parse CPUs map");
			parse_options_usage(stat_usage, options, "C", 1);
			parse_options_usage(NULL, options, "a", 1);
		}
		goto out;
	}
	if (interval && interval < 100) {
		pr_err("print interval must be >= 100ms\n");
		parse_options_usage(stat_usage, options, "I", 1);
		goto out_free_maps;
	}

	if (perf_evlist__alloc_stats(evsel_list, interval))
		goto out_free_maps;

	if (perf_stat_init_aggr_mode())
		goto out_free_maps;

	/*
	 * We don't want to block the signals - that would cause
	 * child tasks to inherit that and Ctrl-C would not work.
	 * What we want is for Ctrl-C to work in the exec()-ed
	 * task, but being ignored by perf stat itself:
	 */
	atexit(sig_atexit);
	if (!forever)
		signal(SIGINT,  skip_signal);
	signal(SIGCHLD, skip_signal);
	signal(SIGALRM, skip_signal);
	signal(SIGABRT, skip_signal);

	status = 0;
	for (run_idx = 0; forever || run_idx < run_count; run_idx++) {
		if (run_count != 1 && verbose)
			fprintf(output, "[ perf stat: executing run #%d ... ]\n",
				run_idx + 1);

		status = run_perf_stat(argc, argv);
		if (forever && status != -1) {
			print_stat(argc, argv);
			perf_stat__reset_stats(evsel_list);
		}
	}

	if (!forever && status != -1 && !interval)
		print_stat(argc, argv);

	perf_evlist__free_stats(evsel_list);
out_free_maps:
	perf_evlist__delete_maps(evsel_list);
out:
	perf_evlist__delete(evsel_list);
	return status;
}