// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include "evsel.h"
#include "stat.h"
#include "color.h"
#include "pmu.h"
#include "rblist.h"
#include "evlist.h"
#include "expr.h"
#include "metricgroup.h"

enum {
	CTX_BIT_USER	= 1 << 0,
	CTX_BIT_KERNEL	= 1 << 1,
	CTX_BIT_HV	= 1 << 2,
	CTX_BIT_HOST	= 1 << 3,
	CTX_BIT_IDLE	= 1 << 4,
	CTX_BIT_MAX	= 1 << 5,
};

#define NUM_CTX CTX_BIT_MAX

/*
 * AGGR_GLOBAL: Use CPU 0
 * AGGR_SOCKET: Use first CPU of socket
 * AGGR_CORE: Use first CPU of core
 * AGGR_NONE: Use matching CPU
 * AGGR_THREAD: Not supported?
 */
static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
static struct stats runtime_cycles_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_stalled_cycles_front_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_stalled_cycles_back_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_branches_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_cacherefs_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_l1_dcache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_l1_icache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_ll_cache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_itlb_cache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_dtlb_cache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_cycles_in_tx_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_transaction_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_elision_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_topdown_total_slots[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_topdown_slots_issued[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_topdown_slots_retired[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_topdown_fetch_bubbles[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_topdown_recovery_bubbles[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_smi_num_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_aperf_stats[NUM_CTX][MAX_NR_CPUS];
static struct rblist runtime_saved_values;
static bool have_frontend_stalled;

struct stats walltime_nsecs_stats;

struct saved_value {
	struct rb_node rb_node;
	struct perf_evsel *evsel;
	int cpu;
	struct stats stats;
};

static int saved_value_cmp(struct rb_node *rb_node, const void *entry)
{
	struct saved_value *a = container_of(rb_node,
					     struct saved_value,
					     rb_node);
	const struct saved_value *b = entry;

	if (a->cpu != b->cpu)
		return a->cpu - b->cpu;
	if (a->evsel == b->evsel)
		return 0;
	if ((char *)a->evsel < (char *)b->evsel)
		return -1;
	return +1;
}

static struct rb_node *saved_value_new(struct rblist *rblist __maybe_unused,
				       const void *entry)
{
	struct saved_value *nd = malloc(sizeof(struct saved_value));

	if (!nd)
		return NULL;
	memcpy(nd, entry, sizeof(struct saved_value));
	return &nd->rb_node;
}
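/*
 * Per-(event, CPU) counter averages for metric expressions are kept in an
 * rblist ordered by saved_value_cmp() above: first by CPU number, then by
 * evsel pointer identity. saved_value_lookup() below returns the node for
 * a given (evsel, cpu) pair, optionally creating it on first use via
 * saved_value_new().
 */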
static struct saved_value *saved_value_lookup(struct perf_evsel *evsel,
					      int cpu,
					      bool create)
{
	struct rb_node *nd;
	struct saved_value dm = {
		.cpu = cpu,
		.evsel = evsel,
	};

	nd = rblist__find(&runtime_saved_values, &dm);
	if (nd)
		return container_of(nd, struct saved_value, rb_node);
	if (create) {
		rblist__add_node(&runtime_saved_values, &dm);
		nd = rblist__find(&runtime_saved_values, &dm);
		if (nd)
			return container_of(nd, struct saved_value, rb_node);
	}
	return NULL;
}

void perf_stat__init_shadow_stats(void)
{
	have_frontend_stalled = pmu_have_event("cpu", "stalled-cycles-frontend");
	rblist__init(&runtime_saved_values);
	runtime_saved_values.node_cmp = saved_value_cmp;
	runtime_saved_values.node_new = saved_value_new;
	/* No delete for now */
}

static int evsel_context(struct perf_evsel *evsel)
{
	int ctx = 0;

	if (evsel->attr.exclude_kernel)
		ctx |= CTX_BIT_KERNEL;
	if (evsel->attr.exclude_user)
		ctx |= CTX_BIT_USER;
	if (evsel->attr.exclude_hv)
		ctx |= CTX_BIT_HV;
	if (evsel->attr.exclude_host)
		ctx |= CTX_BIT_HOST;
	if (evsel->attr.exclude_idle)
		ctx |= CTX_BIT_IDLE;

	return ctx;
}
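/*
 * Illustrative example (the numbers follow directly from the CTX_BIT_*
 * definitions above): an event opened with exclude_kernel and exclude_hv
 * set maps to ctx = CTX_BIT_KERNEL | CTX_BIT_HV = 2 | 4 = 6, so its shadow
 * counts are kept separate from those of an unrestricted event (ctx = 0).
 * NUM_CTX (1 << 5 = 32) covers every combination of the five exclude bits.
 */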
void perf_stat__reset_shadow_stats(void)
{
	struct rb_node *pos, *next;

	memset(runtime_nsecs_stats, 0, sizeof(runtime_nsecs_stats));
	memset(runtime_cycles_stats, 0, sizeof(runtime_cycles_stats));
	memset(runtime_stalled_cycles_front_stats, 0,
	       sizeof(runtime_stalled_cycles_front_stats));
	memset(runtime_stalled_cycles_back_stats, 0,
	       sizeof(runtime_stalled_cycles_back_stats));
	memset(runtime_branches_stats, 0, sizeof(runtime_branches_stats));
	memset(runtime_cacherefs_stats, 0, sizeof(runtime_cacherefs_stats));
	memset(runtime_l1_dcache_stats, 0, sizeof(runtime_l1_dcache_stats));
	memset(runtime_l1_icache_stats, 0, sizeof(runtime_l1_icache_stats));
	memset(runtime_ll_cache_stats, 0, sizeof(runtime_ll_cache_stats));
	memset(runtime_itlb_cache_stats, 0, sizeof(runtime_itlb_cache_stats));
	memset(runtime_dtlb_cache_stats, 0, sizeof(runtime_dtlb_cache_stats));
	memset(runtime_cycles_in_tx_stats, 0,
	       sizeof(runtime_cycles_in_tx_stats));
	memset(runtime_transaction_stats, 0,
	       sizeof(runtime_transaction_stats));
	memset(runtime_elision_stats, 0, sizeof(runtime_elision_stats));
	memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
	memset(runtime_topdown_total_slots, 0, sizeof(runtime_topdown_total_slots));
	memset(runtime_topdown_slots_retired, 0, sizeof(runtime_topdown_slots_retired));
	memset(runtime_topdown_slots_issued, 0, sizeof(runtime_topdown_slots_issued));
	memset(runtime_topdown_fetch_bubbles, 0, sizeof(runtime_topdown_fetch_bubbles));
	memset(runtime_topdown_recovery_bubbles, 0, sizeof(runtime_topdown_recovery_bubbles));
	memset(runtime_smi_num_stats, 0, sizeof(runtime_smi_num_stats));
	memset(runtime_aperf_stats, 0, sizeof(runtime_aperf_stats));

	next = rb_first(&runtime_saved_values.entries);
	while (next) {
		pos = next;
		next = rb_next(pos);
		memset(&container_of(pos, struct saved_value, rb_node)->stats,
		       0,
		       sizeof(struct stats));
	}
}

/*
 * Update various tracking values we maintain to print
 * more semantic information such as miss/hit ratios,
 * instruction rates, etc:
 */
void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 count,
				    int cpu)
{
	int ctx = evsel_context(counter);

	count *= counter->scale;

	if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK) ||
	    perf_evsel__match(counter, SOFTWARE, SW_CPU_CLOCK))
		update_stats(&runtime_nsecs_stats[cpu], count);
	else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
		update_stats(&runtime_cycles_stats[ctx][cpu], count);
	else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
		update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count);
	else if (perf_stat_evsel__is(counter, TRANSACTION_START))
		update_stats(&runtime_transaction_stats[ctx][cpu], count);
	else if (perf_stat_evsel__is(counter, ELISION_START))
		update_stats(&runtime_elision_stats[ctx][cpu], count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_TOTAL_SLOTS))
		update_stats(&runtime_topdown_total_slots[ctx][cpu], count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_ISSUED))
		update_stats(&runtime_topdown_slots_issued[ctx][cpu], count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_RETIRED))
		update_stats(&runtime_topdown_slots_retired[ctx][cpu], count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_BUBBLES))
		update_stats(&runtime_topdown_fetch_bubbles[ctx][cpu], count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_RECOVERY_BUBBLES))
		update_stats(&runtime_topdown_recovery_bubbles[ctx][cpu], count);
	else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
		update_stats(&runtime_stalled_cycles_front_stats[ctx][cpu], count);
	else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
		update_stats(&runtime_stalled_cycles_back_stats[ctx][cpu], count);
	else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
		update_stats(&runtime_branches_stats[ctx][cpu], count);
	else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
		update_stats(&runtime_cacherefs_stats[ctx][cpu], count);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
		update_stats(&runtime_l1_dcache_stats[ctx][cpu], count);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
		update_stats(&runtime_l1_icache_stats[ctx][cpu], count);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
		update_stats(&runtime_ll_cache_stats[ctx][cpu], count);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
		update_stats(&runtime_dtlb_cache_stats[ctx][cpu], count);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
		update_stats(&runtime_itlb_cache_stats[ctx][cpu], count);
	else if (perf_stat_evsel__is(counter, SMI_NUM))
		update_stats(&runtime_smi_num_stats[ctx][cpu], count);
	else if (perf_stat_evsel__is(counter, APERF))
		update_stats(&runtime_aperf_stats[ctx][cpu], count);

	if (counter->collect_stat) {
		struct saved_value *v = saved_value_lookup(counter, cpu, true);

		update_stats(&v->stats, count);
	}
}

/* used for get_ratio_color() */
enum grc_type {
	GRC_STALLED_CYCLES_FE,
	GRC_STALLED_CYCLES_BE,
	GRC_CACHE_MISSES,
	GRC_MAX_NR
};

static const char *get_ratio_color(enum grc_type type, double ratio)
{
	static const double grc_table[GRC_MAX_NR][3] = {
		[GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 },
		[GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 },
		[GRC_CACHE_MISSES]	= { 20.0, 10.0, 5.0 },
	};
	const char *color = PERF_COLOR_NORMAL;

	if (ratio > grc_table[type][0])
		color = PERF_COLOR_RED;
	else if (ratio > grc_table[type][1])
		color = PERF_COLOR_MAGENTA;
	else if (ratio > grc_table[type][2])
		color = PERF_COLOR_YELLOW;

	return color;
}
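/*
 * Example of the thresholds above: a frontend-stall ratio of 35% exceeds
 * the 30.0 threshold but not 50.0, so it is printed in magenta; a cache
 * miss ratio of 4% stays below every GRC_CACHE_MISSES threshold and keeps
 * the normal color.
 */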
static struct perf_evsel *perf_stat__find_event(struct perf_evlist *evsel_list,
						const char *name)
{
	struct perf_evsel *c2;

	evlist__for_each_entry (evsel_list, c2) {
		if (!strcasecmp(c2->name, name))
			return c2;
	}
	return NULL;
}

/*
 * Mark the events referenced by each MetricExpr and link the events that
 * use them to those targets.
 */
void perf_stat__collect_metric_expr(struct perf_evlist *evsel_list)
{
	struct perf_evsel *counter, *leader, **metric_events, *oc;
	bool found;
	const char **metric_names;
	int i;
	int num_metric_names;

	evlist__for_each_entry(evsel_list, counter) {
		bool invalid = false;

		leader = counter->leader;
		if (!counter->metric_expr)
			continue;
		metric_events = counter->metric_events;
		if (!metric_events) {
			if (expr__find_other(counter->metric_expr, counter->name,
					     &metric_names, &num_metric_names) < 0)
				continue;

			metric_events = calloc(num_metric_names + 1,
					       sizeof(struct perf_evsel *));
			if (!metric_events)
				return;
			counter->metric_events = metric_events;
		}

		for (i = 0; i < num_metric_names; i++) {
			found = false;
			if (leader) {
				/* Search in group */
				for_each_group_member (oc, leader) {
					if (!strcasecmp(oc->name, metric_names[i])) {
						found = true;
						break;
					}
				}
			}
			if (!found) {
				/* Search ignoring groups */
				oc = perf_stat__find_event(evsel_list, metric_names[i]);
			}
			if (!oc) {
				/* Deduping one is good enough to handle duplicated PMUs. */
				static char *printed;

				/*
				 * Adding events automatically would be difficult, because
				 * it would risk creating groups that are not schedulable.
				 * perf stat doesn't understand all the scheduling constraints
				 * of events. So we ask the user instead to add the missing
				 * events.
				 */
				if (!printed || strcasecmp(printed, metric_names[i])) {
					fprintf(stderr,
						"Add %s event to groups to get metric expression for %s\n",
						metric_names[i],
						counter->name);
					printed = strdup(metric_names[i]);
				}
				invalid = true;
				continue;
			}
			metric_events[i] = oc;
			oc->collect_stat = true;
		}
		metric_events[i] = NULL;
		free(metric_names);
		if (invalid) {
			free(metric_events);
			counter->metric_events = NULL;
			counter->metric_expr = NULL;
		}
	}
}
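/*
 * Illustrative walk-through (hypothetical event names): if event "a" carries
 * MetricExpr "a / b", expr__find_other() returns the other name, "b". We then
 * look for "b" first inside a's group, then anywhere in the event list, store
 * it in a->metric_events[] (NULL-terminated), and set b->collect_stat so that
 * perf_stat__update_shadow_stats() saves its per-CPU averages for
 * generic_metric() to consume later.
 */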
static void print_stalled_cycles_frontend(int cpu,
					  struct perf_evsel *evsel, double avg,
					  struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_cycles_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);

	if (ratio)
		out->print_metric(out->ctx, color, "%7.2f%%", "frontend cycles idle",
				  ratio);
	else
		out->print_metric(out->ctx, NULL, NULL, "frontend cycles idle", 0);
}

static void print_stalled_cycles_backend(int cpu,
					 struct perf_evsel *evsel, double avg,
					 struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_cycles_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);

	out->print_metric(out->ctx, color, "%7.2f%%", "backend cycles idle", ratio);
}

static void print_branch_misses(int cpu,
				struct perf_evsel *evsel,
				double avg,
				struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_branches_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	out->print_metric(out->ctx, color, "%7.2f%%", "of all branches", ratio);
}

static void print_l1_dcache_misses(int cpu,
				   struct perf_evsel *evsel,
				   double avg,
				   struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_l1_dcache_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	out->print_metric(out->ctx, color, "%7.2f%%", "of all L1-dcache hits", ratio);
}

static void print_l1_icache_misses(int cpu,
				   struct perf_evsel *evsel,
				   double avg,
				   struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_l1_icache_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(out->ctx, color, "%7.2f%%", "of all L1-icache hits", ratio);
}

static void print_dtlb_cache_misses(int cpu,
				    struct perf_evsel *evsel,
				    double avg,
				    struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_dtlb_cache_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(out->ctx, color, "%7.2f%%", "of all dTLB cache hits", ratio);
}

static void print_itlb_cache_misses(int cpu,
				    struct perf_evsel *evsel,
				    double avg,
				    struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_itlb_cache_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(out->ctx, color, "%7.2f%%", "of all iTLB cache hits", ratio);
}

static void print_ll_cache_misses(int cpu,
				  struct perf_evsel *evsel,
				  double avg,
				  struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_ll_cache_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(out->ctx, color, "%7.2f%%", "of all LL-cache hits", ratio);
}

/*
 * High level "TopDown" CPU core pipeline bottleneck breakdown.
 *
 * Basic concept following
 * Yasin, "A Top-Down Method for Performance Analysis and Counters
 * Architecture", ISPASS 2014.
 *
 * The CPU pipeline is divided into 4 areas that can be bottlenecks:
 *
 * Frontend -> Backend -> Retiring
 * BadSpeculation additionally covers out-of-order execution that is thrown
 * away (for example on branch mispredictions).
 * Frontend is instruction decoding.
 * Backend is execution, like computation and accessing data in memory.
 * Retiring is good execution that is not directly bottlenecked.
 *
 * The formulas are computed in slots.
 * A slot is one pipeline entry per unit of pipeline width per cycle
 * (for example a 4-wide pipeline has 4 slots for each cycle).
 *
 * Formulas:
 * BadSpeculation = ((SlotsIssued - SlotsRetired) + RecoveryBubbles) /
 *			TotalSlots
 * Retiring = SlotsRetired / TotalSlots
 * FrontendBound = FetchBubbles / TotalSlots
 * BackendBound = 1.0 - BadSpeculation - Retiring - FrontendBound
 *
 * The kernel provides the mapping to the low level CPU events and any scaling
 * needed for the CPU pipeline width, for example:
 *
 * TotalSlots = Cycles * 4
 *
 * The scaling factor is communicated in the sysfs unit.
 *
 * In some cases the CPU may not be able to measure all the formulas due to
 * missing events. In that case multiple formulas are combined where possible.
 *
 * Full TopDown supports more levels to sub-divide each area: for example
 * BackendBound into computing bound and memory bound. For now we only
 * support Level 1 TopDown.
 */
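/*
 * Worked example with hypothetical numbers, assuming a 4-wide pipeline:
 * over 1000 cycles, TotalSlots = 4 * 1000 = 4000. With SlotsIssued = 3000,
 * SlotsRetired = 2500, RecoveryBubbles = 100 and FetchBubbles = 400:
 *
 *	BadSpeculation = ((3000 - 2500) + 100) / 4000 = 0.15
 *	Retiring       = 2500 / 4000                  = 0.625
 *	FrontendBound  =  400 / 4000                  = 0.10
 *	BackendBound   = 1.0 - 0.15 - 0.625 - 0.10    = 0.125
 *
 * i.e. the pipeline is mostly retiring, with backend stalls as the largest
 * remaining bottleneck.
 */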
static double sanitize_val(double x)
{
	if (x < 0 && x >= -0.02)
		return 0.0;
	return x;
}

static double td_total_slots(int ctx, int cpu)
{
	return avg_stats(&runtime_topdown_total_slots[ctx][cpu]);
}

static double td_bad_spec(int ctx, int cpu)
{
	double bad_spec = 0;
	double total_slots;
	double total;

	total = avg_stats(&runtime_topdown_slots_issued[ctx][cpu]) -
		avg_stats(&runtime_topdown_slots_retired[ctx][cpu]) +
		avg_stats(&runtime_topdown_recovery_bubbles[ctx][cpu]);
	total_slots = td_total_slots(ctx, cpu);
	if (total_slots)
		bad_spec = total / total_slots;
	return sanitize_val(bad_spec);
}

static double td_retiring(int ctx, int cpu)
{
	double retiring = 0;
	double total_slots = td_total_slots(ctx, cpu);
	double ret_slots = avg_stats(&runtime_topdown_slots_retired[ctx][cpu]);

	if (total_slots)
		retiring = ret_slots / total_slots;
	return retiring;
}

static double td_fe_bound(int ctx, int cpu)
{
	double fe_bound = 0;
	double total_slots = td_total_slots(ctx, cpu);
	double fetch_bub = avg_stats(&runtime_topdown_fetch_bubbles[ctx][cpu]);

	if (total_slots)
		fe_bound = fetch_bub / total_slots;
	return fe_bound;
}

static double td_be_bound(int ctx, int cpu)
{
	double sum = (td_fe_bound(ctx, cpu) +
		      td_bad_spec(ctx, cpu) +
		      td_retiring(ctx, cpu));
	if (sum == 0)
		return 0;
	return sanitize_val(1.0 - sum);
}

static void print_smi_cost(int cpu, struct perf_evsel *evsel,
			   struct perf_stat_output_ctx *out)
{
	double smi_num, aperf, cycles, cost = 0.0;
	int ctx = evsel_context(evsel);
	const char *color = NULL;

	smi_num = avg_stats(&runtime_smi_num_stats[ctx][cpu]);
	aperf = avg_stats(&runtime_aperf_stats[ctx][cpu]);
	cycles = avg_stats(&runtime_cycles_stats[ctx][cpu]);

	if ((cycles == 0) || (aperf == 0))
		return;

	if (smi_num)
		cost = (aperf - cycles) / aperf * 100.00;

	if (cost > 10)
		color = PERF_COLOR_RED;
	out->print_metric(out->ctx, color, "%8.1f%%", "SMI cycles%", cost);
	out->print_metric(out->ctx, NULL, "%4.0f", "SMI#", smi_num);
}
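/*
 * Evaluate a MetricExpr for one event: bind the event's own average under
 * its name, bind the averages saved for each linked event (see
 * perf_stat__collect_metric_expr() above), then hand the expression to the
 * expr parser. Illustrative example (hypothetical events "a" and "b"): for
 * MetricExpr "a / b" attached to "a", pctx ends up mapping both names to
 * their measured averages and expr__parse() computes the ratio. The special
 * name "duration_time" is backed by walltime_nsecs_stats, scaled from
 * nanoseconds to seconds.
 */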
static void generic_metric(const char *metric_expr,
			   struct perf_evsel **metric_events,
			   char *name,
			   const char *metric_name,
			   double avg,
			   int cpu,
			   struct perf_stat_output_ctx *out)
{
	print_metric_t print_metric = out->print_metric;
	struct parse_ctx pctx;
	double ratio;
	int i;
	void *ctxp = out->ctx;

	expr__ctx_init(&pctx);
	expr__add_id(&pctx, name, avg);
	for (i = 0; metric_events[i]; i++) {
		struct saved_value *v;
		struct stats *stats;
		double scale;

		if (!strcmp(metric_events[i]->name, "duration_time")) {
			stats = &walltime_nsecs_stats;
			scale = 1e-9;
		} else {
			v = saved_value_lookup(metric_events[i], cpu, false);
			if (!v)
				break;
			stats = &v->stats;
			scale = 1.0;
		}
		expr__add_id(&pctx, metric_events[i]->name, avg_stats(stats) * scale);
	}
	if (!metric_events[i]) {
		const char *p = metric_expr;

		if (expr__parse(&ratio, &pctx, &p) == 0)
			print_metric(ctxp, NULL, "%8.1f",
				     metric_name ?
				     metric_name :
				     out->force_header ? name : "",
				     ratio);
		else
			print_metric(ctxp, NULL, NULL,
				     out->force_header ?
				     (metric_name ? metric_name : name) : "", 0);
	} else
		print_metric(ctxp, NULL, NULL, "", 0);
}

void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
				   double avg, int cpu,
				   struct perf_stat_output_ctx *out,
				   struct rblist *metric_events)
{
	void *ctxp = out->ctx;
	print_metric_t print_metric = out->print_metric;
	double total, ratio = 0.0, total2;
	const char *color = NULL;
	int ctx = evsel_context(evsel);
	struct metric_event *me;
	int num = 1;

	if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
		total = avg_stats(&runtime_cycles_stats[ctx][cpu]);

		if (total) {
			ratio = avg / total;
			print_metric(ctxp, NULL, "%7.2f ",
				     "insn per cycle", ratio);
		} else {
			print_metric(ctxp, NULL, NULL, "insn per cycle", 0);
		}

		total = avg_stats(&runtime_stalled_cycles_front_stats[ctx][cpu]);
		total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[ctx][cpu]));

		if (total && avg) {
			out->new_line(ctxp);
			ratio = total / avg;
			print_metric(ctxp, NULL, "%7.2f ",
				     "stalled cycles per insn",
				     ratio);
		} else if (have_frontend_stalled) {
			print_metric(ctxp, NULL, NULL,
				     "stalled cycles per insn", 0);
		}
	} else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
		if (runtime_branches_stats[ctx][cpu].n != 0)
			print_branch_misses(cpu, evsel, avg, out);
		else
			print_metric(ctxp, NULL, NULL, "of all branches", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == (PERF_COUNT_HW_CACHE_L1D |
				       ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
				       ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
		if (runtime_l1_dcache_stats[ctx][cpu].n != 0)
			print_l1_dcache_misses(cpu, evsel, avg, out);
		else
			print_metric(ctxp, NULL, NULL, "of all L1-dcache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == (PERF_COUNT_HW_CACHE_L1I |
				       ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
				       ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
		if (runtime_l1_icache_stats[ctx][cpu].n != 0)
			print_l1_icache_misses(cpu, evsel, avg, out);
		else
			print_metric(ctxp, NULL, NULL, "of all L1-icache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == (PERF_COUNT_HW_CACHE_DTLB |
				       ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
				       ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
		if (runtime_dtlb_cache_stats[ctx][cpu].n != 0)
			print_dtlb_cache_misses(cpu, evsel, avg, out);
		else
			print_metric(ctxp, NULL, NULL, "of all dTLB cache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == (PERF_COUNT_HW_CACHE_ITLB |
				       ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
				       ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
		if (runtime_itlb_cache_stats[ctx][cpu].n != 0)
			print_itlb_cache_misses(cpu, evsel, avg, out);
		else
			print_metric(ctxp, NULL, NULL, "of all iTLB cache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == (PERF_COUNT_HW_CACHE_LL |
				       ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
				       ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
		if (runtime_ll_cache_stats[ctx][cpu].n != 0)
			print_ll_cache_misses(cpu, evsel, avg, out);
		else
			print_metric(ctxp, NULL, NULL, "of all LL-cache hits", 0);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) {
		total = avg_stats(&runtime_cacherefs_stats[ctx][cpu]);

		if (total)
			ratio = avg * 100 / total;

		if (runtime_cacherefs_stats[ctx][cpu].n != 0)
			print_metric(ctxp, NULL, "%8.3f %%",
				     "of all cache refs", ratio);
		else
			print_metric(ctxp, NULL, NULL, "of all cache refs", 0);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
		print_stalled_cycles_frontend(cpu, evsel, avg, out);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
		print_stalled_cycles_backend(cpu, evsel, avg, out);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
		total = avg_stats(&runtime_nsecs_stats[cpu]);

		if (total) {
			ratio = avg / total;
			print_metric(ctxp, NULL, "%8.3f", "GHz", ratio);
		} else {
			print_metric(ctxp, NULL, NULL, "GHz", 0);
		}
	} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX)) {
		total = avg_stats(&runtime_cycles_stats[ctx][cpu]);

		if (total)
			print_metric(ctxp, NULL,
				     "%7.2f%%", "transactional cycles",
				     100.0 * (avg / total));
		else
			print_metric(ctxp, NULL, NULL, "transactional cycles",
				     0);
	} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX_CP)) {
		total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
		total2 = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);

		if (total2 < avg)
			total2 = avg;
		if (total)
			print_metric(ctxp, NULL, "%7.2f%%", "aborted cycles",
				     100.0 * ((total2 - avg) / total));
		else
			print_metric(ctxp, NULL, NULL, "aborted cycles", 0);
	} else if (perf_stat_evsel__is(evsel, TRANSACTION_START)) {
		total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);

		if (avg)
			ratio = total / avg;

		if (runtime_cycles_in_tx_stats[ctx][cpu].n != 0)
			print_metric(ctxp, NULL, "%8.0f",
				     "cycles / transaction", ratio);
		else
			print_metric(ctxp, NULL, NULL, "cycles / transaction",
				     0);
	} else if (perf_stat_evsel__is(evsel, ELISION_START)) {
		total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);

		if (avg)
			ratio = total / avg;

		print_metric(ctxp, NULL, "%8.0f", "cycles / elision", ratio);
	} else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK) ||
		   perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK)) {
		if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
			print_metric(ctxp, NULL, "%8.3f", "CPUs utilized",
				     avg / ratio);
		else
			print_metric(ctxp, NULL, NULL, "CPUs utilized", 0);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_BUBBLES)) {
		double fe_bound = td_fe_bound(ctx, cpu);

		if (fe_bound > 0.2)
			color = PERF_COLOR_RED;
		print_metric(ctxp, color, "%8.1f%%", "frontend bound",
			     fe_bound * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_RETIRED)) {
		double retiring = td_retiring(ctx, cpu);

		if (retiring > 0.7)
			color = PERF_COLOR_GREEN;
		print_metric(ctxp, color, "%8.1f%%", "retiring",
			     retiring * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_RECOVERY_BUBBLES)) {
		double bad_spec = td_bad_spec(ctx, cpu);

		if (bad_spec > 0.1)
			color = PERF_COLOR_RED;
		print_metric(ctxp, color, "%8.1f%%", "bad speculation",
			     bad_spec * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_ISSUED)) {
		double be_bound = td_be_bound(ctx, cpu);
		const char *name = "backend bound";
		static int have_recovery_bubbles = -1;

		/* In case the CPU does not support topdown-recovery-bubbles */
		if (have_recovery_bubbles < 0)
			have_recovery_bubbles = pmu_have_event("cpu",
					"topdown-recovery-bubbles");
		if (!have_recovery_bubbles)
			name = "backend bound/bad spec";

		if (be_bound > 0.2)
			color = PERF_COLOR_RED;
		if (td_total_slots(ctx, cpu) > 0)
			print_metric(ctxp, color, "%8.1f%%", name,
				     be_bound * 100.);
		else
			print_metric(ctxp, NULL, NULL, name, 0);
	} else if (evsel->metric_expr) {
		generic_metric(evsel->metric_expr, evsel->metric_events, evsel->name,
			       evsel->metric_name, avg, cpu, out);
	} else if (runtime_nsecs_stats[cpu].n != 0) {
		char unit = 'M';
		char unit_buf[10];

		total = avg_stats(&runtime_nsecs_stats[cpu]);

		if (total)
			ratio = 1000.0 * avg / total;
		if (ratio < 0.001) {
			ratio *= 1000;
			unit = 'K';
		}
		snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
		print_metric(ctxp, NULL, "%8.3f", unit_buf, ratio);
	} else if (perf_stat_evsel__is(evsel, SMI_NUM)) {
		print_smi_cost(cpu, evsel, out);
	} else {
		num = 0;
	}

	if ((me = metricgroup__lookup(metric_events, evsel, false)) != NULL) {
		struct metric_expr *mexp;

		list_for_each_entry (mexp, &me->head, nd) {
			if (num++ > 0)
				out->new_line(ctxp);
			generic_metric(mexp->metric_expr, mexp->metric_events,
				       evsel->name, mexp->metric_name,
				       avg, cpu, out);
		}
	}
	if (num == 0)
		print_metric(ctxp, NULL, NULL, NULL, 0);
}
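/*
 * Usage sketch (illustrative only, not part of this file): the printing
 * hooks live in struct perf_stat_output_ctx and the real callbacks are
 * wired up by callers such as builtin-stat.c. A minimal callback matching
 * the call sites above, which pass (ctx, color, fmt, unit, value), could
 * look like:
 *
 *	static void print_metric_simple(void *ctx, const char *color,
 *					const char *fmt, const char *unit,
 *					double val)
 *	{
 *		FILE *fp = ctx;
 *
 *		if (!fmt) {
 *			// No usable value: print the unit as a placeholder.
 *			fprintf(fp, " %-25s", unit ? unit : "");
 *			return;
 *		}
 *		color_fprintf(fp, color ? color : PERF_COLOR_NORMAL, fmt, val);
 *		fprintf(fp, " %s", unit);
 *	}
 *
 * with out->ctx pointing at a FILE * and out->new_line() emitting a newline
 * plus any per-line prefix the caller wants.
 */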