// SPDX-License-Identifier: GPL-2.0
#include "callchain.h"
#include "debug.h"
#include "dso.h"
#include "build-id.h"
#include "hist.h"
#include "map.h"
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
#include "session.h"
#include "namespaces.h"
#include "cgroup.h"
#include "sort.h"
#include "units.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
#include "symbol.h"
#include "thread.h"
#include "block-info.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
#include <inttypes.h>
#include <sys/param.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
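/*
 * Editorial usage sketch (the names are from this file; the flow is
 * illustrative, not a prescribed API contract): column widths are
 * grow-only within a pass, so callers reset them, let each entry widen
 * its columns, then read the result when printing:
 *
 *	hists__reset_col_len(hists);
 *	// per entry:   hists__calc_col_len(hists, he);
 *	// at output:   int w = hists__col_len(hists, HISTC_SYMBOL);
 */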
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	if (h->block_info)
		return;
	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose > 0)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 8);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.ms.sym) {
			symlen = (int)h->branch_info->from.ms.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.ms.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__new_col_len(hists, HISTC_ADDR_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.ms.sym) {
			symlen = (int)h->branch_info->to.ms.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.ms.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__new_col_len(hists, HISTC_ADDR_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}

		if (h->branch_info->srcline_from)
			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
					   strlen(h->branch_info->srcline_from));
		if (h->branch_info->srcline_to)
			hists__new_col_len(hists, HISTC_SRCLINE_TO,
					   strlen(h->branch_info->srcline_to));
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.ms.sym) {
			symlen = (int)h->mem_info->daddr.ms.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen);
		}

		if (h->mem_info->iaddr.ms.sym) {
			symlen = (int)h->mem_info->iaddr.ms.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.ms.map) {
			symlen = dso__name_len(h->mem_info->daddr.ms.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}

		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
				   unresolved_col_width + 4 + 2);

		hists__new_col_len(hists, HISTC_MEM_DATA_PAGE_SIZE,
				   unresolved_col_width + 4 + 2);

	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CGROUP, 6);
	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_MEM_BLOCKED, 10);
	hists__new_col_len(hists, HISTC_LOCAL_INS_LAT, 13);
	hists__new_col_len(hists, HISTC_GLOBAL_INS_LAT, 13);
	hists__new_col_len(hists, HISTC_LOCAL_P_STAGE_CYC, 13);
	hists__new_col_len(hists, HISTC_GLOBAL_P_STAGE_CYC, 13);
	hists__new_col_len(hists, HISTC_ADDR, BITS_PER_LONG / 4 + 2);

	if (symbol_conf.nanosecs)
		hists__new_col_len(hists, HISTC_TIME, 16);
	else
		hists__new_col_len(hists, HISTC_TIME, 12);
	hists__new_col_len(hists, HISTC_CODE_PAGE_SIZE, 6);

	if (h->srcline) {
		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
		hists__new_col_len(hists, HISTC_SRCLINE, len);
	}

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());

	if (h->trace_output)
		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));

	if (h->cgroup) {
		const char *cgrp_name = "unknown";
		struct cgroup *cgrp = cgroup__find(h->ms.maps->machine->env,
						   h->cgroup);
		if (cgrp != NULL)
			cgrp_name = cgrp->name;

		hists__new_col_len(hists, HISTC_CGROUP, strlen(cgrp_name));
	}
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static long hist_time(unsigned long htime)
{
	unsigned long time_quantum = symbol_conf.time_quantum;

	if (time_quantum)
		return (htime / time_quantum) * time_quantum;
	return htime;
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period)
{
	he_stat->period += period;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
}

static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
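/*
 * Editorial note (numbers are illustrative, not from the source): each
 * decay pass keeps 7/8 of the period, so after k passes a period p has
 * shrunk to roughly p * (7/8)^k, e.g. 1000 -> 875 -> 765 -> 669 ...
 * Entries whose period reaches 0 become eligible for removal in
 * hists__decay_entry() below.
 */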
static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	if (!he->depth) {
		hists->stats.total_period -= diff;
		if (!he->filtered)
			hists->stats.total_non_filtered_period -= diff;
	}

	if (!he->leaf) {
		struct hist_entry *child;
		struct rb_node *node = rb_first_cached(&he->hroot_out);

		while (node) {
			child = rb_entry(node, struct hist_entry, rb_node);
			node = rb_next(node);

			if (hists__decay_entry(hists, child))
				hists__delete_entry(hists, child);
		}
	}

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	struct rb_root_cached *root_in;
	struct rb_root_cached *root_out;

	if (he->parent_he) {
		root_in  = &he->parent_he->hroot_in;
		root_out = &he->parent_he->hroot_out;
	} else {
		if (hists__has(hists, need_collapse))
			root_in = &hists->entries_collapsed;
		else
			root_in = hists->entries_in;
		root_out = &hists->entries;
	}

	rb_erase_cached(&he->rb_node_in, root_in);
	rb_erase_cached(&he->rb_node, root_out);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

struct hist_entry *hists__get_entry(struct hists *hists, int idx)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;
	int i = 0;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (i == idx)
			return n;

		next = rb_next(&n->rb_node);
		i++;
	}

	return NULL;
}
/*
 * histogram, sorted on item, collects periods
 */

static int hist_entry__init(struct hist_entry *he,
			    struct hist_entry *template,
			    bool sample_self,
			    size_t callchain_size)
{
	*he = *template;
	he->callchain_size = callchain_size;

	if (symbol_conf.cumulate_callchain) {
		he->stat_acc = malloc(sizeof(he->stat));
		if (he->stat_acc == NULL)
			return -ENOMEM;
		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
		if (!sample_self)
			memset(&he->stat, 0, sizeof(he->stat));
	}

	map__get(he->ms.map);

	if (he->branch_info) {
		/*
		 * This branch info is (a part of) allocated from
		 * sample__resolve_bstack() and will be freed after
		 * adding new entries.  So we need to save a copy.
		 */
		he->branch_info = malloc(sizeof(*he->branch_info));
		if (he->branch_info == NULL)
			goto err;

		memcpy(he->branch_info, template->branch_info,
		       sizeof(*he->branch_info));

		map__get(he->branch_info->from.ms.map);
		map__get(he->branch_info->to.ms.map);
	}

	if (he->mem_info) {
		map__get(he->mem_info->iaddr.ms.map);
		map__get(he->mem_info->daddr.ms.map);
	}

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_init(he->callchain);

	if (he->raw_data) {
		he->raw_data = memdup(he->raw_data, he->raw_size);
		if (he->raw_data == NULL)
			goto err_infos;
	}

	if (he->srcline) {
		he->srcline = strdup(he->srcline);
		if (he->srcline == NULL)
			goto err_rawdata;
	}

	if (symbol_conf.res_sample) {
		he->res_samples = calloc(sizeof(struct res_sample),
					 symbol_conf.res_sample);
		if (!he->res_samples)
			goto err_srcline;
	}

	INIT_LIST_HEAD(&he->pairs.node);
	thread__get(he->thread);
	he->hroot_in  = RB_ROOT_CACHED;
	he->hroot_out = RB_ROOT_CACHED;

	if (!symbol_conf.report_hierarchy)
		he->leaf = true;

	return 0;

err_srcline:
	zfree(&he->srcline);

err_rawdata:
	zfree(&he->raw_data);

err_infos:
	if (he->branch_info) {
		map__put(he->branch_info->from.ms.map);
		map__put(he->branch_info->to.ms.map);
		zfree(&he->branch_info);
	}
	if (he->mem_info) {
		map__put(he->mem_info->iaddr.ms.map);
		map__put(he->mem_info->daddr.ms.map);
	}
err:
	map__zput(he->ms.map);
	zfree(&he->stat_acc);
	return -ENOMEM;
}

static void *hist_entry__zalloc(size_t size)
{
	return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
	free(ptr);
}

static struct hist_entry_ops default_ops = {
	.new	= hist_entry__zalloc,
	.free	= hist_entry__free,
};

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	struct hist_entry_ops *ops = template->ops;
	size_t callchain_size = 0;
	struct hist_entry *he;
	int err = 0;

	if (!ops)
		ops = template->ops = &default_ops;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = ops->new(callchain_size);
	if (he) {
		err = hist_entry__init(he, template, sample_self, callchain_size);
		if (err) {
			ops->free(he);
			he = NULL;
		}
	}

	return he;
}
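/*
 * Editorial sketch of a custom allocator (hypothetical; struct my_entry,
 * my_zalloc and my_free are invented names, though perf c2c uses the same
 * pattern): a tool can embed the hist_entry in a larger record by
 * supplying its own hist_entry_ops.  The embedded 'he' must be the last
 * member so the callchain area, sized by the 'size' argument, can follow:
 *
 *	struct my_entry {
 *		u64			extra;
 *		struct hist_entry	he;	// must be last
 *	};
 *
 *	static void *my_zalloc(size_t size)
 *	{
 *		struct my_entry *e = zalloc(size + sizeof(*e));
 *
 *		return e ? &e->he : NULL;
 *	}
 *
 *	static void my_free(void *he)
 *	{
 *		free(container_of(he, struct my_entry, he));
 *	}
 *
 *	static struct hist_entry_ops my_ops = {
 *		.new	= my_zalloc,
 *		.free	= my_free,
 *	};
 *
 * Entries would then be created with hists__add_entry_ops(..., &my_ops, ...).
 */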
static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
	if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
		return;

	he->hists->callchain_period += period;
	if (!he->filtered)
		he->hists->callchain_non_filtered_period += period;
}

static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	bool leftmost = true;

	p = &hists->entries_in->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self) {
				he_stat__add_period(&he->stat, period);
				hist_entry__add_callchain_period(he, period);
			}
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			mem_info__zput(entry->mem_info);

			block_info__zput(entry->block_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	if (sample_self)
		hist_entry__add_callchain_period(he, period);
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}

static unsigned random_max(unsigned high)
{
	unsigned thresh = -high % high;

	for (;;) {
		unsigned r = random();

		if (r >= thresh)
			return r % high;
	}
}

static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
{
	struct res_sample *r;
	int j;

	if (he->num_res < symbol_conf.res_sample) {
		j = he->num_res++;
	} else {
		j = random_max(symbol_conf.res_sample);
	}
	r = &he->res_samples[j];
	r->time = sample->time;
	r->cpu = sample->cpu;
	r->tid = sample->tid;
}
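/*
 * Editorial note: 'thresh = -high % high' is the usual rejection trick;
 * in unsigned arithmetic it equals 2^32 % high, and discarding values
 * below it removes the modulo bias of 'r % high'.  Combined with
 * hists__res_sample() this keeps a bounded, randomly replaced set of
 * representative (time, cpu, tid) samples per entry, a reservoir-style
 * scheme.
 */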
static struct hist_entry*
__hists__add_entry(struct hists *hists,
		   struct addr_location *al,
		   struct symbol *sym_parent,
		   struct branch_info *bi,
		   struct mem_info *mi,
		   struct block_info *block_info,
		   struct perf_sample *sample,
		   bool sample_self,
		   struct hist_entry_ops *ops)
{
	struct namespaces *ns = thread__namespaces(al->thread);
	struct hist_entry entry = {
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.cgroup_id = {
			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
		},
		.cgroup = sample->cgroup,
		.ms = {
			.maps = al->maps,
			.map = al->map,
			.sym = al->sym,
		},
		.srcline = (char *) al->srcline,
		.socket = al->socket,
		.cpu = al->cpu,
		.cpumode = al->cpumode,
		.ip = al->addr,
		.level = al->level,
		.code_page_size = sample->code_page_size,
		.stat = {
			.nr_events = 1,
			.period = sample->period,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists = hists,
		.branch_info = bi,
		.mem_info = mi,
		.block_info = block_info,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
		.ops = ops,
		.time = hist_time(sample->time),
		.weight = sample->weight,
		.ins_lat = sample->ins_lat,
		.p_stage_cyc = sample->p_stage_cyc,
	}, *he = hists__findnew_entry(hists, &entry, al, sample_self);

	if (!hists->has_callchains && he && he->callchain_size != 0)
		hists->has_callchains = true;
	if (he && symbol_conf.res_sample)
		hists__res_sample(he, sample);
	return he;
}

struct hist_entry *hists__add_entry(struct hists *hists,
				    struct addr_location *al,
				    struct symbol *sym_parent,
				    struct branch_info *bi,
				    struct mem_info *mi,
				    struct perf_sample *sample,
				    bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
				  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
					struct hist_entry_ops *ops,
					struct addr_location *al,
					struct symbol *sym_parent,
					struct branch_info *bi,
					struct mem_info *mi,
					struct perf_sample *sample,
					bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
				  sample, sample_self, ops);
}

struct hist_entry *hists__add_entry_block(struct hists *hists,
					  struct addr_location *al,
					  struct block_info *block_info)
{
	struct hist_entry entry = {
		.block_info = block_info,
		.hists = hists,
		.ms = {
			.maps = al->maps,
			.map = al->map,
			.sym = al->sym,
		},
	}, *he = hists__findnew_entry(hists, &entry, al, false);

	return he;
}
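/*
 * Editorial note: the on-stack 'entry' above is only a template; it is
 * deep-copied by hist_entry__new() (via hists__findnew_entry()) when no
 * matching entry exists.  On a match, the mem/block info is released
 * (mem_info__zput()/block_info__zput()), while branch info stays owned by
 * the caller (see iter_finish_branch_entry() below).
 */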
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods.  We want sorting to be done on
	 * nr_events * weight, and this is achieved indirectly by
	 * passing period=weight here together with the
	 * he_stat__add_period() function.
	 */
	sample->period = cost;

	he = hists__add_entry(hists, al, iter->parent, NULL, mi,
			      sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
			     struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->maps = bi[i].to.ms.maps;
	al->map = bi[i].to.ms.map;
	al->sym = bi[i].to.ms.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled.  Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}
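/*
 * Editorial note: with branch stacks one hardware sample fans out into
 * sample->branch_stack->nr hist entries, one per from/to pair, each with
 * a pseudo period of 1, so percentages report "share of branches seen"
 * rather than "share of sampled events".
 */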
static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * accumulated only once, preventing entries with more than
	 * 100% overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static bool
hist_entry__fast__sym_diff(struct hist_entry *left,
			   struct hist_entry *right)
{
	struct symbol *sym_l = left->ms.sym;
	struct symbol *sym_r = right->ms.sym;

	if (!sym_l && !sym_r)
		return left->ip != right->ip;

	return !!_sort__sym_cmp(sym_l, sym_r);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.maps = al->maps,
			.map = al->map,
			.sym = al->sym,
		},
		.srcline = (char *) al->srcline,
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor;
	bool fast = hists__has(he_tmp.hists, sym);

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there are duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		/*
		 * In most cases there are no duplicate entries in the
		 * callchain; the symbols are usually different.  Do a
		 * quick check for symbols first.
		 */
		if (fast && hist_entry__fast__sym_diff(he_cache[i], &he_tmp))
			continue;

		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry = iter_prepare_mem_entry,
	.add_single_entry = iter_add_single_mem_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry = iter_prepare_branch_entry,
	.add_single_entry = iter_add_single_branch_entry,
	.next_entry = iter_next_branch_entry,
	.add_next_entry = iter_add_next_branch_entry,
	.finish_entry = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry = iter_prepare_normal_entry,
	.add_single_entry = iter_add_single_normal_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry = iter_prepare_cumulative_entry,
	.add_single_entry = iter_add_single_cumulative_entry,
	.next_entry = iter_next_cumulative_entry,
	.add_next_entry = iter_add_next_cumulative_entry,
	.finish_entry = iter_finish_cumulative_entry,
};

int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;
	struct map *alm = NULL;

	if (al)
		alm = map__get(al->map);

	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err) {
		map__put(alm);
		return err;
	}

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	map__put(alm);

	return err;
}
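/*
 * Editorial usage sketch (illustrative, mirroring what a sample handler
 * like perf report's does): pick an ops table, fill in the iter and hand
 * each sample to hist_entry_iter__add():
 *
 *	struct hist_entry_iter iter = {
 *		.evsel	= evsel,
 *		.sample	= sample,
 *		.ops	= &hist_iter_normal,	// or _branch/_mem/_cumulative
 *	};
 *
 *	err = hist_entry_iter__add(&iter, &al, max_stack, NULL);
 *
 * prepare_entry/add_single_entry run once per sample; next_entry and
 * add_next_entry loop over derived entries (branch pairs, callchain
 * nodes); finish_entry releases per-sample state.
 */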
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
	struct hist_entry_ops *ops = he->ops;

	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.ms.map);
		map__zput(he->branch_info->to.ms.map);
		free_srcline(he->branch_info->srcline_from);
		free_srcline(he->branch_info->srcline_to);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.ms.map);
		map__zput(he->mem_info->daddr.ms.map);
		mem_info__zput(he->mem_info);
	}

	if (he->block_info)
		block_info__zput(he->block_info);

	zfree(&he->res_samples);
	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		zfree(&he->srcfile);
	free_callchain(he->callchain);
	zfree(&he->trace_output);
	zfree(&he->raw_data);
	ops->free(he);
}
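/*
 * Editorial note: hist_entry__delete() undoes the references and copies
 * taken in hist_entry__init(): thread/map refcounts are dropped with the
 * *__zput() helpers (put + NULL the pointer) and the duplicated srcline,
 * raw_data and stat_acc buffers are zfree()d, so an entry can be torn
 * down safely at any point after a successful init.
 */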
/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * which would show tons of trailing spaces when a long C++ demangled
 * method name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
				   struct perf_hpp_fmt *fmt, int printed)
{
	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
		const int width = fmt->width(fmt, hpp, he->hists);

		if (printed < width) {
			advance_hpp(hpp, printed);
			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
		}
	}

	return printed;
}

/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
				       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}

static void hist_entry__check_and_remove_filter(struct hist_entry *he,
						enum hist_filter type,
						fmt_chk_fn check)
{
	struct perf_hpp_fmt *fmt;
	bool type_match = false;
	struct hist_entry *parent = he->parent_he;

	switch (type) {
	case HIST_FILTER__THREAD:
		if (symbol_conf.comm_list == NULL &&
		    symbol_conf.pid_list == NULL &&
		    symbol_conf.tid_list == NULL)
			return;
		break;
	case HIST_FILTER__DSO:
		if (symbol_conf.dso_list == NULL)
			return;
		break;
	case HIST_FILTER__SYMBOL:
		if (symbol_conf.sym_list == NULL)
			return;
		break;
	case HIST_FILTER__PARENT:
	case HIST_FILTER__GUEST:
	case HIST_FILTER__HOST:
	case HIST_FILTER__SOCKET:
	case HIST_FILTER__C2C:
	default:
		return;
	}

	/* if it's filtered by its own fmt, it has to have filter bits */
	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (check(fmt)) {
			type_match = true;
			break;
		}
	}

	if (type_match) {
		/*
		 * If the filter is for the current level entry, propagate
		 * the filter marker to parents.  The marker bit was
		 * already set by default so it only needs to clear
		 * non-filtered entries.
		 */
		if (!(he->filtered & (1 << type))) {
			while (parent) {
				parent->filtered &= ~(1 << type);
				parent = parent->parent_he;
			}
		}
	} else {
		/*
		 * If the current entry doesn't have matching formats, set
		 * the filter marker for upper level entries.  It will be
		 * cleared if its lower level entries are not filtered.
		 *
		 * For lower-level entries, it inherits the parent's
		 * filter bit so that lower level entries of a
		 * non-filtered entry won't set the filter marker.
		 */
		if (parent == NULL)
			he->filtered |= (1 << type);
		else
			he->filtered |= (parent->filtered & (1 << type));
	}
}

static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
					    check_thread_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
					    perf_hpp__is_dso_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
					    perf_hpp__is_sym_entry);

	hists__apply_filters(he->hists, he);
}

static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
						 struct rb_root_cached *root,
						 struct hist_entry *he,
						 struct hist_entry *parent_he,
						 struct perf_hpp_list *hpp_list)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter, *new;
	struct perf_hpp_fmt *fmt;
	int64_t cmp;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = 0;
		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			return iter;
		}

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	new = hist_entry__new(he, true);
	if (new == NULL)
		return NULL;

	hists->nr_entries++;

	/* save related format list for output */
	new->hpp_list = hpp_list;
	new->parent_he = parent_he;

	hist_entry__apply_hierarchy_filters(new);

	/* some fields are now passed to 'new' */
	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			he->trace_output = NULL;
		else
			new->trace_output = NULL;

		if (perf_hpp__is_srcline_entry(fmt))
			he->srcline = NULL;
		else
			new->srcline = NULL;

		if (perf_hpp__is_srcfile_entry(fmt))
			he->srcfile = NULL;
		else
			new->srcfile = NULL;
	}

	rb_link_node(&new->rb_node_in, parent, p);
	rb_insert_color_cached(&new->rb_node_in, root, leftmost);
	return new;
}

static int hists__hierarchy_insert_entry(struct hists *hists,
					 struct rb_root_cached *root,
					 struct hist_entry *he)
{
	struct perf_hpp_list_node *node;
	struct hist_entry *new_he = NULL;
	struct hist_entry *parent = NULL;
	int depth = 0;
	int ret = 0;

	list_for_each_entry(node, &hists->hpp_formats, list) {
		/* skip period (overhead) and elided columns */
		if (node->level == 0 || node->skip)
			continue;

		/* insert copy of 'he' for each fmt into the hierarchy */
		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
		if (new_he == NULL) {
			ret = -1;
			break;
		}

		root = &new_he->hroot_in;
		new_he->depth = depth++;
		parent = new_he;
	}

	if (new_he) {
		new_he->leaf = true;

		if (hist_entry__has_callchains(new_he) &&
		    symbol_conf.use_callchain) {
			callchain_cursor_reset(&callchain_cursor);
			if (callchain_merge(&callchain_cursor,
					    new_he->callchain,
					    he->callchain) < 0)
				ret = -1;
		}
	}

	/* 'he' is no longer used */
	hist_entry__delete(he);

	/* return 0 (or -1) since it already applied filters */
	return ret;
}
static int hists__collapse_insert_entry(struct hists *hists,
					struct rb_root_cached *root,
					struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;
	bool leftmost = true;

	if (symbol_conf.report_hierarchy)
		return hists__hierarchy_insert_entry(hists, root, he);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			int ret = 0;

			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				if (callchain_merge(&callchain_cursor,
						    iter->callchain,
						    he->callchain) < 0)
					ret = -1;
			}
			hist_entry__delete(he);
			return ret;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, root, leftmost);
	return 1;
}

struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root_cached *root;

	mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}

int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	int ret;

	if (!hists__has(hists, need_collapse))
		return 0;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first_cached(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase_cached(&n->rb_node_in, root);
		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
		if (ret < 0)
			return -1;

		if (ret) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
	return 0;
}
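/*
 * Editorial note: hists are processed in two phases.  A rough sketch of
 * the usual sequence, as driven by callers such as perf report:
 *
 *	hists__collapse_resort(hists, prog);	// merge by sort keys
 *	evsel__output_resort(evsel, prog);	// order for display
 *
 * Collapsing merges entries that compare equal under the sort keys;
 * output resorting then rebuilds hists->entries in display order.
 */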
1677 */ 1678 hists__apply_filters(hists, n); 1679 } 1680 if (prog) 1681 ui_progress__update(prog, 1); 1682 } 1683 return 0; 1684 } 1685 1686 static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b) 1687 { 1688 struct hists *hists = a->hists; 1689 struct perf_hpp_fmt *fmt; 1690 int64_t cmp = 0; 1691 1692 hists__for_each_sort_list(hists, fmt) { 1693 if (perf_hpp__should_skip(fmt, a->hists)) 1694 continue; 1695 1696 cmp = fmt->sort(fmt, a, b); 1697 if (cmp) 1698 break; 1699 } 1700 1701 return cmp; 1702 } 1703 1704 static void hists__reset_filter_stats(struct hists *hists) 1705 { 1706 hists->nr_non_filtered_entries = 0; 1707 hists->stats.total_non_filtered_period = 0; 1708 } 1709 1710 void hists__reset_stats(struct hists *hists) 1711 { 1712 hists->nr_entries = 0; 1713 hists->stats.total_period = 0; 1714 1715 hists__reset_filter_stats(hists); 1716 } 1717 1718 static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h) 1719 { 1720 hists->nr_non_filtered_entries++; 1721 hists->stats.total_non_filtered_period += h->stat.period; 1722 } 1723 1724 void hists__inc_stats(struct hists *hists, struct hist_entry *h) 1725 { 1726 if (!h->filtered) 1727 hists__inc_filter_stats(hists, h); 1728 1729 hists->nr_entries++; 1730 hists->stats.total_period += h->stat.period; 1731 } 1732 1733 static void hierarchy_recalc_total_periods(struct hists *hists) 1734 { 1735 struct rb_node *node; 1736 struct hist_entry *he; 1737 1738 node = rb_first_cached(&hists->entries); 1739 1740 hists->stats.total_period = 0; 1741 hists->stats.total_non_filtered_period = 0; 1742 1743 /* 1744 * recalculate total period using top-level entries only 1745 * since lower level entries only see non-filtered entries 1746 * but upper level entries have sum of both entries. 
static void hierarchy_insert_output_entry(struct rb_root_cached *root,
					  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	/* update column width of dynamic entry */
	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt))
			fmt->sort(fmt, he, NULL);
	}
}

static void hists__hierarchy_output_resort(struct hists *hists,
					   struct ui_progress *prog,
					   struct rb_root_cached *root_in,
					   struct rb_root_cached *root_out,
					   u64 min_callchain_hits,
					   bool use_callchain)
{
	struct rb_node *node;
	struct hist_entry *he;

	*root_out = RB_ROOT_CACHED;
	node = rb_first_cached(root_in);

	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node_in);
		node = rb_next(node);

		hierarchy_insert_output_entry(root_out, he);

		if (prog)
			ui_progress__update(prog, 1);

		hists->nr_entries++;
		if (!he->filtered) {
			hists->nr_non_filtered_entries++;
			hists__calc_col_len(hists, he);
		}

		if (!he->leaf) {
			hists__hierarchy_output_resort(hists, prog,
						       &he->hroot_in,
						       &he->hroot_out,
						       min_callchain_hits,
						       use_callchain);
			continue;
		}

		if (!use_callchain)
			continue;

		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}

		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}
}

static void __hists__insert_output_entry(struct rb_root_cached *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	if (use_callchain) {
		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, entries, leftmost);

	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    perf_hpp__defined_dynamic_entry(fmt, he->hists))
			fmt->sort(fmt, he, NULL);	/* update column width */
	}
}
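/*
 * Editorial note (numbers are illustrative): with CHAIN_GRAPH_REL the
 * callchain threshold is relative to each entry, e.g. min_percent = 0.5
 * and a period of 2000 gives min_callchain_hits = 2000 * 0.5 / 100 = 10,
 * so chains below 10 hits are folded for that entry; other modes use the
 * precomputed absolute value passed in by output_resort().
 */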
static void output_resort(struct hists *hists, struct ui_progress *prog,
			  bool use_callchain, hists__resort_cb_t cb,
			  void *cb_arg)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 callchain_total;
	u64 min_callchain_hits;

	callchain_total = hists->callchain_period;
	if (symbol_conf.filter_relative)
		callchain_total = hists->callchain_non_filtered_period;

	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	if (symbol_conf.report_hierarchy) {
		hists__hierarchy_output_resort(hists, prog,
					       &hists->entries_collapsed,
					       &hists->entries,
					       min_callchain_hits,
					       use_callchain);
		hierarchy_recalc_total_periods(hists);
		return;
	}

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first_cached(root);
	hists->entries = RB_ROOT_CACHED;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		if (cb && cb(n, cb_arg))
			continue;

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}

void evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
			     hists__resort_cb_t cb, void *cb_arg)
{
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel__has_callchain(evsel);
	else
		use_callchain = symbol_conf.use_callchain;

	use_callchain |= symbol_conf.show_branchflag_count;

	output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
}

void evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
{
	return evsel__output_resort_cb(evsel, prog, NULL, NULL);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
}

void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
			     hists__resort_cb_t cb)
{
	output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
}

static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
{
	if (he->leaf || hmd == HMD_FORCE_SIBLING)
		return false;

	if (he->unfolded || hmd == HMD_FORCE_CHILD)
		return true;

	return false;
}

struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	while (can_goto_child(he, HMD_NORMAL)) {
		node = rb_last(&he->hroot_out.rb_root);
		he = rb_entry(node, struct hist_entry, rb_node);
	}
	return node;
}

struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	if (can_goto_child(he, hmd))
		node = rb_first_cached(&he->hroot_out);
	else
		node = rb_next(node);

	while (node == NULL) {
		he = he->parent_he;
		if (he == NULL)
			break;

		node = rb_next(&he->rb_node);
	}
	return node;
}
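/*
 * Editorial note: __rb_hierarchy_next() implements a pre-order walk of
 * the hierarchy: descend into an unfolded child first, otherwise take the
 * next sibling, and when a subtree is exhausted climb back up until an
 * ancestor has a next sibling (or the walk ends with NULL).
 */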
struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	node = rb_prev(node);
	if (node)
		return rb_hierarchy_last(node);

	he = he->parent_he;
	if (he == NULL)
		return NULL;

	return &he->rb_node;
}

bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
	struct rb_node *node;
	struct hist_entry *child;
	float percent;

	if (he->leaf)
		return false;

	node = rb_first_cached(&he->hroot_out);
	child = rb_entry(node, struct hist_entry, rb_node);

	while (node && child->filtered) {
		node = rb_next(node);
		child = rb_entry(node, struct hist_entry, rb_node);
	}

	if (node)
		percent = hist_entry__get_percent_limit(child);
	else
		percent = 0;

	return node && percent >= limit;
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);

	if (symbol_conf.report_hierarchy) {
		struct hist_entry *parent = h->parent_he;

		while (parent) {
			he_stat__add_stat(&parent->stat, &h->stat);

			parent->filtered &= ~(1 << filter);

			if (parent->filtered)
				goto next;

			/* force fold unfiltered entry for simplicity */
			parent->unfolded = false;
			parent->has_no_entry = false;
			parent->row_offset = 0;
			parent->nr_rows = 0;
next:
			parent = parent->parent_he;
		}
	}

	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->has_no_entry = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}
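/*
 * Editorial note: he->filtered is a bitmask with one bit per enum
 * hist_filter value, so an entry stays hidden until every active filter
 * bit is cleared, e.g.:
 *
 *	he->filtered |= (1 << HIST_FILTER__DSO);	// hide
 *	he->filtered &= ~(1 << HIST_FILTER__DSO);	// unhide if no other bit set
 */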
typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);

static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
        struct rb_node *nd;

        hists->stats.nr_non_filtered_samples = 0;

        hists__reset_filter_stats(hists);
        hists__reset_col_len(hists);

        for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (filter(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, type);
        }
}

static void resort_filtered_entry(struct rb_root_cached *root,
                                  struct hist_entry *he)
{
        struct rb_node **p = &root->rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        struct rb_root_cached new_root = RB_ROOT_CACHED;
        struct rb_node *nd;
        bool leftmost = true;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (hist_entry__sort(he, iter) > 0)
                        p = &(*p)->rb_left;
                else {
                        p = &(*p)->rb_right;
                        leftmost = false;
                }
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color_cached(&he->rb_node, root, leftmost);

        if (he->leaf || he->filtered)
                return;

        nd = rb_first_cached(&he->hroot_out);
        while (nd) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                nd = rb_next(nd);
                rb_erase_cached(&h->rb_node, &he->hroot_out);

                resort_filtered_entry(&new_root, h);
        }

        he->hroot_out = new_root;
}
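/*
 * For the hierarchy path below, hist_entry__filter() is expected to return
 * a tri-state value: negative when this hierarchy level's sort key does not
 * carry the filtered dimension (keep looking in the children), 1 when the
 * entry matches the filter and must be hidden, and 0 when it survives.  The
 * three cases are handled in that order in hists__filter_hierarchy().
 */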
static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
        struct rb_node *nd;
        struct rb_root_cached new_root = RB_ROOT_CACHED;

        hists->stats.nr_non_filtered_samples = 0;

        hists__reset_filter_stats(hists);
        hists__reset_col_len(hists);

        nd = rb_first_cached(&hists->entries);
        while (nd) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
                int ret;

                ret = hist_entry__filter(h, type, arg);

                /*
                 * case 1. non-matching type
                 * zero out the period, set filter marker and move to child
                 */
                if (ret < 0) {
                        memset(&h->stat, 0, sizeof(h->stat));
                        h->filtered |= (1 << type);

                        nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
                }
                /*
                 * case 2. matched type (filter out)
                 * set filter marker and move to next
                 */
                else if (ret == 1) {
                        h->filtered |= (1 << type);

                        nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
                }
                /*
                 * case 3. ok (not filtered)
                 * add period to hists and parents, erase the filter marker
                 * and move to next sibling
                 */
                else {
                        hists__remove_entry_filter(hists, h, type);

                        nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
                }
        }

        hierarchy_recalc_total_periods(hists);

        /*
         * resort output after applying a new filter since a filter in a lower
         * hierarchy can change periods in an upper hierarchy.
         */
        nd = rb_first_cached(&hists->entries);
        while (nd) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                nd = rb_next(nd);
                rb_erase_cached(&h->rb_node, &hists->entries);

                resort_filtered_entry(&new_root, h);
        }

        hists->entries = new_root;
}

void hists__filter_by_thread(struct hists *hists)
{
        if (symbol_conf.report_hierarchy)
                hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
                                        hists->thread_filter);
        else
                hists__filter_by_type(hists, HIST_FILTER__THREAD,
                                      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
        if (symbol_conf.report_hierarchy)
                hists__filter_hierarchy(hists, HIST_FILTER__DSO,
                                        hists->dso_filter);
        else
                hists__filter_by_type(hists, HIST_FILTER__DSO,
                                      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
        if (symbol_conf.report_hierarchy)
                hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
                                        hists->symbol_filter_str);
        else
                hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
                                      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
        if (symbol_conf.report_hierarchy)
                hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
                                        &hists->socket_filter);
        else
                hists__filter_by_type(hists, HIST_FILTER__SOCKET,
                                      hists__filter_entry_by_socket);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
        ++stats->nr_events[0];
        ++stats->nr_events[type];
}

static void hists_stats__inc(struct hists_stats *stats)
{
        ++stats->nr_samples;
}

void hists__inc_nr_events(struct hists *hists)
{
        hists_stats__inc(&hists->stats);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
        hists_stats__inc(&hists->stats);
        if (!filtered)
                hists->stats.nr_non_filtered_samples++;
}

void hists__inc_nr_lost_samples(struct hists *hists, u32 lost)
{
        hists->stats.nr_lost_samples += lost;
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
                                                 struct hist_entry *pair)
{
        struct rb_root_cached *root;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;
        bool leftmost = true;

        if (hists__has(hists, need_collapse))
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        p = &root->rb_root.rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(he, pair);

                if (!cmp)
                        goto out;

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else {
                        p = &(*p)->rb_right;
                        leftmost = false;
                }
        }

        he = hist_entry__new(pair, true);
        if (he) {
                memset(&he->stat, 0, sizeof(he->stat));
                he->hists = hists;
                if (symbol_conf.cumulate_callchain)
                        memset(he->stat_acc, 0, sizeof(he->stat));
                rb_link_node(&he->rb_node_in, parent, p);
                rb_insert_color_cached(&he->rb_node_in, root, leftmost);
                hists__inc_stats(hists, he);
                he->dummy = true;
        }
out:
        return he;
}
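/*
 * The hierarchy counterpart of hists__add_dummy_entry() above: it inserts a
 * zero-stat placeholder into one level's rb tree (leader_root), comparing
 * keys via the per-level hpp_list collapse callbacks, since hierarchy
 * entries only carry the sort keys of their own level.
 */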
static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
                                                    struct rb_root_cached *root,
                                                    struct hist_entry *pair)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        struct perf_hpp_fmt *fmt;
        bool leftmost = true;

        p = &root->rb_root.rb_node;
        while (*p != NULL) {
                int64_t cmp = 0;

                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
                        cmp = fmt->collapse(fmt, he, pair);
                        if (cmp)
                                break;
                }
                if (!cmp)
                        goto out;

                if (cmp < 0)
                        p = &parent->rb_left;
                else {
                        p = &parent->rb_right;
                        leftmost = false;
                }
        }

        he = hist_entry__new(pair, true);
        if (he) {
                rb_link_node(&he->rb_node_in, parent, p);
                rb_insert_color_cached(&he->rb_node_in, root, leftmost);

                he->dummy = true;
                he->hists = hists;
                memset(&he->stat, 0, sizeof(he->stat));
                hists__inc_stats(hists, he);
        }
out:
        return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
                                            struct hist_entry *he)
{
        struct rb_node *n;

        if (hists__has(hists, need_collapse))
                n = hists->entries_collapsed.rb_root.rb_node;
        else
                n = hists->entries_in->rb_root.rb_node;

        while (n) {
                struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
                int64_t cmp = hist_entry__collapse(iter, he);

                if (cmp < 0)
                        n = n->rb_left;
                else if (cmp > 0)
                        n = n->rb_right;
                else
                        return iter;
        }

        return NULL;
}

static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
                                                      struct hist_entry *he)
{
        struct rb_node *n = root->rb_root.rb_node;

        while (n) {
                struct hist_entry *iter;
                struct perf_hpp_fmt *fmt;
                int64_t cmp = 0;

                iter = rb_entry(n, struct hist_entry, rb_node_in);
                perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
                        cmp = fmt->collapse(fmt, iter, he);
                        if (cmp)
                                break;
                }

                if (cmp < 0)
                        n = n->rb_left;
                else if (cmp > 0)
                        n = n->rb_right;
                else
                        return iter;
        }

        return NULL;
}

static void hists__match_hierarchy(struct rb_root_cached *leader_root,
                                   struct rb_root_cached *other_root)
{
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);
                pair = hists__find_hierarchy_entry(other_root, pos);

                if (pair) {
                        hist_entry__add_pair(pair, pos);
                        hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
                }
        }
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
        struct rb_root_cached *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (symbol_conf.report_hierarchy) {
                /* hierarchy report always collapses entries */
                return hists__match_hierarchy(&leader->entries_collapsed,
                                              &other->entries_collapsed);
        }

        if (hists__has(leader, need_collapse))
                root = &leader->entries_collapsed;
        else
                root = leader->entries_in;

        for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);
                pair = hists__find_entry(other, pos);

                if (pair)
                        hist_entry__add_pair(pair, pos);
        }
}
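/*
 * Sketch of the typical pairing sequence, as used when comparing two
 * profiles (e.g. perf diff matching a baseline against another session;
 * "leader" and "other" stand for any two fully collapsed hists):
 *
 *      hists__match(leader, other);    // pair entries present in both
 *      hists__link(leader, other);     // add dummies for other-only entries
 *
 * Both steps expect collapsing to have finished, so each entry is unique
 * per sort key set.
 */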
static int hists__link_hierarchy(struct hists *leader_hists,
                                 struct hist_entry *parent,
                                 struct rb_root_cached *leader_root,
                                 struct rb_root_cached *other_root)
{
        struct rb_node *nd;
        struct hist_entry *pos, *leader;

        for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);

                if (hist_entry__has_pairs(pos)) {
                        bool found = false;

                        list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
                                if (leader->hists == leader_hists) {
                                        found = true;
                                        break;
                                }
                        }
                        if (!found)
                                return -1;
                } else {
                        leader = add_dummy_hierarchy_entry(leader_hists,
                                                           leader_root, pos);
                        if (leader == NULL)
                                return -1;

                        /* the dummy's parent comes from the leader hierarchy, not from pos */
                        leader->parent_he = parent;

                        hist_entry__add_pair(pos, leader);
                }

                if (!pos->leaf) {
                        if (hists__link_hierarchy(leader_hists, leader,
                                                  &leader->hroot_in,
                                                  &pos->hroot_in) < 0)
                                return -1;
                }
        }
        return 0;
}

/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, add a dummy entry to the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
        struct rb_root_cached *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (symbol_conf.report_hierarchy) {
                /* hierarchy report always collapses entries */
                return hists__link_hierarchy(leader, NULL,
                                             &leader->entries_collapsed,
                                             &other->entries_collapsed);
        }

        if (hists__has(other, need_collapse))
                root = &other->entries_collapsed;
        else
                root = other->entries_in;

        for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);

                if (!hist_entry__has_pairs(pos)) {
                        pair = hists__add_dummy_entry(leader, pos);
                        if (pair == NULL)
                                return -1;
                        hist_entry__add_pair(pos, pair);
                }
        }

        return 0;
}

int hists__unlink(struct hists *hists)
{
        struct rb_root_cached *root;
        struct rb_node *nd;
        struct hist_entry *pos;

        if (hists__has(hists, need_collapse))
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);
                list_del_init(&pos->pairs.node);
        }

        return 0;
}
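/*
 * A minimal sketch of accounting LBR cycles from a sample handler, assuming
 * "al" and "sample" were resolved by the caller; total_cycles may be NULL
 * when the aggregate is not needed:
 *
 *      u64 total_cycles = 0;
 *
 *      hist__account_cycles(sample->branch_stack, al, sample,
 *                           false, &total_cycles);
 */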
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
                          struct perf_sample *sample, bool nonany_branch_mode,
                          u64 *total_cycles)
{
        struct branch_info *bi;
        struct branch_entry *entries = perf_sample__branch_entries(sample);

        /* If we have branch cycles always annotate them. */
        if (bs && bs->nr && entries[0].flags.cycles) {
                int i;

                bi = sample__resolve_bstack(sample, al);
                if (bi) {
                        struct addr_map_symbol *prev = NULL;

                        /*
                         * Ignore errors, still want to process the
                         * other entries.
                         *
                         * For non-standard branch modes always
                         * force no IPC (prev == NULL)
                         *
                         * Note that perf stores branches reversed from
                         * program order!
                         */
                        for (i = bs->nr - 1; i >= 0; i--) {
                                addr_map_symbol__account_cycles(&bi[i].from,
                                                                nonany_branch_mode ? NULL : prev,
                                                                bi[i].flags.cycles);
                                prev = &bi[i].to;

                                if (total_cycles)
                                        *total_cycles += bi[i].flags.cycles;
                        }
                        free(bi);
                }
        }
}

size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp,
                                 bool skip_empty)
{
        struct evsel *pos;
        size_t ret = 0;

        evlist__for_each_entry(evlist, pos) {
                struct hists *hists = evsel__hists(pos);

                if (skip_empty && !hists->stats.nr_samples && !hists->stats.nr_lost_samples)
                        continue;

                ret += fprintf(fp, "%s stats:\n", evsel__name(pos));
                if (hists->stats.nr_samples)
                        ret += fprintf(fp, "%16s events: %10d\n",
                                       "SAMPLE", hists->stats.nr_samples);
                if (hists->stats.nr_lost_samples)
                        ret += fprintf(fp, "%16s events: %10d\n",
                                       "LOST_SAMPLES", hists->stats.nr_lost_samples);
        }

        return ret;
}

u64 hists__total_period(struct hists *hists)
{
        return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
                hists->stats.total_period;
}
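/*
 * Sketch: computing an entry's overhead relative to the whole hists.
 * hists__total_period() transparently honours --percentage
 * {relative|absolute} (see parse_filter_percentage() below):
 *
 *      u64 total = hists__total_period(hists);
 *      double pcnt = total ? 100.0 * he->stat.period / total : 0.0;
 */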
int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
{
        char unit;
        int printed;
        const struct dso *dso = hists->dso_filter;
        struct thread *thread = hists->thread_filter;
        int socket_id = hists->socket_filter;
        unsigned long nr_samples = hists->stats.nr_samples;
        u64 nr_events = hists->stats.total_period;
        struct evsel *evsel = hists_to_evsel(hists);
        const char *ev_name = evsel__name(evsel);
        char buf[512], sample_freq_str[64] = "";
        size_t buflen = sizeof(buf);
        char ref[30] = " show reference callgraph, ";
        bool enable_ref = false;

        if (symbol_conf.filter_relative) {
                nr_samples = hists->stats.nr_non_filtered_samples;
                nr_events = hists->stats.total_non_filtered_period;
        }

        if (evsel__is_group_event(evsel)) {
                struct evsel *pos;

                evsel__group_desc(evsel, buf, buflen);
                ev_name = buf;

                for_each_group_member(pos, evsel) {
                        struct hists *pos_hists = evsel__hists(pos);

                        if (symbol_conf.filter_relative) {
                                nr_samples += pos_hists->stats.nr_non_filtered_samples;
                                nr_events += pos_hists->stats.total_non_filtered_period;
                        } else {
                                nr_samples += pos_hists->stats.nr_samples;
                                nr_events += pos_hists->stats.total_period;
                        }
                }
        }

        if (symbol_conf.show_ref_callgraph &&
            strstr(ev_name, "call-graph=no"))
                enable_ref = true;

        if (show_freq)
                scnprintf(sample_freq_str, sizeof(sample_freq_str),
                          " %" PRIu64 " Hz,", evsel->core.attr.sample_freq);

        nr_samples = convert_unit(nr_samples, &unit);
        printed = scnprintf(bf, size,
                            "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
                            nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
                            ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);

        if (hists->uid_filter_str)
                printed += scnprintf(bf + printed, size - printed,
                                     ", UID: %s", hists->uid_filter_str);
        if (thread) {
                if (hists__has(hists, thread)) {
                        printed += scnprintf(bf + printed, size - printed,
                                             ", Thread: %s(%d)",
                                             (thread->comm_set ? thread__comm_str(thread) : ""),
                                             thread->tid);
                } else {
                        printed += scnprintf(bf + printed, size - printed,
                                             ", Thread: %s",
                                             (thread->comm_set ? thread__comm_str(thread) : ""));
                }
        }
        if (dso)
                printed += scnprintf(bf + printed, size - printed,
                                     ", DSO: %s", dso->short_name);
        if (socket_id > -1)
                printed += scnprintf(bf + printed, size - printed,
                                     ", Processor Socket: %d", socket_id);

        return printed;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
                            const char *arg, int unset __maybe_unused)
{
        if (!strcmp(arg, "relative"))
                symbol_conf.filter_relative = true;
        else if (!strcmp(arg, "absolute"))
                symbol_conf.filter_relative = false;
        else {
                pr_debug("Invalid percentage: %s\n", arg);
                return -1;
        }

        return 0;
}

int perf_hist_config(const char *var, const char *value)
{
        if (!strcmp(var, "hist.percentage"))
                return parse_filter_percentage(NULL, value, 0);

        return 0;
}

int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
        memset(hists, 0, sizeof(*hists));
        hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
        hists->entries_in = &hists->entries_in_array[0];
        hists->entries_collapsed = RB_ROOT_CACHED;
        hists->entries = RB_ROOT_CACHED;
        mutex_init(&hists->lock);
        hists->socket_filter = -1;
        hists->hpp_list = hpp_list;
        INIT_LIST_HEAD(&hists->hpp_formats);
        return 0;
}

static void hists__delete_remaining_entries(struct rb_root_cached *root)
{
        struct rb_node *node;
        struct hist_entry *he;

        while (!RB_EMPTY_ROOT(&root->rb_root)) {
                node = rb_first_cached(root);
                rb_erase_cached(node, root);

                he = rb_entry(node, struct hist_entry, rb_node_in);
                hist_entry__delete(he);
        }
}

static void hists__delete_all_entries(struct hists *hists)
{
        hists__delete_entries(hists);
        hists__delete_remaining_entries(&hists->entries_in_array[0]);
        hists__delete_remaining_entries(&hists->entries_in_array[1]);
        hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct evsel *evsel)
{
        struct hists *hists = evsel__hists(evsel);
        struct perf_hpp_fmt *fmt, *pos;
        struct perf_hpp_list_node *node, *tmp;

        hists__delete_all_entries(hists);

        list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
                perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
                        list_del_init(&fmt->list);
                        free(fmt);
                }
                list_del_init(&node->list);
                free(node);
        }
}

static int hists_evsel__init(struct evsel *evsel)
{
        struct hists *hists = evsel__hists(evsel);

        __hists__init(hists, &perf_hpp_list);
        return 0;
}

int hists__init(void)
{
        int err = evsel__object_config(sizeof(struct hists_evsel),
                                       hists_evsel__init, hists_evsel__exit);
        if (err)
                fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

        return err;
}

void perf_hpp_list__init(struct perf_hpp_list *list)
{
        INIT_LIST_HEAD(&list->fields);
        INIT_LIST_HEAD(&list->sorts);
}
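/*
 * Setup sketch: a tool typically calls hists__init() once at startup so that
 * every evsel allocated afterwards embeds a struct hists initialized against
 * the global perf_hpp_list; callers with a private format list can use
 * __hists__init() directly ("my_hists"/"my_hpp_list" are hypothetical):
 *
 *      if (hists__init() < 0)
 *              return -1;
 *      ...
 *      __hists__init(&my_hists, &my_hpp_list);
 */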