// SPDX-License-Identifier: GPL-2.0
#include "callchain.h"
#include "debug.h"
#include "dso.h"
#include "build-id.h"
#include "hist.h"
#include "map.h"
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
#include "session.h"
#include "namespaces.h"
#include "sort.h"
#include "units.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
#include "symbol.h"
#include "thread.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
#include <inttypes.h>
#include <sys/param.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
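
/*
 * Widen every output column that this entry would need, so that the
 * per-column maximum widths are known before the histogram is rendered.
 */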
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose > 0)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 8);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}

		if (h->branch_info->srcline_from)
			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
					   strlen(h->branch_info->srcline_from));
		if (h->branch_info->srcline_to)
			hists__new_col_len(hists, HISTC_SRCLINE_TO,
					   strlen(h->branch_info->srcline_to));
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen);
		}

		if (h->mem_info->iaddr.sym) {
			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}

		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
				   unresolved_col_width + 4 + 2);

	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
	if (symbol_conf.nanosecs)
		hists__new_col_len(hists, HISTC_TIME, 16);
	else
		hists__new_col_len(hists, HISTC_TIME, 12);

	if (h->srcline) {
		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
		hists__new_col_len(hists, HISTC_SRCLINE, len);
	}

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());

	if (h->trace_output)
		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}
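
/*
 * Attribute the sample period to the kernel/user, host/guest bucket
 * indicated by the cpumode bits taken from perf_event_header::misc.
 */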
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static long hist_time(unsigned long htime)
{
	unsigned long time_quantum = symbol_conf.time_quantum;

	if (time_quantum)
		return (htime / time_quantum) * time_quantum;
	return htime;
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}

static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	if (!he->depth) {
		hists->stats.total_period -= diff;
		if (!he->filtered)
			hists->stats.total_non_filtered_period -= diff;
	}

	if (!he->leaf) {
		struct hist_entry *child;
		struct rb_node *node = rb_first_cached(&he->hroot_out);

		while (node) {
			child = rb_entry(node, struct hist_entry, rb_node);
			node = rb_next(node);

			if (hists__decay_entry(hists, child))
				hists__delete_entry(hists, child);
		}
	}

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	struct rb_root_cached *root_in;
	struct rb_root_cached *root_out;

	if (he->parent_he) {
		root_in = &he->parent_he->hroot_in;
		root_out = &he->parent_he->hroot_out;
	} else {
		if (hists__has(hists, need_collapse))
			root_in = &hists->entries_collapsed;
		else
			root_in = hists->entries_in;
		root_out = &hists->entries;
	}

	rb_erase_cached(&he->rb_node_in, root_in);
	rb_erase_cached(&he->rb_node, root_out);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}
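
/*
 * Age the histogram, as 'perf top' does between refreshes: every entry
 * loses 1/8 of its period per pass (see he_stat__decay()), and entries
 * that decay to zero, or that match the zap_user/zap_kernel request,
 * are removed outright.
 */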
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

struct hist_entry *hists__get_entry(struct hists *hists, int idx)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;
	int i = 0;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (i == idx)
			return n;

		next = rb_next(&n->rb_node);
		i++;
	}

	return NULL;
}

/*
 * histogram, sorted on item, collects periods
 */

static int hist_entry__init(struct hist_entry *he,
			    struct hist_entry *template,
			    bool sample_self,
			    size_t callchain_size)
{
	*he = *template;
	he->callchain_size = callchain_size;

	if (symbol_conf.cumulate_callchain) {
		he->stat_acc = malloc(sizeof(he->stat));
		if (he->stat_acc == NULL)
			return -ENOMEM;
		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
		if (!sample_self)
			memset(&he->stat, 0, sizeof(he->stat));
	}

	map__get(he->ms.map);

	if (he->branch_info) {
		/*
		 * This branch info is (a part of) allocated from
		 * sample__resolve_bstack() and will be freed after
		 * adding new entries.  So we need to save a copy.
		 */
		he->branch_info = malloc(sizeof(*he->branch_info));
		if (he->branch_info == NULL)
			goto err;

		memcpy(he->branch_info, template->branch_info,
		       sizeof(*he->branch_info));

		map__get(he->branch_info->from.map);
		map__get(he->branch_info->to.map);
	}

	if (he->mem_info) {
		map__get(he->mem_info->iaddr.map);
		map__get(he->mem_info->daddr.map);
	}

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_init(he->callchain);

	if (he->raw_data) {
		he->raw_data = memdup(he->raw_data, he->raw_size);
		if (he->raw_data == NULL)
			goto err_infos;
	}

	if (he->srcline) {
		he->srcline = strdup(he->srcline);
		if (he->srcline == NULL)
			goto err_rawdata;
	}

	if (symbol_conf.res_sample) {
		he->res_samples = calloc(sizeof(struct res_sample),
					 symbol_conf.res_sample);
		if (!he->res_samples)
			goto err_srcline;
	}

	INIT_LIST_HEAD(&he->pairs.node);
	thread__get(he->thread);
	he->hroot_in = RB_ROOT_CACHED;
	he->hroot_out = RB_ROOT_CACHED;

	if (!symbol_conf.report_hierarchy)
		he->leaf = true;

	return 0;

err_srcline:
	zfree(&he->srcline);

err_rawdata:
	zfree(&he->raw_data);

err_infos:
	if (he->branch_info) {
		map__put(he->branch_info->from.map);
		map__put(he->branch_info->to.map);
		zfree(&he->branch_info);
	}
	if (he->mem_info) {
		map__put(he->mem_info->iaddr.map);
		map__put(he->mem_info->daddr.map);
	}
err:
	map__zput(he->ms.map);
	zfree(&he->stat_acc);
	return -ENOMEM;
}

static void *hist_entry__zalloc(size_t size)
{
	return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
	free(ptr);
}
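
/*
 * Default hist_entry_ops: the entry is carved from a single zeroed
 * allocation big enough for the hist_entry plus, when callchains are
 * in use, the callchain root that hist_entry__new() asks for.
 */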
static struct hist_entry_ops default_ops = {
	.new	= hist_entry__zalloc,
	.free	= hist_entry__free,
};

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	struct hist_entry_ops *ops = template->ops;
	size_t callchain_size = 0;
	struct hist_entry *he;
	int err = 0;

	if (!ops)
		ops = template->ops = &default_ops;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = ops->new(callchain_size);
	if (he) {
		err = hist_entry__init(he, template, sample_self, callchain_size);
		if (err) {
			ops->free(he);
			he = NULL;
		}
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
	if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
		return;

	he->hists->callchain_period += period;
	if (!he->filtered)
		he->hists->callchain_non_filtered_period += period;
}
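
/*
 * Find the entry matching 'entry' in the current input tree and
 * aggregate the new period into it, or allocate and insert a fresh
 * entry if none exists.  Returns NULL only on allocation failure.
 */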
static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;
	bool leftmost = true;

	p = &hists->entries_in->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self) {
				he_stat__add_period(&he->stat, period, weight);
				hist_entry__add_callchain_period(he, period);
			}
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			mem_info__zput(entry->mem_info);

			block_info__zput(entry->block_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	if (sample_self)
		hist_entry__add_callchain_period(he, period);
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}
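
/*
 * Random sample bookkeeping for 'perf report --samples': keep at most
 * symbol_conf.res_sample samples per entry, replacing a random slot
 * once the array is full.  random_max() rejects the low 'thresh'
 * values so that the accepted range is a whole multiple of 'high' and
 * the modulo below introduces no bias.
 */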
static unsigned random_max(unsigned high)
{
	unsigned thresh = -high % high;

	for (;;) {
		unsigned r = random();

		if (r >= thresh)
			return r % high;
	}
}

static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
{
	struct res_sample *r;
	int j;

	if (he->num_res < symbol_conf.res_sample) {
		j = he->num_res++;
	} else {
		j = random_max(symbol_conf.res_sample);
	}
	r = &he->res_samples[j];
	r->time = sample->time;
	r->cpu = sample->cpu;
	r->tid = sample->tid;
}

static struct hist_entry*
__hists__add_entry(struct hists *hists,
		   struct addr_location *al,
		   struct symbol *sym_parent,
		   struct branch_info *bi,
		   struct mem_info *mi,
		   struct block_info *block_info,
		   struct perf_sample *sample,
		   bool sample_self,
		   struct hist_entry_ops *ops)
{
	struct namespaces *ns = thread__namespaces(al->thread);
	struct hist_entry entry = {
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.cgroup_id = {
			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
		},
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.srcline = (char *) al->srcline,
		.socket	 = al->socket,
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= sample->period,
			.weight = sample->weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.block_info = block_info,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
		.ops = ops,
		.time = hist_time(sample->time),
	}, *he = hists__findnew_entry(hists, &entry, al, sample_self);

	if (!hists->has_callchains && he && he->callchain_size != 0)
		hists->has_callchains = true;
	if (he && symbol_conf.res_sample)
		hists__res_sample(he, sample);
	return he;
}

struct hist_entry *hists__add_entry(struct hists *hists,
				    struct addr_location *al,
				    struct symbol *sym_parent,
				    struct branch_info *bi,
				    struct mem_info *mi,
				    struct perf_sample *sample,
				    bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
				  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
					struct hist_entry_ops *ops,
					struct addr_location *al,
					struct symbol *sym_parent,
					struct branch_info *bi,
					struct mem_info *mi,
					struct perf_sample *sample,
					bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
				  sample, sample_self, ops);
}

struct hist_entry *hists__add_entry_block(struct hists *hists,
					  struct addr_location *al,
					  struct block_info *block_info)
{
	struct hist_entry entry = {
		.block_info = block_info,
		.hists = hists,
	}, *he = hists__findnew_entry(hists, &entry, al, false);

	return he;
}
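
/*
 * hist_entry_iter callbacks: each sample flavour (mem, branch, normal,
 * cumulative) supplies prepare/add_single/next/add_next/finish hooks,
 * and hist_entry_iter__add() drives them in that order.  The nop hooks
 * below are shared by flavours that add a single entry per sample.
 */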
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods.  We want sorting to be done on
	 * nr_events * weight, and this is indirectly achieved by
	 * passing period=weight here and in the
	 * he_stat__add_period() function.
	 */
	sample->period = cost;

	he = hists__add_entry(hists, al, iter->parent, NULL, mi,
			      sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
			     struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled.  Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}
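
/*
 * Normal samples: one hist entry per sample.  The callchain, if any,
 * is appended when the entry is finished.
 */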
static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}
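
/*
 * Cumulative ("children") mode: besides the entry for the sampled
 * location itself, the period is also accumulated into an entry for
 * every caller found in the sample's callchain, so that parents show
 * the cost of everything they call.
 */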
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * cumulated only one time to prevent entries from exceeding
	 * 100% overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.srcline = (char *) al->srcline,
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there are duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling the callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};
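
/*
 * Drive the per-sample-type callbacks: resolve the callchain, add the
 * single entry, then call add_next_entry() until next_entry() says
 * there is nothing left.  finish_entry() always runs, and its error
 * code is reported only if no earlier step failed.
 */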
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;
	struct map *alm = NULL;

	if (al)
		alm = map__get(al->map);

	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err) {
		map__put(alm);
		return err;
	}

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	map__put(alm);

	return err;
}
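
/*
 * Three comparators walk the sort-key list: ->cmp orders entries in
 * the input tree, ->collapse decides which entries merge during the
 * collapse stage, and ->sort (see hist_entry__sort() below) orders the
 * final output.
 */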
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
	struct hist_entry_ops *ops = he->ops;

	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		free_srcline(he->branch_info->srcline_from);
		free_srcline(he->branch_info->srcline_to);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		mem_info__zput(he->mem_info);
	}

	if (he->block_info)
		block_info__zput(he->block_info);

	zfree(&he->res_samples);
	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		zfree(&he->srcfile);
	free_callchain(he->callchain);
	zfree(&he->trace_output);
	zfree(&he->raw_data);
	ops->free(he);
}

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * which would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
				   struct perf_hpp_fmt *fmt, int printed)
{
	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
		const int width = fmt->width(fmt, hpp, he->hists);

		if (printed < width) {
			advance_hpp(hpp, printed);
			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
		}
	}

	return printed;
}

/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
				       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}

static void hist_entry__check_and_remove_filter(struct hist_entry *he,
						enum hist_filter type,
						fmt_chk_fn check)
{
	struct perf_hpp_fmt *fmt;
	bool type_match = false;
	struct hist_entry *parent = he->parent_he;

	switch (type) {
	case HIST_FILTER__THREAD:
		if (symbol_conf.comm_list == NULL &&
		    symbol_conf.pid_list == NULL &&
		    symbol_conf.tid_list == NULL)
			return;
		break;
	case HIST_FILTER__DSO:
		if (symbol_conf.dso_list == NULL)
			return;
		break;
	case HIST_FILTER__SYMBOL:
		if (symbol_conf.sym_list == NULL)
			return;
		break;
	case HIST_FILTER__PARENT:
	case HIST_FILTER__GUEST:
	case HIST_FILTER__HOST:
	case HIST_FILTER__SOCKET:
	case HIST_FILTER__C2C:
	default:
		return;
	}

	/* if it's filtered by own fmt, it has to have filter bits */
	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (check(fmt)) {
			type_match = true;
			break;
		}
	}

	if (type_match) {
		/*
		 * If the filter is for the current level entry, propagate
		 * the filter marker to parents.  The marker bit was
		 * already set by default so it only needs to clear
		 * non-filtered entries.
		 */
		if (!(he->filtered & (1 << type))) {
			while (parent) {
				parent->filtered &= ~(1 << type);
				parent = parent->parent_he;
			}
		}
	} else {
		/*
		 * If the current entry doesn't have matching formats, set
		 * the filter marker for upper level entries.  It will be
		 * cleared if its lower level entries are not filtered.
		 *
		 * For lower-level entries, it inherits the parent's
		 * filter bit so that lower level entries of a
		 * non-filtered entry won't set the filter marker.
		 */
		if (parent == NULL)
			he->filtered |= (1 << type);
		else
			he->filtered |= (parent->filtered & (1 << type));
	}
}
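
/*
 * Hierarchy mode (--hierarchy): each collapsed entry is split into a
 * chain of per-sort-level entries, so filter markers set on one level
 * have to be propagated up and down the chain.
 */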
static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
					    check_thread_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
					    perf_hpp__is_dso_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
					    perf_hpp__is_sym_entry);

	hists__apply_filters(he->hists, he);
}

static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
						 struct rb_root_cached *root,
						 struct hist_entry *he,
						 struct hist_entry *parent_he,
						 struct perf_hpp_list *hpp_list)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter, *new;
	struct perf_hpp_fmt *fmt;
	int64_t cmp;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = 0;
		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			return iter;
		}

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	new = hist_entry__new(he, true);
	if (new == NULL)
		return NULL;

	hists->nr_entries++;

	/* save related format list for output */
	new->hpp_list = hpp_list;
	new->parent_he = parent_he;

	hist_entry__apply_hierarchy_filters(new);

	/* some fields are now passed to 'new' */
	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			he->trace_output = NULL;
		else
			new->trace_output = NULL;

		if (perf_hpp__is_srcline_entry(fmt))
			he->srcline = NULL;
		else
			new->srcline = NULL;

		if (perf_hpp__is_srcfile_entry(fmt))
			he->srcfile = NULL;
		else
			new->srcfile = NULL;
	}

	rb_link_node(&new->rb_node_in, parent, p);
	rb_insert_color_cached(&new->rb_node_in, root, leftmost);
	return new;
}

static int hists__hierarchy_insert_entry(struct hists *hists,
					 struct rb_root_cached *root,
					 struct hist_entry *he)
{
	struct perf_hpp_list_node *node;
	struct hist_entry *new_he = NULL;
	struct hist_entry *parent = NULL;
	int depth = 0;
	int ret = 0;

	list_for_each_entry(node, &hists->hpp_formats, list) {
		/* skip period (overhead) and elided columns */
		if (node->level == 0 || node->skip)
			continue;

		/* insert copy of 'he' for each fmt into the hierarchy */
		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
		if (new_he == NULL) {
			ret = -1;
			break;
		}

		root = &new_he->hroot_in;
		new_he->depth = depth++;
		parent = new_he;
	}

	if (new_he) {
		new_he->leaf = true;

		if (hist_entry__has_callchains(new_he) &&
		    symbol_conf.use_callchain) {
			callchain_cursor_reset(&callchain_cursor);
			if (callchain_merge(&callchain_cursor,
					    new_he->callchain,
					    he->callchain) < 0)
				ret = -1;
		}
	}

	/* 'he' is no longer used */
	hist_entry__delete(he);

	/* return 0 (or -1) since it already applied filters */
	return ret;
}
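
/*
 * Merge 'he' into the collapsed tree.  Returns 1 if it was inserted as
 * a new node, 0 if it was folded into an existing entry, or -1 if a
 * callchain merge failed.
 */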
static int hists__collapse_insert_entry(struct hists *hists,
					struct rb_root_cached *root,
					struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;
	bool leftmost = true;

	if (symbol_conf.report_hierarchy)
		return hists__hierarchy_insert_entry(hists, root, he);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			int ret = 0;

			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				if (callchain_merge(&callchain_cursor,
						    iter->callchain,
						    he->callchain) < 0)
					ret = -1;
			}
			hist_entry__delete(he);
			return ret;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, root, leftmost);
	return 1;
}
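
/*
 * Two input trees are kept so that 'perf top' can keep adding samples
 * to one while the other is being collapsed: rotate to the other tree
 * and hand the current one to the collapser.
 */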
struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root_cached *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}

int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	int ret;

	if (!hists__has(hists, need_collapse))
		return 0;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first_cached(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase_cached(&n->rb_node_in, root);
		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
		if (ret < 0)
			return -1;

		if (ret) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
	return 0;
}
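
/*
 * Output stage: order entries by the output sort keys and move them
 * from the collapsed tree into hists->entries for display.
 */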
static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct hists *hists = a->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

static void hierarchy_recalc_total_periods(struct hists *hists)
{
	struct rb_node *node;
	struct hist_entry *he;

	node = rb_first_cached(&hists->entries);

	hists->stats.total_period = 0;
	hists->stats.total_non_filtered_period = 0;

	/*
	 * recalculate total period using top-level entries only
	 * since lower level entries only see non-filtered entries
	 * but upper level entries have the sum of both.
	 */
	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node);
		node = rb_next(node);

		hists->stats.total_period += he->stat.period;
		if (!he->filtered)
			hists->stats.total_non_filtered_period += he->stat.period;
	}
}

static void hierarchy_insert_output_entry(struct rb_root_cached *root,
					  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	/* update column width of dynamic entry */
	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt))
			fmt->sort(fmt, he, NULL);
	}
}

static void hists__hierarchy_output_resort(struct hists *hists,
					   struct ui_progress *prog,
					   struct rb_root_cached *root_in,
					   struct rb_root_cached *root_out,
					   u64 min_callchain_hits,
					   bool use_callchain)
{
	struct rb_node *node;
	struct hist_entry *he;

	*root_out = RB_ROOT_CACHED;
	node = rb_first_cached(root_in);

	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node_in);
		node = rb_next(node);

		hierarchy_insert_output_entry(root_out, he);

		if (prog)
			ui_progress__update(prog, 1);

		hists->nr_entries++;
		if (!he->filtered) {
			hists->nr_non_filtered_entries++;
			hists__calc_col_len(hists, he);
		}

		if (!he->leaf) {
			hists__hierarchy_output_resort(hists, prog,
						       &he->hroot_in,
						       &he->hroot_out,
						       min_callchain_hits,
						       use_callchain);
			continue;
		}

		if (!use_callchain)
			continue;

		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}

		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}
}

static void __hists__insert_output_entry(struct rb_root_cached *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	if (use_callchain) {
		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, entries, leftmost);

	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    perf_hpp__defined_dynamic_entry(fmt, he->hists))
			fmt->sort(fmt, he, NULL);	/* update column width */
	}
}
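
/*
 * Compute the callchain threshold (min_percent of the total callchain
 * period, or of each entry's own period in CHAIN_GRAPH_REL mode) and
 * rebuild the output tree, optionally skipping entries via the 'cb'
 * callback.
 */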
static void output_resort(struct hists *hists, struct ui_progress *prog,
			  bool use_callchain, hists__resort_cb_t cb,
			  void *cb_arg)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 callchain_total;
	u64 min_callchain_hits;

	callchain_total = hists->callchain_period;
	if (symbol_conf.filter_relative)
		callchain_total = hists->callchain_non_filtered_period;

	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	if (symbol_conf.report_hierarchy) {
		hists__hierarchy_output_resort(hists, prog,
					       &hists->entries_collapsed,
					       &hists->entries,
					       min_callchain_hits,
					       use_callchain);
		hierarchy_recalc_total_periods(hists);
		return;
	}

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first_cached(root);
	hists->entries = RB_ROOT_CACHED;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		if (cb && cb(n, cb_arg))
			continue;

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}

void perf_evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
				  hists__resort_cb_t cb, void *cb_arg)
{
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel__has_callchain(evsel);
	else
		use_callchain = symbol_conf.use_callchain;

	use_callchain |= symbol_conf.show_branchflag_count;

	output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
}

void perf_evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
{
	return perf_evsel__output_resort_cb(evsel, prog, NULL, NULL);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
}

void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
			     hists__resort_cb_t cb)
{
	output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
}
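
/*
 * Depth-first traversal helpers for the hierarchy output tree.
 * HMD_FORCE_CHILD descends into children even when an entry is folded;
 * HMD_FORCE_SIBLING skips the children entirely.
 */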
static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
{
	if (he->leaf || hmd == HMD_FORCE_SIBLING)
		return false;

	if (he->unfolded || hmd == HMD_FORCE_CHILD)
		return true;

	return false;
}

struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	while (can_goto_child(he, HMD_NORMAL)) {
		node = rb_last(&he->hroot_out.rb_root);
		he = rb_entry(node, struct hist_entry, rb_node);
	}
	return node;
}

struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	if (can_goto_child(he, hmd))
		node = rb_first_cached(&he->hroot_out);
	else
		node = rb_next(node);

	while (node == NULL) {
		he = he->parent_he;
		if (he == NULL)
			break;

		node = rb_next(&he->rb_node);
	}
	return node;
}

struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	node = rb_prev(node);
	if (node)
		return rb_hierarchy_last(node);

	he = he->parent_he;
	if (he == NULL)
		return NULL;

	return &he->rb_node;
}

bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
	struct rb_node *node;
	struct hist_entry *child;
	float percent;

	if (he->leaf)
		return false;

	node = rb_first_cached(&he->hroot_out);
	child = rb_entry(node, struct hist_entry, rb_node);

	while (node && child->filtered) {
		node = rb_next(node);
		child = rb_entry(node, struct hist_entry, rb_node);
	}

	if (node)
		percent = hist_entry__get_percent_limit(child);
	else
		percent = 0;

	return node && percent >= limit;
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);

	if (symbol_conf.report_hierarchy) {
		struct hist_entry *parent = h->parent_he;

		while (parent) {
			he_stat__add_stat(&parent->stat, &h->stat);

			parent->filtered &= ~(1 << filter);

			if (parent->filtered)
				goto next;

			/* force fold unfiltered entry for simplicity */
			parent->unfolded = false;
			parent->has_no_entry = false;
			parent->row_offset = 0;
			parent->nr_rows = 0;
next:
			parent = parent->parent_he;
		}
	}

	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->has_no_entry = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}
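
/*
 * Filtering never deletes entries: each entry carries a bitmask of the
 * filters that currently hide it, and the non-filtered stats are
 * rebuilt from scratch on every filter change.
 */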
typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);

static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (filter(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, type);
	}
}

static void resort_filtered_entry(struct rb_root_cached *root,
				  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct rb_root_cached new_root = RB_ROOT_CACHED;
	struct rb_node *nd;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	if (he->leaf || he->filtered)
		return;

	nd = rb_first_cached(&he->hroot_out);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &he->hroot_out);

		resort_filtered_entry(&new_root, h);
	}

	he->hroot_out = new_root;
}

static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
	struct rb_node *nd;
	struct rb_root_cached new_root = RB_ROOT_CACHED;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		int ret;

		ret = hist_entry__filter(h, type, arg);

		/*
		 * case 1. non-matching type
		 * zero out the period, set filter marker and move to child
		 */
		if (ret < 0) {
			memset(&h->stat, 0, sizeof(h->stat));
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
		}
		/*
		 * case 2. matched type (filter out)
		 * set filter marker and move to next
		 */
		else if (ret == 1) {
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
		/*
		 * case 3. ok (not filtered)
		 * add period to hists and parents, erase the filter marker
		 * and move to next sibling
		 */
		else {
			hists__remove_entry_filter(hists, h, type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
	}

	hierarchy_recalc_total_periods(hists);

	/*
	 * resort output after applying a new filter since a filter in a
	 * lower hierarchy can change periods in an upper hierarchy.
	 */
static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
	struct rb_node *nd;
	struct rb_root_cached new_root = RB_ROOT_CACHED;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		int ret;

		ret = hist_entry__filter(h, type, arg);

		/*
		 * case 1. non-matching type
		 * zero out the period, set filter marker and move to child
		 */
		if (ret < 0) {
			memset(&h->stat, 0, sizeof(h->stat));
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
		}
		/*
		 * case 2. matched type (filter out)
		 * set filter marker and move to next
		 */
		else if (ret == 1) {
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
		/*
		 * case 3. ok (not filtered)
		 * add period to hists and parents, erase the filter marker
		 * and move to next sibling
		 */
		else {
			hists__remove_entry_filter(hists, h, type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
	}

	hierarchy_recalc_total_periods(hists);

	/*
	 * Resort the output after applying a new filter, since a filter in a
	 * lower hierarchy can change periods in an upper hierarchy.
	 */
	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &hists->entries);

		resort_filtered_entry(&new_root, h);
	}

	hists->entries = new_root;
}

void hists__filter_by_thread(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
					hists->thread_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__THREAD,
				      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
					hists->dso_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__DSO,
				      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
					hists->symbol_filter_str);
	else
		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
				      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
					&hists->socket_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
				      hists__filter_entry_by_socket);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	/* nr_events[0] accumulates the total across all record types */
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	bool leftmost = true;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		if (symbol_conf.cumulate_callchain)
			memset(he->stat_acc, 0, sizeof(he->stat));
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}
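/*
 * Illustrative note (editor's addition, not in the original): "dummy"
 * entries exist so two histograms can be compared bucket by bucket, as
 * perf diff does.  If the baseline saw a symbol the other evsel never
 * sampled, a zeroed entry is inserted so both trees share the same keys:
 *
 *	pair = hists__add_dummy_entry(leader, pos);	// period == 0
 *	if (pair)
 *		hist_entry__add_pair(pos, pair);
 */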
static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
						    struct rb_root_cached *root,
						    struct hist_entry *pair)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	p = &root->rb_root.rb_node;
	while (*p != NULL) {
		int64_t cmp = 0;

		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, he, pair);
			if (cmp)
				break;
		}
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);

		he->dummy = true;
		he->hists = hists;
		memset(&he->stat, 0, sizeof(he->stat));
		hists__inc_stats(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (hists__has(hists, need_collapse))
		n = hists->entries_collapsed.rb_root.rb_node;
	else
		n = hists->entries_in->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
						      struct hist_entry *he)
{
	struct rb_node *n = root->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter;
		struct perf_hpp_fmt *fmt;
		int64_t cmp = 0;

		iter = rb_entry(n, struct hist_entry, rb_node_in);
		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static void hists__match_hierarchy(struct rb_root_cached *leader_root,
				   struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_hierarchy_entry(other_root, pos);

		if (pair) {
			hist_entry__add_pair(pair, pos);
			hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
		}
	}
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__match_hierarchy(&leader->entries_collapsed,
					      &other->entries_collapsed);
	}

	if (hists__has(leader, need_collapse))
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}
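/*
 * Usage sketch (editor's addition, illustrative only): a comparison tool
 * such as perf diff pairs two evsels' histograms roughly like this:
 *
 *	hists__match(leader_hists, other_hists);   // link entries present in both
 *	hists__link(leader_hists, other_hists);    // add dummies for the rest
 *
 * After both calls, every entry in either tree can reach its counterpart
 * through its pairs list.
 */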
static int hists__link_hierarchy(struct hists *leader_hists,
				 struct hist_entry *parent,
				 struct rb_root_cached *leader_root,
				 struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *leader;

	for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(pos)) {
			bool found = false;

			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
				if (leader->hists == leader_hists) {
					found = true;
					break;
				}
			}
			if (!found)
				return -1;
		} else {
			leader = add_dummy_hierarchy_entry(leader_hists,
							   leader_root, pos);
			if (leader == NULL)
				return -1;

			/* keep the parent pointer in the leader hierarchy, not in pos's */
			leader->parent_he = parent;

			hist_entry__add_pair(pos, leader);
		}

		if (!pos->leaf) {
			if (hists__link_hierarchy(leader_hists, leader,
						  &leader->hroot_in,
						  &pos->hroot_in) < 0)
				return -1;
		}
	}
	return 0;
}

/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, just add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__link_hierarchy(leader, NULL,
					     &leader->entries_collapsed,
					     &other->entries_collapsed);
	}

	if (hists__has(other, need_collapse))
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

int hists__unlink(struct hists *hists)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		list_del_init(&pos->pairs.node);
	}

	return 0;
}
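/*
 * Illustrative sketch (editor's addition, not in the original): consumers
 * walk a linked entry's counterparts the same way hists__link_hierarchy()
 * does above, e.g. to dump each pair's period:
 *
 *	struct hist_entry *pair;
 *
 *	list_for_each_entry(pair, &he->pairs.head, pairs.node)
 *		pr_debug("pair period: %" PRIu64 "\n", pair->stat.period);
 *
 * hists__unlink() resets exactly these list nodes, which is why linking can
 * be redone from scratch between passes.
 */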
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, we still want to process the
			 * other entries.
			 *
			 * For non-standard branch modes, always force
			 * no IPC (prev == NULL).
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
								nonany_branch_mode ? NULL : prev,
								bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			free(bi);
		}
	}
}

size_t perf_evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp)
{
	struct evsel *pos;
	size_t ret = 0;

	evlist__for_each_entry(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}
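/*
 * Illustrative sketch (editor's addition, not in the original):
 * hists__total_period() is the denominator used when turning an entry's
 * period into the familiar "Overhead" percentage, and it respects
 * --percentage {relative|absolute} via symbol_conf.filter_relative:
 *
 *	u64 total = hists__total_period(hists);
 *	double percent = total ? 100.0 * he->stat.period / total : 0.0;
 */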
int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
{
	char unit;
	int printed;
	const struct dso *dso = hists->dso_filter;
	struct thread *thread = hists->thread_filter;
	int socket_id = hists->socket_filter;
	unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
	u64 nr_events = hists->stats.total_period;
	struct evsel *evsel = hists_to_evsel(hists);
	const char *ev_name = perf_evsel__name(evsel);
	char buf[512], sample_freq_str[64] = "";
	size_t buflen = sizeof(buf);
	char ref[30] = " show reference callgraph, ";
	bool enable_ref = false;

	if (symbol_conf.filter_relative) {
		nr_samples = hists->stats.nr_non_filtered_samples;
		nr_events = hists->stats.total_non_filtered_period;
	}

	if (perf_evsel__is_group_event(evsel)) {
		struct evsel *pos;

		perf_evsel__group_desc(evsel, buf, buflen);
		ev_name = buf;

		for_each_group_member(pos, evsel) {
			struct hists *pos_hists = evsel__hists(pos);

			if (symbol_conf.filter_relative) {
				nr_samples += pos_hists->stats.nr_non_filtered_samples;
				nr_events += pos_hists->stats.total_non_filtered_period;
			} else {
				nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
				nr_events += pos_hists->stats.total_period;
			}
		}
	}

	if (symbol_conf.show_ref_callgraph &&
	    strstr(ev_name, "call-graph=no"))
		enable_ref = true;

	if (show_freq)
		scnprintf(sample_freq_str, sizeof(sample_freq_str),
			  " %" PRIu64 " Hz,", evsel->core.attr.sample_freq);

	nr_samples = convert_unit(nr_samples, &unit);
	printed = scnprintf(bf, size,
			    "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
			    nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
			    ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);

	if (hists->uid_filter_str)
		printed += scnprintf(bf + printed, size - printed,
				     ", UID: %s", hists->uid_filter_str);
	if (thread) {
		if (hists__has(hists, thread)) {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s(%d)",
					     (thread->comm_set ? thread__comm_str(thread) : ""),
					     thread->tid);
		} else {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s",
					     (thread->comm_set ? thread__comm_str(thread) : ""));
		}
	}
	if (dso)
		printed += scnprintf(bf + printed, size - printed,
				     ", DSO: %s", dso->short_name);
	if (socket_id > -1)
		printed += scnprintf(bf + printed, size - printed,
				     ", Processor Socket: %d", socket_id);

	return printed;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else {
		pr_debug("Invalid percentage: %s\n", arg);
		return -1;
	}

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT_CACHED;
	hists->entries = RB_ROOT_CACHED;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	hists->hpp_list = hpp_list;
	INIT_LIST_HEAD(&hists->hpp_formats);
	return 0;
}

static void hists__delete_remaining_entries(struct rb_root_cached *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(&root->rb_root)) {
		node = rb_first_cached(root);
		rb_erase_cached(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);
	struct perf_hpp_fmt *fmt, *pos;
	struct perf_hpp_list_node *node, *tmp;

	hists__delete_all_entries(hists);

	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
			list_del_init(&fmt->list);
			free(fmt);
		}
		list_del_init(&node->list);
		free(node);
	}
}

static int hists_evsel__init(struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists, &perf_hpp_list);
	return 0;
}

int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init,
					    hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}

void perf_hpp_list__init(struct perf_hpp_list *list)
{
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);
}
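/*
 * Usage sketch (editor's addition, illustrative only): tools opt in to the
 * larger, hists-carrying evsel at startup, before any evsel is allocated:
 *
 *	if (hists__init() < 0)		// hypothetical caller's error check
 *		return -1;
 *	// from here on, evsel__hists(evsel) is valid for every new evsel
 */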