1 #include "util.h" 2 #include "build-id.h" 3 #include "hist.h" 4 #include "map.h" 5 #include "session.h" 6 #include "namespaces.h" 7 #include "sort.h" 8 #include "evlist.h" 9 #include "evsel.h" 10 #include "annotate.h" 11 #include "srcline.h" 12 #include "thread.h" 13 #include "ui/progress.h" 14 #include <errno.h> 15 #include <math.h> 16 #include <sys/param.h> 17 18 static bool hists__filter_entry_by_dso(struct hists *hists, 19 struct hist_entry *he); 20 static bool hists__filter_entry_by_thread(struct hists *hists, 21 struct hist_entry *he); 22 static bool hists__filter_entry_by_symbol(struct hists *hists, 23 struct hist_entry *he); 24 static bool hists__filter_entry_by_socket(struct hists *hists, 25 struct hist_entry *he); 26 27 u16 hists__col_len(struct hists *hists, enum hist_column col) 28 { 29 return hists->col_len[col]; 30 } 31 32 void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len) 33 { 34 hists->col_len[col] = len; 35 } 36 37 bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len) 38 { 39 if (len > hists__col_len(hists, col)) { 40 hists__set_col_len(hists, col, len); 41 return true; 42 } 43 return false; 44 } 45 46 void hists__reset_col_len(struct hists *hists) 47 { 48 enum hist_column col; 49 50 for (col = 0; col < HISTC_NR_COLS; ++col) 51 hists__set_col_len(hists, col, 0); 52 } 53 54 static void hists__set_unres_dso_col_len(struct hists *hists, int dso) 55 { 56 const unsigned int unresolved_col_width = BITS_PER_LONG / 4; 57 58 if (hists__col_len(hists, dso) < unresolved_col_width && 59 !symbol_conf.col_width_list_str && !symbol_conf.field_sep && 60 !symbol_conf.dso_list) 61 hists__set_col_len(hists, dso, unresolved_col_width); 62 } 63 64 void hists__calc_col_len(struct hists *hists, struct hist_entry *h) 65 { 66 const unsigned int unresolved_col_width = BITS_PER_LONG / 4; 67 int symlen; 68 u16 len; 69 70 /* 71 * +4 accounts for '[x] ' priv level info 72 * +2 accounts for 0x prefix on raw addresses 73 * +3 accounts for ' y ' symtab origin info 74 */ 75 if (h->ms.sym) { 76 symlen = h->ms.sym->namelen + 4; 77 if (verbose > 0) 78 symlen += BITS_PER_LONG / 4 + 2 + 3; 79 hists__new_col_len(hists, HISTC_SYMBOL, symlen); 80 } else { 81 symlen = unresolved_col_width + 4 + 2; 82 hists__new_col_len(hists, HISTC_SYMBOL, symlen); 83 hists__set_unres_dso_col_len(hists, HISTC_DSO); 84 } 85 86 len = thread__comm_len(h->thread); 87 if (hists__new_col_len(hists, HISTC_COMM, len)) 88 hists__set_col_len(hists, HISTC_THREAD, len + 8); 89 90 if (h->ms.map) { 91 len = dso__name_len(h->ms.map->dso); 92 hists__new_col_len(hists, HISTC_DSO, len); 93 } 94 95 if (h->parent) 96 hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen); 97 98 if (h->branch_info) { 99 if (h->branch_info->from.sym) { 100 symlen = (int)h->branch_info->from.sym->namelen + 4; 101 if (verbose > 0) 102 symlen += BITS_PER_LONG / 4 + 2 + 3; 103 hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen); 104 105 symlen = dso__name_len(h->branch_info->from.map->dso); 106 hists__new_col_len(hists, HISTC_DSO_FROM, symlen); 107 } else { 108 symlen = unresolved_col_width + 4 + 2; 109 hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen); 110 hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM); 111 } 112 113 if (h->branch_info->to.sym) { 114 symlen = (int)h->branch_info->to.sym->namelen + 4; 115 if (verbose > 0) 116 symlen += BITS_PER_LONG / 4 + 2 + 3; 117 hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen); 118 119 symlen = dso__name_len(h->branch_info->to.map->dso); 120 hists__new_col_len(hists, 
HISTC_DSO_TO, symlen); 121 } else { 122 symlen = unresolved_col_width + 4 + 2; 123 hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen); 124 hists__set_unres_dso_col_len(hists, HISTC_DSO_TO); 125 } 126 127 if (h->branch_info->srcline_from) 128 hists__new_col_len(hists, HISTC_SRCLINE_FROM, 129 strlen(h->branch_info->srcline_from)); 130 if (h->branch_info->srcline_to) 131 hists__new_col_len(hists, HISTC_SRCLINE_TO, 132 strlen(h->branch_info->srcline_to)); 133 } 134 135 if (h->mem_info) { 136 if (h->mem_info->daddr.sym) { 137 symlen = (int)h->mem_info->daddr.sym->namelen + 4 138 + unresolved_col_width + 2; 139 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, 140 symlen); 141 hists__new_col_len(hists, HISTC_MEM_DCACHELINE, 142 symlen + 1); 143 } else { 144 symlen = unresolved_col_width + 4 + 2; 145 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, 146 symlen); 147 hists__new_col_len(hists, HISTC_MEM_DCACHELINE, 148 symlen); 149 } 150 151 if (h->mem_info->iaddr.sym) { 152 symlen = (int)h->mem_info->iaddr.sym->namelen + 4 153 + unresolved_col_width + 2; 154 hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, 155 symlen); 156 } else { 157 symlen = unresolved_col_width + 4 + 2; 158 hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, 159 symlen); 160 } 161 162 if (h->mem_info->daddr.map) { 163 symlen = dso__name_len(h->mem_info->daddr.map->dso); 164 hists__new_col_len(hists, HISTC_MEM_DADDR_DSO, 165 symlen); 166 } else { 167 symlen = unresolved_col_width + 4 + 2; 168 hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO); 169 } 170 171 hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR, 172 unresolved_col_width + 4 + 2); 173 174 } else { 175 symlen = unresolved_col_width + 4 + 2; 176 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen); 177 hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen); 178 hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO); 179 } 180 181 hists__new_col_len(hists, HISTC_CGROUP_ID, 20); 182 hists__new_col_len(hists, HISTC_CPU, 3); 183 hists__new_col_len(hists, HISTC_SOCKET, 6); 184 hists__new_col_len(hists, HISTC_MEM_LOCKED, 6); 185 hists__new_col_len(hists, HISTC_MEM_TLB, 22); 186 hists__new_col_len(hists, HISTC_MEM_SNOOP, 12); 187 hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3); 188 hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12); 189 hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12); 190 191 if (h->srcline) { 192 len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header)); 193 hists__new_col_len(hists, HISTC_SRCLINE, len); 194 } 195 196 if (h->srcfile) 197 hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile)); 198 199 if (h->transaction) 200 hists__new_col_len(hists, HISTC_TRANSACTION, 201 hist_entry__transaction_len()); 202 203 if (h->trace_output) 204 hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output)); 205 } 206 207 void hists__output_recalc_col_len(struct hists *hists, int max_rows) 208 { 209 struct rb_node *next = rb_first(&hists->entries); 210 struct hist_entry *n; 211 int row = 0; 212 213 hists__reset_col_len(hists); 214 215 while (next && row++ < max_rows) { 216 n = rb_entry(next, struct hist_entry, rb_node); 217 if (!n->filtered) 218 hists__calc_col_len(hists, n); 219 next = rb_next(&n->rb_node); 220 } 221 } 222 223 static void he_stat__add_cpumode_period(struct he_stat *he_stat, 224 unsigned int cpumode, u64 period) 225 { 226 switch (cpumode) { 227 case PERF_RECORD_MISC_KERNEL: 228 he_stat->period_sys += period; 229 break; 230 case PERF_RECORD_MISC_USER: 231 he_stat->period_us += period; 232 break; 233 case 
PERF_RECORD_MISC_GUEST_KERNEL: 234 he_stat->period_guest_sys += period; 235 break; 236 case PERF_RECORD_MISC_GUEST_USER: 237 he_stat->period_guest_us += period; 238 break; 239 default: 240 break; 241 } 242 } 243 244 static void he_stat__add_period(struct he_stat *he_stat, u64 period, 245 u64 weight) 246 { 247 248 he_stat->period += period; 249 he_stat->weight += weight; 250 he_stat->nr_events += 1; 251 } 252 253 static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src) 254 { 255 dest->period += src->period; 256 dest->period_sys += src->period_sys; 257 dest->period_us += src->period_us; 258 dest->period_guest_sys += src->period_guest_sys; 259 dest->period_guest_us += src->period_guest_us; 260 dest->nr_events += src->nr_events; 261 dest->weight += src->weight; 262 } 263 264 static void he_stat__decay(struct he_stat *he_stat) 265 { 266 he_stat->period = (he_stat->period * 7) / 8; 267 he_stat->nr_events = (he_stat->nr_events * 7) / 8; 268 /* XXX need decay for weight too? */ 269 } 270 271 static void hists__delete_entry(struct hists *hists, struct hist_entry *he); 272 273 static bool hists__decay_entry(struct hists *hists, struct hist_entry *he) 274 { 275 u64 prev_period = he->stat.period; 276 u64 diff; 277 278 if (prev_period == 0) 279 return true; 280 281 he_stat__decay(&he->stat); 282 if (symbol_conf.cumulate_callchain) 283 he_stat__decay(he->stat_acc); 284 decay_callchain(he->callchain); 285 286 diff = prev_period - he->stat.period; 287 288 if (!he->depth) { 289 hists->stats.total_period -= diff; 290 if (!he->filtered) 291 hists->stats.total_non_filtered_period -= diff; 292 } 293 294 if (!he->leaf) { 295 struct hist_entry *child; 296 struct rb_node *node = rb_first(&he->hroot_out); 297 while (node) { 298 child = rb_entry(node, struct hist_entry, rb_node); 299 node = rb_next(node); 300 301 if (hists__decay_entry(hists, child)) 302 hists__delete_entry(hists, child); 303 } 304 } 305 306 return he->stat.period == 0; 307 } 308 309 static void hists__delete_entry(struct hists *hists, struct hist_entry *he) 310 { 311 struct rb_root *root_in; 312 struct rb_root *root_out; 313 314 if (he->parent_he) { 315 root_in = &he->parent_he->hroot_in; 316 root_out = &he->parent_he->hroot_out; 317 } else { 318 if (hists__has(hists, need_collapse)) 319 root_in = &hists->entries_collapsed; 320 else 321 root_in = hists->entries_in; 322 root_out = &hists->entries; 323 } 324 325 rb_erase(&he->rb_node_in, root_in); 326 rb_erase(&he->rb_node, root_out); 327 328 --hists->nr_entries; 329 if (!he->filtered) 330 --hists->nr_non_filtered_entries; 331 332 hist_entry__delete(he); 333 } 334 335 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel) 336 { 337 struct rb_node *next = rb_first(&hists->entries); 338 struct hist_entry *n; 339 340 while (next) { 341 n = rb_entry(next, struct hist_entry, rb_node); 342 next = rb_next(&n->rb_node); 343 if (((zap_user && n->level == '.') || 344 (zap_kernel && n->level != '.') || 345 hists__decay_entry(hists, n))) { 346 hists__delete_entry(hists, n); 347 } 348 } 349 } 350 351 void hists__delete_entries(struct hists *hists) 352 { 353 struct rb_node *next = rb_first(&hists->entries); 354 struct hist_entry *n; 355 356 while (next) { 357 n = rb_entry(next, struct hist_entry, rb_node); 358 next = rb_next(&n->rb_node); 359 360 hists__delete_entry(hists, n); 361 } 362 } 363 364 /* 365 * histogram, sorted on item, collects periods 366 */ 367 368 static int hist_entry__init(struct hist_entry *he, 369 struct hist_entry *template, 370 bool sample_self) 371 { 
372 *he = *template; 373 374 if (symbol_conf.cumulate_callchain) { 375 he->stat_acc = malloc(sizeof(he->stat)); 376 if (he->stat_acc == NULL) 377 return -ENOMEM; 378 memcpy(he->stat_acc, &he->stat, sizeof(he->stat)); 379 if (!sample_self) 380 memset(&he->stat, 0, sizeof(he->stat)); 381 } 382 383 map__get(he->ms.map); 384 385 if (he->branch_info) { 386 /* 387 * This branch info is (a part of) allocated from 388 * sample__resolve_bstack() and will be freed after 389 * adding new entries. So we need to save a copy. 390 */ 391 he->branch_info = malloc(sizeof(*he->branch_info)); 392 if (he->branch_info == NULL) { 393 map__zput(he->ms.map); 394 free(he->stat_acc); 395 return -ENOMEM; 396 } 397 398 memcpy(he->branch_info, template->branch_info, 399 sizeof(*he->branch_info)); 400 401 map__get(he->branch_info->from.map); 402 map__get(he->branch_info->to.map); 403 } 404 405 if (he->mem_info) { 406 map__get(he->mem_info->iaddr.map); 407 map__get(he->mem_info->daddr.map); 408 } 409 410 if (symbol_conf.use_callchain) 411 callchain_init(he->callchain); 412 413 if (he->raw_data) { 414 he->raw_data = memdup(he->raw_data, he->raw_size); 415 416 if (he->raw_data == NULL) { 417 map__put(he->ms.map); 418 if (he->branch_info) { 419 map__put(he->branch_info->from.map); 420 map__put(he->branch_info->to.map); 421 free(he->branch_info); 422 } 423 if (he->mem_info) { 424 map__put(he->mem_info->iaddr.map); 425 map__put(he->mem_info->daddr.map); 426 } 427 free(he->stat_acc); 428 return -ENOMEM; 429 } 430 } 431 INIT_LIST_HEAD(&he->pairs.node); 432 thread__get(he->thread); 433 he->hroot_in = RB_ROOT; 434 he->hroot_out = RB_ROOT; 435 436 if (!symbol_conf.report_hierarchy) 437 he->leaf = true; 438 439 return 0; 440 } 441 442 static void *hist_entry__zalloc(size_t size) 443 { 444 return zalloc(size + sizeof(struct hist_entry)); 445 } 446 447 static void hist_entry__free(void *ptr) 448 { 449 free(ptr); 450 } 451 452 static struct hist_entry_ops default_ops = { 453 .new = hist_entry__zalloc, 454 .free = hist_entry__free, 455 }; 456 457 static struct hist_entry *hist_entry__new(struct hist_entry *template, 458 bool sample_self) 459 { 460 struct hist_entry_ops *ops = template->ops; 461 size_t callchain_size = 0; 462 struct hist_entry *he; 463 int err = 0; 464 465 if (!ops) 466 ops = template->ops = &default_ops; 467 468 if (symbol_conf.use_callchain) 469 callchain_size = sizeof(struct callchain_root); 470 471 he = ops->new(callchain_size); 472 if (he) { 473 err = hist_entry__init(he, template, sample_self); 474 if (err) { 475 ops->free(he); 476 he = NULL; 477 } 478 } 479 480 return he; 481 } 482 483 static u8 symbol__parent_filter(const struct symbol *parent) 484 { 485 if (symbol_conf.exclude_other && parent == NULL) 486 return 1 << HIST_FILTER__PARENT; 487 return 0; 488 } 489 490 static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period) 491 { 492 if (!symbol_conf.use_callchain) 493 return; 494 495 he->hists->callchain_period += period; 496 if (!he->filtered) 497 he->hists->callchain_non_filtered_period += period; 498 } 499 500 static struct hist_entry *hists__findnew_entry(struct hists *hists, 501 struct hist_entry *entry, 502 struct addr_location *al, 503 bool sample_self) 504 { 505 struct rb_node **p; 506 struct rb_node *parent = NULL; 507 struct hist_entry *he; 508 int64_t cmp; 509 u64 period = entry->stat.period; 510 u64 weight = entry->stat.weight; 511 512 p = &hists->entries_in->rb_node; 513 514 while (*p != NULL) { 515 parent = *p; 516 he = rb_entry(parent, struct hist_entry, rb_node_in); 517 518 
/* 519 * Make sure that it receives arguments in a same order as 520 * hist_entry__collapse() so that we can use an appropriate 521 * function when searching an entry regardless which sort 522 * keys were used. 523 */ 524 cmp = hist_entry__cmp(he, entry); 525 526 if (!cmp) { 527 if (sample_self) { 528 he_stat__add_period(&he->stat, period, weight); 529 hist_entry__add_callchain_period(he, period); 530 } 531 if (symbol_conf.cumulate_callchain) 532 he_stat__add_period(he->stat_acc, period, weight); 533 534 /* 535 * This mem info was allocated from sample__resolve_mem 536 * and will not be used anymore. 537 */ 538 zfree(&entry->mem_info); 539 540 /* If the map of an existing hist_entry has 541 * become out-of-date due to an exec() or 542 * similar, update it. Otherwise we will 543 * mis-adjust symbol addresses when computing 544 * the history counter to increment. 545 */ 546 if (he->ms.map != entry->ms.map) { 547 map__put(he->ms.map); 548 he->ms.map = map__get(entry->ms.map); 549 } 550 goto out; 551 } 552 553 if (cmp < 0) 554 p = &(*p)->rb_left; 555 else 556 p = &(*p)->rb_right; 557 } 558 559 he = hist_entry__new(entry, sample_self); 560 if (!he) 561 return NULL; 562 563 if (sample_self) 564 hist_entry__add_callchain_period(he, period); 565 hists->nr_entries++; 566 567 rb_link_node(&he->rb_node_in, parent, p); 568 rb_insert_color(&he->rb_node_in, hists->entries_in); 569 out: 570 if (sample_self) 571 he_stat__add_cpumode_period(&he->stat, al->cpumode, period); 572 if (symbol_conf.cumulate_callchain) 573 he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period); 574 return he; 575 } 576 577 static struct hist_entry* 578 __hists__add_entry(struct hists *hists, 579 struct addr_location *al, 580 struct symbol *sym_parent, 581 struct branch_info *bi, 582 struct mem_info *mi, 583 struct perf_sample *sample, 584 bool sample_self, 585 struct hist_entry_ops *ops) 586 { 587 struct namespaces *ns = thread__namespaces(al->thread); 588 struct hist_entry entry = { 589 .thread = al->thread, 590 .comm = thread__comm(al->thread), 591 .cgroup_id = { 592 .dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0, 593 .ino = ns ? 
ns->link_info[CGROUP_NS_INDEX].ino : 0, 594 }, 595 .ms = { 596 .map = al->map, 597 .sym = al->sym, 598 }, 599 .socket = al->socket, 600 .cpu = al->cpu, 601 .cpumode = al->cpumode, 602 .ip = al->addr, 603 .level = al->level, 604 .stat = { 605 .nr_events = 1, 606 .period = sample->period, 607 .weight = sample->weight, 608 }, 609 .parent = sym_parent, 610 .filtered = symbol__parent_filter(sym_parent) | al->filtered, 611 .hists = hists, 612 .branch_info = bi, 613 .mem_info = mi, 614 .transaction = sample->transaction, 615 .raw_data = sample->raw_data, 616 .raw_size = sample->raw_size, 617 .ops = ops, 618 }; 619 620 return hists__findnew_entry(hists, &entry, al, sample_self); 621 } 622 623 struct hist_entry *hists__add_entry(struct hists *hists, 624 struct addr_location *al, 625 struct symbol *sym_parent, 626 struct branch_info *bi, 627 struct mem_info *mi, 628 struct perf_sample *sample, 629 bool sample_self) 630 { 631 return __hists__add_entry(hists, al, sym_parent, bi, mi, 632 sample, sample_self, NULL); 633 } 634 635 struct hist_entry *hists__add_entry_ops(struct hists *hists, 636 struct hist_entry_ops *ops, 637 struct addr_location *al, 638 struct symbol *sym_parent, 639 struct branch_info *bi, 640 struct mem_info *mi, 641 struct perf_sample *sample, 642 bool sample_self) 643 { 644 return __hists__add_entry(hists, al, sym_parent, bi, mi, 645 sample, sample_self, ops); 646 } 647 648 static int 649 iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused, 650 struct addr_location *al __maybe_unused) 651 { 652 return 0; 653 } 654 655 static int 656 iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused, 657 struct addr_location *al __maybe_unused) 658 { 659 return 0; 660 } 661 662 static int 663 iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al) 664 { 665 struct perf_sample *sample = iter->sample; 666 struct mem_info *mi; 667 668 mi = sample__resolve_mem(sample, al); 669 if (mi == NULL) 670 return -ENOMEM; 671 672 iter->priv = mi; 673 return 0; 674 } 675 676 static int 677 iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al) 678 { 679 u64 cost; 680 struct mem_info *mi = iter->priv; 681 struct hists *hists = evsel__hists(iter->evsel); 682 struct perf_sample *sample = iter->sample; 683 struct hist_entry *he; 684 685 if (mi == NULL) 686 return -EINVAL; 687 688 cost = sample->weight; 689 if (!cost) 690 cost = 1; 691 692 /* 693 * must pass period=weight in order to get the correct 694 * sorting from hists__collapse_resort() which is solely 695 * based on periods. We want sorting be done on nr_events * weight 696 * and this is indirectly achieved by passing period=weight here 697 * and the he_stat__add_period() function. 
698 */ 699 sample->period = cost; 700 701 he = hists__add_entry(hists, al, iter->parent, NULL, mi, 702 sample, true); 703 if (!he) 704 return -ENOMEM; 705 706 iter->he = he; 707 return 0; 708 } 709 710 static int 711 iter_finish_mem_entry(struct hist_entry_iter *iter, 712 struct addr_location *al __maybe_unused) 713 { 714 struct perf_evsel *evsel = iter->evsel; 715 struct hists *hists = evsel__hists(evsel); 716 struct hist_entry *he = iter->he; 717 int err = -EINVAL; 718 719 if (he == NULL) 720 goto out; 721 722 hists__inc_nr_samples(hists, he->filtered); 723 724 err = hist_entry__append_callchain(he, iter->sample); 725 726 out: 727 /* 728 * We don't need to free iter->priv (mem_info) here since the mem info 729 * was either already freed in hists__findnew_entry() or passed to a 730 * new hist entry by hist_entry__new(). 731 */ 732 iter->priv = NULL; 733 734 iter->he = NULL; 735 return err; 736 } 737 738 static int 739 iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al) 740 { 741 struct branch_info *bi; 742 struct perf_sample *sample = iter->sample; 743 744 bi = sample__resolve_bstack(sample, al); 745 if (!bi) 746 return -ENOMEM; 747 748 iter->curr = 0; 749 iter->total = sample->branch_stack->nr; 750 751 iter->priv = bi; 752 return 0; 753 } 754 755 static int 756 iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused, 757 struct addr_location *al __maybe_unused) 758 { 759 return 0; 760 } 761 762 static int 763 iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al) 764 { 765 struct branch_info *bi = iter->priv; 766 int i = iter->curr; 767 768 if (bi == NULL) 769 return 0; 770 771 if (iter->curr >= iter->total) 772 return 0; 773 774 al->map = bi[i].to.map; 775 al->sym = bi[i].to.sym; 776 al->addr = bi[i].to.addr; 777 return 1; 778 } 779 780 static int 781 iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al) 782 { 783 struct branch_info *bi; 784 struct perf_evsel *evsel = iter->evsel; 785 struct hists *hists = evsel__hists(evsel); 786 struct perf_sample *sample = iter->sample; 787 struct hist_entry *he = NULL; 788 int i = iter->curr; 789 int err = 0; 790 791 bi = iter->priv; 792 793 if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym)) 794 goto out; 795 796 /* 797 * The report shows the percentage of total branches captured 798 * and not events sampled. Thus we use a pseudo period of 1. 799 */ 800 sample->period = 1; 801 sample->weight = bi->flags.cycles ? bi->flags.cycles : 1; 802 803 he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL, 804 sample, true); 805 if (he == NULL) 806 return -ENOMEM; 807 808 hists__inc_nr_samples(hists, he->filtered); 809 810 out: 811 iter->he = he; 812 iter->curr++; 813 return err; 814 } 815 816 static int 817 iter_finish_branch_entry(struct hist_entry_iter *iter, 818 struct addr_location *al __maybe_unused) 819 { 820 zfree(&iter->priv); 821 iter->he = NULL; 822 823 return iter->curr >= iter->total ? 
0 : -1; 824 } 825 826 static int 827 iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused, 828 struct addr_location *al __maybe_unused) 829 { 830 return 0; 831 } 832 833 static int 834 iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al) 835 { 836 struct perf_evsel *evsel = iter->evsel; 837 struct perf_sample *sample = iter->sample; 838 struct hist_entry *he; 839 840 he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL, 841 sample, true); 842 if (he == NULL) 843 return -ENOMEM; 844 845 iter->he = he; 846 return 0; 847 } 848 849 static int 850 iter_finish_normal_entry(struct hist_entry_iter *iter, 851 struct addr_location *al __maybe_unused) 852 { 853 struct hist_entry *he = iter->he; 854 struct perf_evsel *evsel = iter->evsel; 855 struct perf_sample *sample = iter->sample; 856 857 if (he == NULL) 858 return 0; 859 860 iter->he = NULL; 861 862 hists__inc_nr_samples(evsel__hists(evsel), he->filtered); 863 864 return hist_entry__append_callchain(he, sample); 865 } 866 867 static int 868 iter_prepare_cumulative_entry(struct hist_entry_iter *iter, 869 struct addr_location *al __maybe_unused) 870 { 871 struct hist_entry **he_cache; 872 873 callchain_cursor_commit(&callchain_cursor); 874 875 /* 876 * This is for detecting cycles or recursions so that they're 877 * cumulated only one time to prevent entries more than 100% 878 * overhead. 879 */ 880 he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1)); 881 if (he_cache == NULL) 882 return -ENOMEM; 883 884 iter->priv = he_cache; 885 iter->curr = 0; 886 887 return 0; 888 } 889 890 static int 891 iter_add_single_cumulative_entry(struct hist_entry_iter *iter, 892 struct addr_location *al) 893 { 894 struct perf_evsel *evsel = iter->evsel; 895 struct hists *hists = evsel__hists(evsel); 896 struct perf_sample *sample = iter->sample; 897 struct hist_entry **he_cache = iter->priv; 898 struct hist_entry *he; 899 int err = 0; 900 901 he = hists__add_entry(hists, al, iter->parent, NULL, NULL, 902 sample, true); 903 if (he == NULL) 904 return -ENOMEM; 905 906 iter->he = he; 907 he_cache[iter->curr++] = he; 908 909 hist_entry__append_callchain(he, sample); 910 911 /* 912 * We need to re-initialize the cursor since callchain_append() 913 * advanced the cursor to the end. 
914 */ 915 callchain_cursor_commit(&callchain_cursor); 916 917 hists__inc_nr_samples(hists, he->filtered); 918 919 return err; 920 } 921 922 static int 923 iter_next_cumulative_entry(struct hist_entry_iter *iter, 924 struct addr_location *al) 925 { 926 struct callchain_cursor_node *node; 927 928 node = callchain_cursor_current(&callchain_cursor); 929 if (node == NULL) 930 return 0; 931 932 return fill_callchain_info(al, node, iter->hide_unresolved); 933 } 934 935 static int 936 iter_add_next_cumulative_entry(struct hist_entry_iter *iter, 937 struct addr_location *al) 938 { 939 struct perf_evsel *evsel = iter->evsel; 940 struct perf_sample *sample = iter->sample; 941 struct hist_entry **he_cache = iter->priv; 942 struct hist_entry *he; 943 struct hist_entry he_tmp = { 944 .hists = evsel__hists(evsel), 945 .cpu = al->cpu, 946 .thread = al->thread, 947 .comm = thread__comm(al->thread), 948 .ip = al->addr, 949 .ms = { 950 .map = al->map, 951 .sym = al->sym, 952 }, 953 .parent = iter->parent, 954 .raw_data = sample->raw_data, 955 .raw_size = sample->raw_size, 956 }; 957 int i; 958 struct callchain_cursor cursor; 959 960 callchain_cursor_snapshot(&cursor, &callchain_cursor); 961 962 callchain_cursor_advance(&callchain_cursor); 963 964 /* 965 * Check if there's duplicate entries in the callchain. 966 * It's possible that it has cycles or recursive calls. 967 */ 968 for (i = 0; i < iter->curr; i++) { 969 if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) { 970 /* to avoid calling callback function */ 971 iter->he = NULL; 972 return 0; 973 } 974 } 975 976 he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL, 977 sample, false); 978 if (he == NULL) 979 return -ENOMEM; 980 981 iter->he = he; 982 he_cache[iter->curr++] = he; 983 984 if (symbol_conf.use_callchain) 985 callchain_append(he->callchain, &cursor, sample->period); 986 return 0; 987 } 988 989 static int 990 iter_finish_cumulative_entry(struct hist_entry_iter *iter, 991 struct addr_location *al __maybe_unused) 992 { 993 zfree(&iter->priv); 994 iter->he = NULL; 995 996 return 0; 997 } 998 999 const struct hist_iter_ops hist_iter_mem = { 1000 .prepare_entry = iter_prepare_mem_entry, 1001 .add_single_entry = iter_add_single_mem_entry, 1002 .next_entry = iter_next_nop_entry, 1003 .add_next_entry = iter_add_next_nop_entry, 1004 .finish_entry = iter_finish_mem_entry, 1005 }; 1006 1007 const struct hist_iter_ops hist_iter_branch = { 1008 .prepare_entry = iter_prepare_branch_entry, 1009 .add_single_entry = iter_add_single_branch_entry, 1010 .next_entry = iter_next_branch_entry, 1011 .add_next_entry = iter_add_next_branch_entry, 1012 .finish_entry = iter_finish_branch_entry, 1013 }; 1014 1015 const struct hist_iter_ops hist_iter_normal = { 1016 .prepare_entry = iter_prepare_normal_entry, 1017 .add_single_entry = iter_add_single_normal_entry, 1018 .next_entry = iter_next_nop_entry, 1019 .add_next_entry = iter_add_next_nop_entry, 1020 .finish_entry = iter_finish_normal_entry, 1021 }; 1022 1023 const struct hist_iter_ops hist_iter_cumulative = { 1024 .prepare_entry = iter_prepare_cumulative_entry, 1025 .add_single_entry = iter_add_single_cumulative_entry, 1026 .next_entry = iter_next_cumulative_entry, 1027 .add_next_entry = iter_add_next_cumulative_entry, 1028 .finish_entry = iter_finish_cumulative_entry, 1029 }; 1030 1031 int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al, 1032 int max_stack_depth, void *arg) 1033 { 1034 int err, err2; 1035 struct map *alm = NULL; 1036 1037 if (al && al->map) 1038 alm = 
map__get(al->map); 1039 1040 err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent, 1041 iter->evsel, al, max_stack_depth); 1042 if (err) 1043 return err; 1044 1045 iter->max_stack = max_stack_depth; 1046 1047 err = iter->ops->prepare_entry(iter, al); 1048 if (err) 1049 goto out; 1050 1051 err = iter->ops->add_single_entry(iter, al); 1052 if (err) 1053 goto out; 1054 1055 if (iter->he && iter->add_entry_cb) { 1056 err = iter->add_entry_cb(iter, al, true, arg); 1057 if (err) 1058 goto out; 1059 } 1060 1061 while (iter->ops->next_entry(iter, al)) { 1062 err = iter->ops->add_next_entry(iter, al); 1063 if (err) 1064 break; 1065 1066 if (iter->he && iter->add_entry_cb) { 1067 err = iter->add_entry_cb(iter, al, false, arg); 1068 if (err) 1069 goto out; 1070 } 1071 } 1072 1073 out: 1074 err2 = iter->ops->finish_entry(iter, al); 1075 if (!err) 1076 err = err2; 1077 1078 map__put(alm); 1079 1080 return err; 1081 } 1082 1083 int64_t 1084 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) 1085 { 1086 struct hists *hists = left->hists; 1087 struct perf_hpp_fmt *fmt; 1088 int64_t cmp = 0; 1089 1090 hists__for_each_sort_list(hists, fmt) { 1091 if (perf_hpp__is_dynamic_entry(fmt) && 1092 !perf_hpp__defined_dynamic_entry(fmt, hists)) 1093 continue; 1094 1095 cmp = fmt->cmp(fmt, left, right); 1096 if (cmp) 1097 break; 1098 } 1099 1100 return cmp; 1101 } 1102 1103 int64_t 1104 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) 1105 { 1106 struct hists *hists = left->hists; 1107 struct perf_hpp_fmt *fmt; 1108 int64_t cmp = 0; 1109 1110 hists__for_each_sort_list(hists, fmt) { 1111 if (perf_hpp__is_dynamic_entry(fmt) && 1112 !perf_hpp__defined_dynamic_entry(fmt, hists)) 1113 continue; 1114 1115 cmp = fmt->collapse(fmt, left, right); 1116 if (cmp) 1117 break; 1118 } 1119 1120 return cmp; 1121 } 1122 1123 void hist_entry__delete(struct hist_entry *he) 1124 { 1125 struct hist_entry_ops *ops = he->ops; 1126 1127 thread__zput(he->thread); 1128 map__zput(he->ms.map); 1129 1130 if (he->branch_info) { 1131 map__zput(he->branch_info->from.map); 1132 map__zput(he->branch_info->to.map); 1133 free_srcline(he->branch_info->srcline_from); 1134 free_srcline(he->branch_info->srcline_to); 1135 zfree(&he->branch_info); 1136 } 1137 1138 if (he->mem_info) { 1139 map__zput(he->mem_info->iaddr.map); 1140 map__zput(he->mem_info->daddr.map); 1141 zfree(&he->mem_info); 1142 } 1143 1144 if (he->inline_node) { 1145 inline_node__delete(he->inline_node); 1146 he->inline_node = NULL; 1147 } 1148 1149 zfree(&he->stat_acc); 1150 free_srcline(he->srcline); 1151 if (he->srcfile && he->srcfile[0]) 1152 free(he->srcfile); 1153 free_callchain(he->callchain); 1154 free(he->trace_output); 1155 free(he->raw_data); 1156 ops->free(he); 1157 } 1158 1159 /* 1160 * If this is not the last column, then we need to pad it according to the 1161 * pre-calculated max lenght for this column, otherwise don't bother adding 1162 * spaces because that would break viewing this with, for instance, 'less', 1163 * that would show tons of trailing spaces when a long C++ demangled method 1164 * names is sampled. 
1165 */ 1166 int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp, 1167 struct perf_hpp_fmt *fmt, int printed) 1168 { 1169 if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) { 1170 const int width = fmt->width(fmt, hpp, he->hists); 1171 if (printed < width) { 1172 advance_hpp(hpp, printed); 1173 printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " "); 1174 } 1175 } 1176 1177 return printed; 1178 } 1179 1180 /* 1181 * collapse the histogram 1182 */ 1183 1184 static void hists__apply_filters(struct hists *hists, struct hist_entry *he); 1185 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he, 1186 enum hist_filter type); 1187 1188 typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt); 1189 1190 static bool check_thread_entry(struct perf_hpp_fmt *fmt) 1191 { 1192 return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt); 1193 } 1194 1195 static void hist_entry__check_and_remove_filter(struct hist_entry *he, 1196 enum hist_filter type, 1197 fmt_chk_fn check) 1198 { 1199 struct perf_hpp_fmt *fmt; 1200 bool type_match = false; 1201 struct hist_entry *parent = he->parent_he; 1202 1203 switch (type) { 1204 case HIST_FILTER__THREAD: 1205 if (symbol_conf.comm_list == NULL && 1206 symbol_conf.pid_list == NULL && 1207 symbol_conf.tid_list == NULL) 1208 return; 1209 break; 1210 case HIST_FILTER__DSO: 1211 if (symbol_conf.dso_list == NULL) 1212 return; 1213 break; 1214 case HIST_FILTER__SYMBOL: 1215 if (symbol_conf.sym_list == NULL) 1216 return; 1217 break; 1218 case HIST_FILTER__PARENT: 1219 case HIST_FILTER__GUEST: 1220 case HIST_FILTER__HOST: 1221 case HIST_FILTER__SOCKET: 1222 case HIST_FILTER__C2C: 1223 default: 1224 return; 1225 } 1226 1227 /* if it's filtered by own fmt, it has to have filter bits */ 1228 perf_hpp_list__for_each_format(he->hpp_list, fmt) { 1229 if (check(fmt)) { 1230 type_match = true; 1231 break; 1232 } 1233 } 1234 1235 if (type_match) { 1236 /* 1237 * If the filter is for current level entry, propagate 1238 * filter marker to parents. The marker bit was 1239 * already set by default so it only needs to clear 1240 * non-filtered entries. 1241 */ 1242 if (!(he->filtered & (1 << type))) { 1243 while (parent) { 1244 parent->filtered &= ~(1 << type); 1245 parent = parent->parent_he; 1246 } 1247 } 1248 } else { 1249 /* 1250 * If current entry doesn't have matching formats, set 1251 * filter marker for upper level entries. it will be 1252 * cleared if its lower level entries is not filtered. 1253 * 1254 * For lower-level entries, it inherits parent's 1255 * filter bit so that lower level entries of a 1256 * non-filtered entry won't set the filter marker. 
1257 */ 1258 if (parent == NULL) 1259 he->filtered |= (1 << type); 1260 else 1261 he->filtered |= (parent->filtered & (1 << type)); 1262 } 1263 } 1264 1265 static void hist_entry__apply_hierarchy_filters(struct hist_entry *he) 1266 { 1267 hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD, 1268 check_thread_entry); 1269 1270 hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO, 1271 perf_hpp__is_dso_entry); 1272 1273 hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL, 1274 perf_hpp__is_sym_entry); 1275 1276 hists__apply_filters(he->hists, he); 1277 } 1278 1279 static struct hist_entry *hierarchy_insert_entry(struct hists *hists, 1280 struct rb_root *root, 1281 struct hist_entry *he, 1282 struct hist_entry *parent_he, 1283 struct perf_hpp_list *hpp_list) 1284 { 1285 struct rb_node **p = &root->rb_node; 1286 struct rb_node *parent = NULL; 1287 struct hist_entry *iter, *new; 1288 struct perf_hpp_fmt *fmt; 1289 int64_t cmp; 1290 1291 while (*p != NULL) { 1292 parent = *p; 1293 iter = rb_entry(parent, struct hist_entry, rb_node_in); 1294 1295 cmp = 0; 1296 perf_hpp_list__for_each_sort_list(hpp_list, fmt) { 1297 cmp = fmt->collapse(fmt, iter, he); 1298 if (cmp) 1299 break; 1300 } 1301 1302 if (!cmp) { 1303 he_stat__add_stat(&iter->stat, &he->stat); 1304 return iter; 1305 } 1306 1307 if (cmp < 0) 1308 p = &parent->rb_left; 1309 else 1310 p = &parent->rb_right; 1311 } 1312 1313 new = hist_entry__new(he, true); 1314 if (new == NULL) 1315 return NULL; 1316 1317 hists->nr_entries++; 1318 1319 /* save related format list for output */ 1320 new->hpp_list = hpp_list; 1321 new->parent_he = parent_he; 1322 1323 hist_entry__apply_hierarchy_filters(new); 1324 1325 /* some fields are now passed to 'new' */ 1326 perf_hpp_list__for_each_sort_list(hpp_list, fmt) { 1327 if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt)) 1328 he->trace_output = NULL; 1329 else 1330 new->trace_output = NULL; 1331 1332 if (perf_hpp__is_srcline_entry(fmt)) 1333 he->srcline = NULL; 1334 else 1335 new->srcline = NULL; 1336 1337 if (perf_hpp__is_srcfile_entry(fmt)) 1338 he->srcfile = NULL; 1339 else 1340 new->srcfile = NULL; 1341 } 1342 1343 rb_link_node(&new->rb_node_in, parent, p); 1344 rb_insert_color(&new->rb_node_in, root); 1345 return new; 1346 } 1347 1348 static int hists__hierarchy_insert_entry(struct hists *hists, 1349 struct rb_root *root, 1350 struct hist_entry *he) 1351 { 1352 struct perf_hpp_list_node *node; 1353 struct hist_entry *new_he = NULL; 1354 struct hist_entry *parent = NULL; 1355 int depth = 0; 1356 int ret = 0; 1357 1358 list_for_each_entry(node, &hists->hpp_formats, list) { 1359 /* skip period (overhead) and elided columns */ 1360 if (node->level == 0 || node->skip) 1361 continue; 1362 1363 /* insert copy of 'he' for each fmt into the hierarchy */ 1364 new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp); 1365 if (new_he == NULL) { 1366 ret = -1; 1367 break; 1368 } 1369 1370 root = &new_he->hroot_in; 1371 new_he->depth = depth++; 1372 parent = new_he; 1373 } 1374 1375 if (new_he) { 1376 new_he->leaf = true; 1377 1378 if (symbol_conf.use_callchain) { 1379 callchain_cursor_reset(&callchain_cursor); 1380 if (callchain_merge(&callchain_cursor, 1381 new_he->callchain, 1382 he->callchain) < 0) 1383 ret = -1; 1384 } 1385 } 1386 1387 /* 'he' is no longer used */ 1388 hist_entry__delete(he); 1389 1390 /* return 0 (or -1) since it already applied filters */ 1391 return ret; 1392 } 1393 1394 static int hists__collapse_insert_entry(struct hists *hists, 1395 struct 
rb_root *root, 1396 struct hist_entry *he) 1397 { 1398 struct rb_node **p = &root->rb_node; 1399 struct rb_node *parent = NULL; 1400 struct hist_entry *iter; 1401 int64_t cmp; 1402 1403 if (symbol_conf.report_hierarchy) 1404 return hists__hierarchy_insert_entry(hists, root, he); 1405 1406 while (*p != NULL) { 1407 parent = *p; 1408 iter = rb_entry(parent, struct hist_entry, rb_node_in); 1409 1410 cmp = hist_entry__collapse(iter, he); 1411 1412 if (!cmp) { 1413 int ret = 0; 1414 1415 he_stat__add_stat(&iter->stat, &he->stat); 1416 if (symbol_conf.cumulate_callchain) 1417 he_stat__add_stat(iter->stat_acc, he->stat_acc); 1418 1419 if (symbol_conf.use_callchain) { 1420 callchain_cursor_reset(&callchain_cursor); 1421 if (callchain_merge(&callchain_cursor, 1422 iter->callchain, 1423 he->callchain) < 0) 1424 ret = -1; 1425 } 1426 hist_entry__delete(he); 1427 return ret; 1428 } 1429 1430 if (cmp < 0) 1431 p = &(*p)->rb_left; 1432 else 1433 p = &(*p)->rb_right; 1434 } 1435 hists->nr_entries++; 1436 1437 rb_link_node(&he->rb_node_in, parent, p); 1438 rb_insert_color(&he->rb_node_in, root); 1439 return 1; 1440 } 1441 1442 struct rb_root *hists__get_rotate_entries_in(struct hists *hists) 1443 { 1444 struct rb_root *root; 1445 1446 pthread_mutex_lock(&hists->lock); 1447 1448 root = hists->entries_in; 1449 if (++hists->entries_in > &hists->entries_in_array[1]) 1450 hists->entries_in = &hists->entries_in_array[0]; 1451 1452 pthread_mutex_unlock(&hists->lock); 1453 1454 return root; 1455 } 1456 1457 static void hists__apply_filters(struct hists *hists, struct hist_entry *he) 1458 { 1459 hists__filter_entry_by_dso(hists, he); 1460 hists__filter_entry_by_thread(hists, he); 1461 hists__filter_entry_by_symbol(hists, he); 1462 hists__filter_entry_by_socket(hists, he); 1463 } 1464 1465 int hists__collapse_resort(struct hists *hists, struct ui_progress *prog) 1466 { 1467 struct rb_root *root; 1468 struct rb_node *next; 1469 struct hist_entry *n; 1470 int ret; 1471 1472 if (!hists__has(hists, need_collapse)) 1473 return 0; 1474 1475 hists->nr_entries = 0; 1476 1477 root = hists__get_rotate_entries_in(hists); 1478 1479 next = rb_first(root); 1480 1481 while (next) { 1482 if (session_done()) 1483 break; 1484 n = rb_entry(next, struct hist_entry, rb_node_in); 1485 next = rb_next(&n->rb_node_in); 1486 1487 rb_erase(&n->rb_node_in, root); 1488 ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n); 1489 if (ret < 0) 1490 return -1; 1491 1492 if (ret) { 1493 /* 1494 * If it wasn't combined with one of the entries already 1495 * collapsed, we need to apply the filters that may have 1496 * been set by, say, the hist_browser. 
1497 */ 1498 hists__apply_filters(hists, n); 1499 } 1500 if (prog) 1501 ui_progress__update(prog, 1); 1502 } 1503 return 0; 1504 } 1505 1506 static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b) 1507 { 1508 struct hists *hists = a->hists; 1509 struct perf_hpp_fmt *fmt; 1510 int64_t cmp = 0; 1511 1512 hists__for_each_sort_list(hists, fmt) { 1513 if (perf_hpp__should_skip(fmt, a->hists)) 1514 continue; 1515 1516 cmp = fmt->sort(fmt, a, b); 1517 if (cmp) 1518 break; 1519 } 1520 1521 return cmp; 1522 } 1523 1524 static void hists__reset_filter_stats(struct hists *hists) 1525 { 1526 hists->nr_non_filtered_entries = 0; 1527 hists->stats.total_non_filtered_period = 0; 1528 } 1529 1530 void hists__reset_stats(struct hists *hists) 1531 { 1532 hists->nr_entries = 0; 1533 hists->stats.total_period = 0; 1534 1535 hists__reset_filter_stats(hists); 1536 } 1537 1538 static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h) 1539 { 1540 hists->nr_non_filtered_entries++; 1541 hists->stats.total_non_filtered_period += h->stat.period; 1542 } 1543 1544 void hists__inc_stats(struct hists *hists, struct hist_entry *h) 1545 { 1546 if (!h->filtered) 1547 hists__inc_filter_stats(hists, h); 1548 1549 hists->nr_entries++; 1550 hists->stats.total_period += h->stat.period; 1551 } 1552 1553 static void hierarchy_recalc_total_periods(struct hists *hists) 1554 { 1555 struct rb_node *node; 1556 struct hist_entry *he; 1557 1558 node = rb_first(&hists->entries); 1559 1560 hists->stats.total_period = 0; 1561 hists->stats.total_non_filtered_period = 0; 1562 1563 /* 1564 * recalculate total period using top-level entries only 1565 * since lower level entries only see non-filtered entries 1566 * but upper level entries have sum of both entries. 1567 */ 1568 while (node) { 1569 he = rb_entry(node, struct hist_entry, rb_node); 1570 node = rb_next(node); 1571 1572 hists->stats.total_period += he->stat.period; 1573 if (!he->filtered) 1574 hists->stats.total_non_filtered_period += he->stat.period; 1575 } 1576 } 1577 1578 static void hierarchy_insert_output_entry(struct rb_root *root, 1579 struct hist_entry *he) 1580 { 1581 struct rb_node **p = &root->rb_node; 1582 struct rb_node *parent = NULL; 1583 struct hist_entry *iter; 1584 struct perf_hpp_fmt *fmt; 1585 1586 while (*p != NULL) { 1587 parent = *p; 1588 iter = rb_entry(parent, struct hist_entry, rb_node); 1589 1590 if (hist_entry__sort(he, iter) > 0) 1591 p = &parent->rb_left; 1592 else 1593 p = &parent->rb_right; 1594 } 1595 1596 rb_link_node(&he->rb_node, parent, p); 1597 rb_insert_color(&he->rb_node, root); 1598 1599 /* update column width of dynamic entry */ 1600 perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) { 1601 if (perf_hpp__is_dynamic_entry(fmt)) 1602 fmt->sort(fmt, he, NULL); 1603 } 1604 } 1605 1606 static void hists__hierarchy_output_resort(struct hists *hists, 1607 struct ui_progress *prog, 1608 struct rb_root *root_in, 1609 struct rb_root *root_out, 1610 u64 min_callchain_hits, 1611 bool use_callchain) 1612 { 1613 struct rb_node *node; 1614 struct hist_entry *he; 1615 1616 *root_out = RB_ROOT; 1617 node = rb_first(root_in); 1618 1619 while (node) { 1620 he = rb_entry(node, struct hist_entry, rb_node_in); 1621 node = rb_next(node); 1622 1623 hierarchy_insert_output_entry(root_out, he); 1624 1625 if (prog) 1626 ui_progress__update(prog, 1); 1627 1628 hists->nr_entries++; 1629 if (!he->filtered) { 1630 hists->nr_non_filtered_entries++; 1631 hists__calc_col_len(hists, he); 1632 } 1633 1634 if (!he->leaf) { 1635 
hists__hierarchy_output_resort(hists, prog, 1636 &he->hroot_in, 1637 &he->hroot_out, 1638 min_callchain_hits, 1639 use_callchain); 1640 continue; 1641 } 1642 1643 if (!use_callchain) 1644 continue; 1645 1646 if (callchain_param.mode == CHAIN_GRAPH_REL) { 1647 u64 total = he->stat.period; 1648 1649 if (symbol_conf.cumulate_callchain) 1650 total = he->stat_acc->period; 1651 1652 min_callchain_hits = total * (callchain_param.min_percent / 100); 1653 } 1654 1655 callchain_param.sort(&he->sorted_chain, he->callchain, 1656 min_callchain_hits, &callchain_param); 1657 } 1658 } 1659 1660 static void __hists__insert_output_entry(struct rb_root *entries, 1661 struct hist_entry *he, 1662 u64 min_callchain_hits, 1663 bool use_callchain) 1664 { 1665 struct rb_node **p = &entries->rb_node; 1666 struct rb_node *parent = NULL; 1667 struct hist_entry *iter; 1668 struct perf_hpp_fmt *fmt; 1669 1670 if (use_callchain) { 1671 if (callchain_param.mode == CHAIN_GRAPH_REL) { 1672 u64 total = he->stat.period; 1673 1674 if (symbol_conf.cumulate_callchain) 1675 total = he->stat_acc->period; 1676 1677 min_callchain_hits = total * (callchain_param.min_percent / 100); 1678 } 1679 callchain_param.sort(&he->sorted_chain, he->callchain, 1680 min_callchain_hits, &callchain_param); 1681 } 1682 1683 while (*p != NULL) { 1684 parent = *p; 1685 iter = rb_entry(parent, struct hist_entry, rb_node); 1686 1687 if (hist_entry__sort(he, iter) > 0) 1688 p = &(*p)->rb_left; 1689 else 1690 p = &(*p)->rb_right; 1691 } 1692 1693 rb_link_node(&he->rb_node, parent, p); 1694 rb_insert_color(&he->rb_node, entries); 1695 1696 perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) { 1697 if (perf_hpp__is_dynamic_entry(fmt) && 1698 perf_hpp__defined_dynamic_entry(fmt, he->hists)) 1699 fmt->sort(fmt, he, NULL); /* update column width */ 1700 } 1701 } 1702 1703 static void output_resort(struct hists *hists, struct ui_progress *prog, 1704 bool use_callchain, hists__resort_cb_t cb) 1705 { 1706 struct rb_root *root; 1707 struct rb_node *next; 1708 struct hist_entry *n; 1709 u64 callchain_total; 1710 u64 min_callchain_hits; 1711 1712 callchain_total = hists->callchain_period; 1713 if (symbol_conf.filter_relative) 1714 callchain_total = hists->callchain_non_filtered_period; 1715 1716 min_callchain_hits = callchain_total * (callchain_param.min_percent / 100); 1717 1718 hists__reset_stats(hists); 1719 hists__reset_col_len(hists); 1720 1721 if (symbol_conf.report_hierarchy) { 1722 hists__hierarchy_output_resort(hists, prog, 1723 &hists->entries_collapsed, 1724 &hists->entries, 1725 min_callchain_hits, 1726 use_callchain); 1727 hierarchy_recalc_total_periods(hists); 1728 return; 1729 } 1730 1731 if (hists__has(hists, need_collapse)) 1732 root = &hists->entries_collapsed; 1733 else 1734 root = hists->entries_in; 1735 1736 next = rb_first(root); 1737 hists->entries = RB_ROOT; 1738 1739 while (next) { 1740 n = rb_entry(next, struct hist_entry, rb_node_in); 1741 next = rb_next(&n->rb_node_in); 1742 1743 if (cb && cb(n)) 1744 continue; 1745 1746 __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain); 1747 hists__inc_stats(hists, n); 1748 1749 if (!n->filtered) 1750 hists__calc_col_len(hists, n); 1751 1752 if (prog) 1753 ui_progress__update(prog, 1); 1754 } 1755 } 1756 1757 void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog) 1758 { 1759 bool use_callchain; 1760 1761 if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph) 1762 use_callchain = evsel->attr.sample_type & 
PERF_SAMPLE_CALLCHAIN; 1763 else 1764 use_callchain = symbol_conf.use_callchain; 1765 1766 use_callchain |= symbol_conf.show_branchflag_count; 1767 1768 output_resort(evsel__hists(evsel), prog, use_callchain, NULL); 1769 } 1770 1771 void hists__output_resort(struct hists *hists, struct ui_progress *prog) 1772 { 1773 output_resort(hists, prog, symbol_conf.use_callchain, NULL); 1774 } 1775 1776 void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog, 1777 hists__resort_cb_t cb) 1778 { 1779 output_resort(hists, prog, symbol_conf.use_callchain, cb); 1780 } 1781 1782 static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd) 1783 { 1784 if (he->leaf || hmd == HMD_FORCE_SIBLING) 1785 return false; 1786 1787 if (he->unfolded || hmd == HMD_FORCE_CHILD) 1788 return true; 1789 1790 return false; 1791 } 1792 1793 struct rb_node *rb_hierarchy_last(struct rb_node *node) 1794 { 1795 struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node); 1796 1797 while (can_goto_child(he, HMD_NORMAL)) { 1798 node = rb_last(&he->hroot_out); 1799 he = rb_entry(node, struct hist_entry, rb_node); 1800 } 1801 return node; 1802 } 1803 1804 struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd) 1805 { 1806 struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node); 1807 1808 if (can_goto_child(he, hmd)) 1809 node = rb_first(&he->hroot_out); 1810 else 1811 node = rb_next(node); 1812 1813 while (node == NULL) { 1814 he = he->parent_he; 1815 if (he == NULL) 1816 break; 1817 1818 node = rb_next(&he->rb_node); 1819 } 1820 return node; 1821 } 1822 1823 struct rb_node *rb_hierarchy_prev(struct rb_node *node) 1824 { 1825 struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node); 1826 1827 node = rb_prev(node); 1828 if (node) 1829 return rb_hierarchy_last(node); 1830 1831 he = he->parent_he; 1832 if (he == NULL) 1833 return NULL; 1834 1835 return &he->rb_node; 1836 } 1837 1838 bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit) 1839 { 1840 struct rb_node *node; 1841 struct hist_entry *child; 1842 float percent; 1843 1844 if (he->leaf) 1845 return false; 1846 1847 node = rb_first(&he->hroot_out); 1848 child = rb_entry(node, struct hist_entry, rb_node); 1849 1850 while (node && child->filtered) { 1851 node = rb_next(node); 1852 child = rb_entry(node, struct hist_entry, rb_node); 1853 } 1854 1855 if (node) 1856 percent = hist_entry__get_percent_limit(child); 1857 else 1858 percent = 0; 1859 1860 return node && percent >= limit; 1861 } 1862 1863 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h, 1864 enum hist_filter filter) 1865 { 1866 h->filtered &= ~(1 << filter); 1867 1868 if (symbol_conf.report_hierarchy) { 1869 struct hist_entry *parent = h->parent_he; 1870 1871 while (parent) { 1872 he_stat__add_stat(&parent->stat, &h->stat); 1873 1874 parent->filtered &= ~(1 << filter); 1875 1876 if (parent->filtered) 1877 goto next; 1878 1879 /* force fold unfiltered entry for simplicity */ 1880 parent->unfolded = false; 1881 parent->has_no_entry = false; 1882 parent->row_offset = 0; 1883 parent->nr_rows = 0; 1884 next: 1885 parent = parent->parent_he; 1886 } 1887 } 1888 1889 if (h->filtered) 1890 return; 1891 1892 /* force fold unfiltered entry for simplicity */ 1893 h->unfolded = false; 1894 h->has_no_entry = false; 1895 h->row_offset = 0; 1896 h->nr_rows = 0; 1897 1898 hists->stats.nr_non_filtered_samples += h->stat.nr_events; 1899 1900 hists__inc_filter_stats(hists, h); 1901 
hists__calc_col_len(hists, h); 1902 } 1903 1904 1905 static bool hists__filter_entry_by_dso(struct hists *hists, 1906 struct hist_entry *he) 1907 { 1908 if (hists->dso_filter != NULL && 1909 (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) { 1910 he->filtered |= (1 << HIST_FILTER__DSO); 1911 return true; 1912 } 1913 1914 return false; 1915 } 1916 1917 static bool hists__filter_entry_by_thread(struct hists *hists, 1918 struct hist_entry *he) 1919 { 1920 if (hists->thread_filter != NULL && 1921 he->thread != hists->thread_filter) { 1922 he->filtered |= (1 << HIST_FILTER__THREAD); 1923 return true; 1924 } 1925 1926 return false; 1927 } 1928 1929 static bool hists__filter_entry_by_symbol(struct hists *hists, 1930 struct hist_entry *he) 1931 { 1932 if (hists->symbol_filter_str != NULL && 1933 (!he->ms.sym || strstr(he->ms.sym->name, 1934 hists->symbol_filter_str) == NULL)) { 1935 he->filtered |= (1 << HIST_FILTER__SYMBOL); 1936 return true; 1937 } 1938 1939 return false; 1940 } 1941 1942 static bool hists__filter_entry_by_socket(struct hists *hists, 1943 struct hist_entry *he) 1944 { 1945 if ((hists->socket_filter > -1) && 1946 (he->socket != hists->socket_filter)) { 1947 he->filtered |= (1 << HIST_FILTER__SOCKET); 1948 return true; 1949 } 1950 1951 return false; 1952 } 1953 1954 typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he); 1955 1956 static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter) 1957 { 1958 struct rb_node *nd; 1959 1960 hists->stats.nr_non_filtered_samples = 0; 1961 1962 hists__reset_filter_stats(hists); 1963 hists__reset_col_len(hists); 1964 1965 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { 1966 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); 1967 1968 if (filter(hists, h)) 1969 continue; 1970 1971 hists__remove_entry_filter(hists, h, type); 1972 } 1973 } 1974 1975 static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he) 1976 { 1977 struct rb_node **p = &root->rb_node; 1978 struct rb_node *parent = NULL; 1979 struct hist_entry *iter; 1980 struct rb_root new_root = RB_ROOT; 1981 struct rb_node *nd; 1982 1983 while (*p != NULL) { 1984 parent = *p; 1985 iter = rb_entry(parent, struct hist_entry, rb_node); 1986 1987 if (hist_entry__sort(he, iter) > 0) 1988 p = &(*p)->rb_left; 1989 else 1990 p = &(*p)->rb_right; 1991 } 1992 1993 rb_link_node(&he->rb_node, parent, p); 1994 rb_insert_color(&he->rb_node, root); 1995 1996 if (he->leaf || he->filtered) 1997 return; 1998 1999 nd = rb_first(&he->hroot_out); 2000 while (nd) { 2001 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); 2002 2003 nd = rb_next(nd); 2004 rb_erase(&h->rb_node, &he->hroot_out); 2005 2006 resort_filtered_entry(&new_root, h); 2007 } 2008 2009 he->hroot_out = new_root; 2010 } 2011 2012 static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg) 2013 { 2014 struct rb_node *nd; 2015 struct rb_root new_root = RB_ROOT; 2016 2017 hists->stats.nr_non_filtered_samples = 0; 2018 2019 hists__reset_filter_stats(hists); 2020 hists__reset_col_len(hists); 2021 2022 nd = rb_first(&hists->entries); 2023 while (nd) { 2024 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); 2025 int ret; 2026 2027 ret = hist_entry__filter(h, type, arg); 2028 2029 /* 2030 * case 1. 
non-matching type 2031 * zero out the period, set filter marker and move to child 2032 */ 2033 if (ret < 0) { 2034 memset(&h->stat, 0, sizeof(h->stat)); 2035 h->filtered |= (1 << type); 2036 2037 nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD); 2038 } 2039 /* 2040 * case 2. matched type (filter out) 2041 * set filter marker and move to next 2042 */ 2043 else if (ret == 1) { 2044 h->filtered |= (1 << type); 2045 2046 nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING); 2047 } 2048 /* 2049 * case 3. ok (not filtered) 2050 * add period to hists and parents, erase the filter marker 2051 * and move to next sibling 2052 */ 2053 else { 2054 hists__remove_entry_filter(hists, h, type); 2055 2056 nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING); 2057 } 2058 } 2059 2060 hierarchy_recalc_total_periods(hists); 2061 2062 /* 2063 * resort output after applying a new filter since filter in a lower 2064 * hierarchy can change periods in a upper hierarchy. 2065 */ 2066 nd = rb_first(&hists->entries); 2067 while (nd) { 2068 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); 2069 2070 nd = rb_next(nd); 2071 rb_erase(&h->rb_node, &hists->entries); 2072 2073 resort_filtered_entry(&new_root, h); 2074 } 2075 2076 hists->entries = new_root; 2077 } 2078 2079 void hists__filter_by_thread(struct hists *hists) 2080 { 2081 if (symbol_conf.report_hierarchy) 2082 hists__filter_hierarchy(hists, HIST_FILTER__THREAD, 2083 hists->thread_filter); 2084 else 2085 hists__filter_by_type(hists, HIST_FILTER__THREAD, 2086 hists__filter_entry_by_thread); 2087 } 2088 2089 void hists__filter_by_dso(struct hists *hists) 2090 { 2091 if (symbol_conf.report_hierarchy) 2092 hists__filter_hierarchy(hists, HIST_FILTER__DSO, 2093 hists->dso_filter); 2094 else 2095 hists__filter_by_type(hists, HIST_FILTER__DSO, 2096 hists__filter_entry_by_dso); 2097 } 2098 2099 void hists__filter_by_symbol(struct hists *hists) 2100 { 2101 if (symbol_conf.report_hierarchy) 2102 hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL, 2103 hists->symbol_filter_str); 2104 else 2105 hists__filter_by_type(hists, HIST_FILTER__SYMBOL, 2106 hists__filter_entry_by_symbol); 2107 } 2108 2109 void hists__filter_by_socket(struct hists *hists) 2110 { 2111 if (symbol_conf.report_hierarchy) 2112 hists__filter_hierarchy(hists, HIST_FILTER__SOCKET, 2113 &hists->socket_filter); 2114 else 2115 hists__filter_by_type(hists, HIST_FILTER__SOCKET, 2116 hists__filter_entry_by_socket); 2117 } 2118 2119 void events_stats__inc(struct events_stats *stats, u32 type) 2120 { 2121 ++stats->nr_events[0]; 2122 ++stats->nr_events[type]; 2123 } 2124 2125 void hists__inc_nr_events(struct hists *hists, u32 type) 2126 { 2127 events_stats__inc(&hists->stats, type); 2128 } 2129 2130 void hists__inc_nr_samples(struct hists *hists, bool filtered) 2131 { 2132 events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE); 2133 if (!filtered) 2134 hists->stats.nr_non_filtered_samples++; 2135 } 2136 2137 static struct hist_entry *hists__add_dummy_entry(struct hists *hists, 2138 struct hist_entry *pair) 2139 { 2140 struct rb_root *root; 2141 struct rb_node **p; 2142 struct rb_node *parent = NULL; 2143 struct hist_entry *he; 2144 int64_t cmp; 2145 2146 if (hists__has(hists, need_collapse)) 2147 root = &hists->entries_collapsed; 2148 else 2149 root = hists->entries_in; 2150 2151 p = &root->rb_node; 2152 2153 while (*p != NULL) { 2154 parent = *p; 2155 he = rb_entry(parent, struct hist_entry, rb_node_in); 2156 2157 cmp = hist_entry__collapse(he, pair); 2158 2159 if (!cmp) 2160 goto out; 2161 
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		if (symbol_conf.cumulate_callchain)
			memset(he->stat_acc, 0, sizeof(he->stat));
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
						    struct rb_root *root,
						    struct hist_entry *pair)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct perf_hpp_fmt *fmt;

	p = &root->rb_node;
	while (*p != NULL) {
		int64_t cmp = 0;

		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, he, pair);
			if (cmp)
				break;
		}
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);

		he->dummy = true;
		he->hists = hists;
		memset(&he->stat, 0, sizeof(he->stat));
		hists__inc_stats(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (hists__has(hists, need_collapse))
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static struct hist_entry *hists__find_hierarchy_entry(struct rb_root *root,
						      struct hist_entry *he)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct hist_entry *iter;
		struct perf_hpp_fmt *fmt;
		int64_t cmp = 0;

		iter = rb_entry(n, struct hist_entry, rb_node_in);
		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}
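/*
 * Recursively pair up entries from two hierarchical trees: for every
 * entry in the leader tree, look up the entry with matching sort keys in
 * the other tree and, when found, link the two and descend into their
 * children.
 */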
static void hists__match_hierarchy(struct rb_root *leader_root,
				   struct rb_root *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	for (nd = rb_first(leader_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_hierarchy_entry(other_root, pos);

		if (pair) {
			hist_entry__add_pair(pair, pos);
			hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
		}
	}
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__match_hierarchy(&leader->entries_collapsed,
					      &other->entries_collapsed);
	}

	if (hists__has(leader, need_collapse))
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

static int hists__link_hierarchy(struct hists *leader_hists,
				 struct hist_entry *parent,
				 struct rb_root *leader_root,
				 struct rb_root *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *leader;

	for (nd = rb_first(other_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(pos)) {
			bool found = false;

			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
				if (leader->hists == leader_hists) {
					found = true;
					break;
				}
			}
			if (!found)
				return -1;
		} else {
			leader = add_dummy_hierarchy_entry(leader_hists,
							   leader_root, pos);
			if (leader == NULL)
				return -1;

			/* do not set the parent in pos, only in the new leader entry */
			leader->parent_he = parent;

			hist_entry__add_pair(pos, leader);
		}

		if (!pos->leaf) {
			if (hists__link_hierarchy(leader_hists, leader,
						  &leader->hroot_in,
						  &pos->hroot_in) < 0)
				return -1;
		}
	}
	return 0;
}

/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, just add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__link_hierarchy(leader, NULL,
					     &leader->entries_collapsed,
					     &other->entries_collapsed);
	}

	if (hists__has(other, need_collapse))
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
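/*
 * Account branch cycle counts to the code between branches, so that
 * annotation can show cycles per block.  The branch stack is stored
 * most-recent-first, hence the reverse walk below: 'prev' carries the
 * target of the previous branch in program order and is forced to NULL
 * for non-standard branch modes, where no IPC is computed.
 */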
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles, always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, we still want to process the
			 * other entries.
			 *
			 * For non-standard branch modes always
			 * force no IPC (prev == NULL).
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			free(bi);
		}
	}
}

size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each_entry(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else {
		pr_debug("Invalid percentage: %s\n", arg);
		return -1;
	}

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	hists->hpp_list = hpp_list;
	INIT_LIST_HEAD(&hists->hpp_formats);
	return 0;
}

static void hists__delete_remaining_entries(struct rb_root *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(root)) {
		node = rb_first(root);
		rb_erase(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);
	struct perf_hpp_fmt *fmt, *pos;
	struct perf_hpp_list_node *node, *tmp;

	hists__delete_all_entries(hists);

	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
			list_del(&fmt->list);
			free(fmt);
		}
		list_del(&node->list);
		free(node);
	}
}

static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists, &perf_hpp_list);
	return 0;
}
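/*
 * hists_evsel__init()/hists_evsel__exit() are registered below through
 * perf_evsel__object_config(), so every evsel is allocated as a
 * 'struct hists_evsel' whose hists are set up and torn down together
 * with the evsel itself.
 */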
/*
 * XXX The hist_entries stored in the rbtrees are freed by
 * hists_evsel__exit() above when the evsel object goes away.
 */

int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init,
					    hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}

void perf_hpp_list__init(struct perf_hpp_list *list)
{
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);
}