1 // SPDX-License-Identifier: GPL-2.0 2 #include "callchain.h" 3 #include "debug.h" 4 #include "dso.h" 5 #include "build-id.h" 6 #include "hist.h" 7 #include "map.h" 8 #include "map_symbol.h" 9 #include "branch.h" 10 #include "mem-events.h" 11 #include "session.h" 12 #include "namespaces.h" 13 #include "cgroup.h" 14 #include "sort.h" 15 #include "units.h" 16 #include "evlist.h" 17 #include "evsel.h" 18 #include "annotate.h" 19 #include "srcline.h" 20 #include "symbol.h" 21 #include "thread.h" 22 #include "block-info.h" 23 #include "ui/progress.h" 24 #include <errno.h> 25 #include <math.h> 26 #include <inttypes.h> 27 #include <sys/param.h> 28 #include <linux/rbtree.h> 29 #include <linux/string.h> 30 #include <linux/time64.h> 31 #include <linux/zalloc.h> 32 33 static bool hists__filter_entry_by_dso(struct hists *hists, 34 struct hist_entry *he); 35 static bool hists__filter_entry_by_thread(struct hists *hists, 36 struct hist_entry *he); 37 static bool hists__filter_entry_by_symbol(struct hists *hists, 38 struct hist_entry *he); 39 static bool hists__filter_entry_by_socket(struct hists *hists, 40 struct hist_entry *he); 41 42 u16 hists__col_len(struct hists *hists, enum hist_column col) 43 { 44 return hists->col_len[col]; 45 } 46 47 void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len) 48 { 49 hists->col_len[col] = len; 50 } 51 52 bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len) 53 { 54 if (len > hists__col_len(hists, col)) { 55 hists__set_col_len(hists, col, len); 56 return true; 57 } 58 return false; 59 } 60 61 void hists__reset_col_len(struct hists *hists) 62 { 63 enum hist_column col; 64 65 for (col = 0; col < HISTC_NR_COLS; ++col) 66 hists__set_col_len(hists, col, 0); 67 } 68 69 static void hists__set_unres_dso_col_len(struct hists *hists, int dso) 70 { 71 const unsigned int unresolved_col_width = BITS_PER_LONG / 4; 72 73 if (hists__col_len(hists, dso) < unresolved_col_width && 74 !symbol_conf.col_width_list_str && !symbol_conf.field_sep && 75 !symbol_conf.dso_list) 76 hists__set_col_len(hists, dso, unresolved_col_width); 77 } 78 79 void hists__calc_col_len(struct hists *hists, struct hist_entry *h) 80 { 81 const unsigned int unresolved_col_width = BITS_PER_LONG / 4; 82 int symlen; 83 u16 len; 84 85 if (h->block_info) 86 return; 87 /* 88 * +4 accounts for '[x] ' priv level info 89 * +2 accounts for 0x prefix on raw addresses 90 * +3 accounts for ' y ' symtab origin info 91 */ 92 if (h->ms.sym) { 93 symlen = h->ms.sym->namelen + 4; 94 if (verbose > 0) 95 symlen += BITS_PER_LONG / 4 + 2 + 3; 96 hists__new_col_len(hists, HISTC_SYMBOL, symlen); 97 } else { 98 symlen = unresolved_col_width + 4 + 2; 99 hists__new_col_len(hists, HISTC_SYMBOL, symlen); 100 hists__set_unres_dso_col_len(hists, HISTC_DSO); 101 } 102 103 len = thread__comm_len(h->thread); 104 if (hists__new_col_len(hists, HISTC_COMM, len)) 105 hists__set_col_len(hists, HISTC_THREAD, len + 8); 106 107 if (h->ms.map) { 108 len = dso__name_len(h->ms.map->dso); 109 hists__new_col_len(hists, HISTC_DSO, len); 110 } 111 112 if (h->parent) 113 hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen); 114 115 if (h->branch_info) { 116 if (h->branch_info->from.ms.sym) { 117 symlen = (int)h->branch_info->from.ms.sym->namelen + 4; 118 if (verbose > 0) 119 symlen += BITS_PER_LONG / 4 + 2 + 3; 120 hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen); 121 122 symlen = dso__name_len(h->branch_info->from.ms.map->dso); 123 hists__new_col_len(hists, HISTC_DSO_FROM, symlen); 124 } else { 125 symlen = unresolved_col_width + 4 + 2; 126 hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen); 127 hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM); 128 } 129 130 if (h->branch_info->to.ms.sym) { 131 symlen = (int)h->branch_info->to.ms.sym->namelen + 4; 132 if (verbose > 0) 133 symlen += BITS_PER_LONG / 4 + 2 + 3; 134 hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen); 135 136 symlen = dso__name_len(h->branch_info->to.ms.map->dso); 137 hists__new_col_len(hists, HISTC_DSO_TO, symlen); 138 } else { 139 symlen = unresolved_col_width + 4 + 2; 140 hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen); 141 hists__set_unres_dso_col_len(hists, HISTC_DSO_TO); 142 } 143 144 if (h->branch_info->srcline_from) 145 hists__new_col_len(hists, HISTC_SRCLINE_FROM, 146 strlen(h->branch_info->srcline_from)); 147 if (h->branch_info->srcline_to) 148 hists__new_col_len(hists, HISTC_SRCLINE_TO, 149 strlen(h->branch_info->srcline_to)); 150 } 151 152 if (h->mem_info) { 153 if (h->mem_info->daddr.ms.sym) { 154 symlen = (int)h->mem_info->daddr.ms.sym->namelen + 4 155 + unresolved_col_width + 2; 156 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, 157 symlen); 158 hists__new_col_len(hists, HISTC_MEM_DCACHELINE, 159 symlen + 1); 160 } else { 161 symlen = unresolved_col_width + 4 + 2; 162 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, 163 symlen); 164 hists__new_col_len(hists, HISTC_MEM_DCACHELINE, 165 symlen); 166 } 167 168 if (h->mem_info->iaddr.ms.sym) { 169 symlen = (int)h->mem_info->iaddr.ms.sym->namelen + 4 170 + unresolved_col_width + 2; 171 hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, 172 symlen); 173 } else { 174 symlen = unresolved_col_width + 4 + 2; 175 hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, 176 symlen); 177 } 178 179 if (h->mem_info->daddr.ms.map) { 180 symlen = dso__name_len(h->mem_info->daddr.ms.map->dso); 181 hists__new_col_len(hists, HISTC_MEM_DADDR_DSO, 182 symlen); 183 } else { 184 symlen = unresolved_col_width + 4 + 2; 185 hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO); 186 } 187 188 hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR, 189 unresolved_col_width + 4 + 2); 190 191 hists__new_col_len(hists, HISTC_MEM_DATA_PAGE_SIZE, 192 unresolved_col_width + 4 + 2); 193 194 } else { 195 symlen = unresolved_col_width + 4 + 2; 196 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen); 197 hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen); 198 hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO); 199 } 200 201 hists__new_col_len(hists, HISTC_CGROUP, 6); 202 hists__new_col_len(hists, HISTC_CGROUP_ID, 20); 203 hists__new_col_len(hists, HISTC_CPU, 3); 204 hists__new_col_len(hists, HISTC_SOCKET, 6); 205 hists__new_col_len(hists, HISTC_MEM_LOCKED, 6); 206 hists__new_col_len(hists, HISTC_MEM_TLB, 22); 207 hists__new_col_len(hists, HISTC_MEM_SNOOP, 12); 208 hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3); 209 hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12); 210 hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12); 211 hists__new_col_len(hists, HISTC_MEM_BLOCKED, 10); 212 hists__new_col_len(hists, HISTC_LOCAL_INS_LAT, 13); 213 hists__new_col_len(hists, HISTC_GLOBAL_INS_LAT, 13); 214 if (symbol_conf.nanosecs) 215 hists__new_col_len(hists, HISTC_TIME, 16); 216 else 217 hists__new_col_len(hists, HISTC_TIME, 12); 218 hists__new_col_len(hists, HISTC_CODE_PAGE_SIZE, 6); 219 220 if (h->srcline) { 221 len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header)); 222 hists__new_col_len(hists, HISTC_SRCLINE, len); 223 } 224 225 if (h->srcfile) 226 hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile)); 227 228 if (h->transaction) 229 hists__new_col_len(hists, HISTC_TRANSACTION, 230 hist_entry__transaction_len()); 231 232 if (h->trace_output) 233 hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output)); 234 235 if (h->cgroup) { 236 const char *cgrp_name = "unknown"; 237 struct cgroup *cgrp = cgroup__find(h->ms.maps->machine->env, 238 h->cgroup); 239 if (cgrp != NULL) 240 cgrp_name = cgrp->name; 241 242 hists__new_col_len(hists, HISTC_CGROUP, strlen(cgrp_name)); 243 } 244 } 245 246 void hists__output_recalc_col_len(struct hists *hists, int max_rows) 247 { 248 struct rb_node *next = rb_first_cached(&hists->entries); 249 struct hist_entry *n; 250 int row = 0; 251 252 hists__reset_col_len(hists); 253 254 while (next && row++ < max_rows) { 255 n = rb_entry(next, struct hist_entry, rb_node); 256 if (!n->filtered) 257 hists__calc_col_len(hists, n); 258 next = rb_next(&n->rb_node); 259 } 260 } 261 262 static void he_stat__add_cpumode_period(struct he_stat *he_stat, 263 unsigned int cpumode, u64 period) 264 { 265 switch (cpumode) { 266 case PERF_RECORD_MISC_KERNEL: 267 he_stat->period_sys += period; 268 break; 269 case PERF_RECORD_MISC_USER: 270 he_stat->period_us += period; 271 break; 272 case PERF_RECORD_MISC_GUEST_KERNEL: 273 he_stat->period_guest_sys += period; 274 break; 275 case PERF_RECORD_MISC_GUEST_USER: 276 he_stat->period_guest_us += period; 277 break; 278 default: 279 break; 280 } 281 } 282 283 static long hist_time(unsigned long htime) 284 { 285 unsigned long time_quantum = symbol_conf.time_quantum; 286 if (time_quantum) 287 return (htime / time_quantum) * time_quantum; 288 return htime; 289 } 290 291 static void he_stat__add_period(struct he_stat *he_stat, u64 period, 292 u64 weight, u64 ins_lat) 293 { 294 295 he_stat->period += period; 296 he_stat->weight += weight; 297 he_stat->nr_events += 1; 298 he_stat->ins_lat += ins_lat; 299 } 300 301 static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src) 302 { 303 dest->period += src->period; 304 dest->period_sys += src->period_sys; 305 dest->period_us += src->period_us; 306 dest->period_guest_sys += src->period_guest_sys; 307 dest->period_guest_us += src->period_guest_us; 308 dest->nr_events += src->nr_events; 309 dest->weight += src->weight; 310 dest->ins_lat += src->ins_lat; 311 } 312 313 static void he_stat__decay(struct he_stat *he_stat) 314 { 315 he_stat->period = (he_stat->period * 7) / 8; 316 he_stat->nr_events = (he_stat->nr_events * 7) / 8; 317 /* XXX need decay for weight too? */ 318 } 319 320 static void hists__delete_entry(struct hists *hists, struct hist_entry *he); 321 322 static bool hists__decay_entry(struct hists *hists, struct hist_entry *he) 323 { 324 u64 prev_period = he->stat.period; 325 u64 diff; 326 327 if (prev_period == 0) 328 return true; 329 330 he_stat__decay(&he->stat); 331 if (symbol_conf.cumulate_callchain) 332 he_stat__decay(he->stat_acc); 333 decay_callchain(he->callchain); 334 335 diff = prev_period - he->stat.period; 336 337 if (!he->depth) { 338 hists->stats.total_period -= diff; 339 if (!he->filtered) 340 hists->stats.total_non_filtered_period -= diff; 341 } 342 343 if (!he->leaf) { 344 struct hist_entry *child; 345 struct rb_node *node = rb_first_cached(&he->hroot_out); 346 while (node) { 347 child = rb_entry(node, struct hist_entry, rb_node); 348 node = rb_next(node); 349 350 if (hists__decay_entry(hists, child)) 351 hists__delete_entry(hists, child); 352 } 353 } 354 355 return he->stat.period == 0; 356 } 357 358 static void hists__delete_entry(struct hists *hists, struct hist_entry *he) 359 { 360 struct rb_root_cached *root_in; 361 struct rb_root_cached *root_out; 362 363 if (he->parent_he) { 364 root_in = &he->parent_he->hroot_in; 365 root_out = &he->parent_he->hroot_out; 366 } else { 367 if (hists__has(hists, need_collapse)) 368 root_in = &hists->entries_collapsed; 369 else 370 root_in = hists->entries_in; 371 root_out = &hists->entries; 372 } 373 374 rb_erase_cached(&he->rb_node_in, root_in); 375 rb_erase_cached(&he->rb_node, root_out); 376 377 --hists->nr_entries; 378 if (!he->filtered) 379 --hists->nr_non_filtered_entries; 380 381 hist_entry__delete(he); 382 } 383 384 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel) 385 { 386 struct rb_node *next = rb_first_cached(&hists->entries); 387 struct hist_entry *n; 388 389 while (next) { 390 n = rb_entry(next, struct hist_entry, rb_node); 391 next = rb_next(&n->rb_node); 392 if (((zap_user && n->level == '.') || 393 (zap_kernel && n->level != '.') || 394 hists__decay_entry(hists, n))) { 395 hists__delete_entry(hists, n); 396 } 397 } 398 } 399 400 void hists__delete_entries(struct hists *hists) 401 { 402 struct rb_node *next = rb_first_cached(&hists->entries); 403 struct hist_entry *n; 404 405 while (next) { 406 n = rb_entry(next, struct hist_entry, rb_node); 407 next = rb_next(&n->rb_node); 408 409 hists__delete_entry(hists, n); 410 } 411 } 412 413 struct hist_entry *hists__get_entry(struct hists *hists, int idx) 414 { 415 struct rb_node *next = rb_first_cached(&hists->entries); 416 struct hist_entry *n; 417 int i = 0; 418 419 while (next) { 420 n = rb_entry(next, struct hist_entry, rb_node); 421 if (i == idx) 422 return n; 423 424 next = rb_next(&n->rb_node); 425 i++; 426 } 427 428 return NULL; 429 } 430 431 /* 432 * histogram, sorted on item, collects periods 433 */ 434 435 static int hist_entry__init(struct hist_entry *he, 436 struct hist_entry *template, 437 bool sample_self, 438 size_t callchain_size) 439 { 440 *he = *template; 441 he->callchain_size = callchain_size; 442 443 if (symbol_conf.cumulate_callchain) { 444 he->stat_acc = malloc(sizeof(he->stat)); 445 if (he->stat_acc == NULL) 446 return -ENOMEM; 447 memcpy(he->stat_acc, &he->stat, sizeof(he->stat)); 448 if (!sample_self) 449 memset(&he->stat, 0, sizeof(he->stat)); 450 } 451 452 map__get(he->ms.map); 453 454 if (he->branch_info) { 455 /* 456 * This branch info is (a part of) allocated from 457 * sample__resolve_bstack() and will be freed after 458 * adding new entries. So we need to save a copy. 459 */ 460 he->branch_info = malloc(sizeof(*he->branch_info)); 461 if (he->branch_info == NULL) 462 goto err; 463 464 memcpy(he->branch_info, template->branch_info, 465 sizeof(*he->branch_info)); 466 467 map__get(he->branch_info->from.ms.map); 468 map__get(he->branch_info->to.ms.map); 469 } 470 471 if (he->mem_info) { 472 map__get(he->mem_info->iaddr.ms.map); 473 map__get(he->mem_info->daddr.ms.map); 474 } 475 476 if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) 477 callchain_init(he->callchain); 478 479 if (he->raw_data) { 480 he->raw_data = memdup(he->raw_data, he->raw_size); 481 if (he->raw_data == NULL) 482 goto err_infos; 483 } 484 485 if (he->srcline) { 486 he->srcline = strdup(he->srcline); 487 if (he->srcline == NULL) 488 goto err_rawdata; 489 } 490 491 if (symbol_conf.res_sample) { 492 he->res_samples = calloc(sizeof(struct res_sample), 493 symbol_conf.res_sample); 494 if (!he->res_samples) 495 goto err_srcline; 496 } 497 498 INIT_LIST_HEAD(&he->pairs.node); 499 thread__get(he->thread); 500 he->hroot_in = RB_ROOT_CACHED; 501 he->hroot_out = RB_ROOT_CACHED; 502 503 if (!symbol_conf.report_hierarchy) 504 he->leaf = true; 505 506 return 0; 507 508 err_srcline: 509 zfree(&he->srcline); 510 511 err_rawdata: 512 zfree(&he->raw_data); 513 514 err_infos: 515 if (he->branch_info) { 516 map__put(he->branch_info->from.ms.map); 517 map__put(he->branch_info->to.ms.map); 518 zfree(&he->branch_info); 519 } 520 if (he->mem_info) { 521 map__put(he->mem_info->iaddr.ms.map); 522 map__put(he->mem_info->daddr.ms.map); 523 } 524 err: 525 map__zput(he->ms.map); 526 zfree(&he->stat_acc); 527 return -ENOMEM; 528 } 529 530 static void *hist_entry__zalloc(size_t size) 531 { 532 return zalloc(size + sizeof(struct hist_entry)); 533 } 534 535 static void hist_entry__free(void *ptr) 536 { 537 free(ptr); 538 } 539 540 static struct hist_entry_ops default_ops = { 541 .new = hist_entry__zalloc, 542 .free = hist_entry__free, 543 }; 544 545 static struct hist_entry *hist_entry__new(struct hist_entry *template, 546 bool sample_self) 547 { 548 struct hist_entry_ops *ops = template->ops; 549 size_t callchain_size = 0; 550 struct hist_entry *he; 551 int err = 0; 552 553 if (!ops) 554 ops = template->ops = &default_ops; 555 556 if (symbol_conf.use_callchain) 557 callchain_size = sizeof(struct callchain_root); 558 559 he = ops->new(callchain_size); 560 if (he) { 561 err = hist_entry__init(he, template, sample_self, callchain_size); 562 if (err) { 563 ops->free(he); 564 he = NULL; 565 } 566 } 567 568 return he; 569 } 570 571 static u8 symbol__parent_filter(const struct symbol *parent) 572 { 573 if (symbol_conf.exclude_other && parent == NULL) 574 return 1 << HIST_FILTER__PARENT; 575 return 0; 576 } 577 578 static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period) 579 { 580 if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain) 581 return; 582 583 he->hists->callchain_period += period; 584 if (!he->filtered) 585 he->hists->callchain_non_filtered_period += period; 586 } 587 588 static struct hist_entry *hists__findnew_entry(struct hists *hists, 589 struct hist_entry *entry, 590 struct addr_location *al, 591 bool sample_self) 592 { 593 struct rb_node **p; 594 struct rb_node *parent = NULL; 595 struct hist_entry *he; 596 int64_t cmp; 597 u64 period = entry->stat.period; 598 u64 weight = entry->stat.weight; 599 u64 ins_lat = entry->stat.ins_lat; 600 bool leftmost = true; 601 602 p = &hists->entries_in->rb_root.rb_node; 603 604 while (*p != NULL) { 605 parent = *p; 606 he = rb_entry(parent, struct hist_entry, rb_node_in); 607 608 /* 609 * Make sure that it receives arguments in a same order as 610 * hist_entry__collapse() so that we can use an appropriate 611 * function when searching an entry regardless which sort 612 * keys were used. 613 */ 614 cmp = hist_entry__cmp(he, entry); 615 616 if (!cmp) { 617 if (sample_self) { 618 he_stat__add_period(&he->stat, period, weight, ins_lat); 619 hist_entry__add_callchain_period(he, period); 620 } 621 if (symbol_conf.cumulate_callchain) 622 he_stat__add_period(he->stat_acc, period, weight, ins_lat); 623 624 /* 625 * This mem info was allocated from sample__resolve_mem 626 * and will not be used anymore. 627 */ 628 mem_info__zput(entry->mem_info); 629 630 block_info__zput(entry->block_info); 631 632 /* If the map of an existing hist_entry has 633 * become out-of-date due to an exec() or 634 * similar, update it. Otherwise we will 635 * mis-adjust symbol addresses when computing 636 * the history counter to increment. 637 */ 638 if (he->ms.map != entry->ms.map) { 639 map__put(he->ms.map); 640 he->ms.map = map__get(entry->ms.map); 641 } 642 goto out; 643 } 644 645 if (cmp < 0) 646 p = &(*p)->rb_left; 647 else { 648 p = &(*p)->rb_right; 649 leftmost = false; 650 } 651 } 652 653 he = hist_entry__new(entry, sample_self); 654 if (!he) 655 return NULL; 656 657 if (sample_self) 658 hist_entry__add_callchain_period(he, period); 659 hists->nr_entries++; 660 661 rb_link_node(&he->rb_node_in, parent, p); 662 rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost); 663 out: 664 if (sample_self) 665 he_stat__add_cpumode_period(&he->stat, al->cpumode, period); 666 if (symbol_conf.cumulate_callchain) 667 he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period); 668 return he; 669 } 670 671 static unsigned random_max(unsigned high) 672 { 673 unsigned thresh = -high % high; 674 for (;;) { 675 unsigned r = random(); 676 if (r >= thresh) 677 return r % high; 678 } 679 } 680 681 static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample) 682 { 683 struct res_sample *r; 684 int j; 685 686 if (he->num_res < symbol_conf.res_sample) { 687 j = he->num_res++; 688 } else { 689 j = random_max(symbol_conf.res_sample); 690 } 691 r = &he->res_samples[j]; 692 r->time = sample->time; 693 r->cpu = sample->cpu; 694 r->tid = sample->tid; 695 } 696 697 static struct hist_entry* 698 __hists__add_entry(struct hists *hists, 699 struct addr_location *al, 700 struct symbol *sym_parent, 701 struct branch_info *bi, 702 struct mem_info *mi, 703 struct block_info *block_info, 704 struct perf_sample *sample, 705 bool sample_self, 706 struct hist_entry_ops *ops) 707 { 708 struct namespaces *ns = thread__namespaces(al->thread); 709 struct hist_entry entry = { 710 .thread = al->thread, 711 .comm = thread__comm(al->thread), 712 .cgroup_id = { 713 .dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0, 714 .ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0, 715 }, 716 .cgroup = sample->cgroup, 717 .ms = { 718 .maps = al->maps, 719 .map = al->map, 720 .sym = al->sym, 721 }, 722 .srcline = (char *) al->srcline, 723 .socket = al->socket, 724 .cpu = al->cpu, 725 .cpumode = al->cpumode, 726 .ip = al->addr, 727 .level = al->level, 728 .code_page_size = sample->code_page_size, 729 .stat = { 730 .nr_events = 1, 731 .period = sample->period, 732 .weight = sample->weight, 733 .ins_lat = sample->ins_lat, 734 }, 735 .parent = sym_parent, 736 .filtered = symbol__parent_filter(sym_parent) | al->filtered, 737 .hists = hists, 738 .branch_info = bi, 739 .mem_info = mi, 740 .block_info = block_info, 741 .transaction = sample->transaction, 742 .raw_data = sample->raw_data, 743 .raw_size = sample->raw_size, 744 .ops = ops, 745 .time = hist_time(sample->time), 746 }, *he = hists__findnew_entry(hists, &entry, al, sample_self); 747 748 if (!hists->has_callchains && he && he->callchain_size != 0) 749 hists->has_callchains = true; 750 if (he && symbol_conf.res_sample) 751 hists__res_sample(he, sample); 752 return he; 753 } 754 755 struct hist_entry *hists__add_entry(struct hists *hists, 756 struct addr_location *al, 757 struct symbol *sym_parent, 758 struct branch_info *bi, 759 struct mem_info *mi, 760 struct perf_sample *sample, 761 bool sample_self) 762 { 763 return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL, 764 sample, sample_self, NULL); 765 } 766 767 struct hist_entry *hists__add_entry_ops(struct hists *hists, 768 struct hist_entry_ops *ops, 769 struct addr_location *al, 770 struct symbol *sym_parent, 771 struct branch_info *bi, 772 struct mem_info *mi, 773 struct perf_sample *sample, 774 bool sample_self) 775 { 776 return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL, 777 sample, sample_self, ops); 778 } 779 780 struct hist_entry *hists__add_entry_block(struct hists *hists, 781 struct addr_location *al, 782 struct block_info *block_info) 783 { 784 struct hist_entry entry = { 785 .block_info = block_info, 786 .hists = hists, 787 .ms = { 788 .maps = al->maps, 789 .map = al->map, 790 .sym = al->sym, 791 }, 792 }, *he = hists__findnew_entry(hists, &entry, al, false); 793 794 return he; 795 } 796 797 static int 798 iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused, 799 struct addr_location *al __maybe_unused) 800 { 801 return 0; 802 } 803 804 static int 805 iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused, 806 struct addr_location *al __maybe_unused) 807 { 808 return 0; 809 } 810 811 static int 812 iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al) 813 { 814 struct perf_sample *sample = iter->sample; 815 struct mem_info *mi; 816 817 mi = sample__resolve_mem(sample, al); 818 if (mi == NULL) 819 return -ENOMEM; 820 821 iter->priv = mi; 822 return 0; 823 } 824 825 static int 826 iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al) 827 { 828 u64 cost; 829 struct mem_info *mi = iter->priv; 830 struct hists *hists = evsel__hists(iter->evsel); 831 struct perf_sample *sample = iter->sample; 832 struct hist_entry *he; 833 834 if (mi == NULL) 835 return -EINVAL; 836 837 cost = sample->weight; 838 if (!cost) 839 cost = 1; 840 841 /* 842 * must pass period=weight in order to get the correct 843 * sorting from hists__collapse_resort() which is solely 844 * based on periods. We want sorting be done on nr_events * weight 845 * and this is indirectly achieved by passing period=weight here 846 * and the he_stat__add_period() function. 847 */ 848 sample->period = cost; 849 850 he = hists__add_entry(hists, al, iter->parent, NULL, mi, 851 sample, true); 852 if (!he) 853 return -ENOMEM; 854 855 iter->he = he; 856 return 0; 857 } 858 859 static int 860 iter_finish_mem_entry(struct hist_entry_iter *iter, 861 struct addr_location *al __maybe_unused) 862 { 863 struct evsel *evsel = iter->evsel; 864 struct hists *hists = evsel__hists(evsel); 865 struct hist_entry *he = iter->he; 866 int err = -EINVAL; 867 868 if (he == NULL) 869 goto out; 870 871 hists__inc_nr_samples(hists, he->filtered); 872 873 err = hist_entry__append_callchain(he, iter->sample); 874 875 out: 876 /* 877 * We don't need to free iter->priv (mem_info) here since the mem info 878 * was either already freed in hists__findnew_entry() or passed to a 879 * new hist entry by hist_entry__new(). 880 */ 881 iter->priv = NULL; 882 883 iter->he = NULL; 884 return err; 885 } 886 887 static int 888 iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al) 889 { 890 struct branch_info *bi; 891 struct perf_sample *sample = iter->sample; 892 893 bi = sample__resolve_bstack(sample, al); 894 if (!bi) 895 return -ENOMEM; 896 897 iter->curr = 0; 898 iter->total = sample->branch_stack->nr; 899 900 iter->priv = bi; 901 return 0; 902 } 903 904 static int 905 iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused, 906 struct addr_location *al __maybe_unused) 907 { 908 return 0; 909 } 910 911 static int 912 iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al) 913 { 914 struct branch_info *bi = iter->priv; 915 int i = iter->curr; 916 917 if (bi == NULL) 918 return 0; 919 920 if (iter->curr >= iter->total) 921 return 0; 922 923 al->maps = bi[i].to.ms.maps; 924 al->map = bi[i].to.ms.map; 925 al->sym = bi[i].to.ms.sym; 926 al->addr = bi[i].to.addr; 927 return 1; 928 } 929 930 static int 931 iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al) 932 { 933 struct branch_info *bi; 934 struct evsel *evsel = iter->evsel; 935 struct hists *hists = evsel__hists(evsel); 936 struct perf_sample *sample = iter->sample; 937 struct hist_entry *he = NULL; 938 int i = iter->curr; 939 int err = 0; 940 941 bi = iter->priv; 942 943 if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym)) 944 goto out; 945 946 /* 947 * The report shows the percentage of total branches captured 948 * and not events sampled. Thus we use a pseudo period of 1. 949 */ 950 sample->period = 1; 951 sample->weight = bi->flags.cycles ? bi->flags.cycles : 1; 952 953 he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL, 954 sample, true); 955 if (he == NULL) 956 return -ENOMEM; 957 958 hists__inc_nr_samples(hists, he->filtered); 959 960 out: 961 iter->he = he; 962 iter->curr++; 963 return err; 964 } 965 966 static int 967 iter_finish_branch_entry(struct hist_entry_iter *iter, 968 struct addr_location *al __maybe_unused) 969 { 970 zfree(&iter->priv); 971 iter->he = NULL; 972 973 return iter->curr >= iter->total ? 0 : -1; 974 } 975 976 static int 977 iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused, 978 struct addr_location *al __maybe_unused) 979 { 980 return 0; 981 } 982 983 static int 984 iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al) 985 { 986 struct evsel *evsel = iter->evsel; 987 struct perf_sample *sample = iter->sample; 988 struct hist_entry *he; 989 990 he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL, 991 sample, true); 992 if (he == NULL) 993 return -ENOMEM; 994 995 iter->he = he; 996 return 0; 997 } 998 999 static int 1000 iter_finish_normal_entry(struct hist_entry_iter *iter, 1001 struct addr_location *al __maybe_unused) 1002 { 1003 struct hist_entry *he = iter->he; 1004 struct evsel *evsel = iter->evsel; 1005 struct perf_sample *sample = iter->sample; 1006 1007 if (he == NULL) 1008 return 0; 1009 1010 iter->he = NULL; 1011 1012 hists__inc_nr_samples(evsel__hists(evsel), he->filtered); 1013 1014 return hist_entry__append_callchain(he, sample); 1015 } 1016 1017 static int 1018 iter_prepare_cumulative_entry(struct hist_entry_iter *iter, 1019 struct addr_location *al __maybe_unused) 1020 { 1021 struct hist_entry **he_cache; 1022 1023 callchain_cursor_commit(&callchain_cursor); 1024 1025 /* 1026 * This is for detecting cycles or recursions so that they're 1027 * cumulated only one time to prevent entries more than 100% 1028 * overhead. 1029 */ 1030 he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1)); 1031 if (he_cache == NULL) 1032 return -ENOMEM; 1033 1034 iter->priv = he_cache; 1035 iter->curr = 0; 1036 1037 return 0; 1038 } 1039 1040 static int 1041 iter_add_single_cumulative_entry(struct hist_entry_iter *iter, 1042 struct addr_location *al) 1043 { 1044 struct evsel *evsel = iter->evsel; 1045 struct hists *hists = evsel__hists(evsel); 1046 struct perf_sample *sample = iter->sample; 1047 struct hist_entry **he_cache = iter->priv; 1048 struct hist_entry *he; 1049 int err = 0; 1050 1051 he = hists__add_entry(hists, al, iter->parent, NULL, NULL, 1052 sample, true); 1053 if (he == NULL) 1054 return -ENOMEM; 1055 1056 iter->he = he; 1057 he_cache[iter->curr++] = he; 1058 1059 hist_entry__append_callchain(he, sample); 1060 1061 /* 1062 * We need to re-initialize the cursor since callchain_append() 1063 * advanced the cursor to the end. 1064 */ 1065 callchain_cursor_commit(&callchain_cursor); 1066 1067 hists__inc_nr_samples(hists, he->filtered); 1068 1069 return err; 1070 } 1071 1072 static int 1073 iter_next_cumulative_entry(struct hist_entry_iter *iter, 1074 struct addr_location *al) 1075 { 1076 struct callchain_cursor_node *node; 1077 1078 node = callchain_cursor_current(&callchain_cursor); 1079 if (node == NULL) 1080 return 0; 1081 1082 return fill_callchain_info(al, node, iter->hide_unresolved); 1083 } 1084 1085 static bool 1086 hist_entry__fast__sym_diff(struct hist_entry *left, 1087 struct hist_entry *right) 1088 { 1089 struct symbol *sym_l = left->ms.sym; 1090 struct symbol *sym_r = right->ms.sym; 1091 1092 if (!sym_l && !sym_r) 1093 return left->ip != right->ip; 1094 1095 return !!_sort__sym_cmp(sym_l, sym_r); 1096 } 1097 1098 1099 static int 1100 iter_add_next_cumulative_entry(struct hist_entry_iter *iter, 1101 struct addr_location *al) 1102 { 1103 struct evsel *evsel = iter->evsel; 1104 struct perf_sample *sample = iter->sample; 1105 struct hist_entry **he_cache = iter->priv; 1106 struct hist_entry *he; 1107 struct hist_entry he_tmp = { 1108 .hists = evsel__hists(evsel), 1109 .cpu = al->cpu, 1110 .thread = al->thread, 1111 .comm = thread__comm(al->thread), 1112 .ip = al->addr, 1113 .ms = { 1114 .maps = al->maps, 1115 .map = al->map, 1116 .sym = al->sym, 1117 }, 1118 .srcline = (char *) al->srcline, 1119 .parent = iter->parent, 1120 .raw_data = sample->raw_data, 1121 .raw_size = sample->raw_size, 1122 }; 1123 int i; 1124 struct callchain_cursor cursor; 1125 bool fast = hists__has(he_tmp.hists, sym); 1126 1127 callchain_cursor_snapshot(&cursor, &callchain_cursor); 1128 1129 callchain_cursor_advance(&callchain_cursor); 1130 1131 /* 1132 * Check if there's duplicate entries in the callchain. 1133 * It's possible that it has cycles or recursive calls. 1134 */ 1135 for (i = 0; i < iter->curr; i++) { 1136 /* 1137 * For most cases, there are no duplicate entries in callchain. 1138 * The symbols are usually different. Do a quick check for 1139 * symbols first. 1140 */ 1141 if (fast && hist_entry__fast__sym_diff(he_cache[i], &he_tmp)) 1142 continue; 1143 1144 if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) { 1145 /* to avoid calling callback function */ 1146 iter->he = NULL; 1147 return 0; 1148 } 1149 } 1150 1151 he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL, 1152 sample, false); 1153 if (he == NULL) 1154 return -ENOMEM; 1155 1156 iter->he = he; 1157 he_cache[iter->curr++] = he; 1158 1159 if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) 1160 callchain_append(he->callchain, &cursor, sample->period); 1161 return 0; 1162 } 1163 1164 static int 1165 iter_finish_cumulative_entry(struct hist_entry_iter *iter, 1166 struct addr_location *al __maybe_unused) 1167 { 1168 zfree(&iter->priv); 1169 iter->he = NULL; 1170 1171 return 0; 1172 } 1173 1174 const struct hist_iter_ops hist_iter_mem = { 1175 .prepare_entry = iter_prepare_mem_entry, 1176 .add_single_entry = iter_add_single_mem_entry, 1177 .next_entry = iter_next_nop_entry, 1178 .add_next_entry = iter_add_next_nop_entry, 1179 .finish_entry = iter_finish_mem_entry, 1180 }; 1181 1182 const struct hist_iter_ops hist_iter_branch = { 1183 .prepare_entry = iter_prepare_branch_entry, 1184 .add_single_entry = iter_add_single_branch_entry, 1185 .next_entry = iter_next_branch_entry, 1186 .add_next_entry = iter_add_next_branch_entry, 1187 .finish_entry = iter_finish_branch_entry, 1188 }; 1189 1190 const struct hist_iter_ops hist_iter_normal = { 1191 .prepare_entry = iter_prepare_normal_entry, 1192 .add_single_entry = iter_add_single_normal_entry, 1193 .next_entry = iter_next_nop_entry, 1194 .add_next_entry = iter_add_next_nop_entry, 1195 .finish_entry = iter_finish_normal_entry, 1196 }; 1197 1198 const struct hist_iter_ops hist_iter_cumulative = { 1199 .prepare_entry = iter_prepare_cumulative_entry, 1200 .add_single_entry = iter_add_single_cumulative_entry, 1201 .next_entry = iter_next_cumulative_entry, 1202 .add_next_entry = iter_add_next_cumulative_entry, 1203 .finish_entry = iter_finish_cumulative_entry, 1204 }; 1205 1206 int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al, 1207 int max_stack_depth, void *arg) 1208 { 1209 int err, err2; 1210 struct map *alm = NULL; 1211 1212 if (al) 1213 alm = map__get(al->map); 1214 1215 err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent, 1216 iter->evsel, al, max_stack_depth); 1217 if (err) { 1218 map__put(alm); 1219 return err; 1220 } 1221 1222 err = iter->ops->prepare_entry(iter, al); 1223 if (err) 1224 goto out; 1225 1226 err = iter->ops->add_single_entry(iter, al); 1227 if (err) 1228 goto out; 1229 1230 if (iter->he && iter->add_entry_cb) { 1231 err = iter->add_entry_cb(iter, al, true, arg); 1232 if (err) 1233 goto out; 1234 } 1235 1236 while (iter->ops->next_entry(iter, al)) { 1237 err = iter->ops->add_next_entry(iter, al); 1238 if (err) 1239 break; 1240 1241 if (iter->he && iter->add_entry_cb) { 1242 err = iter->add_entry_cb(iter, al, false, arg); 1243 if (err) 1244 goto out; 1245 } 1246 } 1247 1248 out: 1249 err2 = iter->ops->finish_entry(iter, al); 1250 if (!err) 1251 err = err2; 1252 1253 map__put(alm); 1254 1255 return err; 1256 } 1257 1258 int64_t 1259 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) 1260 { 1261 struct hists *hists = left->hists; 1262 struct perf_hpp_fmt *fmt; 1263 int64_t cmp = 0; 1264 1265 hists__for_each_sort_list(hists, fmt) { 1266 if (perf_hpp__is_dynamic_entry(fmt) && 1267 !perf_hpp__defined_dynamic_entry(fmt, hists)) 1268 continue; 1269 1270 cmp = fmt->cmp(fmt, left, right); 1271 if (cmp) 1272 break; 1273 } 1274 1275 return cmp; 1276 } 1277 1278 int64_t 1279 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) 1280 { 1281 struct hists *hists = left->hists; 1282 struct perf_hpp_fmt *fmt; 1283 int64_t cmp = 0; 1284 1285 hists__for_each_sort_list(hists, fmt) { 1286 if (perf_hpp__is_dynamic_entry(fmt) && 1287 !perf_hpp__defined_dynamic_entry(fmt, hists)) 1288 continue; 1289 1290 cmp = fmt->collapse(fmt, left, right); 1291 if (cmp) 1292 break; 1293 } 1294 1295 return cmp; 1296 } 1297 1298 void hist_entry__delete(struct hist_entry *he) 1299 { 1300 struct hist_entry_ops *ops = he->ops; 1301 1302 thread__zput(he->thread); 1303 map__zput(he->ms.map); 1304 1305 if (he->branch_info) { 1306 map__zput(he->branch_info->from.ms.map); 1307 map__zput(he->branch_info->to.ms.map); 1308 free_srcline(he->branch_info->srcline_from); 1309 free_srcline(he->branch_info->srcline_to); 1310 zfree(&he->branch_info); 1311 } 1312 1313 if (he->mem_info) { 1314 map__zput(he->mem_info->iaddr.ms.map); 1315 map__zput(he->mem_info->daddr.ms.map); 1316 mem_info__zput(he->mem_info); 1317 } 1318 1319 if (he->block_info) 1320 block_info__zput(he->block_info); 1321 1322 zfree(&he->res_samples); 1323 zfree(&he->stat_acc); 1324 free_srcline(he->srcline); 1325 if (he->srcfile && he->srcfile[0]) 1326 zfree(&he->srcfile); 1327 free_callchain(he->callchain); 1328 zfree(&he->trace_output); 1329 zfree(&he->raw_data); 1330 ops->free(he); 1331 } 1332 1333 /* 1334 * If this is not the last column, then we need to pad it according to the 1335 * pre-calculated max length for this column, otherwise don't bother adding 1336 * spaces because that would break viewing this with, for instance, 'less', 1337 * that would show tons of trailing spaces when a long C++ demangled method 1338 * names is sampled. 1339 */ 1340 int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp, 1341 struct perf_hpp_fmt *fmt, int printed) 1342 { 1343 if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) { 1344 const int width = fmt->width(fmt, hpp, he->hists); 1345 if (printed < width) { 1346 advance_hpp(hpp, printed); 1347 printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " "); 1348 } 1349 } 1350 1351 return printed; 1352 } 1353 1354 /* 1355 * collapse the histogram 1356 */ 1357 1358 static void hists__apply_filters(struct hists *hists, struct hist_entry *he); 1359 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he, 1360 enum hist_filter type); 1361 1362 typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt); 1363 1364 static bool check_thread_entry(struct perf_hpp_fmt *fmt) 1365 { 1366 return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt); 1367 } 1368 1369 static void hist_entry__check_and_remove_filter(struct hist_entry *he, 1370 enum hist_filter type, 1371 fmt_chk_fn check) 1372 { 1373 struct perf_hpp_fmt *fmt; 1374 bool type_match = false; 1375 struct hist_entry *parent = he->parent_he; 1376 1377 switch (type) { 1378 case HIST_FILTER__THREAD: 1379 if (symbol_conf.comm_list == NULL && 1380 symbol_conf.pid_list == NULL && 1381 symbol_conf.tid_list == NULL) 1382 return; 1383 break; 1384 case HIST_FILTER__DSO: 1385 if (symbol_conf.dso_list == NULL) 1386 return; 1387 break; 1388 case HIST_FILTER__SYMBOL: 1389 if (symbol_conf.sym_list == NULL) 1390 return; 1391 break; 1392 case HIST_FILTER__PARENT: 1393 case HIST_FILTER__GUEST: 1394 case HIST_FILTER__HOST: 1395 case HIST_FILTER__SOCKET: 1396 case HIST_FILTER__C2C: 1397 default: 1398 return; 1399 } 1400 1401 /* if it's filtered by own fmt, it has to have filter bits */ 1402 perf_hpp_list__for_each_format(he->hpp_list, fmt) { 1403 if (check(fmt)) { 1404 type_match = true; 1405 break; 1406 } 1407 } 1408 1409 if (type_match) { 1410 /* 1411 * If the filter is for current level entry, propagate 1412 * filter marker to parents. The marker bit was 1413 * already set by default so it only needs to clear 1414 * non-filtered entries. 1415 */ 1416 if (!(he->filtered & (1 << type))) { 1417 while (parent) { 1418 parent->filtered &= ~(1 << type); 1419 parent = parent->parent_he; 1420 } 1421 } 1422 } else { 1423 /* 1424 * If current entry doesn't have matching formats, set 1425 * filter marker for upper level entries. it will be 1426 * cleared if its lower level entries is not filtered. 1427 * 1428 * For lower-level entries, it inherits parent's 1429 * filter bit so that lower level entries of a 1430 * non-filtered entry won't set the filter marker. 1431 */ 1432 if (parent == NULL) 1433 he->filtered |= (1 << type); 1434 else 1435 he->filtered |= (parent->filtered & (1 << type)); 1436 } 1437 } 1438 1439 static void hist_entry__apply_hierarchy_filters(struct hist_entry *he) 1440 { 1441 hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD, 1442 check_thread_entry); 1443 1444 hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO, 1445 perf_hpp__is_dso_entry); 1446 1447 hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL, 1448 perf_hpp__is_sym_entry); 1449 1450 hists__apply_filters(he->hists, he); 1451 } 1452 1453 static struct hist_entry *hierarchy_insert_entry(struct hists *hists, 1454 struct rb_root_cached *root, 1455 struct hist_entry *he, 1456 struct hist_entry *parent_he, 1457 struct perf_hpp_list *hpp_list) 1458 { 1459 struct rb_node **p = &root->rb_root.rb_node; 1460 struct rb_node *parent = NULL; 1461 struct hist_entry *iter, *new; 1462 struct perf_hpp_fmt *fmt; 1463 int64_t cmp; 1464 bool leftmost = true; 1465 1466 while (*p != NULL) { 1467 parent = *p; 1468 iter = rb_entry(parent, struct hist_entry, rb_node_in); 1469 1470 cmp = 0; 1471 perf_hpp_list__for_each_sort_list(hpp_list, fmt) { 1472 cmp = fmt->collapse(fmt, iter, he); 1473 if (cmp) 1474 break; 1475 } 1476 1477 if (!cmp) { 1478 he_stat__add_stat(&iter->stat, &he->stat); 1479 return iter; 1480 } 1481 1482 if (cmp < 0) 1483 p = &parent->rb_left; 1484 else { 1485 p = &parent->rb_right; 1486 leftmost = false; 1487 } 1488 } 1489 1490 new = hist_entry__new(he, true); 1491 if (new == NULL) 1492 return NULL; 1493 1494 hists->nr_entries++; 1495 1496 /* save related format list for output */ 1497 new->hpp_list = hpp_list; 1498 new->parent_he = parent_he; 1499 1500 hist_entry__apply_hierarchy_filters(new); 1501 1502 /* some fields are now passed to 'new' */ 1503 perf_hpp_list__for_each_sort_list(hpp_list, fmt) { 1504 if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt)) 1505 he->trace_output = NULL; 1506 else 1507 new->trace_output = NULL; 1508 1509 if (perf_hpp__is_srcline_entry(fmt)) 1510 he->srcline = NULL; 1511 else 1512 new->srcline = NULL; 1513 1514 if (perf_hpp__is_srcfile_entry(fmt)) 1515 he->srcfile = NULL; 1516 else 1517 new->srcfile = NULL; 1518 } 1519 1520 rb_link_node(&new->rb_node_in, parent, p); 1521 rb_insert_color_cached(&new->rb_node_in, root, leftmost); 1522 return new; 1523 } 1524 1525 static int hists__hierarchy_insert_entry(struct hists *hists, 1526 struct rb_root_cached *root, 1527 struct hist_entry *he) 1528 { 1529 struct perf_hpp_list_node *node; 1530 struct hist_entry *new_he = NULL; 1531 struct hist_entry *parent = NULL; 1532 int depth = 0; 1533 int ret = 0; 1534 1535 list_for_each_entry(node, &hists->hpp_formats, list) { 1536 /* skip period (overhead) and elided columns */ 1537 if (node->level == 0 || node->skip) 1538 continue; 1539 1540 /* insert copy of 'he' for each fmt into the hierarchy */ 1541 new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp); 1542 if (new_he == NULL) { 1543 ret = -1; 1544 break; 1545 } 1546 1547 root = &new_he->hroot_in; 1548 new_he->depth = depth++; 1549 parent = new_he; 1550 } 1551 1552 if (new_he) { 1553 new_he->leaf = true; 1554 1555 if (hist_entry__has_callchains(new_he) && 1556 symbol_conf.use_callchain) { 1557 callchain_cursor_reset(&callchain_cursor); 1558 if (callchain_merge(&callchain_cursor, 1559 new_he->callchain, 1560 he->callchain) < 0) 1561 ret = -1; 1562 } 1563 } 1564 1565 /* 'he' is no longer used */ 1566 hist_entry__delete(he); 1567 1568 /* return 0 (or -1) since it already applied filters */ 1569 return ret; 1570 } 1571 1572 static int hists__collapse_insert_entry(struct hists *hists, 1573 struct rb_root_cached *root, 1574 struct hist_entry *he) 1575 { 1576 struct rb_node **p = &root->rb_root.rb_node; 1577 struct rb_node *parent = NULL; 1578 struct hist_entry *iter; 1579 int64_t cmp; 1580 bool leftmost = true; 1581 1582 if (symbol_conf.report_hierarchy) 1583 return hists__hierarchy_insert_entry(hists, root, he); 1584 1585 while (*p != NULL) { 1586 parent = *p; 1587 iter = rb_entry(parent, struct hist_entry, rb_node_in); 1588 1589 cmp = hist_entry__collapse(iter, he); 1590 1591 if (!cmp) { 1592 int ret = 0; 1593 1594 he_stat__add_stat(&iter->stat, &he->stat); 1595 if (symbol_conf.cumulate_callchain) 1596 he_stat__add_stat(iter->stat_acc, he->stat_acc); 1597 1598 if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) { 1599 callchain_cursor_reset(&callchain_cursor); 1600 if (callchain_merge(&callchain_cursor, 1601 iter->callchain, 1602 he->callchain) < 0) 1603 ret = -1; 1604 } 1605 hist_entry__delete(he); 1606 return ret; 1607 } 1608 1609 if (cmp < 0) 1610 p = &(*p)->rb_left; 1611 else { 1612 p = &(*p)->rb_right; 1613 leftmost = false; 1614 } 1615 } 1616 hists->nr_entries++; 1617 1618 rb_link_node(&he->rb_node_in, parent, p); 1619 rb_insert_color_cached(&he->rb_node_in, root, leftmost); 1620 return 1; 1621 } 1622 1623 struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists) 1624 { 1625 struct rb_root_cached *root; 1626 1627 pthread_mutex_lock(&hists->lock); 1628 1629 root = hists->entries_in; 1630 if (++hists->entries_in > &hists->entries_in_array[1]) 1631 hists->entries_in = &hists->entries_in_array[0]; 1632 1633 pthread_mutex_unlock(&hists->lock); 1634 1635 return root; 1636 } 1637 1638 static void hists__apply_filters(struct hists *hists, struct hist_entry *he) 1639 { 1640 hists__filter_entry_by_dso(hists, he); 1641 hists__filter_entry_by_thread(hists, he); 1642 hists__filter_entry_by_symbol(hists, he); 1643 hists__filter_entry_by_socket(hists, he); 1644 } 1645 1646 int hists__collapse_resort(struct hists *hists, struct ui_progress *prog) 1647 { 1648 struct rb_root_cached *root; 1649 struct rb_node *next; 1650 struct hist_entry *n; 1651 int ret; 1652 1653 if (!hists__has(hists, need_collapse)) 1654 return 0; 1655 1656 hists->nr_entries = 0; 1657 1658 root = hists__get_rotate_entries_in(hists); 1659 1660 next = rb_first_cached(root); 1661 1662 while (next) { 1663 if (session_done()) 1664 break; 1665 n = rb_entry(next, struct hist_entry, rb_node_in); 1666 next = rb_next(&n->rb_node_in); 1667 1668 rb_erase_cached(&n->rb_node_in, root); 1669 ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n); 1670 if (ret < 0) 1671 return -1; 1672 1673 if (ret) { 1674 /* 1675 * If it wasn't combined with one of the entries already 1676 * collapsed, we need to apply the filters that may have 1677 * been set by, say, the hist_browser. 1678 */ 1679 hists__apply_filters(hists, n); 1680 } 1681 if (prog) 1682 ui_progress__update(prog, 1); 1683 } 1684 return 0; 1685 } 1686 1687 static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b) 1688 { 1689 struct hists *hists = a->hists; 1690 struct perf_hpp_fmt *fmt; 1691 int64_t cmp = 0; 1692 1693 hists__for_each_sort_list(hists, fmt) { 1694 if (perf_hpp__should_skip(fmt, a->hists)) 1695 continue; 1696 1697 cmp = fmt->sort(fmt, a, b); 1698 if (cmp) 1699 break; 1700 } 1701 1702 return cmp; 1703 } 1704 1705 static void hists__reset_filter_stats(struct hists *hists) 1706 { 1707 hists->nr_non_filtered_entries = 0; 1708 hists->stats.total_non_filtered_period = 0; 1709 } 1710 1711 void hists__reset_stats(struct hists *hists) 1712 { 1713 hists->nr_entries = 0; 1714 hists->stats.total_period = 0; 1715 1716 hists__reset_filter_stats(hists); 1717 } 1718 1719 static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h) 1720 { 1721 hists->nr_non_filtered_entries++; 1722 hists->stats.total_non_filtered_period += h->stat.period; 1723 } 1724 1725 void hists__inc_stats(struct hists *hists, struct hist_entry *h) 1726 { 1727 if (!h->filtered) 1728 hists__inc_filter_stats(hists, h); 1729 1730 hists->nr_entries++; 1731 hists->stats.total_period += h->stat.period; 1732 } 1733 1734 static void hierarchy_recalc_total_periods(struct hists *hists) 1735 { 1736 struct rb_node *node; 1737 struct hist_entry *he; 1738 1739 node = rb_first_cached(&hists->entries); 1740 1741 hists->stats.total_period = 0; 1742 hists->stats.total_non_filtered_period = 0; 1743 1744 /* 1745 * recalculate total period using top-level entries only 1746 * since lower level entries only see non-filtered entries 1747 * but upper level entries have sum of both entries. 1748 */ 1749 while (node) { 1750 he = rb_entry(node, struct hist_entry, rb_node); 1751 node = rb_next(node); 1752 1753 hists->stats.total_period += he->stat.period; 1754 if (!he->filtered) 1755 hists->stats.total_non_filtered_period += he->stat.period; 1756 } 1757 } 1758 1759 static void hierarchy_insert_output_entry(struct rb_root_cached *root, 1760 struct hist_entry *he) 1761 { 1762 struct rb_node **p = &root->rb_root.rb_node; 1763 struct rb_node *parent = NULL; 1764 struct hist_entry *iter; 1765 struct perf_hpp_fmt *fmt; 1766 bool leftmost = true; 1767 1768 while (*p != NULL) { 1769 parent = *p; 1770 iter = rb_entry(parent, struct hist_entry, rb_node); 1771 1772 if (hist_entry__sort(he, iter) > 0) 1773 p = &parent->rb_left; 1774 else { 1775 p = &parent->rb_right; 1776 leftmost = false; 1777 } 1778 } 1779 1780 rb_link_node(&he->rb_node, parent, p); 1781 rb_insert_color_cached(&he->rb_node, root, leftmost); 1782 1783 /* update column width of dynamic entry */ 1784 perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) { 1785 if (perf_hpp__is_dynamic_entry(fmt)) 1786 fmt->sort(fmt, he, NULL); 1787 } 1788 } 1789 1790 static void hists__hierarchy_output_resort(struct hists *hists, 1791 struct ui_progress *prog, 1792 struct rb_root_cached *root_in, 1793 struct rb_root_cached *root_out, 1794 u64 min_callchain_hits, 1795 bool use_callchain) 1796 { 1797 struct rb_node *node; 1798 struct hist_entry *he; 1799 1800 *root_out = RB_ROOT_CACHED; 1801 node = rb_first_cached(root_in); 1802 1803 while (node) { 1804 he = rb_entry(node, struct hist_entry, rb_node_in); 1805 node = rb_next(node); 1806 1807 hierarchy_insert_output_entry(root_out, he); 1808 1809 if (prog) 1810 ui_progress__update(prog, 1); 1811 1812 hists->nr_entries++; 1813 if (!he->filtered) { 1814 hists->nr_non_filtered_entries++; 1815 hists__calc_col_len(hists, he); 1816 } 1817 1818 if (!he->leaf) { 1819 hists__hierarchy_output_resort(hists, prog, 1820 &he->hroot_in, 1821 &he->hroot_out, 1822 min_callchain_hits, 1823 use_callchain); 1824 continue; 1825 } 1826 1827 if (!use_callchain) 1828 continue; 1829 1830 if (callchain_param.mode == CHAIN_GRAPH_REL) { 1831 u64 total = he->stat.period; 1832 1833 if (symbol_conf.cumulate_callchain) 1834 total = he->stat_acc->period; 1835 1836 min_callchain_hits = total * (callchain_param.min_percent / 100); 1837 } 1838 1839 callchain_param.sort(&he->sorted_chain, he->callchain, 1840 min_callchain_hits, &callchain_param); 1841 } 1842 } 1843 1844 static void __hists__insert_output_entry(struct rb_root_cached *entries, 1845 struct hist_entry *he, 1846 u64 min_callchain_hits, 1847 bool use_callchain) 1848 { 1849 struct rb_node **p = &entries->rb_root.rb_node; 1850 struct rb_node *parent = NULL; 1851 struct hist_entry *iter; 1852 struct perf_hpp_fmt *fmt; 1853 bool leftmost = true; 1854 1855 if (use_callchain) { 1856 if (callchain_param.mode == CHAIN_GRAPH_REL) { 1857 u64 total = he->stat.period; 1858 1859 if (symbol_conf.cumulate_callchain) 1860 total = he->stat_acc->period; 1861 1862 min_callchain_hits = total * (callchain_param.min_percent / 100); 1863 } 1864 callchain_param.sort(&he->sorted_chain, he->callchain, 1865 min_callchain_hits, &callchain_param); 1866 } 1867 1868 while (*p != NULL) { 1869 parent = *p; 1870 iter = rb_entry(parent, struct hist_entry, rb_node); 1871 1872 if (hist_entry__sort(he, iter) > 0) 1873 p = &(*p)->rb_left; 1874 else { 1875 p = &(*p)->rb_right; 1876 leftmost = false; 1877 } 1878 } 1879 1880 rb_link_node(&he->rb_node, parent, p); 1881 rb_insert_color_cached(&he->rb_node, entries, leftmost); 1882 1883 perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) { 1884 if (perf_hpp__is_dynamic_entry(fmt) && 1885 perf_hpp__defined_dynamic_entry(fmt, he->hists)) 1886 fmt->sort(fmt, he, NULL); /* update column width */ 1887 } 1888 } 1889 1890 static void output_resort(struct hists *hists, struct ui_progress *prog, 1891 bool use_callchain, hists__resort_cb_t cb, 1892 void *cb_arg) 1893 { 1894 struct rb_root_cached *root; 1895 struct rb_node *next; 1896 struct hist_entry *n; 1897 u64 callchain_total; 1898 u64 min_callchain_hits; 1899 1900 callchain_total = hists->callchain_period; 1901 if (symbol_conf.filter_relative) 1902 callchain_total = hists->callchain_non_filtered_period; 1903 1904 min_callchain_hits = callchain_total * (callchain_param.min_percent / 100); 1905 1906 hists__reset_stats(hists); 1907 hists__reset_col_len(hists); 1908 1909 if (symbol_conf.report_hierarchy) { 1910 hists__hierarchy_output_resort(hists, prog, 1911 &hists->entries_collapsed, 1912 &hists->entries, 1913 min_callchain_hits, 1914 use_callchain); 1915 hierarchy_recalc_total_periods(hists); 1916 return; 1917 } 1918 1919 if (hists__has(hists, need_collapse)) 1920 root = &hists->entries_collapsed; 1921 else 1922 root = hists->entries_in; 1923 1924 next = rb_first_cached(root); 1925 hists->entries = RB_ROOT_CACHED; 1926 1927 while (next) { 1928 n = rb_entry(next, struct hist_entry, rb_node_in); 1929 next = rb_next(&n->rb_node_in); 1930 1931 if (cb && cb(n, cb_arg)) 1932 continue; 1933 1934 __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain); 1935 hists__inc_stats(hists, n); 1936 1937 if (!n->filtered) 1938 hists__calc_col_len(hists, n); 1939 1940 if (prog) 1941 ui_progress__update(prog, 1); 1942 } 1943 } 1944 1945 void evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog, 1946 hists__resort_cb_t cb, void *cb_arg) 1947 { 1948 bool use_callchain; 1949 1950 if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph) 1951 use_callchain = evsel__has_callchain(evsel); 1952 else 1953 use_callchain = symbol_conf.use_callchain; 1954 1955 use_callchain |= symbol_conf.show_branchflag_count; 1956 1957 output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg); 1958 } 1959 1960 void evsel__output_resort(struct evsel *evsel, struct ui_progress *prog) 1961 { 1962 return evsel__output_resort_cb(evsel, prog, NULL, NULL); 1963 } 1964 1965 void hists__output_resort(struct hists *hists, struct ui_progress *prog) 1966 { 1967 output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL); 1968 } 1969 1970 void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog, 1971 hists__resort_cb_t cb) 1972 { 1973 output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL); 1974 } 1975 1976 static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd) 1977 { 1978 if (he->leaf || hmd == HMD_FORCE_SIBLING) 1979 return false; 1980 1981 if (he->unfolded || hmd == HMD_FORCE_CHILD) 1982 return true; 1983 1984 return false; 1985 } 1986 1987 struct rb_node *rb_hierarchy_last(struct rb_node *node) 1988 { 1989 struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node); 1990 1991 while (can_goto_child(he, HMD_NORMAL)) { 1992 node = rb_last(&he->hroot_out.rb_root); 1993 he = rb_entry(node, struct hist_entry, rb_node); 1994 } 1995 return node; 1996 } 1997 1998 struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd) 1999 { 2000 struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node); 2001 2002 if (can_goto_child(he, hmd)) 2003 node = rb_first_cached(&he->hroot_out); 2004 else 2005 node = rb_next(node); 2006 2007 while (node == NULL) { 2008 he = he->parent_he; 2009 if (he == NULL) 2010 break; 2011 2012 node = rb_next(&he->rb_node); 2013 } 2014 return node; 2015 } 2016 2017 struct rb_node *rb_hierarchy_prev(struct rb_node *node) 2018 { 2019 struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node); 2020 2021 node = rb_prev(node); 2022 if (node) 2023 return rb_hierarchy_last(node); 2024 2025 he = he->parent_he; 2026 if (he == NULL) 2027 return NULL; 2028 2029 return &he->rb_node; 2030 } 2031 2032 bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit) 2033 { 2034 struct rb_node *node; 2035 struct hist_entry *child; 2036 float percent; 2037 2038 if (he->leaf) 2039 return false; 2040 2041 node = rb_first_cached(&he->hroot_out); 2042 child = rb_entry(node, struct hist_entry, rb_node); 2043 2044 while (node && child->filtered) { 2045 node = rb_next(node); 2046 child = rb_entry(node, struct hist_entry, rb_node); 2047 } 2048 2049 if (node) 2050 percent = hist_entry__get_percent_limit(child); 2051 else 2052 percent = 0; 2053 2054 return node && percent >= limit; 2055 } 2056 2057 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h, 2058 enum hist_filter filter) 2059 { 2060 h->filtered &= ~(1 << filter); 2061 2062 if (symbol_conf.report_hierarchy) { 2063 struct hist_entry *parent = h->parent_he; 2064 2065 while (parent) { 2066 he_stat__add_stat(&parent->stat, &h->stat); 2067 2068 parent->filtered &= ~(1 << filter); 2069 2070 if (parent->filtered) 2071 goto next; 2072 2073 /* force fold unfiltered entry for simplicity */ 2074 parent->unfolded = false; 2075 parent->has_no_entry = false; 2076 parent->row_offset = 0; 2077 parent->nr_rows = 0; 2078 next: 2079 parent = parent->parent_he; 2080 } 2081 } 2082 2083 if (h->filtered) 2084 return; 2085 2086 /* force fold unfiltered entry for simplicity */ 2087 h->unfolded = false; 2088 h->has_no_entry = false; 2089 h->row_offset = 0; 2090 h->nr_rows = 0; 2091 2092 hists->stats.nr_non_filtered_samples += h->stat.nr_events; 2093 2094 hists__inc_filter_stats(hists, h); 2095 hists__calc_col_len(hists, h); 2096 } 2097 2098 2099 static bool hists__filter_entry_by_dso(struct hists *hists, 2100 struct hist_entry *he) 2101 { 2102 if (hists->dso_filter != NULL && 2103 (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) { 2104 he->filtered |= (1 << HIST_FILTER__DSO); 2105 return true; 2106 } 2107 2108 return false; 2109 } 2110 2111 static bool hists__filter_entry_by_thread(struct hists *hists, 2112 struct hist_entry *he) 2113 { 2114 if (hists->thread_filter != NULL && 2115 he->thread != hists->thread_filter) { 2116 he->filtered |= (1 << HIST_FILTER__THREAD); 2117 return true; 2118 } 2119 2120 return false; 2121 } 2122 2123 static bool hists__filter_entry_by_symbol(struct hists *hists, 2124 struct hist_entry *he) 2125 { 2126 if (hists->symbol_filter_str != NULL && 2127 (!he->ms.sym || strstr(he->ms.sym->name, 2128 hists->symbol_filter_str) == NULL)) { 2129 he->filtered |= (1 << HIST_FILTER__SYMBOL); 2130 return true; 2131 } 2132 2133 return false; 2134 } 2135 2136 static bool hists__filter_entry_by_socket(struct hists *hists, 2137 struct hist_entry *he) 2138 { 2139 if ((hists->socket_filter > -1) && 2140 (he->socket != hists->socket_filter)) { 2141 he->filtered |= (1 << HIST_FILTER__SOCKET); 2142 return true; 2143 } 2144 2145 return false; 2146 } 2147 2148 typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he); 2149 2150 static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter) 2151 { 2152 struct rb_node *nd; 2153 2154 hists->stats.nr_non_filtered_samples = 0; 2155 2156 hists__reset_filter_stats(hists); 2157 hists__reset_col_len(hists); 2158 2159 for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) { 2160 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); 2161 2162 if (filter(hists, h)) 2163 continue; 2164 2165 hists__remove_entry_filter(hists, h, type); 2166 } 2167 } 2168 2169 static void resort_filtered_entry(struct rb_root_cached *root, 2170 struct hist_entry *he) 2171 { 2172 struct rb_node **p = &root->rb_root.rb_node; 2173 struct rb_node *parent = NULL; 2174 struct hist_entry *iter; 2175 struct rb_root_cached new_root = RB_ROOT_CACHED; 2176 struct rb_node *nd; 2177 bool leftmost = true; 2178 2179 while (*p != NULL) { 2180 parent = *p; 2181 iter = rb_entry(parent, struct hist_entry, rb_node); 2182 2183 if (hist_entry__sort(he, iter) > 0) 2184 p = &(*p)->rb_left; 2185 else { 2186 p = &(*p)->rb_right; 2187 leftmost = false; 2188 } 2189 } 2190 2191 rb_link_node(&he->rb_node, parent, p); 2192 rb_insert_color_cached(&he->rb_node, root, leftmost); 2193 2194 if (he->leaf || he->filtered) 2195 return; 2196 2197 nd = rb_first_cached(&he->hroot_out); 2198 while (nd) { 2199 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); 2200 2201 nd = rb_next(nd); 2202 rb_erase_cached(&h->rb_node, &he->hroot_out); 2203 2204 resort_filtered_entry(&new_root, h); 2205 } 2206 2207 he->hroot_out = new_root; 2208 } 2209 2210 static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg) 2211 { 2212 struct rb_node *nd; 2213 struct rb_root_cached new_root = RB_ROOT_CACHED; 2214 2215 hists->stats.nr_non_filtered_samples = 0; 2216 2217 hists__reset_filter_stats(hists); 2218 hists__reset_col_len(hists); 2219 2220 nd = rb_first_cached(&hists->entries); 2221 while (nd) { 2222 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); 2223 int ret; 2224 2225 ret = hist_entry__filter(h, type, arg); 2226 2227 /* 2228 * case 1. non-matching type 2229 * zero out the period, set filter marker and move to child 2230 */ 2231 if (ret < 0) { 2232 memset(&h->stat, 0, sizeof(h->stat)); 2233 h->filtered |= (1 << type); 2234 2235 nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD); 2236 } 2237 /* 2238 * case 2. matched type (filter out) 2239 * set filter marker and move to next 2240 */ 2241 else if (ret == 1) { 2242 h->filtered |= (1 << type); 2243 2244 nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING); 2245 } 2246 /* 2247 * case 3. ok (not filtered) 2248 * add period to hists and parents, erase the filter marker 2249 * and move to next sibling 2250 */ 2251 else { 2252 hists__remove_entry_filter(hists, h, type); 2253 2254 nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING); 2255 } 2256 } 2257 2258 hierarchy_recalc_total_periods(hists); 2259 2260 /* 2261 * resort output after applying a new filter since filter in a lower 2262 * hierarchy can change periods in a upper hierarchy. 2263 */ 2264 nd = rb_first_cached(&hists->entries); 2265 while (nd) { 2266 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); 2267 2268 nd = rb_next(nd); 2269 rb_erase_cached(&h->rb_node, &hists->entries); 2270 2271 resort_filtered_entry(&new_root, h); 2272 } 2273 2274 hists->entries = new_root; 2275 } 2276 2277 void hists__filter_by_thread(struct hists *hists) 2278 { 2279 if (symbol_conf.report_hierarchy) 2280 hists__filter_hierarchy(hists, HIST_FILTER__THREAD, 2281 hists->thread_filter); 2282 else 2283 hists__filter_by_type(hists, HIST_FILTER__THREAD, 2284 hists__filter_entry_by_thread); 2285 } 2286 2287 void hists__filter_by_dso(struct hists *hists) 2288 { 2289 if (symbol_conf.report_hierarchy) 2290 hists__filter_hierarchy(hists, HIST_FILTER__DSO, 2291 hists->dso_filter); 2292 else 2293 hists__filter_by_type(hists, HIST_FILTER__DSO, 2294 hists__filter_entry_by_dso); 2295 } 2296 2297 void hists__filter_by_symbol(struct hists *hists) 2298 { 2299 if (symbol_conf.report_hierarchy) 2300 hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL, 2301 hists->symbol_filter_str); 2302 else 2303 hists__filter_by_type(hists, HIST_FILTER__SYMBOL, 2304 hists__filter_entry_by_symbol); 2305 } 2306 2307 void hists__filter_by_socket(struct hists *hists) 2308 { 2309 if (symbol_conf.report_hierarchy) 2310 hists__filter_hierarchy(hists, HIST_FILTER__SOCKET, 2311 &hists->socket_filter); 2312 else 2313 hists__filter_by_type(hists, HIST_FILTER__SOCKET, 2314 hists__filter_entry_by_socket); 2315 } 2316 2317 void events_stats__inc(struct events_stats *stats, u32 type) 2318 { 2319 ++stats->nr_events[0]; 2320 ++stats->nr_events[type]; 2321 } 2322 2323 void hists__inc_nr_events(struct hists *hists, u32 type) 2324 { 2325 events_stats__inc(&hists->stats, type); 2326 } 2327 2328 void hists__inc_nr_samples(struct hists *hists, bool filtered) 2329 { 2330 events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE); 2331 if (!filtered) 2332 hists->stats.nr_non_filtered_samples++; 2333 } 2334 2335 static struct hist_entry *hists__add_dummy_entry(struct hists *hists, 2336 struct hist_entry *pair) 2337 { 2338 struct rb_root_cached *root; 2339 struct rb_node **p; 2340 struct rb_node *parent = NULL; 2341 struct hist_entry *he; 2342 int64_t cmp; 2343 bool leftmost = true; 2344 2345 if (hists__has(hists, need_collapse)) 2346 root = &hists->entries_collapsed; 2347 else 2348 root = hists->entries_in; 2349 2350 p = &root->rb_root.rb_node; 2351 2352 while (*p != NULL) { 2353 parent = *p; 2354 he = rb_entry(parent, struct hist_entry, rb_node_in); 2355 2356 cmp = hist_entry__collapse(he, pair); 2357 2358 if (!cmp) 2359 goto out; 2360 2361 if (cmp < 0) 2362 p = &(*p)->rb_left; 2363 else { 2364 p = &(*p)->rb_right; 2365 leftmost = false; 2366 } 2367 } 2368 2369 he = hist_entry__new(pair, true); 2370 if (he) { 2371 memset(&he->stat, 0, sizeof(he->stat)); 2372 he->hists = hists; 2373 if (symbol_conf.cumulate_callchain) 2374 memset(he->stat_acc, 0, sizeof(he->stat)); 2375 rb_link_node(&he->rb_node_in, parent, p); 2376 rb_insert_color_cached(&he->rb_node_in, root, leftmost); 2377 hists__inc_stats(hists, he); 2378 he->dummy = true; 2379 } 2380 out: 2381 return he; 2382 } 2383 2384 static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists, 2385 struct rb_root_cached *root, 2386 struct hist_entry *pair) 2387 { 2388 struct rb_node **p; 2389 struct rb_node *parent = NULL; 2390 struct hist_entry *he; 2391 struct perf_hpp_fmt *fmt; 2392 bool leftmost = true; 2393 2394 p = &root->rb_root.rb_node; 2395 while (*p != NULL) { 2396 int64_t cmp = 0; 2397 2398 parent = *p; 2399 he = rb_entry(parent, struct hist_entry, rb_node_in); 2400 2401 perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) { 2402 cmp = fmt->collapse(fmt, he, pair); 2403 if (cmp) 2404 break; 2405 } 2406 if (!cmp) 2407 goto out; 2408 2409 if (cmp < 0) 2410 p = &parent->rb_left; 2411 else { 2412 p = &parent->rb_right; 2413 leftmost = false; 2414 } 2415 } 2416 2417 he = hist_entry__new(pair, true); 2418 if (he) { 2419 rb_link_node(&he->rb_node_in, parent, p); 2420 rb_insert_color_cached(&he->rb_node_in, root, leftmost); 2421 2422 he->dummy = true; 2423 he->hists = hists; 2424 memset(&he->stat, 0, sizeof(he->stat)); 2425 hists__inc_stats(hists, he); 2426 } 2427 out: 2428 return he; 2429 } 2430 2431 static struct hist_entry *hists__find_entry(struct hists *hists, 2432 struct hist_entry *he) 2433 { 2434 struct rb_node *n; 2435 2436 if (hists__has(hists, need_collapse)) 2437 n = hists->entries_collapsed.rb_root.rb_node; 2438 else 2439 n = hists->entries_in->rb_root.rb_node; 2440 2441 while (n) { 2442 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in); 2443 int64_t cmp = hist_entry__collapse(iter, he); 2444 2445 if (cmp < 0) 2446 n = n->rb_left; 2447 else if (cmp > 0) 2448 n = n->rb_right; 2449 else 2450 return iter; 2451 } 2452 2453 return NULL; 2454 } 2455 2456 static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root, 2457 struct hist_entry *he) 2458 { 2459 struct rb_node *n = root->rb_root.rb_node; 2460 2461 while (n) { 2462 struct hist_entry *iter; 2463 struct perf_hpp_fmt *fmt; 2464 int64_t cmp = 0; 2465 2466 iter = rb_entry(n, struct hist_entry, rb_node_in); 2467 perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) { 2468 cmp = fmt->collapse(fmt, iter, he); 2469 if (cmp) 2470 break; 2471 } 2472 2473 if (cmp < 0) 2474 n = n->rb_left; 2475 else if (cmp > 0) 2476 n = n->rb_right; 2477 else 2478 return iter; 2479 } 2480 2481 return NULL; 2482 } 2483 2484 static void hists__match_hierarchy(struct rb_root_cached *leader_root, 2485 struct rb_root_cached *other_root) 2486 { 2487 struct rb_node *nd; 2488 struct hist_entry *pos, *pair; 2489 2490 for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) { 2491 pos = rb_entry(nd, struct hist_entry, rb_node_in); 2492 pair = hists__find_hierarchy_entry(other_root, pos); 2493 2494 if (pair) { 2495 hist_entry__add_pair(pair, pos); 2496 hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in); 2497 } 2498 } 2499 } 2500 2501 /* 2502 * Look for pairs to link to the leader buckets (hist_entries): 2503 */ 2504 void hists__match(struct hists *leader, struct hists *other) 2505 { 2506 struct rb_root_cached *root; 2507 struct rb_node *nd; 2508 struct hist_entry *pos, *pair; 2509 2510 if (symbol_conf.report_hierarchy) { 2511 /* hierarchy report always collapses entries */ 2512 return hists__match_hierarchy(&leader->entries_collapsed, 2513 &other->entries_collapsed); 2514 } 2515 2516 if (hists__has(leader, need_collapse)) 2517 root = &leader->entries_collapsed; 2518 else 2519 root = leader->entries_in; 2520 2521 for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) { 2522 pos = rb_entry(nd, struct hist_entry, rb_node_in); 2523 pair = hists__find_entry(other, pos); 2524 2525 if (pair) 2526 hist_entry__add_pair(pair, pos); 2527 } 2528 } 2529 2530 static int hists__link_hierarchy(struct hists *leader_hists, 2531 struct hist_entry *parent, 2532 struct rb_root_cached *leader_root, 2533 struct rb_root_cached *other_root) 2534 { 2535 struct rb_node *nd; 2536 struct hist_entry *pos, *leader; 2537 2538 for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) { 2539 pos = rb_entry(nd, struct hist_entry, rb_node_in); 2540 2541 if (hist_entry__has_pairs(pos)) { 2542 bool found = false; 2543 2544 list_for_each_entry(leader, &pos->pairs.head, pairs.node) { 2545 if (leader->hists == leader_hists) { 2546 found = true; 2547 break; 2548 } 2549 } 2550 if (!found) 2551 return -1; 2552 } else { 2553 leader = add_dummy_hierarchy_entry(leader_hists, 2554 leader_root, pos); 2555 if (leader == NULL) 2556 return -1; 2557 2558 /* do not point parent in the pos */ 2559 leader->parent_he = parent; 2560 2561 hist_entry__add_pair(pos, leader); 2562 } 2563 2564 if (!pos->leaf) { 2565 if (hists__link_hierarchy(leader_hists, leader, 2566 &leader->hroot_in, 2567 &pos->hroot_in) < 0) 2568 return -1; 2569 } 2570 } 2571 return 0; 2572 } 2573 2574 /* 2575 * Look for entries in the other hists that are not present in the leader, if 2576 * we find them, just add a dummy entry on the leader hists, with period=0, 2577 * nr_events=0, to serve as the list header. 2578 */ 2579 int hists__link(struct hists *leader, struct hists *other) 2580 { 2581 struct rb_root_cached *root; 2582 struct rb_node *nd; 2583 struct hist_entry *pos, *pair; 2584 2585 if (symbol_conf.report_hierarchy) { 2586 /* hierarchy report always collapses entries */ 2587 return hists__link_hierarchy(leader, NULL, 2588 &leader->entries_collapsed, 2589 &other->entries_collapsed); 2590 } 2591 2592 if (hists__has(other, need_collapse)) 2593 root = &other->entries_collapsed; 2594 else 2595 root = other->entries_in; 2596 2597 for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) { 2598 pos = rb_entry(nd, struct hist_entry, rb_node_in); 2599 2600 if (!hist_entry__has_pairs(pos)) { 2601 pair = hists__add_dummy_entry(leader, pos); 2602 if (pair == NULL) 2603 return -1; 2604 hist_entry__add_pair(pos, pair); 2605 } 2606 } 2607 2608 return 0; 2609 } 2610 2611 int hists__unlink(struct hists *hists) 2612 { 2613 struct rb_root_cached *root; 2614 struct rb_node *nd; 2615 struct hist_entry *pos; 2616 2617 if (hists__has(hists, need_collapse)) 2618 root = &hists->entries_collapsed; 2619 else 2620 root = hists->entries_in; 2621 2622 for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) { 2623 pos = rb_entry(nd, struct hist_entry, rb_node_in); 2624 list_del_init(&pos->pairs.node); 2625 } 2626 2627 return 0; 2628 } 2629 2630 void hist__account_cycles(struct branch_stack *bs, struct addr_location *al, 2631 struct perf_sample *sample, bool nonany_branch_mode, 2632 u64 *total_cycles) 2633 { 2634 struct branch_info *bi; 2635 struct branch_entry *entries = perf_sample__branch_entries(sample); 2636 2637 /* If we have branch cycles always annotate them. */ 2638 if (bs && bs->nr && entries[0].flags.cycles) { 2639 int i; 2640 2641 bi = sample__resolve_bstack(sample, al); 2642 if (bi) { 2643 struct addr_map_symbol *prev = NULL; 2644 2645 /* 2646 * Ignore errors, still want to process the 2647 * other entries. 2648 * 2649 * For non standard branch modes always 2650 * force no IPC (prev == NULL) 2651 * 2652 * Note that perf stores branches reversed from 2653 * program order! 2654 */ 2655 for (i = bs->nr - 1; i >= 0; i--) { 2656 addr_map_symbol__account_cycles(&bi[i].from, 2657 nonany_branch_mode ? NULL : prev, 2658 bi[i].flags.cycles); 2659 prev = &bi[i].to; 2660 2661 if (total_cycles) 2662 *total_cycles += bi[i].flags.cycles; 2663 } 2664 free(bi); 2665 } 2666 } 2667 } 2668 2669 size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp) 2670 { 2671 struct evsel *pos; 2672 size_t ret = 0; 2673 2674 evlist__for_each_entry(evlist, pos) { 2675 ret += fprintf(fp, "%s stats:\n", evsel__name(pos)); 2676 ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp); 2677 } 2678 2679 return ret; 2680 } 2681 2682 2683 u64 hists__total_period(struct hists *hists) 2684 { 2685 return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period : 2686 hists->stats.total_period; 2687 } 2688 2689 int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq) 2690 { 2691 char unit; 2692 int printed; 2693 const struct dso *dso = hists->dso_filter; 2694 struct thread *thread = hists->thread_filter; 2695 int socket_id = hists->socket_filter; 2696 unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE]; 2697 u64 nr_events = hists->stats.total_period; 2698 struct evsel *evsel = hists_to_evsel(hists); 2699 const char *ev_name = evsel__name(evsel); 2700 char buf[512], sample_freq_str[64] = ""; 2701 size_t buflen = sizeof(buf); 2702 char ref[30] = " show reference callgraph, "; 2703 bool enable_ref = false; 2704 2705 if (symbol_conf.filter_relative) { 2706 nr_samples = hists->stats.nr_non_filtered_samples; 2707 nr_events = hists->stats.total_non_filtered_period; 2708 } 2709 2710 if (evsel__is_group_event(evsel)) { 2711 struct evsel *pos; 2712 2713 evsel__group_desc(evsel, buf, buflen); 2714 ev_name = buf; 2715 2716 for_each_group_member(pos, evsel) { 2717 struct hists *pos_hists = evsel__hists(pos); 2718 2719 if (symbol_conf.filter_relative) { 2720 nr_samples += pos_hists->stats.nr_non_filtered_samples; 2721 nr_events += pos_hists->stats.total_non_filtered_period; 2722 } else { 2723 nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE]; 2724 nr_events += pos_hists->stats.total_period; 2725 } 2726 } 2727 } 2728 2729 if (symbol_conf.show_ref_callgraph && 2730 strstr(ev_name, "call-graph=no")) 2731 enable_ref = true; 2732 2733 if (show_freq) 2734 scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->core.attr.sample_freq); 2735 2736 nr_samples = convert_unit(nr_samples, &unit); 2737 printed = scnprintf(bf, size, 2738 "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64, 2739 nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "", 2740 ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events); 2741 2742 2743 if (hists->uid_filter_str) 2744 printed += snprintf(bf + printed, size - printed, 2745 ", UID: %s", hists->uid_filter_str); 2746 if (thread) { 2747 if (hists__has(hists, thread)) { 2748 printed += scnprintf(bf + printed, size - printed, 2749 ", Thread: %s(%d)", 2750 (thread->comm_set ? thread__comm_str(thread) : ""), 2751 thread->tid); 2752 } else { 2753 printed += scnprintf(bf + printed, size - printed, 2754 ", Thread: %s", 2755 (thread->comm_set ? thread__comm_str(thread) : "")); 2756 } 2757 } 2758 if (dso) 2759 printed += scnprintf(bf + printed, size - printed, 2760 ", DSO: %s", dso->short_name); 2761 if (socket_id > -1) 2762 printed += scnprintf(bf + printed, size - printed, 2763 ", Processor Socket: %d", socket_id); 2764 2765 return printed; 2766 } 2767 2768 int parse_filter_percentage(const struct option *opt __maybe_unused, 2769 const char *arg, int unset __maybe_unused) 2770 { 2771 if (!strcmp(arg, "relative")) 2772 symbol_conf.filter_relative = true; 2773 else if (!strcmp(arg, "absolute")) 2774 symbol_conf.filter_relative = false; 2775 else { 2776 pr_debug("Invalid percentage: %s\n", arg); 2777 return -1; 2778 } 2779 2780 return 0; 2781 } 2782 2783 int perf_hist_config(const char *var, const char *value) 2784 { 2785 if (!strcmp(var, "hist.percentage")) 2786 return parse_filter_percentage(NULL, value, 0); 2787 2788 return 0; 2789 } 2790 2791 int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list) 2792 { 2793 memset(hists, 0, sizeof(*hists)); 2794 hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED; 2795 hists->entries_in = &hists->entries_in_array[0]; 2796 hists->entries_collapsed = RB_ROOT_CACHED; 2797 hists->entries = RB_ROOT_CACHED; 2798 pthread_mutex_init(&hists->lock, NULL); 2799 hists->socket_filter = -1; 2800 hists->hpp_list = hpp_list; 2801 INIT_LIST_HEAD(&hists->hpp_formats); 2802 return 0; 2803 } 2804 2805 static void hists__delete_remaining_entries(struct rb_root_cached *root) 2806 { 2807 struct rb_node *node; 2808 struct hist_entry *he; 2809 2810 while (!RB_EMPTY_ROOT(&root->rb_root)) { 2811 node = rb_first_cached(root); 2812 rb_erase_cached(node, root); 2813 2814 he = rb_entry(node, struct hist_entry, rb_node_in); 2815 hist_entry__delete(he); 2816 } 2817 } 2818 2819 static void hists__delete_all_entries(struct hists *hists) 2820 { 2821 hists__delete_entries(hists); 2822 hists__delete_remaining_entries(&hists->entries_in_array[0]); 2823 hists__delete_remaining_entries(&hists->entries_in_array[1]); 2824 hists__delete_remaining_entries(&hists->entries_collapsed); 2825 } 2826 2827 static void hists_evsel__exit(struct evsel *evsel) 2828 { 2829 struct hists *hists = evsel__hists(evsel); 2830 struct perf_hpp_fmt *fmt, *pos; 2831 struct perf_hpp_list_node *node, *tmp; 2832 2833 hists__delete_all_entries(hists); 2834 2835 list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) { 2836 perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) { 2837 list_del_init(&fmt->list); 2838 free(fmt); 2839 } 2840 list_del_init(&node->list); 2841 free(node); 2842 } 2843 } 2844 2845 static int hists_evsel__init(struct evsel *evsel) 2846 { 2847 struct hists *hists = evsel__hists(evsel); 2848 2849 __hists__init(hists, &perf_hpp_list); 2850 return 0; 2851 } 2852 2853 /* 2854 * XXX We probably need a hists_evsel__exit() to free the hist_entries 2855 * stored in the rbtree... 2856 */ 2857 2858 int hists__init(void) 2859 { 2860 int err = evsel__object_config(sizeof(struct hists_evsel), 2861 hists_evsel__init, hists_evsel__exit); 2862 if (err) 2863 fputs("FATAL ERROR: Couldn't setup hists class\n", stderr); 2864 2865 return err; 2866 } 2867 2868 void perf_hpp_list__init(struct perf_hpp_list *list) 2869 { 2870 INIT_LIST_HEAD(&list->fields); 2871 INIT_LIST_HEAD(&list->sorts); 2872 } 2873