// SPDX-License-Identifier: GPL-2.0
#include "callchain.h"
#include "debug.h"
#include "dso.h"
#include "build-id.h"
#include "hist.h"
#include "kvm-stat.h"
#include "map.h"
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
#include "session.h"
#include "namespaces.h"
#include "cgroup.h"
#include "sort.h"
#include "units.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
#include "symbol.h"
#include "thread.h"
#include "block-info.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
#include <inttypes.h>
#include <sys/param.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	if (h->block_info)
		return;
	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose > 0)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 8);

	if (h->ms.map) {
		len = dso__name_len(map__dso(h->ms.map));
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.ms.sym) {
			symlen = (int)h->branch_info->from.ms.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(map__dso(h->branch_info->from.ms.map));
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__new_col_len(hists, HISTC_ADDR_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.ms.sym) {
			symlen = (int)h->branch_info->to.ms.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(map__dso(h->branch_info->to.ms.map));
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__new_col_len(hists, HISTC_ADDR_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}

		if (h->branch_info->srcline_from)
			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
					   strlen(h->branch_info->srcline_from));
		if (h->branch_info->srcline_to)
			hists__new_col_len(hists, HISTC_SRCLINE_TO,
					   strlen(h->branch_info->srcline_to));
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.ms.sym) {
			symlen = (int)h->mem_info->daddr.ms.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen);
		}

		if (h->mem_info->iaddr.ms.sym) {
			symlen = (int)h->mem_info->iaddr.ms.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.ms.map) {
			symlen = dso__name_len(map__dso(h->mem_info->daddr.ms.map));
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}

		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
				   unresolved_col_width + 4 + 2);

		hists__new_col_len(hists, HISTC_MEM_DATA_PAGE_SIZE,
				   unresolved_col_width + 4 + 2);

	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CGROUP, 6);
	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 36 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_MEM_BLOCKED, 10);
	hists__new_col_len(hists, HISTC_LOCAL_INS_LAT, 13);
	hists__new_col_len(hists, HISTC_GLOBAL_INS_LAT, 13);
	hists__new_col_len(hists, HISTC_LOCAL_P_STAGE_CYC, 13);
	hists__new_col_len(hists, HISTC_GLOBAL_P_STAGE_CYC, 13);
	hists__new_col_len(hists, HISTC_ADDR, BITS_PER_LONG / 4 + 2);

	if (symbol_conf.nanosecs)
		hists__new_col_len(hists, HISTC_TIME, 16);
	else
		hists__new_col_len(hists, HISTC_TIME, 12);
	hists__new_col_len(hists, HISTC_CODE_PAGE_SIZE, 6);

	if (h->srcline) {
		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
		hists__new_col_len(hists, HISTC_SRCLINE, len);
	}

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());

	if (h->trace_output)
		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));

	if (h->cgroup) {
		const char *cgrp_name = "unknown";
		struct cgroup *cgrp = cgroup__find(maps__machine(h->ms.maps)->env,
						   h->cgroup);
		if (cgrp != NULL)
			cgrp_name = cgrp->name;

		hists__new_col_len(hists, HISTC_CGROUP, strlen(cgrp_name));
	}
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static long hist_time(unsigned long htime)
{
	unsigned long time_quantum = symbol_conf.time_quantum;

	if (time_quantum)
		return (htime / time_quantum) * time_quantum;
	return htime;
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period)
{
	he_stat->period += period;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
}
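
/*
 * Exponential decay used when aging the histogram (e.g. by perf top):
 * every pass keeps 7/8 (87.5%) of the current value, so an entry that
 * stops receiving samples fades out over successive passes and, since
 * this is integer math, eventually reaches zero and can be reclaimed
 * by hists__decay_entries().
 */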
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	if (!he->depth) {
		hists->stats.total_period -= diff;
		if (!he->filtered)
			hists->stats.total_non_filtered_period -= diff;
	}

	if (!he->leaf) {
		struct hist_entry *child;
		struct rb_node *node = rb_first_cached(&he->hroot_out);

		while (node) {
			child = rb_entry(node, struct hist_entry, rb_node);
			node = rb_next(node);

			if (hists__decay_entry(hists, child))
				hists__delete_entry(hists, child);
		}
	}

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	struct rb_root_cached *root_in;
	struct rb_root_cached *root_out;

	if (he->parent_he) {
		root_in  = &he->parent_he->hroot_in;
		root_out = &he->parent_he->hroot_out;
	} else {
		if (hists__has(hists, need_collapse))
			root_in = &hists->entries_collapsed;
		else
			root_in = hists->entries_in;
		root_out = &hists->entries;
	}

	rb_erase_cached(&he->rb_node_in, root_in);
	rb_erase_cached(&he->rb_node, root_out);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

struct hist_entry *hists__get_entry(struct hists *hists, int idx)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;
	int i = 0;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (i == idx)
			return n;

		next = rb_next(&n->rb_node);
		i++;
	}

	return NULL;
}

/*
 * histogram, sorted on item, collects periods
 */
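
/*
 * hist_entry__init() starts from a shallow copy of the template and then
 * takes its own references (maps, map, thread) and private copies
 * (branch_info, raw_data, srcline) so that the entry remains valid after
 * the originating sample's resolved resources are released.
 */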
static int hist_entry__init(struct hist_entry *he,
			    struct hist_entry *template,
			    bool sample_self,
			    size_t callchain_size)
{
	*he = *template;
	he->callchain_size = callchain_size;

	if (symbol_conf.cumulate_callchain) {
		he->stat_acc = malloc(sizeof(he->stat));
		if (he->stat_acc == NULL)
			return -ENOMEM;
		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
		if (!sample_self)
			memset(&he->stat, 0, sizeof(he->stat));
	}

	he->ms.maps = maps__get(he->ms.maps);
	he->ms.map = map__get(he->ms.map);

	if (he->branch_info) {
		/*
		 * This branch info was (partially) allocated by
		 * sample__resolve_bstack() and will be freed after new
		 * entries are added, so we need to save a copy.
		 */
		he->branch_info = malloc(sizeof(*he->branch_info));
		if (he->branch_info == NULL)
			goto err;

		memcpy(he->branch_info, template->branch_info,
		       sizeof(*he->branch_info));

		he->branch_info->from.ms.map = map__get(he->branch_info->from.ms.map);
		he->branch_info->to.ms.map = map__get(he->branch_info->to.ms.map);
	}

	if (he->mem_info) {
		he->mem_info->iaddr.ms.map = map__get(he->mem_info->iaddr.ms.map);
		he->mem_info->daddr.ms.map = map__get(he->mem_info->daddr.ms.map);
	}

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_init(he->callchain);

	if (he->raw_data) {
		he->raw_data = memdup(he->raw_data, he->raw_size);
		if (he->raw_data == NULL)
			goto err_infos;
	}

	if (he->srcline && he->srcline != SRCLINE_UNKNOWN) {
		he->srcline = strdup(he->srcline);
		if (he->srcline == NULL)
			goto err_rawdata;
	}

	if (symbol_conf.res_sample) {
		he->res_samples = calloc(sizeof(struct res_sample),
					 symbol_conf.res_sample);
		if (!he->res_samples)
			goto err_srcline;
	}

	INIT_LIST_HEAD(&he->pairs.node);
	he->thread = thread__get(he->thread);
	he->hroot_in  = RB_ROOT_CACHED;
	he->hroot_out = RB_ROOT_CACHED;

	if (!symbol_conf.report_hierarchy)
		he->leaf = true;

	return 0;

err_srcline:
	zfree(&he->srcline);

err_rawdata:
	zfree(&he->raw_data);

err_infos:
	if (he->branch_info) {
		map__put(he->branch_info->from.ms.map);
		map__put(he->branch_info->to.ms.map);
		zfree(&he->branch_info);
	}
	if (he->mem_info) {
		map__put(he->mem_info->iaddr.ms.map);
		map__put(he->mem_info->daddr.ms.map);
	}
err:
	maps__zput(he->ms.maps);
	map__zput(he->ms.map);
	zfree(&he->stat_acc);
	return -ENOMEM;
}

static void *hist_entry__zalloc(size_t size)
{
	return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
	free(ptr);
}

static struct hist_entry_ops default_ops = {
	.new	= hist_entry__zalloc,
	.free	= hist_entry__free,
};

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	struct hist_entry_ops *ops = template->ops;
	size_t callchain_size = 0;
	struct hist_entry *he;
	int err = 0;

	if (!ops)
		ops = template->ops = &default_ops;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = ops->new(callchain_size);
	if (he) {
		err = hist_entry__init(he, template, sample_self, callchain_size);
		if (err) {
			ops->free(he);
			he = NULL;
		}
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
	if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
		return;

	he->hists->callchain_period += period;
	if (!he->filtered)
		he->hists->callchain_non_filtered_period += period;
}

static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       const struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	bool leftmost = true;

	p = &hists->entries_in->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);
		if (!cmp) {
			if (sample_self) {
				he_stat__add_period(&he->stat, period);
				hist_entry__add_callchain_period(he, period);
			}
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			mem_info__zput(entry->mem_info);

			block_info__zput(entry->block_info);

			kvm_info__zput(entry->kvm_info);

			/*
			 * If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				if (he->ms.sym) {
					u64 addr = he->ms.sym->start;
					he->ms.sym = map__find_symbol(entry->ms.map, addr);
				}

				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	if (sample_self)
		hist_entry__add_callchain_period(he, period);
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}
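
/*
 * Pick a uniformly distributed index in [0, high) without modulo bias:
 * in unsigned arithmetic "-high % high" equals 2^32 mod high, i.e. the
 * number of raw values that would be over-represented by a plain
 * "random() % high", so draws below that threshold are rejected and
 * retried. (With a RAND_MAX smaller than 2^32 - 1 the correction is
 * only approximate.)
 */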
static unsigned random_max(unsigned high)
{
	unsigned thresh = -high % high;
	for (;;) {
		unsigned r = random();
		if (r >= thresh)
			return r % high;
	}
}

static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
{
	struct res_sample *r;
	int j;

	if (he->num_res < symbol_conf.res_sample) {
		j = he->num_res++;
	} else {
		j = random_max(symbol_conf.res_sample);
	}
	r = &he->res_samples[j];
	r->time = sample->time;
	r->cpu = sample->cpu;
	r->tid = sample->tid;
}

static struct hist_entry*
__hists__add_entry(struct hists *hists,
		   struct addr_location *al,
		   struct symbol *sym_parent,
		   struct branch_info *bi,
		   struct mem_info *mi,
		   struct kvm_info *ki,
		   struct block_info *block_info,
		   struct perf_sample *sample,
		   bool sample_self,
		   struct hist_entry_ops *ops)
{
	struct namespaces *ns = thread__namespaces(al->thread);
	struct hist_entry entry = {
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.cgroup_id = {
			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
		},
		.cgroup = sample->cgroup,
		.ms = {
			.maps = al->maps,
			.map = al->map,
			.sym = al->sym,
		},
		.srcline = (char *) al->srcline,
		.socket = al->socket,
		.cpu = al->cpu,
		.cpumode = al->cpumode,
		.ip = al->addr,
		.level = al->level,
		.code_page_size = sample->code_page_size,
		.stat = {
			.nr_events = 1,
			.period = sample->period,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists = hists,
		.branch_info = bi,
		.mem_info = mi,
		.kvm_info = ki,
		.block_info = block_info,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
		.ops = ops,
		.time = hist_time(sample->time),
		.weight = sample->weight,
		.ins_lat = sample->ins_lat,
		.p_stage_cyc = sample->p_stage_cyc,
		.simd_flags = sample->simd_flags,
	}, *he = hists__findnew_entry(hists, &entry, al, sample_self);

	if (!hists->has_callchains && he && he->callchain_size != 0)
		hists->has_callchains = true;
	if (he && symbol_conf.res_sample)
		hists__res_sample(he, sample);
	return he;
}

struct hist_entry *hists__add_entry(struct hists *hists,
				    struct addr_location *al,
				    struct symbol *sym_parent,
				    struct branch_info *bi,
				    struct mem_info *mi,
				    struct kvm_info *ki,
				    struct perf_sample *sample,
				    bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
				  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
					struct hist_entry_ops *ops,
					struct addr_location *al,
					struct symbol *sym_parent,
					struct branch_info *bi,
					struct mem_info *mi,
					struct kvm_info *ki,
					struct perf_sample *sample,
					bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
				  sample, sample_self, ops);
}

struct hist_entry *hists__add_entry_block(struct hists *hists,
					  struct addr_location *al,
					  struct block_info *block_info)
{
	struct hist_entry entry = {
		.block_info = block_info,
		.hists = hists,
		.ms = {
			.maps = al->maps,
			.map = al->map,
			.sym = al->sym,
		},
	}, *he = hists__findnew_entry(hists, &entry, al, false);

	return he;
}

static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods. We want sorting to be done on
	 * nr_events * weight, and this is indirectly achieved by
	 * passing period=weight here and in the he_stat__add_period()
	 * function.
	 */
	sample->period = cost;

	he = hists__add_entry(hists, al, iter->parent, NULL, mi, NULL,
			      sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
			     struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	maps__put(al->maps);
	al->maps = maps__get(bi[i].to.ms.maps);
	map__put(al->map);
	al->map = map__get(bi[i].to.ms.map);
	al->sym = bi[i].to.ms.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      NULL, sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}
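
/*
 * Cumulative mode (--children): besides the sampled entry itself, the
 * sample's period is credited to every caller on its callchain, so a
 * function's accumulated overhead includes work done on its behalf.
 * The he_cache below remembers the entries already credited for the
 * current sample so cycles and recursion are only counted once.
 */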
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;
	struct callchain_cursor *cursor = get_tls_callchain_cursor();

	if (cursor == NULL)
		return -ENOMEM;

	callchain_cursor_commit(cursor);

	/*
	 * This is for detecting cycles or recursion so that they're
	 * accumulated only once, preventing entries from exceeding
	 * 100% overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (cursor->nr + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = hists__add_entry(hists, al, iter->parent, NULL, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(get_tls_callchain_cursor());

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(get_tls_callchain_cursor());
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static bool
hist_entry__fast__sym_diff(struct hist_entry *left,
			   struct hist_entry *right)
{
	struct symbol *sym_l = left->ms.sym;
	struct symbol *sym_r = right->ms.sym;

	if (!sym_l && !sym_r)
		return left->ip != right->ip;

	return !!_sort__sym_cmp(sym_l, sym_r);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.maps = al->maps,
			.map = al->map,
			.sym = al->sym,
		},
		.srcline = (char *) al->srcline,
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor, *tls_cursor = get_tls_callchain_cursor();
	bool fast = hists__has(he_tmp.hists, sym);

	if (tls_cursor == NULL)
		return -ENOMEM;

	callchain_cursor_snapshot(&cursor, tls_cursor);

	callchain_cursor_advance(tls_cursor);

	/*
	 * Check if there are duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		/*
		 * For most cases, there are no duplicate entries in the
		 * callchain. The symbols are usually different. Do a quick
		 * check for symbols first.
		 */
		if (fast && hist_entry__fast__sym_diff(he_cache[i], &he_tmp))
			continue;

		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      NULL, sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

/*
 * The ops tables below implement the iteration protocol driven by
 * hist_entry_iter__add(): prepare_entry -> add_single_entry ->
 * { next_entry -> add_next_entry }* -> finish_entry.
 */
const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};

int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;
	struct map *alm = NULL;

	if (al)
		alm = map__get(al->map);

	err = sample__resolve_callchain(iter->sample, get_tls_callchain_cursor(), &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err) {
		map__put(alm);
		return err;
	}

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	map__put(alm);

	return err;
}
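
/*
 * Three comparison flavours walk the same sort list with different
 * per-format callbacks: hist_entry__cmp() (fmt->cmp) matches entries on
 * input, hist_entry__collapse() (fmt->collapse) merges duplicates during
 * the collapse phase, and hist_entry__sort() (fmt->sort) orders the
 * final output.
 */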
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
	struct hist_entry_ops *ops = he->ops;

	thread__zput(he->thread);
	maps__zput(he->ms.maps);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.ms.map);
		map__zput(he->branch_info->to.ms.map);
		zfree_srcline(&he->branch_info->srcline_from);
		zfree_srcline(&he->branch_info->srcline_to);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.ms.map);
		map__zput(he->mem_info->daddr.ms.map);
		mem_info__zput(he->mem_info);
	}

	if (he->block_info)
		block_info__zput(he->block_info);

	if (he->kvm_info)
		kvm_info__zput(he->kvm_info);

	zfree(&he->res_samples);
	zfree(&he->stat_acc);
	zfree_srcline(&he->srcline);
	if (he->srcfile && he->srcfile[0])
		zfree(&he->srcfile);
	free_callchain(he->callchain);
	zfree(&he->trace_output);
	zfree(&he->raw_data);
	ops->free(he);
}

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * which would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
				   struct perf_hpp_fmt *fmt, int printed)
{
	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
		const int width = fmt->width(fmt, hpp, he->hists);

		if (printed < width) {
			advance_hpp(hpp, printed);
			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
		}
	}

	return printed;
}

/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
				       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}

static void hist_entry__check_and_remove_filter(struct hist_entry *he,
						enum hist_filter type,
						fmt_chk_fn check)
{
	struct perf_hpp_fmt *fmt;
	bool type_match = false;
	struct hist_entry *parent = he->parent_he;

	switch (type) {
	case HIST_FILTER__THREAD:
		if (symbol_conf.comm_list == NULL &&
		    symbol_conf.pid_list == NULL &&
		    symbol_conf.tid_list == NULL)
			return;
		break;
	case HIST_FILTER__DSO:
		if (symbol_conf.dso_list == NULL)
			return;
		break;
	case HIST_FILTER__SYMBOL:
		if (symbol_conf.sym_list == NULL)
			return;
		break;
	case HIST_FILTER__PARENT:
	case HIST_FILTER__GUEST:
	case HIST_FILTER__HOST:
	case HIST_FILTER__SOCKET:
	case HIST_FILTER__C2C:
	default:
		return;
	}

	/* if it's filtered by its own fmt, it has to have filter bits */
	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (check(fmt)) {
			type_match = true;
			break;
		}
	}

	if (type_match) {
		/*
		 * If the filter is for the current level entry, propagate
		 * the filter marker to parents. The marker bit was
		 * already set by default so it only needs to clear
		 * non-filtered entries.
		 */
		if (!(he->filtered & (1 << type))) {
			while (parent) {
				parent->filtered &= ~(1 << type);
				parent = parent->parent_he;
			}
		}
	} else {
		/*
		 * If the current entry doesn't have matching formats, set
		 * the filter marker for upper level entries. It will be
		 * cleared if its lower level entries are not filtered.
		 *
		 * For lower-level entries, it inherits the parent's
		 * filter bit so that lower level entries of a
		 * non-filtered entry won't set the filter marker.
		 */
		if (parent == NULL)
			he->filtered |= (1 << type);
		else
			he->filtered |= (parent->filtered & (1 << type));
	}
}

static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
					    check_thread_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
					    perf_hpp__is_dso_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
					    perf_hpp__is_sym_entry);

	hists__apply_filters(he->hists, he);
}

static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
						 struct rb_root_cached *root,
						 struct hist_entry *he,
						 struct hist_entry *parent_he,
						 struct perf_hpp_list *hpp_list)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter, *new;
	struct perf_hpp_fmt *fmt;
	int64_t cmp;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = 0;
		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			return iter;
		}

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	new = hist_entry__new(he, true);
	if (new == NULL)
		return NULL;

	hists->nr_entries++;

	/* save related format list for output */
	new->hpp_list = hpp_list;
	new->parent_he = parent_he;

	hist_entry__apply_hierarchy_filters(new);

	/* some fields are now passed to 'new' */
	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			he->trace_output = NULL;
		else
			new->trace_output = NULL;

		if (perf_hpp__is_srcline_entry(fmt))
			he->srcline = NULL;
		else
			new->srcline = NULL;

		if (perf_hpp__is_srcfile_entry(fmt))
			he->srcfile = NULL;
		else
			new->srcfile = NULL;
	}

	rb_link_node(&new->rb_node_in, parent, p);
	rb_insert_color_cached(&new->rb_node_in, root, leftmost);
	return new;
}

static int hists__hierarchy_insert_entry(struct hists *hists,
					 struct rb_root_cached *root,
					 struct hist_entry *he)
{
	struct perf_hpp_list_node *node;
	struct hist_entry *new_he = NULL;
	struct hist_entry *parent = NULL;
	int depth = 0;
	int ret = 0;

	list_for_each_entry(node, &hists->hpp_formats, list) {
		/* skip period (overhead) and elided columns */
		if (node->level == 0 || node->skip)
			continue;

		/* insert copy of 'he' for each fmt into the hierarchy */
		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
		if (new_he == NULL) {
			ret = -1;
			break;
		}

		root = &new_he->hroot_in;
		new_he->depth = depth++;
		parent = new_he;
	}

	if (new_he) {
		new_he->leaf = true;

		if (hist_entry__has_callchains(new_he) &&
		    symbol_conf.use_callchain) {
			struct callchain_cursor *cursor = get_tls_callchain_cursor();

			if (cursor == NULL)
				return -1;

			callchain_cursor_reset(cursor);
			if (callchain_merge(cursor,
					    new_he->callchain,
					    he->callchain) < 0)
				ret = -1;
		}
	}

	/* 'he' is no longer used */
	hist_entry__delete(he);

	/* return 0 (or -1) since it already applied filters */
	return ret;
}

static int hists__collapse_insert_entry(struct hists *hists,
					struct rb_root_cached *root,
					struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;
	bool leftmost = true;

	if (symbol_conf.report_hierarchy)
		return hists__hierarchy_insert_entry(hists, root, he);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			int ret = 0;

			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
				struct callchain_cursor *cursor = get_tls_callchain_cursor();

				if (cursor != NULL) {
					callchain_cursor_reset(cursor);
					if (callchain_merge(cursor, iter->callchain, he->callchain) < 0)
						ret = -1;
				} else {
					ret = 0;
				}
			}
			hist_entry__delete(he);
			return ret;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, root, leftmost);
	return 1;
}
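
/*
 * hists->entries_in_array[] holds two input trees: new samples go into
 * the tree hists->entries_in currently points at, while the collapse
 * pass rotates to and drains the other one. The mutex only protects
 * the pointer swap, which keeps insertion and resorting (e.g. perf
 * top's collector vs. display threads) largely decoupled.
 */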
struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root_cached *root;

	mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}

int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	int ret;

	if (!hists__has(hists, need_collapse))
		return 0;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first_cached(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase_cached(&n->rb_node_in, root);
		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
		if (ret < 0)
			return -1;

		if (ret) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
	return 0;
}

static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct hists *hists = a->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

static void hierarchy_recalc_total_periods(struct hists *hists)
{
	struct rb_node *node;
	struct hist_entry *he;

	node = rb_first_cached(&hists->entries);

	hists->stats.total_period = 0;
	hists->stats.total_non_filtered_period = 0;

	/*
	 * Recalculate the total period using top-level entries only,
	 * since lower level entries only see non-filtered entries
	 * but upper level entries have the sum of both.
	 */
	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node);
		node = rb_next(node);

		hists->stats.total_period += he->stat.period;
		if (!he->filtered)
			hists->stats.total_non_filtered_period += he->stat.period;
	}
}

static void hierarchy_insert_output_entry(struct rb_root_cached *root,
					  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	/* update column width of dynamic entry */
	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
		if (fmt->init)
			fmt->init(fmt, he);
	}
}

static void hists__hierarchy_output_resort(struct hists *hists,
					   struct ui_progress *prog,
					   struct rb_root_cached *root_in,
					   struct rb_root_cached *root_out,
					   u64 min_callchain_hits,
					   bool use_callchain)
{
	struct rb_node *node;
	struct hist_entry *he;

	*root_out = RB_ROOT_CACHED;
	node = rb_first_cached(root_in);

	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node_in);
		node = rb_next(node);

		hierarchy_insert_output_entry(root_out, he);

		if (prog)
			ui_progress__update(prog, 1);

		hists->nr_entries++;
		if (!he->filtered) {
			hists->nr_non_filtered_entries++;
			hists__calc_col_len(hists, he);
		}

		if (!he->leaf) {
			hists__hierarchy_output_resort(hists, prog,
						       &he->hroot_in,
						       &he->hroot_out,
						       min_callchain_hits,
						       use_callchain);
			continue;
		}

		if (!use_callchain)
			continue;

		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}

		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}
}
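
/*
 * With CHAIN_GRAPH_REL the callchain threshold is relative: it is
 * recomputed per entry as min_percent of that entry's period (or of its
 * accumulated period in cumulative mode) before the entry's chains are
 * sorted and pruned.
 */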
static void __hists__insert_output_entry(struct rb_root_cached *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	if (use_callchain) {
		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, entries, leftmost);

	/* update column width of dynamic entries */
	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
		if (fmt->init)
			fmt->init(fmt, he);
	}
}

static void output_resort(struct hists *hists, struct ui_progress *prog,
			  bool use_callchain, hists__resort_cb_t cb,
			  void *cb_arg)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 callchain_total;
	u64 min_callchain_hits;

	callchain_total = hists->callchain_period;
	if (symbol_conf.filter_relative)
		callchain_total = hists->callchain_non_filtered_period;

	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	if (symbol_conf.report_hierarchy) {
		hists__hierarchy_output_resort(hists, prog,
					       &hists->entries_collapsed,
					       &hists->entries,
					       min_callchain_hits,
					       use_callchain);
		hierarchy_recalc_total_periods(hists);
		return;
	}

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first_cached(root);
	hists->entries = RB_ROOT_CACHED;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		if (cb && cb(n, cb_arg))
			continue;

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}

void evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
			     hists__resort_cb_t cb, void *cb_arg)
{
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel__has_callchain(evsel);
	else
		use_callchain = symbol_conf.use_callchain;

	use_callchain |= symbol_conf.show_branchflag_count;

	output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
}

void evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
{
	return evsel__output_resort_cb(evsel, prog, NULL, NULL);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
}

void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
			     hists__resort_cb_t cb)
{
	output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
}

static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
{
	if (he->leaf || hmd == HMD_FORCE_SIBLING)
		return false;

	if (he->unfolded || hmd == HMD_FORCE_CHILD)
		return true;

	return false;
}

struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	while (can_goto_child(he, HMD_NORMAL)) {
		node = rb_last(&he->hroot_out.rb_root);
		he = rb_entry(node, struct hist_entry, rb_node);
	}
	return node;
}

struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	if (can_goto_child(he, hmd))
		node = rb_first_cached(&he->hroot_out);
	else
		node = rb_next(node);

	while (node == NULL) {
		he = he->parent_he;
		if (he == NULL)
			break;

		node = rb_next(&he->rb_node);
	}
	return node;
}

struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	node = rb_prev(node);
	if (node)
		return rb_hierarchy_last(node);

	he = he->parent_he;
	if (he == NULL)
		return NULL;

	return &he->rb_node;
}

bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
	struct rb_node *node;
	struct hist_entry *child;
	float percent;

	if (he->leaf)
		return false;

	node = rb_first_cached(&he->hroot_out);
	child = rb_entry(node, struct hist_entry, rb_node);

	while (node && child->filtered) {
		node = rb_next(node);
		child = rb_entry(node, struct hist_entry, rb_node);
	}

	if (node)
		percent = hist_entry__get_percent_limit(child);
	else
		percent = 0;

	return node && percent >= limit;
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);

	if (symbol_conf.report_hierarchy) {
		struct hist_entry *parent = h->parent_he;

		while (parent) {
			he_stat__add_stat(&parent->stat, &h->stat);

			parent->filtered &= ~(1 << filter);

			if (parent->filtered)
				goto next;

			/* force fold unfiltered entry for simplicity */
			parent->unfolded = false;
			parent->has_no_entry = false;
			parent->row_offset = 0;
			parent->nr_rows = 0;
next:
			parent = parent->parent_he;
		}
	}

	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->has_no_entry = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || map__dso(he->ms.map) != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    RC_CHK_ACCESS(he->thread) != RC_CHK_ACCESS(hists->thread_filter)) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}

typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
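
/*
 * Filtering never deletes entries: each active filter type owns a bit
 * in he->filtered. hists__filter_by_type() re-evaluates one filter type
 * across all output entries, setting the bit on filtered-out entries
 * and clearing it (while re-adding the entry to the non-filtered
 * totals) via hists__remove_entry_filter() when the entry passes.
 */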
static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (filter(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, type);
	}
}

static void resort_filtered_entry(struct rb_root_cached *root,
				  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct rb_root_cached new_root = RB_ROOT_CACHED;
	struct rb_node *nd;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	if (he->leaf || he->filtered)
		return;

	nd = rb_first_cached(&he->hroot_out);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &he->hroot_out);

		resort_filtered_entry(&new_root, h);
	}

	he->hroot_out = new_root;
}
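
/*
 * Hierarchy filtering walks the tree with __rb_hierarchy_next() and
 * relies on hist_entry__filter() returning:
 *   < 0  filter type does not apply at this level - descend to children,
 *     1  entry matches the filter - skip its whole subtree,
 *     0  entry survives - re-account it and move to the next sibling.
 * Periods of surviving children are propagated to their parents by
 * hists__remove_entry_filter(), so the tree must be resorted afterwards.
 */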
static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
	struct rb_node *nd;
	struct rb_root_cached new_root = RB_ROOT_CACHED;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		int ret;

		ret = hist_entry__filter(h, type, arg);

		/*
		 * case 1. non-matching type
		 * zero out the period, set filter marker and move to child
		 */
		if (ret < 0) {
			memset(&h->stat, 0, sizeof(h->stat));
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
		}
		/*
		 * case 2. matched type (filter out)
		 * set filter marker and move to next
		 */
		else if (ret == 1) {
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
		/*
		 * case 3. ok (not filtered)
		 * add period to hists and parents, erase the filter marker
		 * and move to next sibling
		 */
		else {
			hists__remove_entry_filter(hists, h, type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
	}

	hierarchy_recalc_total_periods(hists);

	/*
	 * resort output after applying a new filter since a filter at a
	 * lower level of the hierarchy can change periods at an upper level.
	 */
	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &hists->entries);

		resort_filtered_entry(&new_root, h);
	}

	hists->entries = new_root;
}

void hists__filter_by_thread(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
					hists->thread_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__THREAD,
				      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
					hists->dso_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__DSO,
				      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
					hists->symbol_filter_str);
	else
		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
				      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
					&hists->socket_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
				      hists__filter_entry_by_socket);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

static void hists_stats__inc(struct hists_stats *stats)
{
	++stats->nr_samples;
}

void hists__inc_nr_events(struct hists *hists)
{
	hists_stats__inc(&hists->stats);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	hists_stats__inc(&hists->stats);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

void hists__inc_nr_lost_samples(struct hists *hists, u32 lost)
{
	hists->stats.nr_lost_samples += lost;
}
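
/*
 * Dummy entries have zeroed stats and exist only so that an entry found
 * in one hists has a counterpart to pair with in another, as 'perf diff'
 * needs.  Hedged sketch of the pairing step (this is what hists__link()
 * below effectively does per entry):
 *
 *	pair = hists__add_dummy_entry(leader, pos);
 *	if (pair)
 *		hist_entry__add_pair(pos, pair);
 */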
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	bool leftmost = true;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		if (symbol_conf.cumulate_callchain)
			memset(he->stat_acc, 0, sizeof(he->stat));
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
						    struct rb_root_cached *root,
						    struct hist_entry *pair)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	p = &root->rb_root.rb_node;
	while (*p != NULL) {
		int64_t cmp = 0;

		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, he, pair);
			if (cmp)
				break;
		}
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);

		he->dummy = true;
		he->hists = hists;
		memset(&he->stat, 0, sizeof(he->stat));
		hists__inc_stats(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (hists__has(hists, need_collapse))
		n = hists->entries_collapsed.rb_root.rb_node;
	else
		n = hists->entries_in->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
						      struct hist_entry *he)
{
	struct rb_node *n = root->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter;
		struct perf_hpp_fmt *fmt;
		int64_t cmp = 0;

		iter = rb_entry(n, struct hist_entry, rb_node_in);
		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static void hists__match_hierarchy(struct rb_root_cached *leader_root,
				   struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_hierarchy_entry(other_root, pos);

		if (pair) {
			hist_entry__add_pair(pair, pos);
			hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
		}
	}
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		hists__match_hierarchy(&leader->entries_collapsed,
				       &other->entries_collapsed);
		return;
	}

	if (hists__has(leader, need_collapse))
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}
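
/*
 * hists__match() above and hists__link() below are the two halves of
 * cross-hists pairing as used by 'perf diff': match links entries that
 * exist on both sides, link adds leader-side dummies for entries that
 * only the other side has.  A hedged sketch of the usual call order
 * (local names illustrative):
 *
 *	hists__match(leader_hists, other_hists);
 *	if (hists__link(leader_hists, other_hists) < 0)
 *		return -1;
 */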
static int hists__link_hierarchy(struct hists *leader_hists,
				 struct hist_entry *parent,
				 struct rb_root_cached *leader_root,
				 struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *leader;

	for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(pos)) {
			bool found = false;

			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
				if (leader->hists == leader_hists) {
					found = true;
					break;
				}
			}
			if (!found)
				return -1;
		} else {
			leader = add_dummy_hierarchy_entry(leader_hists,
							   leader_root, pos);
			if (leader == NULL)
				return -1;

			/* link to the leader-side parent, not the one in pos */
			leader->parent_he = parent;

			hist_entry__add_pair(pos, leader);
		}

		if (!pos->leaf) {
			if (hists__link_hierarchy(leader_hists, leader,
						  &leader->hroot_in,
						  &pos->hroot_in) < 0)
				return -1;
		}
	}
	return 0;
}

/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__link_hierarchy(leader, NULL,
					     &leader->entries_collapsed,
					     &other->entries_collapsed);
	}

	if (hists__has(other, need_collapse))
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

int hists__unlink(struct hists *hists)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		list_del_init(&pos->pairs.node);
	}

	return 0;
}
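
/*
 * Hedged usage sketch for hist__account_cycles(), as a sample-processing
 * callback might invoke it (the local 'total_cycles' is illustrative):
 *
 *	u64 total_cycles = 0;
 *
 *	hist__account_cycles(sample->branch_stack, al, sample,
 *			     false, &total_cycles);
 *
 * Passing true for nonany_branch_mode forces prev to NULL below, which
 * suppresses IPC accounting for non-standard branch sampling modes.
 */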
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode,
			  u64 *total_cycles)
{
	struct branch_info *bi;
	struct branch_entry *entries = perf_sample__branch_entries(sample);

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && entries[0].flags.cycles) {
		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, still want to process the
			 * other entries.
			 *
			 * For non-standard branch modes always
			 * force no IPC (prev == NULL)
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (int i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
								nonany_branch_mode ? NULL : prev,
								bi[i].flags.cycles);
				prev = &bi[i].to;

				if (total_cycles)
					*total_cycles += bi[i].flags.cycles;
			}
			for (unsigned int i = 0; i < bs->nr; i++) {
				map__put(bi[i].to.ms.map);
				maps__put(bi[i].to.ms.maps);
				map__put(bi[i].from.ms.map);
				maps__put(bi[i].from.ms.maps);
			}
			free(bi);
		}
	}
}

size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp,
				 bool skip_empty)
{
	struct evsel *pos;
	size_t ret = 0;

	evlist__for_each_entry(evlist, pos) {
		struct hists *hists = evsel__hists(pos);

		if (skip_empty && !hists->stats.nr_samples && !hists->stats.nr_lost_samples)
			continue;

		ret += fprintf(fp, "%s stats:\n", evsel__name(pos));
		if (hists->stats.nr_samples)
			ret += fprintf(fp, "%16s events: %10u\n",
				       "SAMPLE", hists->stats.nr_samples);
		if (hists->stats.nr_lost_samples)
			ret += fprintf(fp, "%16s events: %10u\n",
				       "LOST_SAMPLES", hists->stats.nr_lost_samples);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}
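
/*
 * Builds the title line shown at the top of the report/top browsers.
 * Illustrative output, assuming a single event and no active filters:
 *
 *	Samples: 4K of event 'cycles', Event count (approx.): 3086733728
 */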
int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
{
	char unit;
	int printed;
	const struct dso *dso = hists->dso_filter;
	struct thread *thread = hists->thread_filter;
	int socket_id = hists->socket_filter;
	unsigned long nr_samples = hists->stats.nr_samples;
	u64 nr_events = hists->stats.total_period;
	struct evsel *evsel = hists_to_evsel(hists);
	const char *ev_name = evsel__name(evsel);
	char buf[512], sample_freq_str[64] = "";
	size_t buflen = sizeof(buf);
	char ref[30] = " show reference callgraph, ";
	bool enable_ref = false;

	if (symbol_conf.filter_relative) {
		nr_samples = hists->stats.nr_non_filtered_samples;
		nr_events = hists->stats.total_non_filtered_period;
	}

	if (evsel__is_group_event(evsel)) {
		struct evsel *pos;

		evsel__group_desc(evsel, buf, buflen);
		ev_name = buf;

		for_each_group_member(pos, evsel) {
			struct hists *pos_hists = evsel__hists(pos);

			if (symbol_conf.filter_relative) {
				nr_samples += pos_hists->stats.nr_non_filtered_samples;
				nr_events += pos_hists->stats.total_non_filtered_period;
			} else {
				nr_samples += pos_hists->stats.nr_samples;
				nr_events += pos_hists->stats.total_period;
			}
		}
	}

	if (symbol_conf.show_ref_callgraph &&
	    strstr(ev_name, "call-graph=no"))
		enable_ref = true;

	if (show_freq)
		scnprintf(sample_freq_str, sizeof(sample_freq_str),
			  " %" PRIu64 " Hz,", evsel->core.attr.sample_freq);

	nr_samples = convert_unit(nr_samples, &unit);
	printed = scnprintf(bf, size,
			    "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
			    nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
			    ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);

	if (hists->uid_filter_str)
		printed += scnprintf(bf + printed, size - printed,
				     ", UID: %s", hists->uid_filter_str);
	if (thread) {
		if (hists__has(hists, thread)) {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s(%d)",
					     (thread__comm_set(thread) ? thread__comm_str(thread) : ""),
					     thread__tid(thread));
		} else {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s",
					     (thread__comm_set(thread) ? thread__comm_str(thread) : ""));
		}
	}
	if (dso)
		printed += scnprintf(bf + printed, size - printed,
				     ", DSO: %s", dso->short_name);
	if (socket_id > -1)
		printed += scnprintf(bf + printed, size - printed,
				     ", Processor Socket: %d", socket_id);

	return printed;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else {
		pr_debug("Invalid percentage: %s\n", arg);
		return -1;
	}

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT_CACHED;
	hists->entries = RB_ROOT_CACHED;
	mutex_init(&hists->lock);
	hists->socket_filter = -1;
	hists->hpp_list = hpp_list;
	INIT_LIST_HEAD(&hists->hpp_formats);
	return 0;
}

static void hists__delete_remaining_entries(struct rb_root_cached *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(&root->rb_root)) {
		node = rb_first_cached(root);
		rb_erase_cached(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);
	struct perf_hpp_fmt *fmt, *pos;
	struct perf_hpp_list_node *node, *tmp;

	hists__delete_all_entries(hists);

	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
			list_del_init(&fmt->list);
			free(fmt);
		}
		list_del_init(&node->list);
		free(node);
	}
}

static int hists_evsel__init(struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists, &perf_hpp_list);
	return 0;
}

int hists__init(void)
{
	int err = evsel__object_config(sizeof(struct hists_evsel),
				       hists_evsel__init, hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}

void perf_hpp_list__init(struct perf_hpp_list *list)
{
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);
}