#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "ui/progress.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
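
/*
 * Update the per-column maximum widths from a single entry. Column
 * lengths only ever grow (via hists__new_col_len()) until they are
 * reset with hists__reset_col_len(), so after walking all entries each
 * column is wide enough for its longest value.
 */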
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 8);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}

		if (h->branch_info->srcline_from)
			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
					   strlen(h->branch_info->srcline_from));
		if (h->branch_info->srcline_to)
			hists__new_col_len(hists, HISTC_SRCLINE_TO,
					   strlen(h->branch_info->srcline_to));
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen);
		}

		if (h->mem_info->iaddr.sym) {
			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->srcline)
		hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());

	if (h->trace_output)
		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}
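
/*
 * Decay is used for 'perf top' style continuous updates: on every decay
 * pass an entry keeps 7/8 of its period, so stale entries fade out over
 * successive refreshes. For example, an entry with period 800 becomes
 * 700 after one pass, 612 after two, and is deleted once it reaches 0.
 */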
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	if (!he->depth) {
		hists->stats.total_period -= diff;
		if (!he->filtered)
			hists->stats.total_non_filtered_period -= diff;
	}

	if (!he->leaf) {
		struct hist_entry *child;
		struct rb_node *node = rb_first(&he->hroot_out);
		while (node) {
			child = rb_entry(node, struct hist_entry, rb_node);
			node = rb_next(node);

			if (hists__decay_entry(hists, child))
				hists__delete_entry(hists, child);
		}
	}

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	struct rb_root *root_in;
	struct rb_root *root_out;

	if (he->parent_he) {
		root_in  = &he->parent_he->hroot_in;
		root_out = &he->parent_he->hroot_out;
	} else {
		if (hists__has(hists, need_collapse))
			root_in = &hists->entries_collapsed;
		else
			root_in = hists->entries_in;
		root_out = &hists->entries;
	}

	rb_erase(&he->rb_node_in, root_in);
	rb_erase(&he->rb_node, root_out);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

/*
 * histogram, sorted on item, collects periods
 */
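
/*
 * Initialize a new entry as a copy of 'template'. Reference-counted
 * resources (map, thread) are pinned with *__get(), and buffers owned
 * by the caller (branch_info, raw_data) are duplicated so the entry
 * stays valid after the originating sample is released.
 */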
static int hist_entry__init(struct hist_entry *he,
			    struct hist_entry *template,
			    bool sample_self)
{
	*he = *template;

	if (symbol_conf.cumulate_callchain) {
		he->stat_acc = malloc(sizeof(he->stat));
		if (he->stat_acc == NULL)
			return -ENOMEM;
		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
		if (!sample_self)
			memset(&he->stat, 0, sizeof(he->stat));
	}

	map__get(he->ms.map);

	if (he->branch_info) {
		/*
		 * This branch info is (a part of) allocated from
		 * sample__resolve_bstack() and will be freed after
		 * adding new entries.  So we need to save a copy.
		 */
		he->branch_info = malloc(sizeof(*he->branch_info));
		if (he->branch_info == NULL) {
			map__zput(he->ms.map);
			free(he->stat_acc);
			return -ENOMEM;
		}

		memcpy(he->branch_info, template->branch_info,
		       sizeof(*he->branch_info));

		map__get(he->branch_info->from.map);
		map__get(he->branch_info->to.map);
	}

	if (he->mem_info) {
		map__get(he->mem_info->iaddr.map);
		map__get(he->mem_info->daddr.map);
	}

	if (symbol_conf.use_callchain)
		callchain_init(he->callchain);

	if (he->raw_data) {
		he->raw_data = memdup(he->raw_data, he->raw_size);

		if (he->raw_data == NULL) {
			map__put(he->ms.map);
			if (he->branch_info) {
				map__put(he->branch_info->from.map);
				map__put(he->branch_info->to.map);
				free(he->branch_info);
			}
			if (he->mem_info) {
				map__put(he->mem_info->iaddr.map);
				map__put(he->mem_info->daddr.map);
			}
			free(he->stat_acc);
			return -ENOMEM;
		}
	}
	INIT_LIST_HEAD(&he->pairs.node);
	thread__get(he->thread);

	if (!symbol_conf.report_hierarchy)
		he->leaf = true;

	return 0;
}

static void *hist_entry__zalloc(size_t size)
{
	return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
	free(ptr);
}

static struct hist_entry_ops default_ops = {
	.new	= hist_entry__zalloc,
	.free	= hist_entry__free,
};

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	struct hist_entry_ops *ops = template->ops;
	size_t callchain_size = 0;
	struct hist_entry *he;
	int err = 0;

	if (!ops)
		ops = template->ops = &default_ops;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = ops->new(callchain_size);
	if (he) {
		err = hist_entry__init(he, template, sample_self);
		if (err) {
			ops->free(he);
			he = NULL;
		}
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
	if (!symbol_conf.use_callchain)
		return;

	he->hists->callchain_period += period;
	if (!he->filtered)
		he->hists->callchain_non_filtered_period += period;
}
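
/*
 * Look up 'entry' in the input rbtree, keyed by the configured sort
 * criteria. On a match, aggregate the period into the existing entry;
 * otherwise allocate a copy with hist_entry__new() and insert it.
 */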
static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self) {
				he_stat__add_period(&he->stat, period, weight);
				hist_entry__add_callchain_period(he, period);
			}
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	if (sample_self)
		hist_entry__add_callchain_period(he, period);
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}

static struct hist_entry*
__hists__add_entry(struct hists *hists,
		   struct addr_location *al,
		   struct symbol *sym_parent,
		   struct branch_info *bi,
		   struct mem_info *mi,
		   struct perf_sample *sample,
		   bool sample_self,
		   struct hist_entry_ops *ops)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.socket	 = al->socket,
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= sample->period,
			.weight = sample->weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
		.ops = ops,
	};

	return hists__findnew_entry(hists, &entry, al, sample_self);
}

struct hist_entry *hists__add_entry(struct hists *hists,
				    struct addr_location *al,
				    struct symbol *sym_parent,
				    struct branch_info *bi,
				    struct mem_info *mi,
				    struct perf_sample *sample,
				    bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi,
				  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
					struct hist_entry_ops *ops,
					struct addr_location *al,
					struct symbol *sym_parent,
					struct branch_info *bi,
					struct mem_info *mi,
					struct perf_sample *sample,
					bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi,
				  sample, sample_self, ops);
}

static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}
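
/*
 * The iterators below implement the hist_entry_iter callbacks: a
 * prepare/add_single/next/add_next/finish cycle driven by
 * hist_entry_iter__add(). Each sample type (mem, branch, normal,
 * cumulative) provides its own ops table; see the tables further down.
 */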
static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods. We want sorting to be done on
	 * nr_events * weight and this is indirectly achieved by
	 * passing period=weight here and the he_stat__add_period()
	 * function.
	 */
	sample->period = cost;

	he = hists__add_entry(hists, al, iter->parent, NULL, mi,
			      sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}
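
/*
 * Cumulative ('--children') mode: besides the entry for the sampled
 * location itself, every caller on the resolved callchain gets an
 * entry whose stat_acc accumulates the period. The he_cache array
 * below deduplicates callchain nodes so cycles and recursion are
 * accumulated only once per sample.
 */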
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * accumulated only once to prevent entries from getting more
	 * than 100% overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there are duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry = iter_prepare_mem_entry,
	.add_single_entry = iter_add_single_mem_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry = iter_prepare_branch_entry,
	.add_single_entry = iter_add_single_branch_entry,
	.next_entry = iter_next_branch_entry,
	.add_next_entry = iter_add_next_branch_entry,
	.finish_entry = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry = iter_prepare_normal_entry,
	.add_single_entry = iter_add_single_normal_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry = iter_prepare_cumulative_entry,
	.add_single_entry = iter_add_single_cumulative_entry,
	.next_entry = iter_next_cumulative_entry,
	.add_next_entry = iter_add_next_cumulative_entry,
	.finish_entry = iter_finish_cumulative_entry,
};
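
/*
 * Drive one sample through an ops table: resolve the callchain, run
 * prepare_entry and add_single_entry, then loop next_entry /
 * add_next_entry until exhausted, and clean up via finish_entry.
 * A minimal sketch of typical use from a tool's sample handler
 * (other iter fields abbreviated; max_stack is the caller's
 * stack-depth limit):
 *
 *	struct hist_entry_iter iter = {
 *		.evsel	= evsel,
 *		.sample	= sample,
 *		.ops	= &hist_iter_normal,
 *	};
 *
 *	err = hist_entry_iter__add(&iter, &al, max_stack, NULL);
 */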
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err)
		return err;

	iter->max_stack = max_stack_depth;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
	struct hist_entry_ops *ops = he->ops;

	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		free_srcline(he->branch_info->srcline_from);
		free_srcline(he->branch_info->srcline_to);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		zfree(&he->mem_info);
	}

	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		free(he->srcfile);
	free_callchain(he->callchain);
	free(he->trace_output);
	free(he->raw_data);
	ops->free(he);
}

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * that would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
				   struct perf_hpp_fmt *fmt, int printed)
{
	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
		const int width = fmt->width(fmt, hpp, he->hists);
		if (printed < width) {
			advance_hpp(hpp, printed);
			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
		}
	}

	return printed;
}

/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
				       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}

static void hist_entry__check_and_remove_filter(struct hist_entry *he,
						enum hist_filter type,
						fmt_chk_fn check)
{
	struct perf_hpp_fmt *fmt;
	bool type_match = false;
	struct hist_entry *parent = he->parent_he;

	switch (type) {
	case HIST_FILTER__THREAD:
		if (symbol_conf.comm_list == NULL &&
		    symbol_conf.pid_list == NULL &&
		    symbol_conf.tid_list == NULL)
			return;
		break;
	case HIST_FILTER__DSO:
		if (symbol_conf.dso_list == NULL)
			return;
		break;
	case HIST_FILTER__SYMBOL:
		if (symbol_conf.sym_list == NULL)
			return;
		break;
	case HIST_FILTER__PARENT:
	case HIST_FILTER__GUEST:
	case HIST_FILTER__HOST:
	case HIST_FILTER__SOCKET:
	default:
		return;
	}

	/* if it's filtered by own fmt, it has to have filter bits */
	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (check(fmt)) {
			type_match = true;
			break;
		}
	}

	if (type_match) {
		/*
		 * If the filter is for current level entry, propagate
		 * filter marker to parents.  The marker bit was
		 * already set by default so it only needs to clear
		 * non-filtered entries.
		 */
		if (!(he->filtered & (1 << type))) {
			while (parent) {
				parent->filtered &= ~(1 << type);
				parent = parent->parent_he;
			}
		}
	} else {
		/*
		 * If current entry doesn't have matching formats, set
		 * filter marker for upper level entries.  It will be
		 * cleared if its lower level entries are not filtered.
		 *
		 * For lower-level entries, it inherits the parent's
		 * filter bit so that lower level entries of a
		 * non-filtered entry won't set the filter marker.
		 */
		if (parent == NULL)
			he->filtered |= (1 << type);
		else
			he->filtered |= (parent->filtered & (1 << type));
	}
}

static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
					    check_thread_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
					    perf_hpp__is_dso_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
					    perf_hpp__is_sym_entry);

	hists__apply_filters(he->hists, he);
}

static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
						 struct rb_root *root,
						 struct hist_entry *he,
						 struct hist_entry *parent_he,
						 struct perf_hpp_list *hpp_list)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter, *new;
	struct perf_hpp_fmt *fmt;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = 0;
		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			return iter;
		}

		if (cmp < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	new = hist_entry__new(he, true);
	if (new == NULL)
		return NULL;

	hists->nr_entries++;

	/* save related format list for output */
	new->hpp_list = hpp_list;
	new->parent_he = parent_he;

	hist_entry__apply_hierarchy_filters(new);

	/* some fields are now passed to 'new' */
	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			he->trace_output = NULL;
		else
			new->trace_output = NULL;

		if (perf_hpp__is_srcline_entry(fmt))
			he->srcline = NULL;
		else
			new->srcline = NULL;

		if (perf_hpp__is_srcfile_entry(fmt))
			he->srcfile = NULL;
		else
			new->srcfile = NULL;
	}

	rb_link_node(&new->rb_node_in, parent, p);
	rb_insert_color(&new->rb_node_in, root);
	return new;
}

static int hists__hierarchy_insert_entry(struct hists *hists,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct perf_hpp_list_node *node;
	struct hist_entry *new_he = NULL;
	struct hist_entry *parent = NULL;
	int depth = 0;
	int ret = 0;

	list_for_each_entry(node, &hists->hpp_formats, list) {
		/* skip period (overhead) and elided columns */
		if (node->level == 0 || node->skip)
			continue;

		/* insert copy of 'he' for each fmt into the hierarchy */
		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
		if (new_he == NULL) {
			ret = -1;
			break;
		}

		root = &new_he->hroot_in;
		new_he->depth = depth++;
		parent = new_he;
	}

	if (new_he) {
		new_he->leaf = true;

		if (symbol_conf.use_callchain) {
			callchain_cursor_reset(&callchain_cursor);
			if (callchain_merge(&callchain_cursor,
					    new_he->callchain,
					    he->callchain) < 0)
				ret = -1;
		}
	}

	/* 'he' is no longer used */
	hist_entry__delete(he);

	/* return 0 (or -1) since it already applied filters */
	return ret;
}
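
/*
 * Merge 'he' into the collapsed tree. Returns 1 if a new node was
 * inserted, 0 if it was merged into (or, in hierarchy mode, consumed
 * by) an existing entry, and -1 on error; the caller only applies
 * filters to newly inserted entries.
 */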
static int hists__collapse_insert_entry(struct hists *hists,
					struct rb_root *root,
					struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	if (symbol_conf.report_hierarchy)
		return hists__hierarchy_insert_entry(hists, root, he);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			int ret = 0;

			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				if (callchain_merge(&callchain_cursor,
						    iter->callchain,
						    he->callchain) < 0)
					ret = -1;
			}
			hist_entry__delete(he);
			return ret;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return 1;
}
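
/*
 * The input entries are double buffered (entries_in_array[2]) so that
 * 'perf top' can keep adding samples to one tree while the resort code
 * drains the other; this rotates the active input tree under the lock.
 */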
struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}

int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	int ret;

	if (!hists__has(hists, need_collapse))
		return 0;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
		if (ret < 0)
			return -1;

		if (ret) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
	return 0;
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct hists *hists = a->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

static void hierarchy_recalc_total_periods(struct hists *hists)
{
	struct rb_node *node;
	struct hist_entry *he;

	node = rb_first(&hists->entries);

	hists->stats.total_period = 0;
	hists->stats.total_non_filtered_period = 0;

	/*
	 * recalculate total period using top-level entries only
	 * since lower level entries only see non-filtered entries
	 * but upper level entries have sum of both entries.
	 */
	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node);
		node = rb_next(node);

		hists->stats.total_period += he->stat.period;
		if (!he->filtered)
			hists->stats.total_non_filtered_period += he->stat.period;
	}
}

static void hierarchy_insert_output_entry(struct rb_root *root,
					  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, root);

	/* update column width of dynamic entry */
	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt))
			fmt->sort(fmt, he, NULL);
	}
}

static void hists__hierarchy_output_resort(struct hists *hists,
					   struct ui_progress *prog,
					   struct rb_root *root_in,
					   struct rb_root *root_out,
					   u64 min_callchain_hits,
					   bool use_callchain)
{
	struct rb_node *node;
	struct hist_entry *he;

	*root_out = RB_ROOT;
	node = rb_first(root_in);

	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node_in);
		node = rb_next(node);

		hierarchy_insert_output_entry(root_out, he);

		if (prog)
			ui_progress__update(prog, 1);

		if (!he->leaf) {
			hists__hierarchy_output_resort(hists, prog,
						       &he->hroot_in,
						       &he->hroot_out,
						       min_callchain_hits,
						       use_callchain);
			hists->nr_entries++;
			if (!he->filtered) {
				hists->nr_non_filtered_entries++;
				hists__calc_col_len(hists, he);
			}

			continue;
		}

		if (!use_callchain)
			continue;

		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}

		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}
}

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;

	if (use_callchain) {
		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);

	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    perf_hpp__defined_dynamic_entry(fmt, he->hists))
			fmt->sort(fmt, he, NULL);	/* update column width */
	}
}
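
/*
 * Rebuild the output rbtree in display-sort order. Callchains below
 * min_callchain_hits are pruned: e.g. with a total callchain period of
 * 50000 and callchain_param.min_percent = 0.5, chains with fewer than
 * 250 hits are dropped before display.
 */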
static void output_resort(struct hists *hists, struct ui_progress *prog,
			  bool use_callchain)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 callchain_total;
	u64 min_callchain_hits;

	callchain_total = hists->callchain_period;
	if (symbol_conf.filter_relative)
		callchain_total = hists->callchain_non_filtered_period;

	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	if (symbol_conf.report_hierarchy) {
		hists__hierarchy_output_resort(hists, prog,
					       &hists->entries_collapsed,
					       &hists->entries,
					       min_callchain_hits,
					       use_callchain);
		hierarchy_recalc_total_periods(hists);
		return;
	}

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}

void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
{
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
	else
		use_callchain = symbol_conf.use_callchain;

	output_resort(evsel__hists(evsel), prog, use_callchain);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	output_resort(hists, prog, symbol_conf.use_callchain);
}
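
/*
 * Helpers for walking the hierarchy output as a flattened list: the
 * traversal descends into a node's hroot_out when it is unfolded (or
 * forced by HMD_FORCE_CHILD) and otherwise moves to the next sibling,
 * climbing back up through parent_he when a subtree is exhausted.
 */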
static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
{
	if (he->leaf || hmd == HMD_FORCE_SIBLING)
		return false;

	if (he->unfolded || hmd == HMD_FORCE_CHILD)
		return true;

	return false;
}

struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	while (can_goto_child(he, HMD_NORMAL)) {
		node = rb_last(&he->hroot_out);
		he = rb_entry(node, struct hist_entry, rb_node);
	}
	return node;
}

struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	if (can_goto_child(he, hmd))
		node = rb_first(&he->hroot_out);
	else
		node = rb_next(node);

	while (node == NULL) {
		he = he->parent_he;
		if (he == NULL)
			break;

		node = rb_next(&he->rb_node);
	}
	return node;
}

struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	node = rb_prev(node);
	if (node)
		return rb_hierarchy_last(node);

	he = he->parent_he;
	if (he == NULL)
		return NULL;

	return &he->rb_node;
}

bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
	struct rb_node *node;
	struct hist_entry *child;
	float percent;

	if (he->leaf)
		return false;

	node = rb_first(&he->hroot_out);
	child = rb_entry(node, struct hist_entry, rb_node);

	while (node && child->filtered) {
		node = rb_next(node);
		child = rb_entry(node, struct hist_entry, rb_node);
	}

	if (node)
		percent = hist_entry__get_percent_limit(child);
	else
		percent = 0;

	return node && percent >= limit;
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);

	if (symbol_conf.report_hierarchy) {
		struct hist_entry *parent = h->parent_he;

		while (parent) {
			he_stat__add_stat(&parent->stat, &h->stat);

			parent->filtered &= ~(1 << filter);

			if (parent->filtered)
				goto next;

			/* force fold unfiltered entry for simplicity */
			parent->unfolded = false;
			parent->has_no_entry = false;
			parent->row_offset = 0;
			parent->nr_rows = 0;
next:
			parent = parent->parent_he;
		}
	}

	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->has_no_entry = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}

typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);

static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (filter(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, type);
	}
}
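
/*
 * Re-insert an entry (and, recursively, its children) into a fresh
 * rbtree so the hierarchy is ordered by the current sort keys after a
 * filter changed the periods.
 */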
static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct rb_root new_root = RB_ROOT;
	struct rb_node *nd;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, root);

	if (he->leaf || he->filtered)
		return;

	nd = rb_first(&he->hroot_out);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase(&h->rb_node, &he->hroot_out);

		resort_filtered_entry(&new_root, h);
	}

	he->hroot_out = new_root;
}

static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
	struct rb_node *nd;
	struct rb_root new_root = RB_ROOT;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	nd = rb_first(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		int ret;

		ret = hist_entry__filter(h, type, arg);

		/*
		 * case 1. non-matching type
		 * zero out the period, set filter marker and move to child
		 */
		if (ret < 0) {
			memset(&h->stat, 0, sizeof(h->stat));
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
		}
		/*
		 * case 2. matched type (filter out)
		 * set filter marker and move to next
		 */
		else if (ret == 1) {
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
		/*
		 * case 3. ok (not filtered)
		 * add period to hists and parents, erase the filter marker
		 * and move to next sibling
		 */
		else {
			hists__remove_entry_filter(hists, h, type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
	}

	hierarchy_recalc_total_periods(hists);

	/*
	 * resort output after applying a new filter since filter in a lower
	 * hierarchy can change periods in an upper hierarchy.
	 */
	nd = rb_first(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase(&h->rb_node, &hists->entries);

		resort_filtered_entry(&new_root, h);
	}

	hists->entries = new_root;
}

void hists__filter_by_thread(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
					hists->thread_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__THREAD,
				      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
					hists->dso_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__DSO,
				      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
					hists->symbol_filter_str);
	else
		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
				      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
					&hists->socket_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
				      hists__filter_entry_by_socket);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}
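
/*
 * Dummy entries (period 0, nr_events 0) act as placeholders so two
 * hists can be compared entry by entry, e.g. when 'perf diff' pairs a
 * baseline against another data file via hists__match()/hists__link().
 */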
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		if (symbol_conf.cumulate_callchain)
			memset(he->stat_acc, 0, sizeof(he->stat));
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (hists__has(hists, need_collapse))
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (hists__has(leader, need_collapse))
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (hists__has(other, need_collapse))
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, still want to process the
			 * other entries.
			 *
			 * For non-standard branch modes always
			 * force no IPC (prev == NULL)
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			free(bi);
		}
	}
}

size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each_entry(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	hists->hpp_list = hpp_list;
	INIT_LIST_HEAD(&hists->hpp_formats);
	return 0;
}

static void hists__delete_remaining_entries(struct rb_root *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(root)) {
		node = rb_first(root);
		rb_erase(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);
	struct perf_hpp_fmt *fmt, *pos;
	struct perf_hpp_list_node *node, *tmp;

	hists__delete_all_entries(hists);

	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
			list_del(&fmt->list);
			free(fmt);
		}
		list_del(&node->list);
		free(node);
	}
}

static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists, &perf_hpp_list);
	return 0;
}

/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */

int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init,
					    hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}

void perf_hpp_list__init(struct perf_hpp_list *list)
{
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);
}