/*
 * stdio output for perf histogram entries: renders histogram rows,
 * column headers and callchains (graph / flat / folded modes) to a
 * FILE stream.  Behavior is steered by the global callchain_param and
 * symbol_conf (project-declared; defined elsewhere).
 */
#include <stdio.h>

#include "../../util/util.h"
#include "../../util/hist.h"
#include "../../util/sort.h"
#include "../../util/evsel.h"


/*
 * Print the left margin preceding a callchain line: one leading space
 * plus @left_margin further spaces.  Returns the number of characters
 * written.
 *
 * NOTE(review): the string literals in this file appear to have had
 * runs of spaces collapsed by extraction; widths here reflect the
 * visible source text.
 */
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, " ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}

/*
 * Print one separator line of the callchain graph: the left margin
 * followed by a '|' for each depth level still set in @depth_mask
 * (open branches), spaces for cleared levels, then a newline.
 * Returns the number of characters written.
 */
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "| ");
		else
			ret += fprintf(fp, " ");

	ret += fprintf(fp, "\n");

	return ret;
}

/*
 * Print one callchain entry line of the graph output: pipes for the
 * still-open branch levels, then -- on the first entry of a child
 * (@period == 0 at the deepest level) the node's value between "--"
 * markers -- and finally the symbol name of @chain.
 *
 * NOTE(review): the symbol name and trailing newline are emitted with
 * fputs()/fputc() and are NOT added to @ret, so the returned count
 * covers only the graph decoration -- looks intentional upstream but
 * worth confirming if callers ever rely on exact widths.
 */
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
				     struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, int left_margin)
{
	int i;
	size_t ret = 0;
	char bf[1024];

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			/* first line of a new child: show its value inline */
			ret += fprintf(fp, "--");
			ret += callchain_node__fprintf_value(node, fp, total_samples);
			ret += fprintf(fp, "--");
		} else
			ret += fprintf(fp, "%s", " ");
	}
	fputs(callchain_list__sym_name(chain, bf, sizeof(bf), false), fp);
	fputc('\n', fp);
	return ret;
}

/* fake "[...]" symbol/entry used to display filtered-out remainder hits */
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

/*
 * Allocate and initialize the "[...]" placeholder symbol.  The +6
 * covers the 5 characters of "[...]" plus the NUL terminator
 * (presumably struct symbol ends in a flexible name[] array -- defined
 * elsewhere, confirm against util/symbol.h).  On allocation failure a
 * warning is printed and rem_sq_bracket stays NULL; the graph printer
 * checks for that and simply skips the remainder line.
 */
static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}

/*
 * Recursively print one level of the callchain graph rooted at @root.
 * @depth_mask tracks which ancestor levels still have siblings below
 * (and therefore need a '|' pipe drawn); @remaining accumulates hits
 * that were filtered out and, in relative mode, are displayed as a
 * final "[...]" entry.  Returns the number of characters written
 * (modulo the sym-name counting quirk noted above).
 */
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child = NULL;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;
	int cumul_count = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;
		cumul_count += callchain_cumul_counts(child);

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			/* i++ means only the first entry gets the --value-- marker */
			ret += ipchain__fprintf_graph(fp, child, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      left_margin);
		}

		/* relative mode scales children against this node's hits */
		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	/* show filtered-out hits as a trailing "[...]" pseudo entry */
	if (callchain_param.mode == CHAIN_GRAPH_REL &&
		remaining && remaining != total_samples) {
		struct callchain_node rem_node = {
			.hit = remaining,
		};

		if (!rem_sq_bracket)
			return ret;

		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
			rem_node.count = child->parent->children_count - cumul_count;
			if (rem_node.count <= 0)
				return ret;
		}

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      left_margin);
	}

	return ret;
}

/*
 * If have one single callchain root, don't bother printing
 * its percentage (100 % in fractal mode and the same percentage
 * than the hist in graph mode). This also avoid one level of column.
 *
 * However when percent-limit applied, it's possible that single callchain
 * node have different (non-100% in fractal mode) percentage.
 */
static bool need_percent_display(struct rb_node *node, u64 parent_samples)
{
	struct callchain_node *cnode;

	if (rb_next(node))
		return true;

	cnode = rb_entry(node, struct callchain_node, rb_node);
	return callchain_cumul_hits(cnode) != parent_samples;
}

/*
 * Top-level graph-mode callchain printer for one hist entry.  When the
 * chain has a single root covering all parent samples, its entries are
 * printed inline (prefixed by "---") without a percentage column, and
 * recursion starts from that root's children.  Returns characters
 * written; a trailing blank line is added only if anything was printed.
 */
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, u64 parent_samples,
				       int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;
	char bf[1024];

	node = rb_first(root);
	if (node && !need_percent_display(node, parent_samples)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same than
			 * the symbol. No need to print it otherwise it appears as
			 * displayed twice.
			 */
			if (!i++ && field_order == NULL &&
			    sort_order && !prefixcmp(sort_order, "sym"))
				continue;
			if (!printed) {
				/* first entry: draw the "---" connector and indent the rest */
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			ret += fprintf(fp, "%s\n", callchain_list__sym_name(chain, bf, sizeof(bf),
							false));

			if (++entries_printed == callchain_param.print_limit)
				break;
		}
		/* recurse from the single root's children */
		root = &cnode->rb_root;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		total_samples = parent_samples;

	ret += __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
	if (ret) {
		/* do not add a blank line if it printed nothing */
		ret += fprintf(fp, "\n");
	}

	return ret;
}

/*
 * Flat mode helper: print the chain from the root down to @node by
 * recursing to the parent first, then printing this node's entries.
 * Context markers (ip >= PERF_CONTEXT_MAX) are skipped.
 */
static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
					u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];

	if (!node)
		return 0;

	/* parent first so the chain prints root-to-leaf */
	ret += __callchain__fprintf_flat(fp, node->parent, total_samples);


	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, " %s\n", callchain_list__sym_name(chain,
					bf, sizeof(bf), false));
	}

	return ret;
}

/*
 * Flat mode: for each leaf in @tree print its value followed by the
 * full root-to-leaf chain, one symbol per line, up to
 * callchain_param.print_limit entries.
 */
static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
				      u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {
		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += fprintf(fp, " ");
		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, "\n");
		ret += __callchain__fprintf_flat(fp, chain, total_samples);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

/*
 * Folded mode helper: print the root-to-@node chain on one line,
 * entries joined by symbol_conf.field_sep (default ";").  The first
 * printed entry is detected via ret == 0 after the parent recursion,
 * so no separator precedes it.
 */
static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
{
	const char *sep = symbol_conf.field_sep ?: ";";
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];
	bool first;

	if (!node)
		return 0;

	ret += __callchain__fprintf_folded(fp, node->parent);

	first = (ret == 0);
	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, "%s%s", first ? "" : sep,
			       callchain_list__sym_name(chain,
						bf, sizeof(bf), false));
		first = false;
	}

	return ret;
}

/*
 * Folded mode: one line per leaf -- the node's value, a space, then
 * the whole chain separator-joined -- up to the print limit.
 */
static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
					u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {

		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, " ");
		ret += __callchain__fprintf_folded(fp, chain);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

/*
 * Dispatch the callchain of @he to the printer matching the configured
 * callchain_param.mode.  parent_samples is the entry's own period
 * (accumulated period when cumulate_callchain is set), used to scale
 * percentages in relative graph mode.  Returns characters printed;
 * 0 for CHAIN_NONE or an unknown mode.
 *
 * NOTE(review): the REL and ABS cases are textually identical here --
 * the mode distinction is applied inside callchain__fprintf_graph via
 * callchain_param.mode, so this matches upstream behavior.
 */
static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
					    u64 total_samples, int left_margin,
					    FILE *fp)
{
	u64 parent_samples = he->stat.period;

	if (symbol_conf.cumulate_callchain)
		parent_samples = he->stat_acc->period;

	switch (callchain_param.mode) {
	case CHAIN_GRAPH_REL:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);
		break;
	case CHAIN_GRAPH_ABS:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);
		break;
	case CHAIN_FLAT:
		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
		break;
	case CHAIN_FOLDED:
		return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
		break;
	case CHAIN_NONE:
		break;
	default:
		pr_err("Bad callchain mode\n");
	}

	return 0;
}

/*
 * Format one hist entry into hpp->buf using the column formats of
 * @hpp_list.  Columns are separated by symbol_conf.field_sep when set,
 * otherwise by a leading space per column.  Returns the number of
 * bytes written into the buffer.
 */
int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
			   struct perf_hpp_list *hpp_list)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	char *start = hpp->buf;
	int ret;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	perf_hpp_list__for_each_format(hpp_list, fmt) {
		if (perf_hpp__should_skip(fmt, he->hists))
			continue;

		/*
		 * If there's no field_sep, we still need
		 * to display initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	return hpp->buf - start;
}

/* Convenience wrapper using the hists' own column list. */
static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
{
	return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
}

/*
 * Print one entry in --hierarchy mode: indentation by depth, the
 * overhead columns (first hpp_list_node), padding, then the entry's
 * own sort columns left-trimmed, a callchain if the entry is a leaf.
 * Returns the number of characters printed.
 */
static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
					 struct perf_hpp *hpp,
					 struct hists *hists,
					 FILE *fp)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	char *buf = hpp->buf;
	size_t size = hpp->size;
	int ret, printed = 0;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		/*
		 * If there's no field_sep, we still need
		 * to display initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	/*
	 * NOTE(review): when sep is set, advance_hpp() runs with the
	 * stale ret from the loop above -- matches the visible source;
	 * confirm against upstream before "fixing".
	 */
	if (!sep)
		ret = scnprintf(hpp->buf, hpp->size, "%*s",
				(hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	printed += fprintf(fp, "%s", buf);

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		/* reuse the buffer for each of the entry's own columns */
		hpp->buf = buf;
		hpp->size = size;

		/*
		 * No need to call hist_entry__snprintf_alignment() since this
		 * fmt is always the last column in the hierarchy mode.
		 */
		if (perf_hpp__use_color() && fmt->color)
			fmt->color(fmt, hpp, he);
		else
			fmt->entry(fmt, hpp, he);

		/*
		 * dynamic entries are right-aligned but we want left-aligned
		 * in the hierarchy mode
		 */
		printed += fprintf(fp, "%s%s", sep ?: " ", ltrim(buf));
	}
	/*
	 * NOTE(review): putc() returns the character written, so this
	 * adds '\n' (10) to printed rather than 1 -- reproduced as-is.
	 */
	printed += putc('\n', fp);

	if (symbol_conf.use_callchain && he->leaf) {
		u64 total = hists__total_period(hists);

		printed += hist_entry_callchain__fprintf(he, total, 0, fp);
		goto out;	/* redundant (falls through to out anyway) */
	}

out:
	return printed;
}

/*
 * Print one hist entry (normal or hierarchy mode) into @bf and then to
 * @fp, followed by its callchain when @use_callchain is set.  @size of
 * 0 (or larger than the buffer) means "use the whole buffer".
 */
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       char *bf, size_t bfsz, FILE *fp,
			       bool use_callchain)
{
	int ret;
	struct perf_hpp hpp = {
		.buf		= bf,
		.size		= size,
	};
	struct hists *hists = he->hists;
	u64 total_period = hists->stats.total_period;

	if (size == 0 || size > bfsz)
		size = hpp.size = bfsz;

	if (symbol_conf.report_hierarchy)
		return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);

	hist_entry__snprintf(he, &hpp);

	ret = fprintf(fp, "%s\n", bf);

	if (use_callchain)
		ret += hist_entry_callchain__fprintf(he, total_period, 0, fp);

	return ret;
}

/*
 * Print hierarchy-mode indentation: (indent - 2) * HIERARCHY_INDENT
 * characters taken from @line (a source of repeated spaces or dots).
 * No-op when a field separator is in use or indent < 2.
 */
static int print_hierarchy_indent(const char *sep, int indent,
				  const char *line, FILE *fp)
{
	if (sep != NULL || indent < 2)
		return 0;

	return fprintf(fp, "%-.*s", (indent - 2) * HIERARCHY_INDENT, line);
}

/*
 * Print the two header lines for --hierarchy mode: the overhead column
 * titles plus the sort keys joined by " / ", then a dotted underline
 * sized to the widest hierarchy level.  Returns 2 (lines printed).
 * Uses the project globals 'spaces' and 'dots' as fill sources.
 */
static int hists__fprintf_hierarchy_headers(struct hists *hists,
					    struct perf_hpp *hpp, FILE *fp)
{
	bool first_node, first_col;
	int indent;
	int depth;
	unsigned width = 0;
	unsigned header_width = 0;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	const char *sep = symbol_conf.field_sep;

	indent = hists->nr_hpp_node;

	/* preserve max indent depth for column headers */
	print_hierarchy_indent(sep, indent, spaces, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		fmt->header(fmt, hpp, hists, 0, NULL);
		fprintf(fp, "%s%s", hpp->buf, sep ?: " ");
	}

	/* combine sort headers with ' / ' */
	first_node = true;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		if (!first_node)
			header_width += fprintf(fp, " / ");
		first_node = false;

		first_col = true;
		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				header_width += fprintf(fp, "+");
			first_col = false;

			fmt->header(fmt, hpp, hists, 0, NULL);

			header_width += fprintf(fp, "%s", trim(hpp->buf));
		}
	}

	fprintf(fp, "\n# ");

	/* preserve max indent depth for initial dots */
	print_hierarchy_indent(sep, indent, dots, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	first_col = true;
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		if (!first_col)
			fprintf(fp, "%s", sep ?: "..");
		first_col = false;

		width = fmt->width(fmt, hpp, hists);
		fprintf(fp, "%.*s", width, dots);
	}

	/* size the underline to the widest hierarchy level */
	depth = 0;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		first_col = true;
		width = depth * HIERARCHY_INDENT;

		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				width++;	/* for '+' sign between column header */
			first_col = false;

			width += fmt->width(fmt, hpp, hists);
		}

		if (width > header_width)
			header_width = width;

		depth++;
	}

	fprintf(fp, "%s%-.*s", sep ?: " ", header_width, dots);

	fprintf(fp, "\n#\n");

	return 2;
}

/*
 * Print the header titles for one header @line using the hists' column
 * formats.  Columns spanning multiple sub-columns suppress separators
 * and their own text via the @span out-parameter of fmt->header().
 */
static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
			 int line, FILE *fp)
{
	struct perf_hpp_fmt *fmt;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int span = 0;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first && !span)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		fmt->header(fmt, hpp, hists, line, &span);

		if (!span)
			fprintf(fp, "%s", hpp->buf);
	}
}

/*
 * Print the standard (non-hierarchy) headers: all configured header
 * lines, and -- when no field separator is set -- a dotted underline
 * per column plus a blank "#" line.  Returns the number of header rows
 * printed.
 */
static int
hists__fprintf_standard_headers(struct hists *hists,
				struct perf_hpp *hpp,
				FILE *fp)
{
	struct perf_hpp_list *hpp_list = hists->hpp_list;
	struct perf_hpp_fmt *fmt;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int line;

	for (line = 0; line < hpp_list->nr_header_lines; line++) {
		/* first # is displayed one level up */
		if (line)
			fprintf(fp, "# ");
		fprintf_line(hists, hpp, line, fp);
		fprintf(fp, "\n");
	}

	if (sep)
		return hpp_list->nr_header_lines;

	first = true;

	fprintf(fp, "# ");

	/* sep is known NULL past this point, so "sep ?: " "" is always " " */
	hists__for_each_format(hists, fmt) {
		unsigned int i;

		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		width = fmt->width(fmt, hpp, hists);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	fprintf(fp, "#\n");
	return hpp_list->nr_header_lines + 2;
}

/*
 * Print the column headers for @hists, dispatching on the report
 * hierarchy setting.  Returns the number of rows the headers occupy.
 */
int hists__fprintf_headers(struct hists *hists, FILE *fp)
{
	char bf[1024];
	struct perf_hpp dummy_hpp = {
		.buf	= bf,
		.size	= sizeof(bf),
	};

	fprintf(fp, "# ");

	if (symbol_conf.report_hierarchy)
		return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
	else
		return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);

}

/*
 * Main stdio renderer: print headers (optional) and every unfiltered
 * hist entry above @min_pcnt, up to @max_rows rows, with callchains
 * when @use_callchain is set.  Returns total characters written.
 *
 * NOTE(review): on malloc failure 'ret = -1' is stored into a size_t,
 * i.e. callers receive SIZE_MAX -- matches the visible source; verify
 * how callers interpret the return before changing.
 */
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp,
		      bool use_callchain)
{
	struct rb_node *nd;
	size_t ret = 0;
	const char *sep = symbol_conf.field_sep;
	int nr_rows = 0;
	size_t linesz;
	char *line = NULL;
	unsigned indent;

	init_rem_hits();

	hists__reset_column_width(hists);

	if (symbol_conf.col_width_list_str)
		perf_hpp__set_user_width(symbol_conf.col_width_list_str);

	if (show_header)
		nr_rows += hists__fprintf_headers(hists, fp);

	if (max_rows && nr_rows >= max_rows)
		goto out;

	linesz = hists__sort_list_width(hists) + 3 + 1;
	linesz += perf_hpp__color_overhead();
	line = malloc(linesz);
	if (line == NULL) {
		ret = -1;
		goto out;
	}

	indent = hists__overhead_width(hists) + 4;

	for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		float percent;

		if (h->filtered)
			continue;

		percent = hist_entry__get_percent_limit(h);
		if (percent < min_pcnt)
			continue;

		ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, use_callchain);

		if (max_rows && ++nr_rows >= max_rows)
			break;

		/*
		 * If all children are filtered out or percent-limited,
		 * display "no entry >= x.xx%" message.
		 */
		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
			int depth = hists->nr_hpp_node + h->depth + 1;

			print_hierarchy_indent(sep, depth, spaces, fp);
			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);

			if (max_rows && ++nr_rows >= max_rows)
				break;
		}

		/* debugging aid: dump the thread's maps when symbol lookup failed */
		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(h->thread->mg,
						   MAP__FUNCTION, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(line);
out:
	/* rem_sq_bracket was allocated by init_rem_hits() above */
	zfree(&rem_sq_bracket);

	return ret;
}

/*
 * Print a summary of per-record-type event counts, skipping types with
 * zero events and types perf_event__name() does not recognize.
 */
size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		const char *name;

		if (stats->nr_events[i] == 0)
			continue;

		name = perf_event__name(i);
		if (!strcmp(name, "UNKNOWN"))
			continue;

		ret += fprintf(fp, "%16s events: %10d\n", name,
			       stats->nr_events[i]);
	}

	return ret;
}