// SPDX-License-Identifier: GPL-2.0
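/*
 * Plain text (stdio) output of histogram entries, headers and callchains,
 * as used by tools such as 'perf report --stdio'.
 */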
#include <stdio.h>
#include <linux/string.h>

#include "../../util/util.h"
#include "../../util/hist.h"
#include "../../util/sort.h"
#include "../../util/evsel.h"
#include "../../util/srcline.h"
#include "../../util/string2.h"
#include "../../util/thread.h"
#include "../../util/sane_ctype.h"

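/*
 * Print the fixed leading whitespace plus @left_margin extra spaces in front
 * of a callchain graph line; returns the number of characters written.
 */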
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, "            ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}

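/*
 * Print one spacer line of the callchain graph: a '|' for every depth level
 * still set in @depth_mask, so the vertical connectors line up with the
 * entries above and below.
 */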
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|          ");
		else
			ret += fprintf(fp, "           ");

	ret += fprintf(fp, "\n");

	return ret;
}

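/*
 * Print a single callchain_list entry of @node at the given depth.  When
 * @period is zero (the first entry of the node), the last depth column gets
 * a '--<value>--' connector carrying the node's value as formatted by
 * callchain_node__fprintf_value().
 */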
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
				     struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, int left_margin)
{
	int i;
	size_t ret = 0;
	char bf[1024], *alloc_str = NULL;
	char buf[64];
	const char *str;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			ret += fprintf(fp, "--");
			ret += callchain_node__fprintf_value(node, fp, total_samples);
			ret += fprintf(fp, "--");
		} else
			ret += fprintf(fp, "%s", "          ");
	}

	str = callchain_list__sym_name(chain, bf, sizeof(bf), false);

	if (symbol_conf.show_branchflag_count) {
		callchain_list_counts__printf_value(chain, NULL,
						    buf, sizeof(buf));

		if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
			str = "Not enough memory!";
		else
			str = alloc_str;
	}

	fputs(str, fp);
	fputc('\n', fp);
	free(alloc_str);

	return ret;
}

static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

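/*
 * Set up the fake "[...]" symbol that rem_hits uses to stand in for
 * callchain hits which are not shown individually (the "remaining" hits in
 * CHAIN_GRAPH_REL mode).
 */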
static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}

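/*
 * Recursively print one level of the callchain rb-tree in graph mode.
 * @depth_mask records which ancestor columns still need a '|' connector;
 * in CHAIN_GRAPH_REL mode any hits that remain unprinted are summarized by
 * a trailing "[...]" entry.
 */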
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child = NULL;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;
	int cumul_count = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;
		cumul_count += callchain_cumul_counts(child);

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth, except if we have
		 * remaining filtered hits, which will supersede the last child.
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child.
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, child, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      left_margin);
		}

		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
		remaining && remaining != total_samples) {
		struct callchain_node rem_node = {
			.hit = remaining,
		};

		if (!rem_sq_bracket)
			return ret;

		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
			rem_node.count = child->parent->children_count - cumul_count;
			if (rem_node.count <= 0)
				return ret;
		}

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      left_margin);
	}

	return ret;
}

/*
 * If there is a single callchain root, don't bother printing
 * its percentage (100% in fractal mode, and the same percentage as
 * the hist entry in graph mode).  This also avoids one level of columns.
 *
 * However, when a percent limit is applied, it is possible that the single
 * callchain node has a different (non-100% in fractal mode) percentage.
 */
static bool need_percent_display(struct rb_node *node, u64 parent_samples)
{
	struct callchain_node *cnode;

	if (rb_next(node))
		return true;

	cnode = rb_entry(node, struct callchain_node, rb_node);
	return callchain_cumul_hits(cnode) != parent_samples;
}

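/*
 * Print the callchain of a hist entry in graph mode.  When the chain has a
 * single root whose percentage is not interesting, its entries are attached
 * to a "---" stem instead; the output then looks roughly like:
 *
 *            |
 *            ---main
 *               |
 *               |--60.00%--foo
 *               |
 *                --40.00%--bar
 */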
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, u64 parent_samples,
				       int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;
	char bf[1024];

	node = rb_first(root);
	if (node && !need_percent_display(node, parent_samples)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same
			 * as the symbol itself.  No need to print it, otherwise
			 * it would appear twice.
			 */
			if (!i++ && field_order == NULL &&
			    sort_order && strstarts(sort_order, "sym"))
				continue;

			if (!printed) {
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			ret += fprintf(fp, "%s",
				       callchain_list__sym_name(chain, bf,
								sizeof(bf),
								false));

			if (symbol_conf.show_branchflag_count)
				ret += callchain_list_counts__printf_value(
						chain, fp, NULL, 0);
			ret += fprintf(fp, "\n");

			if (++entries_printed == callchain_param.print_limit)
				break;
		}
		root = &cnode->rb_root;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		total_samples = parent_samples;

	ret += __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
	if (ret) {
		/* do not add a blank line if nothing was printed */
		ret += fprintf(fp, "\n");
	}

	return ret;
}

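/*
 * Print a single flat callchain: recursively print the parent chain first,
 * then this node's own entries, skipping PERF_CONTEXT_* marker ips.
 */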
static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
					u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];

	if (!node)
		return 0;

	ret += __callchain__fprintf_flat(fp, node->parent, total_samples);

	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, "                %s\n",
			       callchain_list__sym_name(chain, bf, sizeof(bf), false));
	}

	return ret;
}

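/*
 * Print every flat callchain of the tree: a value line followed by the
 * chain's symbols, honouring callchain_param.print_limit.
 */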
static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
				      u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {
		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += fprintf(fp, "           ");
		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, "\n");
		ret += __callchain__fprintf_flat(fp, chain, total_samples);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

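/*
 * Print one callchain in folded form: all of the chain's symbols on a single
 * line, joined by symbol_conf.field_sep (';' by default) -- the format
 * commonly consumed by flame graph scripts.
 */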
static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
{
	const char *sep = symbol_conf.field_sep ?: ";";
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];
	bool first;

	if (!node)
		return 0;

	ret += __callchain__fprintf_folded(fp, node->parent);

	first = (ret == 0);
	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, "%s%s", first ? "" : sep,
			       callchain_list__sym_name(chain,
						bf, sizeof(bf), false));
		first = false;
	}

	return ret;
}

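/*
 * Print every folded callchain of the tree: the chain's value, a space, then
 * the folded symbol list, one chain per line.
 */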
static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
					u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {
		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, " ");
		ret += __callchain__fprintf_folded(fp, chain);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

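/*
 * Dispatch to the callchain printer matching callchain_param.mode.  With
 * symbol_conf.cumulate_callchain set (perf report --children), the
 * accumulated period is used as the parent sample count for the graph modes.
 */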
static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
					    u64 total_samples, int left_margin,
					    FILE *fp)
{
	u64 parent_samples = he->stat.period;

	if (symbol_conf.cumulate_callchain)
		parent_samples = he->stat_acc->period;

	switch (callchain_param.mode) {
	case CHAIN_GRAPH_REL:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);
	case CHAIN_GRAPH_ABS:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);
	case CHAIN_FLAT:
		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
	case CHAIN_FOLDED:
		return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
	case CHAIN_NONE:
		break;
	default:
		pr_err("Bad callchain mode\n");
	}

	return 0;
}

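/*
 * Format one hist entry into hpp->buf according to the column formats in
 * @hpp_list, separating columns with symbol_conf.field_sep (two spaces by
 * default).  Returns the number of bytes written to the buffer.
 */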
int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
			   struct perf_hpp_list *hpp_list)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	char *start = hpp->buf;
	int ret;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	perf_hpp_list__for_each_format(hpp_list, fmt) {
		if (perf_hpp__should_skip(fmt, he->hists))
			continue;

		/*
		 * If there's no field_sep, we still need
		 * to display initial '  '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	return hpp->buf - start;
}

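/* Format a hist entry using the hists' default column (hpp) list. */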
static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
{
	return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
}

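/*
 * Print one entry in --hierarchy mode: indent by the entry's depth, print
 * the overhead columns, then the entry's own sort key left-aligned, and
 * finally the callchain for leaf entries.
 */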
static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
					 struct perf_hpp *hpp,
					 struct hists *hists,
					 FILE *fp)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	char *buf = hpp->buf;
	size_t size = hpp->size;
	int ret, printed = 0;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		/*
		 * If there's no field_sep, we still need
		 * to display initial '  '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	if (!sep)
		ret = scnprintf(hpp->buf, hpp->size, "%*s",
				(hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	printed += fprintf(fp, "%s", buf);

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		hpp->buf  = buf;
		hpp->size = size;

		/*
		 * No need to call hist_entry__snprintf_alignment() since this
		 * fmt is always the last column in the hierarchy mode.
		 */
		if (perf_hpp__use_color() && fmt->color)
			fmt->color(fmt, hpp, he);
		else
			fmt->entry(fmt, hpp, he);

		/*
		 * dynamic entries are right-aligned but we want left-aligned
		 * in the hierarchy mode
		 */
		printed += fprintf(fp, "%s%s", sep ?: "  ", ltrim(buf));
	}
	printed += putc('\n', fp);

	if (he->leaf && hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
		u64 total = hists__total_period(hists);

		printed += hist_entry_callchain__fprintf(he, total, 0, fp);
	}

	return printed;
}

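/*
 * Print one formatted hist entry line to @fp, followed by its callchain when
 * requested; @bf/@bfsz is the scratch line buffer.  Hierarchy mode is handled
 * separately by hist_entry__hierarchy_fprintf().
 */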
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       char *bf, size_t bfsz, FILE *fp,
			       bool use_callchain)
{
	int ret;
	int callchain_ret = 0;
	struct perf_hpp hpp = {
		.buf		= bf,
		.size		= size,
	};
	struct hists *hists = he->hists;
	u64 total_period = hists->stats.total_period;

	if (size == 0 || size > bfsz)
		size = hpp.size = bfsz;

	if (symbol_conf.report_hierarchy)
		return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);

	hist_entry__snprintf(he, &hpp);

	ret = fprintf(fp, "%s\n", bf);

	if (hist_entry__has_callchains(he) && use_callchain)
		callchain_ret = hist_entry_callchain__fprintf(he, total_period,
							      0, fp);

	ret += callchain_ret;

	return ret;
}

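/*
 * Print the indentation used in front of hierarchy headers and the
 * "no entry" message; does nothing when a field separator is in use or the
 * indent is too small.
 */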
static int print_hierarchy_indent(const char *sep, int indent,
				  const char *line, FILE *fp)
{
	if (sep != NULL || indent < 2)
		return 0;

	return fprintf(fp, "%-.*s", (indent - 2) * HIERARCHY_INDENT, line);
}

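/*
 * Print the --hierarchy mode headers: the overhead column headers, the sort
 * keys joined with ' / ', and a line of dots underneath.  The return value
 * is the number of rows accounted to the caller.
 */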
static int hists__fprintf_hierarchy_headers(struct hists *hists,
					    struct perf_hpp *hpp, FILE *fp)
{
	bool first_node, first_col;
	int indent;
	int depth;
	unsigned width = 0;
	unsigned header_width = 0;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	const char *sep = symbol_conf.field_sep;

	indent = hists->nr_hpp_node;

	/* preserve max indent depth for column headers */
	print_hierarchy_indent(sep, indent, spaces, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		fmt->header(fmt, hpp, hists, 0, NULL);
		fprintf(fp, "%s%s", hpp->buf, sep ?: "  ");
	}

	/* combine sort headers with ' / ' */
	first_node = true;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		if (!first_node)
			header_width += fprintf(fp, " / ");
		first_node = false;

		first_col = true;
		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				header_width += fprintf(fp, "+");
			first_col = false;

			fmt->header(fmt, hpp, hists, 0, NULL);

			header_width += fprintf(fp, "%s", trim(hpp->buf));
		}
	}

	fprintf(fp, "\n# ");

	/* preserve max indent depth for initial dots */
	print_hierarchy_indent(sep, indent, dots, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	first_col = true;
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		if (!first_col)
			fprintf(fp, "%s", sep ?: "..");
		first_col = false;

		width = fmt->width(fmt, hpp, hists);
		fprintf(fp, "%.*s", width, dots);
	}

	depth = 0;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		first_col = true;
		width = depth * HIERARCHY_INDENT;

		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				width++;  /* for the '+' sign between column headers */
			first_col = false;

			width += fmt->width(fmt, hpp, hists);
		}

		if (width > header_width)
			header_width = width;

		depth++;
	}

	fprintf(fp, "%s%-.*s", sep ?: "  ", header_width, dots);

	fprintf(fp, "\n#\n");

	return 2;
}

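/*
 * Print a single header line of the standard (non-hierarchy) output; when a
 * format spans multiple columns, the separator and the repeated header text
 * are suppressed.
 */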
static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
			 int line, FILE *fp)
{
	struct perf_hpp_fmt *fmt;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int span = 0;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first && !span)
			fprintf(fp, "%s", sep ?: "  ");
		else
			first = false;

		fmt->header(fmt, hpp, hists, line, &span);

		if (!span)
			fprintf(fp, "%s", hpp->buf);
	}
}

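/*
 * Print the standard column headers, then (unless a field separator is set)
 * a line of dots underlining each column and a bare '#' line.  Returns the
 * number of header lines written.
 */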
static int
hists__fprintf_standard_headers(struct hists *hists,
				struct perf_hpp *hpp,
				FILE *fp)
{
	struct perf_hpp_list *hpp_list = hists->hpp_list;
	struct perf_hpp_fmt *fmt;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int line;

	for (line = 0; line < hpp_list->nr_header_lines; line++) {
		/* first # is displayed one level up */
		if (line)
			fprintf(fp, "# ");
		fprintf_line(hists, hpp, line, fp);
		fprintf(fp, "\n");
	}

	if (sep)
		return hpp_list->nr_header_lines;

	first = true;

	fprintf(fp, "# ");

	hists__for_each_format(hists, fmt) {
		unsigned int i;

		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first)
			fprintf(fp, "%s", sep ?: "  ");
		else
			first = false;

		width = fmt->width(fmt, hpp, hists);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	fprintf(fp, "#\n");
	return hpp_list->nr_header_lines + 2;
}

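/*
 * Print the "# ..." header block in either hierarchy or standard mode and
 * return the number of rows it occupies.
 */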
int hists__fprintf_headers(struct hists *hists, FILE *fp)
{
	char bf[1024];
	struct perf_hpp dummy_hpp = {
		.buf	= bf,
		.size	= sizeof(bf),
	};

	fprintf(fp, "# ");

	if (symbol_conf.report_hierarchy)
		return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
	else
		return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
}

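/*
 * Print an entire hists tree to @fp: optional headers, then each unfiltered
 * entry whose percentage is at least @min_pcnt, stopping once @max_rows rows
 * have been printed (when non-zero).  Returns the number of characters used
 * by the printed entries, or (size_t)-1 if the line buffer cannot be
 * allocated.
 */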
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp,
		      bool use_callchain)
{
	struct rb_node *nd;
	size_t ret = 0;
	const char *sep = symbol_conf.field_sep;
	int nr_rows = 0;
	size_t linesz;
	char *line = NULL;
	unsigned indent;

	init_rem_hits();

	hists__reset_column_width(hists);

	if (symbol_conf.col_width_list_str)
		perf_hpp__set_user_width(symbol_conf.col_width_list_str);

	if (show_header)
		nr_rows += hists__fprintf_headers(hists, fp);

	if (max_rows && nr_rows >= max_rows)
		goto out;

	linesz = hists__sort_list_width(hists) + 3 + 1;
	linesz += perf_hpp__color_overhead();
	line = malloc(linesz);
	if (line == NULL) {
		ret = -1;
		goto out;
	}

	indent = hists__overhead_width(hists) + 4;

	for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		float percent;

		if (h->filtered)
			continue;

		percent = hist_entry__get_percent_limit(h);
		if (percent < min_pcnt)
			continue;

		ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, use_callchain);

		if (max_rows && ++nr_rows >= max_rows)
			break;

		/*
		 * If all children are filtered out or percent-limited,
		 * display a "no entry >= x.xx%" message.
		 */
		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
			int depth = hists->nr_hpp_node + h->depth + 1;

			print_hierarchy_indent(sep, depth, spaces, fp);
			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);

			if (max_rows && ++nr_rows >= max_rows)
				break;
		}

		if (h->ms.map == NULL && verbose > 1) {
			map_groups__fprintf(h->thread->mg, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(line);
out:
	zfree(&rem_sq_bracket);

	return ret;
}

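/*
 * Print one "<name> events: <count>" line for each PERF_RECORD_* type that
 * perf_event__name() knows about.
 */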
size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		const char *name;

		name = perf_event__name(i);
		if (!strcmp(name, "UNKNOWN"))
			continue;

		ret += fprintf(fp, "%16s events: %10d\n", name, stats->nr_events[i]);
	}

	return ret;
}
851