// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <linux/string.h>

#include "../../util/callchain.h"
#include "../../util/util.h"
#include "../../util/hist.h"
#include "../../util/map.h"
#include "../../util/map_groups.h"
#include "../../util/symbol.h"
#include "../../util/sort.h"
#include "../../util/evsel.h"
#include "../../util/srcline.h"
#include "../../util/string2.h"
#include "../../util/thread.h"
#include "../../util/sane_ctype.h"

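/* Print the base indent of the callchain area plus @left_margin extra spaces. */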
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, "            ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}

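/*
 * Print one spacer line of the call graph: a '|' column for each level
 * still active in @depth_mask, blanks for the others.
 */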
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|          ");
		else
			ret += fprintf(fp, "           ");

	ret += fprintf(fp, "\n");

	return ret;
}

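/*
 * Print one callchain entry: the '|' columns of the enclosing levels, a
 * "--<value>--" marker on the first line of a node (@period == 0), then
 * the resolved symbol name, optionally followed by branch flag counts.
 */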
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
				     struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, int left_margin)
{
	int i;
	size_t ret = 0;
	char bf[1024], *alloc_str = NULL;
	char buf[64];
	const char *str;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			ret += fprintf(fp, "--");
			ret += callchain_node__fprintf_value(node, fp, total_samples);
			ret += fprintf(fp, "--");
		} else
			ret += fprintf(fp, "%s", "          ");
	}

	str = callchain_list__sym_name(chain, bf, sizeof(bf), false);

	if (symbol_conf.show_branchflag_count) {
		callchain_list_counts__printf_value(chain, NULL,
						    buf, sizeof(buf));

		if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
			str = "Not enough memory!";
		else
			str = alloc_str;
	}

	fputs(str, fp);
	fputc('\n', fp);
	free(alloc_str);

	return ret;
}

static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

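/*
 * Allocate the fake "[...]" symbol used to report the remaining (filtered
 * or percent-limited) hits in relative graph mode.
 */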
static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}

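/*
 * Recursively print one rbtree level of the call graph, clearing bits in
 * the depth mask as the last child of a level is reached and appending a
 * "[...]" entry for the remaining hits in relative mode.
 */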
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child = NULL;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;
	int cumul_count = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;
		cumul_count += callchain_cumul_counts(child);

		/*
		 * The depth mask manages the output of the pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth, unless we have
		 * remaining filtered hits; they will supersede the last child.
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the old depth mask for the line separator,
		 * to keep the level link until we reach the last child.
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, child, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      left_margin);
		}

		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
		remaining && remaining != total_samples) {
		struct callchain_node rem_node = {
			.hit = remaining,
		};

		if (!rem_sq_bracket)
			return ret;

		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
			rem_node.count = child->parent->children_count - cumul_count;
			if (rem_node.count <= 0)
				return ret;
		}

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      left_margin);
	}

	return ret;
}

/*
 * If there is a single callchain root, don't bother printing
 * its percentage (100% in fractal mode, and the same percentage
 * as the hist entry in graph mode). This also avoids one level of column.
 *
 * However, when a percent limit is applied, a single callchain
 * node may still have a different (non-100% in fractal mode) percentage.
 */
static bool need_percent_display(struct rb_node *node, u64 parent_samples)
{
	struct callchain_node *cnode;

	if (rb_next(node))
		return true;

	cnode = rb_entry(node, struct callchain_node, rb_node);
	return callchain_cumul_hits(cnode) != parent_samples;
}

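/*
 * Print the call graph attached to one hist entry. When the single root
 * needs no percentage of its own, its entries are printed inline behind a
 * "---" connector and the recursion starts from its children.
 */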
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, u64 parent_samples,
				       int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;
	char bf[1024];

	node = rb_first(root);
	if (node && !need_percent_display(node, parent_samples)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same as
			 * the symbol; no need to print it, otherwise it appears
			 * displayed twice.
			 */
			if (!i++ && field_order == NULL &&
			    sort_order && strstarts(sort_order, "sym"))
				continue;

			if (!printed) {
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			ret += fprintf(fp, "%s",
				       callchain_list__sym_name(chain, bf,
								sizeof(bf),
								false));

			if (symbol_conf.show_branchflag_count)
				ret += callchain_list_counts__printf_value(
						chain, fp, NULL, 0);
			ret += fprintf(fp, "\n");

			if (++entries_printed == callchain_param.print_limit)
				break;
		}
		root = &cnode->rb_root;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		total_samples = parent_samples;

	ret += __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
	if (ret) {
		/* do not add a blank line if it printed nothing */
		ret += fprintf(fp, "\n");
	}

	return ret;
}

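/*
 * Print one chain in flat mode: recurse to the parent first so entries
 * are emitted top-down, skipping PERF_CONTEXT_* marker entries.
 */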
static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
					u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];

	if (!node)
		return 0;

	ret += __callchain__fprintf_flat(fp, node->parent, total_samples);


	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, "                %s\n", callchain_list__sym_name(chain,
					bf, sizeof(bf), false));
	}

	return ret;
}

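/* Print all callchains of @tree in flat mode, one value line per chain. */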
static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
				      u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {
		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += fprintf(fp, "           ");
		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, "\n");
		ret += __callchain__fprintf_flat(fp, chain, total_samples);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

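/*
 * Print one chain on a single line, entries joined by the field separator
 * (';' by default), again emitted top-down.
 */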
static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
{
	const char *sep = symbol_conf.field_sep ?: ";";
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];
	bool first;

	if (!node)
		return 0;

	ret += __callchain__fprintf_folded(fp, node->parent);

	first = (ret == 0);
	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, "%s%s", first ? "" : sep,
			       callchain_list__sym_name(chain,
						bf, sizeof(bf), false));
		first = false;
	}

	return ret;
}

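/* Print all callchains of @tree in folded mode: "<value> sym;sym;..." per line. */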
static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
					u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {

		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, " ");
		ret += __callchain__fprintf_folded(fp, chain);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

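/*
 * Print the callchain of one hist entry according to callchain_param.mode.
 * With cumulated callchains the accumulated period is used as the parent
 * sample count.
 */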
static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
					    u64 total_samples, int left_margin,
					    FILE *fp)
{
	u64 parent_samples = he->stat.period;

	if (symbol_conf.cumulate_callchain)
		parent_samples = he->stat_acc->period;

	switch (callchain_param.mode) {
	case CHAIN_GRAPH_REL:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);
		break;
	case CHAIN_GRAPH_ABS:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);
		break;
	case CHAIN_FLAT:
		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
		break;
	case CHAIN_FOLDED:
		return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
		break;
	case CHAIN_NONE:
		break;
	default:
		pr_err("Bad callchain mode\n");
	}

	return 0;
}

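/*
 * Format one hist entry into hpp->buf using the column formats of
 * @hpp_list and return the number of bytes written.
 */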
int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
			   struct perf_hpp_list *hpp_list)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	char *start = hpp->buf;
	int ret;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	perf_hpp_list__for_each_format(hpp_list, fmt) {
		if (perf_hpp__should_skip(fmt, he->hists))
			continue;

		/*
		 * If there's no field_sep, we still need
		 * to display initial '  '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	return hpp->buf - start;
}

static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
{
	return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
}

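/*
 * Print one hist entry in hierarchy mode: indent by the entry depth, print
 * the overhead columns, then the entry's own sort column(s) left-aligned,
 * and finally the callchain of leaf entries.
 */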
static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
					 struct perf_hpp *hpp,
					 struct hists *hists,
					 FILE *fp)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	char *buf = hpp->buf;
	size_t size = hpp->size;
	int ret, printed = 0;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		/*
		 * If there's no field_sep, we still need
		 * to display initial '  '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	if (!sep)
		ret = scnprintf(hpp->buf, hpp->size, "%*s",
				(hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	printed += fprintf(fp, "%s", buf);

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		hpp->buf  = buf;
		hpp->size = size;

		/*
		 * No need to call hist_entry__snprintf_alignment() since this
		 * fmt is always the last column in the hierarchy mode.
		 */
		if (perf_hpp__use_color() && fmt->color)
			fmt->color(fmt, hpp, he);
		else
			fmt->entry(fmt, hpp, he);

		/*
		 * dynamic entries are right-aligned but we want left-aligned
		 * in the hierarchy mode
		 */
		printed += fprintf(fp, "%s%s", sep ?: "  ", ltrim(buf));
	}
	printed += putc('\n', fp);

	if (he->leaf && hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
		u64 total = hists__total_period(hists);

		printed += hist_entry_callchain__fprintf(he, total, 0, fp);
		goto out;
	}

out:
	return printed;
}

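/*
 * Format one hist entry into @bf and print it together with its callchain
 * (if any), unless @ignore_callchains is set. Hierarchy mode takes a
 * separate path.
 */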
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       char *bf, size_t bfsz, FILE *fp,
			       bool ignore_callchains)
{
	int ret;
	int callchain_ret = 0;
	struct perf_hpp hpp = {
		.buf		= bf,
		.size		= size,
	};
	struct hists *hists = he->hists;
	u64 total_period = hists->stats.total_period;

	if (size == 0 || size > bfsz)
		size = hpp.size = bfsz;

	if (symbol_conf.report_hierarchy)
		return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);

	hist_entry__snprintf(he, &hpp);

	ret = fprintf(fp, "%s\n", bf);

	if (hist_entry__has_callchains(he) && !ignore_callchains)
		callchain_ret = hist_entry_callchain__fprintf(he, total_period,
							      0, fp);

	ret += callchain_ret;

	return ret;
}

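/*
 * Fill the hierarchy indent columns with @line characters; prints nothing
 * when a field separator is used or the indent is smaller than two levels.
 */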
static int print_hierarchy_indent(const char *sep, int indent,
				  const char *line, FILE *fp)
{
	if (sep != NULL || indent < 2)
		return 0;

	return fprintf(fp, "%-.*s", (indent - 2) * HIERARCHY_INDENT, line);
}

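/*
 * Print the header block for hierarchy mode: the overhead column headers
 * followed by the sort keys combined with " / ", then an underline of dots
 * sized to the widest sort level. Returns 2, the number of lines printed.
 */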
static int hists__fprintf_hierarchy_headers(struct hists *hists,
					    struct perf_hpp *hpp, FILE *fp)
{
	bool first_node, first_col;
	int indent;
	int depth;
	unsigned width = 0;
	unsigned header_width = 0;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	const char *sep = symbol_conf.field_sep;

	indent = hists->nr_hpp_node;

	/* preserve max indent depth for column headers */
	print_hierarchy_indent(sep, indent, spaces, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		fmt->header(fmt, hpp, hists, 0, NULL);
		fprintf(fp, "%s%s", hpp->buf, sep ?: "  ");
	}

	/* combine sort headers with ' / ' */
	first_node = true;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		if (!first_node)
			header_width += fprintf(fp, " / ");
		first_node = false;

		first_col = true;
		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				header_width += fprintf(fp, "+");
			first_col = false;

			fmt->header(fmt, hpp, hists, 0, NULL);

			header_width += fprintf(fp, "%s", trim(hpp->buf));
		}
	}

	fprintf(fp, "\n# ");

	/* preserve max indent depth for initial dots */
	print_hierarchy_indent(sep, indent, dots, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	first_col = true;
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		if (!first_col)
			fprintf(fp, "%s", sep ?: "..");
		first_col = false;

		width = fmt->width(fmt, hpp, hists);
		fprintf(fp, "%.*s", width, dots);
	}

	depth = 0;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		first_col = true;
		width = depth * HIERARCHY_INDENT;

		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				width++;  /* for '+' sign between column headers */
			first_col = false;

			width += fmt->width(fmt, hpp, hists);
		}

		if (width > header_width)
			header_width = width;

		depth++;
	}

	fprintf(fp, "%s%-.*s", sep ?: "  ", header_width, dots);

	fprintf(fp, "\n#\n");

	return 2;
}

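/* Print one header line of the standard (non-hierarchy) output. */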
static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
			 int line, FILE *fp)
{
	struct perf_hpp_fmt *fmt;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int span = 0;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first && !span)
			fprintf(fp, "%s", sep ?: "  ");
		else
			first = false;

		fmt->header(fmt, hpp, hists, line, &span);

		if (!span)
			fprintf(fp, "%s", hpp->buf);
	}
}

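/*
 * Print the standard column headers, one line per configured header line,
 * then, unless a field separator is set, a line of dots underlining every
 * column. Returns the number of header rows written.
 */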
static int
hists__fprintf_standard_headers(struct hists *hists,
				struct perf_hpp *hpp,
				FILE *fp)
{
	struct perf_hpp_list *hpp_list = hists->hpp_list;
	struct perf_hpp_fmt *fmt;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int line;

	for (line = 0; line < hpp_list->nr_header_lines; line++) {
		/* first # is displayed one level up */
		if (line)
			fprintf(fp, "# ");
		fprintf_line(hists, hpp, line, fp);
		fprintf(fp, "\n");
	}

	if (sep)
		return hpp_list->nr_header_lines;

	first = true;

	fprintf(fp, "# ");

	hists__for_each_format(hists, fmt) {
		unsigned int i;

		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first)
			fprintf(fp, "%s", sep ?: "  ");
		else
			first = false;

		width = fmt->width(fmt, hpp, hists);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	fprintf(fp, "#\n");
	return hpp_list->nr_header_lines + 2;
}

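/* Print the "# "-prefixed header block, hierarchy or standard as configured. */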
int hists__fprintf_headers(struct hists *hists, FILE *fp)
{
	char bf[1024];
	struct perf_hpp dummy_hpp = {
		.buf	= bf,
		.size	= sizeof(bf),
	};

	fprintf(fp, "# ");

	if (symbol_conf.report_hierarchy)
		return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
	else
		return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);

}

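/*
 * Print all non-filtered entries of @hists that are above @min_pcnt,
 * optionally preceded by the headers and capped at @max_rows rows.
 * Returns the number of characters printed for the entries.
 */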
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp,
		      bool ignore_callchains)
{
	struct rb_node *nd;
	size_t ret = 0;
	const char *sep = symbol_conf.field_sep;
	int nr_rows = 0;
	size_t linesz;
	char *line = NULL;
	unsigned indent;

	init_rem_hits();

	hists__reset_column_width(hists);

	if (symbol_conf.col_width_list_str)
		perf_hpp__set_user_width(symbol_conf.col_width_list_str);

	if (show_header)
		nr_rows += hists__fprintf_headers(hists, fp);

	if (max_rows && nr_rows >= max_rows)
		goto out;

	linesz = hists__sort_list_width(hists) + 3 + 1;
	linesz += perf_hpp__color_overhead();
	line = malloc(linesz);
	if (line == NULL) {
		ret = -1;
		goto out;
	}

	indent = hists__overhead_width(hists) + 4;

	for (nd = rb_first_cached(&hists->entries); nd;
	     nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		float percent;

		if (h->filtered)
			continue;

		percent = hist_entry__get_percent_limit(h);
		if (percent < min_pcnt)
			continue;

		ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, ignore_callchains);

		if (max_rows && ++nr_rows >= max_rows)
			break;

		/*
		 * If all children are filtered out or percent-limited,
		 * display "no entry >= x.xx%" message.
		 */
		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
			int depth = hists->nr_hpp_node + h->depth + 1;

			print_hierarchy_indent(sep, depth, spaces, fp);
			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);

			if (max_rows && ++nr_rows >= max_rows)
				break;
		}

		if (h->ms.map == NULL && verbose > 1) {
			map_groups__fprintf(h->thread->mg, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(line);
out:
	zfree(&rem_sq_bracket);

	return ret;
}

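/* Print one line per known PERF_RECORD_* type with its accumulated event count. */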
size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		const char *name;

		name = perf_event__name(i);
		if (!strcmp(name, "UNKNOWN"))
			continue;

		ret += fprintf(fp, "%16s events: %10d\n", name, stats->nr_events[i]);
	}

	return ret;
}