/*
 * xref: /openbmc/linux/tools/perf/ui/stdio/hist.c
 * (revision 0db64dd060f7fd77921be8f10fa9f7a5f49a3a43)
 */
#include <stdio.h>

#include "../../util/util.h"
#include "../../util/hist.h"
#include "../../util/sort.h"
#include "../../util/evsel.h"


static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, "            ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}

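/*
 * Print the inlined call sites for @ip, one per line, below the callchain
 * entry that owns them.  Only user-space DSOs are considered; each line is
 * prefixed with the usual callchain margin and depth pipes so the inlined
 * frames line up with the surrounding graph output.
 */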
static size_t inline__fprintf(struct map *map, u64 ip, int left_margin,
			      int depth, int depth_mask, FILE *fp)
{
	struct dso *dso;
	struct inline_node *node;
	struct inline_list *ilist;
	int ret = 0, i;

	if (map == NULL)
		return 0;

	dso = map->dso;
	if (dso == NULL)
		return 0;

	if (dso->kernel != DSO_TYPE_USER)
		return 0;

	node = dso__parse_addr_inlines(dso,
				       map__rip_2objdump(map, ip));
	if (node == NULL)
		return 0;

	list_for_each_entry(ilist, &node->val, list) {
		if ((ilist->filename != NULL) || (ilist->funcname != NULL)) {
			ret += callchain__fprintf_left_margin(fp, left_margin);

			for (i = 0; i < depth; i++) {
				if (depth_mask & (1 << i))
					ret += fprintf(fp, "|");
				else
					ret += fprintf(fp, " ");
				ret += fprintf(fp, "          ");
			}

			if (callchain_param.key == CCKEY_ADDRESS) {
				if (ilist->filename != NULL)
					ret += fprintf(fp, "%s:%d (inline)",
						       ilist->filename,
						       ilist->line_nr);
				else
					ret += fprintf(fp, "??");
			} else if (ilist->funcname != NULL)
				ret += fprintf(fp, "%s (inline)",
					       ilist->funcname);
			else if (ilist->filename != NULL)
				ret += fprintf(fp, "%s:%d (inline)",
					       ilist->filename,
					       ilist->line_nr);
			else
				ret += fprintf(fp, "??");

			ret += fprintf(fp, "\n");
		}
	}

	inline_node__delete(node);
	return ret;
}

static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|          ");
		else
			ret += fprintf(fp, "           ");

	ret += fprintf(fp, "\n");

	return ret;
}

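/*
 * Print one callchain entry in graph mode: the depth pipes, an optional
 * "--value--" marker on the first entry of a branch, the symbol name and,
 * when requested, branch flag counts and inlined frames.  The result looks
 * roughly like this (illustrative only):
 *
 *     |--50.00%--foo
 *     |          bar
 */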
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
				     struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, int left_margin)
{
	int i;
	size_t ret = 0;
	char bf[1024], *alloc_str = NULL;
	char buf[64];
	const char *str;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			ret += fprintf(fp, "--");
			ret += callchain_node__fprintf_value(node, fp, total_samples);
			ret += fprintf(fp, "--");
		} else
			ret += fprintf(fp, "%s", "          ");
	}

	str = callchain_list__sym_name(chain, bf, sizeof(bf), false);

	if (symbol_conf.show_branchflag_count) {
		if (!period)
			callchain_list_counts__printf_value(node, chain, NULL,
							    buf, sizeof(buf));
		else
			callchain_list_counts__printf_value(NULL, chain, NULL,
							    buf, sizeof(buf));

		if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
			str = "Not enough memory!";
		else
			str = alloc_str;
	}

	fputs(str, fp);
	fputc('\n', fp);
	free(alloc_str);

	if (symbol_conf.inline_name)
		ret += inline__fprintf(chain->ms.map, chain->ip,
				       left_margin, depth, depth_mask, fp);
	return ret;
}

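/*
 * Placeholder entry used to report callchain hits that were dropped by the
 * percent limit in relative graph mode; they show up as a trailing "[...]"
 * branch.
 */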
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}

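/*
 * Recursively print one level of the callchain tree in graph mode.
 * @depth_mask has one bit per depth level; a set bit means a '|' pipe must
 * still be drawn at that column because more siblings follow below.
 */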
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child = NULL;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;
	int cumul_count = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;
		cumul_count += callchain_cumul_counts(child);

		/*
		 * The depth mask manages the output of the pipes that show
		 * the depth.  We don't want to keep the pipes of the current
		 * level for the last child of this depth, except if we have
		 * remaining filtered hits: they will supersede the last
		 * child.
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But keep the old depth mask for the line separator, so the
		 * level link is preserved until we reach the last child.
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, child, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      left_margin);
		}

		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
		remaining && remaining != total_samples) {
		struct callchain_node rem_node = {
			.hit = remaining,
		};

		if (!rem_sq_bracket)
			return ret;

		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
			rem_node.count = child->parent->children_count - cumul_count;
			if (rem_node.count <= 0)
				return ret;
		}

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      left_margin);
	}

	return ret;
}

/*
 * If we have a single callchain root, don't bother printing
 * its percentage (100% in fractal mode, and the same percentage
 * as the hist entry in graph mode).  This also avoids one level of columns.
 *
 * However, when a percent limit is applied, a single callchain
 * node may still have a different (non-100% in fractal mode) percentage.
 */
static bool need_percent_display(struct rb_node *node, u64 parent_samples)
{
	struct callchain_node *cnode;

	if (rb_next(node))
		return true;

	cnode = rb_entry(node, struct callchain_node, rb_node);
	return callchain_cumul_hits(cnode) != parent_samples;
}

static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, u64 parent_samples,
				       int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;
	char bf[1024];

	node = rb_first(root);
	if (node && !need_percent_display(node, parent_samples)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same as
			 * the symbol itself.  No need to print it, otherwise it
			 * appears displayed twice.
			 */
			if (!i++ && field_order == NULL &&
			    sort_order && !prefixcmp(sort_order, "sym"))
				continue;

			if (!printed) {
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			ret += fprintf(fp, "%s",
				       callchain_list__sym_name(chain, bf,
								sizeof(bf),
								false));

			if (symbol_conf.show_branchflag_count)
				ret += callchain_list_counts__printf_value(
						NULL, chain, fp, NULL, 0);
			ret += fprintf(fp, "\n");

			if (++entries_printed == callchain_param.print_limit)
				break;

			if (symbol_conf.inline_name)
				ret += inline__fprintf(chain->ms.map,
						       chain->ip,
						       left_margin,
						       0, 0,
						       fp);
		}
		root = &cnode->rb_root;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		total_samples = parent_samples;

	ret += __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
	if (ret) {
		/* do not add a blank line if it printed nothing */
		ret += fprintf(fp, "\n");
	}

	return ret;
}

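/*
 * Flat mode: for each callchain node, print a line with its value
 * (period/percentage) followed by the full chain, one symbol per line,
 * indented underneath.
 */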
static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
					u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];

	if (!node)
		return 0;

	ret += __callchain__fprintf_flat(fp, node->parent, total_samples);


	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, "                %s\n", callchain_list__sym_name(chain,
					bf, sizeof(bf), false));
	}

	return ret;
}

static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
				      u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {
		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += fprintf(fp, "           ");
		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, "\n");
		ret += __callchain__fprintf_flat(fp, chain, total_samples);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

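/*
 * Folded mode: print each callchain on a single line, with the symbols
 * joined by symbol_conf.field_sep (';' by default) and preceded by the
 * chain's value, e.g. (illustrative only): "50.00% main;foo;bar".
 */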
static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
{
	const char *sep = symbol_conf.field_sep ?: ";";
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];
	bool first;

	if (!node)
		return 0;

	ret += __callchain__fprintf_folded(fp, node->parent);

	first = (ret == 0);
	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, "%s%s", first ? "" : sep,
			       callchain_list__sym_name(chain,
						bf, sizeof(bf), false));
		first = false;
	}

	return ret;
}

static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
					u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {

		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, " ");
		ret += __callchain__fprintf_folded(fp, chain);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

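/*
 * Print the callchain attached to one hist entry, dispatching on
 * callchain_param.mode.  @total_samples scales the percentages; with
 * cumulate_callchain the entry's accumulated period is used as the parent
 * value instead of its own period.
 */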
static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
					    u64 total_samples, int left_margin,
					    FILE *fp)
{
	u64 parent_samples = he->stat.period;

	if (symbol_conf.cumulate_callchain)
		parent_samples = he->stat_acc->period;

	switch (callchain_param.mode) {
	case CHAIN_GRAPH_REL:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);
	case CHAIN_GRAPH_ABS:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);
	case CHAIN_FLAT:
		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
	case CHAIN_FOLDED:
		return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
	case CHAIN_NONE:
		break;
	default:
		pr_err("Bad callchain mode\n");
	}

	return 0;
}

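/*
 * Format the columns of one hist entry into hpp->buf according to the given
 * format list, separating fields with symbol_conf.field_sep (two spaces by
 * default).  Returns the number of characters written.
 */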
int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
			   struct perf_hpp_list *hpp_list)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	char *start = hpp->buf;
	int ret;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	perf_hpp_list__for_each_format(hpp_list, fmt) {
		if (perf_hpp__should_skip(fmt, he->hists))
			continue;

		/*
		 * If there's no field_sep, we still need
		 * to display initial '  '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	return hpp->buf - start;
}

static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
{
	return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
}

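/*
 * Print one entry in hierarchy (--hierarchy) mode: the overhead columns
 * first, then the entry's own sort key columns, indented by the entry's
 * depth, followed by its callchain if the entry is a leaf.
 */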
static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
					 struct perf_hpp *hpp,
					 struct hists *hists,
					 FILE *fp)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	char *buf = hpp->buf;
	size_t size = hpp->size;
	int ret, printed = 0;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		/*
		 * If there's no field_sep, we still need
		 * to display initial '  '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	if (!sep)
		ret = scnprintf(hpp->buf, hpp->size, "%*s",
				(hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	printed += fprintf(fp, "%s", buf);

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		hpp->buf  = buf;
		hpp->size = size;

		/*
		 * No need to call hist_entry__snprintf_alignment() since this
		 * fmt is always the last column in the hierarchy mode.
		 */
		if (perf_hpp__use_color() && fmt->color)
			fmt->color(fmt, hpp, he);
		else
			fmt->entry(fmt, hpp, he);

		/*
		 * dynamic entries are right-aligned but we want left-aligned
		 * in the hierarchy mode
		 */
		printed += fprintf(fp, "%s%s", sep ?: "  ", ltrim(buf));
	}
	printed += putc('\n', fp);

	if (symbol_conf.use_callchain && he->leaf) {
		u64 total = hists__total_period(hists);

		printed += hist_entry_callchain__fprintf(he, total, 0, fp);
		goto out;
	}

out:
	return printed;
}

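/*
 * Print a single hist entry: format its columns into @bf, emit the line and
 * then, depending on the configuration, its callchain or its inlined frames.
 * Hierarchy mode is handled separately.
 */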
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       char *bf, size_t bfsz, FILE *fp,
			       bool use_callchain)
{
	int ret;
	int callchain_ret = 0;
	int inline_ret = 0;
	struct perf_hpp hpp = {
		.buf		= bf,
		.size		= size,
	};
	struct hists *hists = he->hists;
	u64 total_period = hists->stats.total_period;

	if (size == 0 || size > bfsz)
		size = hpp.size = bfsz;

	if (symbol_conf.report_hierarchy)
		return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);

	hist_entry__snprintf(he, &hpp);

	ret = fprintf(fp, "%s\n", bf);

	if (use_callchain)
		callchain_ret = hist_entry_callchain__fprintf(he, total_period,
							      0, fp);

	if (callchain_ret == 0 && symbol_conf.inline_name) {
		inline_ret = inline__fprintf(he->ms.map, he->ip, 0, 0, 0, fp);
		ret += inline_ret;
		if (inline_ret > 0)
			ret += fprintf(fp, "\n");
	} else
		ret += callchain_ret;

	return ret;
}

static int print_hierarchy_indent(const char *sep, int indent,
				  const char *line, FILE *fp)
{
	if (sep != NULL || indent < 2)
		return 0;

	return fprintf(fp, "%-.*s", (indent - 2) * HIERARCHY_INDENT, line);
}

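/*
 * Print the header block used in hierarchy mode: the overhead column
 * headers followed by the sort keys of each level joined with " / ", and a
 * line of dots underneath sized to the widest level.
 */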
static int hists__fprintf_hierarchy_headers(struct hists *hists,
					    struct perf_hpp *hpp, FILE *fp)
{
	bool first_node, first_col;
	int indent;
	int depth;
	unsigned width = 0;
	unsigned header_width = 0;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	const char *sep = symbol_conf.field_sep;

	indent = hists->nr_hpp_node;

	/* preserve max indent depth for column headers */
	print_hierarchy_indent(sep, indent, spaces, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		fmt->header(fmt, hpp, hists, 0, NULL);
		fprintf(fp, "%s%s", hpp->buf, sep ?: "  ");
	}

	/* combine sort headers with ' / ' */
	first_node = true;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		if (!first_node)
			header_width += fprintf(fp, " / ");
		first_node = false;

		first_col = true;
		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				header_width += fprintf(fp, "+");
			first_col = false;

			fmt->header(fmt, hpp, hists, 0, NULL);

			header_width += fprintf(fp, "%s", trim(hpp->buf));
		}
	}

	fprintf(fp, "\n# ");

	/* preserve max indent depth for initial dots */
	print_hierarchy_indent(sep, indent, dots, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	first_col = true;
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		if (!first_col)
			fprintf(fp, "%s", sep ?: "..");
		first_col = false;

		width = fmt->width(fmt, hpp, hists);
		fprintf(fp, "%.*s", width, dots);
	}

	depth = 0;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		first_col = true;
		width = depth * HIERARCHY_INDENT;

		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				width++;  /* for the '+' sign between column headers */
			first_col = false;

			width += fmt->width(fmt, hpp, hists);
		}

		if (width > header_width)
			header_width = width;

		depth++;
	}

	fprintf(fp, "%s%-.*s", sep ?: "  ", header_width, dots);

	fprintf(fp, "\n#\n");

	return 2;
}

static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
			 int line, FILE *fp)
{
	struct perf_hpp_fmt *fmt;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int span = 0;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first && !span)
			fprintf(fp, "%s", sep ?: "  ");
		else
			first = false;

		fmt->header(fmt, hpp, hists, line, &span);

		if (!span)
			fprintf(fp, "%s", hpp->buf);
	}
}

static int
hists__fprintf_standard_headers(struct hists *hists,
				struct perf_hpp *hpp,
				FILE *fp)
{
	struct perf_hpp_list *hpp_list = hists->hpp_list;
	struct perf_hpp_fmt *fmt;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int line;

	for (line = 0; line < hpp_list->nr_header_lines; line++) {
		/* first # is displayed one level up */
		if (line)
			fprintf(fp, "# ");
		fprintf_line(hists, hpp, line, fp);
		fprintf(fp, "\n");
	}

	if (sep)
		return hpp_list->nr_header_lines;

	first = true;

	fprintf(fp, "# ");

	hists__for_each_format(hists, fmt) {
		unsigned int i;

		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first)
			fprintf(fp, "%s", sep ?: "  ");
		else
			first = false;

		width = fmt->width(fmt, hpp, hists);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	fprintf(fp, "#\n");
	return hpp_list->nr_header_lines + 2;
}

int hists__fprintf_headers(struct hists *hists, FILE *fp)
{
	char bf[1024];
	struct perf_hpp dummy_hpp = {
		.buf	= bf,
		.size	= sizeof(bf),
	};

	fprintf(fp, "# ");

	if (symbol_conf.report_hierarchy)
		return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
	else
		return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
}

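/*
 * Main stdio output routine: print the (optional) headers and then every
 * hist entry that survives filtering and the minimum-percent limit, stopping
 * after @max_rows rows when a limit is given.  Returns the number of
 * characters written, or -1 if the line buffer cannot be allocated.
 */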
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp,
		      bool use_callchain)
{
	struct rb_node *nd;
	size_t ret = 0;
	const char *sep = symbol_conf.field_sep;
	int nr_rows = 0;
	size_t linesz;
	char *line = NULL;
	unsigned indent;

	init_rem_hits();

	hists__reset_column_width(hists);

	if (symbol_conf.col_width_list_str)
		perf_hpp__set_user_width(symbol_conf.col_width_list_str);

	if (show_header)
		nr_rows += hists__fprintf_headers(hists, fp);

	if (max_rows && nr_rows >= max_rows)
		goto out;

	linesz = hists__sort_list_width(hists) + 3 + 1;
	linesz += perf_hpp__color_overhead();
	line = malloc(linesz);
	if (line == NULL) {
		ret = -1;
		goto out;
	}

	indent = hists__overhead_width(hists) + 4;

	for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		float percent;

		if (h->filtered)
			continue;

		percent = hist_entry__get_percent_limit(h);
		if (percent < min_pcnt)
			continue;

		ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, use_callchain);

		if (max_rows && ++nr_rows >= max_rows)
			break;

		/*
		 * If all children are filtered out or percent-limited,
		 * display the "no entry >= x.xx%" message.
		 */
		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
			int depth = hists->nr_hpp_node + h->depth + 1;

			print_hierarchy_indent(sep, depth, spaces, fp);
			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);

			if (max_rows && ++nr_rows >= max_rows)
				break;
		}

		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(h->thread->mg,
						   MAP__FUNCTION, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(line);
out:
	zfree(&rem_sq_bracket);

	return ret;
}

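/*
 * Print a per-record-type summary of the events seen in the session,
 * skipping types with no occurrences and types perf_event__name() does not
 * know about.
 */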
size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		const char *name;

		if (stats->nr_events[i] == 0)
			continue;

		name = perf_event__name(i);
		if (!strcmp(name, "UNKNOWN"))
			continue;

		ret += fprintf(fp, "%16s events: %10d\n", name,
			       stats->nr_events[i]);
	}

	return ret;
}