xref: /openbmc/linux/tools/perf/ui/stdio/hist.c (revision 4d2804b7)
1 #include <stdio.h>
2 
3 #include "../../util/util.h"
4 #include "../../util/hist.h"
5 #include "../../util/sort.h"
6 #include "../../util/evsel.h"
7 #include "../../util/srcline.h"
8 #include "../../util/string2.h"
9 #include "../../util/thread.h"
10 #include "../../util/sane_ctype.h"
11 
12 static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
13 {
14 	int i;
15 	int ret = fprintf(fp, "            ");
16 
17 	for (i = 0; i < left_margin; i++)
18 		ret += fprintf(fp, " ");
19 
20 	return ret;
21 }
22 
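/*
 * Print one "(inline)" line per inlined frame found at 'ip' in 'map',
 * aligned with the surrounding call-graph columns. Inline information
 * is only looked up for user-space DSOs; anything else prints nothing.
 */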
23 static size_t inline__fprintf(struct map *map, u64 ip, int left_margin,
24 			      int depth, int depth_mask, FILE *fp)
25 {
26 	struct dso *dso;
27 	struct inline_node *node;
28 	struct inline_list *ilist;
29 	int ret = 0, i;
30 
31 	if (map == NULL)
32 		return 0;
33 
34 	dso = map->dso;
35 	if (dso == NULL)
36 		return 0;
37 
38 	if (dso->kernel != DSO_TYPE_USER)
39 		return 0;
40 
41 	node = dso__parse_addr_inlines(dso,
42 				       map__rip_2objdump(map, ip));
43 	if (node == NULL)
44 		return 0;
45 
46 	list_for_each_entry(ilist, &node->val, list) {
47 		if ((ilist->filename != NULL) || (ilist->funcname != NULL)) {
48 			ret += callchain__fprintf_left_margin(fp, left_margin);
49 
50 			for (i = 0; i < depth; i++) {
51 				if (depth_mask & (1 << i))
52 					ret += fprintf(fp, "|");
53 				else
54 					ret += fprintf(fp, " ");
55 				ret += fprintf(fp, "          ");
56 			}
57 
58 			if (callchain_param.key == CCKEY_ADDRESS ||
59 			    callchain_param.key == CCKEY_SRCLINE) {
60 				if (ilist->filename != NULL)
61 					ret += fprintf(fp, "%s:%d (inline)",
62 						       ilist->filename,
63 						       ilist->line_nr);
64 				else
65 					ret += fprintf(fp, "??");
66 			} else if (ilist->funcname != NULL)
67 				ret += fprintf(fp, "%s (inline)",
68 					       ilist->funcname);
69 			else if (ilist->filename != NULL)
70 				ret += fprintf(fp, "%s:%d (inline)",
71 					       ilist->filename,
72 					       ilist->line_nr);
73 			else
74 				ret += fprintf(fp, "??");
75 
76 			ret += fprintf(fp, "\n");
77 		}
78 	}
79 
80 	inline_node__delete(node);
81 	return ret;
82 }
83 
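/*
 * Print a scaffolding-only line: the left margin plus a '|' column for
 * every depth level still open in 'depth_mask'.
 */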
84 static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
85 					  int left_margin)
86 {
87 	int i;
88 	size_t ret = callchain__fprintf_left_margin(fp, left_margin);
89 
90 	for (i = 0; i < depth; i++)
91 		if (depth_mask & (1 << i))
92 			ret += fprintf(fp, "|          ");
93 		else
94 			ret += fprintf(fp, "           ");
95 
96 	ret += fprintf(fp, "\n");
97 
98 	return ret;
99 }
100 
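/*
 * Print a single callchain_list entry: the open '|' columns, the
 * '--x.xx%--' value when this is the first entry of a node (period == 0),
 * the symbol name and, optionally, branch-flag counts and inlined frames.
 */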
101 static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
102 				     struct callchain_list *chain,
103 				     int depth, int depth_mask, int period,
104 				     u64 total_samples, int left_margin)
105 {
106 	int i;
107 	size_t ret = 0;
108 	char bf[1024], *alloc_str = NULL;
109 	char buf[64];
110 	const char *str;
111 
112 	ret += callchain__fprintf_left_margin(fp, left_margin);
113 	for (i = 0; i < depth; i++) {
114 		if (depth_mask & (1 << i))
115 			ret += fprintf(fp, "|");
116 		else
117 			ret += fprintf(fp, " ");
118 		if (!period && i == depth - 1) {
119 			ret += fprintf(fp, "--");
120 			ret += callchain_node__fprintf_value(node, fp, total_samples);
121 			ret += fprintf(fp, "--");
122 		} else
123 			ret += fprintf(fp, "%s", "          ");
124 	}
125 
126 	str = callchain_list__sym_name(chain, bf, sizeof(bf), false);
127 
128 	if (symbol_conf.show_branchflag_count) {
129 		if (!period)
130 			callchain_list_counts__printf_value(node, chain, NULL,
131 							    buf, sizeof(buf));
132 		else
133 			callchain_list_counts__printf_value(NULL, chain, NULL,
134 							    buf, sizeof(buf));
135 
136 		if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
137 			str = "Not enough memory!";
138 		else
139 			str = alloc_str;
140 	}
141 
142 	fputs(str, fp);
143 	fputc('\n', fp);
144 	free(alloc_str);
145 
146 	if (symbol_conf.inline_name)
147 		ret += inline__fprintf(chain->ms.map, chain->ip,
148 				       left_margin, depth, depth_mask, fp);
149 	return ret;
150 }
151 
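/* Fake "[...]" entry used to account for the remaining (filtered out) hits. */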
152 static struct symbol *rem_sq_bracket;
153 static struct callchain_list rem_hits;
154 
155 static void init_rem_hits(void)
156 {
157 	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
158 	if (!rem_sq_bracket) {
159 		fprintf(stderr, "Not enough memory to display remaining hits\n");
160 		return;
161 	}
162 
163 	strcpy(rem_sq_bracket->name, "[...]");
164 	rem_hits.ms.sym = rem_sq_bracket;
165 }
166 
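/*
 * Recursively print one level of the callchain rbtree in graph mode,
 * maintaining the depth mask for the '|' columns and emitting a "[...]"
 * entry for the hits that remain after filtering in relative mode.
 */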
167 static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
168 					 u64 total_samples, int depth,
169 					 int depth_mask, int left_margin)
170 {
171 	struct rb_node *node, *next;
172 	struct callchain_node *child = NULL;
173 	struct callchain_list *chain;
174 	int new_depth_mask = depth_mask;
175 	u64 remaining;
176 	size_t ret = 0;
177 	int i;
178 	uint entries_printed = 0;
179 	int cumul_count = 0;
180 
181 	remaining = total_samples;
182 
183 	node = rb_first(root);
184 	while (node) {
185 		u64 new_total;
186 		u64 cumul;
187 
188 		child = rb_entry(node, struct callchain_node, rb_node);
189 		cumul = callchain_cumul_hits(child);
190 		remaining -= cumul;
191 		cumul_count += callchain_cumul_counts(child);
192 
193 		/*
194 		 * The depth mask manages the output of pipes that show
195 		 * the depth. We don't want to keep the pipes of the current
196 		 * level for the last child of this depth.
197 		 * Except if we have remaining filtered hits: they will
198 		 * supersede the last child.
199 		 */
200 		next = rb_next(node);
201 		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
202 			new_depth_mask &= ~(1 << (depth - 1));
203 
204 		/*
205 		 * But we keep the older depth mask for the line separator
206 		 * to keep the level link until we reach the last child.
207 		 */
208 		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
209 						   left_margin);
210 		i = 0;
211 		list_for_each_entry(chain, &child->val, list) {
212 			ret += ipchain__fprintf_graph(fp, child, chain, depth,
213 						      new_depth_mask, i++,
214 						      total_samples,
215 						      left_margin);
216 		}
217 
218 		if (callchain_param.mode == CHAIN_GRAPH_REL)
219 			new_total = child->children_hit;
220 		else
221 			new_total = total_samples;
222 
223 		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
224 						  depth + 1,
225 						  new_depth_mask | (1 << depth),
226 						  left_margin);
227 		node = next;
228 		if (++entries_printed == callchain_param.print_limit)
229 			break;
230 	}
231 
232 	if (callchain_param.mode == CHAIN_GRAPH_REL &&
233 		remaining && remaining != total_samples) {
234 		struct callchain_node rem_node = {
235 			.hit = remaining,
236 		};
237 
238 		if (!rem_sq_bracket)
239 			return ret;
240 
241 		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
242 			rem_node.count = child->parent->children_count - cumul_count;
243 			if (rem_node.count <= 0)
244 				return ret;
245 		}
246 
247 		new_depth_mask &= ~(1 << (depth - 1));
248 		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
249 					      new_depth_mask, 0, total_samples,
250 					      left_margin);
251 	}
252 
253 	return ret;
254 }
255 
256 /*
257  * If we have a single callchain root, don't bother printing
258  * its percentage (100% in fractal mode and the same percentage
259  * as the hist in graph mode). This also avoids one level of column.
260  *
261  * However, when a percent limit is applied, it's possible that the single
262  * callchain node has a different (non-100% in fractal mode) percentage.
263  */
264 static bool need_percent_display(struct rb_node *node, u64 parent_samples)
265 {
266 	struct callchain_node *cnode;
267 
268 	if (rb_next(node))
269 		return true;
270 
271 	cnode = rb_entry(node, struct callchain_node, rb_node);
272 	return callchain_cumul_hits(cnode) != parent_samples;
273 }
274 
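/*
 * Print the callchain of a hist entry in graph mode. A single root whose
 * percentage matches the entry itself is printed as a plain '---' chain
 * without a value, then its children are handled by
 * __callchain__fprintf_graph().
 */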
275 static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
276 				       u64 total_samples, u64 parent_samples,
277 				       int left_margin)
278 {
279 	struct callchain_node *cnode;
280 	struct callchain_list *chain;
281 	u32 entries_printed = 0;
282 	bool printed = false;
283 	struct rb_node *node;
284 	int i = 0;
285 	int ret = 0;
286 	char bf[1024];
287 
288 	node = rb_first(root);
289 	if (node && !need_percent_display(node, parent_samples)) {
290 		cnode = rb_entry(node, struct callchain_node, rb_node);
291 		list_for_each_entry(chain, &cnode->val, list) {
292 			/*
293 			 * If we sort by symbol, the first entry is the same as
294 			 * the symbol. No need to print it, otherwise it appears
295 			 * to be displayed twice.
296 			 */
297 			if (!i++ && field_order == NULL &&
298 			    sort_order && !prefixcmp(sort_order, "sym"))
299 				continue;
300 
301 			if (!printed) {
302 				ret += callchain__fprintf_left_margin(fp, left_margin);
303 				ret += fprintf(fp, "|\n");
304 				ret += callchain__fprintf_left_margin(fp, left_margin);
305 				ret += fprintf(fp, "---");
306 				left_margin += 3;
307 				printed = true;
308 			} else
309 				ret += callchain__fprintf_left_margin(fp, left_margin);
310 
311 			ret += fprintf(fp, "%s",
312 				       callchain_list__sym_name(chain, bf,
313 								sizeof(bf),
314 								false));
315 
316 			if (symbol_conf.show_branchflag_count)
317 				ret += callchain_list_counts__printf_value(
318 						NULL, chain, fp, NULL, 0);
319 			ret += fprintf(fp, "\n");
320 
321 			if (++entries_printed == callchain_param.print_limit)
322 				break;
323 
324 			if (symbol_conf.inline_name)
325 				ret += inline__fprintf(chain->ms.map,
326 						       chain->ip,
327 						       left_margin,
328 						       0, 0,
329 						       fp);
330 		}
331 		root = &cnode->rb_root;
332 	}
333 
334 	if (callchain_param.mode == CHAIN_GRAPH_REL)
335 		total_samples = parent_samples;
336 
337 	ret += __callchain__fprintf_graph(fp, root, total_samples,
338 					  1, 1, left_margin);
339 	if (ret) {
340 		/* add a trailing blank line only if something was printed */
341 		ret += fprintf(fp, "\n");
342 	}
343 
344 	return ret;
345 }
346 
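/*
 * Flat mode helper: recurse towards the root first so the chain is
 * printed top-down, one resolved symbol per line.
 */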
347 static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
348 					u64 total_samples)
349 {
350 	struct callchain_list *chain;
351 	size_t ret = 0;
352 	char bf[1024];
353 
354 	if (!node)
355 		return 0;
356 
357 	ret += __callchain__fprintf_flat(fp, node->parent, total_samples);
358 
359 
360 	list_for_each_entry(chain, &node->val, list) {
361 		if (chain->ip >= PERF_CONTEXT_MAX)
362 			continue;
363 		ret += fprintf(fp, "                %s\n", callchain_list__sym_name(chain,
364 					bf, sizeof(bf), false));
365 	}
366 
367 	return ret;
368 }
369 
370 static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
371 				      u64 total_samples)
372 {
373 	size_t ret = 0;
374 	u32 entries_printed = 0;
375 	struct callchain_node *chain;
376 	struct rb_node *rb_node = rb_first(tree);
377 
378 	while (rb_node) {
379 		chain = rb_entry(rb_node, struct callchain_node, rb_node);
380 
381 		ret += fprintf(fp, "           ");
382 		ret += callchain_node__fprintf_value(chain, fp, total_samples);
383 		ret += fprintf(fp, "\n");
384 		ret += __callchain__fprintf_flat(fp, chain, total_samples);
385 		ret += fprintf(fp, "\n");
386 		if (++entries_printed == callchain_param.print_limit)
387 			break;
388 
389 		rb_node = rb_next(rb_node);
390 	}
391 
392 	return ret;
393 }
394 
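/*
 * Folded mode helper: print the chain as a single "sym1;sym2;..." line,
 * again walking from the root down via the parent pointers.
 */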
395 static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
396 {
397 	const char *sep = symbol_conf.field_sep ?: ";";
398 	struct callchain_list *chain;
399 	size_t ret = 0;
400 	char bf[1024];
401 	bool first;
402 
403 	if (!node)
404 		return 0;
405 
406 	ret += __callchain__fprintf_folded(fp, node->parent);
407 
408 	first = (ret == 0);
409 	list_for_each_entry(chain, &node->val, list) {
410 		if (chain->ip >= PERF_CONTEXT_MAX)
411 			continue;
412 		ret += fprintf(fp, "%s%s", first ? "" : sep,
413 			       callchain_list__sym_name(chain,
414 						bf, sizeof(bf), false));
415 		first = false;
416 	}
417 
418 	return ret;
419 }
420 
421 static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
422 					u64 total_samples)
423 {
424 	size_t ret = 0;
425 	u32 entries_printed = 0;
426 	struct callchain_node *chain;
427 	struct rb_node *rb_node = rb_first(tree);
428 
429 	while (rb_node) {
430 
431 		chain = rb_entry(rb_node, struct callchain_node, rb_node);
432 
433 		ret += callchain_node__fprintf_value(chain, fp, total_samples);
434 		ret += fprintf(fp, " ");
435 		ret += __callchain__fprintf_folded(fp, chain);
436 		ret += fprintf(fp, "\n");
437 		if (++entries_printed == callchain_param.print_limit)
438 			break;
439 
440 		rb_node = rb_next(rb_node);
441 	}
442 
443 	return ret;
444 }
445 
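/*
 * Dispatch to the graph/flat/folded callchain printers according to
 * callchain_param.mode. With cumulative callchains the accumulated
 * period is used as the parent reference.
 */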
446 static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
447 					    u64 total_samples, int left_margin,
448 					    FILE *fp)
449 {
450 	u64 parent_samples = he->stat.period;
451 
452 	if (symbol_conf.cumulate_callchain)
453 		parent_samples = he->stat_acc->period;
454 
455 	switch (callchain_param.mode) {
456 	case CHAIN_GRAPH_REL:
457 		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
458 						parent_samples, left_margin);
459 		break;
460 	case CHAIN_GRAPH_ABS:
461 		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
462 						parent_samples, left_margin);
463 		break;
464 	case CHAIN_FLAT:
465 		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
466 		break;
467 	case CHAIN_FOLDED:
468 		return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
469 		break;
470 	case CHAIN_NONE:
471 		break;
472 	default:
473 		pr_err("Bad callchain mode\n");
474 	}
475 
476 	return 0;
477 }
478 
479 int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
480 			   struct perf_hpp_list *hpp_list)
481 {
482 	const char *sep = symbol_conf.field_sep;
483 	struct perf_hpp_fmt *fmt;
484 	char *start = hpp->buf;
485 	int ret;
486 	bool first = true;
487 
488 	if (symbol_conf.exclude_other && !he->parent)
489 		return 0;
490 
491 	perf_hpp_list__for_each_format(hpp_list, fmt) {
492 		if (perf_hpp__should_skip(fmt, he->hists))
493 			continue;
494 
495 		/*
496 		 * If there's no field_sep, we still need
497 		 * to display initial '  '.
498 		 */
499 		if (!sep || !first) {
500 			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
501 			advance_hpp(hpp, ret);
502 		} else
503 			first = false;
504 
505 		if (perf_hpp__use_color() && fmt->color)
506 			ret = fmt->color(fmt, hpp, he);
507 		else
508 			ret = fmt->entry(fmt, hpp, he);
509 
510 		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
511 		advance_hpp(hpp, ret);
512 	}
513 
514 	return hpp->buf - start;
515 }
516 
517 static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
518 {
519 	return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
520 }
521 
522 static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
523 					 struct perf_hpp *hpp,
524 					 struct hists *hists,
525 					 FILE *fp)
526 {
527 	const char *sep = symbol_conf.field_sep;
528 	struct perf_hpp_fmt *fmt;
529 	struct perf_hpp_list_node *fmt_node;
530 	char *buf = hpp->buf;
531 	size_t size = hpp->size;
532 	int ret, printed = 0;
533 	bool first = true;
534 
535 	if (symbol_conf.exclude_other && !he->parent)
536 		return 0;
537 
538 	ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
539 	advance_hpp(hpp, ret);
540 
541 	/* the first hpp_list_node is for overhead columns */
542 	fmt_node = list_first_entry(&hists->hpp_formats,
543 				    struct perf_hpp_list_node, list);
544 	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
545 		/*
546 		 * If there's no field_sep, we still need
547 		 * to display initial '  '.
548 		 */
549 		if (!sep || !first) {
550 			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
551 			advance_hpp(hpp, ret);
552 		} else
553 			first = false;
554 
555 		if (perf_hpp__use_color() && fmt->color)
556 			ret = fmt->color(fmt, hpp, he);
557 		else
558 			ret = fmt->entry(fmt, hpp, he);
559 
560 		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
561 		advance_hpp(hpp, ret);
562 	}
563 
564 	if (!sep)
565 		ret = scnprintf(hpp->buf, hpp->size, "%*s",
566 				(hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
567 	advance_hpp(hpp, ret);
568 
569 	printed += fprintf(fp, "%s", buf);
570 
571 	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
572 		hpp->buf  = buf;
573 		hpp->size = size;
574 
575 		/*
576 		 * No need to call hist_entry__snprintf_alignment() since this
577 		 * fmt is always the last column in the hierarchy mode.
578 		 */
579 		if (perf_hpp__use_color() && fmt->color)
580 			fmt->color(fmt, hpp, he);
581 		else
582 			fmt->entry(fmt, hpp, he);
583 
584 		/*
585 		 * dynamic entries are right-aligned but we want them left-aligned
586 		 * in the hierarchy mode.
587 		 */
588 		printed += fprintf(fp, "%s%s", sep ?: "  ", ltrim(buf));
589 	}
590 	printed += putc('\n', fp);
591 
592 	if (symbol_conf.use_callchain && he->leaf) {
593 		u64 total = hists__total_period(hists);
594 
595 		printed += hist_entry_callchain__fprintf(he, total, 0, fp);
596 		goto out;
597 	}
598 
599 out:
600 	return printed;
601 }
602 
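/*
 * Format one hist entry into 'bf' and print it, followed by its
 * callchain and, if requested, the inlined frames of its own symbol.
 * Hierarchy mode output is delegated to hist_entry__hierarchy_fprintf().
 */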
603 static int hist_entry__fprintf(struct hist_entry *he, size_t size,
604 			       char *bf, size_t bfsz, FILE *fp,
605 			       bool use_callchain)
606 {
607 	int ret;
608 	int callchain_ret = 0;
609 	int inline_ret = 0;
610 	struct perf_hpp hpp = {
611 		.buf		= bf,
612 		.size		= size,
613 	};
614 	struct hists *hists = he->hists;
615 	u64 total_period = hists->stats.total_period;
616 
617 	if (size == 0 || size > bfsz)
618 		size = hpp.size = bfsz;
619 
620 	if (symbol_conf.report_hierarchy)
621 		return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);
622 
623 	hist_entry__snprintf(he, &hpp);
624 
625 	ret = fprintf(fp, "%s\n", bf);
626 
627 	if (use_callchain)
628 		callchain_ret = hist_entry_callchain__fprintf(he, total_period,
629 							      0, fp);
630 
631 	if (callchain_ret == 0 && symbol_conf.inline_name) {
632 		inline_ret = inline__fprintf(he->ms.map, he->ip, 0, 0, 0, fp);
633 		ret += inline_ret;
634 		if (inline_ret > 0)
635 			ret += fprintf(fp, "\n");
636 	} else
637 		ret += callchain_ret;
638 
639 	return ret;
640 }
641 
642 static int print_hierarchy_indent(const char *sep, int indent,
643 				  const char *line, FILE *fp)
644 {
645 	if (sep != NULL || indent < 2)
646 		return 0;
647 
648 	return fprintf(fp, "%-.*s", (indent - 2) * HIERARCHY_INDENT, line);
649 }
650 
651 static int hists__fprintf_hierarchy_headers(struct hists *hists,
652 					    struct perf_hpp *hpp, FILE *fp)
653 {
654 	bool first_node, first_col;
655 	int indent;
656 	int depth;
657 	unsigned width = 0;
658 	unsigned header_width = 0;
659 	struct perf_hpp_fmt *fmt;
660 	struct perf_hpp_list_node *fmt_node;
661 	const char *sep = symbol_conf.field_sep;
662 
663 	indent = hists->nr_hpp_node;
664 
665 	/* preserve max indent depth for column headers */
666 	print_hierarchy_indent(sep, indent, spaces, fp);
667 
668 	/* the first hpp_list_node is for overhead columns */
669 	fmt_node = list_first_entry(&hists->hpp_formats,
670 				    struct perf_hpp_list_node, list);
671 
672 	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
673 		fmt->header(fmt, hpp, hists, 0, NULL);
674 		fprintf(fp, "%s%s", hpp->buf, sep ?: "  ");
675 	}
676 
677 	/* combine sort headers with ' / ' */
678 	first_node = true;
679 	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
680 		if (!first_node)
681 			header_width += fprintf(fp, " / ");
682 		first_node = false;
683 
684 		first_col = true;
685 		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
686 			if (perf_hpp__should_skip(fmt, hists))
687 				continue;
688 
689 			if (!first_col)
690 				header_width += fprintf(fp, "+");
691 			first_col = false;
692 
693 			fmt->header(fmt, hpp, hists, 0, NULL);
694 
695 			header_width += fprintf(fp, "%s", trim(hpp->buf));
696 		}
697 	}
698 
699 	fprintf(fp, "\n# ");
700 
701 	/* preserve max indent depth for initial dots */
702 	print_hierarchy_indent(sep, indent, dots, fp);
703 
704 	/* the first hpp_list_node is for overhead columns */
705 	fmt_node = list_first_entry(&hists->hpp_formats,
706 				    struct perf_hpp_list_node, list);
707 
708 	first_col = true;
709 	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
710 		if (!first_col)
711 			fprintf(fp, "%s", sep ?: "..");
712 		first_col = false;
713 
714 		width = fmt->width(fmt, hpp, hists);
715 		fprintf(fp, "%.*s", width, dots);
716 	}
717 
718 	depth = 0;
719 	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
720 		first_col = true;
721 		width = depth * HIERARCHY_INDENT;
722 
723 		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
724 			if (perf_hpp__should_skip(fmt, hists))
725 				continue;
726 
727 			if (!first_col)
728 				width++;  /* for '+' sign between column headers */
729 			first_col = false;
730 
731 			width += fmt->width(fmt, hpp, hists);
732 		}
733 
734 		if (width > header_width)
735 			header_width = width;
736 
737 		depth++;
738 	}
739 
740 	fprintf(fp, "%s%-.*s", sep ?: "  ", header_width, dots);
741 
742 	fprintf(fp, "\n#\n");
743 
744 	return 2;
745 }
746 
747 static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
748 			 int line, FILE *fp)
749 {
750 	struct perf_hpp_fmt *fmt;
751 	const char *sep = symbol_conf.field_sep;
752 	bool first = true;
753 	int span = 0;
754 
755 	hists__for_each_format(hists, fmt) {
756 		if (perf_hpp__should_skip(fmt, hists))
757 			continue;
758 
759 		if (!first && !span)
760 			fprintf(fp, "%s", sep ?: "  ");
761 		else
762 			first = false;
763 
764 		fmt->header(fmt, hpp, hists, line, &span);
765 
766 		if (!span)
767 			fprintf(fp, "%s", hpp->buf);
768 	}
769 }
770 
771 static int
772 hists__fprintf_standard_headers(struct hists *hists,
773 				struct perf_hpp *hpp,
774 				FILE *fp)
775 {
776 	struct perf_hpp_list *hpp_list = hists->hpp_list;
777 	struct perf_hpp_fmt *fmt;
778 	unsigned int width;
779 	const char *sep = symbol_conf.field_sep;
780 	bool first = true;
781 	int line;
782 
783 	for (line = 0; line < hpp_list->nr_header_lines; line++) {
784 		/* the '# ' of the first header line is printed by the caller */
785 		if (line)
786 			fprintf(fp, "# ");
787 		fprintf_line(hists, hpp, line, fp);
788 		fprintf(fp, "\n");
789 	}
790 
791 	if (sep)
792 		return hpp_list->nr_header_lines;
793 
794 	first = true;
795 
796 	fprintf(fp, "# ");
797 
798 	hists__for_each_format(hists, fmt) {
799 		unsigned int i;
800 
801 		if (perf_hpp__should_skip(fmt, hists))
802 			continue;
803 
804 		if (!first)
805 			fprintf(fp, "%s", sep ?: "  ");
806 		else
807 			first = false;
808 
809 		width = fmt->width(fmt, hpp, hists);
810 		for (i = 0; i < width; i++)
811 			fprintf(fp, ".");
812 	}
813 
814 	fprintf(fp, "\n");
815 	fprintf(fp, "#\n");
816 	return hpp_list->nr_header_lines + 2;
817 }
818 
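/* Print the column header block ("# ..."), hierarchical or standard. */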
819 int hists__fprintf_headers(struct hists *hists, FILE *fp)
820 {
821 	char bf[1024];
822 	struct perf_hpp dummy_hpp = {
823 		.buf	= bf,
824 		.size	= sizeof(bf),
825 	};
826 
827 	fprintf(fp, "# ");
828 
829 	if (symbol_conf.report_hierarchy)
830 		return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
831 	else
832 		return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
833 
834 }
835 
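/*
 * Top-level stdio output: print the headers, then every unfiltered hist
 * entry above 'min_pcnt', honouring the max_rows/max_cols limits.
 */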
836 size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
837 		      int max_cols, float min_pcnt, FILE *fp,
838 		      bool use_callchain)
839 {
840 	struct rb_node *nd;
841 	size_t ret = 0;
842 	const char *sep = symbol_conf.field_sep;
843 	int nr_rows = 0;
844 	size_t linesz;
845 	char *line = NULL;
846 	unsigned indent;
847 
848 	init_rem_hits();
849 
850 	hists__reset_column_width(hists);
851 
852 	if (symbol_conf.col_width_list_str)
853 		perf_hpp__set_user_width(symbol_conf.col_width_list_str);
854 
855 	if (show_header)
856 		nr_rows += hists__fprintf_headers(hists, fp);
857 
858 	if (max_rows && nr_rows >= max_rows)
859 		goto out;
860 
861 	linesz = hists__sort_list_width(hists) + 3 + 1;
862 	linesz += perf_hpp__color_overhead();
863 	line = malloc(linesz);
864 	if (line == NULL) {
865 		ret = -1;
866 		goto out;
867 	}
868 
869 	indent = hists__overhead_width(hists) + 4;
870 
871 	for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
872 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
873 		float percent;
874 
875 		if (h->filtered)
876 			continue;
877 
878 		percent = hist_entry__get_percent_limit(h);
879 		if (percent < min_pcnt)
880 			continue;
881 
882 		ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, use_callchain);
883 
884 		if (max_rows && ++nr_rows >= max_rows)
885 			break;
886 
887 		/*
888 		 * If all children are filtered out or percent-limited,
889 		 * display "no entry >= x.xx%" message.
890 		 */
891 		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
892 			int depth = hists->nr_hpp_node + h->depth + 1;
893 
894 			print_hierarchy_indent(sep, depth, spaces, fp);
895 			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);
896 
897 			if (max_rows && ++nr_rows >= max_rows)
898 				break;
899 		}
900 
901 		if (h->ms.map == NULL && verbose > 1) {
902 			__map_groups__fprintf_maps(h->thread->mg,
903 						   MAP__FUNCTION, fp);
904 			fprintf(fp, "%.10s end\n", graph_dotted_line);
905 		}
906 	}
907 
908 	free(line);
909 out:
910 	zfree(&rem_sq_bracket);
911 
912 	return ret;
913 }
914 
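/* Print a per-type count of all PERF_RECORD_* events that were seen. */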
915 size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
916 {
917 	int i;
918 	size_t ret = 0;
919 
920 	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
921 		const char *name;
922 
923 		if (stats->nr_events[i] == 0)
924 			continue;
925 
926 		name = perf_event__name(i);
927 		if (!strcmp(name, "UNKNOWN"))
928 			continue;
929 
930 		ret += fprintf(fp, "%16s events: %10d\n", name,
931 			       stats->nr_events[i]);
932 	}
933 
934 	return ret;
935 }
936