xref: /openbmc/linux/tools/perf/ui/stdio/hist.c (revision 7559e757)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <stdio.h>
3 #include <stdlib.h>
4 #include <linux/string.h>
5 
6 #include "../../util/callchain.h"
7 #include "../../util/debug.h"
8 #include "../../util/event.h"
9 #include "../../util/hist.h"
10 #include "../../util/map.h"
11 #include "../../util/maps.h"
12 #include "../../util/symbol.h"
13 #include "../../util/sort.h"
14 #include "../../util/evsel.h"
15 #include "../../util/srcline.h"
16 #include "../../util/string2.h"
17 #include "../../util/thread.h"
18 #include "../../util/block-info.h"
19 #include <linux/ctype.h>
20 #include <linux/zalloc.h>
21 
22 static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
23 {
24 	int i;
25 	int ret = fprintf(fp, "            ");
26 
27 	for (i = 0; i < left_margin; i++)
28 		ret += fprintf(fp, " ");
29 
30 	return ret;
31 }
32 
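/*
 * Print one spacer line of the callchain graph: a '|' for every level
 * that is still open according to @depth_mask, blanks otherwise.
 */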
33 static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
34 					  int left_margin)
35 {
36 	int i;
37 	size_t ret = callchain__fprintf_left_margin(fp, left_margin);
38 
39 	for (i = 0; i < depth; i++)
40 		if (depth_mask & (1 << i))
41 			ret += fprintf(fp, "|          ");
42 		else
43 			ret += fprintf(fp, "           ");
44 
45 	ret += fprintf(fp, "\n");
46 
47 	return ret;
48 }
49 
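/*
 * Print a single callchain_list entry of @node together with the graph
 * decoration to its left.  For the first entry of a node (@period == 0)
 * the node's value is printed between "--" markers in the deepest
 * column.  Illustrative output (left margin trimmed, exact value format
 * depends on callchain_param):
 *
 *   |
 *   |--33.33%--foo
 *   |          bar
 */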
50 static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
51 				     struct callchain_list *chain,
52 				     int depth, int depth_mask, int period,
53 				     u64 total_samples, int left_margin)
54 {
55 	int i;
56 	size_t ret = 0;
57 	char bf[1024], *alloc_str = NULL;
58 	char buf[64];
59 	const char *str;
60 
61 	ret += callchain__fprintf_left_margin(fp, left_margin);
62 	for (i = 0; i < depth; i++) {
63 		if (depth_mask & (1 << i))
64 			ret += fprintf(fp, "|");
65 		else
66 			ret += fprintf(fp, " ");
67 		if (!period && i == depth - 1) {
68 			ret += fprintf(fp, "--");
69 			ret += callchain_node__fprintf_value(node, fp, total_samples);
70 			ret += fprintf(fp, "--");
71 		} else
72 			ret += fprintf(fp, "%s", "          ");
73 	}
74 
75 	str = callchain_list__sym_name(chain, bf, sizeof(bf), false);
76 
77 	if (symbol_conf.show_branchflag_count) {
78 		callchain_list_counts__printf_value(chain, NULL,
79 						    buf, sizeof(buf));
80 
81 		if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
82 			str = "Not enough memory!";
83 		else
84 			str = alloc_str;
85 	}
86 
87 	fputs(str, fp);
88 	fputc('\n', fp);
89 	free(alloc_str);
90 
91 	return ret;
92 }
93 
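/*
 * Synthetic "[...]" entry used in relative graph mode to account for
 * hits that were filtered out or fell below the percent limit.  The
 * extra 6 bytes hold the "[...]" string plus its terminating NUL.
 */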
94 static struct symbol *rem_sq_bracket;
95 static struct callchain_list rem_hits;
96 
97 static void init_rem_hits(void)
98 {
99 	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
100 	if (!rem_sq_bracket) {
101 		fprintf(stderr, "Not enough memory to display remaining hits\n");
102 		return;
103 	}
104 
105 	strcpy(rem_sq_bracket->name, "[...]");
106 	rem_hits.ms.sym = rem_sq_bracket;
107 }
108 
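/*
 * Recursively print the children of @root in graph mode.  @depth_mask
 * tracks which levels still need a '|' connector; in CHAIN_GRAPH_REL
 * mode a trailing "[...]" entry is emitted for the remaining
 * (filtered or percent-limited) hits.
 */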
109 static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
110 					 u64 total_samples, int depth,
111 					 int depth_mask, int left_margin)
112 {
113 	struct rb_node *node, *next;
114 	struct callchain_node *child = NULL;
115 	struct callchain_list *chain;
116 	int new_depth_mask = depth_mask;
117 	u64 remaining;
118 	size_t ret = 0;
119 	int i;
120 	uint entries_printed = 0;
121 	int cumul_count = 0;
122 
123 	remaining = total_samples;
124 
125 	node = rb_first(root);
126 	while (node) {
127 		u64 new_total;
128 		u64 cumul;
129 
130 		child = rb_entry(node, struct callchain_node, rb_node);
131 		cumul = callchain_cumul_hits(child);
132 		remaining -= cumul;
133 		cumul_count += callchain_cumul_counts(child);
134 
135 		/*
136 		 * The depth mask manages the output of the pipes that show
137 		 * the depth. We don't want to keep the pipes of the current
138 		 * level for the last child of this depth, except if we have
139 		 * remaining filtered hits: they will supersede the last
140 		 * child.
141 		 */
142 		next = rb_next(node);
143 		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
144 			new_depth_mask &= ~(1 << (depth - 1));
145 
146 		/*
147 		 * But we keep the previous depth mask for the line separator,
148 		 * so the level link is preserved until we reach the last child.
149 		 */
150 		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
151 						   left_margin);
152 		i = 0;
153 		list_for_each_entry(chain, &child->val, list) {
154 			ret += ipchain__fprintf_graph(fp, child, chain, depth,
155 						      new_depth_mask, i++,
156 						      total_samples,
157 						      left_margin);
158 		}
159 
160 		if (callchain_param.mode == CHAIN_GRAPH_REL)
161 			new_total = child->children_hit;
162 		else
163 			new_total = total_samples;
164 
165 		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
166 						  depth + 1,
167 						  new_depth_mask | (1 << depth),
168 						  left_margin);
169 		node = next;
170 		if (++entries_printed == callchain_param.print_limit)
171 			break;
172 	}
173 
174 	if (callchain_param.mode == CHAIN_GRAPH_REL &&
175 		remaining && remaining != total_samples) {
176 		struct callchain_node rem_node = {
177 			.hit = remaining,
178 		};
179 
180 		if (!rem_sq_bracket)
181 			return ret;
182 
183 		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
184 			rem_node.count = child->parent->children_count - cumul_count;
185 			if (rem_node.count <= 0)
186 				return ret;
187 		}
188 
189 		new_depth_mask &= ~(1 << (depth - 1));
190 		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
191 					      new_depth_mask, 0, total_samples,
192 					      left_margin);
193 	}
194 
195 	return ret;
196 }
197 
198 /*
199  * If there is a single callchain root, don't bother printing
200  * its percentage (100% in fractal mode and the same percentage
201  * as the hist entry in graph mode). This also avoids one level of column.
202  *
203  * However, when a percent limit is applied, it's possible that a single
204  * callchain node has a different (non-100% in fractal mode) percentage.
205  */
206 static bool need_percent_display(struct rb_node *node, u64 parent_samples)
207 {
208 	struct callchain_node *cnode;
209 
210 	if (rb_next(node))
211 		return true;
212 
213 	cnode = rb_entry(node, struct callchain_node, rb_node);
214 	return callchain_cumul_hits(cnode) != parent_samples;
215 }
216 
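/*
 * Print the callchain of one hist entry in graph mode.  A single root
 * that needs no percentage of its own (see need_percent_display()) is
 * printed inline, attached to the entry with a "---" connector, before
 * recursing into its children.
 */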
217 static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
218 				       u64 total_samples, u64 parent_samples,
219 				       int left_margin)
220 {
221 	struct callchain_node *cnode;
222 	struct callchain_list *chain;
223 	u32 entries_printed = 0;
224 	bool printed = false;
225 	struct rb_node *node;
226 	int i = 0;
227 	int ret = 0;
228 	char bf[1024];
229 
230 	node = rb_first(root);
231 	if (node && !need_percent_display(node, parent_samples)) {
232 		cnode = rb_entry(node, struct callchain_node, rb_node);
233 		list_for_each_entry(chain, &cnode->val, list) {
234 			/*
235 			 * If we sort by symbol, the first entry is the same as
236 			 * the symbol. No need to print it, otherwise it appears
237 			 * to be displayed twice.
238 			 */
239 			if (!i++ && field_order == NULL &&
240 			    sort_order && strstarts(sort_order, "sym"))
241 				continue;
242 
243 			if (!printed) {
244 				ret += callchain__fprintf_left_margin(fp, left_margin);
245 				ret += fprintf(fp, "|\n");
246 				ret += callchain__fprintf_left_margin(fp, left_margin);
247 				ret += fprintf(fp, "---");
248 				left_margin += 3;
249 				printed = true;
250 			} else
251 				ret += callchain__fprintf_left_margin(fp, left_margin);
252 
253 			ret += fprintf(fp, "%s",
254 				       callchain_list__sym_name(chain, bf,
255 								sizeof(bf),
256 								false));
257 
258 			if (symbol_conf.show_branchflag_count)
259 				ret += callchain_list_counts__printf_value(
260 						chain, fp, NULL, 0);
261 			ret += fprintf(fp, "\n");
262 
263 			if (++entries_printed == callchain_param.print_limit)
264 				break;
265 		}
266 		root = &cnode->rb_root;
267 	}
268 
269 	if (callchain_param.mode == CHAIN_GRAPH_REL)
270 		total_samples = parent_samples;
271 
272 	ret += __callchain__fprintf_graph(fp, root, total_samples,
273 					  1, 1, left_margin);
274 	if (ret) {
275 		/* do not add a blank line if it printed nothing */
276 		ret += fprintf(fp, "\n");
277 	}
278 
279 	return ret;
280 }
281 
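/*
 * Print one flat-mode chain: parent nodes are printed first (via
 * recursion), then this node's own entries, skipping special context
 * entries (chain->ip >= PERF_CONTEXT_MAX).
 */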
282 static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
283 					u64 total_samples)
284 {
285 	struct callchain_list *chain;
286 	size_t ret = 0;
287 	char bf[1024];
288 
289 	if (!node)
290 		return 0;
291 
292 	ret += __callchain__fprintf_flat(fp, node->parent, total_samples);
293 
295 	list_for_each_entry(chain, &node->val, list) {
296 		if (chain->ip >= PERF_CONTEXT_MAX)
297 			continue;
298 		ret += fprintf(fp, "                %s\n", callchain_list__sym_name(chain,
299 					bf, sizeof(bf), false));
300 	}
301 
302 	return ret;
303 }
304 
305 static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
306 				      u64 total_samples)
307 {
308 	size_t ret = 0;
309 	u32 entries_printed = 0;
310 	struct callchain_node *chain;
311 	struct rb_node *rb_node = rb_first(tree);
312 
313 	while (rb_node) {
314 		chain = rb_entry(rb_node, struct callchain_node, rb_node);
315 
316 		ret += fprintf(fp, "           ");
317 		ret += callchain_node__fprintf_value(chain, fp, total_samples);
318 		ret += fprintf(fp, "\n");
319 		ret += __callchain__fprintf_flat(fp, chain, total_samples);
320 		ret += fprintf(fp, "\n");
321 		if (++entries_printed == callchain_param.print_limit)
322 			break;
323 
324 		rb_node = rb_next(rb_node);
325 	}
326 
327 	return ret;
328 }
329 
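/*
 * Print one chain in folded mode: all entries on a single line, joined
 * by symbol_conf.field_sep (';' by default), which is convenient for
 * flame-graph style post-processing.
 */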
330 static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
331 {
332 	const char *sep = symbol_conf.field_sep ?: ";";
333 	struct callchain_list *chain;
334 	size_t ret = 0;
335 	char bf[1024];
336 	bool first;
337 
338 	if (!node)
339 		return 0;
340 
341 	ret += __callchain__fprintf_folded(fp, node->parent);
342 
343 	first = (ret == 0);
344 	list_for_each_entry(chain, &node->val, list) {
345 		if (chain->ip >= PERF_CONTEXT_MAX)
346 			continue;
347 		ret += fprintf(fp, "%s%s", first ? "" : sep,
348 			       callchain_list__sym_name(chain,
349 						bf, sizeof(bf), false));
350 		first = false;
351 	}
352 
353 	return ret;
354 }
355 
356 static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
357 					u64 total_samples)
358 {
359 	size_t ret = 0;
360 	u32 entries_printed = 0;
361 	struct callchain_node *chain;
362 	struct rb_node *rb_node = rb_first(tree);
363 
364 	while (rb_node) {
366 		chain = rb_entry(rb_node, struct callchain_node, rb_node);
367 
368 		ret += callchain_node__fprintf_value(chain, fp, total_samples);
369 		ret += fprintf(fp, " ");
370 		ret += __callchain__fprintf_folded(fp, chain);
371 		ret += fprintf(fp, "\n");
372 		if (++entries_printed == callchain_param.print_limit)
373 			break;
374 
375 		rb_node = rb_next(rb_node);
376 	}
377 
378 	return ret;
379 }
380 
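/* Dispatch to the callchain printer matching callchain_param.mode. */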
381 static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
382 					    u64 total_samples, int left_margin,
383 					    FILE *fp)
384 {
385 	u64 parent_samples = he->stat.period;
386 
387 	if (symbol_conf.cumulate_callchain)
388 		parent_samples = he->stat_acc->period;
389 
390 	switch (callchain_param.mode) {
391 	case CHAIN_GRAPH_REL:
392 		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
393 						parent_samples, left_margin);
395 	case CHAIN_GRAPH_ABS:
396 		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
397 						parent_samples, left_margin);
399 	case CHAIN_FLAT:
400 		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
402 	case CHAIN_FOLDED:
403 		return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
405 	case CHAIN_NONE:
406 		break;
407 	default:
408 		pr_err("Bad callchain mode\n");
409 	}
410 
411 	return 0;
412 }
413 
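/*
 * Format a single hist entry into hpp->buf using the columns of
 * @hpp_list, honouring field_sep and color settings.  Returns the
 * number of characters written.
 */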
414 int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
415 			   struct perf_hpp_list *hpp_list)
416 {
417 	const char *sep = symbol_conf.field_sep;
418 	struct perf_hpp_fmt *fmt;
419 	char *start = hpp->buf;
420 	int ret;
421 	bool first = true;
422 
423 	if (symbol_conf.exclude_other && !he->parent)
424 		return 0;
425 
426 	perf_hpp_list__for_each_format(hpp_list, fmt) {
427 		if (perf_hpp__should_skip(fmt, he->hists))
428 			continue;
429 
430 		/*
431 		 * If there's no field_sep, we still need to display the
432 		 * initial '  '; with a separator, only the first column skips it.
433 		 */
434 		if (!sep || !first) {
435 			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
436 			advance_hpp(hpp, ret);
437 		} else
438 			first = false;
439 
440 		if (perf_hpp__use_color() && fmt->color)
441 			ret = fmt->color(fmt, hpp, he);
442 		else
443 			ret = fmt->entry(fmt, hpp, he);
444 
445 		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
446 		advance_hpp(hpp, ret);
447 	}
448 
449 	return hpp->buf - start;
450 }
451 
452 static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
453 {
454 	return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
455 }
456 
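/*
 * Print one entry in hierarchy mode: indent by depth, print the
 * overhead columns, then the entry's own column(s) for its hierarchy
 * level left-aligned, followed by its callchain if this is a leaf.
 */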
457 static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
458 					 struct perf_hpp *hpp,
459 					 struct hists *hists,
460 					 FILE *fp)
461 {
462 	const char *sep = symbol_conf.field_sep;
463 	struct perf_hpp_fmt *fmt;
464 	struct perf_hpp_list_node *fmt_node;
465 	char *buf = hpp->buf;
466 	size_t size = hpp->size;
467 	int ret, printed = 0;
468 	bool first = true;
469 
470 	if (symbol_conf.exclude_other && !he->parent)
471 		return 0;
472 
473 	ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
474 	advance_hpp(hpp, ret);
475 
476 	/* the first hpp_list_node is for overhead columns */
477 	fmt_node = list_first_entry(&hists->hpp_formats,
478 				    struct perf_hpp_list_node, list);
479 	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
480 		/*
481 		 * If there's no field_sep, we still need to display the
482 		 * initial '  '; with a separator, only the first column skips it.
483 		 */
484 		if (!sep || !first) {
485 			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
486 			advance_hpp(hpp, ret);
487 		} else
488 			first = false;
489 
490 		if (perf_hpp__use_color() && fmt->color)
491 			ret = fmt->color(fmt, hpp, he);
492 		else
493 			ret = fmt->entry(fmt, hpp, he);
494 
495 		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
496 		advance_hpp(hpp, ret);
497 	}
498 
499 	if (!sep)
500 		ret = scnprintf(hpp->buf, hpp->size, "%*s",
501 				(hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
502 	advance_hpp(hpp, ret);
503 
504 	printed += fprintf(fp, "%s", buf);
505 
506 	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
507 		hpp->buf  = buf;
508 		hpp->size = size;
509 
510 		/*
511 		 * No need to call hist_entry__snprintf_alignment() since this
512 		 * fmt is always the last column in the hierarchy mode.
513 		 */
514 		if (perf_hpp__use_color() && fmt->color)
515 			fmt->color(fmt, hpp, he);
516 		else
517 			fmt->entry(fmt, hpp, he);
518 
519 		/*
520 		 * Dynamic entries are right-aligned but we want them
521 		 * left-aligned in the hierarchy mode.
522 		 */
523 		printed += fprintf(fp, "%s%s", sep ?: "  ", skip_spaces(buf));
524 	}
525 	printed += putc('\n', fp);
526 
527 	if (he->leaf && hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
528 		u64 total = hists__total_period(hists);
529 
530 		printed += hist_entry_callchain__fprintf(he, total, 0, fp);
532 	}
533 
535 	return printed;
536 }
537 
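/*
 * Print the block entries attached to @he: iterate over the embedded
 * block_hists and format @he once per block, with bh->block_idx
 * selecting the block to print.
 */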
538 static int hist_entry__block_fprintf(struct hist_entry *he,
539 				     char *bf, size_t size,
540 				     FILE *fp)
541 {
542 	struct block_hist *bh = container_of(he, struct block_hist, he);
543 	int ret = 0;
544 
545 	for (unsigned int i = 0; i < bh->block_hists.nr_entries; i++) {
546 		struct perf_hpp hpp = {
547 			.buf		= bf,
548 			.size		= size,
549 			.skip		= false,
550 		};
551 
552 		bh->block_idx = i;
553 		hist_entry__snprintf(he, &hpp);
554 
555 		if (!hpp.skip)
556 			ret += fprintf(fp, "%s\n", bf);
557 	}
558 
559 	return ret;
560 }
561 
562 static int hist_entry__individual_block_fprintf(struct hist_entry *he,
563 						char *bf, size_t size,
564 						FILE *fp)
565 {
566 	int ret = 0;
567 
568 	struct perf_hpp hpp = {
569 		.buf		= bf,
570 		.size		= size,
571 		.skip		= false,
572 	};
573 
574 	hist_entry__snprintf(he, &hpp);
575 	if (!hpp.skip)
576 		ret += fprintf(fp, "%s\n", bf);
577 
578 	return ret;
579 }
580 
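/*
 * Print one hist entry, dispatching to the hierarchy or block printers
 * when those modes are enabled, and appending the callchain unless
 * @ignore_callchains is set.
 */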
581 static int hist_entry__fprintf(struct hist_entry *he, size_t size,
582 			       char *bf, size_t bfsz, FILE *fp,
583 			       bool ignore_callchains)
584 {
585 	int ret;
586 	int callchain_ret = 0;
587 	struct perf_hpp hpp = {
588 		.buf		= bf,
589 		.size		= size,
590 	};
591 	struct hists *hists = he->hists;
592 	u64 total_period = hists->stats.total_period;
593 
594 	if (size == 0 || size > bfsz)
595 		size = hpp.size = bfsz;
596 
597 	if (symbol_conf.report_hierarchy)
598 		return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);
599 
600 	if (symbol_conf.report_block)
601 		return hist_entry__block_fprintf(he, bf, size, fp);
602 
603 	if (symbol_conf.report_individual_block)
604 		return hist_entry__individual_block_fprintf(he, bf, size, fp);
605 
606 	hist_entry__snprintf(he, &hpp);
607 
608 	ret = fprintf(fp, "%s\n", bf);
609 
610 	if (hist_entry__has_callchains(he) && !ignore_callchains)
611 		callchain_ret = hist_entry_callchain__fprintf(he, total_period,
612 							      0, fp);
613 
614 	ret += callchain_ret;
615 
616 	return ret;
617 }
618 
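/*
 * Left-pad hierarchy output with @line-filled columns; nothing is
 * printed when a field separator is in use or the indent is too small.
 */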
619 static int print_hierarchy_indent(const char *sep, int indent,
620 				  const char *line, FILE *fp)
621 {
622 	int width;
623 
624 	if (sep != NULL || indent < 2)
625 		return 0;
626 
627 	width = (indent - 2) * HIERARCHY_INDENT;
628 
629 	return fprintf(fp, "%-*.*s", width, width, line);
630 }
631 
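/*
 * Print the hierarchy-mode headers: the overhead column headers, then
 * the sort keys of each level joined with " / " ('+' between columns
 * of the same level), followed by a dotted underline sized to the
 * widest level.  Returns the number of header lines printed.
 */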
632 static int hists__fprintf_hierarchy_headers(struct hists *hists,
633 					    struct perf_hpp *hpp, FILE *fp)
634 {
635 	bool first_node, first_col;
636 	int indent;
637 	int depth;
638 	unsigned width = 0;
639 	unsigned header_width = 0;
640 	struct perf_hpp_fmt *fmt;
641 	struct perf_hpp_list_node *fmt_node;
642 	const char *sep = symbol_conf.field_sep;
643 
644 	indent = hists->nr_hpp_node;
645 
646 	/* preserve max indent depth for column headers */
647 	print_hierarchy_indent(sep, indent, " ", fp);
648 
649 	/* the first hpp_list_node is for overhead columns */
650 	fmt_node = list_first_entry(&hists->hpp_formats,
651 				    struct perf_hpp_list_node, list);
652 
653 	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
654 		fmt->header(fmt, hpp, hists, 0, NULL);
655 		fprintf(fp, "%s%s", hpp->buf, sep ?: "  ");
656 	}
657 
658 	/* combine sort headers with ' / ' */
659 	first_node = true;
660 	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
661 		if (!first_node)
662 			header_width += fprintf(fp, " / ");
663 		first_node = false;
664 
665 		first_col = true;
666 		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
667 			if (perf_hpp__should_skip(fmt, hists))
668 				continue;
669 
670 			if (!first_col)
671 				header_width += fprintf(fp, "+");
672 			first_col = false;
673 
674 			fmt->header(fmt, hpp, hists, 0, NULL);
675 
676 			header_width += fprintf(fp, "%s", strim(hpp->buf));
677 		}
678 	}
679 
680 	fprintf(fp, "\n# ");
681 
682 	/* preserve max indent depth for initial dots */
683 	print_hierarchy_indent(sep, indent, dots, fp);
684 
685 	/* the first hpp_list_node is for overhead columns */
686 	fmt_node = list_first_entry(&hists->hpp_formats,
687 				    struct perf_hpp_list_node, list);
688 
689 	first_col = true;
690 	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
691 		if (!first_col)
692 			fprintf(fp, "%s", sep ?: "..");
693 		first_col = false;
694 
695 		width = fmt->width(fmt, hpp, hists);
696 		fprintf(fp, "%.*s", width, dots);
697 	}
698 
699 	depth = 0;
700 	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
701 		first_col = true;
702 		width = depth * HIERARCHY_INDENT;
703 
704 		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
705 			if (perf_hpp__should_skip(fmt, hists))
706 				continue;
707 
708 			if (!first_col)
709 			width++;  /* for the '+' sign between column headers */
710 			first_col = false;
711 
712 			width += fmt->width(fmt, hpp, hists);
713 		}
714 
715 		if (width > header_width)
716 			header_width = width;
717 
718 		depth++;
719 	}
720 
721 	fprintf(fp, "%s%-.*s", sep ?: "  ", header_width, dots);
722 
723 	fprintf(fp, "\n#\n");
724 
725 	return 2;
726 }
727 
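/* Print one line of column headers, handling formats that span multiple columns. */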
728 static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
729 			 int line, FILE *fp)
730 {
731 	struct perf_hpp_fmt *fmt;
732 	const char *sep = symbol_conf.field_sep;
733 	bool first = true;
734 	int span = 0;
735 
736 	hists__for_each_format(hists, fmt) {
737 		if (perf_hpp__should_skip(fmt, hists))
738 			continue;
739 
740 		if (!first && !span)
741 			fprintf(fp, "%s", sep ?: "  ");
742 		else
743 			first = false;
744 
745 		fmt->header(fmt, hpp, hists, line, &span);
746 
747 		if (!span)
748 			fprintf(fp, "%s", hpp->buf);
749 	}
750 }
751 
752 static int
753 hists__fprintf_standard_headers(struct hists *hists,
754 				struct perf_hpp *hpp,
755 				FILE *fp)
756 {
757 	struct perf_hpp_list *hpp_list = hists->hpp_list;
758 	struct perf_hpp_fmt *fmt;
759 	unsigned int width;
760 	const char *sep = symbol_conf.field_sep;
761 	bool first = true;
762 	int line;
763 
764 	for (line = 0; line < hpp_list->nr_header_lines; line++) {
765 		/* the first line's '# ' is printed by the caller, one level up */
766 		if (line)
767 			fprintf(fp, "# ");
768 		fprintf_line(hists, hpp, line, fp);
769 		fprintf(fp, "\n");
770 	}
771 
772 	if (sep)
773 		return hpp_list->nr_header_lines;
774 
775 	first = true;
776 
777 	fprintf(fp, "# ");
778 
779 	hists__for_each_format(hists, fmt) {
780 		unsigned int i;
781 
782 		if (perf_hpp__should_skip(fmt, hists))
783 			continue;
784 
785 		if (!first)
786 			fprintf(fp, "%s", sep ?: "  ");
787 		else
788 			first = false;
789 
790 		width = fmt->width(fmt, hpp, hists);
791 		for (i = 0; i < width; i++)
792 			fprintf(fp, ".");
793 	}
794 
795 	fprintf(fp, "\n");
796 	fprintf(fp, "#\n");
797 	return hpp_list->nr_header_lines + 2;
798 }
799 
800 int hists__fprintf_headers(struct hists *hists, FILE *fp)
801 {
802 	char bf[1024];
803 	struct perf_hpp dummy_hpp = {
804 		.buf	= bf,
805 		.size	= sizeof(bf),
806 	};
807 
808 	fprintf(fp, "# ");
809 
810 	if (symbol_conf.report_hierarchy)
811 		return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
812 
813 	return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
814 }
816 
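/*
 * Top-level stdio printer: optionally emit the headers, then print one
 * line per unfiltered hist entry above @min_pcnt, stopping once
 * @max_rows lines have been printed when that limit is set.  Returns
 * the number of characters written, or (size_t)-1 if the line buffer
 * allocation fails.
 */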
817 size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
818 		      int max_cols, float min_pcnt, FILE *fp,
819 		      bool ignore_callchains)
820 {
821 	struct rb_node *nd;
822 	size_t ret = 0;
823 	const char *sep = symbol_conf.field_sep;
824 	int nr_rows = 0;
825 	size_t linesz;
826 	char *line = NULL;
827 	unsigned indent;
828 
829 	init_rem_hits();
830 
831 	hists__reset_column_width(hists);
832 
833 	if (symbol_conf.col_width_list_str)
834 		perf_hpp__set_user_width(symbol_conf.col_width_list_str);
835 
836 	if (show_header)
837 		nr_rows += hists__fprintf_headers(hists, fp);
838 
839 	if (max_rows && nr_rows >= max_rows)
840 		goto out;
841 
842 	linesz = hists__sort_list_width(hists) + 3 + 1;
843 	linesz += perf_hpp__color_overhead();
844 	line = malloc(linesz);
845 	if (line == NULL) {
846 		ret = -1;
847 		goto out;
848 	}
849 
850 	indent = hists__overhead_width(hists) + 4;
851 
852 	for (nd = rb_first_cached(&hists->entries); nd;
853 	     nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
854 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
855 		float percent;
856 
857 		if (h->filtered)
858 			continue;
859 
860 		if (symbol_conf.report_individual_block)
861 			percent = block_info__total_cycles_percent(h);
862 		else
863 			percent = hist_entry__get_percent_limit(h);
864 
865 		if (percent < min_pcnt)
866 			continue;
867 
868 		ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, ignore_callchains);
869 
870 		if (max_rows && ++nr_rows >= max_rows)
871 			break;
872 
873 		/*
874 		 * If all children are filtered out or percent-limited,
875 		 * display "no entry >= x.xx%" message.
876 		 */
877 		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
878 			int depth = hists->nr_hpp_node + h->depth + 1;
879 
880 			print_hierarchy_indent(sep, depth, " ", fp);
881 			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);
882 
883 			if (max_rows && ++nr_rows >= max_rows)
884 				break;
885 		}
886 
887 		if (h->ms.map == NULL && verbose > 1) {
888 			maps__fprintf(thread__maps(h->thread), fp);
889 			fprintf(fp, "%.10s end\n", graph_dotted_line);
890 		}
891 	}
892 
893 	free(line);
894 out:
895 	zfree(&rem_sq_bracket);
896 
897 	return ret;
898 }
899 
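/*
 * Print the per-PERF_RECORD_* event counters.  nr_events[0] holds the
 * overall total; every other known counter (optionally skipping empty
 * ones) is printed together with its share of that total.
 */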
900 size_t events_stats__fprintf(struct events_stats *stats, FILE *fp,
901 			     bool skip_empty)
902 {
903 	int i;
904 	size_t ret = 0;
905 	u32 total = stats->nr_events[0];
906 
907 	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
908 		const char *name;
909 
910 		name = perf_event__name(i);
911 		if (!strcmp(name, "UNKNOWN"))
912 			continue;
913 		if (skip_empty && !stats->nr_events[i])
914 			continue;
915 
916 		if (i && total) {
917 			ret += fprintf(fp, "%16s events: %10d  (%4.1f%%)\n",
918 				       name, stats->nr_events[i],
919 				       100.0 * stats->nr_events[i] / total);
920 		} else {
921 			ret += fprintf(fp, "%16s events: %10d\n",
922 				       name, stats->nr_events[i]);
923 		}
924 	}
925 
926 	return ret;
927 }
928