xref: /openbmc/linux/tools/perf/ui/stdio/hist.c (revision 74ce1896)
1 #include <stdio.h>
2 #include <linux/string.h>
3 
4 #include "../../util/util.h"
5 #include "../../util/hist.h"
6 #include "../../util/sort.h"
7 #include "../../util/evsel.h"
8 #include "../../util/srcline.h"
9 #include "../../util/string2.h"
10 #include "../../util/thread.h"
11 #include "../../util/sane_ctype.h"
12 
/*
 * Emit the fixed 12-space lead-in followed by @left_margin extra spaces
 * used to indent callchain output under the histogram columns.
 * Returns the number of characters written.
 */
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int written = fprintf(fp, "            ");
	int n = 0;

	while (n < left_margin) {
		written += fprintf(fp, " ");
		n++;
	}

	return written;
}
23 
24 static size_t inline__fprintf(struct map *map, u64 ip, int left_margin,
25 			      int depth, int depth_mask, FILE *fp)
26 {
27 	struct dso *dso;
28 	struct inline_node *node;
29 	struct inline_list *ilist;
30 	int ret = 0, i;
31 
32 	if (map == NULL)
33 		return 0;
34 
35 	dso = map->dso;
36 	if (dso == NULL)
37 		return 0;
38 
39 	node = dso__parse_addr_inlines(dso,
40 				       map__rip_2objdump(map, ip));
41 	if (node == NULL)
42 		return 0;
43 
44 	list_for_each_entry(ilist, &node->val, list) {
45 		if ((ilist->filename != NULL) || (ilist->funcname != NULL)) {
46 			ret += callchain__fprintf_left_margin(fp, left_margin);
47 
48 			for (i = 0; i < depth; i++) {
49 				if (depth_mask & (1 << i))
50 					ret += fprintf(fp, "|");
51 				else
52 					ret += fprintf(fp, " ");
53 				ret += fprintf(fp, "          ");
54 			}
55 
56 			if (callchain_param.key == CCKEY_ADDRESS ||
57 			    callchain_param.key == CCKEY_SRCLINE) {
58 				if (ilist->filename != NULL)
59 					ret += fprintf(fp, "%s:%d (inline)",
60 						       ilist->filename,
61 						       ilist->line_nr);
62 				else
63 					ret += fprintf(fp, "??");
64 			} else if (ilist->funcname != NULL)
65 				ret += fprintf(fp, "%s (inline)",
66 					       ilist->funcname);
67 			else if (ilist->filename != NULL)
68 				ret += fprintf(fp, "%s:%d (inline)",
69 					       ilist->filename,
70 					       ilist->line_nr);
71 			else
72 				ret += fprintf(fp, "??");
73 
74 			ret += fprintf(fp, "\n");
75 		}
76 	}
77 
78 	inline_node__delete(node);
79 	return ret;
80 }
81 
/*
 * Print one separator line of the callchain graph: the left margin, then
 * one 11-column cell per depth level — a '|' pipe where @depth_mask keeps
 * the level link alive, blanks otherwise — terminated by a newline.
 */
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	size_t written = callchain__fprintf_left_margin(fp, left_margin);
	int level = 0;

	while (level < depth) {
		const char *cell = (depth_mask & (1 << level)) ?
				   "|          " : "           ";

		written += fprintf(fp, "%s", cell);
		level++;
	}

	return written + fprintf(fp, "\n");
}
98 
99 static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
100 				     struct callchain_list *chain,
101 				     int depth, int depth_mask, int period,
102 				     u64 total_samples, int left_margin)
103 {
104 	int i;
105 	size_t ret = 0;
106 	char bf[1024], *alloc_str = NULL;
107 	char buf[64];
108 	const char *str;
109 
110 	ret += callchain__fprintf_left_margin(fp, left_margin);
111 	for (i = 0; i < depth; i++) {
112 		if (depth_mask & (1 << i))
113 			ret += fprintf(fp, "|");
114 		else
115 			ret += fprintf(fp, " ");
116 		if (!period && i == depth - 1) {
117 			ret += fprintf(fp, "--");
118 			ret += callchain_node__fprintf_value(node, fp, total_samples);
119 			ret += fprintf(fp, "--");
120 		} else
121 			ret += fprintf(fp, "%s", "          ");
122 	}
123 
124 	str = callchain_list__sym_name(chain, bf, sizeof(bf), false);
125 
126 	if (symbol_conf.show_branchflag_count) {
127 		callchain_list_counts__printf_value(chain, NULL,
128 						    buf, sizeof(buf));
129 
130 		if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
131 			str = "Not enough memory!";
132 		else
133 			str = alloc_str;
134 	}
135 
136 	fputs(str, fp);
137 	fputc('\n', fp);
138 	free(alloc_str);
139 
140 	if (symbol_conf.inline_name)
141 		ret += inline__fprintf(chain->ms.map, chain->ip,
142 				       left_margin, depth, depth_mask, fp);
143 	return ret;
144 }
145 
/* Synthetic "[...]" symbol shown for remaining (filtered-out) hits. */
static struct symbol *rem_sq_bracket;
/* callchain_list wrapper around rem_sq_bracket, passed to the printers. */
static struct callchain_list rem_hits;

/*
 * Allocate and initialize the "[...]" placeholder symbol.  On allocation
 * failure a message is printed and rem_sq_bracket stays NULL — callers
 * (e.g. __callchain__fprintf_graph) check for that and skip the entry.
 */
static void init_rem_hits(void)
{
	/*
	 * +6 bytes hold the "[...]" string (5 chars + NUL) copied into the
	 * symbol's trailing name storage — assumes struct symbol ends in a
	 * flexible name[] member; TODO confirm against util/symbol.h.
	 */
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}
160 
/*
 * Recursively print one level of the callchain graph.
 *
 * @root:          rb tree of sibling callchain nodes at this level
 * @total_samples: denominator used for percentage/value output
 * @depth:         current nesting level (1-based)
 * @depth_mask:    bit N set => keep drawing the '|' pipe for level N+1
 * @left_margin:   indentation inherited from the histogram columns
 *
 * Returns the number of characters printed.  In CHAIN_GRAPH_REL mode a
 * trailing synthetic "[...]" entry is printed for hits that were filtered
 * out of the tree.
 */
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child = NULL;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;
	int cumul_count = 0;

	/* Track how many samples are NOT covered by the visible children. */
	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;
		cumul_count += callchain_cumul_counts(child);

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		/* i doubles as "period": 0 only for the node's first entry. */
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, child, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      left_margin);
		}

		/* Relative mode rebases percentages on this child's subtree. */
		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
		remaining && remaining != total_samples) {
		struct callchain_node rem_node = {
			.hit = remaining,
		};

		/* init_rem_hits() may have failed; nothing to print then. */
		if (!rem_sq_bracket)
			return ret;

		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
			rem_node.count = child->parent->children_count - cumul_count;
			if (rem_node.count <= 0)
				return ret;
		}

		/* Print the synthetic "[...]" entry for the filtered hits. */
		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      left_margin);
	}

	return ret;
}
249 
250 /*
251  * If have one single callchain root, don't bother printing
252  * its percentage (100 % in fractal mode and the same percentage
253  * than the hist in graph mode). This also avoid one level of column.
254  *
255  * However when percent-limit applied, it's possible that single callchain
256  * node have different (non-100% in fractal mode) percentage.
257  */
258 static bool need_percent_display(struct rb_node *node, u64 parent_samples)
259 {
260 	struct callchain_node *cnode;
261 
262 	if (rb_next(node))
263 		return true;
264 
265 	cnode = rb_entry(node, struct callchain_node, rb_node);
266 	return callchain_cumul_hits(cnode) != parent_samples;
267 }
268 
/*
 * Print a full callchain graph for one hist entry.  When the tree has a
 * single root covering all parent samples, that root's entries are
 * printed inline (prefixed by the "---" connector) without a percentage
 * column, and the recursion starts from its children instead.
 * Returns the number of characters printed.
 */
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, u64 parent_samples,
				       int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;
	char bf[1024];

	node = rb_first(root);
	if (node && !need_percent_display(node, parent_samples)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same than
			 * the symbol. No need to print it otherwise it appears as
			 * displayed twice.
			 */
			if (!i++ && field_order == NULL &&
			    sort_order && strstarts(sort_order, "sym"))
				continue;

			if (!printed) {
				/* First line gets the "---" connector... */
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				/* ...and widens the margin for the rest. */
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			ret += fprintf(fp, "%s",
				       callchain_list__sym_name(chain, bf,
								sizeof(bf),
								false));

			if (symbol_conf.show_branchflag_count)
				ret += callchain_list_counts__printf_value(
						chain, fp, NULL, 0);
			ret += fprintf(fp, "\n");

			if (++entries_printed == callchain_param.print_limit)
				break;

			if (symbol_conf.inline_name)
				ret += inline__fprintf(chain->ms.map,
						       chain->ip,
						       left_margin,
						       0, 0,
						       fp);
		}
		/* Continue the graph from the single root's children. */
		root = &cnode->rb_root;
	}

	/* Relative mode computes percentages against the parent entry. */
	if (callchain_param.mode == CHAIN_GRAPH_REL)
		total_samples = parent_samples;

	ret += __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
	if (ret) {
		/* do not add a blank line if it printed nothing */
		ret += fprintf(fp, "\n");
	}

	return ret;
}
340 
341 static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
342 					u64 total_samples)
343 {
344 	struct callchain_list *chain;
345 	size_t ret = 0;
346 	char bf[1024];
347 
348 	if (!node)
349 		return 0;
350 
351 	ret += __callchain__fprintf_flat(fp, node->parent, total_samples);
352 
353 
354 	list_for_each_entry(chain, &node->val, list) {
355 		if (chain->ip >= PERF_CONTEXT_MAX)
356 			continue;
357 		ret += fprintf(fp, "                %s\n", callchain_list__sym_name(chain,
358 					bf, sizeof(bf), false));
359 	}
360 
361 	return ret;
362 }
363 
364 static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
365 				      u64 total_samples)
366 {
367 	size_t ret = 0;
368 	u32 entries_printed = 0;
369 	struct callchain_node *chain;
370 	struct rb_node *rb_node = rb_first(tree);
371 
372 	while (rb_node) {
373 		chain = rb_entry(rb_node, struct callchain_node, rb_node);
374 
375 		ret += fprintf(fp, "           ");
376 		ret += callchain_node__fprintf_value(chain, fp, total_samples);
377 		ret += fprintf(fp, "\n");
378 		ret += __callchain__fprintf_flat(fp, chain, total_samples);
379 		ret += fprintf(fp, "\n");
380 		if (++entries_printed == callchain_param.print_limit)
381 			break;
382 
383 		rb_node = rb_next(rb_node);
384 	}
385 
386 	return ret;
387 }
388 
389 static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
390 {
391 	const char *sep = symbol_conf.field_sep ?: ";";
392 	struct callchain_list *chain;
393 	size_t ret = 0;
394 	char bf[1024];
395 	bool first;
396 
397 	if (!node)
398 		return 0;
399 
400 	ret += __callchain__fprintf_folded(fp, node->parent);
401 
402 	first = (ret == 0);
403 	list_for_each_entry(chain, &node->val, list) {
404 		if (chain->ip >= PERF_CONTEXT_MAX)
405 			continue;
406 		ret += fprintf(fp, "%s%s", first ? "" : sep,
407 			       callchain_list__sym_name(chain,
408 						bf, sizeof(bf), false));
409 		first = false;
410 	}
411 
412 	return ret;
413 }
414 
415 static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
416 					u64 total_samples)
417 {
418 	size_t ret = 0;
419 	u32 entries_printed = 0;
420 	struct callchain_node *chain;
421 	struct rb_node *rb_node = rb_first(tree);
422 
423 	while (rb_node) {
424 
425 		chain = rb_entry(rb_node, struct callchain_node, rb_node);
426 
427 		ret += callchain_node__fprintf_value(chain, fp, total_samples);
428 		ret += fprintf(fp, " ");
429 		ret += __callchain__fprintf_folded(fp, chain);
430 		ret += fprintf(fp, "\n");
431 		if (++entries_printed == callchain_param.print_limit)
432 			break;
433 
434 		rb_node = rb_next(rb_node);
435 	}
436 
437 	return ret;
438 }
439 
440 static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
441 					    u64 total_samples, int left_margin,
442 					    FILE *fp)
443 {
444 	u64 parent_samples = he->stat.period;
445 
446 	if (symbol_conf.cumulate_callchain)
447 		parent_samples = he->stat_acc->period;
448 
449 	switch (callchain_param.mode) {
450 	case CHAIN_GRAPH_REL:
451 		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
452 						parent_samples, left_margin);
453 		break;
454 	case CHAIN_GRAPH_ABS:
455 		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
456 						parent_samples, left_margin);
457 		break;
458 	case CHAIN_FLAT:
459 		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
460 		break;
461 	case CHAIN_FOLDED:
462 		return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
463 		break;
464 	case CHAIN_NONE:
465 		break;
466 	default:
467 		pr_err("Bad callchain mode\n");
468 	}
469 
470 	return 0;
471 }
472 
473 int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
474 			   struct perf_hpp_list *hpp_list)
475 {
476 	const char *sep = symbol_conf.field_sep;
477 	struct perf_hpp_fmt *fmt;
478 	char *start = hpp->buf;
479 	int ret;
480 	bool first = true;
481 
482 	if (symbol_conf.exclude_other && !he->parent)
483 		return 0;
484 
485 	perf_hpp_list__for_each_format(hpp_list, fmt) {
486 		if (perf_hpp__should_skip(fmt, he->hists))
487 			continue;
488 
489 		/*
490 		 * If there's no field_sep, we still need
491 		 * to display initial '  '.
492 		 */
493 		if (!sep || !first) {
494 			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
495 			advance_hpp(hpp, ret);
496 		} else
497 			first = false;
498 
499 		if (perf_hpp__use_color() && fmt->color)
500 			ret = fmt->color(fmt, hpp, he);
501 		else
502 			ret = fmt->entry(fmt, hpp, he);
503 
504 		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
505 		advance_hpp(hpp, ret);
506 	}
507 
508 	return hpp->buf - start;
509 }
510 
/* Format @he into hpp->buf using the hists' default column list. */
static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
{
	return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
}
515 
516 static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
517 					 struct perf_hpp *hpp,
518 					 struct hists *hists,
519 					 FILE *fp)
520 {
521 	const char *sep = symbol_conf.field_sep;
522 	struct perf_hpp_fmt *fmt;
523 	struct perf_hpp_list_node *fmt_node;
524 	char *buf = hpp->buf;
525 	size_t size = hpp->size;
526 	int ret, printed = 0;
527 	bool first = true;
528 
529 	if (symbol_conf.exclude_other && !he->parent)
530 		return 0;
531 
532 	ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
533 	advance_hpp(hpp, ret);
534 
535 	/* the first hpp_list_node is for overhead columns */
536 	fmt_node = list_first_entry(&hists->hpp_formats,
537 				    struct perf_hpp_list_node, list);
538 	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
539 		/*
540 		 * If there's no field_sep, we still need
541 		 * to display initial '  '.
542 		 */
543 		if (!sep || !first) {
544 			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
545 			advance_hpp(hpp, ret);
546 		} else
547 			first = false;
548 
549 		if (perf_hpp__use_color() && fmt->color)
550 			ret = fmt->color(fmt, hpp, he);
551 		else
552 			ret = fmt->entry(fmt, hpp, he);
553 
554 		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
555 		advance_hpp(hpp, ret);
556 	}
557 
558 	if (!sep)
559 		ret = scnprintf(hpp->buf, hpp->size, "%*s",
560 				(hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
561 	advance_hpp(hpp, ret);
562 
563 	printed += fprintf(fp, "%s", buf);
564 
565 	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
566 		hpp->buf  = buf;
567 		hpp->size = size;
568 
569 		/*
570 		 * No need to call hist_entry__snprintf_alignment() since this
571 		 * fmt is always the last column in the hierarchy mode.
572 		 */
573 		if (perf_hpp__use_color() && fmt->color)
574 			fmt->color(fmt, hpp, he);
575 		else
576 			fmt->entry(fmt, hpp, he);
577 
578 		/*
579 		 * dynamic entries are right-aligned but we want left-aligned
580 		 * in the hierarchy mode
581 		 */
582 		printed += fprintf(fp, "%s%s", sep ?: "  ", ltrim(buf));
583 	}
584 	printed += putc('\n', fp);
585 
586 	if (symbol_conf.use_callchain && he->leaf) {
587 		u64 total = hists__total_period(hists);
588 
589 		printed += hist_entry_callchain__fprintf(he, total, 0, fp);
590 		goto out;
591 	}
592 
593 out:
594 	return printed;
595 }
596 
/*
 * Print one hist entry row (formatted into @bf) followed by its callchain
 * and/or inlined frames.  @size caps the formatting buffer; 0 or anything
 * larger than @bfsz falls back to @bfsz.  Returns the number of
 * characters printed.
 */
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       char *bf, size_t bfsz, FILE *fp,
			       bool use_callchain)
{
	int ret;
	int callchain_ret = 0;
	int inline_ret = 0;
	struct perf_hpp hpp = {
		.buf		= bf,
		.size		= size,
	};
	struct hists *hists = he->hists;
	u64 total_period = hists->stats.total_period;

	/* Clamp the formatting buffer to the caller-provided storage. */
	if (size == 0 || size > bfsz)
		size = hpp.size = bfsz;

	/* Hierarchy mode has its own row printer. */
	if (symbol_conf.report_hierarchy)
		return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);

	hist_entry__snprintf(he, &hpp);

	ret = fprintf(fp, "%s\n", bf);

	if (use_callchain)
		callchain_ret = hist_entry_callchain__fprintf(he, total_period,
							      0, fp);

	/* Inlined frames are shown only when no callchain was printed. */
	if (callchain_ret == 0 && symbol_conf.inline_name) {
		inline_ret = inline__fprintf(he->ms.map, he->ip, 0, 0, 0, fp);
		ret += inline_ret;
		if (inline_ret > 0)
			ret += fprintf(fp, "\n");
	} else
		ret += callchain_ret;

	return ret;
}
635 
636 static int print_hierarchy_indent(const char *sep, int indent,
637 				  const char *line, FILE *fp)
638 {
639 	if (sep != NULL || indent < 2)
640 		return 0;
641 
642 	return fprintf(fp, "%-.*s", (indent - 2) * HIERARCHY_INDENT, line);
643 }
644 
/*
 * Print the column headers for --hierarchy mode: one line of overhead
 * headers plus the sort headers joined by " / ", then a line of dots
 * underlining them.  Always prints two lines and returns 2.
 */
static int hists__fprintf_hierarchy_headers(struct hists *hists,
					    struct perf_hpp *hpp, FILE *fp)
{
	bool first_node, first_col;
	int indent;
	int depth;
	unsigned width = 0;
	unsigned header_width = 0;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	const char *sep = symbol_conf.field_sep;

	indent = hists->nr_hpp_node;

	/* preserve max indent depth for column headers */
	print_hierarchy_indent(sep, indent, spaces, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		fmt->header(fmt, hpp, hists, 0, NULL);
		fprintf(fp, "%s%s", hpp->buf, sep ?: "  ");
	}

	/* combine sort headers with ' / ' */
	first_node = true;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		if (!first_node)
			header_width += fprintf(fp, " / ");
		first_node = false;

		first_col = true;
		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				header_width += fprintf(fp, "+");
			first_col = false;

			fmt->header(fmt, hpp, hists, 0, NULL);

			/* Track printed width to size the dotted underline. */
			header_width += fprintf(fp, "%s", trim(hpp->buf));
		}
	}

	fprintf(fp, "\n# ");

	/* preserve max indent depth for initial dots */
	print_hierarchy_indent(sep, indent, dots, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	first_col = true;
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		if (!first_col)
			fprintf(fp, "%s", sep ?: "..");
		first_col = false;

		width = fmt->width(fmt, hpp, hists);
		fprintf(fp, "%.*s", width, dots);
	}

	/* Widest sort level determines the trailing underline width. */
	depth = 0;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		first_col = true;
		width = depth * HIERARCHY_INDENT;

		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				width++;  /* for '+' sign between column header */
			first_col = false;

			width += fmt->width(fmt, hpp, hists);
		}

		if (width > header_width)
			header_width = width;

		depth++;
	}

	fprintf(fp, "%s%-.*s", sep ?: "  ", header_width, dots);

	fprintf(fp, "\n#\n");

	return 2;
}
740 
741 static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
742 			 int line, FILE *fp)
743 {
744 	struct perf_hpp_fmt *fmt;
745 	const char *sep = symbol_conf.field_sep;
746 	bool first = true;
747 	int span = 0;
748 
749 	hists__for_each_format(hists, fmt) {
750 		if (perf_hpp__should_skip(fmt, hists))
751 			continue;
752 
753 		if (!first && !span)
754 			fprintf(fp, "%s", sep ?: "  ");
755 		else
756 			first = false;
757 
758 		fmt->header(fmt, hpp, hists, line, &span);
759 
760 		if (!span)
761 			fprintf(fp, "%s", hpp->buf);
762 	}
763 }
764 
765 static int
766 hists__fprintf_standard_headers(struct hists *hists,
767 				struct perf_hpp *hpp,
768 				FILE *fp)
769 {
770 	struct perf_hpp_list *hpp_list = hists->hpp_list;
771 	struct perf_hpp_fmt *fmt;
772 	unsigned int width;
773 	const char *sep = symbol_conf.field_sep;
774 	bool first = true;
775 	int line;
776 
777 	for (line = 0; line < hpp_list->nr_header_lines; line++) {
778 		/* first # is displayed one level up */
779 		if (line)
780 			fprintf(fp, "# ");
781 		fprintf_line(hists, hpp, line, fp);
782 		fprintf(fp, "\n");
783 	}
784 
785 	if (sep)
786 		return hpp_list->nr_header_lines;
787 
788 	first = true;
789 
790 	fprintf(fp, "# ");
791 
792 	hists__for_each_format(hists, fmt) {
793 		unsigned int i;
794 
795 		if (perf_hpp__should_skip(fmt, hists))
796 			continue;
797 
798 		if (!first)
799 			fprintf(fp, "%s", sep ?: "  ");
800 		else
801 			first = false;
802 
803 		width = fmt->width(fmt, hpp, hists);
804 		for (i = 0; i < width; i++)
805 			fprintf(fp, ".");
806 	}
807 
808 	fprintf(fp, "\n");
809 	fprintf(fp, "#\n");
810 	return hpp_list->nr_header_lines + 2;
811 }
812 
813 int hists__fprintf_headers(struct hists *hists, FILE *fp)
814 {
815 	char bf[1024];
816 	struct perf_hpp dummy_hpp = {
817 		.buf	= bf,
818 		.size	= sizeof(bf),
819 	};
820 
821 	fprintf(fp, "# ");
822 
823 	if (symbol_conf.report_hierarchy)
824 		return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
825 	else
826 		return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
827 
828 }
829 
/*
 * Print all visible entries of @hists to @fp: optional headers, then one
 * row per entry that is neither filtered nor below @min_pcnt, stopping
 * after @max_rows rows (0 = unlimited).  Returns the number of
 * characters printed.
 */
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp,
		      bool use_callchain)
{
	struct rb_node *nd;
	size_t ret = 0;
	const char *sep = symbol_conf.field_sep;
	int nr_rows = 0;
	size_t linesz;
	char *line = NULL;
	unsigned indent;

	/* Allocate the shared "[...]" placeholder; freed at the end. */
	init_rem_hits();

	hists__reset_column_width(hists);

	if (symbol_conf.col_width_list_str)
		perf_hpp__set_user_width(symbol_conf.col_width_list_str);

	if (show_header)
		nr_rows += hists__fprintf_headers(hists, fp);

	if (max_rows && nr_rows >= max_rows)
		goto out;

	/* Row buffer: column widths + slack + NUL, plus color escapes. */
	linesz = hists__sort_list_width(hists) + 3 + 1;
	linesz += perf_hpp__color_overhead();
	line = malloc(linesz);
	if (line == NULL) {
		/*
		 * NOTE(review): ret is size_t, so -1 is returned as
		 * SIZE_MAX — callers presumably treat it as an error
		 * sentinel; confirm before changing the type.
		 */
		ret = -1;
		goto out;
	}

	indent = hists__overhead_width(hists) + 4;

	for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		float percent;

		if (h->filtered)
			continue;

		percent = hist_entry__get_percent_limit(h);
		if (percent < min_pcnt)
			continue;

		ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, use_callchain);

		if (max_rows && ++nr_rows >= max_rows)
			break;

		/*
		 * If all children are filtered out or percent-limited,
		 * display "no entry >= x.xx%" message.
		 */
		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
			int depth = hists->nr_hpp_node + h->depth + 1;

			print_hierarchy_indent(sep, depth, spaces, fp);
			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);

			if (max_rows && ++nr_rows >= max_rows)
				break;
		}

		/* Debug aid: dump the thread's maps for unresolved entries. */
		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(h->thread->mg,
						   MAP__FUNCTION, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(line);
out:
	zfree(&rem_sq_bracket);

	return ret;
}
908 
909 size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
910 {
911 	int i;
912 	size_t ret = 0;
913 
914 	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
915 		const char *name;
916 
917 		if (stats->nr_events[i] == 0)
918 			continue;
919 
920 		name = perf_event__name(i);
921 		if (!strcmp(name, "UNKNOWN"))
922 			continue;
923 
924 		ret += fprintf(fp, "%16s events: %10d\n", name,
925 			       stats->nr_events[i]);
926 	}
927 
928 	return ret;
929 }
930