xref: /openbmc/linux/tools/perf/ui/stdio/hist.c (revision a06c488d)
#include <stdio.h>

#include "../../util/util.h"
#include "../../util/hist.h"
#include "../../util/sort.h"
#include "../../util/evsel.h"

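/*
 * Print the fixed indent used by the callchain output plus 'left_margin'
 * extra spaces, so graph lines start under the hist entry's columns.
 * Returns the number of characters written.
 */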
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, "            ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}

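/*
 * Emit one spacer line of the call graph: a '|' column for every level
 * still set in 'depth_mask', blanks elsewhere.
 */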
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|          ");
		else
			ret += fprintf(fp, "           ");

	ret += fprintf(fp, "\n");

	return ret;
}

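/*
 * Print a single callchain entry at 'depth'.  For the first entry of a
 * node (period == 0) the node's value is printed as a "--value--" branch
 * marker in the last depth column; later entries of the same node only
 * extend the '|' columns.  The symbol name is appended at the end of the
 * line.
 */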
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
				     struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, int left_margin)
{
	int i;
	size_t ret = 0;
	char bf[1024];

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			ret += fprintf(fp, "--");
			ret += callchain_node__fprintf_value(node, fp, total_samples);
			ret += fprintf(fp, "--");
		} else
			ret += fprintf(fp, "%s", "          ");
	}
	fputs(callchain_list__sym_name(chain, bf, sizeof(bf), false), fp);
	fputc('\n', fp);
	return ret;
}

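/*
 * "[...]" placeholder entry used to account for callchain hits that are
 * not shown (filtered out or beyond the print limit) in relative graph
 * mode.  init_rem_hits() allocates the fake symbol once per
 * hists__fprintf() call; it is released with zfree(&rem_sq_bracket) at
 * the end of that function.
 */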
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}

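/*
 * Recursively print one rb-tree level of the callchain graph.
 * 'depth_mask' tracks which parent levels still need a '|' column.  In
 * CHAIN_GRAPH_REL mode, any remaining (unprinted) hits are summarized
 * with the "[...]" placeholder built by init_rem_hits().
 */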
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child = NULL;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;
	int cumul_count = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;
		cumul_count += callchain_cumul_counts(child);

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth, unless we have
		 * remaining filtered hits, which will supersede the last
		 * child.
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But keep the old depth mask for the line separator, so
		 * the level link is preserved until we reach the last child.
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, child, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      left_margin);
		}

		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
		remaining && remaining != total_samples) {
		struct callchain_node rem_node = {
			.hit = remaining,
		};

		if (!rem_sq_bracket)
			return ret;

		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
			rem_node.count = child->parent->children_count - cumul_count;
			if (rem_node.count <= 0)
				return ret;
		}

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      left_margin);
	}

	return ret;
}

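/*
 * Print the whole callchain graph attached to one hist entry.  When the
 * chain has a single root, the root's value is skipped (it would just
 * repeat the entry's own percentage) and its children hang directly off
 * a "---" connector.  Illustrative sketch of the resulting layout
 * (percentages and symbol names are made up):
 *
 *            |
 *            ---main
 *               |
 *               |--60.00%--foo
 *               |
 *                --40.00%--bar
 */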
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;
	char bf[1024];

	/*
	 * If there is a single callchain root, don't bother printing
	 * its percentage (100 % in fractal mode, and the same percentage
	 * as the hist entry in graph mode). This also avoids one level
	 * of columns.
	 */
	node = rb_first(root);
	if (node && !rb_next(node)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same
			 * as the symbol itself. No need to print it, otherwise
			 * it would appear to be displayed twice.
			 */
			if (!i++ && field_order == NULL &&
			    sort_order && !prefixcmp(sort_order, "sym"))
				continue;
			if (!printed) {
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			ret += fprintf(fp, "%s\n", callchain_list__sym_name(chain, bf, sizeof(bf),
							false));

			if (++entries_printed == callchain_param.print_limit)
				break;
		}
		root = &cnode->rb_root;
	}

	ret += __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
	ret += fprintf(fp, "\n");

	return ret;
}

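/*
 * Flat-mode helper: recurse to the root of the branch via node->parent,
 * then print each symbol of this node on its own indented line, skipping
 * PERF_CONTEXT_* marker entries.
 */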
static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
					u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];

	if (!node)
		return 0;

	ret += __callchain__fprintf_flat(fp, node->parent, total_samples);

	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, "                %s\n", callchain_list__sym_name(chain,
					bf, sizeof(bf), false));
	}

	return ret;
}

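/*
 * Flat mode: print one block per chain, headed by its value (period,
 * percentage or count, depending on callchain_param.value) and followed
 * by the full root-to-leaf symbol list.
 */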
static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
				      u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {
		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += fprintf(fp, "           ");
		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, "\n");
		ret += __callchain__fprintf_flat(fp, chain, total_samples);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

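/*
 * Folded-mode helper: print the root-to-leaf chain on a single line,
 * joining symbol names with symbol_conf.field_sep (';' by default) so
 * the output can be consumed by stack-collapsing tools (e.g. flame
 * graph scripts).
 */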
static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
{
	const char *sep = symbol_conf.field_sep ?: ";";
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];
	bool first;

	if (!node)
		return 0;

	ret += __callchain__fprintf_folded(fp, node->parent);

	first = (ret == 0);
	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, "%s%s", first ? "" : sep,
			       callchain_list__sym_name(chain,
						bf, sizeof(bf), false));
		first = false;
	}

	return ret;
}

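/*
 * Folded mode: one line per chain, "<value> sym1;sym2;...;symN".
 */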
static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
					u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {
		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, " ");
		ret += __callchain__fprintf_folded(fp, chain);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

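/*
 * Dispatch on callchain_param.mode.  In relative graph mode the reference
 * value is the entry's own period (its accumulated period when
 * cumulate_callchain is set); the other modes scale against
 * total_samples.
 */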
static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
					    u64 total_samples, int left_margin,
					    FILE *fp)
{
	switch (callchain_param.mode) {
	case CHAIN_GRAPH_REL:
		return callchain__fprintf_graph(fp, &he->sorted_chain,
						symbol_conf.cumulate_callchain ?
						he->stat_acc->period : he->stat.period,
						left_margin);
		break;
	case CHAIN_GRAPH_ABS:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						left_margin);
		break;
	case CHAIN_FLAT:
		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
		break;
	case CHAIN_FOLDED:
		return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
		break;
	case CHAIN_NONE:
		break;
	default:
		pr_err("Bad callchain mode\n");
	}

	return 0;
}

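/*
 * Compute the left margin for the callchain graph: when sorting by comm
 * (the default), indent to the width of the comm column minus the length
 * of this entry's comm, so the branches line up under the entry.
 */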
static size_t hist_entry__callchain_fprintf(struct hist_entry *he,
					    struct hists *hists,
					    FILE *fp)
{
	int left_margin = 0;
	u64 total_period = hists->stats.total_period;

	if (field_order == NULL && (sort_order == NULL ||
				    !prefixcmp(sort_order, "comm"))) {
		struct perf_hpp_fmt *fmt;

		perf_hpp__for_each_format(fmt) {
			if (!perf_hpp__is_sort_entry(fmt))
				continue;

			/* must be 'comm' sort entry */
			left_margin = fmt->width(fmt, NULL, hists_to_evsel(hists));
			left_margin -= thread__comm_len(he->thread);
			break;
		}
	}
	return hist_entry_callchain__fprintf(he, total_period, left_margin, fp);
}

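/*
 * Format one hist entry into hpp->buf by walking the configured output
 * formats.  Columns are separated by symbol_conf.field_sep when set,
 * otherwise by two spaces; colored output is used when the format
 * provides a ->color callback and colors are enabled.  Returns the
 * number of bytes written to the buffer.
 */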
static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	char *start = hpp->buf;
	int ret;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	perf_hpp__for_each_format(fmt) {
		if (perf_hpp__should_skip(fmt, he->hists))
			continue;

		/*
		 * If there's no field_sep, we still need
		 * to display the initial "  ".
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		advance_hpp(hpp, ret);
	}

	return hpp->buf - start;
}

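/*
 * Print a single formatted entry line, then its callchain if callchains
 * are enabled.  'size' caps the formatting width; zero (or anything
 * larger than the scratch buffer) means "use the whole buffer".
 */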
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       struct hists *hists,
			       char *bf, size_t bfsz, FILE *fp)
{
	int ret;
	struct perf_hpp hpp = {
		.buf		= bf,
		.size		= size,
	};

	if (size == 0 || size > bfsz)
		size = hpp.size = bfsz;

	hist_entry__snprintf(he, &hpp);

	ret = fprintf(fp, "%s\n", bf);

	if (symbol_conf.use_callchain)
		ret += hist_entry__callchain_fprintf(he, hists, fp);

	return ret;
}

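/*
 * Top-level stdio renderer for a hists tree (e.g. 'perf report --stdio'):
 * optionally print the column headers and dotted underline, then every
 * unfiltered entry above min_pcnt, stopping after max_rows rows when a
 * limit is given.  Illustrative header shape (the column set depends on
 * the sort order):
 *
 *   # Overhead  Command  Shared Object  Symbol
 *   # ........  .......  .............  ......
 *   #
 */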
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp)
{
	struct perf_hpp_fmt *fmt;
	struct rb_node *nd;
	size_t ret = 0;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	int nr_rows = 0;
	char bf[96];
	struct perf_hpp dummy_hpp = {
		.buf	= bf,
		.size	= sizeof(bf),
	};
	bool first = true;
	size_t linesz;
	char *line = NULL;

	init_rem_hits();

	perf_hpp__for_each_format(fmt)
		perf_hpp__reset_width(fmt, hists);

	if (symbol_conf.col_width_list_str)
		perf_hpp__set_user_width(symbol_conf.col_width_list_str);

	if (!show_header)
		goto print_entries;

	fprintf(fp, "# ");

	perf_hpp__for_each_format(fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first)
			fprintf(fp, "%s", sep ?: "  ");
		else
			first = false;

		fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists));
		fprintf(fp, "%s", bf);
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	if (sep)
		goto print_entries;

	first = true;

	fprintf(fp, "# ");

	perf_hpp__for_each_format(fmt) {
		unsigned int i;

		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first)
			fprintf(fp, "%s", sep ?: "  ");
		else
			first = false;

		width = fmt->width(fmt, &dummy_hpp, hists_to_evsel(hists));
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	fprintf(fp, "#\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

print_entries:
	linesz = hists__sort_list_width(hists) + 3 + 1;
	linesz += perf_hpp__color_overhead();
	line = malloc(linesz);
	if (line == NULL) {
		ret = -1;
		goto out;
	}

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		float percent;

		if (h->filtered)
			continue;

		percent = hist_entry__get_percent_limit(h);
		if (percent < min_pcnt)
			continue;

		ret += hist_entry__fprintf(h, max_cols, hists, line, linesz, fp);

		if (max_rows && ++nr_rows >= max_rows)
			break;

		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(h->thread->mg,
						   MAP__FUNCTION, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(line);
out:
	zfree(&rem_sq_bracket);

	return ret;
}

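/*
 * Print a summary of how many events of each PERF_RECORD_* type were
 * seen, skipping types with zero occurrences and types perf_event__name()
 * does not know about ("UNKNOWN").
 */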
size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		const char *name;

		if (stats->nr_events[i] == 0)
			continue;

		name = perf_event__name(i);
		if (!strcmp(name, "UNKNOWN"))
			continue;

		ret += fprintf(fp, "%16s events: %10d\n", name,
			       stats->nr_events[i]);
	}

	return ret;
}