// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <linux/compiler.h>

#include "../util/callchain.h"
#include "../util/debug.h"
#include "../util/hist.h"
#include "../util/util.h"
#include "../util/sort.h"
#include "../util/evsel.h"
#include "../util/evlist.h"
#include "../perf.h"

/* hist period print (hpp) functions */

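/*
 * Call a snprintf-style print function and advance the hpp buffer
 * position/size by the number of characters written, so that
 * consecutive calls append to the same output buffer.
 */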
#define hpp__call_print_fn(hpp, fn, fmt, ...)			\
({								\
	int __ret = fn(hpp, fmt, ##__VA_ARGS__);		\
	advance_hpp(hpp, __ret);				\
	__ret;							\
})

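/*
 * Print a single field of a hist entry.  For a group event leader this
 * also walks the pair entries of the other group members, printing each
 * member's value in group index order and zero-filling members that
 * have no samples, so that group columns always line up.
 */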
static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
		      hpp_field_fn get_field, const char *fmt, int len,
		      hpp_snprint_fn print_fn, bool fmt_percent)
{
	int ret;
	struct hists *hists = he->hists;
	struct evsel *evsel = hists_to_evsel(hists);
	char *buf = hpp->buf;
	size_t size = hpp->size;

	if (fmt_percent) {
		double percent = 0.0;
		u64 total = hists__total_period(hists);

		if (total)
			percent = 100.0 * get_field(he) / total;

		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
	} else {
		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he));
	}

	if (perf_evsel__is_group_event(evsel)) {
		int prev_idx, idx_delta;
		struct hist_entry *pair;
		int nr_members = evsel->core.nr_members;

		prev_idx = perf_evsel__group_idx(evsel);

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			u64 period = get_field(pair);
			u64 total = hists__total_period(pair->hists);

			if (!total)
				continue;

			evsel = hists_to_evsel(pair->hists);
			idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;

			while (idx_delta--) {
				/*
				 * Zero-fill group members in the middle
				 * which have no samples.
				 */
				if (fmt_percent) {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0.0);
				} else {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0ULL);
				}
			}

			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn, fmt, len,
							  100.0 * period / total);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn, fmt,
							  len, period);
			}

			prev_idx = perf_evsel__group_idx(evsel);
		}

		idx_delta = nr_members - prev_idx - 1;

		while (idx_delta--) {
			/*
			 * Zero-fill trailing group members which have
			 * no samples.
			 */
			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0.0);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0ULL);
			}
		}
	}

	/*
	 * Restore the original buf and size, as that is where the caller
	 * expects the result to be saved.
	 */
	hpp->buf = buf;
	hpp->size = size;

	return ret;
}

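/*
 * Entry point for formatting a field: picks the user-requested column
 * width if set, otherwise the default, and reserves room for the
 * separating space and the trailing '%' sign on percentage columns.
 * With a field separator configured, the width collapses to 1 since
 * aligned columns are not needed.
 */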
int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
	     struct hist_entry *he, hpp_field_fn get_field,
	     const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	int len = fmt->user_len ?: fmt->len;

	if (symbol_conf.field_sep) {
		return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
				  print_fn, fmt_percent);
	}

	if (fmt_percent)
		len -= 2; /* 2 for a space and a % sign */
	else
		len -= 1;

	return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmt_percent);
}

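/*
 * Accumulated ("children") variant: prints "N/A" when callchain
 * accumulation is disabled, otherwise formats the accumulated field
 * like any other column.
 */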
int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		 struct hist_entry *he, hpp_field_fn get_field,
		 const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	if (!symbol_conf.cumulate_callchain) {
		int len = fmt->user_len ?: fmt->len;
		return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
	}

	return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmt_percent);
}

static int field_cmp(u64 field_a, u64 field_b)
{
	if (field_a > field_b)
		return 1;
	if (field_a < field_b)
		return -1;
	return 0;
}

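/*
 * Sort on the leader's field first.  On a tie, and only for event
 * groups, gather each member's field from the pair entries into
 * per-index arrays and compare them member by member (index 0 is the
 * leader, already compared above, so the loop starts at 1).
 */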
static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
		       hpp_field_fn get_field)
{
	s64 ret;
	int i, nr_members;
	struct evsel *evsel;
	struct hist_entry *pair;
	u64 *fields_a, *fields_b;

	ret = field_cmp(get_field(a), get_field(b));
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	if (!perf_evsel__is_group_event(evsel))
		return ret;

	nr_members = evsel->core.nr_members;
	fields_a = calloc(nr_members, sizeof(*fields_a));
	fields_b = calloc(nr_members, sizeof(*fields_b));

	if (!fields_a || !fields_b)
		goto out;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		fields_a[perf_evsel__group_idx(evsel)] = get_field(pair);
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		fields_b[perf_evsel__group_idx(evsel)] = get_field(pair);
	}

	for (i = 1; i < nr_members; i++) {
		ret = field_cmp(fields_a[i], fields_b[i]);
		if (ret)
			break;
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}

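/*
 * Sort variant for the accumulated column.  When two entries have an
 * equal period, tie-break on callchain depth so that a caller sorts
 * above its callee; the direction is flipped for caller-first ordering.
 */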
static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
			   hpp_field_fn get_field)
{
	s64 ret = 0;

	if (symbol_conf.cumulate_callchain) {
		/*
		 * Put caller above callee when they have equal period.
		 */
		ret = field_cmp(get_field(a), get_field(b));
		if (ret)
			return ret;

		if (a->thread != b->thread || !hist_entry__has_callchains(a) ||
		    !symbol_conf.use_callchain)
			return 0;

		ret = b->callchain->max_depth - a->callchain->max_depth;
		if (callchain_param.order == ORDER_CALLER)
			ret = -ret;
	}
	return ret;
}

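/*
 * Column width: the user-specified width if any, otherwise the default,
 * widened to hold one sub-column per group member and never narrower
 * than the header name.
 */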
static int hpp__width_fn(struct perf_hpp_fmt *fmt,
			 struct perf_hpp *hpp __maybe_unused,
			 struct hists *hists)
{
	int len = fmt->user_len ?: fmt->len;
	struct evsel *evsel = hists_to_evsel(hists);

	if (symbol_conf.event_group)
		len = max(len, evsel->core.nr_members * fmt->len);

	if (len < (int)strlen(fmt->name))
		len = strlen(fmt->name);

	return len;
}

static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			  struct hists *hists, int line __maybe_unused,
			  int *span __maybe_unused)
{
	int len = hpp__width_fn(fmt, hpp, hists);
	return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
}

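/*
 * Percent printing callback: the variadic arguments are expected to be
 * an int field width followed by a double percentage, matching how
 * hpp__fmt() invokes its print function for percentage columns.
 */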
int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	double percent;
	int ret, len;

	va_start(args, fmt);
	len = va_arg(args, int);
	percent = va_arg(args, double);
	ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

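/*
 * Macro family generating the per-column callbacks: a field getter plus
 * color, plain entry, and sort functions for percentage columns (with
 * accumulated variants) and raw-value columns.  They are instantiated
 * by the HPP_*_FNS() invocations below.
 */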
#define __HPP_COLOR_PERCENT_FN(_type, _field)					\
static u64 he_get_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_PERCENT_FN(_type, _field)					\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_FN(_type, _field)						\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_##_field);				\
}

#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field)				\
static u64 he_get_acc_##_field(struct hist_entry *he)				\
{										\
	return he->stat_acc->_field;						\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field)				\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_ACC_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort_acc(a, b, he_get_acc_##_field);			\
}

#define __HPP_ENTRY_RAW_FN(_type, _field)					\
static u64 he_get_raw_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64,	\
			hpp_entry_scnprintf, false);				\
}

#define __HPP_SORT_RAW_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_raw_##_field);				\
}

#define HPP_PERCENT_FNS(_type, _field)					\
__HPP_COLOR_PERCENT_FN(_type, _field)					\
__HPP_ENTRY_PERCENT_FN(_type, _field)					\
__HPP_SORT_FN(_type, _field)

#define HPP_PERCENT_ACC_FNS(_type, _field)				\
__HPP_COLOR_ACC_PERCENT_FN(_type, _field)				\
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field)				\
__HPP_SORT_ACC_FN(_type, _field)

#define HPP_RAW_FNS(_type, _field)					\
__HPP_ENTRY_RAW_FN(_type, _field)					\
__HPP_SORT_RAW_FN(_type, _field)

HPP_PERCENT_FNS(overhead, period)
HPP_PERCENT_FNS(overhead_sys, period_sys)
HPP_PERCENT_FNS(overhead_us, period_us)
HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys)
HPP_PERCENT_FNS(overhead_guest_us, period_guest_us)
HPP_PERCENT_ACC_FNS(overhead_acc, period)

HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)

static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			    struct hist_entry *a __maybe_unused,
			    struct hist_entry *b __maybe_unused)
{
	return 0;
}

static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
{
	return a->header == hpp__header_fn;
}

static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
		return false;

	return a->idx == b->idx;
}

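/*
 * Initializers for the static perf_hpp__format[] table below: colored
 * percentage columns, the colored accumulated column, and plain
 * raw-value columns.
 */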
#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name   = _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx)	\
	{						\
		.name   = _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name   = _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

struct perf_hpp_fmt perf_hpp__format[] = {
	HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
	HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
	HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
	HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
	HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
	HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
	HPP__PRINT_FNS("Samples", samples, SAMPLES),
	HPP__PRINT_FNS("Period", period, PERIOD)
};

struct perf_hpp_list perf_hpp_list = {
	.fields	= LIST_HEAD_INIT(perf_hpp_list.fields),
	.sorts	= LIST_HEAD_INIT(perf_hpp_list.sorts),
	.nr_header_lines = 1,
};

#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN

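/*
 * Set up the default output columns.  When callchain accumulation is
 * enabled, the accumulated column is added first and the plain overhead
 * column is relabelled "Self"; a user-specified strict field order
 * suppresses all defaults.
 */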
void perf_hpp__init(void)
{
	int i;

	for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
		struct perf_hpp_fmt *fmt = &perf_hpp__format[i];

		INIT_LIST_HEAD(&fmt->list);

		/* sort_list may be linked by setup_sorting() */
		if (fmt->sort_list.next == NULL)
			INIT_LIST_HEAD(&fmt->sort_list);
	}

	/*
	 * If the user specified a field order, there is no need to set up
	 * the default fields.
	 */
	if (is_strict_order(field_order))
		return;

	if (symbol_conf.cumulate_callchain) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC);
		perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
	}

	hpp_dimension__add_output(PERF_HPP__OVERHEAD);

	if (symbol_conf.show_cpu_utilization) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS);
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_US);

		if (perf_guest) {
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS);
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US);
		}
	}

	if (symbol_conf.show_nr_samples)
		hpp_dimension__add_output(PERF_HPP__SAMPLES);

	if (symbol_conf.show_total_period)
		hpp_dimension__add_output(PERF_HPP__PERIOD);
}

void perf_hpp_list__column_register(struct perf_hpp_list *list,
				    struct perf_hpp_fmt *format)
{
	list_add_tail(&format->list, &list->fields);
}

void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
					struct perf_hpp_fmt *format)
{
	list_add_tail(&format->sort_list, &list->sorts);
}

void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
				       struct perf_hpp_fmt *format)
{
	list_add(&format->sort_list, &list->sorts);
}

void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
	list_del_init(&format->list);
}

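/*
 * Undo the cumulate setup from perf_hpp__init(): drop the accumulated
 * "Children" column and rename the "Self" column back to "Overhead".
 */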
void perf_hpp__cancel_cumulate(void)
{
	struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp;

	if (is_strict_order(field_order))
		return;

	ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
	acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];

	perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
		if (acc->equal(acc, fmt)) {
			perf_hpp__column_unregister(fmt);
			continue;
		}

		if (ovh->equal(ovh, fmt))
			fmt->name = "Overhead";
	}
}

static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	return a->equal && a->equal(a, b);
}

void perf_hpp__setup_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append sort keys to the output fields */
	perf_hpp_list__for_each_sort_list(list, fmt) {
		struct perf_hpp_fmt *pos;

		/* skip sort-only fields ("sort_compute" in perf diff) */
		if (!fmt->entry && !fmt->color)
			continue;

		perf_hpp_list__for_each_format(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__column_register(fmt);
next:
		continue;
	}
}

void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append output fields to the sort keys */
	perf_hpp_list__for_each_format(list, fmt) {
		struct perf_hpp_fmt *pos;

		perf_hpp_list__for_each_sort_list(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__register_sort_field(fmt);
next:
		continue;
	}
}

static void fmt_free(struct perf_hpp_fmt *fmt)
{
	/*
	 * At this point fmt should be completely unhooked;
	 * if not, it's a bug.
	 */
	BUG_ON(!list_empty(&fmt->list));
	BUG_ON(!list_empty(&fmt->sort_list));

	if (fmt->free)
		fmt->free(fmt);
}

void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt, *tmp;

	/* reset output fields */
	perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}

	/* reset sort keys */
	perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	if (verbose > 0 && hists__has(hists, sym)) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

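/*
 * Width of the leading overhead columns only, i.e. everything printed
 * before the first sort or dynamic entry.
 */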
unsigned int hists__overhead_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			break;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	return ret;
}

void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	if (perf_hpp__is_sort_entry(fmt))
		return perf_hpp__reset_sort_width(fmt, hists);

	if (perf_hpp__is_dynamic_entry(fmt))
		return;

	BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);

	switch (fmt->idx) {
	case PERF_HPP__OVERHEAD:
	case PERF_HPP__OVERHEAD_SYS:
	case PERF_HPP__OVERHEAD_US:
	case PERF_HPP__OVERHEAD_ACC:
		fmt->len = 8;
		break;

	case PERF_HPP__OVERHEAD_GUEST_SYS:
	case PERF_HPP__OVERHEAD_GUEST_US:
		fmt->len = 9;
		break;

	case PERF_HPP__SAMPLES:
	case PERF_HPP__PERIOD:
		fmt->len = 12;
		break;

	default:
		break;
	}
}

void hists__reset_column_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *node;

	hists__for_each_format(hists, fmt)
		perf_hpp__reset_width(fmt, hists);

	/* hierarchy entries have their own hpp list */
	list_for_each_entry(node, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format(&node->hpp, fmt)
			perf_hpp__reset_width(fmt, hists);
	}
}

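/*
 * Apply user-requested column widths from a comma-separated list
 * (e.g. "4,8,12"), assigning one width per column in output order and
 * stopping at the first entry that is not followed by a comma.
 */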
void perf_hpp__set_user_width(const char *width_list_str)
{
	struct perf_hpp_fmt *fmt;
	const char *ptr = width_list_str;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		char *p;

		int len = strtol(ptr, &p, 10);
		fmt->user_len = len;

		if (*p == ',')
			ptr = p + 1;
		else
			break;
	}
}

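/*
 * Duplicate fmt onto the per-level hpp list used by hierarchy mode,
 * allocating the level node on first use.  A level is only marked as
 * skipped if every format added to it should be skipped.
 */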
static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_list_node *node = NULL;
	struct perf_hpp_fmt *fmt_copy;
	bool found = false;
	bool skip = perf_hpp__should_skip(fmt, hists);

	list_for_each_entry(node, &hists->hpp_formats, list) {
		if (node->level == fmt->level) {
			found = true;
			break;
		}
	}

	if (!found) {
		node = malloc(sizeof(*node));
		if (node == NULL)
			return -1;

		node->skip = skip;
		node->level = fmt->level;
		perf_hpp_list__init(&node->hpp);

		hists->nr_hpp_node++;
		list_add_tail(&node->list, &hists->hpp_formats);
	}

	fmt_copy = perf_hpp_fmt__dup(fmt);
	if (fmt_copy == NULL)
		return -1;

	if (!skip)
		node->skip = false;

	list_add_tail(&fmt_copy->list, &node->hpp.fields);
	list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);

	return 0;
}

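/*
 * In hierarchy (--hierarchy) mode, build the per-level format lists for
 * every evsel's hists from the sort list, skipping dynamic entries that
 * are not defined for a given hists.
 */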
int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
				  struct evlist *evlist)
{
	struct evsel *evsel;
	struct perf_hpp_fmt *fmt;
	struct hists *hists;
	int ret;

	if (!symbol_conf.report_hierarchy)
		return 0;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);

		perf_hpp_list__for_each_sort_list(list, fmt) {
			if (perf_hpp__is_dynamic_entry(fmt) &&
			    !perf_hpp__defined_dynamic_entry(fmt, hists))
				continue;

			ret = add_hierarchy_fmt(hists, fmt);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}