// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <math.h>
#include <linux/compiler.h>

#include "../util/hist.h"
#include "../util/util.h"
#include "../util/sort.h"
#include "../util/evsel.h"
#include "../util/evlist.h"

/* hist period print (hpp) functions */

#define hpp__call_print_fn(hpp, fn, fmt, ...)			\
({								\
	int __ret = fn(hpp, fmt, ##__VA_ARGS__);		\
	advance_hpp(hpp, __ret);				\
	__ret;							\
})

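/*
 * Print one column value for @he: either a percentage of the hists'
 * total period or the raw field, as selected by @fmt_percent.  For
 * event group members the values of every member are emitted side by
 * side, zero-filling members that have no hist entry pair so the
 * columns stay aligned.  hpp->buf/size are advanced while printing
 * and restored before returning.
 */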
static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
		      hpp_field_fn get_field, const char *fmt, int len,
		      hpp_snprint_fn print_fn, bool fmt_percent)
{
	int ret;
	struct hists *hists = he->hists;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	char *buf = hpp->buf;
	size_t size = hpp->size;

	if (fmt_percent) {
		double percent = 0.0;
		u64 total = hists__total_period(hists);

		if (total)
			percent = 100.0 * get_field(he) / total;

		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
	} else
		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he));

	if (perf_evsel__is_group_event(evsel)) {
		int prev_idx, idx_delta;
		struct hist_entry *pair;
		int nr_members = evsel->nr_members;

		prev_idx = perf_evsel__group_idx(evsel);

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			u64 period = get_field(pair);
			u64 total = hists__total_period(pair->hists);

			if (!total)
				continue;

			evsel = hists_to_evsel(pair->hists);
			idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;

			while (idx_delta--) {
				/*
				 * zero-fill group members in the middle which
				 * have no sample
				 */
				if (fmt_percent) {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0.0);
				} else {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0ULL);
				}
			}

			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn, fmt, len,
							  100.0 * period / total);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn, fmt,
							  len, period);
			}

			prev_idx = perf_evsel__group_idx(evsel);
		}

		idx_delta = nr_members - prev_idx - 1;

		while (idx_delta--) {
			/*
			 * zero-fill trailing group members which have no sample
			 */
			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0.0);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0ULL);
			}
		}
	}

	/*
	 * Restore the original buf and size since that is where the
	 * caller expects the result to be saved.
	 */
	hpp->buf = buf;
	hpp->size = size;

	return ret;
}

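/*
 * Entry point used by the ->color/->entry callbacks below: pick the
 * column width (user override first) and reserve room for the
 * trailing " %" of percentage output.  With a field separator set
 * columns are not aligned, so a minimal width of 1 is used.
 */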
int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
	     struct hist_entry *he, hpp_field_fn get_field,
	     const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	int len = fmt->user_len ?: fmt->len;

	if (symbol_conf.field_sep) {
		return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
				  print_fn, fmt_percent);
	}

	if (fmt_percent)
		len -= 2; /* 2 for a space and a % sign */
	else
		len -= 1;

	return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmt_percent);
}

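/*
 * Accumulated ("children") variant: print "N/A" when callchain
 * cumulation is disabled, otherwise behave exactly like hpp__fmt().
 */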
int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		 struct hist_entry *he, hpp_field_fn get_field,
		 const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	if (!symbol_conf.cumulate_callchain) {
		int len = fmt->user_len ?: fmt->len;
		return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
	}

	return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmt_percent);
}

static int field_cmp(u64 field_a, u64 field_b)
{
	if (field_a > field_b)
		return 1;
	if (field_a < field_b)
		return -1;
	return 0;
}

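/*
 * Compare two entries by @get_field.  For event groups, entries whose
 * leader values compare equal are further compared member by member
 * in group index order.
 */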
static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
		       hpp_field_fn get_field)
{
	s64 ret;
	int i, nr_members;
	struct perf_evsel *evsel;
	struct hist_entry *pair;
	u64 *fields_a, *fields_b;

	ret = field_cmp(get_field(a), get_field(b));
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	if (!perf_evsel__is_group_event(evsel))
		return ret;

	nr_members = evsel->nr_members;
	fields_a = calloc(nr_members, sizeof(*fields_a));
	fields_b = calloc(nr_members, sizeof(*fields_b));

	if (!fields_a || !fields_b)
		goto out;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		fields_a[perf_evsel__group_idx(evsel)] = get_field(pair);
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		fields_b[perf_evsel__group_idx(evsel)] = get_field(pair);
	}

	for (i = 1; i < nr_members; i++) {
		ret = field_cmp(fields_a[i], fields_b[i]);
		if (ret)
			break;
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}

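/*
 * Compare two entries by the accumulated field.  When the periods are
 * equal and the entries belong to the same thread, order them by
 * callchain depth so that a caller ends up above its callee (the
 * order is inverted for caller-first callchains).
 */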
static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
			   hpp_field_fn get_field)
{
	s64 ret = 0;

	if (symbol_conf.cumulate_callchain) {
		/*
		 * Put caller above callee when they have equal period.
		 */
		ret = field_cmp(get_field(a), get_field(b));
		if (ret)
			return ret;

		if (a->thread != b->thread || !symbol_conf.use_callchain)
			return 0;

		ret = b->callchain->max_depth - a->callchain->max_depth;
		if (callchain_param.order == ORDER_CALLER)
			ret = -ret;
	}
	return ret;
}

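/*
 * Column width: the user override if set, otherwise the format's
 * default.  Event group columns are widened to fit one value per
 * group member, and a column is never narrower than its header name.
 */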
static int hpp__width_fn(struct perf_hpp_fmt *fmt,
			 struct perf_hpp *hpp __maybe_unused,
			 struct hists *hists)
{
	int len = fmt->user_len ?: fmt->len;
	struct perf_evsel *evsel = hists_to_evsel(hists);

	if (symbol_conf.event_group)
		len = max(len, evsel->nr_members * fmt->len);

	if (len < (int)strlen(fmt->name))
		len = strlen(fmt->name);

	return len;
}

static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			  struct hists *hists, int line __maybe_unused,
			  int *span __maybe_unused)
{
	int len = hpp__width_fn(fmt, hpp, hists);
	return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
}

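/*
 * hpp_snprint_fn helpers: both are passed a field width and the value
 * to print in their varargs.  The color variant pulls them out
 * explicitly so the percentage can be run through
 * percent_color_len_snprintf() and highlighted according to its
 * magnitude.
 */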
int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	double percent;
	int ret, len;

	va_start(args, fmt);
	len = va_arg(args, int);
	percent = va_arg(args, double);
	ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

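/*
 * The macros below generate, per column type, the stat field getter
 * together with the ->color, ->entry and ->sort callbacks that are
 * wired into perf_hpp__format[] further down.
 */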
#define __HPP_COLOR_PERCENT_FN(_type, _field)					\
static u64 he_get_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_PERCENT_FN(_type, _field)					\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_FN(_type, _field)						\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_##_field);				\
}

#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field)				\
static u64 he_get_acc_##_field(struct hist_entry *he)				\
{										\
	return he->stat_acc->_field;						\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field)				\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_ACC_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort_acc(a, b, he_get_acc_##_field);			\
}

#define __HPP_ENTRY_RAW_FN(_type, _field)					\
static u64 he_get_raw_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64,	\
			hpp_entry_scnprintf, false);				\
}

#define __HPP_SORT_RAW_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_raw_##_field);				\
}


#define HPP_PERCENT_FNS(_type, _field)					\
__HPP_COLOR_PERCENT_FN(_type, _field)					\
__HPP_ENTRY_PERCENT_FN(_type, _field)					\
__HPP_SORT_FN(_type, _field)

#define HPP_PERCENT_ACC_FNS(_type, _field)				\
__HPP_COLOR_ACC_PERCENT_FN(_type, _field)				\
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field)				\
__HPP_SORT_ACC_FN(_type, _field)

#define HPP_RAW_FNS(_type, _field)					\
__HPP_ENTRY_RAW_FN(_type, _field)					\
__HPP_SORT_RAW_FN(_type, _field)

HPP_PERCENT_FNS(overhead, period)
HPP_PERCENT_FNS(overhead_sys, period_sys)
HPP_PERCENT_FNS(overhead_us, period_us)
HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys)
HPP_PERCENT_FNS(overhead_guest_us, period_guest_us)
HPP_PERCENT_ACC_FNS(overhead_acc, period)

HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)

static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			    struct hist_entry *a __maybe_unused,
			    struct hist_entry *b __maybe_unused)
{
	return 0;
}

static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
{
	return a->header == hpp__header_fn;
}

static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
		return false;

	return a->idx == b->idx;
}

#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name   = _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx)	\
	{						\
		.name   = _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name   = _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

struct perf_hpp_fmt perf_hpp__format[] = {
	HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
	HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
	HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
	HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
	HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
	HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
	HPP__PRINT_FNS("Samples", samples, SAMPLES),
	HPP__PRINT_FNS("Period", period, PERIOD)
};

struct perf_hpp_list perf_hpp_list = {
	.fields	= LIST_HEAD_INIT(perf_hpp_list.fields),
	.sorts	= LIST_HEAD_INIT(perf_hpp_list.sorts),
	.nr_header_lines = 1,
};

#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN


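/*
 * Register the default output columns according to the current
 * configuration (cumulated children, CPU utilization, sample and
 * period counts), unless the user specified an explicit field order.
 */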
void perf_hpp__init(void)
{
	int i;

	for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
		struct perf_hpp_fmt *fmt = &perf_hpp__format[i];

		INIT_LIST_HEAD(&fmt->list);

		/* sort_list may be linked by setup_sorting() */
		if (fmt->sort_list.next == NULL)
			INIT_LIST_HEAD(&fmt->sort_list);
	}

	/*
	 * If the user specified a field order, there is no need to
	 * set up the default fields.
	 */
	if (is_strict_order(field_order))
		return;

	if (symbol_conf.cumulate_callchain) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC);
		perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
	}

	hpp_dimension__add_output(PERF_HPP__OVERHEAD);

	if (symbol_conf.show_cpu_utilization) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS);
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_US);

		if (perf_guest) {
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS);
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US);
		}
	}

	if (symbol_conf.show_nr_samples)
		hpp_dimension__add_output(PERF_HPP__SAMPLES);

	if (symbol_conf.show_total_period)
		hpp_dimension__add_output(PERF_HPP__PERIOD);
}

void perf_hpp_list__column_register(struct perf_hpp_list *list,
				    struct perf_hpp_fmt *format)
{
	list_add_tail(&format->list, &list->fields);
}

void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
					struct perf_hpp_fmt *format)
{
	list_add_tail(&format->sort_list, &list->sorts);
}

void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
				       struct perf_hpp_fmt *format)
{
	list_add(&format->sort_list, &list->sorts);
}

void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
	list_del_init(&format->list);
}

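/*
 * Undo the effect of --children: drop the accumulated overhead column
 * and rename the remaining overhead column from "Self" back to
 * "Overhead".
 */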
void perf_hpp__cancel_cumulate(void)
{
	struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp;

	if (is_strict_order(field_order))
		return;

	ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
	acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];

	perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
		if (acc->equal(acc, fmt)) {
			perf_hpp__column_unregister(fmt);
			continue;
		}

		if (ovh->equal(ovh, fmt))
			fmt->name = "Overhead";
	}
}

static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	return a->equal && a->equal(a, b);
}

void perf_hpp__setup_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append sort keys to output field */
	perf_hpp_list__for_each_sort_list(list, fmt) {
		struct perf_hpp_fmt *pos;

		/* skip sort-only fields ("sort_compute" in perf diff) */
		if (!fmt->entry && !fmt->color)
			continue;

		perf_hpp_list__for_each_format(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__column_register(fmt);
next:
		continue;
	}
}

void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append output fields to sort keys */
	perf_hpp_list__for_each_format(list, fmt) {
		struct perf_hpp_fmt *pos;

		perf_hpp_list__for_each_sort_list(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__register_sort_field(fmt);
next:
		continue;
	}
}


static void fmt_free(struct perf_hpp_fmt *fmt)
{
	/*
	 * At this point fmt should be completely unhooked;
	 * if not, it's a bug.
	 */
	BUG_ON(!list_empty(&fmt->list));
	BUG_ON(!list_empty(&fmt->sort_list));

	if (fmt->free)
		fmt->free(fmt);
}

void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt, *tmp;

	/* reset output fields */
	perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}

	/* reset sort keys */
	perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	if (verbose > 0 && hists__has(hists, sym)) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

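/*
 * Combined width of the leading overhead columns, i.e. everything
 * before the first sort or dynamic entry.
 */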
unsigned int hists__overhead_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			break;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	return ret;
}

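/*
 * Reset a column to its default width.  Sort entries have their own
 * reset helper and dynamic entries keep their current width.
 */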
void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	if (perf_hpp__is_sort_entry(fmt))
		return perf_hpp__reset_sort_width(fmt, hists);

	if (perf_hpp__is_dynamic_entry(fmt))
		return;

	BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);

	switch (fmt->idx) {
	case PERF_HPP__OVERHEAD:
	case PERF_HPP__OVERHEAD_SYS:
	case PERF_HPP__OVERHEAD_US:
	case PERF_HPP__OVERHEAD_ACC:
		fmt->len = 8;
		break;

	case PERF_HPP__OVERHEAD_GUEST_SYS:
	case PERF_HPP__OVERHEAD_GUEST_US:
		fmt->len = 9;
		break;

	case PERF_HPP__SAMPLES:
	case PERF_HPP__PERIOD:
		fmt->len = 12;
		break;

	default:
		break;
	}
}

void hists__reset_column_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *node;

	hists__for_each_format(hists, fmt)
		perf_hpp__reset_width(fmt, hists);

	/* hierarchy entries have their own hpp list */
	list_for_each_entry(node, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format(&node->hpp, fmt)
			perf_hpp__reset_width(fmt, hists);
	}
}

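/*
 * Parse a comma-separated list of column widths and apply them, in
 * order, as user overrides for the registered output formats.
 */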
void perf_hpp__set_user_width(const char *width_list_str)
{
	struct perf_hpp_fmt *fmt;
	const char *ptr = width_list_str;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		char *p;

		int len = strtol(ptr, &p, 10);
		fmt->user_len = len;

		if (*p == ',')
			ptr = p + 1;
		else
			break;
	}
}

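/*
 * Add a copy of @fmt to the per-level format list of @hists used in
 * hierarchy mode, creating the list node for that level on first use.
 * A level is only marked as skipped if every format added to it
 * should be skipped.
 */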
static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_list_node *node = NULL;
	struct perf_hpp_fmt *fmt_copy;
	bool found = false;
	bool skip = perf_hpp__should_skip(fmt, hists);

	list_for_each_entry(node, &hists->hpp_formats, list) {
		if (node->level == fmt->level) {
			found = true;
			break;
		}
	}

	if (!found) {
		node = malloc(sizeof(*node));
		if (node == NULL)
			return -1;

		node->skip = skip;
		node->level = fmt->level;
		perf_hpp_list__init(&node->hpp);

		hists->nr_hpp_node++;
		list_add_tail(&node->list, &hists->hpp_formats);
	}

	fmt_copy = perf_hpp_fmt__dup(fmt);
	if (fmt_copy == NULL)
		return -1;

	if (!skip)
		node->skip = false;

	list_add_tail(&fmt_copy->list, &node->hpp.fields);
	list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);

	return 0;
}

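/*
 * For hierarchy mode, build the per-hists, per-level format lists
 * from the sort keys of every evsel, skipping dynamic entries that
 * are not defined for a given evsel's hists.
 */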
int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
				  struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	struct perf_hpp_fmt *fmt;
	struct hists *hists;
	int ret;

	if (!symbol_conf.report_hierarchy)
		return 0;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);

		perf_hpp_list__for_each_sort_list(list, fmt) {
			if (perf_hpp__is_dynamic_entry(fmt) &&
			    !perf_hpp__defined_dynamic_entry(fmt, hists))
				continue;

			ret = add_hierarchy_fmt(hists, fmt);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}
823