xref: /openbmc/linux/tools/perf/util/hist.c (revision 3b27d139)
1 #include "util.h"
2 #include "build-id.h"
3 #include "hist.h"
4 #include "session.h"
5 #include "sort.h"
6 #include "evlist.h"
7 #include "evsel.h"
8 #include "annotate.h"
9 #include "ui/progress.h"
10 #include <math.h>
11 
12 static bool hists__filter_entry_by_dso(struct hists *hists,
13 				       struct hist_entry *he);
14 static bool hists__filter_entry_by_thread(struct hists *hists,
15 					  struct hist_entry *he);
16 static bool hists__filter_entry_by_symbol(struct hists *hists,
17 					  struct hist_entry *he);
18 
19 u16 hists__col_len(struct hists *hists, enum hist_column col)
20 {
21 	return hists->col_len[col];
22 }
23 
24 void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
25 {
26 	hists->col_len[col] = len;
27 }
28 
29 bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
30 {
31 	if (len > hists__col_len(hists, col)) {
32 		hists__set_col_len(hists, col, len);
33 		return true;
34 	}
35 	return false;
36 }
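/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * a column only ever grows, so repeated calls with shorter lengths are
 * no-ops and the widest entry seen so far wins:
 *
 *	if (hists__new_col_len(hists, HISTC_COMM, strlen(comm)))
 *		pr_debug("comm column grew to %u\n",
 *			 hists__col_len(hists, HISTC_COMM));
 */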
37 
38 void hists__reset_col_len(struct hists *hists)
39 {
40 	enum hist_column col;
41 
42 	for (col = 0; col < HISTC_NR_COLS; ++col)
43 		hists__set_col_len(hists, col, 0);
44 }
45 
46 static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
47 {
48 	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
49 
50 	if (hists__col_len(hists, dso) < unresolved_col_width &&
51 	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
52 	    !symbol_conf.dso_list)
53 		hists__set_col_len(hists, dso, unresolved_col_width);
54 }
55 
56 void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
57 {
58 	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
59 	int symlen;
60 	u16 len;
61 
62 	/*
63 	 * +4 accounts for '[x] ' priv level info
64 	 * +2 accounts for 0x prefix on raw addresses
65 	 * +3 accounts for ' y ' symtab origin info
66 	 */
67 	if (h->ms.sym) {
68 		symlen = h->ms.sym->namelen + 4;
69 		if (verbose)
70 			symlen += BITS_PER_LONG / 4 + 2 + 3;
71 		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
72 	} else {
73 		symlen = unresolved_col_width + 4 + 2;
74 		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
75 		hists__set_unres_dso_col_len(hists, HISTC_DSO);
76 	}
77 
78 	len = thread__comm_len(h->thread);
79 	if (hists__new_col_len(hists, HISTC_COMM, len))
80 		hists__set_col_len(hists, HISTC_THREAD, len + 6);
81 
82 	if (h->ms.map) {
83 		len = dso__name_len(h->ms.map->dso);
84 		hists__new_col_len(hists, HISTC_DSO, len);
85 	}
86 
87 	if (h->parent)
88 		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
89 
90 	if (h->branch_info) {
91 		if (h->branch_info->from.sym) {
92 			symlen = (int)h->branch_info->from.sym->namelen + 4;
93 			if (verbose)
94 				symlen += BITS_PER_LONG / 4 + 2 + 3;
95 			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
96 
97 			symlen = dso__name_len(h->branch_info->from.map->dso);
98 			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
99 		} else {
100 			symlen = unresolved_col_width + 4 + 2;
101 			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
102 			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
103 		}
104 
105 		if (h->branch_info->to.sym) {
106 			symlen = (int)h->branch_info->to.sym->namelen + 4;
107 			if (verbose)
108 				symlen += BITS_PER_LONG / 4 + 2 + 3;
109 			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
110 
111 			symlen = dso__name_len(h->branch_info->to.map->dso);
112 			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
113 		} else {
114 			symlen = unresolved_col_width + 4 + 2;
115 			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
116 			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
117 		}
118 	}
119 
120 	if (h->mem_info) {
121 		if (h->mem_info->daddr.sym) {
122 			symlen = (int)h->mem_info->daddr.sym->namelen + 4
123 			       + unresolved_col_width + 2;
124 			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
125 					   symlen);
126 			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
127 					   symlen + 1);
128 		} else {
129 			symlen = unresolved_col_width + 4 + 2;
130 			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
131 					   symlen);
132 		}
133 		if (h->mem_info->daddr.map) {
134 			symlen = dso__name_len(h->mem_info->daddr.map->dso);
135 			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
136 					   symlen);
137 		} else {
138 			symlen = unresolved_col_width + 4 + 2;
139 			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
140 		}
141 	} else {
142 		symlen = unresolved_col_width + 4 + 2;
143 		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
144 		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
145 	}
146 
147 	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
148 	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
149 	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
150 	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
151 	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
152 	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
153 
154 	if (h->srcline)
155 		hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));
156 
157 	if (h->srcfile)
158 		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));
159 
160 	if (h->transaction)
161 		hists__new_col_len(hists, HISTC_TRANSACTION,
162 				   hist_entry__transaction_len());
163 }
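/*
 * Worked example of the width accounting above: on a 64-bit build, a
 * 10-character symbol printed in verbose mode reserves 10 (name) + 4
 * ("[k] " priv level) + 16 (BITS_PER_LONG / 4 hex digits) + 2 ("0x")
 * + 3 (" y " symtab origin) = 35 columns for HISTC_SYMBOL.
 */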
164 
165 void hists__output_recalc_col_len(struct hists *hists, int max_rows)
166 {
167 	struct rb_node *next = rb_first(&hists->entries);
168 	struct hist_entry *n;
169 	int row = 0;
170 
171 	hists__reset_col_len(hists);
172 
173 	while (next && row++ < max_rows) {
174 		n = rb_entry(next, struct hist_entry, rb_node);
175 		if (!n->filtered)
176 			hists__calc_col_len(hists, n);
177 		next = rb_next(&n->rb_node);
178 	}
179 }
180 
181 static void he_stat__add_cpumode_period(struct he_stat *he_stat,
182 					unsigned int cpumode, u64 period)
183 {
184 	switch (cpumode) {
185 	case PERF_RECORD_MISC_KERNEL:
186 		he_stat->period_sys += period;
187 		break;
188 	case PERF_RECORD_MISC_USER:
189 		he_stat->period_us += period;
190 		break;
191 	case PERF_RECORD_MISC_GUEST_KERNEL:
192 		he_stat->period_guest_sys += period;
193 		break;
194 	case PERF_RECORD_MISC_GUEST_USER:
195 		he_stat->period_guest_us += period;
196 		break;
197 	default:
198 		break;
199 	}
200 }
201 
202 static void he_stat__add_period(struct he_stat *he_stat, u64 period,
203 				u64 weight)
204 {
206 	he_stat->period		+= period;
207 	he_stat->weight		+= weight;
208 	he_stat->nr_events	+= 1;
209 }
210 
211 static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
212 {
213 	dest->period		+= src->period;
214 	dest->period_sys	+= src->period_sys;
215 	dest->period_us		+= src->period_us;
216 	dest->period_guest_sys	+= src->period_guest_sys;
217 	dest->period_guest_us	+= src->period_guest_us;
218 	dest->nr_events		+= src->nr_events;
219 	dest->weight		+= src->weight;
220 }
221 
222 static void he_stat__decay(struct he_stat *he_stat)
223 {
224 	he_stat->period = (he_stat->period * 7) / 8;
225 	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
226 	/* XXX need decay for weight too? */
227 }
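/*
 * Worked example of the decay: a period of 1000 shrinks to 875, 765,
 * 669, ... and, thanks to the integer division, eventually hits 0, at
 * which point hists__decay_entry() below reports the entry as expired
 * so callers such as perf top can delete it.
 */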
228 
229 static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
230 {
231 	u64 prev_period = he->stat.period;
232 	u64 diff;
233 
234 	if (prev_period == 0)
235 		return true;
236 
237 	he_stat__decay(&he->stat);
238 	if (symbol_conf.cumulate_callchain)
239 		he_stat__decay(he->stat_acc);
240 
241 	diff = prev_period - he->stat.period;
242 
243 	hists->stats.total_period -= diff;
244 	if (!he->filtered)
245 		hists->stats.total_non_filtered_period -= diff;
246 
247 	return he->stat.period == 0;
248 }
249 
250 static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
251 {
252 	rb_erase(&he->rb_node, &hists->entries);
253 
254 	if (sort__need_collapse)
255 		rb_erase(&he->rb_node_in, &hists->entries_collapsed);
256 
257 	--hists->nr_entries;
258 	if (!he->filtered)
259 		--hists->nr_non_filtered_entries;
260 
261 	hist_entry__delete(he);
262 }
263 
264 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
265 {
266 	struct rb_node *next = rb_first(&hists->entries);
267 	struct hist_entry *n;
268 
269 	while (next) {
270 		n = rb_entry(next, struct hist_entry, rb_node);
271 		next = rb_next(&n->rb_node);
272 		if (((zap_user && n->level == '.') ||
273 		     (zap_kernel && n->level != '.') ||
274 		     hists__decay_entry(hists, n))) {
275 			hists__delete_entry(hists, n);
276 		}
277 	}
278 }
279 
280 void hists__delete_entries(struct hists *hists)
281 {
282 	struct rb_node *next = rb_first(&hists->entries);
283 	struct hist_entry *n;
284 
285 	while (next) {
286 		n = rb_entry(next, struct hist_entry, rb_node);
287 		next = rb_next(&n->rb_node);
288 
289 		hists__delete_entry(hists, n);
290 	}
291 }
292 
293 /*
294  * histogram, sorted on item, collects periods
295  */
296 
297 static struct hist_entry *hist_entry__new(struct hist_entry *template,
298 					  bool sample_self)
299 {
300 	size_t callchain_size = 0;
301 	struct hist_entry *he;
302 
303 	if (symbol_conf.use_callchain)
304 		callchain_size = sizeof(struct callchain_root);
305 
306 	he = zalloc(sizeof(*he) + callchain_size);
307 
308 	if (he != NULL) {
309 		*he = *template;
310 
311 		if (symbol_conf.cumulate_callchain) {
312 			he->stat_acc = malloc(sizeof(he->stat));
313 			if (he->stat_acc == NULL) {
314 				free(he);
315 				return NULL;
316 			}
317 			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
318 			if (!sample_self)
319 				memset(&he->stat, 0, sizeof(he->stat));
320 		}
321 
322 		map__get(he->ms.map);
323 
324 		if (he->branch_info) {
325 			/*
326 			 * This branch_info is (part of) an array allocated by
327 			 * sample__resolve_bstack() and will be freed after
328 			 * adding new entries, so we need to save a copy.
329 			 */
330 			he->branch_info = malloc(sizeof(*he->branch_info));
331 			if (he->branch_info == NULL) {
332 				map__zput(he->ms.map);
333 				free(he->stat_acc);
334 				free(he);
335 				return NULL;
336 			}
337 
338 			memcpy(he->branch_info, template->branch_info,
339 			       sizeof(*he->branch_info));
340 
341 			map__get(he->branch_info->from.map);
342 			map__get(he->branch_info->to.map);
343 		}
344 
345 		if (he->mem_info) {
346 			map__get(he->mem_info->iaddr.map);
347 			map__get(he->mem_info->daddr.map);
348 		}
349 
350 		if (symbol_conf.use_callchain)
351 			callchain_init(he->callchain);
352 
353 		INIT_LIST_HEAD(&he->pairs.node);
354 		thread__get(he->thread);
355 	}
356 
357 	return he;
358 }
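/*
 * Ownership note: every map__get()/thread__get() taken here is balanced
 * by a map__zput()/thread__zput() in hist_entry__delete(), including the
 * maps referenced through branch_info and mem_info.
 */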
359 
360 static u8 symbol__parent_filter(const struct symbol *parent)
361 {
362 	if (symbol_conf.exclude_other && parent == NULL)
363 		return 1 << HIST_FILTER__PARENT;
364 	return 0;
365 }
366 
367 static struct hist_entry *hists__findnew_entry(struct hists *hists,
368 					       struct hist_entry *entry,
369 					       struct addr_location *al,
370 					       bool sample_self)
371 {
372 	struct rb_node **p;
373 	struct rb_node *parent = NULL;
374 	struct hist_entry *he;
375 	int64_t cmp;
376 	u64 period = entry->stat.period;
377 	u64 weight = entry->stat.weight;
378 
379 	p = &hists->entries_in->rb_node;
380 
381 	while (*p != NULL) {
382 		parent = *p;
383 		he = rb_entry(parent, struct hist_entry, rb_node_in);
384 
385 		/*
386 		 * Make sure that it receives arguments in the same order as
387 		 * hist_entry__collapse() so that we can use an appropriate
388 		 * function when searching an entry regardless of which sort
389 		 * keys were used.
390 		 */
391 		cmp = hist_entry__cmp(he, entry);
392 
393 		if (!cmp) {
394 			if (sample_self)
395 				he_stat__add_period(&he->stat, period, weight);
396 			if (symbol_conf.cumulate_callchain)
397 				he_stat__add_period(he->stat_acc, period, weight);
398 
399 			/*
400 			 * This mem info was allocated from sample__resolve_mem
401 			 * and will not be used anymore.
402 			 */
403 			zfree(&entry->mem_info);
404 
405 			/* If the map of an existing hist_entry has
406 			 * become out-of-date due to an exec() or
407 			 * similar, update it.  Otherwise we will
408 			 * mis-adjust symbol addresses when computing
409 			 * the history counter to increment.
410 			 */
411 			if (he->ms.map != entry->ms.map) {
412 				map__put(he->ms.map);
413 				he->ms.map = map__get(entry->ms.map);
414 			}
415 			goto out;
416 		}
417 
418 		if (cmp < 0)
419 			p = &(*p)->rb_left;
420 		else
421 			p = &(*p)->rb_right;
422 	}
423 
424 	he = hist_entry__new(entry, sample_self);
425 	if (!he)
426 		return NULL;
427 
428 	hists->nr_entries++;
429 
430 	rb_link_node(&he->rb_node_in, parent, p);
431 	rb_insert_color(&he->rb_node_in, hists->entries_in);
432 out:
433 	if (sample_self)
434 		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
435 	if (symbol_conf.cumulate_callchain)
436 		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
437 	return he;
438 }
439 
440 struct hist_entry *__hists__add_entry(struct hists *hists,
441 				      struct addr_location *al,
442 				      struct symbol *sym_parent,
443 				      struct branch_info *bi,
444 				      struct mem_info *mi,
445 				      u64 period, u64 weight, u64 transaction,
446 				      bool sample_self)
447 {
448 	struct hist_entry entry = {
449 		.thread	= al->thread,
450 		.comm = thread__comm(al->thread),
451 		.ms = {
452 			.map	= al->map,
453 			.sym	= al->sym,
454 		},
455 		.cpu	 = al->cpu,
456 		.cpumode = al->cpumode,
457 		.ip	 = al->addr,
458 		.level	 = al->level,
459 		.stat = {
460 			.nr_events = 1,
461 			.period	= period,
462 			.weight = weight,
463 		},
464 		.parent = sym_parent,
465 		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
466 		.hists	= hists,
467 		.branch_info = bi,
468 		.mem_info = mi,
469 		.transaction = transaction,
470 	};
471 
472 	return hists__findnew_entry(hists, &entry, al, sample_self);
473 }
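/*
 * A minimal calling sketch (hypothetical caller; the real users are the
 * iterators below), adding one resolved sample that accounts to itself:
 *
 *	he = __hists__add_entry(hists, &al, NULL, NULL, NULL,
 *				sample->period, sample->weight,
 *				sample->transaction, true);
 *	if (he == NULL)
 *		return -ENOMEM;
 */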
474 
475 static int
476 iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
477 		    struct addr_location *al __maybe_unused)
478 {
479 	return 0;
480 }
481 
482 static int
483 iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
484 			struct addr_location *al __maybe_unused)
485 {
486 	return 0;
487 }
488 
489 static int
490 iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
491 {
492 	struct perf_sample *sample = iter->sample;
493 	struct mem_info *mi;
494 
495 	mi = sample__resolve_mem(sample, al);
496 	if (mi == NULL)
497 		return -ENOMEM;
498 
499 	iter->priv = mi;
500 	return 0;
501 }
502 
503 static int
504 iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
505 {
506 	u64 cost;
507 	struct mem_info *mi = iter->priv;
508 	struct hists *hists = evsel__hists(iter->evsel);
509 	struct hist_entry *he;
510 
511 	if (mi == NULL)
512 		return -EINVAL;
513 
514 	cost = iter->sample->weight;
515 	if (!cost)
516 		cost = 1;
517 
518 	/*
519 	 * We must pass period=weight in order to get the correct
520 	 * sorting from hists__collapse_resort(), which is based solely
521 	 * on periods. We want sorting to be done on nr_events * weight,
522 	 * and this is indirectly achieved by passing period=weight here
523 	 * and via the he_stat__add_period() function.
524 	 */
525 	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
526 				cost, cost, 0, true);
527 	if (!he)
528 		return -ENOMEM;
529 
530 	iter->he = he;
531 	return 0;
532 }
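/*
 * Worked example of the period=weight trick above: two samples hitting
 * the same entry with weights 30 and 70 collapse into period 100, so
 * the resort orders entries by total cost rather than by sample count.
 */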
533 
534 static int
535 iter_finish_mem_entry(struct hist_entry_iter *iter,
536 		      struct addr_location *al __maybe_unused)
537 {
538 	struct perf_evsel *evsel = iter->evsel;
539 	struct hists *hists = evsel__hists(evsel);
540 	struct hist_entry *he = iter->he;
541 	int err = -EINVAL;
542 
543 	if (he == NULL)
544 		goto out;
545 
546 	hists__inc_nr_samples(hists, he->filtered);
547 
548 	err = hist_entry__append_callchain(he, iter->sample);
549 
550 out:
551 	/*
552 	 * We don't need to free iter->priv (mem_info) here since the mem info
553 	 * was either already freed in hists__findnew_entry() or passed to a
554 	 * new hist entry by hist_entry__new().
555 	 */
556 	iter->priv = NULL;
557 
558 	iter->he = NULL;
559 	return err;
560 }
561 
562 static int
563 iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
564 {
565 	struct branch_info *bi;
566 	struct perf_sample *sample = iter->sample;
567 
568 	bi = sample__resolve_bstack(sample, al);
569 	if (!bi)
570 		return -ENOMEM;
571 
572 	iter->curr = 0;
573 	iter->total = sample->branch_stack->nr;
574 
575 	iter->priv = bi;
576 	return 0;
577 }
578 
579 static int
580 iter_add_single_branch_entry(struct hist_entry_iter *iter,
581 			     struct addr_location *al __maybe_unused)
582 {
583 	/* to avoid calling the callback function */
584 	iter->he = NULL;
585 
586 	return 0;
587 }
588 
589 static int
590 iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
591 {
592 	struct branch_info *bi = iter->priv;
593 	int i = iter->curr;
594 
595 	if (bi == NULL)
596 		return 0;
597 
598 	if (iter->curr >= iter->total)
599 		return 0;
600 
601 	al->map = bi[i].to.map;
602 	al->sym = bi[i].to.sym;
603 	al->addr = bi[i].to.addr;
604 	return 1;
605 }
606 
607 static int
608 iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
609 {
610 	struct branch_info *bi;
611 	struct perf_evsel *evsel = iter->evsel;
612 	struct hists *hists = evsel__hists(evsel);
613 	struct hist_entry *he = NULL;
614 	int i = iter->curr;
615 	int err = 0;
616 
617 	bi = iter->priv;
618 
619 	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
620 		goto out;
621 
622 	/*
623 	 * The report shows the percentage of total branches captured
624 	 * and not events sampled. Thus we use a pseudo period of 1.
625 	 */
626 	he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
627 				1, bi[i].flags.cycles ? bi[i].flags.cycles : 1,
628 				0, true);
629 	if (he == NULL)
630 		return -ENOMEM;
631 
632 	hists__inc_nr_samples(hists, he->filtered);
633 
634 out:
635 	iter->he = he;
636 	iter->curr++;
637 	return err;
638 }
639 
640 static int
641 iter_finish_branch_entry(struct hist_entry_iter *iter,
642 			 struct addr_location *al __maybe_unused)
643 {
644 	zfree(&iter->priv);
645 	iter->he = NULL;
646 
647 	return iter->curr >= iter->total ? 0 : -1;
648 }
649 
650 static int
651 iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
652 			  struct addr_location *al __maybe_unused)
653 {
654 	return 0;
655 }
656 
657 static int
658 iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
659 {
660 	struct perf_evsel *evsel = iter->evsel;
661 	struct perf_sample *sample = iter->sample;
662 	struct hist_entry *he;
663 
664 	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
665 				sample->period, sample->weight,
666 				sample->transaction, true);
667 	if (he == NULL)
668 		return -ENOMEM;
669 
670 	iter->he = he;
671 	return 0;
672 }
673 
674 static int
675 iter_finish_normal_entry(struct hist_entry_iter *iter,
676 			 struct addr_location *al __maybe_unused)
677 {
678 	struct hist_entry *he = iter->he;
679 	struct perf_evsel *evsel = iter->evsel;
680 	struct perf_sample *sample = iter->sample;
681 
682 	if (he == NULL)
683 		return 0;
684 
685 	iter->he = NULL;
686 
687 	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);
688 
689 	return hist_entry__append_callchain(he, sample);
690 }
691 
692 static int
693 iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
694 			      struct addr_location *al __maybe_unused)
695 {
696 	struct hist_entry **he_cache;
697 
698 	callchain_cursor_commit(&callchain_cursor);
699 
700 	/*
701 	 * This is for detecting cycles or recursion so that they're
702 	 * accumulated only once, preventing entries from exceeding
703 	 * 100% overhead.
704 	 */
705 	he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1));
706 	if (he_cache == NULL)
707 		return -ENOMEM;
708 
709 	iter->priv = he_cache;
710 	iter->curr = 0;
711 
712 	return 0;
713 }
714 
715 static int
716 iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
717 				 struct addr_location *al)
718 {
719 	struct perf_evsel *evsel = iter->evsel;
720 	struct hists *hists = evsel__hists(evsel);
721 	struct perf_sample *sample = iter->sample;
722 	struct hist_entry **he_cache = iter->priv;
723 	struct hist_entry *he;
724 	int err = 0;
725 
726 	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
727 				sample->period, sample->weight,
728 				sample->transaction, true);
729 	if (he == NULL)
730 		return -ENOMEM;
731 
732 	iter->he = he;
733 	he_cache[iter->curr++] = he;
734 
735 	hist_entry__append_callchain(he, sample);
736 
737 	/*
738 	 * We need to re-initialize the cursor since callchain_append()
739 	 * advanced the cursor to the end.
740 	 */
741 	callchain_cursor_commit(&callchain_cursor);
742 
743 	hists__inc_nr_samples(hists, he->filtered);
744 
745 	return err;
746 }
747 
748 static int
749 iter_next_cumulative_entry(struct hist_entry_iter *iter,
750 			   struct addr_location *al)
751 {
752 	struct callchain_cursor_node *node;
753 
754 	node = callchain_cursor_current(&callchain_cursor);
755 	if (node == NULL)
756 		return 0;
757 
758 	return fill_callchain_info(al, node, iter->hide_unresolved);
759 }
760 
761 static int
762 iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
763 			       struct addr_location *al)
764 {
765 	struct perf_evsel *evsel = iter->evsel;
766 	struct perf_sample *sample = iter->sample;
767 	struct hist_entry **he_cache = iter->priv;
768 	struct hist_entry *he;
769 	struct hist_entry he_tmp = {
770 		.hists = evsel__hists(evsel),
771 		.cpu = al->cpu,
772 		.thread = al->thread,
773 		.comm = thread__comm(al->thread),
774 		.ip = al->addr,
775 		.ms = {
776 			.map = al->map,
777 			.sym = al->sym,
778 		},
779 		.parent = iter->parent,
780 	};
781 	int i;
782 	struct callchain_cursor cursor;
783 
784 	callchain_cursor_snapshot(&cursor, &callchain_cursor);
785 
786 	callchain_cursor_advance(&callchain_cursor);
787 
788 	/*
789 	 * Check if there are duplicate entries in the callchain.
790 	 * It's possible that it has cycles or recursive calls.
791 	 */
792 	for (i = 0; i < iter->curr; i++) {
793 		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
794 			/* to avoid calling the callback function */
795 			iter->he = NULL;
796 			return 0;
797 		}
798 	}
799 
800 	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
801 				sample->period, sample->weight,
802 				sample->transaction, false);
803 	if (he == NULL)
804 		return -ENOMEM;
805 
806 	iter->he = he;
807 	he_cache[iter->curr++] = he;
808 
809 	if (symbol_conf.use_callchain)
810 		callchain_append(he->callchain, &cursor, sample->period);
811 	return 0;
812 }
813 
814 static int
815 iter_finish_cumulative_entry(struct hist_entry_iter *iter,
816 			     struct addr_location *al __maybe_unused)
817 {
818 	zfree(&iter->priv);
819 	iter->he = NULL;
820 
821 	return 0;
822 }
823 
824 const struct hist_iter_ops hist_iter_mem = {
825 	.prepare_entry 		= iter_prepare_mem_entry,
826 	.add_single_entry 	= iter_add_single_mem_entry,
827 	.next_entry 		= iter_next_nop_entry,
828 	.add_next_entry 	= iter_add_next_nop_entry,
829 	.finish_entry 		= iter_finish_mem_entry,
830 };
831 
832 const struct hist_iter_ops hist_iter_branch = {
833 	.prepare_entry 		= iter_prepare_branch_entry,
834 	.add_single_entry 	= iter_add_single_branch_entry,
835 	.next_entry 		= iter_next_branch_entry,
836 	.add_next_entry 	= iter_add_next_branch_entry,
837 	.finish_entry 		= iter_finish_branch_entry,
838 };
839 
840 const struct hist_iter_ops hist_iter_normal = {
841 	.prepare_entry 		= iter_prepare_normal_entry,
842 	.add_single_entry 	= iter_add_single_normal_entry,
843 	.next_entry 		= iter_next_nop_entry,
844 	.add_next_entry 	= iter_add_next_nop_entry,
845 	.finish_entry 		= iter_finish_normal_entry,
846 };
847 
848 const struct hist_iter_ops hist_iter_cumulative = {
849 	.prepare_entry 		= iter_prepare_cumulative_entry,
850 	.add_single_entry 	= iter_add_single_cumulative_entry,
851 	.next_entry 		= iter_next_cumulative_entry,
852 	.add_next_entry 	= iter_add_next_cumulative_entry,
853 	.finish_entry 		= iter_finish_cumulative_entry,
854 };
855 
856 int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
857 			 int max_stack_depth, void *arg)
858 {
859 	int err, err2;
860 
861 	err = sample__resolve_callchain(iter->sample, &iter->parent,
862 					iter->evsel, al, max_stack_depth);
863 	if (err)
864 		return err;
865 
866 	err = iter->ops->prepare_entry(iter, al);
867 	if (err)
868 		goto out;
869 
870 	err = iter->ops->add_single_entry(iter, al);
871 	if (err)
872 		goto out;
873 
874 	if (iter->he && iter->add_entry_cb) {
875 		err = iter->add_entry_cb(iter, al, true, arg);
876 		if (err)
877 			goto out;
878 	}
879 
880 	while (iter->ops->next_entry(iter, al)) {
881 		err = iter->ops->add_next_entry(iter, al);
882 		if (err)
883 			break;
884 
885 		if (iter->he && iter->add_entry_cb) {
886 			err = iter->add_entry_cb(iter, al, false, arg);
887 			if (err)
888 				goto out;
889 		}
890 	}
891 
892 out:
893 	err2 = iter->ops->finish_entry(iter, al);
894 	if (!err)
895 		err = err2;
896 
897 	return err;
898 }
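/*
 * A minimal driver sketch, modeled on perf report's sample handler
 * (the surrounding variables are hypothetical):
 *
 *	struct hist_entry_iter iter = {
 *		.evsel	= evsel,
 *		.sample	= sample,
 *		.ops	= &hist_iter_normal,
 *	};
 *
 *	err = hist_entry_iter__add(&iter, &al, PERF_MAX_STACK_DEPTH, NULL);
 */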
899 
900 int64_t
901 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
902 {
903 	struct perf_hpp_fmt *fmt;
904 	int64_t cmp = 0;
905 
906 	perf_hpp__for_each_sort_list(fmt) {
907 		if (perf_hpp__should_skip(fmt))
908 			continue;
909 
910 		cmp = fmt->cmp(fmt, left, right);
911 		if (cmp)
912 			break;
913 	}
914 
915 	return cmp;
916 }
917 
918 int64_t
919 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
920 {
921 	struct perf_hpp_fmt *fmt;
922 	int64_t cmp = 0;
923 
924 	perf_hpp__for_each_sort_list(fmt) {
925 		if (perf_hpp__should_skip(fmt))
926 			continue;
927 
928 		cmp = fmt->collapse(fmt, left, right);
929 		if (cmp)
930 			break;
931 	}
932 
933 	return cmp;
934 }
935 
936 void hist_entry__delete(struct hist_entry *he)
937 {
938 	thread__zput(he->thread);
939 	map__zput(he->ms.map);
940 
941 	if (he->branch_info) {
942 		map__zput(he->branch_info->from.map);
943 		map__zput(he->branch_info->to.map);
944 		zfree(&he->branch_info);
945 	}
946 
947 	if (he->mem_info) {
948 		map__zput(he->mem_info->iaddr.map);
949 		map__zput(he->mem_info->daddr.map);
950 		zfree(&he->mem_info);
951 	}
952 
953 	zfree(&he->stat_acc);
954 	free_srcline(he->srcline);
955 	if (he->srcfile && he->srcfile[0])
956 		free(he->srcfile);
957 	free_callchain(he->callchain);
958 	free(he);
959 }
960 
961 /*
962  * collapse the histogram
963  */
964 
965 static bool hists__collapse_insert_entry(struct hists *hists,
966 					 struct rb_root *root,
967 					 struct hist_entry *he)
968 {
969 	struct rb_node **p = &root->rb_node;
970 	struct rb_node *parent = NULL;
971 	struct hist_entry *iter;
972 	int64_t cmp;
973 
974 	while (*p != NULL) {
975 		parent = *p;
976 		iter = rb_entry(parent, struct hist_entry, rb_node_in);
977 
978 		cmp = hist_entry__collapse(iter, he);
979 
980 		if (!cmp) {
981 			he_stat__add_stat(&iter->stat, &he->stat);
982 			if (symbol_conf.cumulate_callchain)
983 				he_stat__add_stat(iter->stat_acc, he->stat_acc);
984 
985 			if (symbol_conf.use_callchain) {
986 				callchain_cursor_reset(&callchain_cursor);
987 				callchain_merge(&callchain_cursor,
988 						iter->callchain,
989 						he->callchain);
990 			}
991 			hist_entry__delete(he);
992 			return false;
993 		}
994 
995 		if (cmp < 0)
996 			p = &(*p)->rb_left;
997 		else
998 			p = &(*p)->rb_right;
999 	}
1000 	hists->nr_entries++;
1001 
1002 	rb_link_node(&he->rb_node_in, parent, p);
1003 	rb_insert_color(&he->rb_node_in, root);
1004 	return true;
1005 }
1006 
1007 static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
1008 {
1009 	struct rb_root *root;
1010 
1011 	pthread_mutex_lock(&hists->lock);
1012 
1013 	root = hists->entries_in;
1014 	if (++hists->entries_in > &hists->entries_in_array[1])
1015 		hists->entries_in = &hists->entries_in_array[0];
1016 
1017 	pthread_mutex_unlock(&hists->lock);
1018 
1019 	return root;
1020 }
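/*
 * The two trees in hists->entries_in_array form a double buffer: under
 * hists->lock, the collapse pass takes the tree that has been receiving
 * new entries and flips entries_in to the other one, so insertion (e.g.
 * from perf top's processing thread) can go on while this tree drains.
 */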
1021 
1022 static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
1023 {
1024 	hists__filter_entry_by_dso(hists, he);
1025 	hists__filter_entry_by_thread(hists, he);
1026 	hists__filter_entry_by_symbol(hists, he);
1027 }
1028 
1029 void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1030 {
1031 	struct rb_root *root;
1032 	struct rb_node *next;
1033 	struct hist_entry *n;
1034 
1035 	if (!sort__need_collapse)
1036 		return;
1037 
1038 	hists->nr_entries = 0;
1039 
1040 	root = hists__get_rotate_entries_in(hists);
1041 
1042 	next = rb_first(root);
1043 
1044 	while (next) {
1045 		if (session_done())
1046 			break;
1047 		n = rb_entry(next, struct hist_entry, rb_node_in);
1048 		next = rb_next(&n->rb_node_in);
1049 
1050 		rb_erase(&n->rb_node_in, root);
1051 		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
1052 			/*
1053 			 * If it wasn't combined with one of the entries already
1054 			 * collapsed, we need to apply the filters that may have
1055 			 * been set by, say, the hist_browser.
1056 			 */
1057 			hists__apply_filters(hists, n);
1058 		}
1059 		if (prog)
1060 			ui_progress__update(prog, 1);
1061 	}
1062 }
1063 
1064 static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
1065 {
1066 	struct perf_hpp_fmt *fmt;
1067 	int64_t cmp = 0;
1068 
1069 	perf_hpp__for_each_sort_list(fmt) {
1070 		if (perf_hpp__should_skip(fmt))
1071 			continue;
1072 
1073 		cmp = fmt->sort(fmt, a, b);
1074 		if (cmp)
1075 			break;
1076 	}
1077 
1078 	return cmp;
1079 }
1080 
1081 static void hists__reset_filter_stats(struct hists *hists)
1082 {
1083 	hists->nr_non_filtered_entries = 0;
1084 	hists->stats.total_non_filtered_period = 0;
1085 }
1086 
1087 void hists__reset_stats(struct hists *hists)
1088 {
1089 	hists->nr_entries = 0;
1090 	hists->stats.total_period = 0;
1091 
1092 	hists__reset_filter_stats(hists);
1093 }
1094 
1095 static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
1096 {
1097 	hists->nr_non_filtered_entries++;
1098 	hists->stats.total_non_filtered_period += h->stat.period;
1099 }
1100 
1101 void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1102 {
1103 	if (!h->filtered)
1104 		hists__inc_filter_stats(hists, h);
1105 
1106 	hists->nr_entries++;
1107 	hists->stats.total_period += h->stat.period;
1108 }
1109 
1110 static void __hists__insert_output_entry(struct rb_root *entries,
1111 					 struct hist_entry *he,
1112 					 u64 min_callchain_hits,
1113 					 bool use_callchain)
1114 {
1115 	struct rb_node **p = &entries->rb_node;
1116 	struct rb_node *parent = NULL;
1117 	struct hist_entry *iter;
1118 
1119 	if (use_callchain)
1120 		callchain_param.sort(&he->sorted_chain, he->callchain,
1121 				      min_callchain_hits, &callchain_param);
1122 
1123 	while (*p != NULL) {
1124 		parent = *p;
1125 		iter = rb_entry(parent, struct hist_entry, rb_node);
1126 
1127 		if (hist_entry__sort(he, iter) > 0)
1128 			p = &(*p)->rb_left;
1129 		else
1130 			p = &(*p)->rb_right;
1131 	}
1132 
1133 	rb_link_node(&he->rb_node, parent, p);
1134 	rb_insert_color(&he->rb_node, entries);
1135 }
1136 
1137 void hists__output_resort(struct hists *hists, struct ui_progress *prog)
1138 {
1139 	struct rb_root *root;
1140 	struct rb_node *next;
1141 	struct hist_entry *n;
1142 	u64 min_callchain_hits;
1143 	struct perf_evsel *evsel = hists_to_evsel(hists);
1144 	bool use_callchain;
1145 
1146 	if (evsel && !symbol_conf.show_ref_callgraph)
1147 		use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
1148 	else
1149 		use_callchain = symbol_conf.use_callchain;
1150 
1151 	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);
1152 
1153 	if (sort__need_collapse)
1154 		root = &hists->entries_collapsed;
1155 	else
1156 		root = hists->entries_in;
1157 
1158 	next = rb_first(root);
1159 	hists->entries = RB_ROOT;
1160 
1161 	hists__reset_stats(hists);
1162 	hists__reset_col_len(hists);
1163 
1164 	while (next) {
1165 		n = rb_entry(next, struct hist_entry, rb_node_in);
1166 		next = rb_next(&n->rb_node_in);
1167 
1168 		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
1169 		hists__inc_stats(hists, n);
1170 
1171 		if (!n->filtered)
1172 			hists__calc_col_len(hists, n);
1173 
1174 		if (prog)
1175 			ui_progress__update(prog, 1);
1176 	}
1177 }
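/*
 * Worked example of the callchain threshold above: with a total period
 * of 1,000,000 and callchain_param.min_percent = 0.5, min_callchain_hits
 * is 5,000, so the configured callchain_param.sort() callback can prune
 * chains whose hits fall below that value.
 */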
1178 
1179 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
1180 				       enum hist_filter filter)
1181 {
1182 	h->filtered &= ~(1 << filter);
1183 	if (h->filtered)
1184 		return;
1185 
1186 	/* force fold unfiltered entry for simplicity */
1187 	h->unfolded = false;
1188 	h->row_offset = 0;
1189 	h->nr_rows = 0;
1190 
1191 	hists->stats.nr_non_filtered_samples += h->stat.nr_events;
1192 
1193 	hists__inc_filter_stats(hists, h);
1194 	hists__calc_col_len(hists, h);
1195 }
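/*
 * h->filtered is a bitmask with one bit per enum hist_filter, so an
 * entry whose DSO bit was just cleared above stays hidden while, say,
 * its thread filter bit is still set; only when the whole mask reaches
 * zero is the entry counted back into the non-filtered stats.
 */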
1196 
1197 
1198 static bool hists__filter_entry_by_dso(struct hists *hists,
1199 				       struct hist_entry *he)
1200 {
1201 	if (hists->dso_filter != NULL &&
1202 	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
1203 		he->filtered |= (1 << HIST_FILTER__DSO);
1204 		return true;
1205 	}
1206 
1207 	return false;
1208 }
1209 
1210 void hists__filter_by_dso(struct hists *hists)
1211 {
1212 	struct rb_node *nd;
1213 
1214 	hists->stats.nr_non_filtered_samples = 0;
1215 
1216 	hists__reset_filter_stats(hists);
1217 	hists__reset_col_len(hists);
1218 
1219 	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
1220 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1221 
1222 		if (symbol_conf.exclude_other && !h->parent)
1223 			continue;
1224 
1225 		if (hists__filter_entry_by_dso(hists, h))
1226 			continue;
1227 
1228 		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
1229 	}
1230 }
1231 
1232 static bool hists__filter_entry_by_thread(struct hists *hists,
1233 					  struct hist_entry *he)
1234 {
1235 	if (hists->thread_filter != NULL &&
1236 	    he->thread != hists->thread_filter) {
1237 		he->filtered |= (1 << HIST_FILTER__THREAD);
1238 		return true;
1239 	}
1240 
1241 	return false;
1242 }
1243 
1244 void hists__filter_by_thread(struct hists *hists)
1245 {
1246 	struct rb_node *nd;
1247 
1248 	hists->stats.nr_non_filtered_samples = 0;
1249 
1250 	hists__reset_filter_stats(hists);
1251 	hists__reset_col_len(hists);
1252 
1253 	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
1254 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1255 
1256 		if (hists__filter_entry_by_thread(hists, h))
1257 			continue;
1258 
1259 		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
1260 	}
1261 }
1262 
1263 static bool hists__filter_entry_by_symbol(struct hists *hists,
1264 					  struct hist_entry *he)
1265 {
1266 	if (hists->symbol_filter_str != NULL &&
1267 	    (!he->ms.sym || strstr(he->ms.sym->name,
1268 				   hists->symbol_filter_str) == NULL)) {
1269 		he->filtered |= (1 << HIST_FILTER__SYMBOL);
1270 		return true;
1271 	}
1272 
1273 	return false;
1274 }
1275 
1276 void hists__filter_by_symbol(struct hists *hists)
1277 {
1278 	struct rb_node *nd;
1279 
1280 	hists->stats.nr_non_filtered_samples = 0;
1281 
1282 	hists__reset_filter_stats(hists);
1283 	hists__reset_col_len(hists);
1284 
1285 	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
1286 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1287 
1288 		if (hists__filter_entry_by_symbol(hists, h))
1289 			continue;
1290 
1291 		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
1292 	}
1293 }
1294 
1295 void events_stats__inc(struct events_stats *stats, u32 type)
1296 {
1297 	++stats->nr_events[0];
1298 	++stats->nr_events[type];
1299 }
1300 
1301 void hists__inc_nr_events(struct hists *hists, u32 type)
1302 {
1303 	events_stats__inc(&hists->stats, type);
1304 }
1305 
1306 void hists__inc_nr_samples(struct hists *hists, bool filtered)
1307 {
1308 	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
1309 	if (!filtered)
1310 		hists->stats.nr_non_filtered_samples++;
1311 }
1312 
1313 static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
1314 						 struct hist_entry *pair)
1315 {
1316 	struct rb_root *root;
1317 	struct rb_node **p;
1318 	struct rb_node *parent = NULL;
1319 	struct hist_entry *he;
1320 	int64_t cmp;
1321 
1322 	if (sort__need_collapse)
1323 		root = &hists->entries_collapsed;
1324 	else
1325 		root = hists->entries_in;
1326 
1327 	p = &root->rb_node;
1328 
1329 	while (*p != NULL) {
1330 		parent = *p;
1331 		he = rb_entry(parent, struct hist_entry, rb_node_in);
1332 
1333 		cmp = hist_entry__collapse(he, pair);
1334 
1335 		if (!cmp)
1336 			goto out;
1337 
1338 		if (cmp < 0)
1339 			p = &(*p)->rb_left;
1340 		else
1341 			p = &(*p)->rb_right;
1342 	}
1343 
1344 	he = hist_entry__new(pair, true);
1345 	if (he) {
1346 		memset(&he->stat, 0, sizeof(he->stat));
1347 		he->hists = hists;
1348 		rb_link_node(&he->rb_node_in, parent, p);
1349 		rb_insert_color(&he->rb_node_in, root);
1350 		hists__inc_stats(hists, he);
1351 		he->dummy = true;
1352 	}
1353 out:
1354 	return he;
1355 }
1356 
1357 static struct hist_entry *hists__find_entry(struct hists *hists,
1358 					    struct hist_entry *he)
1359 {
1360 	struct rb_node *n;
1361 
1362 	if (sort__need_collapse)
1363 		n = hists->entries_collapsed.rb_node;
1364 	else
1365 		n = hists->entries_in->rb_node;
1366 
1367 	while (n) {
1368 		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
1369 		int64_t cmp = hist_entry__collapse(iter, he);
1370 
1371 		if (cmp < 0)
1372 			n = n->rb_left;
1373 		else if (cmp > 0)
1374 			n = n->rb_right;
1375 		else
1376 			return iter;
1377 	}
1378 
1379 	return NULL;
1380 }
1381 
1382 /*
1383  * Look for pairs to link to the leader buckets (hist_entries):
1384  */
1385 void hists__match(struct hists *leader, struct hists *other)
1386 {
1387 	struct rb_root *root;
1388 	struct rb_node *nd;
1389 	struct hist_entry *pos, *pair;
1390 
1391 	if (sort__need_collapse)
1392 		root = &leader->entries_collapsed;
1393 	else
1394 		root = leader->entries_in;
1395 
1396 	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
1397 		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
1398 		pair = hists__find_entry(other, pos);
1399 
1400 		if (pair)
1401 			hist_entry__add_pair(pair, pos);
1402 	}
1403 }
1404 
1405 /*
1406  * Look for entries in the other hists that are not present in the leader.
1407  * If we find any, add a dummy entry to the leader hists, with period=0 and
1408  * nr_events=0, to serve as the list header.
1409  */
1410 int hists__link(struct hists *leader, struct hists *other)
1411 {
1412 	struct rb_root *root;
1413 	struct rb_node *nd;
1414 	struct hist_entry *pos, *pair;
1415 
1416 	if (sort__need_collapse)
1417 		root = &other->entries_collapsed;
1418 	else
1419 		root = other->entries_in;
1420 
1421 	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
1422 		pos = rb_entry(nd, struct hist_entry, rb_node_in);
1423 
1424 		if (!hist_entry__has_pairs(pos)) {
1425 			pair = hists__add_dummy_entry(leader, pos);
1426 			if (pair == NULL)
1427 				return -1;
1428 			hist_entry__add_pair(pos, pair);
1429 		}
1430 	}
1431 
1432 	return 0;
1433 }
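/*
 * hists__match() and hists__link() are meant to be used together, e.g.
 * by perf diff: match first pairs up the entries both sides share, then
 * link adds zeroed dummy entries to the leader for entries that were
 * seen only in the other hists.
 */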
1434 
1435 void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
1436 			  struct perf_sample *sample, bool nonany_branch_mode)
1437 {
1438 	struct branch_info *bi;
1439 
1440 	/* If we have branch cycles, always annotate them. */
1441 	if (bs && bs->nr && bs->entries[0].flags.cycles) {
1442 		int i;
1443 
1444 		bi = sample__resolve_bstack(sample, al);
1445 		if (bi) {
1446 			struct addr_map_symbol *prev = NULL;
1447 
1448 			/*
1449 			 * Ignore errors, still want to process the
1450 			 * other entries.
1451 			 *
1452 			 * For non-standard branch modes, always
1453 			 * force no IPC (prev == NULL).
1454 			 *
1455 			 * Note that perf stores branches reversed from
1456 			 * program order!
1457 			 */
1458 			for (i = bs->nr - 1; i >= 0; i--) {
1459 				addr_map_symbol__account_cycles(&bi[i].from,
1460 					nonany_branch_mode ? NULL : prev,
1461 					bi[i].flags.cycles);
1462 				prev = &bi[i].to;
1463 			}
1464 			free(bi);
1465 		}
1466 	}
1467 }
1468 
1469 size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
1470 {
1471 	struct perf_evsel *pos;
1472 	size_t ret = 0;
1473 
1474 	evlist__for_each(evlist, pos) {
1475 		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
1476 		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
1477 	}
1478 
1479 	return ret;
1480 }
1481 
1482 
1483 u64 hists__total_period(struct hists *hists)
1484 {
1485 	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
1486 		hists->stats.total_period;
1487 }
1488 
1489 int parse_filter_percentage(const struct option *opt __maybe_unused,
1490 			    const char *arg, int unset __maybe_unused)
1491 {
1492 	if (!strcmp(arg, "relative"))
1493 		symbol_conf.filter_relative = true;
1494 	else if (!strcmp(arg, "absolute"))
1495 		symbol_conf.filter_relative = false;
1496 	else
1497 		return -1;
1498 
1499 	return 0;
1500 }
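/*
 * The same knob is reachable from the config file via perf_hist_config()
 * below; a sketch of the corresponding ~/.perfconfig section:
 *
 *	[hist]
 *		percentage = relative
 */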
1501 
1502 int perf_hist_config(const char *var, const char *value)
1503 {
1504 	if (!strcmp(var, "hist.percentage"))
1505 		return parse_filter_percentage(NULL, value, 0);
1506 
1507 	return 0;
1508 }
1509 
1510 static int hists_evsel__init(struct perf_evsel *evsel)
1511 {
1512 	struct hists *hists = evsel__hists(evsel);
1513 
1514 	memset(hists, 0, sizeof(*hists));
1515 	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
1516 	hists->entries_in = &hists->entries_in_array[0];
1517 	hists->entries_collapsed = RB_ROOT;
1518 	hists->entries = RB_ROOT;
1519 	pthread_mutex_init(&hists->lock, NULL);
1520 	return 0;
1521 }
1522 
1523 /*
1524  * XXX We probably need a hists_evsel__exit() to free the hist_entries
1525  * stored in the rbtree...
1526  */
1527 
1528 int hists__init(void)
1529 {
1530 	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
1531 					    hists_evsel__init, NULL);
1532 	if (err)
1533 		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
1534 
1535 	return err;
1536 }
1537