/* xref: /openbmc/linux/tools/perf/util/hist.c (revision 6cbbfe1c) */
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "ui/progress.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}
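/*
 * Usage sketch (illustrative only, not a call site in this file): widen
 * the symbol column when a longer name shows up, e.g. while scanning
 * entries for display:
 *
 *	if (hists__new_col_len(hists, HISTC_SYMBOL, strlen(name) + 4))
 *		pr_debug("symbol column widened to %u\n",
 *			 hists__col_len(hists, HISTC_SYMBOL));
 *
 * hists__new_col_len() only ever grows a column; hists__reset_col_len()
 * below shrinks them all back to zero before a recalculation.
 */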
void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}

static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
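/*
 * Worked example (illustrative note, not from the original source): the
 * 7/8 factor above makes a stale entry decay geometrically. If an entry
 * stops receiving samples, after n decay rounds its period is
 * period * (7/8)^n, so it drops below 1% of its original value after
 * roughly 35 rounds and is then pruned by hists__decay_entries() once
 * it reaches zero.
 */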
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it
		 * here in case it gets new samples; we'll eventually free
		 * it when the user stops browsing and it again gets fully
		 * decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			--hists->nr_entries;
			if (!n->filtered)
				--hists->nr_non_filtered_entries;

			hist_entry__free(n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &hists->entries);

		if (sort__need_collapse)
			rb_erase(&n->rb_node_in, &hists->entries_collapsed);

		--hists->nr_entries;
		if (!n->filtered)
			--hists->nr_non_filtered_entries;

		hist_entry__free(n);
	}
}

/*
 * histogram, sorted on item, collects periods
 */
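/*
 * Allocate a hist_entry initialized from @template. When callchains are
 * in use, the callchain_root lives in the same allocation, right after
 * the entry itself. Resources the template merely borrows (branch_info,
 * stat_acc) are duplicated so the new entry owns its own copies.
 */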
static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * sample__resolve_bstack() and will be freed after
			 * adding new entries.  So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}
static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}
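/*
 * Insert @entry into hists->entries_in, or, if an entry comparing equal
 * under the current sort keys already exists, merge the new period into
 * it instead of allocating a duplicate.
 */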
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order
		 * as hist_entry__collapse() so that we can use an
		 * appropriate function when searching an entry regardless
		 * of which sort keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self)
				he_stat__add_period(&he->stat, period, weight);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/*
			 * If the map of an existing hist_entry has become
			 * out-of-date due to an exec() or similar, update it.
			 * Otherwise we will mis-adjust symbol addresses when
			 * computing the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}
struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      u64 period, u64 weight, u64 transaction,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= period,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = transaction,
	};

	return add_hist_entry(hists, &entry, al, sample_self);
}
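/*
 * Caller sketch (hypothetical, for illustration; the real call sites are
 * the iterator callbacks below and the perf builtins): add one sample
 * with no branch or mem info and a pseudo weight of 1:
 *
 *	struct hist_entry *he;
 *
 *	he = __hists__add_entry(hists, al, NULL, NULL, NULL,
 *				sample->period, 1, 0, true);
 *	if (he == NULL)
 *		return -ENOMEM;
 */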
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = iter->sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * We must pass period=weight in order to get the correct sorting
	 * from hists__collapse_resort(), which is based solely on
	 * periods. We want the sorting to be done on nr_events * weight,
	 * and this is indirectly achieved by passing period=weight here
	 * and in the he_stat__add_period() function.
	 */
	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
				cost, cost, 0, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}
static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem
	 * info was either already freed in add_hist_entry() or passed to
	 * a new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}
static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}
static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
				1, 1, 0, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions, so that they're
	 * accumulated only once and no entry ends up with more than 100%
	 * overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}
static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}
static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there are duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}
const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};
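/*
 * Selection sketch (hypothetical caller, mirroring how the perf builtins
 * pick an ops table; the condition names are illustrative): choose the
 * iterator behavior per sample type, then hand everything to
 * hist_entry_iter__add() below.
 *
 *	struct hist_entry_iter iter = {
 *		.ops = &hist_iter_normal,
 *	};
 *
 *	if (sample_has_branch_stack)
 *		iter.ops = &hist_iter_branch;
 *	else if (sample_is_mem_load_store)
 *		iter.ops = &hist_iter_mem;
 *	else if (symbol_conf.cumulate_callchain)
 *		iter.ops = &hist_iter_cumulative;
 */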
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 struct perf_evsel *evsel, struct perf_sample *sample,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(sample, &iter->parent, evsel, al,
					max_stack_depth);
	if (err)
		return err;

	iter->evsel = evsel;
	iter->sample = sample;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}
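/*
 * Usage sketch (hypothetical sample handler; process_sample() is an
 * illustrative name, not defined in this file):
 *
 *	static int process_sample(struct perf_evsel *evsel,
 *				  struct perf_sample *sample,
 *				  struct addr_location *al, void *arg)
 *	{
 *		struct hist_entry_iter iter = {
 *			.ops = &hist_iter_normal,
 *		};
 *
 *		return hist_entry_iter__add(&iter, al, evsel, sample,
 *					    PERF_MAX_STACK_DEPTH, arg);
 *	}
 */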
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->collapse(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	zfree(&he->branch_info);
	zfree(&he->mem_info);
	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	free_callchain(he->callchain);
	free(he);
}
/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}
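/*
 * hists->entries_in double-buffers between the two trees in
 * entries_in_array: grab the current input tree for collapsing and
 * atomically point new insertions at the other one.
 */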
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}
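/*
 * Merge entries that compare equal under the collapse keys from the
 * current input tree into hists->entries_collapsed, updating the
 * progress bar as we go.
 */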
void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}
static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->sort(a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}
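/*
 * Rebuild hists->entries, sorted for output, from the collapsed (or
 * input) tree, recomputing the stats and column widths on the way.
 */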
void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->ms.unfolded = false;
	h->row_offset = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}
static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}
/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}
/*
 * Look for entries in the other hists that are not present in the
 * leader; when we find one, add a dummy entry to the leader hists, with
 * period=0 and nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
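/*
 * Typical pairing flow (sketch; evsel_a/evsel_b are illustrative names,
 * as used by diff-style tools that compare two sets of hists):
 *
 *	struct hists *leader = evsel__hists(evsel_a);
 *	struct hists *other  = evsel__hists(evsel_b);
 *
 *	hists__match(leader, other);
 *	if (hists__link(leader, other) < 0)
 *		return -1;
 */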
size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}
u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}
int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}
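/*
 * Wiring sketch (illustrative; the exact option table lives in the perf
 * builtins, not in this file): the command line and the config file end
 * up in the same parser.
 *
 *	OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
 *		     "how to display percentage of filtered entries",
 *		     parse_filter_percentage),
 *
 * while a "hist.percentage" entry in perfconfig is routed through
 * perf_hist_config() below.
 */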
int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}
static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	return 0;
}

/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */

int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init, NULL);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}