/* xref: /openbmc/linux/tools/perf/util/hist.c (revision 92b19ff5) */
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "ui/progress.h"
#include <math.h>

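/*
 * Overview: each evsel's hists keeps its entries in three rb-trees.
 * New samples are inserted into one of the two entries_in_array trees
 * (double-buffered via hists->entries_in), hists__collapse_resort()
 * merges them into entries_collapsed according to the collapse keys,
 * and hists__output_resort() builds the final, display-ordered
 * entries tree.
 */
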
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

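/*
 * Grow the per-column display widths so that every field of @h fits.
 * Widths only ever grow here (via hists__new_col_len());
 * hists__reset_col_len() shrinks them back before a full recalculation.
 */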
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}

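/*
 * Exponentially decay the observed periods: each pass keeps 7/8 of the
 * previous value, so entries that stop getting samples fade out of a
 * live session (e.g. perf top) over successive refreshes.
 */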
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	rb_erase(&he->rb_node, &hists->entries);

	if (sort__need_collapse)
		rb_erase(&he->rb_node_in, &hists->entries_collapsed);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

/*
 * histogram, sorted on item, collects periods
 */

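/*
 * Allocate a new hist_entry from @template.  The callchain root, when
 * callchains are in use, lives in the flexible tail of the allocation,
 * which is why callchain_size is added to sizeof(*he) below.
 */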
static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		map__get(he->ms.map);

		if (he->branch_info) {
			/*
			 * This branch info is (part of) an allocation from
			 * sample__resolve_bstack() that will be freed after
			 * adding new entries, so we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				map__zput(he->ms.map);
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			map__get(he->branch_info->from.map);
			map__get(he->branch_info->to.map);
		}

		if (he->mem_info) {
			map__get(he->mem_info->iaddr.map);
			map__get(he->mem_info->daddr.map);
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
		thread__get(he->thread);
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self)
				he_stat__add_period(&he->stat, period, weight);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}

struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      u64 period, u64 weight, u64 transaction,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= period,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = transaction,
	};

	return hists__findnew_entry(hists, &entry, al, sample_self);
}

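/*
 * hist_entry_iter turns one sample into one or more hist entries,
 * depending on the ops in use.  hist_entry_iter__add() below drives the
 * protocol, roughly:
 *
 *	ops->prepare_entry(iter, al);
 *	ops->add_single_entry(iter, al);
 *	while (ops->next_entry(iter, al))
 *		ops->add_next_entry(iter, al);
 *	ops->finish_entry(iter, al);
 *
 * The "nop" callbacks below are for sample types that produce a single
 * entry and have nothing further to iterate over.
 */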
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = iter->sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * We must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort(), which is based solely
	 * on periods.  We want sorting to be done on nr_events * weight,
	 * and this is indirectly achieved by passing period=weight here
	 * and in the he_stat__add_period() function.
	 */
	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
				cost, cost, 0, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured,
	 * not of events sampled.  Thus we use a pseudo period of 1.
	 */
	he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
				1, 1, 0, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursion so that callers are
	 * cumulated only once, preventing entries from showing more
	 * than 100% overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there are duplicate entries in the callchain;
	 * cycles or recursive calls can produce them.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

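/*
 * The per-sample-type iterator implementations.  Roughly: hist_iter_mem
 * serves memory-access samples (perf mem), hist_iter_branch expands
 * branch stacks into one entry per branch (perf report -b),
 * hist_iter_normal handles ordinary samples, and hist_iter_cumulative
 * additionally credits callers (the --children behavior).
 */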
const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};

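/*
 * A minimal usage sketch (hypothetical caller, mirroring what a tool's
 * sample handler does after resolving the sample to an addr_location):
 *
 *	struct hist_entry_iter iter = {
 *		.evsel	= evsel,
 *		.sample	= sample,
 *		.ops	= &hist_iter_normal,
 *	};
 *
 *	if (hist_entry_iter__add(&iter, &al, PERF_MAX_STACK_DEPTH, NULL) < 0)
 *		pr_err("problem adding hist entry\n");
 */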
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(iter->sample, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err)
		return err;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}

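/*
 * Three comparators, one per stage: fmt->cmp orders entries in the
 * input tree, fmt->collapse decides which entries merge during
 * hists__collapse_resort(), and fmt->sort (see hist_entry__sort()
 * below) orders the final output tree.
 */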
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		zfree(&he->mem_info);
	}

	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	free_callchain(he->callchain);
	free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__delete(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

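/*
 * The input trees are double-buffered: hand the current one to the
 * caller (for collapsing) and rotate to the other, so that new samples
 * can keep arriving concurrently, e.g. from perf top's reader thread.
 */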
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

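/*
 * Insert @he into the output tree in display order.  Note the inverted
 * test below: entries that compare greater go to the left, so
 * rb_first() yields the hottest entry first.
 */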
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}

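/*
 * Filtering: each hists__filter_entry_by_*() helper sets one bit in
 * he->filtered; an entry counts toward the "non filtered" stats only
 * when all bits are clear again, which is what this helper checks
 * after clearing the bit for @filter.
 */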
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

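/*
 * Add a zero-stat placeholder for @pair to @hists, so that two sets of
 * hists can be compared entry by entry (e.g. by perf diff) even when
 * one side never saw the entry.
 */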
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, just add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

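/*
 * Parse the --percentage option (e.g. "perf report --percentage
 * relative"): "relative" bases percentages on the unfiltered period
 * only, while "absolute" keeps the total period as the base (see
 * hists__total_period() above).
 */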
int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

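/*
 * Each evsel is allocated as a hists_evsel (see hists__init() below),
 * i.e. with a struct hists embedded after the perf_evsel proper, which
 * is what evsel__hists() digs out.  Initialize that embedded state here.
 */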
static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	return 0;
}

/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */

int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init, NULL);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}