/* tools/perf/util/hist.c (xref revision 861e10be) */
#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evsel.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
	HIST_FILTER__SYMBOL,
};

struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order  = ORDER_CALLEE
};

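/*
 * Column-width bookkeeping: hists->col_len[] records the widest string
 * seen so far for each output column so the stdio/TUI printers can
 * align their output.  A minimal usage sketch (the lengths are
 * illustrative, not from real data):
 *
 *	hists__reset_col_len(hists);
 *	hists__new_col_len(hists, HISTC_COMM, 7);	// widens 0 -> 7
 *	hists__new_col_len(hists, HISTC_COMM, 5);	// no-op: 5 < 7
 */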
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

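/*
 * Grow the column widths to fit a single entry.  The fallback width
 * for unresolved addresses, BITS_PER_LONG / 4, is the number of hex
 * digits in a pointer-sized value (16 on 64-bit, 8 on 32-bit).
 */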
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	u16 len;

	if (h->ms.sym)
		hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
	else
		hists__set_unres_dso_col_len(hists, HISTC_DSO);

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		int symlen;
		/*
		 * +4 accounts for the '[x] ' priv level info
		 * +2 accounts for the '0x' prefix on raw addresses
		 */
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->stat.period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->stat.period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->stat.period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->stat.period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period)
{
	he_stat->period		+= period;
	he_stat->nr_events	+= 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
}

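/*
 * Ageing, for 'perf top' style continuous updates: each decay pass
 * scales an entry's period by 7/8, so an entry that stops getting
 * samples fades out geometrically -- after n passes only (7/8)^n of
 * its period remains -- and is pruned once it decays to zero.
 */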
static void hist_entry__decay(struct hist_entry *he)
{
	he->stat.period = (he->stat.period * 7) / 8;
	he->stat.nr_events = (he->stat.nr_events * 7) / 8;
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->stat.period;

	return he->stat.period == 0;
}

static void __hists__decay_entries(struct hists *hists, bool zap_user,
				   bool zap_kernel, bool threaded)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it
		 * here in case it gets new samples; we'll eventually
		 * free it when the user stops browsing and it gets
		 * fully decayed again.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse || threaded)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, false);
}

void hists__decay_entries_threaded(struct hists *hists,
				   bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, true);
}

/*
 * histogram, sorted on item, collects periods
 */

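/*
 * hist_entry__new() allocates the optional callchain_root in the same
 * malloc() block, right behind the hist_entry itself; he->callchain
 * refers to that tail and is only valid when symbol_conf.use_callchain
 * is set.  The resulting layout, roughly:
 *
 *	[ struct hist_entry | struct callchain_root (optional tail) ]
 */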
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = malloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}

void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->stat.period;
	}
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

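/*
 * Common insertion path for __hists__add_entry() and
 * __hists__add_branch_entry(): search hists->entries_in under
 * hists->lock, fold the period into a matching entry if one exists,
 * otherwise allocate and link a new one.  A sketch of a typical
 * caller ('al' and 'sample' stand in for the caller's resolved
 * addr_location and parsed sample):
 *
 *	he = __hists__add_entry(hists, &al, parent, sample->period);
 *	if (he == NULL)
 *		return -ENOMEM;
 */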
static struct hist_entry *add_hist_entry(struct hists *hists,
				      struct hist_entry *entry,
				      struct addr_location *al,
				      u64 period)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	pthread_mutex_lock(&hists->lock);

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			he_stat__add_period(&he->stat, period);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		goto out_unlock;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
	pthread_mutex_unlock(&hists->lock);
	return he;
}

struct hist_entry *__hists__add_branch_entry(struct hists *self,
					     struct addr_location *al,
					     struct symbol *sym_parent,
					     struct branch_info *bi,
					     u64 period)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= bi->to.map,
			.sym	= bi->to.sym,
		},
		.cpu	= al->cpu,
		.ip	= bi->to.addr,
		.level	= al->level,
		.stat = {
			.period	= period,
			.nr_events = 1,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.branch_info = bi,
		.hists	= self,
	};

	return add_hist_entry(self, &entry, al, period);
}

struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.stat = {
			.period	= period,
			.nr_events = 1,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists	= self,
	};

	return add_hist_entry(self, &entry, al, period);
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

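/*
 * hist_entry__collapse() differs from hist_entry__cmp() only in that a
 * sort entry may provide a dedicated ->se_collapse comparator (e.g. to
 * merge otherwise distinct entries); when it doesn't, ->se_cmp serves
 * as the fallback, so both functions walk hist_entry__sort_list in the
 * same order.
 */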
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	free(he->branch_info);
	free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

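/*
 * hists->entries_in points into the two-element entries_in_array[];
 * rotating it under hists->lock hands the old tree to the collapse
 * pass below while new samples keep flowing into the other one -- a
 * simple double-buffering scheme for the threaded case.
 */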
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse && !threaded)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
	}
}

void hists__collapse_resort(struct hists *hists)
{
	return __hists__collapse_resort(hists, false);
}

void hists__collapse_resort_threaded(struct hists *hists)
{
	return __hists__collapse_resort(hists, true);
}

/*
 * reverse the map, sort on period.
 */

static int period_cmp(u64 period_a, u64 period_b)
{
	if (period_a > period_b)
		return 1;
	if (period_a < period_b)
		return -1;
	return 0;
}

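/*
 * With event groups (symbol_conf.event_group), a tie on the group
 * leader's period is broken by comparing the members' periods in
 * group-index order, gathered from each entry's pair list.
 */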
static int hist_entry__sort_on_period(struct hist_entry *a,
				      struct hist_entry *b)
{
	int ret;
	int i, nr_members;
	struct perf_evsel *evsel;
	struct hist_entry *pair;
	u64 *periods_a, *periods_b;

	ret = period_cmp(a->stat.period, b->stat.period);
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	nr_members = evsel->nr_members;
	if (nr_members <= 1)
		return ret;
	periods_a = zalloc(sizeof(*periods_a) * nr_members);	/* one u64 per member */
	periods_b = zalloc(sizeof(*periods_b) * nr_members);

	if (!periods_a || !periods_b)
		goto out;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	for (i = 1; i < nr_members; i++) {
		ret = period_cmp(periods_a[i], periods_b[i]);
		if (ret)
			break;
	}

out:
	free(periods_a);
	free(periods_b);

	return ret;
}

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort_on_period(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

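/*
 * Final pass before display: move entries out of the (possibly
 * collapsed) input tree into hists->entries, sorted on period, while
 * recomputing nr_entries, total_period and the column widths from
 * scratch.  min_callchain_hits turns callchain_param.min_percent into
 * an absolute period threshold for folding callchains.
 */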
static void __hists__output_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse || threaded)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}

void hists__output_resort(struct hists *hists)
{
	return __hists__output_resort(hists, false);
}

void hists__output_resort_threaded(struct hists *hists)
{
	return __hists__output_resort(hists, true);
}

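/*
 * Filtering: each filter owns one bit in he->filtered and an entry is
 * visible only when the whole mask is clear.  Un-filtering therefore
 * clears a single bit and, only if no other filter still claims the
 * entry, adds its period and rows back into the hists totals.
 */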
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->stat.period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

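/*
 * A dummy entry copies the sort keys of 'pair' but zeroes the stat
 * block (period == 0, nr_events == 0), so an entry present only in
 * the other hists still gets a row in the leader -- this is what lets
 * e.g. 'perf diff' show symbols that appear in just one of the two
 * data files.
 */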
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_nr_entries(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

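/*
 * Pairing two hists, a sketch of the intended call order (variable
 * names are illustrative):
 *
 *	hists__match(leader_hists, other_hists);  // link existing entries
 *	hists__link(leader_hists, other_hists);   // add dummies for the rest
 */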
/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the
 * leader; if we find any, add a dummy entry on the leader hists, with
 * period=0 and nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
919