xref: /openbmc/linux/tools/perf/util/hist.c (revision bffb5b0c0976aa46aaa961dd19a47c9d6301cfe1)
1 // SPDX-License-Identifier: GPL-2.0
2 #include "callchain.h"
3 #include "debug.h"
4 #include "dso.h"
5 #include "build-id.h"
6 #include "hist.h"
7 #include "kvm-stat.h"
8 #include "map.h"
9 #include "map_symbol.h"
10 #include "branch.h"
11 #include "mem-events.h"
12 #include "session.h"
13 #include "namespaces.h"
14 #include "cgroup.h"
15 #include "sort.h"
16 #include "units.h"
17 #include "evlist.h"
18 #include "evsel.h"
19 #include "annotate.h"
20 #include "srcline.h"
21 #include "symbol.h"
22 #include "thread.h"
23 #include "block-info.h"
24 #include "ui/progress.h"
25 #include <errno.h>
26 #include <math.h>
27 #include <inttypes.h>
28 #include <sys/param.h>
29 #include <linux/rbtree.h>
30 #include <linux/string.h>
31 #include <linux/time64.h>
32 #include <linux/zalloc.h>
33 
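/*
 * Overview: samples are accumulated into 'struct hist_entry' objects
 * kept in rbtrees.  New entries land in hists->entries_in, duplicates
 * across sort keys get merged into hists->entries_collapsed, and the
 * final display order lives in hists->entries.  The col_len helpers
 * below track per-column display widths for the UIs.
 */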
34 static bool hists__filter_entry_by_dso(struct hists *hists,
35 				       struct hist_entry *he);
36 static bool hists__filter_entry_by_thread(struct hists *hists,
37 					  struct hist_entry *he);
38 static bool hists__filter_entry_by_symbol(struct hists *hists,
39 					  struct hist_entry *he);
40 static bool hists__filter_entry_by_socket(struct hists *hists,
41 					  struct hist_entry *he);
42 
43 u16 hists__col_len(struct hists *hists, enum hist_column col)
44 {
45 	return hists->col_len[col];
46 }
47 
48 void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
49 {
50 	hists->col_len[col] = len;
51 }
52 
53 bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
54 {
55 	if (len > hists__col_len(hists, col)) {
56 		hists__set_col_len(hists, col, len);
57 		return true;
58 	}
59 	return false;
60 }
61 
62 void hists__reset_col_len(struct hists *hists)
63 {
64 	enum hist_column col;
65 
66 	for (col = 0; col < HISTC_NR_COLS; ++col)
67 		hists__set_col_len(hists, col, 0);
68 }
69 
70 static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
71 {
72 	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
73 
74 	if (hists__col_len(hists, dso) < unresolved_col_width &&
75 	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
76 	    !symbol_conf.dso_list)
77 		hists__set_col_len(hists, dso, unresolved_col_width);
78 }
79 
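/*
 * Grow the per-column display widths so this entry's symbol, dso, comm,
 * branch and memory fields all fit.  Widths only ever grow here;
 * hists__reset_col_len() shrinks them back between resort rounds.
 */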
80 void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
81 {
82 	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
83 	int symlen;
84 	u16 len;
85 
86 	if (h->block_info)
87 		return;
88 	/*
89 	 * +4 accounts for '[x] ' priv level info
90 	 * +2 accounts for 0x prefix on raw addresses
91 	 * +3 accounts for ' y ' symtab origin info
92 	 */
93 	if (h->ms.sym) {
94 		symlen = h->ms.sym->namelen + 4;
95 		if (verbose > 0)
96 			symlen += BITS_PER_LONG / 4 + 2 + 3;
97 		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
98 	} else {
99 		symlen = unresolved_col_width + 4 + 2;
100 		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
101 		hists__set_unres_dso_col_len(hists, HISTC_DSO);
102 	}
103 
104 	len = thread__comm_len(h->thread);
105 	if (hists__new_col_len(hists, HISTC_COMM, len))
106 		hists__set_col_len(hists, HISTC_THREAD, len + 8);
107 
108 	if (h->ms.map) {
109 		len = dso__name_len(map__dso(h->ms.map));
110 		hists__new_col_len(hists, HISTC_DSO, len);
111 	}
112 
113 	if (h->parent)
114 		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
115 
116 	if (h->branch_info) {
117 		if (h->branch_info->from.ms.sym) {
118 			symlen = (int)h->branch_info->from.ms.sym->namelen + 4;
119 			if (verbose > 0)
120 				symlen += BITS_PER_LONG / 4 + 2 + 3;
121 			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
122 
123 			symlen = dso__name_len(map__dso(h->branch_info->from.ms.map));
124 			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
125 		} else {
126 			symlen = unresolved_col_width + 4 + 2;
127 			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
128 			hists__new_col_len(hists, HISTC_ADDR_FROM, symlen);
129 			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
130 		}
131 
132 		if (h->branch_info->to.ms.sym) {
133 			symlen = (int)h->branch_info->to.ms.sym->namelen + 4;
134 			if (verbose > 0)
135 				symlen += BITS_PER_LONG / 4 + 2 + 3;
136 			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
137 
138 			symlen = dso__name_len(map__dso(h->branch_info->to.ms.map));
139 			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
140 		} else {
141 			symlen = unresolved_col_width + 4 + 2;
142 			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
143 			hists__new_col_len(hists, HISTC_ADDR_TO, symlen);
144 			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
145 		}
146 
147 		if (h->branch_info->srcline_from)
148 			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
149 					strlen(h->branch_info->srcline_from));
150 		if (h->branch_info->srcline_to)
151 			hists__new_col_len(hists, HISTC_SRCLINE_TO,
152 					strlen(h->branch_info->srcline_to));
153 	}
154 
155 	if (h->mem_info) {
156 		if (h->mem_info->daddr.ms.sym) {
157 			symlen = (int)h->mem_info->daddr.ms.sym->namelen + 4
158 			       + unresolved_col_width + 2;
159 			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
160 					   symlen);
161 			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
162 					   symlen + 1);
163 		} else {
164 			symlen = unresolved_col_width + 4 + 2;
165 			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
166 					   symlen);
167 			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
168 					   symlen);
169 		}
170 
171 		if (h->mem_info->iaddr.ms.sym) {
172 			symlen = (int)h->mem_info->iaddr.ms.sym->namelen + 4
173 			       + unresolved_col_width + 2;
174 			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
175 					   symlen);
176 		} else {
177 			symlen = unresolved_col_width + 4 + 2;
178 			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
179 					   symlen);
180 		}
181 
182 		if (h->mem_info->daddr.ms.map) {
183 			symlen = dso__name_len(map__dso(h->mem_info->daddr.ms.map));
184 			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
185 					   symlen);
186 		} else {
187 			symlen = unresolved_col_width + 4 + 2;
188 			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
189 		}
190 
191 		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
192 				   unresolved_col_width + 4 + 2);
193 
194 		hists__new_col_len(hists, HISTC_MEM_DATA_PAGE_SIZE,
195 				   unresolved_col_width + 4 + 2);
196 
197 	} else {
198 		symlen = unresolved_col_width + 4 + 2;
199 		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
200 		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
201 		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
202 	}
203 
204 	hists__new_col_len(hists, HISTC_CGROUP, 6);
205 	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
206 	hists__new_col_len(hists, HISTC_CPU, 3);
207 	hists__new_col_len(hists, HISTC_SOCKET, 6);
208 	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
209 	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
210 	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
211 	hists__new_col_len(hists, HISTC_MEM_LVL, 36 + 3);
212 	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
213 	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
214 	hists__new_col_len(hists, HISTC_MEM_BLOCKED, 10);
215 	hists__new_col_len(hists, HISTC_LOCAL_INS_LAT, 13);
216 	hists__new_col_len(hists, HISTC_GLOBAL_INS_LAT, 13);
217 	hists__new_col_len(hists, HISTC_LOCAL_P_STAGE_CYC, 13);
218 	hists__new_col_len(hists, HISTC_GLOBAL_P_STAGE_CYC, 13);
219 	hists__new_col_len(hists, HISTC_ADDR, BITS_PER_LONG / 4 + 2);
220 
221 	if (symbol_conf.nanosecs)
222 		hists__new_col_len(hists, HISTC_TIME, 16);
223 	else
224 		hists__new_col_len(hists, HISTC_TIME, 12);
225 	hists__new_col_len(hists, HISTC_CODE_PAGE_SIZE, 6);
226 
227 	if (h->srcline) {
228 		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
229 		hists__new_col_len(hists, HISTC_SRCLINE, len);
230 	}
231 
232 	if (h->srcfile)
233 		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));
234 
235 	if (h->transaction)
236 		hists__new_col_len(hists, HISTC_TRANSACTION,
237 				   hist_entry__transaction_len());
238 
239 	if (h->trace_output)
240 		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
241 
242 	if (h->cgroup) {
243 		const char *cgrp_name = "unknown";
244 		struct cgroup *cgrp = cgroup__find(maps__machine(h->ms.maps)->env,
245 						   h->cgroup);
246 		if (cgrp != NULL)
247 			cgrp_name = cgrp->name;
248 
249 		hists__new_col_len(hists, HISTC_CGROUP, strlen(cgrp_name));
250 	}
251 }
252 
253 void hists__output_recalc_col_len(struct hists *hists, int max_rows)
254 {
255 	struct rb_node *next = rb_first_cached(&hists->entries);
256 	struct hist_entry *n;
257 	int row = 0;
258 
259 	hists__reset_col_len(hists);
260 
261 	while (next && row++ < max_rows) {
262 		n = rb_entry(next, struct hist_entry, rb_node);
263 		if (!n->filtered)
264 			hists__calc_col_len(hists, n);
265 		next = rb_next(&n->rb_node);
266 	}
267 }
268 
269 static void he_stat__add_cpumode_period(struct he_stat *he_stat,
270 					unsigned int cpumode, u64 period)
271 {
272 	switch (cpumode) {
273 	case PERF_RECORD_MISC_KERNEL:
274 		he_stat->period_sys += period;
275 		break;
276 	case PERF_RECORD_MISC_USER:
277 		he_stat->period_us += period;
278 		break;
279 	case PERF_RECORD_MISC_GUEST_KERNEL:
280 		he_stat->period_guest_sys += period;
281 		break;
282 	case PERF_RECORD_MISC_GUEST_USER:
283 		he_stat->period_guest_us += period;
284 		break;
285 	default:
286 		break;
287 	}
288 }
289 
290 static long hist_time(unsigned long htime)
291 {
292 	unsigned long time_quantum = symbol_conf.time_quantum;
293 	if (time_quantum)
294 		return (htime / time_quantum) * time_quantum;
295 	return htime;
296 }
297 
298 static void he_stat__add_period(struct he_stat *he_stat, u64 period)
299 {
300 	he_stat->period		+= period;
301 	he_stat->nr_events	+= 1;
302 }
303 
304 static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
305 {
306 	dest->period		+= src->period;
307 	dest->period_sys	+= src->period_sys;
308 	dest->period_us		+= src->period_us;
309 	dest->period_guest_sys	+= src->period_guest_sys;
310 	dest->period_guest_us	+= src->period_guest_us;
311 	dest->nr_events		+= src->nr_events;
312 }
313 
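/*
 * Exponential decay for live-update modes (e.g. 'perf top'): each round
 * keeps 7/8 of the accumulated period, so entries that stop receiving
 * samples fade out and are deleted once their period reaches zero (see
 * hists__decay_entries() below).
 */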
314 static void he_stat__decay(struct he_stat *he_stat)
315 {
316 	he_stat->period = (he_stat->period * 7) / 8;
317 	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
318 	/* XXX need decay for weight too? */
319 }
320 
321 static void hists__delete_entry(struct hists *hists, struct hist_entry *he);
322 
323 static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
324 {
325 	u64 prev_period = he->stat.period;
326 	u64 diff;
327 
328 	if (prev_period == 0)
329 		return true;
330 
331 	he_stat__decay(&he->stat);
332 	if (symbol_conf.cumulate_callchain)
333 		he_stat__decay(he->stat_acc);
334 	decay_callchain(he->callchain);
335 
336 	diff = prev_period - he->stat.period;
337 
338 	if (!he->depth) {
339 		hists->stats.total_period -= diff;
340 		if (!he->filtered)
341 			hists->stats.total_non_filtered_period -= diff;
342 	}
343 
344 	if (!he->leaf) {
345 		struct hist_entry *child;
346 		struct rb_node *node = rb_first_cached(&he->hroot_out);
347 		while (node) {
348 			child = rb_entry(node, struct hist_entry, rb_node);
349 			node = rb_next(node);
350 
351 			if (hists__decay_entry(hists, child))
352 				hists__delete_entry(hists, child);
353 		}
354 	}
355 
356 	return he->stat.period == 0;
357 }
358 
359 static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
360 {
361 	struct rb_root_cached *root_in;
362 	struct rb_root_cached *root_out;
363 
364 	if (he->parent_he) {
365 		root_in  = &he->parent_he->hroot_in;
366 		root_out = &he->parent_he->hroot_out;
367 	} else {
368 		if (hists__has(hists, need_collapse))
369 			root_in = &hists->entries_collapsed;
370 		else
371 			root_in = hists->entries_in;
372 		root_out = &hists->entries;
373 	}
374 
375 	rb_erase_cached(&he->rb_node_in, root_in);
376 	rb_erase_cached(&he->rb_node, root_out);
377 
378 	--hists->nr_entries;
379 	if (!he->filtered)
380 		--hists->nr_non_filtered_entries;
381 
382 	hist_entry__delete(he);
383 }
384 
385 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
386 {
387 	struct rb_node *next = rb_first_cached(&hists->entries);
388 	struct hist_entry *n;
389 
390 	while (next) {
391 		n = rb_entry(next, struct hist_entry, rb_node);
392 		next = rb_next(&n->rb_node);
393 		if (((zap_user && n->level == '.') ||
394 		     (zap_kernel && n->level != '.') ||
395 		     hists__decay_entry(hists, n))) {
396 			hists__delete_entry(hists, n);
397 		}
398 	}
399 }
400 
401 void hists__delete_entries(struct hists *hists)
402 {
403 	struct rb_node *next = rb_first_cached(&hists->entries);
404 	struct hist_entry *n;
405 
406 	while (next) {
407 		n = rb_entry(next, struct hist_entry, rb_node);
408 		next = rb_next(&n->rb_node);
409 
410 		hists__delete_entry(hists, n);
411 	}
412 }
413 
414 struct hist_entry *hists__get_entry(struct hists *hists, int idx)
415 {
416 	struct rb_node *next = rb_first_cached(&hists->entries);
417 	struct hist_entry *n;
418 	int i = 0;
419 
420 	while (next) {
421 		n = rb_entry(next, struct hist_entry, rb_node);
422 		if (i == idx)
423 			return n;
424 
425 		next = rb_next(&n->rb_node);
426 		i++;
427 	}
428 
429 	return NULL;
430 }
431 
432 /*
433  * histogram, sorted on item, collects periods
434  */
435 
436 static int hist_entry__init(struct hist_entry *he,
437 			    struct hist_entry *template,
438 			    bool sample_self,
439 			    size_t callchain_size)
440 {
441 	*he = *template;
442 	he->callchain_size = callchain_size;
443 
444 	if (symbol_conf.cumulate_callchain) {
445 		he->stat_acc = malloc(sizeof(he->stat));
446 		if (he->stat_acc == NULL)
447 			return -ENOMEM;
448 		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
449 		if (!sample_self)
450 			memset(&he->stat, 0, sizeof(he->stat));
451 	}
452 
453 	he->ms.maps = maps__get(he->ms.maps);
454 	he->ms.map = map__get(he->ms.map);
455 
456 	if (he->branch_info) {
457 		/*
458 		 * This branch info is (part of an array) allocated by
459 		 * sample__resolve_bstack() and will be freed after
460 		 * adding new entries.  So we need to save a copy.
461 		 */
462 		he->branch_info = malloc(sizeof(*he->branch_info));
463 		if (he->branch_info == NULL)
464 			goto err;
465 
466 		memcpy(he->branch_info, template->branch_info,
467 		       sizeof(*he->branch_info));
468 
469 		he->branch_info->from.ms.map = map__get(he->branch_info->from.ms.map);
470 		he->branch_info->to.ms.map = map__get(he->branch_info->to.ms.map);
471 	}
472 
473 	if (he->mem_info) {
474 		he->mem_info->iaddr.ms.map = map__get(he->mem_info->iaddr.ms.map);
475 		he->mem_info->daddr.ms.map = map__get(he->mem_info->daddr.ms.map);
476 	}
477 
478 	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
479 		callchain_init(he->callchain);
480 
481 	if (he->raw_data) {
482 		he->raw_data = memdup(he->raw_data, he->raw_size);
483 		if (he->raw_data == NULL)
484 			goto err_infos;
485 	}
486 
487 	if (he->srcline) {
488 		he->srcline = strdup(he->srcline);
489 		if (he->srcline == NULL)
490 			goto err_rawdata;
491 	}
492 
493 	if (symbol_conf.res_sample) {
494 		he->res_samples = calloc(symbol_conf.res_sample,
495 					 sizeof(struct res_sample));
496 		if (!he->res_samples)
497 			goto err_srcline;
498 	}
499 
500 	INIT_LIST_HEAD(&he->pairs.node);
501 	he->thread = thread__get(he->thread);
502 	he->hroot_in  = RB_ROOT_CACHED;
503 	he->hroot_out = RB_ROOT_CACHED;
504 
505 	if (!symbol_conf.report_hierarchy)
506 		he->leaf = true;
507 
508 	return 0;
509 
510 err_srcline:
511 	zfree(&he->srcline);
512 
513 err_rawdata:
514 	zfree(&he->raw_data);
515 
516 err_infos:
517 	if (he->branch_info) {
518 		map__put(he->branch_info->from.ms.map);
519 		map__put(he->branch_info->to.ms.map);
520 		zfree(&he->branch_info);
521 	}
522 	if (he->mem_info) {
523 		map__put(he->mem_info->iaddr.ms.map);
524 		map__put(he->mem_info->daddr.ms.map);
525 	}
526 err:
527 	maps__zput(he->ms.maps);
528 	map__zput(he->ms.map);
529 	zfree(&he->stat_acc);
530 	return -ENOMEM;
531 }
532 
533 static void *hist_entry__zalloc(size_t size)
534 {
535 	return zalloc(size + sizeof(struct hist_entry));
536 }
537 
538 static void hist_entry__free(void *ptr)
539 {
540 	free(ptr);
541 }
542 
543 static struct hist_entry_ops default_ops = {
544 	.new	= hist_entry__zalloc,
545 	.free	= hist_entry__free,
546 };
547 
548 static struct hist_entry *hist_entry__new(struct hist_entry *template,
549 					  bool sample_self)
550 {
551 	struct hist_entry_ops *ops = template->ops;
552 	size_t callchain_size = 0;
553 	struct hist_entry *he;
554 	int err = 0;
555 
556 	if (!ops)
557 		ops = template->ops = &default_ops;
558 
559 	if (symbol_conf.use_callchain)
560 		callchain_size = sizeof(struct callchain_root);
561 
562 	he = ops->new(callchain_size);
563 	if (he) {
564 		err = hist_entry__init(he, template, sample_self, callchain_size);
565 		if (err) {
566 			ops->free(he);
567 			he = NULL;
568 		}
569 	}
570 
571 	return he;
572 }
573 
574 static u8 symbol__parent_filter(const struct symbol *parent)
575 {
576 	if (symbol_conf.exclude_other && parent == NULL)
577 		return 1 << HIST_FILTER__PARENT;
578 	return 0;
579 }
580 
581 static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
582 {
583 	if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
584 		return;
585 
586 	he->hists->callchain_period += period;
587 	if (!he->filtered)
588 		he->hists->callchain_non_filtered_period += period;
589 }
590 
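/*
 * Look up 'entry' in the current input tree.  On a match the period is
 * aggregated into the existing entry and the temporary mem/block/kvm
 * info is dropped; otherwise a private copy is allocated with
 * hist_entry__new() and linked into the tree.
 */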
591 static struct hist_entry *hists__findnew_entry(struct hists *hists,
592 					       struct hist_entry *entry,
593 					       const struct addr_location *al,
594 					       bool sample_self)
595 {
596 	struct rb_node **p;
597 	struct rb_node *parent = NULL;
598 	struct hist_entry *he;
599 	int64_t cmp;
600 	u64 period = entry->stat.period;
601 	bool leftmost = true;
602 
603 	p = &hists->entries_in->rb_root.rb_node;
604 
605 	while (*p != NULL) {
606 		parent = *p;
607 		he = rb_entry(parent, struct hist_entry, rb_node_in);
608 
609 		/*
610 		 * Make sure that it receives arguments in the same order as
611 		 * hist_entry__collapse() so that we can use an appropriate
612 		 * function when searching an entry regardless of which sort
613 		 * keys were used.
614 		 */
615 		cmp = hist_entry__cmp(he, entry);
616 		if (!cmp) {
617 			if (sample_self) {
618 				he_stat__add_period(&he->stat, period);
619 				hist_entry__add_callchain_period(he, period);
620 			}
621 			if (symbol_conf.cumulate_callchain)
622 				he_stat__add_period(he->stat_acc, period);
623 
624 			/*
625 			 * This mem info was allocated from sample__resolve_mem
626 			 * and will not be used anymore.
627 			 */
628 			mem_info__zput(entry->mem_info);
629 
630 			block_info__zput(entry->block_info);
631 
632 			kvm_info__zput(entry->kvm_info);
633 
634 			/* If the map of an existing hist_entry has
635 			 * become out-of-date due to an exec() or
636 			 * similar, update it.  Otherwise we will
637 			 * mis-adjust symbol addresses when computing
638 			 * the history counter to increment.
639 			 */
640 			if (he->ms.map != entry->ms.map) {
641 				map__put(he->ms.map);
642 				he->ms.map = map__get(entry->ms.map);
643 			}
644 			goto out;
645 		}
646 
647 		if (cmp < 0)
648 			p = &(*p)->rb_left;
649 		else {
650 			p = &(*p)->rb_right;
651 			leftmost = false;
652 		}
653 	}
654 
655 	he = hist_entry__new(entry, sample_self);
656 	if (!he)
657 		return NULL;
658 
659 	if (sample_self)
660 		hist_entry__add_callchain_period(he, period);
661 	hists->nr_entries++;
662 
663 	rb_link_node(&he->rb_node_in, parent, p);
664 	rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
665 out:
666 	if (sample_self)
667 		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
668 	if (symbol_conf.cumulate_callchain)
669 		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
670 	return he;
671 }
672 
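/*
 * Return a random number in [0, high).  A plain random() % high would
 * be biased when the generator's range is not a multiple of 'high';
 * rejecting values below '-high % high' (i.e. 2^32 % high in unsigned
 * arithmetic) makes the accepted range an exact multiple of 'high',
 * assuming the generator covers the full unsigned range.
 */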
673 static unsigned random_max(unsigned high)
674 {
675 	unsigned thresh = -high % high;
676 	for (;;) {
677 		unsigned r = random();
678 		if (r >= thresh)
679 			return r % high;
680 	}
681 }
682 
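/*
 * Record up to symbol_conf.res_sample samples per entry: fill the array
 * first, then overwrite a randomly chosen slot for each further sample
 * (a simplified reservoir-style scheme).
 */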
683 static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
684 {
685 	struct res_sample *r;
686 	int j;
687 
688 	if (he->num_res < symbol_conf.res_sample) {
689 		j = he->num_res++;
690 	} else {
691 		j = random_max(symbol_conf.res_sample);
692 	}
693 	r = &he->res_samples[j];
694 	r->time = sample->time;
695 	r->cpu = sample->cpu;
696 	r->tid = sample->tid;
697 }
698 
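/*
 * Build a template hist_entry on the stack and hand it to
 * hists__findnew_entry(); the template is copied into a heap-allocated
 * entry only if no existing entry compares equal.
 */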
699 static struct hist_entry*
700 __hists__add_entry(struct hists *hists,
701 		   struct addr_location *al,
702 		   struct symbol *sym_parent,
703 		   struct branch_info *bi,
704 		   struct mem_info *mi,
705 		   struct kvm_info *ki,
706 		   struct block_info *block_info,
707 		   struct perf_sample *sample,
708 		   bool sample_self,
709 		   struct hist_entry_ops *ops)
710 {
711 	struct namespaces *ns = thread__namespaces(al->thread);
712 	struct hist_entry entry = {
713 		.thread	= al->thread,
714 		.comm = thread__comm(al->thread),
715 		.cgroup_id = {
716 			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
717 			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
718 		},
719 		.cgroup = sample->cgroup,
720 		.ms = {
721 			.maps	= al->maps,
722 			.map	= al->map,
723 			.sym	= al->sym,
724 		},
725 		.srcline = (char *) al->srcline,
726 		.socket	 = al->socket,
727 		.cpu	 = al->cpu,
728 		.cpumode = al->cpumode,
729 		.ip	 = al->addr,
730 		.level	 = al->level,
731 		.code_page_size = sample->code_page_size,
732 		.stat = {
733 			.nr_events = 1,
734 			.period	= sample->period,
735 		},
736 		.parent = sym_parent,
737 		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
738 		.hists	= hists,
739 		.branch_info = bi,
740 		.mem_info = mi,
741 		.kvm_info = ki,
742 		.block_info = block_info,
743 		.transaction = sample->transaction,
744 		.raw_data = sample->raw_data,
745 		.raw_size = sample->raw_size,
746 		.ops = ops,
747 		.time = hist_time(sample->time),
748 		.weight = sample->weight,
749 		.ins_lat = sample->ins_lat,
750 		.p_stage_cyc = sample->p_stage_cyc,
751 		.simd_flags = sample->simd_flags,
752 	}, *he = hists__findnew_entry(hists, &entry, al, sample_self);
753 
754 	if (!hists->has_callchains && he && he->callchain_size != 0)
755 		hists->has_callchains = true;
756 	if (he && symbol_conf.res_sample)
757 		hists__res_sample(he, sample);
758 	return he;
759 }
760 
761 struct hist_entry *hists__add_entry(struct hists *hists,
762 				    struct addr_location *al,
763 				    struct symbol *sym_parent,
764 				    struct branch_info *bi,
765 				    struct mem_info *mi,
766 				    struct kvm_info *ki,
767 				    struct perf_sample *sample,
768 				    bool sample_self)
769 {
770 	return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
771 				  sample, sample_self, NULL);
772 }
773 
774 struct hist_entry *hists__add_entry_ops(struct hists *hists,
775 					struct hist_entry_ops *ops,
776 					struct addr_location *al,
777 					struct symbol *sym_parent,
778 					struct branch_info *bi,
779 					struct mem_info *mi,
780 					struct kvm_info *ki,
781 					struct perf_sample *sample,
782 					bool sample_self)
783 {
784 	return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
785 				  sample, sample_self, ops);
786 }
787 
788 struct hist_entry *hists__add_entry_block(struct hists *hists,
789 					  struct addr_location *al,
790 					  struct block_info *block_info)
791 {
792 	struct hist_entry entry = {
793 		.block_info = block_info,
794 		.hists = hists,
795 		.ms = {
796 			.maps = al->maps,
797 			.map = al->map,
798 			.sym = al->sym,
799 		},
800 	}, *he = hists__findnew_entry(hists, &entry, al, false);
801 
802 	return he;
803 }
804 
805 static int
806 iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
807 		    struct addr_location *al __maybe_unused)
808 {
809 	return 0;
810 }
811 
812 static int
813 iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
814 			struct addr_location *al __maybe_unused)
815 {
816 	return 0;
817 }
818 
819 static int
820 iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
821 {
822 	struct perf_sample *sample = iter->sample;
823 	struct mem_info *mi;
824 
825 	mi = sample__resolve_mem(sample, al);
826 	if (mi == NULL)
827 		return -ENOMEM;
828 
829 	iter->priv = mi;
830 	return 0;
831 }
832 
833 static int
834 iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
835 {
836 	u64 cost;
837 	struct mem_info *mi = iter->priv;
838 	struct hists *hists = evsel__hists(iter->evsel);
839 	struct perf_sample *sample = iter->sample;
840 	struct hist_entry *he;
841 
842 	if (mi == NULL)
843 		return -EINVAL;
844 
845 	cost = sample->weight;
846 	if (!cost)
847 		cost = 1;
848 
849 	/*
850 	 * Must pass period=weight in order to get the correct
851 	 * sorting from hists__collapse_resort(), which is solely
852 	 * based on periods.  We want sorting to be done on
853 	 * nr_events * weight, and this is indirectly achieved by
854 	 * passing period=weight here and via he_stat__add_period().
855 	 */
856 	sample->period = cost;
857 
858 	he = hists__add_entry(hists, al, iter->parent, NULL, mi, NULL,
859 			      sample, true);
860 	if (!he)
861 		return -ENOMEM;
862 
863 	iter->he = he;
864 	return 0;
865 }
866 
867 static int
868 iter_finish_mem_entry(struct hist_entry_iter *iter,
869 		      struct addr_location *al __maybe_unused)
870 {
871 	struct evsel *evsel = iter->evsel;
872 	struct hists *hists = evsel__hists(evsel);
873 	struct hist_entry *he = iter->he;
874 	int err = -EINVAL;
875 
876 	if (he == NULL)
877 		goto out;
878 
879 	hists__inc_nr_samples(hists, he->filtered);
880 
881 	err = hist_entry__append_callchain(he, iter->sample);
882 
883 out:
884 	/*
885 	 * We don't need to free iter->priv (mem_info) here since the mem info
886 	 * was either already freed in hists__findnew_entry() or passed to a
887 	 * new hist entry by hist_entry__new().
888 	 */
889 	iter->priv = NULL;
890 
891 	iter->he = NULL;
892 	return err;
893 }
894 
895 static int
896 iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
897 {
898 	struct branch_info *bi;
899 	struct perf_sample *sample = iter->sample;
900 
901 	bi = sample__resolve_bstack(sample, al);
902 	if (!bi)
903 		return -ENOMEM;
904 
905 	iter->curr = 0;
906 	iter->total = sample->branch_stack->nr;
907 
908 	iter->priv = bi;
909 	return 0;
910 }
911 
912 static int
913 iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
914 			     struct addr_location *al __maybe_unused)
915 {
916 	return 0;
917 }
918 
919 static int
920 iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
921 {
922 	struct branch_info *bi = iter->priv;
923 	int i = iter->curr;
924 
925 	if (bi == NULL)
926 		return 0;
927 
928 	if (iter->curr >= iter->total)
929 		return 0;
930 
931 	maps__put(al->maps);
932 	al->maps = maps__get(bi[i].to.ms.maps);
933 	map__put(al->map);
934 	al->map = map__get(bi[i].to.ms.map);
935 	al->sym = bi[i].to.ms.sym;
936 	al->addr = bi[i].to.addr;
937 	return 1;
938 }
939 
940 static int
941 iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
942 {
943 	struct branch_info *bi;
944 	struct evsel *evsel = iter->evsel;
945 	struct hists *hists = evsel__hists(evsel);
946 	struct perf_sample *sample = iter->sample;
947 	struct hist_entry *he = NULL;
948 	int i = iter->curr;
949 	int err = 0;
950 
951 	bi = iter->priv;
952 
953 	if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
954 		goto out;
955 
956 	/*
957 	 * The report shows the percentage of total branches captured
958 	 * and not events sampled. Thus we use a pseudo period of 1.
959 	 */
960 	sample->period = 1;
961 	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;
962 
963 	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL, NULL,
964 			      sample, true);
965 	if (he == NULL)
966 		return -ENOMEM;
967 
968 	hists__inc_nr_samples(hists, he->filtered);
969 
970 out:
971 	iter->he = he;
972 	iter->curr++;
973 	return err;
974 }
975 
976 static int
977 iter_finish_branch_entry(struct hist_entry_iter *iter,
978 			 struct addr_location *al __maybe_unused)
979 {
980 	zfree(&iter->priv);
981 	iter->he = NULL;
982 
983 	return iter->curr >= iter->total ? 0 : -1;
984 }
985 
986 static int
987 iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
988 			  struct addr_location *al __maybe_unused)
989 {
990 	return 0;
991 }
992 
993 static int
994 iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
995 {
996 	struct evsel *evsel = iter->evsel;
997 	struct perf_sample *sample = iter->sample;
998 	struct hist_entry *he;
999 
1000 	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
1001 			      NULL, sample, true);
1002 	if (he == NULL)
1003 		return -ENOMEM;
1004 
1005 	iter->he = he;
1006 	return 0;
1007 }
1008 
1009 static int
1010 iter_finish_normal_entry(struct hist_entry_iter *iter,
1011 			 struct addr_location *al __maybe_unused)
1012 {
1013 	struct hist_entry *he = iter->he;
1014 	struct evsel *evsel = iter->evsel;
1015 	struct perf_sample *sample = iter->sample;
1016 
1017 	if (he == NULL)
1018 		return 0;
1019 
1020 	iter->he = NULL;
1021 
1022 	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);
1023 
1024 	return hist_entry__append_callchain(he, sample);
1025 }
1026 
1027 static int
1028 iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
1029 			      struct addr_location *al __maybe_unused)
1030 {
1031 	struct hist_entry **he_cache;
1032 
1033 	callchain_cursor_commit(&callchain_cursor);
1034 
1035 	/*
1036 	 * This is for detecting cycles or recursion so that they're
1037 	 * accumulated only once, to prevent entries from showing more
1038 	 * than 100% overhead.
1039 	 */
1040 	he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
1041 	if (he_cache == NULL)
1042 		return -ENOMEM;
1043 
1044 	iter->priv = he_cache;
1045 	iter->curr = 0;
1046 
1047 	return 0;
1048 }
1049 
1050 static int
1051 iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
1052 				 struct addr_location *al)
1053 {
1054 	struct evsel *evsel = iter->evsel;
1055 	struct hists *hists = evsel__hists(evsel);
1056 	struct perf_sample *sample = iter->sample;
1057 	struct hist_entry **he_cache = iter->priv;
1058 	struct hist_entry *he;
1059 	int err = 0;
1060 
1061 	he = hists__add_entry(hists, al, iter->parent, NULL, NULL, NULL,
1062 			      sample, true);
1063 	if (he == NULL)
1064 		return -ENOMEM;
1065 
1066 	iter->he = he;
1067 	he_cache[iter->curr++] = he;
1068 
1069 	hist_entry__append_callchain(he, sample);
1070 
1071 	/*
1072 	 * We need to re-initialize the cursor since callchain_append()
1073 	 * advanced the cursor to the end.
1074 	 */
1075 	callchain_cursor_commit(&callchain_cursor);
1076 
1077 	hists__inc_nr_samples(hists, he->filtered);
1078 
1079 	return err;
1080 }
1081 
1082 static int
1083 iter_next_cumulative_entry(struct hist_entry_iter *iter,
1084 			   struct addr_location *al)
1085 {
1086 	struct callchain_cursor_node *node;
1087 
1088 	node = callchain_cursor_current(&callchain_cursor);
1089 	if (node == NULL)
1090 		return 0;
1091 
1092 	return fill_callchain_info(al, node, iter->hide_unresolved);
1093 }
1094 
1095 static bool
1096 hist_entry__fast__sym_diff(struct hist_entry *left,
1097 			   struct hist_entry *right)
1098 {
1099 	struct symbol *sym_l = left->ms.sym;
1100 	struct symbol *sym_r = right->ms.sym;
1101 
1102 	if (!sym_l && !sym_r)
1103 		return left->ip != right->ip;
1104 
1105 	return !!_sort__sym_cmp(sym_l, sym_r);
1106 }
1107 
1108 
1109 static int
1110 iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
1111 			       struct addr_location *al)
1112 {
1113 	struct evsel *evsel = iter->evsel;
1114 	struct perf_sample *sample = iter->sample;
1115 	struct hist_entry **he_cache = iter->priv;
1116 	struct hist_entry *he;
1117 	struct hist_entry he_tmp = {
1118 		.hists = evsel__hists(evsel),
1119 		.cpu = al->cpu,
1120 		.thread = al->thread,
1121 		.comm = thread__comm(al->thread),
1122 		.ip = al->addr,
1123 		.ms = {
1124 			.maps = al->maps,
1125 			.map = al->map,
1126 			.sym = al->sym,
1127 		},
1128 		.srcline = (char *) al->srcline,
1129 		.parent = iter->parent,
1130 		.raw_data = sample->raw_data,
1131 		.raw_size = sample->raw_size,
1132 	};
1133 	int i;
1134 	struct callchain_cursor cursor;
1135 	bool fast = hists__has(he_tmp.hists, sym);
1136 
1137 	callchain_cursor_snapshot(&cursor, &callchain_cursor);
1138 
1139 	callchain_cursor_advance(&callchain_cursor);
1140 
1141 	/*
1142 	 * Check if there are duplicate entries in the callchain.
1143 	 * It's possible that it has cycles or recursive calls.
1144 	 */
1145 	for (i = 0; i < iter->curr; i++) {
1146 		/*
1147 		 * In most cases, there are no duplicate entries in the callchain.
1148 		 * The symbols are usually different. Do a quick check for
1149 		 * symbols first.
1150 		 */
1151 		if (fast && hist_entry__fast__sym_diff(he_cache[i], &he_tmp))
1152 			continue;
1153 
1154 		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
1155 			/* to avoid calling the callback function */
1156 			iter->he = NULL;
1157 			return 0;
1158 		}
1159 	}
1160 
1161 	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
1162 			      NULL, sample, false);
1163 	if (he == NULL)
1164 		return -ENOMEM;
1165 
1166 	iter->he = he;
1167 	he_cache[iter->curr++] = he;
1168 
1169 	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
1170 		callchain_append(he->callchain, &cursor, sample->period);
1171 	return 0;
1172 }
1173 
1174 static int
1175 iter_finish_cumulative_entry(struct hist_entry_iter *iter,
1176 			     struct addr_location *al __maybe_unused)
1177 {
1178 	zfree(&iter->priv);
1179 	iter->he = NULL;
1180 
1181 	return 0;
1182 }
1183 
1184 const struct hist_iter_ops hist_iter_mem = {
1185 	.prepare_entry 		= iter_prepare_mem_entry,
1186 	.add_single_entry 	= iter_add_single_mem_entry,
1187 	.next_entry 		= iter_next_nop_entry,
1188 	.add_next_entry 	= iter_add_next_nop_entry,
1189 	.finish_entry 		= iter_finish_mem_entry,
1190 };
1191 
1192 const struct hist_iter_ops hist_iter_branch = {
1193 	.prepare_entry 		= iter_prepare_branch_entry,
1194 	.add_single_entry 	= iter_add_single_branch_entry,
1195 	.next_entry 		= iter_next_branch_entry,
1196 	.add_next_entry 	= iter_add_next_branch_entry,
1197 	.finish_entry 		= iter_finish_branch_entry,
1198 };
1199 
1200 const struct hist_iter_ops hist_iter_normal = {
1201 	.prepare_entry 		= iter_prepare_normal_entry,
1202 	.add_single_entry 	= iter_add_single_normal_entry,
1203 	.next_entry 		= iter_next_nop_entry,
1204 	.add_next_entry 	= iter_add_next_nop_entry,
1205 	.finish_entry 		= iter_finish_normal_entry,
1206 };
1207 
1208 const struct hist_iter_ops hist_iter_cumulative = {
1209 	.prepare_entry 		= iter_prepare_cumulative_entry,
1210 	.add_single_entry 	= iter_add_single_cumulative_entry,
1211 	.next_entry 		= iter_next_cumulative_entry,
1212 	.add_next_entry 	= iter_add_next_cumulative_entry,
1213 	.finish_entry 		= iter_finish_cumulative_entry,
1214 };
1215 
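/*
 * Drive one sample through the chosen iterator ops:
 *
 *   prepare_entry -> add_single_entry
 *                 -> { next_entry / add_next_entry }* -> finish_entry
 *
 * with add_entry_cb invoked after each successfully added entry.
 * A caller typically sets it up from its sample callback, roughly
 * (sketch only; real callers set more fields, e.g. hide_unresolved):
 *
 *	struct hist_entry_iter iter = {
 *		.evsel	= evsel,
 *		.sample	= sample,
 *		.ops	= &hist_iter_normal, // or _branch/_mem/_cumulative
 *	};
 *	err = hist_entry_iter__add(&iter, &al, max_stack_depth, NULL);
 */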
1216 int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
1217 			 int max_stack_depth, void *arg)
1218 {
1219 	int err, err2;
1220 	struct map *alm = NULL;
1221 
1222 	if (al)
1223 		alm = map__get(al->map);
1224 
1225 	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
1226 					iter->evsel, al, max_stack_depth);
1227 	if (err) {
1228 		map__put(alm);
1229 		return err;
1230 	}
1231 
1232 	err = iter->ops->prepare_entry(iter, al);
1233 	if (err)
1234 		goto out;
1235 
1236 	err = iter->ops->add_single_entry(iter, al);
1237 	if (err)
1238 		goto out;
1239 
1240 	if (iter->he && iter->add_entry_cb) {
1241 		err = iter->add_entry_cb(iter, al, true, arg);
1242 		if (err)
1243 			goto out;
1244 	}
1245 
1246 	while (iter->ops->next_entry(iter, al)) {
1247 		err = iter->ops->add_next_entry(iter, al);
1248 		if (err)
1249 			break;
1250 
1251 		if (iter->he && iter->add_entry_cb) {
1252 			err = iter->add_entry_cb(iter, al, false, arg);
1253 			if (err)
1254 				goto out;
1255 		}
1256 	}
1257 
1258 out:
1259 	err2 = iter->ops->finish_entry(iter, al);
1260 	if (!err)
1261 		err = err2;
1262 
1263 	map__put(alm);
1264 
1265 	return err;
1266 }
1267 
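/*
 * hist_entry__cmp() orders entries at input time, while
 * hist_entry__collapse() decides equality when merging into the
 * collapsed tree; both walk the configured sort list and skip dynamic
 * entries that are not defined for this hists instance.
 */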
1268 int64_t
1269 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
1270 {
1271 	struct hists *hists = left->hists;
1272 	struct perf_hpp_fmt *fmt;
1273 	int64_t cmp = 0;
1274 
1275 	hists__for_each_sort_list(hists, fmt) {
1276 		if (perf_hpp__is_dynamic_entry(fmt) &&
1277 		    !perf_hpp__defined_dynamic_entry(fmt, hists))
1278 			continue;
1279 
1280 		cmp = fmt->cmp(fmt, left, right);
1281 		if (cmp)
1282 			break;
1283 	}
1284 
1285 	return cmp;
1286 }
1287 
1288 int64_t
1289 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
1290 {
1291 	struct hists *hists = left->hists;
1292 	struct perf_hpp_fmt *fmt;
1293 	int64_t cmp = 0;
1294 
1295 	hists__for_each_sort_list(hists, fmt) {
1296 		if (perf_hpp__is_dynamic_entry(fmt) &&
1297 		    !perf_hpp__defined_dynamic_entry(fmt, hists))
1298 			continue;
1299 
1300 		cmp = fmt->collapse(fmt, left, right);
1301 		if (cmp)
1302 			break;
1303 	}
1304 
1305 	return cmp;
1306 }
1307 
1308 void hist_entry__delete(struct hist_entry *he)
1309 {
1310 	struct hist_entry_ops *ops = he->ops;
1311 
1312 	thread__zput(he->thread);
1313 	maps__zput(he->ms.maps);
1314 	map__zput(he->ms.map);
1315 
1316 	if (he->branch_info) {
1317 		map__zput(he->branch_info->from.ms.map);
1318 		map__zput(he->branch_info->to.ms.map);
1319 		free_srcline(he->branch_info->srcline_from);
1320 		free_srcline(he->branch_info->srcline_to);
1321 		zfree(&he->branch_info);
1322 	}
1323 
1324 	if (he->mem_info) {
1325 		map__zput(he->mem_info->iaddr.ms.map);
1326 		map__zput(he->mem_info->daddr.ms.map);
1327 		mem_info__zput(he->mem_info);
1328 	}
1329 
1330 	if (he->block_info)
1331 		block_info__zput(he->block_info);
1332 
1333 	if (he->kvm_info)
1334 		kvm_info__zput(he->kvm_info);
1335 
1336 	zfree(&he->res_samples);
1337 	zfree(&he->stat_acc);
1338 	free_srcline(he->srcline);
1339 	if (he->srcfile && he->srcfile[0])
1340 		zfree(&he->srcfile);
1341 	free_callchain(he->callchain);
1342 	zfree(&he->trace_output);
1343 	zfree(&he->raw_data);
1344 	ops->free(he);
1345 }
1346 
1347 /*
1348  * If this is not the last column, then we need to pad it according to the
1349  * pre-calculated max length for this column, otherwise don't bother adding
1350  * spaces because that would break viewing this with, for instance, 'less',
1351  * that would show tons of trailing spaces when a long demangled C++
1352  * method name is sampled.
1353  */
1354 int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
1355 				   struct perf_hpp_fmt *fmt, int printed)
1356 {
1357 	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
1358 		const int width = fmt->width(fmt, hpp, he->hists);
1359 		if (printed < width) {
1360 			advance_hpp(hpp, printed);
1361 			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
1362 		}
1363 	}
1364 
1365 	return printed;
1366 }
1367 
1368 /*
1369  * collapse the histogram
1370  */
1371 
1372 static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
1373 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
1374 				       enum hist_filter type);
1375 
1376 typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);
1377 
1378 static bool check_thread_entry(struct perf_hpp_fmt *fmt)
1379 {
1380 	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
1381 }
1382 
1383 static void hist_entry__check_and_remove_filter(struct hist_entry *he,
1384 						enum hist_filter type,
1385 						fmt_chk_fn check)
1386 {
1387 	struct perf_hpp_fmt *fmt;
1388 	bool type_match = false;
1389 	struct hist_entry *parent = he->parent_he;
1390 
1391 	switch (type) {
1392 	case HIST_FILTER__THREAD:
1393 		if (symbol_conf.comm_list == NULL &&
1394 		    symbol_conf.pid_list == NULL &&
1395 		    symbol_conf.tid_list == NULL)
1396 			return;
1397 		break;
1398 	case HIST_FILTER__DSO:
1399 		if (symbol_conf.dso_list == NULL)
1400 			return;
1401 		break;
1402 	case HIST_FILTER__SYMBOL:
1403 		if (symbol_conf.sym_list == NULL)
1404 			return;
1405 		break;
1406 	case HIST_FILTER__PARENT:
1407 	case HIST_FILTER__GUEST:
1408 	case HIST_FILTER__HOST:
1409 	case HIST_FILTER__SOCKET:
1410 	case HIST_FILTER__C2C:
1411 	default:
1412 		return;
1413 	}
1414 
1415 	/* if it's filtered by its own fmt, it has to have filter bits */
1416 	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
1417 		if (check(fmt)) {
1418 			type_match = true;
1419 			break;
1420 		}
1421 	}
1422 
1423 	if (type_match) {
1424 		/*
1425 		 * If the filter is for the current level entry, propagate
1426 		 * the filter marker to parents.  The marker bit was
1427 		 * already set by default so it only needs to be cleared
1428 		 * for non-filtered entries.
1429 		 */
1430 		if (!(he->filtered & (1 << type))) {
1431 			while (parent) {
1432 				parent->filtered &= ~(1 << type);
1433 				parent = parent->parent_he;
1434 			}
1435 		}
1436 	} else {
1437 		/*
1438 		 * If the current entry doesn't have matching formats, set
1439 		 * the filter marker for upper level entries.  It will be
1440 		 * cleared if its lower level entries are not filtered.
1441 		 *
1442 		 * For lower-level entries, it inherits parent's
1443 		 * filter bit so that lower level entries of a
1444 		 * non-filtered entry won't set the filter marker.
1445 		 */
1446 		if (parent == NULL)
1447 			he->filtered |= (1 << type);
1448 		else
1449 			he->filtered |= (parent->filtered & (1 << type));
1450 	}
1451 }
1452 
1453 static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
1454 {
1455 	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
1456 					    check_thread_entry);
1457 
1458 	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
1459 					    perf_hpp__is_dso_entry);
1460 
1461 	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
1462 					    perf_hpp__is_sym_entry);
1463 
1464 	hists__apply_filters(he->hists, he);
1465 }
1466 
1467 static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
1468 						 struct rb_root_cached *root,
1469 						 struct hist_entry *he,
1470 						 struct hist_entry *parent_he,
1471 						 struct perf_hpp_list *hpp_list)
1472 {
1473 	struct rb_node **p = &root->rb_root.rb_node;
1474 	struct rb_node *parent = NULL;
1475 	struct hist_entry *iter, *new;
1476 	struct perf_hpp_fmt *fmt;
1477 	int64_t cmp;
1478 	bool leftmost = true;
1479 
1480 	while (*p != NULL) {
1481 		parent = *p;
1482 		iter = rb_entry(parent, struct hist_entry, rb_node_in);
1483 
1484 		cmp = 0;
1485 		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1486 			cmp = fmt->collapse(fmt, iter, he);
1487 			if (cmp)
1488 				break;
1489 		}
1490 
1491 		if (!cmp) {
1492 			he_stat__add_stat(&iter->stat, &he->stat);
1493 			return iter;
1494 		}
1495 
1496 		if (cmp < 0)
1497 			p = &parent->rb_left;
1498 		else {
1499 			p = &parent->rb_right;
1500 			leftmost = false;
1501 		}
1502 	}
1503 
1504 	new = hist_entry__new(he, true);
1505 	if (new == NULL)
1506 		return NULL;
1507 
1508 	hists->nr_entries++;
1509 
1510 	/* save related format list for output */
1511 	new->hpp_list = hpp_list;
1512 	new->parent_he = parent_he;
1513 
1514 	hist_entry__apply_hierarchy_filters(new);
1515 
1516 	/* ownership of some fields is now passed to 'new' */
1517 	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1518 		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
1519 			he->trace_output = NULL;
1520 		else
1521 			new->trace_output = NULL;
1522 
1523 		if (perf_hpp__is_srcline_entry(fmt))
1524 			he->srcline = NULL;
1525 		else
1526 			new->srcline = NULL;
1527 
1528 		if (perf_hpp__is_srcfile_entry(fmt))
1529 			he->srcfile = NULL;
1530 		else
1531 			new->srcfile = NULL;
1532 	}
1533 
1534 	rb_link_node(&new->rb_node_in, parent, p);
1535 	rb_insert_color_cached(&new->rb_node_in, root, leftmost);
1536 	return new;
1537 }
1538 
1539 static int hists__hierarchy_insert_entry(struct hists *hists,
1540 					 struct rb_root_cached *root,
1541 					 struct hist_entry *he)
1542 {
1543 	struct perf_hpp_list_node *node;
1544 	struct hist_entry *new_he = NULL;
1545 	struct hist_entry *parent = NULL;
1546 	int depth = 0;
1547 	int ret = 0;
1548 
1549 	list_for_each_entry(node, &hists->hpp_formats, list) {
1550 		/* skip period (overhead) and elided columns */
1551 		if (node->level == 0 || node->skip)
1552 			continue;
1553 
1554 		/* insert a copy of 'he' into the hierarchy for each format level */
1555 		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
1556 		if (new_he == NULL) {
1557 			ret = -1;
1558 			break;
1559 		}
1560 
1561 		root = &new_he->hroot_in;
1562 		new_he->depth = depth++;
1563 		parent = new_he;
1564 	}
1565 
1566 	if (new_he) {
1567 		new_he->leaf = true;
1568 
1569 		if (hist_entry__has_callchains(new_he) &&
1570 		    symbol_conf.use_callchain) {
1571 			callchain_cursor_reset(&callchain_cursor);
1572 			if (callchain_merge(&callchain_cursor,
1573 					    new_he->callchain,
1574 					    he->callchain) < 0)
1575 				ret = -1;
1576 		}
1577 	}
1578 
1579 	/* 'he' is no longer used */
1580 	hist_entry__delete(he);
1581 
1582 	/* return 0 (or -1) since it already applied filters */
1583 	/* return 0 (or -1) since the filters were already applied */
1584 }
1585 
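/*
 * Merge 'he' into the collapsed tree.  Returns 1 if a new node was
 * inserted, 0 if it was merged into an existing entry (in which case
 * 'he' is freed here), or -1 if merging the callchains failed.  In
 * hierarchy mode this delegates to hists__hierarchy_insert_entry().
 */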
1586 static int hists__collapse_insert_entry(struct hists *hists,
1587 					struct rb_root_cached *root,
1588 					struct hist_entry *he)
1589 {
1590 	struct rb_node **p = &root->rb_root.rb_node;
1591 	struct rb_node *parent = NULL;
1592 	struct hist_entry *iter;
1593 	int64_t cmp;
1594 	bool leftmost = true;
1595 
1596 	if (symbol_conf.report_hierarchy)
1597 		return hists__hierarchy_insert_entry(hists, root, he);
1598 
1599 	while (*p != NULL) {
1600 		parent = *p;
1601 		iter = rb_entry(parent, struct hist_entry, rb_node_in);
1602 
1603 		cmp = hist_entry__collapse(iter, he);
1604 
1605 		if (!cmp) {
1606 			int ret = 0;
1607 
1608 			he_stat__add_stat(&iter->stat, &he->stat);
1609 			if (symbol_conf.cumulate_callchain)
1610 				he_stat__add_stat(iter->stat_acc, he->stat_acc);
1611 
1612 			if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
1613 				callchain_cursor_reset(&callchain_cursor);
1614 				if (callchain_merge(&callchain_cursor,
1615 						    iter->callchain,
1616 						    he->callchain) < 0)
1617 					ret = -1;
1618 			}
1619 			hist_entry__delete(he);
1620 			return ret;
1621 		}
1622 
1623 		if (cmp < 0)
1624 			p = &(*p)->rb_left;
1625 		else {
1626 			p = &(*p)->rb_right;
1627 			leftmost = false;
1628 		}
1629 	}
1630 	hists->nr_entries++;
1631 
1632 	rb_link_node(&he->rb_node_in, parent, p);
1633 	rb_insert_color_cached(&he->rb_node_in, root, leftmost);
1634 	return 1;
1635 }
1636 
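/*
 * Double buffering of the input tree: hists->entries_in flips between
 * the two slots of entries_in_array under hists->lock, so new samples
 * can keep flowing into one tree while the other is being collapsed.
 */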
1637 struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
1638 {
1639 	struct rb_root_cached *root;
1640 
1641 	mutex_lock(&hists->lock);
1642 
1643 	root = hists->entries_in;
1644 	if (++hists->entries_in > &hists->entries_in_array[1])
1645 		hists->entries_in = &hists->entries_in_array[0];
1646 
1647 	mutex_unlock(&hists->lock);
1648 
1649 	return root;
1650 }
1651 
1652 static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
1653 {
1654 	hists__filter_entry_by_dso(hists, he);
1655 	hists__filter_entry_by_thread(hists, he);
1656 	hists__filter_entry_by_symbol(hists, he);
1657 	hists__filter_entry_by_socket(hists, he);
1658 }
1659 
1660 int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1661 {
1662 	struct rb_root_cached *root;
1663 	struct rb_node *next;
1664 	struct hist_entry *n;
1665 	int ret;
1666 
1667 	if (!hists__has(hists, need_collapse))
1668 		return 0;
1669 
1670 	hists->nr_entries = 0;
1671 
1672 	root = hists__get_rotate_entries_in(hists);
1673 
1674 	next = rb_first_cached(root);
1675 
1676 	while (next) {
1677 		if (session_done())
1678 			break;
1679 		n = rb_entry(next, struct hist_entry, rb_node_in);
1680 		next = rb_next(&n->rb_node_in);
1681 
1682 		rb_erase_cached(&n->rb_node_in, root);
1683 		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
1684 		if (ret < 0)
1685 			return -1;
1686 
1687 		if (ret) {
1688 			/*
1689 			 * If it wasn't combined with one of the entries already
1690 			 * collapsed, we need to apply the filters that may have
1691 			 * been set by, say, the hist_browser.
1692 			 */
1693 			hists__apply_filters(hists, n);
1694 		}
1695 		if (prog)
1696 			ui_progress__update(prog, 1);
1697 	}
1698 	return 0;
1699 }
1700 
1701 static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
1702 {
1703 	struct hists *hists = a->hists;
1704 	struct perf_hpp_fmt *fmt;
1705 	int64_t cmp = 0;
1706 
1707 	hists__for_each_sort_list(hists, fmt) {
1708 		if (perf_hpp__should_skip(fmt, a->hists))
1709 			continue;
1710 
1711 		cmp = fmt->sort(fmt, a, b);
1712 		if (cmp)
1713 			break;
1714 	}
1715 
1716 	return cmp;
1717 }
1718 
1719 static void hists__reset_filter_stats(struct hists *hists)
1720 {
1721 	hists->nr_non_filtered_entries = 0;
1722 	hists->stats.total_non_filtered_period = 0;
1723 }
1724 
1725 void hists__reset_stats(struct hists *hists)
1726 {
1727 	hists->nr_entries = 0;
1728 	hists->stats.total_period = 0;
1729 
1730 	hists__reset_filter_stats(hists);
1731 }
1732 
1733 static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
1734 {
1735 	hists->nr_non_filtered_entries++;
1736 	hists->stats.total_non_filtered_period += h->stat.period;
1737 }
1738 
1739 void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1740 {
1741 	if (!h->filtered)
1742 		hists__inc_filter_stats(hists, h);
1743 
1744 	hists->nr_entries++;
1745 	hists->stats.total_period += h->stat.period;
1746 }
1747 
1748 static void hierarchy_recalc_total_periods(struct hists *hists)
1749 {
1750 	struct rb_node *node;
1751 	struct hist_entry *he;
1752 
1753 	node = rb_first_cached(&hists->entries);
1754 
1755 	hists->stats.total_period = 0;
1756 	hists->stats.total_non_filtered_period = 0;
1757 
1758 	/*
1759 	 * Recalculate the total period using top-level entries only,
1760 	 * since lower level entries only see non-filtered entries
1761 	 * but upper level entries have the sum of both.
1762 	 */
1763 	while (node) {
1764 		he = rb_entry(node, struct hist_entry, rb_node);
1765 		node = rb_next(node);
1766 
1767 		hists->stats.total_period += he->stat.period;
1768 		if (!he->filtered)
1769 			hists->stats.total_non_filtered_period += he->stat.period;
1770 	}
1771 }
1772 
1773 static void hierarchy_insert_output_entry(struct rb_root_cached *root,
1774 					  struct hist_entry *he)
1775 {
1776 	struct rb_node **p = &root->rb_root.rb_node;
1777 	struct rb_node *parent = NULL;
1778 	struct hist_entry *iter;
1779 	struct perf_hpp_fmt *fmt;
1780 	bool leftmost = true;
1781 
1782 	while (*p != NULL) {
1783 		parent = *p;
1784 		iter = rb_entry(parent, struct hist_entry, rb_node);
1785 
1786 		if (hist_entry__sort(he, iter) > 0)
1787 			p = &parent->rb_left;
1788 		else {
1789 			p = &parent->rb_right;
1790 			leftmost = false;
1791 		}
1792 	}
1793 
1794 	rb_link_node(&he->rb_node, parent, p);
1795 	rb_insert_color_cached(&he->rb_node, root, leftmost);
1796 
1797 	/* update column width of dynamic entry */
1798 	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
1799 		if (fmt->init)
1800 			fmt->init(fmt, he);
1801 	}
1802 }
1803 
1804 static void hists__hierarchy_output_resort(struct hists *hists,
1805 					   struct ui_progress *prog,
1806 					   struct rb_root_cached *root_in,
1807 					   struct rb_root_cached *root_out,
1808 					   u64 min_callchain_hits,
1809 					   bool use_callchain)
1810 {
1811 	struct rb_node *node;
1812 	struct hist_entry *he;
1813 
1814 	*root_out = RB_ROOT_CACHED;
1815 	node = rb_first_cached(root_in);
1816 
1817 	while (node) {
1818 		he = rb_entry(node, struct hist_entry, rb_node_in);
1819 		node = rb_next(node);
1820 
1821 		hierarchy_insert_output_entry(root_out, he);
1822 
1823 		if (prog)
1824 			ui_progress__update(prog, 1);
1825 
1826 		hists->nr_entries++;
1827 		if (!he->filtered) {
1828 			hists->nr_non_filtered_entries++;
1829 			hists__calc_col_len(hists, he);
1830 		}
1831 
1832 		if (!he->leaf) {
1833 			hists__hierarchy_output_resort(hists, prog,
1834 						       &he->hroot_in,
1835 						       &he->hroot_out,
1836 						       min_callchain_hits,
1837 						       use_callchain);
1838 			continue;
1839 		}
1840 
1841 		if (!use_callchain)
1842 			continue;
1843 
1844 		if (callchain_param.mode == CHAIN_GRAPH_REL) {
1845 			u64 total = he->stat.period;
1846 
1847 			if (symbol_conf.cumulate_callchain)
1848 				total = he->stat_acc->period;
1849 
1850 			min_callchain_hits = total * (callchain_param.min_percent / 100);
1851 		}
1852 
1853 		callchain_param.sort(&he->sorted_chain, he->callchain,
1854 				     min_callchain_hits, &callchain_param);
1855 	}
1856 }
1857 
1858 static void __hists__insert_output_entry(struct rb_root_cached *entries,
1859 					 struct hist_entry *he,
1860 					 u64 min_callchain_hits,
1861 					 bool use_callchain)
1862 {
1863 	struct rb_node **p = &entries->rb_root.rb_node;
1864 	struct rb_node *parent = NULL;
1865 	struct hist_entry *iter;
1866 	struct perf_hpp_fmt *fmt;
1867 	bool leftmost = true;
1868 
1869 	if (use_callchain) {
1870 		if (callchain_param.mode == CHAIN_GRAPH_REL) {
1871 			u64 total = he->stat.period;
1872 
1873 			if (symbol_conf.cumulate_callchain)
1874 				total = he->stat_acc->period;
1875 
1876 			min_callchain_hits = total * (callchain_param.min_percent / 100);
1877 		}
1878 		callchain_param.sort(&he->sorted_chain, he->callchain,
1879 				      min_callchain_hits, &callchain_param);
1880 	}
1881 
1882 	while (*p != NULL) {
1883 		parent = *p;
1884 		iter = rb_entry(parent, struct hist_entry, rb_node);
1885 
1886 		if (hist_entry__sort(he, iter) > 0)
1887 			p = &(*p)->rb_left;
1888 		else {
1889 			p = &(*p)->rb_right;
1890 			leftmost = false;
1891 		}
1892 	}
1893 
1894 	rb_link_node(&he->rb_node, parent, p);
1895 	rb_insert_color_cached(&he->rb_node, entries, leftmost);
1896 
1897 	/* update column width of dynamic entries */
1898 	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
1899 		if (fmt->init)
1900 			fmt->init(fmt, he);
1901 	}
1902 }
1903 
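/*
 * Final sort for display: take the collapsed tree (or the raw input
 * tree when no collapsing is needed), re-insert every entry into
 * hists->entries ordered by the output sort keys, and recompute the
 * stats and column widths.  min_callchain_hits implements the
 * callchain_param.min_percent cutoff for folding cold callchains.
 */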
1904 static void output_resort(struct hists *hists, struct ui_progress *prog,
1905 			  bool use_callchain, hists__resort_cb_t cb,
1906 			  void *cb_arg)
1907 {
1908 	struct rb_root_cached *root;
1909 	struct rb_node *next;
1910 	struct hist_entry *n;
1911 	u64 callchain_total;
1912 	u64 min_callchain_hits;
1913 
1914 	callchain_total = hists->callchain_period;
1915 	if (symbol_conf.filter_relative)
1916 		callchain_total = hists->callchain_non_filtered_period;
1917 
1918 	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);
1919 
1920 	hists__reset_stats(hists);
1921 	hists__reset_col_len(hists);
1922 
1923 	if (symbol_conf.report_hierarchy) {
1924 		hists__hierarchy_output_resort(hists, prog,
1925 					       &hists->entries_collapsed,
1926 					       &hists->entries,
1927 					       min_callchain_hits,
1928 					       use_callchain);
1929 		hierarchy_recalc_total_periods(hists);
1930 		return;
1931 	}
1932 
1933 	if (hists__has(hists, need_collapse))
1934 		root = &hists->entries_collapsed;
1935 	else
1936 		root = hists->entries_in;
1937 
1938 	next = rb_first_cached(root);
1939 	hists->entries = RB_ROOT_CACHED;
1940 
1941 	while (next) {
1942 		n = rb_entry(next, struct hist_entry, rb_node_in);
1943 		next = rb_next(&n->rb_node_in);
1944 
1945 		if (cb && cb(n, cb_arg))
1946 			continue;
1947 
1948 		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
1949 		hists__inc_stats(hists, n);
1950 
1951 		if (!n->filtered)
1952 			hists__calc_col_len(hists, n);
1953 
1954 		if (prog)
1955 			ui_progress__update(prog, 1);
1956 	}
1957 }
1958 
1959 void evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
1960 			     hists__resort_cb_t cb, void *cb_arg)
1961 {
1962 	bool use_callchain;
1963 
1964 	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
1965 		use_callchain = evsel__has_callchain(evsel);
1966 	else
1967 		use_callchain = symbol_conf.use_callchain;
1968 
1969 	use_callchain |= symbol_conf.show_branchflag_count;
1970 
1971 	output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
1972 }
1973 
1974 void evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
1975 {
1976 	return evsel__output_resort_cb(evsel, prog, NULL, NULL);
1977 }
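
/*
 * Typical use, as a sketch: resort every evsel's hists once event
 * processing is done ('evlist' is assumed to be a populated evlist;
 * pass a struct ui_progress instead of NULL to get progress updates):
 *
 *	struct evsel *evsel;
 *
 *	evlist__for_each_entry(evlist, evsel)
 *		evsel__output_resort(evsel, NULL);
 */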
1978 
1979 void hists__output_resort(struct hists *hists, struct ui_progress *prog)
1980 {
1981 	output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
1982 }
1983 
1984 void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
1985 			     hists__resort_cb_t cb)
1986 {
1987 	output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
1988 }
1989 
1990 static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
1991 {
1992 	if (he->leaf || hmd == HMD_FORCE_SIBLING)
1993 		return false;
1994 
1995 	if (he->unfolded || hmd == HMD_FORCE_CHILD)
1996 		return true;
1997 
1998 	return false;
1999 }
2000 
2001 struct rb_node *rb_hierarchy_last(struct rb_node *node)
2002 {
2003 	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
2004 
2005 	while (can_goto_child(he, HMD_NORMAL)) {
2006 		node = rb_last(&he->hroot_out.rb_root);
2007 		he = rb_entry(node, struct hist_entry, rb_node);
2008 	}
2009 	return node;
2010 }
2011 
2012 struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
2013 {
2014 	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
2015 
2016 	if (can_goto_child(he, hmd))
2017 		node = rb_first_cached(&he->hroot_out);
2018 	else
2019 		node = rb_next(node);
2020 
2021 	while (node == NULL) {
2022 		he = he->parent_he;
2023 		if (he == NULL)
2024 			break;
2025 
2026 		node = rb_next(&he->rb_node);
2027 	}
2028 	return node;
2029 }
2030 
2031 struct rb_node *rb_hierarchy_prev(struct rb_node *node)
2032 {
2033 	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
2034 
2035 	node = rb_prev(node);
2036 	if (node)
2037 		return rb_hierarchy_last(node);
2038 
2039 	he = he->parent_he;
2040 	if (he == NULL)
2041 		return NULL;
2042 
2043 	return &he->rb_node;
2044 }
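
/*
 * Together these helpers give a depth-first, display-order walk of a
 * hierarchy. A sketch of a full traversal, where visit() stands in for
 * a hypothetical per-entry callback:
 *
 *	struct rb_node *nd = rb_first_cached(&hists->entries);
 *
 *	while (nd) {
 *		struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
 *
 *		visit(he);
 *		nd = __rb_hierarchy_next(&he->rb_node, HMD_NORMAL);
 *	}
 */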
2045 
2046 bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
2047 {
2048 	struct rb_node *node;
2049 	struct hist_entry *child;
2050 	float percent;
2051 
2052 	if (he->leaf)
2053 		return false;
2054 
2055 	node = rb_first_cached(&he->hroot_out);
2056 	child = rb_entry(node, struct hist_entry, rb_node);
2057 
2058 	while (node && child->filtered) {
2059 		node = rb_next(node);
2060 		child = rb_entry(node, struct hist_entry, rb_node);
2061 	}
2062 
2063 	if (node)
2064 		percent = hist_entry__get_percent_limit(child);
2065 	else
2066 		percent = 0;
2067 
2068 	return node && percent >= limit;
2069 }
2070 
2071 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
2072 				       enum hist_filter filter)
2073 {
2074 	h->filtered &= ~(1 << filter);
2075 
2076 	if (symbol_conf.report_hierarchy) {
2077 		struct hist_entry *parent = h->parent_he;
2078 
2079 		while (parent) {
2080 			he_stat__add_stat(&parent->stat, &h->stat);
2081 
2082 			parent->filtered &= ~(1 << filter);
2083 
2084 			if (parent->filtered)
2085 				goto next;
2086 
2087 			/* force fold unfiltered entry for simplicity */
2088 			parent->unfolded = false;
2089 			parent->has_no_entry = false;
2090 			parent->row_offset = 0;
2091 			parent->nr_rows = 0;
2092 next:
2093 			parent = parent->parent_he;
2094 		}
2095 	}
2096 
2097 	if (h->filtered)
2098 		return;
2099 
2100 	/* force fold unfiltered entry for simplicity */
2101 	h->unfolded = false;
2102 	h->has_no_entry = false;
2103 	h->row_offset = 0;
2104 	h->nr_rows = 0;
2105 
2106 	hists->stats.nr_non_filtered_samples += h->stat.nr_events;
2107 
2108 	hists__inc_filter_stats(hists, h);
2109 	hists__calc_col_len(hists, h);
2110 }
2111 
2112 
2113 static bool hists__filter_entry_by_dso(struct hists *hists,
2114 				       struct hist_entry *he)
2115 {
2116 	if (hists->dso_filter != NULL &&
2117 	    (he->ms.map == NULL || map__dso(he->ms.map) != hists->dso_filter)) {
2118 		he->filtered |= (1 << HIST_FILTER__DSO);
2119 		return true;
2120 	}
2121 
2122 	return false;
2123 }
2124 
2125 static bool hists__filter_entry_by_thread(struct hists *hists,
2126 					  struct hist_entry *he)
2127 {
2128 	if (hists->thread_filter != NULL &&
2129 	    RC_CHK_ACCESS(he->thread) != RC_CHK_ACCESS(hists->thread_filter)) {
2130 		he->filtered |= (1 << HIST_FILTER__THREAD);
2131 		return true;
2132 	}
2133 
2134 	return false;
2135 }
2136 
2137 static bool hists__filter_entry_by_symbol(struct hists *hists,
2138 					  struct hist_entry *he)
2139 {
2140 	if (hists->symbol_filter_str != NULL &&
2141 	    (!he->ms.sym || strstr(he->ms.sym->name,
2142 				   hists->symbol_filter_str) == NULL)) {
2143 		he->filtered |= (1 << HIST_FILTER__SYMBOL);
2144 		return true;
2145 	}
2146 
2147 	return false;
2148 }
2149 
2150 static bool hists__filter_entry_by_socket(struct hists *hists,
2151 					  struct hist_entry *he)
2152 {
2153 	if ((hists->socket_filter > -1) &&
2154 	    (he->socket != hists->socket_filter)) {
2155 		he->filtered |= (1 << HIST_FILTER__SOCKET);
2156 		return true;
2157 	}
2158 
2159 	return false;
2160 }
2161 
2162 typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
2163 
2164 static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
2165 {
2166 	struct rb_node *nd;
2167 
2168 	hists->stats.nr_non_filtered_samples = 0;
2169 
2170 	hists__reset_filter_stats(hists);
2171 	hists__reset_col_len(hists);
2172 
2173 	for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
2174 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2175 
2176 		if (filter(hists, h))
2177 			continue;
2178 
2179 		hists__remove_entry_filter(hists, h, type);
2180 	}
2181 }
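
/*
 * Adding another filter dimension means providing one more predicate of
 * this shape: set the entry's filter bit and return true when the entry
 * is filtered out. Hypothetical sketch (HIST_FILTER__FOO, foo_filter
 * and he->foo do not exist in this file):
 *
 *	static bool hists__filter_entry_by_foo(struct hists *hists,
 *					       struct hist_entry *he)
 *	{
 *		if (hists->foo_filter > -1 && he->foo != hists->foo_filter) {
 *			he->filtered |= (1 << HIST_FILTER__FOO);
 *			return true;
 *		}
 *
 *		return false;
 *	}
 */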
2182 
2183 static void resort_filtered_entry(struct rb_root_cached *root,
2184 				  struct hist_entry *he)
2185 {
2186 	struct rb_node **p = &root->rb_root.rb_node;
2187 	struct rb_node *parent = NULL;
2188 	struct hist_entry *iter;
2189 	struct rb_root_cached new_root = RB_ROOT_CACHED;
2190 	struct rb_node *nd;
2191 	bool leftmost = true;
2192 
2193 	while (*p != NULL) {
2194 		parent = *p;
2195 		iter = rb_entry(parent, struct hist_entry, rb_node);
2196 
2197 		if (hist_entry__sort(he, iter) > 0)
2198 			p = &(*p)->rb_left;
2199 		else {
2200 			p = &(*p)->rb_right;
2201 			leftmost = false;
2202 		}
2203 	}
2204 
2205 	rb_link_node(&he->rb_node, parent, p);
2206 	rb_insert_color_cached(&he->rb_node, root, leftmost);
2207 
2208 	if (he->leaf || he->filtered)
2209 		return;
2210 
2211 	nd = rb_first_cached(&he->hroot_out);
2212 	while (nd) {
2213 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2214 
2215 		nd = rb_next(nd);
2216 		rb_erase_cached(&h->rb_node, &he->hroot_out);
2217 
2218 		resort_filtered_entry(&new_root, h);
2219 	}
2220 
2221 	he->hroot_out = new_root;
2222 }
2223 
2224 static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
2225 {
2226 	struct rb_node *nd;
2227 	struct rb_root_cached new_root = RB_ROOT_CACHED;
2228 
2229 	hists->stats.nr_non_filtered_samples = 0;
2230 
2231 	hists__reset_filter_stats(hists);
2232 	hists__reset_col_len(hists);
2233 
2234 	nd = rb_first_cached(&hists->entries);
2235 	while (nd) {
2236 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2237 		int ret;
2238 
2239 		ret = hist_entry__filter(h, type, arg);
2240 
2241 		/*
2242 		 * case 1. non-matching type
2243 		 * zero out the period, set filter marker and move to child
2244 		 */
2245 		if (ret < 0) {
2246 			memset(&h->stat, 0, sizeof(h->stat));
2247 			h->filtered |= (1 << type);
2248 
2249 			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
2250 		}
2251 		/*
2252 		 * case 2. matched type (filter out)
2253 		 * set filter marker and move to next
2254 		 */
2255 		else if (ret == 1) {
2256 			h->filtered |= (1 << type);
2257 
2258 			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2259 		}
2260 		/*
2261 		 * case 3. ok (not filtered)
2262 		 * add period to hists and parents, erase the filter marker
2263 		 * and move to next sibling
2264 		 */
2265 		else {
2266 			hists__remove_entry_filter(hists, h, type);
2267 
2268 			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2269 		}
2270 	}
2271 
2272 	hierarchy_recalc_total_periods(hists);
2273 
2274 	/*
2275 	 * resort output after applying a new filter since a filter in a lower
2276 	 * hierarchy can change periods in an upper hierarchy.
2277 	 */
2278 	nd = rb_first_cached(&hists->entries);
2279 	while (nd) {
2280 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2281 
2282 		nd = rb_next(nd);
2283 		rb_erase_cached(&h->rb_node, &hists->entries);
2284 
2285 		resort_filtered_entry(&new_root, h);
2286 	}
2287 
2288 	hists->entries = new_root;
2289 }
2290 
2291 void hists__filter_by_thread(struct hists *hists)
2292 {
2293 	if (symbol_conf.report_hierarchy)
2294 		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
2295 					hists->thread_filter);
2296 	else
2297 		hists__filter_by_type(hists, HIST_FILTER__THREAD,
2298 				      hists__filter_entry_by_thread);
2299 }
2300 
2301 void hists__filter_by_dso(struct hists *hists)
2302 {
2303 	if (symbol_conf.report_hierarchy)
2304 		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
2305 					hists->dso_filter);
2306 	else
2307 		hists__filter_by_type(hists, HIST_FILTER__DSO,
2308 				      hists__filter_entry_by_dso);
2309 }
2310 
2311 void hists__filter_by_symbol(struct hists *hists)
2312 {
2313 	if (symbol_conf.report_hierarchy)
2314 		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
2315 					hists->symbol_filter_str);
2316 	else
2317 		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
2318 				      hists__filter_entry_by_symbol);
2319 }
2320 
2321 void hists__filter_by_socket(struct hists *hists)
2322 {
2323 	if (symbol_conf.report_hierarchy)
2324 		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
2325 					&hists->socket_filter);
2326 	else
2327 		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
2328 				      hists__filter_entry_by_socket);
2329 }
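
/*
 * Callers are expected to set the filter on the hists first and then
 * invoke the matching refresh, e.g. (sketch; 'thread' is whatever
 * thread the UI selected):
 *
 *	hists->thread_filter = thread;
 *	hists__filter_by_thread(hists);
 */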
2330 
2331 void events_stats__inc(struct events_stats *stats, u32 type)
2332 {
2333 	++stats->nr_events[0];
2334 	++stats->nr_events[type];
2335 }
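
/*
 * Slot 0 doubles as the running total, so a single call such as
 * (sketch):
 *
 *	events_stats__inc(&stats, PERF_RECORD_SAMPLE);
 *
 * bumps both the total in nr_events[0] and nr_events[PERF_RECORD_SAMPLE].
 */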
2336 
2337 static void hists_stats__inc(struct hists_stats *stats)
2338 {
2339 	++stats->nr_samples;
2340 }
2341 
2342 void hists__inc_nr_events(struct hists *hists)
2343 {
2344 	hists_stats__inc(&hists->stats);
2345 }
2346 
2347 void hists__inc_nr_samples(struct hists *hists, bool filtered)
2348 {
2349 	hists_stats__inc(&hists->stats);
2350 	if (!filtered)
2351 		hists->stats.nr_non_filtered_samples++;
2352 }
2353 
2354 void hists__inc_nr_lost_samples(struct hists *hists, u32 lost)
2355 {
2356 	hists->stats.nr_lost_samples += lost;
2357 }
2358 
2359 static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
2360 						 struct hist_entry *pair)
2361 {
2362 	struct rb_root_cached *root;
2363 	struct rb_node **p;
2364 	struct rb_node *parent = NULL;
2365 	struct hist_entry *he;
2366 	int64_t cmp;
2367 	bool leftmost = true;
2368 
2369 	if (hists__has(hists, need_collapse))
2370 		root = &hists->entries_collapsed;
2371 	else
2372 		root = hists->entries_in;
2373 
2374 	p = &root->rb_root.rb_node;
2375 
2376 	while (*p != NULL) {
2377 		parent = *p;
2378 		he = rb_entry(parent, struct hist_entry, rb_node_in);
2379 
2380 		cmp = hist_entry__collapse(he, pair);
2381 
2382 		if (!cmp)
2383 			goto out;
2384 
2385 		if (cmp < 0)
2386 			p = &(*p)->rb_left;
2387 		else {
2388 			p = &(*p)->rb_right;
2389 			leftmost = false;
2390 		}
2391 	}
2392 
2393 	he = hist_entry__new(pair, true);
2394 	if (he) {
2395 		memset(&he->stat, 0, sizeof(he->stat));
2396 		he->hists = hists;
2397 		if (symbol_conf.cumulate_callchain)
2398 			memset(he->stat_acc, 0, sizeof(*he->stat_acc));
2399 		rb_link_node(&he->rb_node_in, parent, p);
2400 		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2401 		hists__inc_stats(hists, he);
2402 		he->dummy = true;
2403 	}
2404 out:
2405 	return he;
2406 }
2407 
2408 static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
2409 						    struct rb_root_cached *root,
2410 						    struct hist_entry *pair)
2411 {
2412 	struct rb_node **p;
2413 	struct rb_node *parent = NULL;
2414 	struct hist_entry *he;
2415 	struct perf_hpp_fmt *fmt;
2416 	bool leftmost = true;
2417 
2418 	p = &root->rb_root.rb_node;
2419 	while (*p != NULL) {
2420 		int64_t cmp = 0;
2421 
2422 		parent = *p;
2423 		he = rb_entry(parent, struct hist_entry, rb_node_in);
2424 
2425 		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2426 			cmp = fmt->collapse(fmt, he, pair);
2427 			if (cmp)
2428 				break;
2429 		}
2430 		if (!cmp)
2431 			goto out;
2432 
2433 		if (cmp < 0)
2434 			p = &parent->rb_left;
2435 		else {
2436 			p = &parent->rb_right;
2437 			leftmost = false;
2438 		}
2439 	}
2440 
2441 	he = hist_entry__new(pair, true);
2442 	if (he) {
2443 		rb_link_node(&he->rb_node_in, parent, p);
2444 		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2445 
2446 		he->dummy = true;
2447 		he->hists = hists;
2448 		memset(&he->stat, 0, sizeof(he->stat));
2449 		hists__inc_stats(hists, he);
2450 	}
2451 out:
2452 	return he;
2453 }
2454 
2455 static struct hist_entry *hists__find_entry(struct hists *hists,
2456 					    struct hist_entry *he)
2457 {
2458 	struct rb_node *n;
2459 
2460 	if (hists__has(hists, need_collapse))
2461 		n = hists->entries_collapsed.rb_root.rb_node;
2462 	else
2463 		n = hists->entries_in->rb_root.rb_node;
2464 
2465 	while (n) {
2466 		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
2467 		int64_t cmp = hist_entry__collapse(iter, he);
2468 
2469 		if (cmp < 0)
2470 			n = n->rb_left;
2471 		else if (cmp > 0)
2472 			n = n->rb_right;
2473 		else
2474 			return iter;
2475 	}
2476 
2477 	return NULL;
2478 }
2479 
2480 static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
2481 						      struct hist_entry *he)
2482 {
2483 	struct rb_node *n = root->rb_root.rb_node;
2484 
2485 	while (n) {
2486 		struct hist_entry *iter;
2487 		struct perf_hpp_fmt *fmt;
2488 		int64_t cmp = 0;
2489 
2490 		iter = rb_entry(n, struct hist_entry, rb_node_in);
2491 		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2492 			cmp = fmt->collapse(fmt, iter, he);
2493 			if (cmp)
2494 				break;
2495 		}
2496 
2497 		if (cmp < 0)
2498 			n = n->rb_left;
2499 		else if (cmp > 0)
2500 			n = n->rb_right;
2501 		else
2502 			return iter;
2503 	}
2504 
2505 	return NULL;
2506 }
2507 
2508 static void hists__match_hierarchy(struct rb_root_cached *leader_root,
2509 				   struct rb_root_cached *other_root)
2510 {
2511 	struct rb_node *nd;
2512 	struct hist_entry *pos, *pair;
2513 
2514 	for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
2515 		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2516 		pair = hists__find_hierarchy_entry(other_root, pos);
2517 
2518 		if (pair) {
2519 			hist_entry__add_pair(pair, pos);
2520 			hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
2521 		}
2522 	}
2523 }
2524 
2525 /*
2526  * Look for pairs to link to the leader buckets (hist_entries):
2527  */
2528 void hists__match(struct hists *leader, struct hists *other)
2529 {
2530 	struct rb_root_cached *root;
2531 	struct rb_node *nd;
2532 	struct hist_entry *pos, *pair;
2533 
2534 	if (symbol_conf.report_hierarchy) {
2535 		/* hierarchy report always collapses entries */
2536 		return hists__match_hierarchy(&leader->entries_collapsed,
2537 					      &other->entries_collapsed);
2538 	}
2539 
2540 	if (hists__has(leader, need_collapse))
2541 		root = &leader->entries_collapsed;
2542 	else
2543 		root = leader->entries_in;
2544 
2545 	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2546 		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2547 		pair = hists__find_entry(other, pos);
2548 
2549 		if (pair)
2550 			hist_entry__add_pair(pair, pos);
2551 	}
2552 }
2553 
2554 static int hists__link_hierarchy(struct hists *leader_hists,
2555 				 struct hist_entry *parent,
2556 				 struct rb_root_cached *leader_root,
2557 				 struct rb_root_cached *other_root)
2558 {
2559 	struct rb_node *nd;
2560 	struct hist_entry *pos, *leader;
2561 
2562 	for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
2563 		pos = rb_entry(nd, struct hist_entry, rb_node_in);
2564 
2565 		if (hist_entry__has_pairs(pos)) {
2566 			bool found = false;
2567 
2568 			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
2569 				if (leader->hists == leader_hists) {
2570 					found = true;
2571 					break;
2572 				}
2573 			}
2574 			if (!found)
2575 				return -1;
2576 		} else {
2577 			leader = add_dummy_hierarchy_entry(leader_hists,
2578 							   leader_root, pos);
2579 			if (leader == NULL)
2580 				return -1;
2581 
2582 			/* the parent must come from the leader hierarchy, not from pos */
2583 			leader->parent_he = parent;
2584 
2585 			hist_entry__add_pair(pos, leader);
2586 		}
2587 
2588 		if (!pos->leaf) {
2589 			if (hists__link_hierarchy(leader_hists, leader,
2590 						  &leader->hroot_in,
2591 						  &pos->hroot_in) < 0)
2592 				return -1;
2593 		}
2594 	}
2595 	return 0;
2596 }
2597 
2598 /*
2599  * Look for entries in the other hists that are not present in the leader;
2600  * if we find them, just add a dummy entry on the leader hists, with
2601  * period=0, nr_events=0, to serve as the list header.
2602  */
2603 int hists__link(struct hists *leader, struct hists *other)
2604 {
2605 	struct rb_root_cached *root;
2606 	struct rb_node *nd;
2607 	struct hist_entry *pos, *pair;
2608 
2609 	if (symbol_conf.report_hierarchy) {
2610 		/* hierarchy report always collapses entries */
2611 		return hists__link_hierarchy(leader, NULL,
2612 					     &leader->entries_collapsed,
2613 					     &other->entries_collapsed);
2614 	}
2615 
2616 	if (hists__has(other, need_collapse))
2617 		root = &other->entries_collapsed;
2618 	else
2619 		root = other->entries_in;
2620 
2621 	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2622 		pos = rb_entry(nd, struct hist_entry, rb_node_in);
2623 
2624 		if (!hist_entry__has_pairs(pos)) {
2625 			pair = hists__add_dummy_entry(leader, pos);
2626 			if (pair == NULL)
2627 				return -1;
2628 			hist_entry__add_pair(pos, pair);
2629 		}
2630 	}
2631 
2632 	return 0;
2633 }
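
/*
 * hists__match() and hists__link() are designed to be used together,
 * e.g. by perf diff style code, as a sketch:
 *
 *	hists__match(leader_hists, other_hists);
 *	if (hists__link(leader_hists, other_hists) < 0)
 *		return -1;
 *
 * After this every entry in 'other' has a pair in 'leader', real or
 * dummy, so the two histograms can be walked side by side.
 */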
2634 
2635 int hists__unlink(struct hists *hists)
2636 {
2637 	struct rb_root_cached *root;
2638 	struct rb_node *nd;
2639 	struct hist_entry *pos;
2640 
2641 	if (hists__has(hists, need_collapse))
2642 		root = &hists->entries_collapsed;
2643 	else
2644 		root = hists->entries_in;
2645 
2646 	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2647 		pos = rb_entry(nd, struct hist_entry, rb_node_in);
2648 		list_del_init(&pos->pairs.node);
2649 	}
2650 
2651 	return 0;
2652 }
2653 
2654 void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
2655 			  struct perf_sample *sample, bool nonany_branch_mode,
2656 			  u64 *total_cycles)
2657 {
2658 	struct branch_info *bi;
2659 	struct branch_entry *entries = perf_sample__branch_entries(sample);
2660 
2661 	/* If we have branch cycles, always annotate them. */
2662 	if (bs && bs->nr && entries[0].flags.cycles) {
2663 		int i;
2664 
2665 		bi = sample__resolve_bstack(sample, al);
2666 		if (bi) {
2667 			struct addr_map_symbol *prev = NULL;
2668 
2669 			/*
2670 			 * Ignore errors; we still want to process
2671 			 * the other entries.
2672 			 *
2673 			 * For non-standard branch modes, always
2674 			 * force no IPC (prev == NULL).
2675 			 *
2676 			 * Note that perf stores branches reversed from
2677 			 * program order!
2678 			 */
2679 			for (i = bs->nr - 1; i >= 0; i--) {
2680 				addr_map_symbol__account_cycles(&bi[i].from,
2681 					nonany_branch_mode ? NULL : prev,
2682 					bi[i].flags.cycles);
2683 				prev = &bi[i].to;
2684 
2685 				if (total_cycles)
2686 					*total_cycles += bi[i].flags.cycles;
2687 			}
2688 			free(bi);
2689 		}
2690 	}
2691 }
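
/*
 * Sketch of a caller, from a sample-processing path ('al' and 'sample'
 * assumed already resolved by the caller):
 *
 *	u64 total_cycles = 0;
 *
 *	hist__account_cycles(sample->branch_stack, al, sample,
 *			     false, &total_cycles);
 */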
2692 
2693 size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp,
2694 				 bool skip_empty)
2695 {
2696 	struct evsel *pos;
2697 	size_t ret = 0;
2698 
2699 	evlist__for_each_entry(evlist, pos) {
2700 		struct hists *hists = evsel__hists(pos);
2701 
2702 		if (skip_empty && !hists->stats.nr_samples && !hists->stats.nr_lost_samples)
2703 			continue;
2704 
2705 		ret += fprintf(fp, "%s stats:\n", evsel__name(pos));
2706 		if (hists->stats.nr_samples)
2707 			ret += fprintf(fp, "%16s events: %10d\n",
2708 				       "SAMPLE", hists->stats.nr_samples);
2709 		if (hists->stats.nr_lost_samples)
2710 			ret += fprintf(fp, "%16s events: %10d\n",
2711 				       "LOST_SAMPLES", hists->stats.nr_lost_samples);
2712 	}
2713 
2714 	return ret;
2715 }
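
/*
 * The output has one block per evsel, shaped like (values illustrative):
 *
 *	cycles stats:
 *	          SAMPLE events:      12345
 *	    LOST_SAMPLES events:          2
 */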
2716 
2717 
2718 u64 hists__total_period(struct hists *hists)
2719 {
2720 	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
2721 		hists->stats.total_period;
2722 }
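
/*
 * This is the denominator used for overhead percentages, e.g. (sketch):
 *
 *	u64 total = hists__total_period(hists);
 *	double percent = total ? 100.0 * he->stat.period / total : 0.0;
 */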
2723 
2724 int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
2725 {
2726 	char unit;
2727 	int printed;
2728 	const struct dso *dso = hists->dso_filter;
2729 	struct thread *thread = hists->thread_filter;
2730 	int socket_id = hists->socket_filter;
2731 	unsigned long nr_samples = hists->stats.nr_samples;
2732 	u64 nr_events = hists->stats.total_period;
2733 	struct evsel *evsel = hists_to_evsel(hists);
2734 	const char *ev_name = evsel__name(evsel);
2735 	char buf[512], sample_freq_str[64] = "";
2736 	size_t buflen = sizeof(buf);
2737 	char ref[30] = " show reference callgraph, ";
2738 	bool enable_ref = false;
2739 
2740 	if (symbol_conf.filter_relative) {
2741 		nr_samples = hists->stats.nr_non_filtered_samples;
2742 		nr_events = hists->stats.total_non_filtered_period;
2743 	}
2744 
2745 	if (evsel__is_group_event(evsel)) {
2746 		struct evsel *pos;
2747 
2748 		evsel__group_desc(evsel, buf, buflen);
2749 		ev_name = buf;
2750 
2751 		for_each_group_member(pos, evsel) {
2752 			struct hists *pos_hists = evsel__hists(pos);
2753 
2754 			if (symbol_conf.filter_relative) {
2755 				nr_samples += pos_hists->stats.nr_non_filtered_samples;
2756 				nr_events += pos_hists->stats.total_non_filtered_period;
2757 			} else {
2758 				nr_samples += pos_hists->stats.nr_samples;
2759 				nr_events += pos_hists->stats.total_period;
2760 			}
2761 		}
2762 	}
2763 
2764 	if (symbol_conf.show_ref_callgraph &&
2765 	    strstr(ev_name, "call-graph=no"))
2766 		enable_ref = true;
2767 
2768 	if (show_freq)
2769 		scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->core.attr.sample_freq);
2770 
2771 	nr_samples = convert_unit(nr_samples, &unit);
2772 	printed = scnprintf(bf, size,
2773 			   "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
2774 			   nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
2775 			   ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);
2776 
2777 
2778 	if (hists->uid_filter_str)
2779 		printed += scnprintf(bf + printed, size - printed,
2780 				    ", UID: %s", hists->uid_filter_str);
2781 	if (thread) {
2782 		if (hists__has(hists, thread)) {
2783 			printed += scnprintf(bf + printed, size - printed,
2784 				    ", Thread: %s(%d)",
2785 				    (thread__comm_set(thread) ? thread__comm_str(thread) : ""),
2786 					thread__tid(thread));
2787 		} else {
2788 			printed += scnprintf(bf + printed, size - printed,
2789 				    ", Thread: %s",
2790 				    (thread__comm_set(thread) ? thread__comm_str(thread) : ""));
2791 		}
2792 	}
2793 	if (dso)
2794 		printed += scnprintf(bf + printed, size - printed,
2795 				    ", DSO: %s", dso->short_name);
2796 	if (socket_id > -1)
2797 		printed += scnprintf(bf + printed, size - printed,
2798 				    ", Processor Socket: %d", socket_id);
2799 
2800 	return printed;
2801 }
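
/*
 * An example of the resulting title string (values illustrative):
 *
 *	Samples: 4K of event 'cycles', 4000 Hz, Event count (approx.): 3086733009
 */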
2802 
2803 int parse_filter_percentage(const struct option *opt __maybe_unused,
2804 			    const char *arg, int unset __maybe_unused)
2805 {
2806 	if (!strcmp(arg, "relative"))
2807 		symbol_conf.filter_relative = true;
2808 	else if (!strcmp(arg, "absolute"))
2809 		symbol_conf.filter_relative = false;
2810 	else {
2811 		pr_debug("Invalid percentage: %s\n", arg);
2812 		return -1;
2813 	}
2814 
2815 	return 0;
2816 }
2817 
2818 int perf_hist_config(const char *var, const char *value)
2819 {
2820 	if (!strcmp(var, "hist.percentage"))
2821 		return parse_filter_percentage(NULL, value, 0);
2822 
2823 	return 0;
2824 }
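
/*
 * This hooks hist.percentage into the perfconfig machinery, so the
 * default can also be set from ~/.perfconfig:
 *
 *	[hist]
 *		percentage = absolute
 */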
2825 
2826 int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
2827 {
2828 	memset(hists, 0, sizeof(*hists));
2829 	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
2830 	hists->entries_in = &hists->entries_in_array[0];
2831 	hists->entries_collapsed = RB_ROOT_CACHED;
2832 	hists->entries = RB_ROOT_CACHED;
2833 	mutex_init(&hists->lock);
2834 	hists->socket_filter = -1;
2835 	hists->hpp_list = hpp_list;
2836 	INIT_LIST_HEAD(&hists->hpp_formats);
2837 	return 0;
2838 }
2839 
2840 static void hists__delete_remaining_entries(struct rb_root_cached *root)
2841 {
2842 	struct rb_node *node;
2843 	struct hist_entry *he;
2844 
2845 	while (!RB_EMPTY_ROOT(&root->rb_root)) {
2846 		node = rb_first_cached(root);
2847 		rb_erase_cached(node, root);
2848 
2849 		he = rb_entry(node, struct hist_entry, rb_node_in);
2850 		hist_entry__delete(he);
2851 	}
2852 }
2853 
2854 static void hists__delete_all_entries(struct hists *hists)
2855 {
2856 	hists__delete_entries(hists);
2857 	hists__delete_remaining_entries(&hists->entries_in_array[0]);
2858 	hists__delete_remaining_entries(&hists->entries_in_array[1]);
2859 	hists__delete_remaining_entries(&hists->entries_collapsed);
2860 }
2861 
2862 static void hists_evsel__exit(struct evsel *evsel)
2863 {
2864 	struct hists *hists = evsel__hists(evsel);
2865 	struct perf_hpp_fmt *fmt, *pos;
2866 	struct perf_hpp_list_node *node, *tmp;
2867 
2868 	hists__delete_all_entries(hists);
2869 
2870 	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
2871 		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
2872 			list_del_init(&fmt->list);
2873 			free(fmt);
2874 		}
2875 		list_del_init(&node->list);
2876 		free(node);
2877 	}
2878 }
2879 
2880 static int hists_evsel__init(struct evsel *evsel)
2881 {
2882 	struct hists *hists = evsel__hists(evsel);
2883 
2884 	__hists__init(hists, &perf_hpp_list);
2885 	return 0;
2886 }
2887 
2888 /*
2889  * hists_evsel__exit() above already frees the hist_entries stored in
2890  * the rbtrees, so no extra teardown is needed here.
2891  */
2892 
2893 int hists__init(void)
2894 {
2895 	int err = evsel__object_config(sizeof(struct hists_evsel),
2896 				       hists_evsel__init, hists_evsel__exit);
2897 	if (err)
2898 		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
2899 
2900 	return err;
2901 }
2902 
2903 void perf_hpp_list__init(struct perf_hpp_list *list)
2904 {
2905 	INIT_LIST_HEAD(&list->fields);
2906 	INIT_LIST_HEAD(&list->sorts);
2907 }
2908