xref: /openbmc/linux/tools/perf/util/hist.c (revision 55b37d9c)
1 // SPDX-License-Identifier: GPL-2.0
2 #include "callchain.h"
3 #include "debug.h"
4 #include "dso.h"
5 #include "build-id.h"
6 #include "hist.h"
7 #include "kvm-stat.h"
8 #include "map.h"
9 #include "map_symbol.h"
10 #include "branch.h"
11 #include "mem-events.h"
12 #include "session.h"
13 #include "namespaces.h"
14 #include "cgroup.h"
15 #include "sort.h"
16 #include "units.h"
17 #include "evlist.h"
18 #include "evsel.h"
19 #include "annotate.h"
20 #include "srcline.h"
21 #include "symbol.h"
22 #include "thread.h"
23 #include "block-info.h"
24 #include "ui/progress.h"
25 #include <errno.h>
26 #include <math.h>
27 #include <inttypes.h>
28 #include <sys/param.h>
29 #include <linux/rbtree.h>
30 #include <linux/string.h>
31 #include <linux/time64.h>
32 #include <linux/zalloc.h>
33 
34 static bool hists__filter_entry_by_dso(struct hists *hists,
35 				       struct hist_entry *he);
36 static bool hists__filter_entry_by_thread(struct hists *hists,
37 					  struct hist_entry *he);
38 static bool hists__filter_entry_by_symbol(struct hists *hists,
39 					  struct hist_entry *he);
40 static bool hists__filter_entry_by_socket(struct hists *hists,
41 					  struct hist_entry *he);
42 
43 u16 hists__col_len(struct hists *hists, enum hist_column col)
44 {
45 	return hists->col_len[col];
46 }
47 
48 void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
49 {
50 	hists->col_len[col] = len;
51 }
52 
53 bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
54 {
55 	if (len > hists__col_len(hists, col)) {
56 		hists__set_col_len(hists, col, len);
57 		return true;
58 	}
59 	return false;
60 }
61 
62 void hists__reset_col_len(struct hists *hists)
63 {
64 	enum hist_column col;
65 
66 	for (col = 0; col < HISTC_NR_COLS; ++col)
67 		hists__set_col_len(hists, col, 0);
68 }
69 
70 static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
71 {
72 	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
73 
74 	if (hists__col_len(hists, dso) < unresolved_col_width &&
75 	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
76 	    !symbol_conf.dso_list)
77 		hists__set_col_len(hists, dso, unresolved_col_width);
78 }
79 
80 void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
81 {
82 	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
83 	int symlen;
84 	u16 len;
85 
86 	if (h->block_info)
87 		return;
88 	/*
89 	 * +4 accounts for '[x] ' priv level info
90 	 * +2 accounts for 0x prefix on raw addresses
91 	 * +3 accounts for ' y ' symtab origin info
92 	 */
93 	if (h->ms.sym) {
94 		symlen = h->ms.sym->namelen + 4;
95 		if (verbose > 0)
96 			symlen += BITS_PER_LONG / 4 + 2 + 3;
97 		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
98 	} else {
99 		symlen = unresolved_col_width + 4 + 2;
100 		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
101 		hists__set_unres_dso_col_len(hists, HISTC_DSO);
102 	}
103 
104 	len = thread__comm_len(h->thread);
105 	if (hists__new_col_len(hists, HISTC_COMM, len))
106 		hists__set_col_len(hists, HISTC_THREAD, len + 8);
107 
108 	if (h->ms.map) {
109 		len = dso__name_len(map__dso(h->ms.map));
110 		hists__new_col_len(hists, HISTC_DSO, len);
111 	}
112 
113 	if (h->parent)
114 		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
115 
116 	if (h->branch_info) {
117 		if (h->branch_info->from.ms.sym) {
118 			symlen = (int)h->branch_info->from.ms.sym->namelen + 4;
119 			if (verbose > 0)
120 				symlen += BITS_PER_LONG / 4 + 2 + 3;
121 			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
122 
123 			symlen = dso__name_len(map__dso(h->branch_info->from.ms.map));
124 			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
125 		} else {
126 			symlen = unresolved_col_width + 4 + 2;
127 			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
128 			hists__new_col_len(hists, HISTC_ADDR_FROM, symlen);
129 			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
130 		}
131 
132 		if (h->branch_info->to.ms.sym) {
133 			symlen = (int)h->branch_info->to.ms.sym->namelen + 4;
134 			if (verbose > 0)
135 				symlen += BITS_PER_LONG / 4 + 2 + 3;
136 			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
137 
138 			symlen = dso__name_len(map__dso(h->branch_info->to.ms.map));
139 			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
140 		} else {
141 			symlen = unresolved_col_width + 4 + 2;
142 			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
143 			hists__new_col_len(hists, HISTC_ADDR_TO, symlen);
144 			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
145 		}
146 
147 		if (h->branch_info->srcline_from)
148 			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
149 					strlen(h->branch_info->srcline_from));
150 		if (h->branch_info->srcline_to)
151 			hists__new_col_len(hists, HISTC_SRCLINE_TO,
152 					strlen(h->branch_info->srcline_to));
153 	}
154 
155 	if (h->mem_info) {
156 		if (h->mem_info->daddr.ms.sym) {
157 			symlen = (int)h->mem_info->daddr.ms.sym->namelen + 4
158 			       + unresolved_col_width + 2;
159 			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
160 					   symlen);
161 			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
162 					   symlen + 1);
163 		} else {
164 			symlen = unresolved_col_width + 4 + 2;
165 			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
166 					   symlen);
167 			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
168 					   symlen);
169 		}
170 
171 		if (h->mem_info->iaddr.ms.sym) {
172 			symlen = (int)h->mem_info->iaddr.ms.sym->namelen + 4
173 			       + unresolved_col_width + 2;
174 			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
175 					   symlen);
176 		} else {
177 			symlen = unresolved_col_width + 4 + 2;
178 			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
179 					   symlen);
180 		}
181 
182 		if (h->mem_info->daddr.ms.map) {
183 			symlen = dso__name_len(map__dso(h->mem_info->daddr.ms.map));
184 			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
185 					   symlen);
186 		} else {
187 			symlen = unresolved_col_width + 4 + 2;
188 			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
189 		}
190 
191 		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
192 				   unresolved_col_width + 4 + 2);
193 
194 		hists__new_col_len(hists, HISTC_MEM_DATA_PAGE_SIZE,
195 				   unresolved_col_width + 4 + 2);
196 
197 	} else {
198 		symlen = unresolved_col_width + 4 + 2;
199 		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
200 		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
201 		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
202 	}
203 
204 	hists__new_col_len(hists, HISTC_CGROUP, 6);
205 	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
206 	hists__new_col_len(hists, HISTC_CPU, 3);
207 	hists__new_col_len(hists, HISTC_SOCKET, 6);
208 	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
209 	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
210 	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
211 	hists__new_col_len(hists, HISTC_MEM_LVL, 36 + 3);
212 	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
213 	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
214 	hists__new_col_len(hists, HISTC_MEM_BLOCKED, 10);
215 	hists__new_col_len(hists, HISTC_LOCAL_INS_LAT, 13);
216 	hists__new_col_len(hists, HISTC_GLOBAL_INS_LAT, 13);
217 	hists__new_col_len(hists, HISTC_LOCAL_P_STAGE_CYC, 13);
218 	hists__new_col_len(hists, HISTC_GLOBAL_P_STAGE_CYC, 13);
219 	hists__new_col_len(hists, HISTC_ADDR, BITS_PER_LONG / 4 + 2);
220 
221 	if (symbol_conf.nanosecs)
222 		hists__new_col_len(hists, HISTC_TIME, 16);
223 	else
224 		hists__new_col_len(hists, HISTC_TIME, 12);
225 	hists__new_col_len(hists, HISTC_CODE_PAGE_SIZE, 6);
226 
227 	if (h->srcline) {
228 		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
229 		hists__new_col_len(hists, HISTC_SRCLINE, len);
230 	}
231 
232 	if (h->srcfile)
233 		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));
234 
235 	if (h->transaction)
236 		hists__new_col_len(hists, HISTC_TRANSACTION,
237 				   hist_entry__transaction_len());
238 
239 	if (h->trace_output)
240 		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
241 
242 	if (h->cgroup) {
243 		const char *cgrp_name = "unknown";
244 		struct cgroup *cgrp = cgroup__find(maps__machine(h->ms.maps)->env,
245 						   h->cgroup);
246 		if (cgrp != NULL)
247 			cgrp_name = cgrp->name;
248 
249 		hists__new_col_len(hists, HISTC_CGROUP, strlen(cgrp_name));
250 	}
251 }
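
/*
 * Worked example for the width accounting above (editorial note, not
 * part of the original file): on a 64-bit build BITS_PER_LONG / 4 is
 * 16, the number of hex digits in an address.  A resolved symbol
 * "main" (namelen 4) needs 4 + 4 = 8 columns, or 8 + 16 + 2 + 3 = 29
 * in verbose mode for the raw address, its 0x prefix and the symtab
 * origin character.  An unresolved sample needs 16 + 4 + 2 = 22
 * columns: an address plus the priv level and 0x decorations.
 */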
252 
253 void hists__output_recalc_col_len(struct hists *hists, int max_rows)
254 {
255 	struct rb_node *next = rb_first_cached(&hists->entries);
256 	struct hist_entry *n;
257 	int row = 0;
258 
259 	hists__reset_col_len(hists);
260 
261 	while (next && row++ < max_rows) {
262 		n = rb_entry(next, struct hist_entry, rb_node);
263 		if (!n->filtered)
264 			hists__calc_col_len(hists, n);
265 		next = rb_next(&n->rb_node);
266 	}
267 }
268 
269 static void he_stat__add_cpumode_period(struct he_stat *he_stat,
270 					unsigned int cpumode, u64 period)
271 {
272 	switch (cpumode) {
273 	case PERF_RECORD_MISC_KERNEL:
274 		he_stat->period_sys += period;
275 		break;
276 	case PERF_RECORD_MISC_USER:
277 		he_stat->period_us += period;
278 		break;
279 	case PERF_RECORD_MISC_GUEST_KERNEL:
280 		he_stat->period_guest_sys += period;
281 		break;
282 	case PERF_RECORD_MISC_GUEST_USER:
283 		he_stat->period_guest_us += period;
284 		break;
285 	default:
286 		break;
287 	}
288 }
289 
290 static long hist_time(unsigned long htime)
291 {
292 	unsigned long time_quantum = symbol_conf.time_quantum;
293 	if (time_quantum)
294 		return (htime / time_quantum) * time_quantum;
295 	return htime;
296 }
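
/*
 * Illustrative note (not part of the original file): hist_time()
 * rounds a timestamp down to a multiple of symbol_conf.time_quantum,
 * bucketing samples for the 'time' sort key.  For example, with a
 * 100ms quantum (100000000ns), htime = 123456789 yields
 * (123456789 / 100000000) * 100000000 = 100000000, so every sample
 * in [100ms, 200ms) lands in the same bucket.
 */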
297 
298 static void he_stat__add_period(struct he_stat *he_stat, u64 period)
299 {
300 	he_stat->period		+= period;
301 	he_stat->nr_events	+= 1;
302 }
303 
304 static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
305 {
306 	dest->period		+= src->period;
307 	dest->period_sys	+= src->period_sys;
308 	dest->period_us		+= src->period_us;
309 	dest->period_guest_sys	+= src->period_guest_sys;
310 	dest->period_guest_us	+= src->period_guest_us;
311 	dest->nr_events		+= src->nr_events;
312 }
313 
314 static void he_stat__decay(struct he_stat *he_stat)
315 {
316 	he_stat->period = (he_stat->period * 7) / 8;
317 	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
318 	/* XXX need decay for weight too? */
319 }
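
/*
 * Illustrative note (not part of the original file): the 7/8 factor
 * is an exponential decay applied on every decay pass (e.g. each
 * 'perf top' refresh), so a period P shrinks to roughly P * (7/8)^N
 * after N rounds: 1024 -> 896 -> 784 -> 686 -> ...  Integer division
 * eventually drives small periods to zero, at which point
 * hists__decay_entry() below reports the entry as dead and it is
 * deleted.
 */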
320 
321 static void hists__delete_entry(struct hists *hists, struct hist_entry *he);
322 
323 static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
324 {
325 	u64 prev_period = he->stat.period;
326 	u64 diff;
327 
328 	if (prev_period == 0)
329 		return true;
330 
331 	he_stat__decay(&he->stat);
332 	if (symbol_conf.cumulate_callchain)
333 		he_stat__decay(he->stat_acc);
334 	decay_callchain(he->callchain);
335 
336 	diff = prev_period - he->stat.period;
337 
338 	if (!he->depth) {
339 		hists->stats.total_period -= diff;
340 		if (!he->filtered)
341 			hists->stats.total_non_filtered_period -= diff;
342 	}
343 
344 	if (!he->leaf) {
345 		struct hist_entry *child;
346 		struct rb_node *node = rb_first_cached(&he->hroot_out);
347 		while (node) {
348 			child = rb_entry(node, struct hist_entry, rb_node);
349 			node = rb_next(node);
350 
351 			if (hists__decay_entry(hists, child))
352 				hists__delete_entry(hists, child);
353 		}
354 	}
355 
356 	return he->stat.period == 0;
357 }
358 
359 static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
360 {
361 	struct rb_root_cached *root_in;
362 	struct rb_root_cached *root_out;
363 
364 	if (he->parent_he) {
365 		root_in  = &he->parent_he->hroot_in;
366 		root_out = &he->parent_he->hroot_out;
367 	} else {
368 		if (hists__has(hists, need_collapse))
369 			root_in = &hists->entries_collapsed;
370 		else
371 			root_in = hists->entries_in;
372 		root_out = &hists->entries;
373 	}
374 
375 	rb_erase_cached(&he->rb_node_in, root_in);
376 	rb_erase_cached(&he->rb_node, root_out);
377 
378 	--hists->nr_entries;
379 	if (!he->filtered)
380 		--hists->nr_non_filtered_entries;
381 
382 	hist_entry__delete(he);
383 }
384 
385 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
386 {
387 	struct rb_node *next = rb_first_cached(&hists->entries);
388 	struct hist_entry *n;
389 
390 	while (next) {
391 		n = rb_entry(next, struct hist_entry, rb_node);
392 		next = rb_next(&n->rb_node);
393 		if (((zap_user && n->level == '.') ||
394 		     (zap_kernel && n->level != '.') ||
395 		     hists__decay_entry(hists, n))) {
396 			hists__delete_entry(hists, n);
397 		}
398 	}
399 }
400 
401 void hists__delete_entries(struct hists *hists)
402 {
403 	struct rb_node *next = rb_first_cached(&hists->entries);
404 	struct hist_entry *n;
405 
406 	while (next) {
407 		n = rb_entry(next, struct hist_entry, rb_node);
408 		next = rb_next(&n->rb_node);
409 
410 		hists__delete_entry(hists, n);
411 	}
412 }
413 
414 struct hist_entry *hists__get_entry(struct hists *hists, int idx)
415 {
416 	struct rb_node *next = rb_first_cached(&hists->entries);
417 	struct hist_entry *n;
418 	int i = 0;
419 
420 	while (next) {
421 		n = rb_entry(next, struct hist_entry, rb_node);
422 		if (i == idx)
423 			return n;
424 
425 		next = rb_next(&n->rb_node);
426 		i++;
427 	}
428 
429 	return NULL;
430 }
431 
432 /*
433  * histogram, sorted on item, collects periods
434  */
435 
436 static int hist_entry__init(struct hist_entry *he,
437 			    struct hist_entry *template,
438 			    bool sample_self,
439 			    size_t callchain_size)
440 {
441 	*he = *template;
442 	he->callchain_size = callchain_size;
443 
444 	if (symbol_conf.cumulate_callchain) {
445 		he->stat_acc = malloc(sizeof(he->stat));
446 		if (he->stat_acc == NULL)
447 			return -ENOMEM;
448 		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
449 		if (!sample_self)
450 			memset(&he->stat, 0, sizeof(he->stat));
451 	}
452 
453 	he->ms.map = map__get(he->ms.map);
454 
455 	if (he->branch_info) {
456 		/*
457 		 * This branch info is (part of) the data allocated by
458 		 * sample__resolve_bstack() and will be freed after
459 		 * adding new entries, so we need to save a copy.
460 		 */
461 		he->branch_info = malloc(sizeof(*he->branch_info));
462 		if (he->branch_info == NULL)
463 			goto err;
464 
465 		memcpy(he->branch_info, template->branch_info,
466 		       sizeof(*he->branch_info));
467 
468 		he->branch_info->from.ms.map = map__get(he->branch_info->from.ms.map);
469 		he->branch_info->to.ms.map = map__get(he->branch_info->to.ms.map);
470 	}
471 
472 	if (he->mem_info) {
473 		he->mem_info->iaddr.ms.map = map__get(he->mem_info->iaddr.ms.map);
474 		he->mem_info->daddr.ms.map = map__get(he->mem_info->daddr.ms.map);
475 	}
476 
477 	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
478 		callchain_init(he->callchain);
479 
480 	if (he->raw_data) {
481 		he->raw_data = memdup(he->raw_data, he->raw_size);
482 		if (he->raw_data == NULL)
483 			goto err_infos;
484 	}
485 
486 	if (he->srcline) {
487 		he->srcline = strdup(he->srcline);
488 		if (he->srcline == NULL)
489 			goto err_rawdata;
490 	}
491 
492 	if (symbol_conf.res_sample) {
493 		he->res_samples = calloc(sizeof(struct res_sample),
494 					symbol_conf.res_sample);
495 		if (!he->res_samples)
496 			goto err_srcline;
497 	}
498 
499 	INIT_LIST_HEAD(&he->pairs.node);
500 	thread__get(he->thread);
501 	he->hroot_in  = RB_ROOT_CACHED;
502 	he->hroot_out = RB_ROOT_CACHED;
503 
504 	if (!symbol_conf.report_hierarchy)
505 		he->leaf = true;
506 
507 	return 0;
508 
509 err_srcline:
510 	zfree(&he->srcline);
511 
512 err_rawdata:
513 	zfree(&he->raw_data);
514 
515 err_infos:
516 	if (he->branch_info) {
517 		map__put(he->branch_info->from.ms.map);
518 		map__put(he->branch_info->to.ms.map);
519 		zfree(&he->branch_info);
520 	}
521 	if (he->mem_info) {
522 		map__put(he->mem_info->iaddr.ms.map);
523 		map__put(he->mem_info->daddr.ms.map);
524 	}
525 err:
526 	map__zput(he->ms.map);
527 	zfree(&he->stat_acc);
528 	return -ENOMEM;
529 }
530 
531 static void *hist_entry__zalloc(size_t size)
532 {
533 	return zalloc(size + sizeof(struct hist_entry));
534 }
535 
536 static void hist_entry__free(void *ptr)
537 {
538 	free(ptr);
539 }
540 
541 static struct hist_entry_ops default_ops = {
542 	.new	= hist_entry__zalloc,
543 	.free	= hist_entry__free,
544 };
545 
546 static struct hist_entry *hist_entry__new(struct hist_entry *template,
547 					  bool sample_self)
548 {
549 	struct hist_entry_ops *ops = template->ops;
550 	size_t callchain_size = 0;
551 	struct hist_entry *he;
552 	int err = 0;
553 
554 	if (!ops)
555 		ops = template->ops = &default_ops;
556 
557 	if (symbol_conf.use_callchain)
558 		callchain_size = sizeof(struct callchain_root);
559 
560 	he = ops->new(callchain_size);
561 	if (he) {
562 		err = hist_entry__init(he, template, sample_self, callchain_size);
563 		if (err) {
564 			ops->free(he);
565 			he = NULL;
566 		}
567 	}
568 
569 	return he;
570 }
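
/*
 * Illustrative sketch (not part of the original file): callers of
 * hists__add_entry_ops() can override allocation with their own
 * hist_entry_ops, e.g. to embed the entry in a larger structure the
 * way 'perf c2c' does.  The names below are hypothetical.
 */
#if 0 /* example only */
struct my_hist_entry {
	u64			extra_stats;
	/* must be last: callchain storage is appended after 'he' */
	struct hist_entry	he;
};

static void *my_entry__zalloc(size_t size)
{
	/* 'size' carries the callchain storage requested by the core */
	struct my_hist_entry *my_he = zalloc(size + sizeof(*my_he));

	return my_he ? &my_he->he : NULL;
}

static void my_entry__free(void *ptr)
{
	struct my_hist_entry *my_he =
		container_of(ptr, struct my_hist_entry, he);

	free(my_he);
}

static struct hist_entry_ops my_entry_ops = {
	.new	= my_entry__zalloc,
	.free	= my_entry__free,
};
#endif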
571 
572 static u8 symbol__parent_filter(const struct symbol *parent)
573 {
574 	if (symbol_conf.exclude_other && parent == NULL)
575 		return 1 << HIST_FILTER__PARENT;
576 	return 0;
577 }
578 
579 static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
580 {
581 	if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
582 		return;
583 
584 	he->hists->callchain_period += period;
585 	if (!he->filtered)
586 		he->hists->callchain_non_filtered_period += period;
587 }
588 
589 static struct hist_entry *hists__findnew_entry(struct hists *hists,
590 					       struct hist_entry *entry,
591 					       struct addr_location *al,
592 					       bool sample_self)
593 {
594 	struct rb_node **p;
595 	struct rb_node *parent = NULL;
596 	struct hist_entry *he;
597 	int64_t cmp;
598 	u64 period = entry->stat.period;
599 	bool leftmost = true;
600 
601 	p = &hists->entries_in->rb_root.rb_node;
602 
603 	while (*p != NULL) {
604 		parent = *p;
605 		he = rb_entry(parent, struct hist_entry, rb_node_in);
606 
607 		/*
608 		 * Make sure that it receives arguments in the same order as
609 		 * hist_entry__collapse() so that we can use an appropriate
610 		 * function when searching an entry regardless of which sort
611 		 * keys were used.
612 		 */
613 		cmp = hist_entry__cmp(he, entry);
614 
615 		if (!cmp) {
616 			if (sample_self) {
617 				he_stat__add_period(&he->stat, period);
618 				hist_entry__add_callchain_period(he, period);
619 			}
620 			if (symbol_conf.cumulate_callchain)
621 				he_stat__add_period(he->stat_acc, period);
622 
623 			/*
624 			 * This mem info was allocated from sample__resolve_mem
625 			 * and will not be used anymore.
626 			 */
627 			mem_info__zput(entry->mem_info);
628 
629 			block_info__zput(entry->block_info);
630 
631 			kvm_info__zput(entry->kvm_info);
632 
633 			/* If the map of an existing hist_entry has
634 			 * become out-of-date due to an exec() or
635 			 * similar, update it.  Otherwise we will
636 			 * mis-adjust symbol addresses when computing
637 			 * the history counter to increment.
638 			 */
639 			if (he->ms.map != entry->ms.map) {
640 				map__put(he->ms.map);
641 				he->ms.map = map__get(entry->ms.map);
642 			}
643 			goto out;
644 		}
645 
646 		if (cmp < 0)
647 			p = &(*p)->rb_left;
648 		else {
649 			p = &(*p)->rb_right;
650 			leftmost = false;
651 		}
652 	}
653 
654 	he = hist_entry__new(entry, sample_self);
655 	if (!he)
656 		return NULL;
657 
658 	if (sample_self)
659 		hist_entry__add_callchain_period(he, period);
660 	hists->nr_entries++;
661 
662 	rb_link_node(&he->rb_node_in, parent, p);
663 	rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
664 out:
665 	if (sample_self)
666 		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
667 	if (symbol_conf.cumulate_callchain)
668 		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
669 	return he;
670 }
671 
672 static unsigned random_max(unsigned high)
673 {
674 	unsigned thresh = -high % high;
675 	for (;;) {
676 		unsigned r = random();
677 		if (r >= thresh)
678 			return r % high;
679 	}
680 }
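
/*
 * Illustrative note (not part of the original file): random_max()
 * avoids modulo bias by rejection sampling.  Assuming the generator
 * spans the full unsigned range, '-high % high' is
 * (UINT_MAX + 1 - high) % high, the count of leftover low values that
 * would make some residues more likely than others.  On a toy 4-bit
 * generator (16 values) with high = 3, thresh = 16 % 3 = 1, so the
 * single value 0 is rejected and the remaining 15 values map evenly,
 * five onto each residue.
 */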
681 
682 static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
683 {
684 	struct res_sample *r;
685 	int j;
686 
687 	if (he->num_res < symbol_conf.res_sample) {
688 		j = he->num_res++;
689 	} else {
690 		j = random_max(symbol_conf.res_sample);
691 	}
692 	r = &he->res_samples[j];
693 	r->time = sample->time;
694 	r->cpu = sample->cpu;
695 	r->tid = sample->tid;
696 }
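
/*
 * Illustrative note (not part of the original file): the first
 * symbol_conf.res_sample samples fill the reservoir; after that each
 * new sample overwrites a uniformly random slot.  Unlike classic
 * reservoir sampling, which replaces with decreasing probability,
 * this always replaces, so the reservoir leans toward recent samples
 * while still retaining some older ones for the res_sample browser.
 */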
697 
698 static struct hist_entry*
699 __hists__add_entry(struct hists *hists,
700 		   struct addr_location *al,
701 		   struct symbol *sym_parent,
702 		   struct branch_info *bi,
703 		   struct mem_info *mi,
704 		   struct kvm_info *ki,
705 		   struct block_info *block_info,
706 		   struct perf_sample *sample,
707 		   bool sample_self,
708 		   struct hist_entry_ops *ops)
709 {
710 	struct namespaces *ns = thread__namespaces(al->thread);
711 	struct hist_entry entry = {
712 		.thread	= al->thread,
713 		.comm = thread__comm(al->thread),
714 		.cgroup_id = {
715 			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
716 			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
717 		},
718 		.cgroup = sample->cgroup,
719 		.ms = {
720 			.maps	= al->maps,
721 			.map	= al->map,
722 			.sym	= al->sym,
723 		},
724 		.srcline = (char *) al->srcline,
725 		.socket	 = al->socket,
726 		.cpu	 = al->cpu,
727 		.cpumode = al->cpumode,
728 		.ip	 = al->addr,
729 		.level	 = al->level,
730 		.code_page_size = sample->code_page_size,
731 		.stat = {
732 			.nr_events = 1,
733 			.period	= sample->period,
734 		},
735 		.parent = sym_parent,
736 		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
737 		.hists	= hists,
738 		.branch_info = bi,
739 		.mem_info = mi,
740 		.kvm_info = ki,
741 		.block_info = block_info,
742 		.transaction = sample->transaction,
743 		.raw_data = sample->raw_data,
744 		.raw_size = sample->raw_size,
745 		.ops = ops,
746 		.time = hist_time(sample->time),
747 		.weight = sample->weight,
748 		.ins_lat = sample->ins_lat,
749 		.p_stage_cyc = sample->p_stage_cyc,
750 		.simd_flags = sample->simd_flags,
751 	}, *he = hists__findnew_entry(hists, &entry, al, sample_self);
752 
753 	if (!hists->has_callchains && he && he->callchain_size != 0)
754 		hists->has_callchains = true;
755 	if (he && symbol_conf.res_sample)
756 		hists__res_sample(he, sample);
757 	return he;
758 }
759 
760 struct hist_entry *hists__add_entry(struct hists *hists,
761 				    struct addr_location *al,
762 				    struct symbol *sym_parent,
763 				    struct branch_info *bi,
764 				    struct mem_info *mi,
765 				    struct kvm_info *ki,
766 				    struct perf_sample *sample,
767 				    bool sample_self)
768 {
769 	return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
770 				  sample, sample_self, NULL);
771 }
772 
773 struct hist_entry *hists__add_entry_ops(struct hists *hists,
774 					struct hist_entry_ops *ops,
775 					struct addr_location *al,
776 					struct symbol *sym_parent,
777 					struct branch_info *bi,
778 					struct mem_info *mi,
779 					struct kvm_info *ki,
780 					struct perf_sample *sample,
781 					bool sample_self)
782 {
783 	return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
784 				  sample, sample_self, ops);
785 }
786 
787 struct hist_entry *hists__add_entry_block(struct hists *hists,
788 					  struct addr_location *al,
789 					  struct block_info *block_info)
790 {
791 	struct hist_entry entry = {
792 		.block_info = block_info,
793 		.hists = hists,
794 		.ms = {
795 			.maps = al->maps,
796 			.map = al->map,
797 			.sym = al->sym,
798 		},
799 	}, *he = hists__findnew_entry(hists, &entry, al, false);
800 
801 	return he;
802 }
803 
804 static int
805 iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
806 		    struct addr_location *al __maybe_unused)
807 {
808 	return 0;
809 }
810 
811 static int
812 iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
813 			struct addr_location *al __maybe_unused)
814 {
815 	return 0;
816 }
817 
818 static int
819 iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
820 {
821 	struct perf_sample *sample = iter->sample;
822 	struct mem_info *mi;
823 
824 	mi = sample__resolve_mem(sample, al);
825 	if (mi == NULL)
826 		return -ENOMEM;
827 
828 	iter->priv = mi;
829 	return 0;
830 }
831 
832 static int
833 iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
834 {
835 	u64 cost;
836 	struct mem_info *mi = iter->priv;
837 	struct hists *hists = evsel__hists(iter->evsel);
838 	struct perf_sample *sample = iter->sample;
839 	struct hist_entry *he;
840 
841 	if (mi == NULL)
842 		return -EINVAL;
843 
844 	cost = sample->weight;
845 	if (!cost)
846 		cost = 1;
847 
848 	/*
849 	 * Must pass period=weight in order to get the correct
850 	 * sorting from hists__collapse_resort(), which is based solely
851 	 * on periods. We want sorting to be done on nr_events * weight,
852 	 * and this is achieved indirectly by passing period=weight
853 	 * here and accumulating it in he_stat__add_period().
854 	 */
855 	sample->period = cost;
856 
857 	he = hists__add_entry(hists, al, iter->parent, NULL, mi, NULL,
858 			      sample, true);
859 	if (!he)
860 		return -ENOMEM;
861 
862 	iter->he = he;
863 	return 0;
864 }
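
/*
 * Worked example (editorial note, not part of the original file):
 * with period=weight, two memory samples hitting the same entry with
 * costs 30 and 50 accumulate period = 80 and nr_events = 2 via
 * he_stat__add_period(), so the period-based sort ends up ordering
 * entries by total cost rather than by raw sample count.
 */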
865 
866 static int
867 iter_finish_mem_entry(struct hist_entry_iter *iter,
868 		      struct addr_location *al __maybe_unused)
869 {
870 	struct evsel *evsel = iter->evsel;
871 	struct hists *hists = evsel__hists(evsel);
872 	struct hist_entry *he = iter->he;
873 	int err = -EINVAL;
874 
875 	if (he == NULL)
876 		goto out;
877 
878 	hists__inc_nr_samples(hists, he->filtered);
879 
880 	err = hist_entry__append_callchain(he, iter->sample);
881 
882 out:
883 	/*
884 	 * We don't need to free iter->priv (mem_info) here since the mem info
885 	 * was either already freed in hists__findnew_entry() or passed to a
886 	 * new hist entry by hist_entry__new().
887 	 */
888 	iter->priv = NULL;
889 
890 	iter->he = NULL;
891 	return err;
892 }
893 
894 static int
895 iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
896 {
897 	struct branch_info *bi;
898 	struct perf_sample *sample = iter->sample;
899 
900 	bi = sample__resolve_bstack(sample, al);
901 	if (!bi)
902 		return -ENOMEM;
903 
904 	iter->curr = 0;
905 	iter->total = sample->branch_stack->nr;
906 
907 	iter->priv = bi;
908 	return 0;
909 }
910 
911 static int
912 iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
913 			     struct addr_location *al __maybe_unused)
914 {
915 	return 0;
916 }
917 
918 static int
919 iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
920 {
921 	struct branch_info *bi = iter->priv;
922 	int i = iter->curr;
923 
924 	if (bi == NULL)
925 		return 0;
926 
927 	if (iter->curr >= iter->total)
928 		return 0;
929 
930 	al->maps = bi[i].to.ms.maps;
931 	al->map = bi[i].to.ms.map;
932 	al->sym = bi[i].to.ms.sym;
933 	al->addr = bi[i].to.addr;
934 	return 1;
935 }
936 
937 static int
938 iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
939 {
940 	struct branch_info *bi;
941 	struct evsel *evsel = iter->evsel;
942 	struct hists *hists = evsel__hists(evsel);
943 	struct perf_sample *sample = iter->sample;
944 	struct hist_entry *he = NULL;
945 	int i = iter->curr;
946 	int err = 0;
947 
948 	bi = iter->priv;
949 
950 	if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
951 		goto out;
952 
953 	/*
954 	 * The report shows the percentage of total branches captured
955 	 * and not events sampled. Thus we use a pseudo period of 1.
956 	 */
957 	sample->period = 1;
958 	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;
959 
960 	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL, NULL,
961 			      sample, true);
962 	if (he == NULL)
963 		return -ENOMEM;
964 
965 	hists__inc_nr_samples(hists, he->filtered);
966 
967 out:
968 	iter->he = he;
969 	iter->curr++;
970 	return err;
971 }
972 
973 static int
974 iter_finish_branch_entry(struct hist_entry_iter *iter,
975 			 struct addr_location *al __maybe_unused)
976 {
977 	zfree(&iter->priv);
978 	iter->he = NULL;
979 
980 	return iter->curr >= iter->total ? 0 : -1;
981 }
982 
983 static int
984 iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
985 			  struct addr_location *al __maybe_unused)
986 {
987 	return 0;
988 }
989 
990 static int
991 iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
992 {
993 	struct evsel *evsel = iter->evsel;
994 	struct perf_sample *sample = iter->sample;
995 	struct hist_entry *he;
996 
997 	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
998 			      NULL, sample, true);
999 	if (he == NULL)
1000 		return -ENOMEM;
1001 
1002 	iter->he = he;
1003 	return 0;
1004 }
1005 
1006 static int
1007 iter_finish_normal_entry(struct hist_entry_iter *iter,
1008 			 struct addr_location *al __maybe_unused)
1009 {
1010 	struct hist_entry *he = iter->he;
1011 	struct evsel *evsel = iter->evsel;
1012 	struct perf_sample *sample = iter->sample;
1013 
1014 	if (he == NULL)
1015 		return 0;
1016 
1017 	iter->he = NULL;
1018 
1019 	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);
1020 
1021 	return hist_entry__append_callchain(he, sample);
1022 }
1023 
1024 static int
1025 iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
1026 			      struct addr_location *al __maybe_unused)
1027 {
1028 	struct hist_entry **he_cache;
1029 
1030 	callchain_cursor_commit(&callchain_cursor);
1031 
1032 	/*
1033 	 * This is for detecting cycles or recursion so that they're
1034 	 * accumulated only once, preventing entries from exceeding
1035 	 * 100% overhead.
1036 	 */
1037 	he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
1038 	if (he_cache == NULL)
1039 		return -ENOMEM;
1040 
1041 	iter->priv = he_cache;
1042 	iter->curr = 0;
1043 
1044 	return 0;
1045 }
1046 
1047 static int
1048 iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
1049 				 struct addr_location *al)
1050 {
1051 	struct evsel *evsel = iter->evsel;
1052 	struct hists *hists = evsel__hists(evsel);
1053 	struct perf_sample *sample = iter->sample;
1054 	struct hist_entry **he_cache = iter->priv;
1055 	struct hist_entry *he;
1056 	int err = 0;
1057 
1058 	he = hists__add_entry(hists, al, iter->parent, NULL, NULL, NULL,
1059 			      sample, true);
1060 	if (he == NULL)
1061 		return -ENOMEM;
1062 
1063 	iter->he = he;
1064 	he_cache[iter->curr++] = he;
1065 
1066 	hist_entry__append_callchain(he, sample);
1067 
1068 	/*
1069 	 * We need to re-initialize the cursor since callchain_append()
1070 	 * advanced the cursor to the end.
1071 	 */
1072 	callchain_cursor_commit(&callchain_cursor);
1073 
1074 	hists__inc_nr_samples(hists, he->filtered);
1075 
1076 	return err;
1077 }
1078 
1079 static int
1080 iter_next_cumulative_entry(struct hist_entry_iter *iter,
1081 			   struct addr_location *al)
1082 {
1083 	struct callchain_cursor_node *node;
1084 
1085 	node = callchain_cursor_current(&callchain_cursor);
1086 	if (node == NULL)
1087 		return 0;
1088 
1089 	return fill_callchain_info(al, node, iter->hide_unresolved);
1090 }
1091 
1092 static bool
1093 hist_entry__fast__sym_diff(struct hist_entry *left,
1094 			   struct hist_entry *right)
1095 {
1096 	struct symbol *sym_l = left->ms.sym;
1097 	struct symbol *sym_r = right->ms.sym;
1098 
1099 	if (!sym_l && !sym_r)
1100 		return left->ip != right->ip;
1101 
1102 	return !!_sort__sym_cmp(sym_l, sym_r);
1103 }
1104 
1105 
1106 static int
1107 iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
1108 			       struct addr_location *al)
1109 {
1110 	struct evsel *evsel = iter->evsel;
1111 	struct perf_sample *sample = iter->sample;
1112 	struct hist_entry **he_cache = iter->priv;
1113 	struct hist_entry *he;
1114 	struct hist_entry he_tmp = {
1115 		.hists = evsel__hists(evsel),
1116 		.cpu = al->cpu,
1117 		.thread = al->thread,
1118 		.comm = thread__comm(al->thread),
1119 		.ip = al->addr,
1120 		.ms = {
1121 			.maps = al->maps,
1122 			.map = al->map,
1123 			.sym = al->sym,
1124 		},
1125 		.srcline = (char *) al->srcline,
1126 		.parent = iter->parent,
1127 		.raw_data = sample->raw_data,
1128 		.raw_size = sample->raw_size,
1129 	};
1130 	int i;
1131 	struct callchain_cursor cursor;
1132 	bool fast = hists__has(he_tmp.hists, sym);
1133 
1134 	callchain_cursor_snapshot(&cursor, &callchain_cursor);
1135 
1136 	callchain_cursor_advance(&callchain_cursor);
1137 
1138 	/*
1139 	 * Check if there are duplicate entries in the callchain.
1140 	 * It may contain cycles or recursive calls.
1141 	 */
1142 	for (i = 0; i < iter->curr; i++) {
1143 		/*
1144 		 * In most cases, there are no duplicate entries in the
1145 		 * callchain. The symbols are usually different. Do a quick
1146 		 * check for symbols first.
1147 		 */
1148 		if (fast && hist_entry__fast__sym_diff(he_cache[i], &he_tmp))
1149 			continue;
1150 
1151 		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
1152 			/* to avoid calling callback function */
1153 			/* avoid calling the callback function */
1154 			return 0;
1155 		}
1156 	}
1157 
1158 	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
1159 			      NULL, sample, false);
1160 	if (he == NULL)
1161 		return -ENOMEM;
1162 
1163 	iter->he = he;
1164 	he_cache[iter->curr++] = he;
1165 
1166 	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
1167 		callchain_append(he->callchain, &cursor, sample->period);
1168 	return 0;
1169 }
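
/*
 * Illustrative note (not part of the original file): for a recursive
 * callchain such as main -> f -> g -> f, cumulative mode would
 * otherwise credit the period to 'f' twice.  The he_cache comparison
 * above detects the second 'f' and bails out with iter->he = NULL, so
 * each unique caller is accumulated only once per sample and no entry
 * can exceed 100% overhead.
 */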
1170 
1171 static int
1172 iter_finish_cumulative_entry(struct hist_entry_iter *iter,
1173 			     struct addr_location *al __maybe_unused)
1174 {
1175 	zfree(&iter->priv);
1176 	iter->he = NULL;
1177 
1178 	return 0;
1179 }
1180 
1181 const struct hist_iter_ops hist_iter_mem = {
1182 	.prepare_entry 		= iter_prepare_mem_entry,
1183 	.add_single_entry 	= iter_add_single_mem_entry,
1184 	.next_entry 		= iter_next_nop_entry,
1185 	.add_next_entry 	= iter_add_next_nop_entry,
1186 	.finish_entry 		= iter_finish_mem_entry,
1187 };
1188 
1189 const struct hist_iter_ops hist_iter_branch = {
1190 	.prepare_entry 		= iter_prepare_branch_entry,
1191 	.add_single_entry 	= iter_add_single_branch_entry,
1192 	.next_entry 		= iter_next_branch_entry,
1193 	.add_next_entry 	= iter_add_next_branch_entry,
1194 	.finish_entry 		= iter_finish_branch_entry,
1195 };
1196 
1197 const struct hist_iter_ops hist_iter_normal = {
1198 	.prepare_entry 		= iter_prepare_normal_entry,
1199 	.add_single_entry 	= iter_add_single_normal_entry,
1200 	.next_entry 		= iter_next_nop_entry,
1201 	.add_next_entry 	= iter_add_next_nop_entry,
1202 	.finish_entry 		= iter_finish_normal_entry,
1203 };
1204 
1205 const struct hist_iter_ops hist_iter_cumulative = {
1206 	.prepare_entry 		= iter_prepare_cumulative_entry,
1207 	.add_single_entry 	= iter_add_single_cumulative_entry,
1208 	.next_entry 		= iter_next_cumulative_entry,
1209 	.add_next_entry 	= iter_add_next_cumulative_entry,
1210 	.finish_entry 		= iter_finish_cumulative_entry,
1211 };
1212 
1213 int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
1214 			 int max_stack_depth, void *arg)
1215 {
1216 	int err, err2;
1217 	struct map *alm = NULL;
1218 
1219 	if (al)
1220 		alm = map__get(al->map);
1221 
1222 	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
1223 					iter->evsel, al, max_stack_depth);
1224 	if (err) {
1225 		map__put(alm);
1226 		return err;
1227 	}
1228 
1229 	err = iter->ops->prepare_entry(iter, al);
1230 	if (err)
1231 		goto out;
1232 
1233 	err = iter->ops->add_single_entry(iter, al);
1234 	if (err)
1235 		goto out;
1236 
1237 	if (iter->he && iter->add_entry_cb) {
1238 		err = iter->add_entry_cb(iter, al, true, arg);
1239 		if (err)
1240 			goto out;
1241 	}
1242 
1243 	while (iter->ops->next_entry(iter, al)) {
1244 		err = iter->ops->add_next_entry(iter, al);
1245 		if (err)
1246 			break;
1247 
1248 		if (iter->he && iter->add_entry_cb) {
1249 			err = iter->add_entry_cb(iter, al, false, arg);
1250 			if (err)
1251 				goto out;
1252 		}
1253 	}
1254 
1255 out:
1256 	err2 = iter->ops->finish_entry(iter, al);
1257 	if (!err)
1258 		err = err2;
1259 
1260 	map__put(alm);
1261 
1262 	return err;
1263 }
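
/*
 * Illustrative sketch (not part of the original file): a typical
 * caller, e.g. a report tool's sample handler, drives the iterator
 * roughly like this; sample resolution and error handling are elided.
 */
#if 0 /* example only */
	struct hist_entry_iter iter = {
		.evsel	= evsel,
		.sample	= sample,
		.ops	= symbol_conf.cumulate_callchain ?
			  &hist_iter_cumulative : &hist_iter_normal,
	};

	if (hist_entry_iter__add(&iter, &al, PERF_MAX_STACK_DEPTH, NULL) < 0)
		pr_debug("problem adding hist entry, skipping event\n");
#endif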
1264 
1265 int64_t
1266 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
1267 {
1268 	struct hists *hists = left->hists;
1269 	struct perf_hpp_fmt *fmt;
1270 	int64_t cmp = 0;
1271 
1272 	hists__for_each_sort_list(hists, fmt) {
1273 		if (perf_hpp__is_dynamic_entry(fmt) &&
1274 		    !perf_hpp__defined_dynamic_entry(fmt, hists))
1275 			continue;
1276 
1277 		cmp = fmt->cmp(fmt, left, right);
1278 		if (cmp)
1279 			break;
1280 	}
1281 
1282 	return cmp;
1283 }
1284 
1285 int64_t
1286 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
1287 {
1288 	struct hists *hists = left->hists;
1289 	struct perf_hpp_fmt *fmt;
1290 	int64_t cmp = 0;
1291 
1292 	hists__for_each_sort_list(hists, fmt) {
1293 		if (perf_hpp__is_dynamic_entry(fmt) &&
1294 		    !perf_hpp__defined_dynamic_entry(fmt, hists))
1295 			continue;
1296 
1297 		cmp = fmt->collapse(fmt, left, right);
1298 		if (cmp)
1299 			break;
1300 	}
1301 
1302 	return cmp;
1303 }
1304 
1305 void hist_entry__delete(struct hist_entry *he)
1306 {
1307 	struct hist_entry_ops *ops = he->ops;
1308 
1309 	thread__zput(he->thread);
1310 	map__zput(he->ms.map);
1311 
1312 	if (he->branch_info) {
1313 		map__zput(he->branch_info->from.ms.map);
1314 		map__zput(he->branch_info->to.ms.map);
1315 		free_srcline(he->branch_info->srcline_from);
1316 		free_srcline(he->branch_info->srcline_to);
1317 		zfree(&he->branch_info);
1318 	}
1319 
1320 	if (he->mem_info) {
1321 		map__zput(he->mem_info->iaddr.ms.map);
1322 		map__zput(he->mem_info->daddr.ms.map);
1323 		mem_info__zput(he->mem_info);
1324 	}
1325 
1326 	if (he->block_info)
1327 		block_info__zput(he->block_info);
1328 
1329 	if (he->kvm_info)
1330 		kvm_info__zput(he->kvm_info);
1331 
1332 	zfree(&he->res_samples);
1333 	zfree(&he->stat_acc);
1334 	free_srcline(he->srcline);
1335 	if (he->srcfile && he->srcfile[0])
1336 		zfree(&he->srcfile);
1337 	free_callchain(he->callchain);
1338 	zfree(&he->trace_output);
1339 	zfree(&he->raw_data);
1340 	ops->free(he);
1341 }
1342 
1343 /*
1344  * If this is not the last column, then we need to pad it according to the
1345  * pre-calculated max length for this column; otherwise don't bother adding
1346  * spaces, because that would break viewing this with, for instance, 'less',
1347  * which would show tons of trailing spaces when a long C++ demangled method
1348  * name is sampled.
1349  */
1350 int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
1351 				   struct perf_hpp_fmt *fmt, int printed)
1352 {
1353 	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
1354 		const int width = fmt->width(fmt, hpp, he->hists);
1355 		if (printed < width) {
1356 			advance_hpp(hpp, printed);
1357 			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
1358 		}
1359 	}
1360 
1361 	return printed;
1362 }
1363 
1364 /*
1365  * collapse the histogram
1366  */
1367 
1368 static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
1369 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
1370 				       enum hist_filter type);
1371 
1372 typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);
1373 
1374 static bool check_thread_entry(struct perf_hpp_fmt *fmt)
1375 {
1376 	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
1377 }
1378 
1379 static void hist_entry__check_and_remove_filter(struct hist_entry *he,
1380 						enum hist_filter type,
1381 						fmt_chk_fn check)
1382 {
1383 	struct perf_hpp_fmt *fmt;
1384 	bool type_match = false;
1385 	struct hist_entry *parent = he->parent_he;
1386 
1387 	switch (type) {
1388 	case HIST_FILTER__THREAD:
1389 		if (symbol_conf.comm_list == NULL &&
1390 		    symbol_conf.pid_list == NULL &&
1391 		    symbol_conf.tid_list == NULL)
1392 			return;
1393 		break;
1394 	case HIST_FILTER__DSO:
1395 		if (symbol_conf.dso_list == NULL)
1396 			return;
1397 		break;
1398 	case HIST_FILTER__SYMBOL:
1399 		if (symbol_conf.sym_list == NULL)
1400 			return;
1401 		break;
1402 	case HIST_FILTER__PARENT:
1403 	case HIST_FILTER__GUEST:
1404 	case HIST_FILTER__HOST:
1405 	case HIST_FILTER__SOCKET:
1406 	case HIST_FILTER__C2C:
1407 	default:
1408 		return;
1409 	}
1410 
1411 	/* if it's filtered by its own fmt, it has to have filter bits */
1412 	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
1413 		if (check(fmt)) {
1414 			type_match = true;
1415 			break;
1416 		}
1417 	}
1418 
1419 	if (type_match) {
1420 		/*
1421 		 * If the filter is for the current level entry, propagate
1422 		 * the filter marker to parents.  The marker bit was
1423 		 * already set by default, so we only need to clear it
1424 		 * for non-filtered entries.
1425 		 */
1426 		if (!(he->filtered & (1 << type))) {
1427 			while (parent) {
1428 				parent->filtered &= ~(1 << type);
1429 				parent = parent->parent_he;
1430 			}
1431 		}
1432 	} else {
1433 		/*
1434 		 * If the current entry doesn't have matching formats, set
1435 		 * the filter marker for upper level entries.  It will be
1436 		 * cleared if any of its lower level entries is not filtered.
1437 		 *
1438 		 * For lower-level entries, the parent's filter bit is
1439 		 * inherited so that lower level entries of a
1440 		 * non-filtered entry won't set the filter marker.
1441 		 */
1442 		if (parent == NULL)
1443 			he->filtered |= (1 << type);
1444 		else
1445 			he->filtered |= (parent->filtered & (1 << type));
1446 	}
1447 }
1448 
1449 static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
1450 {
1451 	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
1452 					    check_thread_entry);
1453 
1454 	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
1455 					    perf_hpp__is_dso_entry);
1456 
1457 	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
1458 					    perf_hpp__is_sym_entry);
1459 
1460 	hists__apply_filters(he->hists, he);
1461 }
1462 
1463 static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
1464 						 struct rb_root_cached *root,
1465 						 struct hist_entry *he,
1466 						 struct hist_entry *parent_he,
1467 						 struct perf_hpp_list *hpp_list)
1468 {
1469 	struct rb_node **p = &root->rb_root.rb_node;
1470 	struct rb_node *parent = NULL;
1471 	struct hist_entry *iter, *new;
1472 	struct perf_hpp_fmt *fmt;
1473 	int64_t cmp;
1474 	bool leftmost = true;
1475 
1476 	while (*p != NULL) {
1477 		parent = *p;
1478 		iter = rb_entry(parent, struct hist_entry, rb_node_in);
1479 
1480 		cmp = 0;
1481 		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1482 			cmp = fmt->collapse(fmt, iter, he);
1483 			if (cmp)
1484 				break;
1485 		}
1486 
1487 		if (!cmp) {
1488 			he_stat__add_stat(&iter->stat, &he->stat);
1489 			return iter;
1490 		}
1491 
1492 		if (cmp < 0)
1493 			p = &parent->rb_left;
1494 		else {
1495 			p = &parent->rb_right;
1496 			leftmost = false;
1497 		}
1498 	}
1499 
1500 	new = hist_entry__new(he, true);
1501 	if (new == NULL)
1502 		return NULL;
1503 
1504 	hists->nr_entries++;
1505 
1506 	/* save related format list for output */
1507 	new->hpp_list = hpp_list;
1508 	new->parent_he = parent_he;
1509 
1510 	hist_entry__apply_hierarchy_filters(new);
1511 
1512 	/* some fields are now passed to 'new' */
1513 	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1514 		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
1515 			he->trace_output = NULL;
1516 		else
1517 			new->trace_output = NULL;
1518 
1519 		if (perf_hpp__is_srcline_entry(fmt))
1520 			he->srcline = NULL;
1521 		else
1522 			new->srcline = NULL;
1523 
1524 		if (perf_hpp__is_srcfile_entry(fmt))
1525 			he->srcfile = NULL;
1526 		else
1527 			new->srcfile = NULL;
1528 	}
1529 
1530 	rb_link_node(&new->rb_node_in, parent, p);
1531 	rb_insert_color_cached(&new->rb_node_in, root, leftmost);
1532 	return new;
1533 }
1534 
1535 static int hists__hierarchy_insert_entry(struct hists *hists,
1536 					 struct rb_root_cached *root,
1537 					 struct hist_entry *he)
1538 {
1539 	struct perf_hpp_list_node *node;
1540 	struct hist_entry *new_he = NULL;
1541 	struct hist_entry *parent = NULL;
1542 	int depth = 0;
1543 	int ret = 0;
1544 
1545 	list_for_each_entry(node, &hists->hpp_formats, list) {
1546 		/* skip period (overhead) and elided columns */
1547 		if (node->level == 0 || node->skip)
1548 			continue;
1549 
1550 		/* insert copy of 'he' for each fmt into the hierarchy */
1551 		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
1552 		if (new_he == NULL) {
1553 			ret = -1;
1554 			break;
1555 		}
1556 
1557 		root = &new_he->hroot_in;
1558 		new_he->depth = depth++;
1559 		parent = new_he;
1560 	}
1561 
1562 	if (new_he) {
1563 		new_he->leaf = true;
1564 
1565 		if (hist_entry__has_callchains(new_he) &&
1566 		    symbol_conf.use_callchain) {
1567 			callchain_cursor_reset(&callchain_cursor);
1568 			if (callchain_merge(&callchain_cursor,
1569 					    new_he->callchain,
1570 					    he->callchain) < 0)
1571 				ret = -1;
1572 		}
1573 	}
1574 
1575 	/* 'he' is no longer used */
1576 	hist_entry__delete(he);
1577 
1578 	/* return 0 (or -1) since the filters have already been applied */
1579 	return ret;
1580 }
1581 
1582 static int hists__collapse_insert_entry(struct hists *hists,
1583 					struct rb_root_cached *root,
1584 					struct hist_entry *he)
1585 {
1586 	struct rb_node **p = &root->rb_root.rb_node;
1587 	struct rb_node *parent = NULL;
1588 	struct hist_entry *iter;
1589 	int64_t cmp;
1590 	bool leftmost = true;
1591 
1592 	if (symbol_conf.report_hierarchy)
1593 		return hists__hierarchy_insert_entry(hists, root, he);
1594 
1595 	while (*p != NULL) {
1596 		parent = *p;
1597 		iter = rb_entry(parent, struct hist_entry, rb_node_in);
1598 
1599 		cmp = hist_entry__collapse(iter, he);
1600 
1601 		if (!cmp) {
1602 			int ret = 0;
1603 
1604 			he_stat__add_stat(&iter->stat, &he->stat);
1605 			if (symbol_conf.cumulate_callchain)
1606 				he_stat__add_stat(iter->stat_acc, he->stat_acc);
1607 
1608 			if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
1609 				callchain_cursor_reset(&callchain_cursor);
1610 				if (callchain_merge(&callchain_cursor,
1611 						    iter->callchain,
1612 						    he->callchain) < 0)
1613 					ret = -1;
1614 			}
1615 			hist_entry__delete(he);
1616 			return ret;
1617 		}
1618 
1619 		if (cmp < 0)
1620 			p = &(*p)->rb_left;
1621 		else {
1622 			p = &(*p)->rb_right;
1623 			leftmost = false;
1624 		}
1625 	}
1626 	hists->nr_entries++;
1627 
1628 	rb_link_node(&he->rb_node_in, parent, p);
1629 	rb_insert_color_cached(&he->rb_node_in, root, leftmost);
1630 	return 1;
1631 }
1632 
1633 struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
1634 {
1635 	struct rb_root_cached *root;
1636 
1637 	mutex_lock(&hists->lock);
1638 
1639 	root = hists->entries_in;
1640 	if (++hists->entries_in > &hists->entries_in_array[1])
1641 		hists->entries_in = &hists->entries_in_array[0];
1642 
1643 	mutex_unlock(&hists->lock);
1644 
1645 	return root;
1646 }
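
/*
 * Illustrative note (not part of the original file): entries_in
 * points into the two-slot entries_in_array, so the rotation above
 * flips between slot 0 and slot 1 under hists->lock.  New samples
 * keep landing in the fresh slot while hists__collapse_resort()
 * drains the returned one, letting e.g. 'perf top' insert and resort
 * concurrently.
 */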
1647 
1648 static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
1649 {
1650 	hists__filter_entry_by_dso(hists, he);
1651 	hists__filter_entry_by_thread(hists, he);
1652 	hists__filter_entry_by_symbol(hists, he);
1653 	hists__filter_entry_by_socket(hists, he);
1654 }
1655 
1656 int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1657 {
1658 	struct rb_root_cached *root;
1659 	struct rb_node *next;
1660 	struct hist_entry *n;
1661 	int ret;
1662 
1663 	if (!hists__has(hists, need_collapse))
1664 		return 0;
1665 
1666 	hists->nr_entries = 0;
1667 
1668 	root = hists__get_rotate_entries_in(hists);
1669 
1670 	next = rb_first_cached(root);
1671 
1672 	while (next) {
1673 		if (session_done())
1674 			break;
1675 		n = rb_entry(next, struct hist_entry, rb_node_in);
1676 		next = rb_next(&n->rb_node_in);
1677 
1678 		rb_erase_cached(&n->rb_node_in, root);
1679 		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
1680 		if (ret < 0)
1681 			return -1;
1682 
1683 		if (ret) {
1684 			/*
1685 			 * If it wasn't combined with one of the entries already
1686 			 * collapsed, we need to apply the filters that may have
1687 			 * been set by, say, the hist_browser.
1688 			 */
1689 			hists__apply_filters(hists, n);
1690 		}
1691 		if (prog)
1692 			ui_progress__update(prog, 1);
1693 	}
1694 	return 0;
1695 }
1696 
1697 static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
1698 {
1699 	struct hists *hists = a->hists;
1700 	struct perf_hpp_fmt *fmt;
1701 	int64_t cmp = 0;
1702 
1703 	hists__for_each_sort_list(hists, fmt) {
1704 		if (perf_hpp__should_skip(fmt, a->hists))
1705 			continue;
1706 
1707 		cmp = fmt->sort(fmt, a, b);
1708 		if (cmp)
1709 			break;
1710 	}
1711 
1712 	return cmp;
1713 }
1714 
1715 static void hists__reset_filter_stats(struct hists *hists)
1716 {
1717 	hists->nr_non_filtered_entries = 0;
1718 	hists->stats.total_non_filtered_period = 0;
1719 }
1720 
1721 void hists__reset_stats(struct hists *hists)
1722 {
1723 	hists->nr_entries = 0;
1724 	hists->stats.total_period = 0;
1725 
1726 	hists__reset_filter_stats(hists);
1727 }
1728 
1729 static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
1730 {
1731 	hists->nr_non_filtered_entries++;
1732 	hists->stats.total_non_filtered_period += h->stat.period;
1733 }
1734 
1735 void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1736 {
1737 	if (!h->filtered)
1738 		hists__inc_filter_stats(hists, h);
1739 
1740 	hists->nr_entries++;
1741 	hists->stats.total_period += h->stat.period;
1742 }
1743 
1744 static void hierarchy_recalc_total_periods(struct hists *hists)
1745 {
1746 	struct rb_node *node;
1747 	struct hist_entry *he;
1748 
1749 	node = rb_first_cached(&hists->entries);
1750 
1751 	hists->stats.total_period = 0;
1752 	hists->stats.total_non_filtered_period = 0;
1753 
1754 	/*
1755 	 * Recalculate the total period using top-level entries only,
1756 	 * since lower level entries only see non-filtered entries
1757 	 * while upper level entries have the sum of both.
1758 	 */
1759 	while (node) {
1760 		he = rb_entry(node, struct hist_entry, rb_node);
1761 		node = rb_next(node);
1762 
1763 		hists->stats.total_period += he->stat.period;
1764 		if (!he->filtered)
1765 			hists->stats.total_non_filtered_period += he->stat.period;
1766 	}
1767 }
1768 
1769 static void hierarchy_insert_output_entry(struct rb_root_cached *root,
1770 					  struct hist_entry *he)
1771 {
1772 	struct rb_node **p = &root->rb_root.rb_node;
1773 	struct rb_node *parent = NULL;
1774 	struct hist_entry *iter;
1775 	struct perf_hpp_fmt *fmt;
1776 	bool leftmost = true;
1777 
1778 	while (*p != NULL) {
1779 		parent = *p;
1780 		iter = rb_entry(parent, struct hist_entry, rb_node);
1781 
1782 		if (hist_entry__sort(he, iter) > 0)
1783 			p = &parent->rb_left;
1784 		else {
1785 			p = &parent->rb_right;
1786 			leftmost = false;
1787 		}
1788 	}
1789 
1790 	rb_link_node(&he->rb_node, parent, p);
1791 	rb_insert_color_cached(&he->rb_node, root, leftmost);
1792 
1793 	/* update column width of dynamic entry */
1794 	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
1795 		if (fmt->init)
1796 			fmt->init(fmt, he);
1797 	}
1798 }
1799 
1800 static void hists__hierarchy_output_resort(struct hists *hists,
1801 					   struct ui_progress *prog,
1802 					   struct rb_root_cached *root_in,
1803 					   struct rb_root_cached *root_out,
1804 					   u64 min_callchain_hits,
1805 					   bool use_callchain)
1806 {
1807 	struct rb_node *node;
1808 	struct hist_entry *he;
1809 
1810 	*root_out = RB_ROOT_CACHED;
1811 	node = rb_first_cached(root_in);
1812 
1813 	while (node) {
1814 		he = rb_entry(node, struct hist_entry, rb_node_in);
1815 		node = rb_next(node);
1816 
1817 		hierarchy_insert_output_entry(root_out, he);
1818 
1819 		if (prog)
1820 			ui_progress__update(prog, 1);
1821 
1822 		hists->nr_entries++;
1823 		if (!he->filtered) {
1824 			hists->nr_non_filtered_entries++;
1825 			hists__calc_col_len(hists, he);
1826 		}
1827 
1828 		if (!he->leaf) {
1829 			hists__hierarchy_output_resort(hists, prog,
1830 						       &he->hroot_in,
1831 						       &he->hroot_out,
1832 						       min_callchain_hits,
1833 						       use_callchain);
1834 			continue;
1835 		}
1836 
1837 		if (!use_callchain)
1838 			continue;
1839 
1840 		if (callchain_param.mode == CHAIN_GRAPH_REL) {
1841 			u64 total = he->stat.period;
1842 
1843 			if (symbol_conf.cumulate_callchain)
1844 				total = he->stat_acc->period;
1845 
1846 			min_callchain_hits = total * (callchain_param.min_percent / 100);
1847 		}
1848 
1849 		callchain_param.sort(&he->sorted_chain, he->callchain,
1850 				     min_callchain_hits, &callchain_param);
1851 	}
1852 }
1853 
1854 static void __hists__insert_output_entry(struct rb_root_cached *entries,
1855 					 struct hist_entry *he,
1856 					 u64 min_callchain_hits,
1857 					 bool use_callchain)
1858 {
1859 	struct rb_node **p = &entries->rb_root.rb_node;
1860 	struct rb_node *parent = NULL;
1861 	struct hist_entry *iter;
1862 	struct perf_hpp_fmt *fmt;
1863 	bool leftmost = true;
1864 
1865 	if (use_callchain) {
1866 		if (callchain_param.mode == CHAIN_GRAPH_REL) {
1867 			u64 total = he->stat.period;
1868 
1869 			if (symbol_conf.cumulate_callchain)
1870 				total = he->stat_acc->period;
1871 
1872 			min_callchain_hits = total * (callchain_param.min_percent / 100);
1873 		}
1874 		callchain_param.sort(&he->sorted_chain, he->callchain,
1875 				      min_callchain_hits, &callchain_param);
1876 	}
1877 
1878 	while (*p != NULL) {
1879 		parent = *p;
1880 		iter = rb_entry(parent, struct hist_entry, rb_node);
1881 
1882 		if (hist_entry__sort(he, iter) > 0)
1883 			p = &(*p)->rb_left;
1884 		else {
1885 			p = &(*p)->rb_right;
1886 			leftmost = false;
1887 		}
1888 	}
1889 
1890 	rb_link_node(&he->rb_node, parent, p);
1891 	rb_insert_color_cached(&he->rb_node, entries, leftmost);
1892 
1893 	/* update column width of dynamic entries */
1894 	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
1895 		if (fmt->init)
1896 			fmt->init(fmt, he);
1897 	}
1898 }
1899 
1900 static void output_resort(struct hists *hists, struct ui_progress *prog,
1901 			  bool use_callchain, hists__resort_cb_t cb,
1902 			  void *cb_arg)
1903 {
1904 	struct rb_root_cached *root;
1905 	struct rb_node *next;
1906 	struct hist_entry *n;
1907 	u64 callchain_total;
1908 	u64 min_callchain_hits;
1909 
1910 	callchain_total = hists->callchain_period;
1911 	if (symbol_conf.filter_relative)
1912 		callchain_total = hists->callchain_non_filtered_period;
1913 
1914 	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);
1915 
1916 	hists__reset_stats(hists);
1917 	hists__reset_col_len(hists);
1918 
1919 	if (symbol_conf.report_hierarchy) {
1920 		hists__hierarchy_output_resort(hists, prog,
1921 					       &hists->entries_collapsed,
1922 					       &hists->entries,
1923 					       min_callchain_hits,
1924 					       use_callchain);
1925 		hierarchy_recalc_total_periods(hists);
1926 		return;
1927 	}
1928 
1929 	if (hists__has(hists, need_collapse))
1930 		root = &hists->entries_collapsed;
1931 	else
1932 		root = hists->entries_in;
1933 
1934 	next = rb_first_cached(root);
1935 	hists->entries = RB_ROOT_CACHED;
1936 
1937 	while (next) {
1938 		n = rb_entry(next, struct hist_entry, rb_node_in);
1939 		next = rb_next(&n->rb_node_in);
1940 
1941 		if (cb && cb(n, cb_arg))
1942 			continue;
1943 
1944 		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
1945 		hists__inc_stats(hists, n);
1946 
1947 		if (!n->filtered)
1948 			hists__calc_col_len(hists, n);
1949 
1950 		if (prog)
1951 			ui_progress__update(prog, 1);
1952 	}
1953 }
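
/*
 * Worked example (editorial note, not part of the original file):
 * with callchain_param.min_percent = 0.5 and a total callchain period
 * of 200000, min_callchain_hits = 200000 * (0.5 / 100) = 1000, so
 * chains contributing fewer than 1000 period units are folded away
 * when the callchains are sorted for output.
 */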
1954 
1955 void evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
1956 			     hists__resort_cb_t cb, void *cb_arg)
1957 {
1958 	bool use_callchain;
1959 
1960 	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
1961 		use_callchain = evsel__has_callchain(evsel);
1962 	else
1963 		use_callchain = symbol_conf.use_callchain;
1964 
1965 	use_callchain |= symbol_conf.show_branchflag_count;
1966 
1967 	output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
1968 }
1969 
1970 void evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
1971 {
1972 	return evsel__output_resort_cb(evsel, prog, NULL, NULL);
1973 }
1974 
1975 void hists__output_resort(struct hists *hists, struct ui_progress *prog)
1976 {
1977 	output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
1978 }
1979 
1980 void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
1981 			     hists__resort_cb_t cb)
1982 {
1983 	output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
1984 }
1985 
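/*
 * Example (editor's sketch, not part of the original file): output_resort()
 * skips any entry for which the callback returns non-zero, so a caller can
 * thin the output tree on the fly. The helper names and the threshold
 * argument below are hypothetical.
 */
static int example_min_period_cb(struct hist_entry *he, void *arg)
{
	u64 min_period = *(u64 *)arg;

	/* non-zero: leave this entry out of hists->entries */
	return he->stat.period < min_period;
}

static void __maybe_unused example_resort_with_threshold(struct evsel *evsel,
							 u64 min_period)
{
	evsel__output_resort_cb(evsel, NULL, example_min_period_cb, &min_period);
}
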
1986 static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
1987 {
1988 	if (he->leaf || hmd == HMD_FORCE_SIBLING)
1989 		return false;
1990 
1991 	if (he->unfolded || hmd == HMD_FORCE_CHILD)
1992 		return true;
1993 
1994 	return false;
1995 }
1996 
1997 struct rb_node *rb_hierarchy_last(struct rb_node *node)
1998 {
1999 	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
2000 
2001 	while (can_goto_child(he, HMD_NORMAL)) {
2002 		node = rb_last(&he->hroot_out.rb_root);
2003 		he = rb_entry(node, struct hist_entry, rb_node);
2004 	}
2005 	return node;
2006 }
2007 
2008 struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
2009 {
2010 	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
2011 
2012 	if (can_goto_child(he, hmd))
2013 		node = rb_first_cached(&he->hroot_out);
2014 	else
2015 		node = rb_next(node);
2016 
2017 	while (node == NULL) {
2018 		he = he->parent_he;
2019 		if (he == NULL)
2020 			break;
2021 
2022 		node = rb_next(&he->rb_node);
2023 	}
2024 	return node;
2025 }
2026 
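/*
 * Example (editor's sketch, not part of the original file): walking every
 * visible row of a hierarchy in display order. HMD_NORMAL descends only
 * into unfolded, non-leaf entries, per can_goto_child() above; the
 * HMD_FORCE_* modes override that. The helper name is hypothetical.
 */
static void __maybe_unused example_walk_hierarchy(struct hists *hists)
{
	struct rb_node *nd = rb_first_cached(&hists->entries);

	while (nd) {
		struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);

		pr_debug("depth %d, filtered %d\n", he->depth, he->filtered);
		nd = __rb_hierarchy_next(nd, HMD_NORMAL);
	}
}
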
2027 struct rb_node *rb_hierarchy_prev(struct rb_node *node)
2028 {
2029 	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
2030 
2031 	node = rb_prev(node);
2032 	if (node)
2033 		return rb_hierarchy_last(node);
2034 
2035 	he = he->parent_he;
2036 	if (he == NULL)
2037 		return NULL;
2038 
2039 	return &he->rb_node;
2040 }
2041 
2042 bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
2043 {
2044 	struct rb_node *node;
2045 	struct hist_entry *child;
2046 	float percent;
2047 
2048 	if (he->leaf)
2049 		return false;
2050 
2051 	node = rb_first_cached(&he->hroot_out);
2052 	child = rb_entry(node, struct hist_entry, rb_node);
2053 
2054 	while (node && child->filtered) {
2055 		node = rb_next(node);
2056 		child = rb_entry(node, struct hist_entry, rb_node);
2057 	}
2058 
2059 	if (node)
2060 		percent = hist_entry__get_percent_limit(child);
2061 	else
2062 		percent = 0;
2063 
2064 	return node && percent >= limit;
2065 }
2066 
2067 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
2068 				       enum hist_filter filter)
2069 {
2070 	h->filtered &= ~(1 << filter);
2071 
2072 	if (symbol_conf.report_hierarchy) {
2073 		struct hist_entry *parent = h->parent_he;
2074 
2075 		while (parent) {
2076 			he_stat__add_stat(&parent->stat, &h->stat);
2077 
2078 			parent->filtered &= ~(1 << filter);
2079 
2080 			if (parent->filtered)
2081 				goto next;
2082 
2083 			/* force fold unfiltered entry for simplicity */
2084 			parent->unfolded = false;
2085 			parent->has_no_entry = false;
2086 			parent->row_offset = 0;
2087 			parent->nr_rows = 0;
2088 next:
2089 			parent = parent->parent_he;
2090 		}
2091 	}
2092 
2093 	if (h->filtered)
2094 		return;
2095 
2096 	/* force fold unfiltered entry for simplicity */
2097 	h->unfolded = false;
2098 	h->has_no_entry = false;
2099 	h->row_offset = 0;
2100 	h->nr_rows = 0;
2101 
2102 	hists->stats.nr_non_filtered_samples += h->stat.nr_events;
2103 
2104 	hists__inc_filter_stats(hists, h);
2105 	hists__calc_col_len(hists, h);
2106 }
2107 
2108 
2109 static bool hists__filter_entry_by_dso(struct hists *hists,
2110 				       struct hist_entry *he)
2111 {
2112 	if (hists->dso_filter != NULL &&
2113 	    (he->ms.map == NULL || map__dso(he->ms.map) != hists->dso_filter)) {
2114 		he->filtered |= (1 << HIST_FILTER__DSO);
2115 		return true;
2116 	}
2117 
2118 	return false;
2119 }
2120 
2121 static bool hists__filter_entry_by_thread(struct hists *hists,
2122 					  struct hist_entry *he)
2123 {
2124 	if (hists->thread_filter != NULL &&
2125 	    he->thread != hists->thread_filter) {
2126 		he->filtered |= (1 << HIST_FILTER__THREAD);
2127 		return true;
2128 	}
2129 
2130 	return false;
2131 }
2132 
2133 static bool hists__filter_entry_by_symbol(struct hists *hists,
2134 					  struct hist_entry *he)
2135 {
2136 	if (hists->symbol_filter_str != NULL &&
2137 	    (!he->ms.sym || strstr(he->ms.sym->name,
2138 				   hists->symbol_filter_str) == NULL)) {
2139 		he->filtered |= (1 << HIST_FILTER__SYMBOL);
2140 		return true;
2141 	}
2142 
2143 	return false;
2144 }
2145 
2146 static bool hists__filter_entry_by_socket(struct hists *hists,
2147 					  struct hist_entry *he)
2148 {
2149 	if ((hists->socket_filter > -1) &&
2150 	    (he->socket != hists->socket_filter)) {
2151 		he->filtered |= (1 << HIST_FILTER__SOCKET);
2152 		return true;
2153 	}
2154 
2155 	return false;
2156 }
2157 
2158 typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
2159 
2160 static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
2161 {
2162 	struct rb_node *nd;
2163 
2164 	hists->stats.nr_non_filtered_samples = 0;
2165 
2166 	hists__reset_filter_stats(hists);
2167 	hists__reset_col_len(hists);
2168 
2169 	for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
2170 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2171 
2172 		if (filter(hists, h))
2173 			continue;
2174 
2175 		hists__remove_entry_filter(hists, h, type);
2176 	}
2177 }
2178 
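/*
 * Example (editor's sketch, not part of the original file): a custom
 * predicate following the filter_fn_t pattern above, suitable for passing
 * to hists__filter_by_type(). A real filter would define its own
 * HIST_FILTER__* bit; HIST_FILTER__SYMBOL is reused here only for
 * illustration, and the function name is hypothetical.
 */
static bool __maybe_unused example_filter_entry_by_cpu0(struct hists *hists __maybe_unused,
							struct hist_entry *he)
{
	if (he->cpu != 0) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL); /* illustrative bit */
		return true;
	}

	return false;
}
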
2179 static void resort_filtered_entry(struct rb_root_cached *root,
2180 				  struct hist_entry *he)
2181 {
2182 	struct rb_node **p = &root->rb_root.rb_node;
2183 	struct rb_node *parent = NULL;
2184 	struct hist_entry *iter;
2185 	struct rb_root_cached new_root = RB_ROOT_CACHED;
2186 	struct rb_node *nd;
2187 	bool leftmost = true;
2188 
2189 	while (*p != NULL) {
2190 		parent = *p;
2191 		iter = rb_entry(parent, struct hist_entry, rb_node);
2192 
2193 		if (hist_entry__sort(he, iter) > 0)
2194 			p = &(*p)->rb_left;
2195 		else {
2196 			p = &(*p)->rb_right;
2197 			leftmost = false;
2198 		}
2199 	}
2200 
2201 	rb_link_node(&he->rb_node, parent, p);
2202 	rb_insert_color_cached(&he->rb_node, root, leftmost);
2203 
2204 	if (he->leaf || he->filtered)
2205 		return;
2206 
2207 	nd = rb_first_cached(&he->hroot_out);
2208 	while (nd) {
2209 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2210 
2211 		nd = rb_next(nd);
2212 		rb_erase_cached(&h->rb_node, &he->hroot_out);
2213 
2214 		resort_filtered_entry(&new_root, h);
2215 	}
2216 
2217 	he->hroot_out = new_root;
2218 }
2219 
2220 static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
2221 {
2222 	struct rb_node *nd;
2223 	struct rb_root_cached new_root = RB_ROOT_CACHED;
2224 
2225 	hists->stats.nr_non_filtered_samples = 0;
2226 
2227 	hists__reset_filter_stats(hists);
2228 	hists__reset_col_len(hists);
2229 
2230 	nd = rb_first_cached(&hists->entries);
2231 	while (nd) {
2232 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2233 		int ret;
2234 
2235 		ret = hist_entry__filter(h, type, arg);
2236 
2237 		/*
2238 		 * case 1. non-matching type
2239 		 * zero out the period, set filter marker and move to child
2240 		 */
2241 		if (ret < 0) {
2242 			memset(&h->stat, 0, sizeof(h->stat));
2243 			h->filtered |= (1 << type);
2244 
2245 			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
2246 		}
2247 		/*
2248 		 * case 2. matched type (filter out)
2249 		 * set filter marker and move to next
2250 		 */
2251 		else if (ret == 1) {
2252 			h->filtered |= (1 << type);
2253 
2254 			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2255 		}
2256 		/*
2257 		 * case 3. ok (not filtered)
2258 		 * add period to hists and parents, erase the filter marker
2259 		 * and move to next sibling
2260 		 */
2261 		else {
2262 			hists__remove_entry_filter(hists, h, type);
2263 
2264 			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2265 		}
2266 	}
2267 
2268 	hierarchy_recalc_total_periods(hists);
2269 
2270 	/*
2271 	 * Re-sort the output after applying a new filter, since a filter in a
2272 	 * lower hierarchy can change periods in an upper hierarchy.
2273 	 */
2274 	nd = rb_first_cached(&hists->entries);
2275 	while (nd) {
2276 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2277 
2278 		nd = rb_next(nd);
2279 		rb_erase_cached(&h->rb_node, &hists->entries);
2280 
2281 		resort_filtered_entry(&new_root, h);
2282 	}
2283 
2284 	hists->entries = new_root;
2285 }
2286 
2287 void hists__filter_by_thread(struct hists *hists)
2288 {
2289 	if (symbol_conf.report_hierarchy)
2290 		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
2291 					hists->thread_filter);
2292 	else
2293 		hists__filter_by_type(hists, HIST_FILTER__THREAD,
2294 				      hists__filter_entry_by_thread);
2295 }
2296 
2297 void hists__filter_by_dso(struct hists *hists)
2298 {
2299 	if (symbol_conf.report_hierarchy)
2300 		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
2301 					hists->dso_filter);
2302 	else
2303 		hists__filter_by_type(hists, HIST_FILTER__DSO,
2304 				      hists__filter_entry_by_dso);
2305 }
2306 
2307 void hists__filter_by_symbol(struct hists *hists)
2308 {
2309 	if (symbol_conf.report_hierarchy)
2310 		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
2311 					hists->symbol_filter_str);
2312 	else
2313 		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
2314 				      hists__filter_entry_by_symbol);
2315 }
2316 
2317 void hists__filter_by_socket(struct hists *hists)
2318 {
2319 	if (symbol_conf.report_hierarchy)
2320 		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
2321 					&hists->socket_filter);
2322 	else
2323 		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
2324 				      hists__filter_entry_by_socket);
2325 }
2326 
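/*
 * Example (editor's sketch, not part of the original file): applying a
 * socket filter amounts to storing the id and re-running the filter pass,
 * which is what the report/top browsers do. The helper is hypothetical.
 */
static void __maybe_unused example_apply_socket_filter(struct hists *hists,
						       int socket_id)
{
	hists->socket_filter = socket_id;	/* -1 means "no filter" */
	hists__filter_by_socket(hists);
}
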
2327 void events_stats__inc(struct events_stats *stats, u32 type)
2328 {
2329 	++stats->nr_events[0];
2330 	++stats->nr_events[type];
2331 }
2332 
2333 static void hists_stats__inc(struct hists_stats *stats)
2334 {
2335 	++stats->nr_samples;
2336 }
2337 
2338 void hists__inc_nr_events(struct hists *hists)
2339 {
2340 	hists_stats__inc(&hists->stats);
2341 }
2342 
2343 void hists__inc_nr_samples(struct hists *hists, bool filtered)
2344 {
2345 	hists_stats__inc(&hists->stats);
2346 	if (!filtered)
2347 		hists->stats.nr_non_filtered_samples++;
2348 }
2349 
2350 void hists__inc_nr_lost_samples(struct hists *hists, u32 lost)
2351 {
2352 	hists->stats.nr_lost_samples += lost;
2353 }
2354 
2355 static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
2356 						 struct hist_entry *pair)
2357 {
2358 	struct rb_root_cached *root;
2359 	struct rb_node **p;
2360 	struct rb_node *parent = NULL;
2361 	struct hist_entry *he;
2362 	int64_t cmp;
2363 	bool leftmost = true;
2364 
2365 	if (hists__has(hists, need_collapse))
2366 		root = &hists->entries_collapsed;
2367 	else
2368 		root = hists->entries_in;
2369 
2370 	p = &root->rb_root.rb_node;
2371 
2372 	while (*p != NULL) {
2373 		parent = *p;
2374 		he = rb_entry(parent, struct hist_entry, rb_node_in);
2375 
2376 		cmp = hist_entry__collapse(he, pair);
2377 
2378 		if (!cmp)
2379 			goto out;
2380 
2381 		if (cmp < 0)
2382 			p = &(*p)->rb_left;
2383 		else {
2384 			p = &(*p)->rb_right;
2385 			leftmost = false;
2386 		}
2387 	}
2388 
2389 	he = hist_entry__new(pair, true);
2390 	if (he) {
2391 		memset(&he->stat, 0, sizeof(he->stat));
2392 		he->hists = hists;
2393 		if (symbol_conf.cumulate_callchain)
2394 			memset(he->stat_acc, 0, sizeof(*he->stat_acc));
2395 		rb_link_node(&he->rb_node_in, parent, p);
2396 		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2397 		hists__inc_stats(hists, he);
2398 		he->dummy = true;
2399 	}
2400 out:
2401 	return he;
2402 }
2403 
2404 static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
2405 						    struct rb_root_cached *root,
2406 						    struct hist_entry *pair)
2407 {
2408 	struct rb_node **p;
2409 	struct rb_node *parent = NULL;
2410 	struct hist_entry *he;
2411 	struct perf_hpp_fmt *fmt;
2412 	bool leftmost = true;
2413 
2414 	p = &root->rb_root.rb_node;
2415 	while (*p != NULL) {
2416 		int64_t cmp = 0;
2417 
2418 		parent = *p;
2419 		he = rb_entry(parent, struct hist_entry, rb_node_in);
2420 
2421 		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2422 			cmp = fmt->collapse(fmt, he, pair);
2423 			if (cmp)
2424 				break;
2425 		}
2426 		if (!cmp)
2427 			goto out;
2428 
2429 		if (cmp < 0)
2430 			p = &parent->rb_left;
2431 		else {
2432 			p = &parent->rb_right;
2433 			leftmost = false;
2434 		}
2435 	}
2436 
2437 	he = hist_entry__new(pair, true);
2438 	if (he) {
2439 		rb_link_node(&he->rb_node_in, parent, p);
2440 		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2441 
2442 		he->dummy = true;
2443 		he->hists = hists;
2444 		memset(&he->stat, 0, sizeof(he->stat));
2445 		hists__inc_stats(hists, he);
2446 	}
2447 out:
2448 	return he;
2449 }
2450 
2451 static struct hist_entry *hists__find_entry(struct hists *hists,
2452 					    struct hist_entry *he)
2453 {
2454 	struct rb_node *n;
2455 
2456 	if (hists__has(hists, need_collapse))
2457 		n = hists->entries_collapsed.rb_root.rb_node;
2458 	else
2459 		n = hists->entries_in->rb_root.rb_node;
2460 
2461 	while (n) {
2462 		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
2463 		int64_t cmp = hist_entry__collapse(iter, he);
2464 
2465 		if (cmp < 0)
2466 			n = n->rb_left;
2467 		else if (cmp > 0)
2468 			n = n->rb_right;
2469 		else
2470 			return iter;
2471 	}
2472 
2473 	return NULL;
2474 }
2475 
2476 static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
2477 						      struct hist_entry *he)
2478 {
2479 	struct rb_node *n = root->rb_root.rb_node;
2480 
2481 	while (n) {
2482 		struct hist_entry *iter;
2483 		struct perf_hpp_fmt *fmt;
2484 		int64_t cmp = 0;
2485 
2486 		iter = rb_entry(n, struct hist_entry, rb_node_in);
2487 		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2488 			cmp = fmt->collapse(fmt, iter, he);
2489 			if (cmp)
2490 				break;
2491 		}
2492 
2493 		if (cmp < 0)
2494 			n = n->rb_left;
2495 		else if (cmp > 0)
2496 			n = n->rb_right;
2497 		else
2498 			return iter;
2499 	}
2500 
2501 	return NULL;
2502 }
2503 
2504 static void hists__match_hierarchy(struct rb_root_cached *leader_root,
2505 				   struct rb_root_cached *other_root)
2506 {
2507 	struct rb_node *nd;
2508 	struct hist_entry *pos, *pair;
2509 
2510 	for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
2511 		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2512 		pair = hists__find_hierarchy_entry(other_root, pos);
2513 
2514 		if (pair) {
2515 			hist_entry__add_pair(pair, pos);
2516 			hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
2517 		}
2518 	}
2519 }
2520 
2521 /*
2522  * Look for pairs to link to the leader buckets (hist_entries):
2523  */
2524 void hists__match(struct hists *leader, struct hists *other)
2525 {
2526 	struct rb_root_cached *root;
2527 	struct rb_node *nd;
2528 	struct hist_entry *pos, *pair;
2529 
2530 	if (symbol_conf.report_hierarchy) {
2531 		/* hierarchy report always collapses entries */
2532 		return hists__match_hierarchy(&leader->entries_collapsed,
2533 					      &other->entries_collapsed);
2534 	}
2535 
2536 	if (hists__has(leader, need_collapse))
2537 		root = &leader->entries_collapsed;
2538 	else
2539 		root = leader->entries_in;
2540 
2541 	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2542 		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2543 		pair = hists__find_entry(other, pos);
2544 
2545 		if (pair)
2546 			hist_entry__add_pair(pair, pos);
2547 	}
2548 }
2549 
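/*
 * Example (editor's sketch, not part of the original file): after
 * hists__match(), a matched entry reaches its counterpart through its
 * pairs list, which is how perf diff compares periods. This assumes the
 * hist_entry__next_pair() accessor from sort.h; the function name below
 * is hypothetical.
 */
static void __maybe_unused example_compare_pair(struct hist_entry *pos)
{
	struct hist_entry *pair = hist_entry__next_pair(pos);

	if (pair)
		pr_debug("leader period %" PRIu64 ", other period %" PRIu64 "\n",
			 pos->stat.period, pair->stat.period);
}
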
2550 static int hists__link_hierarchy(struct hists *leader_hists,
2551 				 struct hist_entry *parent,
2552 				 struct rb_root_cached *leader_root,
2553 				 struct rb_root_cached *other_root)
2554 {
2555 	struct rb_node *nd;
2556 	struct hist_entry *pos, *leader;
2557 
2558 	for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
2559 		pos = rb_entry(nd, struct hist_entry, rb_node_in);
2560 
2561 		if (hist_entry__has_pairs(pos)) {
2562 			bool found = false;
2563 
2564 			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
2565 				if (leader->hists == leader_hists) {
2566 					found = true;
2567 					break;
2568 				}
2569 			}
2570 			if (!found)
2571 				return -1;
2572 		} else {
2573 			leader = add_dummy_hierarchy_entry(leader_hists,
2574 							   leader_root, pos);
2575 			if (leader == NULL)
2576 				return -1;
2577 
2578 			/* parent_he must point into the leader hierarchy, not at pos's parent */
2579 			leader->parent_he = parent;
2580 
2581 			hist_entry__add_pair(pos, leader);
2582 		}
2583 
2584 		if (!pos->leaf) {
2585 			if (hists__link_hierarchy(leader_hists, leader,
2586 						  &leader->hroot_in,
2587 						  &pos->hroot_in) < 0)
2588 				return -1;
2589 		}
2590 	}
2591 	return 0;
2592 }
2593 
2594 /*
2595  * Look for entries in the other hists that are not present in the leader;
2596  * if we find them, add a dummy entry to the leader hists, with period=0 and
2597  * nr_events=0, to serve as the list header.
2598  */
2599 int hists__link(struct hists *leader, struct hists *other)
2600 {
2601 	struct rb_root_cached *root;
2602 	struct rb_node *nd;
2603 	struct hist_entry *pos, *pair;
2604 
2605 	if (symbol_conf.report_hierarchy) {
2606 		/* hierarchy report always collapses entries */
2607 		return hists__link_hierarchy(leader, NULL,
2608 					     &leader->entries_collapsed,
2609 					     &other->entries_collapsed);
2610 	}
2611 
2612 	if (hists__has(other, need_collapse))
2613 		root = &other->entries_collapsed;
2614 	else
2615 		root = other->entries_in;
2616 
2617 	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2618 		pos = rb_entry(nd, struct hist_entry, rb_node_in);
2619 
2620 		if (!hist_entry__has_pairs(pos)) {
2621 			pair = hists__add_dummy_entry(leader, pos);
2622 			if (pair == NULL)
2623 				return -1;
2624 			hist_entry__add_pair(pos, pair);
2625 		}
2626 	}
2627 
2628 	return 0;
2629 }
2630 
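/*
 * Example (editor's sketch, not part of the original file): perf diff
 * pairs two hists by matching first and then linking, so that every
 * entry in "other" ends up with a counterpart (possibly a dummy) in
 * "leader". The helper name is hypothetical.
 */
static int __maybe_unused example_pair_hists(struct hists *leader,
					     struct hists *other)
{
	hists__match(leader, other);
	return hists__link(leader, other);
}
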
2631 int hists__unlink(struct hists *hists)
2632 {
2633 	struct rb_root_cached *root;
2634 	struct rb_node *nd;
2635 	struct hist_entry *pos;
2636 
2637 	if (hists__has(hists, need_collapse))
2638 		root = &hists->entries_collapsed;
2639 	else
2640 		root = hists->entries_in;
2641 
2642 	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2643 		pos = rb_entry(nd, struct hist_entry, rb_node_in);
2644 		list_del_init(&pos->pairs.node);
2645 	}
2646 
2647 	return 0;
2648 }
2649 
2650 void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
2651 			  struct perf_sample *sample, bool nonany_branch_mode,
2652 			  u64 *total_cycles)
2653 {
2654 	struct branch_info *bi;
2655 	struct branch_entry *entries = perf_sample__branch_entries(sample);
2656 
2657 	/* If we have branch cycles, always annotate them. */
2658 	if (bs && bs->nr && entries[0].flags.cycles) {
2659 		int i;
2660 
2661 		bi = sample__resolve_bstack(sample, al);
2662 		if (bi) {
2663 			struct addr_map_symbol *prev = NULL;
2664 
2665 			/*
2666 			 * Ignore errors; we still want to process the
2667 			 * other entries.
2668 			 *
2669 			 * For non-standard branch modes, always
2670 			 * force no IPC (prev == NULL).
2671 			 *
2672 			 * Note that perf stores branches in reverse
2673 			 * program order!
2674 			 */
2675 			for (i = bs->nr - 1; i >= 0; i--) {
2676 				addr_map_symbol__account_cycles(&bi[i].from,
2677 					nonany_branch_mode ? NULL : prev,
2678 					bi[i].flags.cycles);
2679 				prev = &bi[i].to;
2680 
2681 				if (total_cycles)
2682 					*total_cycles += bi[i].flags.cycles;
2683 			}
2684 			free(bi);
2685 		}
2686 	}
2687 }
2688 
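/*
 * Example (editor's sketch, not part of the original file): a sample
 * processing path would account branch cycles before adding hist entries,
 * roughly as perf report does for branch stacks. Passing false keeps the
 * default branch mode, so IPC is computed from consecutive entries. The
 * helper name is hypothetical.
 */
static void __maybe_unused example_account_sample_cycles(struct perf_sample *sample,
							 struct addr_location *al,
							 u64 *total_cycles)
{
	hist__account_cycles(sample->branch_stack, al, sample,
			     false, total_cycles);
}
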
2689 size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp,
2690 				 bool skip_empty)
2691 {
2692 	struct evsel *pos;
2693 	size_t ret = 0;
2694 
2695 	evlist__for_each_entry(evlist, pos) {
2696 		struct hists *hists = evsel__hists(pos);
2697 
2698 		if (skip_empty && !hists->stats.nr_samples && !hists->stats.nr_lost_samples)
2699 			continue;
2700 
2701 		ret += fprintf(fp, "%s stats:\n", evsel__name(pos));
2702 		if (hists->stats.nr_samples)
2703 			ret += fprintf(fp, "%16s events: %10u\n",
2704 				       "SAMPLE", hists->stats.nr_samples);
2705 		if (hists->stats.nr_lost_samples)
2706 			ret += fprintf(fp, "%16s events: %10u\n",
2707 				       "LOST_SAMPLES", hists->stats.nr_lost_samples);
2708 	}
2709 
2710 	return ret;
2711 }
2712 
2713 
2714 u64 hists__total_period(struct hists *hists)
2715 {
2716 	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
2717 		hists->stats.total_period;
2718 }
2719 
2720 int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
2721 {
2722 	char unit;
2723 	int printed;
2724 	const struct dso *dso = hists->dso_filter;
2725 	struct thread *thread = hists->thread_filter;
2726 	int socket_id = hists->socket_filter;
2727 	unsigned long nr_samples = hists->stats.nr_samples;
2728 	u64 nr_events = hists->stats.total_period;
2729 	struct evsel *evsel = hists_to_evsel(hists);
2730 	const char *ev_name = evsel__name(evsel);
2731 	char buf[512], sample_freq_str[64] = "";
2732 	size_t buflen = sizeof(buf);
2733 	char ref[30] = " show reference callgraph, ";
2734 	bool enable_ref = false;
2735 
2736 	if (symbol_conf.filter_relative) {
2737 		nr_samples = hists->stats.nr_non_filtered_samples;
2738 		nr_events = hists->stats.total_non_filtered_period;
2739 	}
2740 
2741 	if (evsel__is_group_event(evsel)) {
2742 		struct evsel *pos;
2743 
2744 		evsel__group_desc(evsel, buf, buflen);
2745 		ev_name = buf;
2746 
2747 		for_each_group_member(pos, evsel) {
2748 			struct hists *pos_hists = evsel__hists(pos);
2749 
2750 			if (symbol_conf.filter_relative) {
2751 				nr_samples += pos_hists->stats.nr_non_filtered_samples;
2752 				nr_events += pos_hists->stats.total_non_filtered_period;
2753 			} else {
2754 				nr_samples += pos_hists->stats.nr_samples;
2755 				nr_events += pos_hists->stats.total_period;
2756 			}
2757 		}
2758 	}
2759 
2760 	if (symbol_conf.show_ref_callgraph &&
2761 	    strstr(ev_name, "call-graph=no"))
2762 		enable_ref = true;
2763 
2764 	if (show_freq)
2765 		scnprintf(sample_freq_str, sizeof(sample_freq_str), " %" PRIu64 " Hz,", evsel->core.attr.sample_freq);
2766 
2767 	nr_samples = convert_unit(nr_samples, &unit);
2768 	printed = scnprintf(bf, size,
2769 			   "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
2770 			   nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
2771 			   ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);
2772 
2773 
2774 	if (hists->uid_filter_str)
2775 		printed += scnprintf(bf + printed, size - printed,
2776 				    ", UID: %s", hists->uid_filter_str);
2777 	if (thread) {
2778 		if (hists__has(hists, thread)) {
2779 			printed += scnprintf(bf + printed, size - printed,
2780 				    ", Thread: %s(%d)",
2781 				     (thread->comm_set ? thread__comm_str(thread) : ""),
2782 				    thread->tid);
2783 		} else {
2784 			printed += scnprintf(bf + printed, size - printed,
2785 				    ", Thread: %s",
2786 				     (thread->comm_set ? thread__comm_str(thread) : ""));
2787 		}
2788 	}
2789 	if (dso)
2790 		printed += scnprintf(bf + printed, size - printed,
2791 				    ", DSO: %s", dso->short_name);
2792 	if (socket_id > -1)
2793 		printed += scnprintf(bf + printed, size - printed,
2794 				    ", Processor Socket: %d", socket_id);
2795 
2796 	return printed;
2797 }
2798 
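/*
 * Example (editor's sketch, not part of the original file): the TUI and
 * GTK browsers build their window title with this helper; show_freq=false
 * drops the " N Hz," fragment. The helper name is hypothetical.
 */
static void __maybe_unused example_print_title(struct hists *hists)
{
	char bf[512];

	__hists__scnprintf_title(hists, bf, sizeof(bf), false);
	pr_debug("%s\n", bf);
}
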
2799 int parse_filter_percentage(const struct option *opt __maybe_unused,
2800 			    const char *arg, int unset __maybe_unused)
2801 {
2802 	if (!strcmp(arg, "relative"))
2803 		symbol_conf.filter_relative = true;
2804 	else if (!strcmp(arg, "absolute"))
2805 		symbol_conf.filter_relative = false;
2806 	else {
2807 		pr_debug("Invalid percentage: %s\n", arg);
2808 		return -1;
2809 	}
2810 
2811 	return 0;
2812 }
2813 
2814 int perf_hist_config(const char *var, const char *value)
2815 {
2816 	if (!strcmp(var, "hist.percentage"))
2817 		return parse_filter_percentage(NULL, value, 0);
2818 
2819 	return 0;
2820 }
2821 
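/*
 * Example (editor's sketch, not part of the original file): this is the
 * perfconfig hook, so a ~/.perfconfig section like
 *
 *	[hist]
 *		percentage = relative
 *
 * arrives here as var = "hist.percentage", value = "relative" and flips
 * symbol_conf.filter_relative. The helper name is hypothetical.
 */
static void __maybe_unused example_hist_config(void)
{
	perf_hist_config("hist.percentage", "relative");
}
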
2822 int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
2823 {
2824 	memset(hists, 0, sizeof(*hists));
2825 	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
2826 	hists->entries_in = &hists->entries_in_array[0];
2827 	hists->entries_collapsed = RB_ROOT_CACHED;
2828 	hists->entries = RB_ROOT_CACHED;
2829 	mutex_init(&hists->lock);
2830 	hists->socket_filter = -1;
2831 	hists->hpp_list = hpp_list;
2832 	INIT_LIST_HEAD(&hists->hpp_formats);
2833 	return 0;
2834 }
2835 
2836 static void hists__delete_remaining_entries(struct rb_root_cached *root)
2837 {
2838 	struct rb_node *node;
2839 	struct hist_entry *he;
2840 
2841 	while (!RB_EMPTY_ROOT(&root->rb_root)) {
2842 		node = rb_first_cached(root);
2843 		rb_erase_cached(node, root);
2844 
2845 		he = rb_entry(node, struct hist_entry, rb_node_in);
2846 		hist_entry__delete(he);
2847 	}
2848 }
2849 
2850 static void hists__delete_all_entries(struct hists *hists)
2851 {
2852 	hists__delete_entries(hists);
2853 	hists__delete_remaining_entries(&hists->entries_in_array[0]);
2854 	hists__delete_remaining_entries(&hists->entries_in_array[1]);
2855 	hists__delete_remaining_entries(&hists->entries_collapsed);
2856 }
2857 
2858 static void hists_evsel__exit(struct evsel *evsel)
2859 {
2860 	struct hists *hists = evsel__hists(evsel);
2861 	struct perf_hpp_fmt *fmt, *pos;
2862 	struct perf_hpp_list_node *node, *tmp;
2863 
2864 	hists__delete_all_entries(hists);
2865 
2866 	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
2867 		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
2868 			list_del_init(&fmt->list);
2869 			free(fmt);
2870 		}
2871 		list_del_init(&node->list);
2872 		free(node);
2873 	}
2874 }
2875 
2876 static int hists_evsel__init(struct evsel *evsel)
2877 {
2878 	struct hists *hists = evsel__hists(evsel);
2879 
2880 	__hists__init(hists, &perf_hpp_list);
2881 	return 0;
2882 }
2883 
2884 /*
2885  * The hist_entry objects stored in the rbtrees are freed by
2886  * hists_evsel__exit(), registered via evsel__object_config() below.
2887  */
2888 
2889 int hists__init(void)
2890 {
2891 	int err = evsel__object_config(sizeof(struct hists_evsel),
2892 				       hists_evsel__init, hists_evsel__exit);
2893 	if (err)
2894 		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
2895 
2896 	return err;
2897 }
2898 
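/*
 * Example (editor's sketch, not part of the original file): tools that
 * use hists (report, top, diff) call hists__init() once at startup,
 * before any evsel is allocated, so that evsel objects are sized as
 * struct hists_evsel. The helper name is hypothetical.
 */
static int __maybe_unused example_tool_setup(void)
{
	if (hists__init())
		return -1;

	/* ... create the evlist and process samples ... */
	return 0;
}
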
2899 void perf_hpp_list__init(struct perf_hpp_list *list)
2900 {
2901 	INIT_LIST_HEAD(&list->fields);
2902 	INIT_LIST_HEAD(&list->sorts);
2903 }
2904