xref: /openbmc/linux/tools/perf/util/callchain.c (revision 9fb29c73)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2009-2011, Frederic Weisbecker <fweisbec@gmail.com>
4  *
5  * Handle the callchains from the stream in an ad-hoc radix tree and then
6  * sort them in an rbtree.
7  *
8  * Using a radix for code path provides a fast retrieval and factorizes
9  * memory use. Also that lets us use the paths in a hierarchical graph view.
10  *
11  */
12 
13 #include <inttypes.h>
14 #include <stdlib.h>
15 #include <stdio.h>
16 #include <stdbool.h>
17 #include <errno.h>
18 #include <math.h>
19 
20 #include "asm/bug.h"
21 
22 #include "hist.h"
23 #include "util.h"
24 #include "sort.h"
25 #include "machine.h"
26 #include "callchain.h"
27 #include "branch.h"
28 
/*
 * Default callchain parameters, shared between the live global
 * (callchain_param) and the pristine copy (callchain_param_default)
 * so the two can never drift apart.
 */
#define CALLCHAIN_PARAM_DEFAULT			\
	.mode		= CHAIN_GRAPH_ABS,	\
	.min_percent	= 0.5,			\
	.order		= ORDER_CALLEE,		\
	.key		= CCKEY_FUNCTION,	\
	.value		= CCVAL_PERCENT,	\

/* Global callchain parameters, adjusted by the option parsers below. */
struct callchain_param callchain_param = {
	CALLCHAIN_PARAM_DEFAULT
};
39 
/*
 * Are there any events using DWARF callchains?
 *
 * I.e.
 *
 * -e cycles/call-graph=dwarf/
 */
bool dwarf_callchain_users;

/* Untouched copy of the defaults, kept for resetting callchain_param. */
struct callchain_param callchain_param_default = {
	CALLCHAIN_PARAM_DEFAULT
};

/* Per-thread cursor used while resolving and appending callchains. */
__thread struct callchain_cursor callchain_cursor;
54 
/*
 * Parse a record-side --call-graph option string into @param.
 * Thin wrapper kept for symmetry with parse_callchain_report_opt().
 */
int parse_callchain_record_opt(const char *arg, struct callchain_param *param)
{
	return parse_callchain_record(arg, param);
}
59 
60 static int parse_callchain_mode(const char *value)
61 {
62 	if (!strncmp(value, "graph", strlen(value))) {
63 		callchain_param.mode = CHAIN_GRAPH_ABS;
64 		return 0;
65 	}
66 	if (!strncmp(value, "flat", strlen(value))) {
67 		callchain_param.mode = CHAIN_FLAT;
68 		return 0;
69 	}
70 	if (!strncmp(value, "fractal", strlen(value))) {
71 		callchain_param.mode = CHAIN_GRAPH_REL;
72 		return 0;
73 	}
74 	if (!strncmp(value, "folded", strlen(value))) {
75 		callchain_param.mode = CHAIN_FOLDED;
76 		return 0;
77 	}
78 	return -1;
79 }
80 
81 static int parse_callchain_order(const char *value)
82 {
83 	if (!strncmp(value, "caller", strlen(value))) {
84 		callchain_param.order = ORDER_CALLER;
85 		callchain_param.order_set = true;
86 		return 0;
87 	}
88 	if (!strncmp(value, "callee", strlen(value))) {
89 		callchain_param.order = ORDER_CALLEE;
90 		callchain_param.order_set = true;
91 		return 0;
92 	}
93 	return -1;
94 }
95 
96 static int parse_callchain_sort_key(const char *value)
97 {
98 	if (!strncmp(value, "function", strlen(value))) {
99 		callchain_param.key = CCKEY_FUNCTION;
100 		return 0;
101 	}
102 	if (!strncmp(value, "address", strlen(value))) {
103 		callchain_param.key = CCKEY_ADDRESS;
104 		return 0;
105 	}
106 	if (!strncmp(value, "srcline", strlen(value))) {
107 		callchain_param.key = CCKEY_SRCLINE;
108 		return 0;
109 	}
110 	if (!strncmp(value, "branch", strlen(value))) {
111 		callchain_param.branch_callstack = 1;
112 		return 0;
113 	}
114 	return -1;
115 }
116 
117 static int parse_callchain_value(const char *value)
118 {
119 	if (!strncmp(value, "percent", strlen(value))) {
120 		callchain_param.value = CCVAL_PERCENT;
121 		return 0;
122 	}
123 	if (!strncmp(value, "period", strlen(value))) {
124 		callchain_param.value = CCVAL_PERIOD;
125 		return 0;
126 	}
127 	if (!strncmp(value, "count", strlen(value))) {
128 		callchain_param.value = CCVAL_COUNT;
129 		return 0;
130 	}
131 	return -1;
132 }
133 
134 static int get_stack_size(const char *str, unsigned long *_size)
135 {
136 	char *endptr;
137 	unsigned long size;
138 	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));
139 
140 	size = strtoul(str, &endptr, 0);
141 
142 	do {
143 		if (*endptr)
144 			break;
145 
146 		size = round_up(size, sizeof(u64));
147 		if (!size || size > max_size)
148 			break;
149 
150 		*_size = size;
151 		return 0;
152 
153 	} while (0);
154 
155 	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
156 	       max_size, str);
157 	return -1;
158 }
159 
/*
 * Parse a report-side --call-graph option string: a comma separated
 * list that may contain a print mode, order, sort key, value type,
 * the minimum percent and the print limit, in any order. When
 * @allow_record_opt is set (perf top), record options such as
 * "dwarf,4096" are accepted too, in which case a number that follows
 * "dwarf" is interpreted as the stack dump size rather than as the
 * minimum percent.
 *
 * Returns 0 on success, -1 on a malformed token.
 */
static int
__parse_callchain_report_opt(const char *arg, bool allow_record_opt)
{
	char *tok;
	char *endptr, *saveptr = NULL;
	bool minpcnt_set = false;
	bool record_opt_set = false;
	bool try_stack_size = false;

	callchain_param.enabled = true;
	symbol_conf.use_callchain = true;

	if (!arg)
		return 0;

	/* NB: strtok_r() writes NULs into the buffer behind @arg */
	while ((tok = strtok_r((char *)arg, ",", &saveptr)) != NULL) {
		if (!strncmp(tok, "none", strlen(tok))) {
			callchain_param.mode = CHAIN_NONE;
			callchain_param.enabled = false;
			symbol_conf.use_callchain = false;
			return 0;
		}

		if (!parse_callchain_mode(tok) ||
		    !parse_callchain_order(tok) ||
		    !parse_callchain_sort_key(tok) ||
		    !parse_callchain_value(tok)) {
			/* parsing ok - move on to the next */
			try_stack_size = false;
			goto next;
		} else if (allow_record_opt && !record_opt_set) {
			if (parse_callchain_record(tok, &callchain_param))
				goto try_numbers;

			/* assume that number followed by 'dwarf' is stack size */
			if (callchain_param.record_mode == CALLCHAIN_DWARF)
				try_stack_size = true;

			record_opt_set = true;
			goto next;
		}

try_numbers:
		if (try_stack_size) {
			unsigned long size = 0;

			if (get_stack_size(tok, &size) < 0)
				return -1;
			callchain_param.dump_size = size;
			try_stack_size = false;
		} else if (!minpcnt_set) {
			/* try to get the min percent */
			callchain_param.min_percent = strtod(tok, &endptr);
			if (tok == endptr)
				return -1;
			minpcnt_set = true;
		} else {
			/* try print limit at last */
			callchain_param.print_limit = strtoul(tok, &endptr, 0);
			if (tok == endptr)
				return -1;
		}
next:
		/* subsequent strtok_r() calls must get a NULL first argument */
		arg = NULL;
	}

	if (callchain_register_param(&callchain_param) < 0) {
		pr_err("Can't register callchain params\n");
		return -1;
	}
	return 0;
}
232 
/* Parse --call-graph for perf report: record options are rejected. */
int parse_callchain_report_opt(const char *arg)
{
	return __parse_callchain_report_opt(arg, false);
}

/* Parse --call-graph for perf top: record options are accepted too. */
int parse_callchain_top_opt(const char *arg)
{
	return __parse_callchain_report_opt(arg, true);
}
242 
243 int parse_callchain_record(const char *arg, struct callchain_param *param)
244 {
245 	char *tok, *name, *saveptr = NULL;
246 	char *buf;
247 	int ret = -1;
248 
249 	/* We need buffer that we know we can write to. */
250 	buf = malloc(strlen(arg) + 1);
251 	if (!buf)
252 		return -ENOMEM;
253 
254 	strcpy(buf, arg);
255 
256 	tok = strtok_r((char *)buf, ",", &saveptr);
257 	name = tok ? : (char *)buf;
258 
259 	do {
260 		/* Framepointer style */
261 		if (!strncmp(name, "fp", sizeof("fp"))) {
262 			if (!strtok_r(NULL, ",", &saveptr)) {
263 				param->record_mode = CALLCHAIN_FP;
264 				ret = 0;
265 			} else
266 				pr_err("callchain: No more arguments "
267 				       "needed for --call-graph fp\n");
268 			break;
269 
270 		/* Dwarf style */
271 		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
272 			const unsigned long default_stack_dump_size = 8192;
273 
274 			ret = 0;
275 			param->record_mode = CALLCHAIN_DWARF;
276 			param->dump_size = default_stack_dump_size;
277 			dwarf_callchain_users = true;
278 
279 			tok = strtok_r(NULL, ",", &saveptr);
280 			if (tok) {
281 				unsigned long size = 0;
282 
283 				ret = get_stack_size(tok, &size);
284 				param->dump_size = size;
285 			}
286 		} else if (!strncmp(name, "lbr", sizeof("lbr"))) {
287 			if (!strtok_r(NULL, ",", &saveptr)) {
288 				param->record_mode = CALLCHAIN_LBR;
289 				ret = 0;
290 			} else
291 				pr_err("callchain: No more arguments "
292 					"needed for --call-graph lbr\n");
293 			break;
294 		} else {
295 			pr_err("callchain: Unknown --call-graph option "
296 			       "value: %s\n", arg);
297 			break;
298 		}
299 
300 	} while (0);
301 
302 	free(buf);
303 	return ret;
304 }
305 
306 int perf_callchain_config(const char *var, const char *value)
307 {
308 	char *endptr;
309 
310 	if (!strstarts(var, "call-graph."))
311 		return 0;
312 	var += sizeof("call-graph.") - 1;
313 
314 	if (!strcmp(var, "record-mode"))
315 		return parse_callchain_record_opt(value, &callchain_param);
316 	if (!strcmp(var, "dump-size")) {
317 		unsigned long size = 0;
318 		int ret;
319 
320 		ret = get_stack_size(value, &size);
321 		callchain_param.dump_size = size;
322 
323 		return ret;
324 	}
325 	if (!strcmp(var, "print-type")){
326 		int ret;
327 		ret = parse_callchain_mode(value);
328 		if (ret == -1)
329 			pr_err("Invalid callchain mode: %s\n", value);
330 		return ret;
331 	}
332 	if (!strcmp(var, "order")){
333 		int ret;
334 		ret = parse_callchain_order(value);
335 		if (ret == -1)
336 			pr_err("Invalid callchain order: %s\n", value);
337 		return ret;
338 	}
339 	if (!strcmp(var, "sort-key")){
340 		int ret;
341 		ret = parse_callchain_sort_key(value);
342 		if (ret == -1)
343 			pr_err("Invalid callchain sort key: %s\n", value);
344 		return ret;
345 	}
346 	if (!strcmp(var, "threshold")) {
347 		callchain_param.min_percent = strtod(value, &endptr);
348 		if (value == endptr) {
349 			pr_err("Invalid callchain threshold: %s\n", value);
350 			return -1;
351 		}
352 	}
353 	if (!strcmp(var, "print-limit")) {
354 		callchain_param.print_limit = strtod(value, &endptr);
355 		if (value == endptr) {
356 			pr_err("Invalid callchain print limit: %s\n", value);
357 			return -1;
358 		}
359 	}
360 
361 	return 0;
362 }
363 
/*
 * Insert @chain into the sorted rbtree @root. Flat/folded modes key
 * on the node's own hits, graph modes on cumulated hits; bigger keys
 * go left, so rb_first() yields the hottest entry.
 *
 * NOTE(review): the CHAIN_NONE/default case never advances the
 * iterator, so reaching here in that mode would spin forever —
 * callchain_register_param() rejects CHAIN_NONE, keeping it
 * unreachable. Confirm before relying on this in new callers.
 */
static void
rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
		    enum chain_mode mode)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct callchain_node *rnode;
	u64 chain_cumul = callchain_cumul_hits(chain);

	while (*p) {
		u64 rnode_cumul;

		parent = *p;
		rnode = rb_entry(parent, struct callchain_node, rb_node);
		rnode_cumul = callchain_cumul_hits(rnode);

		switch (mode) {
		case CHAIN_FLAT:
		case CHAIN_FOLDED:
			if (rnode->hit < chain->hit)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
			break;
		case CHAIN_GRAPH_ABS: /* Falldown */
		case CHAIN_GRAPH_REL:
			if (rnode_cumul < chain_cumul)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
			break;
		case CHAIN_NONE:
		default:
			break;
		}
	}

	rb_link_node(&chain->rb_node, parent, p);
	rb_insert_color(&chain->rb_node, root);
}
404 
405 static void
406 __sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
407 		  u64 min_hit)
408 {
409 	struct rb_node *n;
410 	struct callchain_node *child;
411 
412 	n = rb_first(&node->rb_root_in);
413 	while (n) {
414 		child = rb_entry(n, struct callchain_node, rb_node_in);
415 		n = rb_next(n);
416 
417 		__sort_chain_flat(rb_root, child, min_hit);
418 	}
419 
420 	if (node->hit && node->hit >= min_hit)
421 		rb_insert_callchain(rb_root, node, CHAIN_FLAT);
422 }
423 
/*
 * Once we get every callchains from the stream, we can now
 * sort them by hit
 */
static void
sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root,
		u64 min_hit, struct callchain_param *param __maybe_unused)
{
	/* start from an empty output tree; nodes get re-linked into it */
	*rb_root = RB_ROOT;
	__sort_chain_flat(rb_root, &root->node, min_hit);
}
435 
436 static void __sort_chain_graph_abs(struct callchain_node *node,
437 				   u64 min_hit)
438 {
439 	struct rb_node *n;
440 	struct callchain_node *child;
441 
442 	node->rb_root = RB_ROOT;
443 	n = rb_first(&node->rb_root_in);
444 
445 	while (n) {
446 		child = rb_entry(n, struct callchain_node, rb_node_in);
447 		n = rb_next(n);
448 
449 		__sort_chain_graph_abs(child, min_hit);
450 		if (callchain_cumul_hits(child) >= min_hit)
451 			rb_insert_callchain(&node->rb_root, child,
452 					    CHAIN_GRAPH_ABS);
453 	}
454 }
455 
/* Sort the whole graph by absolute cumulated hits. */
static void
sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root,
		     u64 min_hit, struct callchain_param *param __maybe_unused)
{
	__sort_chain_graph_abs(&chain_root->node, min_hit);
	/* expose the root node's sorted children as the result tree */
	rb_root->rb_node = chain_root->node.rb_root.rb_node;
}
463 
464 static void __sort_chain_graph_rel(struct callchain_node *node,
465 				   double min_percent)
466 {
467 	struct rb_node *n;
468 	struct callchain_node *child;
469 	u64 min_hit;
470 
471 	node->rb_root = RB_ROOT;
472 	min_hit = ceil(node->children_hit * min_percent);
473 
474 	n = rb_first(&node->rb_root_in);
475 	while (n) {
476 		child = rb_entry(n, struct callchain_node, rb_node_in);
477 		n = rb_next(n);
478 
479 		__sort_chain_graph_rel(child, min_percent);
480 		if (callchain_cumul_hits(child) >= min_hit)
481 			rb_insert_callchain(&node->rb_root, child,
482 					    CHAIN_GRAPH_REL);
483 	}
484 }
485 
/* Sort the whole graph relative to each parent's cumulated hits. */
static void
sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root,
		     u64 min_hit __maybe_unused, struct callchain_param *param)
{
	/* min_percent is configured in percent; the helper wants a ratio */
	__sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0);
	rb_root->rb_node = chain_root->node.rb_root.rb_node;
}
493 
494 int callchain_register_param(struct callchain_param *param)
495 {
496 	switch (param->mode) {
497 	case CHAIN_GRAPH_ABS:
498 		param->sort = sort_chain_graph_abs;
499 		break;
500 	case CHAIN_GRAPH_REL:
501 		param->sort = sort_chain_graph_rel;
502 		break;
503 	case CHAIN_FLAT:
504 	case CHAIN_FOLDED:
505 		param->sort = sort_chain_flat;
506 		break;
507 	case CHAIN_NONE:
508 	default:
509 		return -1;
510 	}
511 	return 0;
512 }
513 
/*
 * Create a child for a parent. If inherit_children, then the new child
 * will become the new parent of its parent's children.
 * Returns NULL on allocation failure.
 */
static struct callchain_node *
create_child(struct callchain_node *parent, bool inherit_children)
{
	struct callchain_node *new;

	new = zalloc(sizeof(*new));
	if (!new) {
		perror("not enough memory to create child for code path tree");
		return NULL;
	}
	new->parent = parent;
	INIT_LIST_HEAD(&new->val);
	INIT_LIST_HEAD(&new->parent_val);

	if (inherit_children) {
		struct rb_node *n;
		struct callchain_node *child;

		/* steal the whole children tree from the parent... */
		new->rb_root_in = parent->rb_root_in;
		parent->rb_root_in = RB_ROOT;

		/* ...and reparent every stolen child to the new node */
		n = rb_first(&new->rb_root_in);
		while (n) {
			child = rb_entry(n, struct callchain_node, rb_node_in);
			child->parent = new;
			n = rb_next(n);
		}

		/* make it the first child */
		rb_link_node(&new->rb_node_in, NULL, &parent->rb_root_in.rb_node);
		rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
	}

	return new;
}
553 
554 
/*
 * Fill the node with callchain values
 *
 * Copy every remaining cursor entry (from the current position to the
 * end) into @node's value list, taking a reference on each map.
 * Branch entries additionally seed per-entry branch statistics: an
 * entry with ->branch_from set is the target ("to") of a branch,
 * otherwise it is the source ("from").
 *
 * Returns 0 on success, -1 on allocation failure; entries copied so
 * far remain on @node->val for the caller to unwind.
 */
static int
fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
{
	struct callchain_cursor_node *cursor_node;

	node->val_nr = cursor->nr - cursor->pos;
	if (!node->val_nr)
		pr_warning("Warning: empty node in callchain tree\n");

	cursor_node = callchain_cursor_current(cursor);

	while (cursor_node) {
		struct callchain_list *call;

		call = zalloc(sizeof(*call));
		if (!call) {
			perror("not enough memory for the code path tree");
			return -1;
		}
		call->ip = cursor_node->ip;
		call->ms.sym = cursor_node->sym;
		call->ms.map = map__get(cursor_node->map);
		call->srcline = cursor_node->srcline;

		if (cursor_node->branch) {
			call->branch_count = 1;

			if (cursor_node->branch_from) {
				/*
				 * branch_from is set with value somewhere else
				 * to imply it's "to" of a branch.
				 */
				call->brtype_stat.branch_to = true;

				if (cursor_node->branch_flags.predicted)
					call->predicted_count = 1;

				if (cursor_node->branch_flags.abort)
					call->abort_count = 1;

				branch_type_count(&call->brtype_stat,
						  &cursor_node->branch_flags,
						  cursor_node->branch_from,
						  cursor_node->ip);
			} else {
				/*
				 * It's "from" of a branch
				 */
				call->brtype_stat.branch_to = false;
				call->cycles_count =
					cursor_node->branch_flags.cycles;
				call->iter_count = cursor_node->nr_loop_iter;
				call->iter_cycles = cursor_node->iter_cycles;
			}
		}

		list_add_tail(&call->list, &node->val);

		/* consume the entry before looking at the next one */
		callchain_cursor_advance(cursor);
		cursor_node = callchain_cursor_current(cursor);
	}
	return 0;
}
621 
622 static struct callchain_node *
623 add_child(struct callchain_node *parent,
624 	  struct callchain_cursor *cursor,
625 	  u64 period)
626 {
627 	struct callchain_node *new;
628 
629 	new = create_child(parent, false);
630 	if (new == NULL)
631 		return NULL;
632 
633 	if (fill_node(new, cursor) < 0) {
634 		struct callchain_list *call, *tmp;
635 
636 		list_for_each_entry_safe(call, tmp, &new->val, list) {
637 			list_del(&call->list);
638 			map__zput(call->ms.map);
639 			free(call);
640 		}
641 		free(new);
642 		return NULL;
643 	}
644 
645 	new->children_hit = 0;
646 	new->hit = period;
647 	new->children_count = 0;
648 	new->count = 1;
649 	return new;
650 }
651 
/*
 * Result of comparing a cursor entry against a callchain entry;
 * LT/GT double as rbtree ordering hints for the callers.
 */
enum match_result {
	MATCH_ERROR  = -1,
	MATCH_EQ,
	MATCH_LT,
	MATCH_GT,
};
658 
659 static enum match_result match_chain_strings(const char *left,
660 					     const char *right)
661 {
662 	enum match_result ret = MATCH_EQ;
663 	int cmp;
664 
665 	if (left && right)
666 		cmp = strcmp(left, right);
667 	else if (!left && right)
668 		cmp = 1;
669 	else if (left && !right)
670 		cmp = -1;
671 	else
672 		return MATCH_ERROR;
673 
674 	if (cmp != 0)
675 		ret = cmp < 0 ? MATCH_LT : MATCH_GT;
676 
677 	return ret;
678 }
679 
680 /*
681  * We need to always use relative addresses because we're aggregating
682  * callchains from multiple threads, i.e. different address spaces, so
683  * comparing absolute addresses make no sense as a symbol in a DSO may end up
684  * in a different address when used in a different binary or even the same
685  * binary but with some sort of address randomization technique, thus we need
686  * to compare just relative addresses. -acme
687  */
688 static enum match_result match_chain_dso_addresses(struct map *left_map, u64 left_ip,
689 						   struct map *right_map, u64 right_ip)
690 {
691 	struct dso *left_dso = left_map ? left_map->dso : NULL;
692 	struct dso *right_dso = right_map ? right_map->dso : NULL;
693 
694 	if (left_dso != right_dso)
695 		return left_dso < right_dso ? MATCH_LT : MATCH_GT;
696 
697 	if (left_ip != right_ip)
698  		return left_ip < right_ip ? MATCH_LT : MATCH_GT;
699 
700 	return MATCH_EQ;
701 }
702 
/*
 * Compare a cursor entry (incoming sample frame) against an existing
 * callchain entry according to callchain_param.key, and on an exact
 * match fold the frame's branch statistics into @cnode. Each key
 * falls back to the next cheaper one when it cannot decide.
 *
 * The LT/GT results double as rbtree ordering hints for the callers.
 */
static enum match_result match_chain(struct callchain_cursor_node *node,
				     struct callchain_list *cnode)
{
	enum match_result match = MATCH_ERROR;

	switch (callchain_param.key) {
	case CCKEY_SRCLINE:
		match = match_chain_strings(cnode->srcline, node->srcline);
		if (match != MATCH_ERROR)
			break;
		/* otherwise fall-back to symbol-based comparison below */
		__fallthrough;
	case CCKEY_FUNCTION:
		if (node->sym && cnode->ms.sym) {
			/*
			 * Compare inlined frames based on their symbol name
			 * because different inlined frames will have the same
			 * symbol start. Otherwise do a faster comparison based
			 * on the symbol start address.
			 */
			if (cnode->ms.sym->inlined || node->sym->inlined) {
				match = match_chain_strings(cnode->ms.sym->name,
							    node->sym->name);
				if (match != MATCH_ERROR)
					break;
			} else {
				match = match_chain_dso_addresses(cnode->ms.map, cnode->ms.sym->start,
								  node->map, node->sym->start);
				break;
			}
		}
		/* otherwise fall-back to IP-based comparison below */
		__fallthrough;
	case CCKEY_ADDRESS:
	default:
		match = match_chain_dso_addresses(cnode->ms.map, cnode->ip, node->map, node->ip);
		break;
	}

	/* on an exact match, accumulate the branch statistics */
	if (match == MATCH_EQ && node->branch) {
		cnode->branch_count++;

		if (node->branch_from) {
			/*
			 * It's "to" of a branch
			 */
			cnode->brtype_stat.branch_to = true;

			if (node->branch_flags.predicted)
				cnode->predicted_count++;

			if (node->branch_flags.abort)
				cnode->abort_count++;

			branch_type_count(&cnode->brtype_stat,
					  &node->branch_flags,
					  node->branch_from,
					  node->ip);
		} else {
			/*
			 * It's "from" of a branch
			 */
			cnode->brtype_stat.branch_to = false;
			cnode->cycles_count += node->branch_flags.cycles;
			cnode->iter_count += node->nr_loop_iter;
			cnode->iter_cycles += node->iter_cycles;
			cnode->from_count++;
		}
	}

	return match;
}
775 
/*
 * Split the parent in two parts (a new child is created) and
 * give a part of its callchain to the created child.
 * Then create another child to host the given callchain of new branch
 *
 * @idx_parents + @idx_local locate the split point within the whole
 * chain; @to_split is the first callchain_list entry that moves to
 * the new child. Returns 0 on success, -1 on allocation failure.
 */
static int
split_add_child(struct callchain_node *parent,
		struct callchain_cursor *cursor,
		struct callchain_list *to_split,
		u64 idx_parents, u64 idx_local, u64 period)
{
	struct callchain_node *new;
	struct list_head *old_tail;
	unsigned int idx_total = idx_parents + idx_local;

	/* split */
	new = create_child(parent, true);
	if (new == NULL)
		return -1;

	/* split the callchain and move a part to the new child */
	old_tail = parent->val.prev;
	list_del_range(&to_split->list, old_tail);
	new->val.next = &to_split->list;
	new->val.prev = old_tail;
	to_split->list.prev = &new->val;
	old_tail->next = &new->val;

	/* split the hits */
	new->hit = parent->hit;
	new->children_hit = parent->children_hit;
	parent->children_hit = callchain_cumul_hits(new);
	new->val_nr = parent->val_nr - idx_local;
	parent->val_nr = idx_local;
	new->count = parent->count;
	new->children_count = parent->children_count;
	parent->children_count = callchain_cumul_counts(new);

	/* create a new child for the new branch if any */
	if (idx_total < cursor->nr) {
		struct callchain_node *first;
		struct callchain_list *cnode;
		struct callchain_cursor_node *node;
		struct rb_node *p, **pp;

		/* the parent becomes a pure internal node */
		parent->hit = 0;
		parent->children_hit += period;
		parent->count = 0;
		parent->children_count += 1;

		node = callchain_cursor_current(cursor);
		new = add_child(parent, cursor, period);
		if (new == NULL)
			return -1;

		/*
		 * This is second child since we moved parent's children
		 * to new (first) child above.
		 */
		p = parent->rb_root_in.rb_node;
		first = rb_entry(p, struct callchain_node, rb_node_in);
		cnode = list_first_entry(&first->val, struct callchain_list,
					 list);

		if (match_chain(node, cnode) == MATCH_LT)
			pp = &p->rb_left;
		else
			pp = &p->rb_right;

		rb_link_node(&new->rb_node_in, p, pp);
		rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
	} else {
		/* the new chain ended exactly here: parent keeps the hit */
		parent->hit = period;
		parent->count = 1;
	}
	return 0;
}
853 
static enum match_result
append_chain(struct callchain_node *root,
	     struct callchain_cursor *cursor,
	     u64 period);

/*
 * Append the remaining cursor entries below @root, either into an
 * existing child whose first entry matches or into a newly created
 * child. The children rbtree is keyed by each child's first entry
 * (see match_chain()). Returns 0 on success, -1 on error.
 */
static int
append_chain_children(struct callchain_node *root,
		      struct callchain_cursor *cursor,
		      u64 period)
{
	struct callchain_node *rnode;
	struct callchain_cursor_node *node;
	struct rb_node **p = &root->rb_root_in.rb_node;
	struct rb_node *parent = NULL;

	node = callchain_cursor_current(cursor);
	if (!node)
		return -1;

	/* lookup in childrens */
	while (*p) {
		enum match_result ret;

		parent = *p;
		rnode = rb_entry(parent, struct callchain_node, rb_node_in);

		/* If at least first entry matches, rely to children */
		ret = append_chain(rnode, cursor, period);
		if (ret == MATCH_EQ)
			goto inc_children_hit;
		if (ret == MATCH_ERROR)
			return -1;

		if (ret == MATCH_LT)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	/* nothing in children, add to the current node */
	rnode = add_child(root, cursor, period);
	if (rnode == NULL)
		return -1;

	rb_link_node(&rnode->rb_node_in, parent, p);
	rb_insert_color(&rnode->rb_node_in, &root->rb_root_in);

inc_children_hit:
	root->children_hit += period;
	root->children_count++;
	return 0;
}
905 
/*
 * Match the beginning of @root's value list against the remaining
 * cursor entries:
 *  - partial match: split @root and hang the remainder as a new child;
 *  - full match and cursor exhausted: account @period to @root;
 *  - full match with entries left over: recurse into the children.
 *
 * MATCH_LT/MATCH_GT tell the caller on which side of @root to keep
 * searching; MATCH_ERROR reports a failure.
 */
static enum match_result
append_chain(struct callchain_node *root,
	     struct callchain_cursor *cursor,
	     u64 period)
{
	struct callchain_list *cnode;
	u64 start = cursor->pos;
	bool found = false;
	u64 matches;
	enum match_result cmp = MATCH_ERROR;

	/*
	 * Lookup in the current node
	 * If we have a symbol, then compare the start to match
	 * anywhere inside a function, unless function
	 * mode is disabled.
	 */
	list_for_each_entry(cnode, &root->val, list) {
		struct callchain_cursor_node *node;

		node = callchain_cursor_current(cursor);
		if (!node)
			break;

		cmp = match_chain(node, cnode);
		if (cmp != MATCH_EQ)
			break;

		found = true;

		callchain_cursor_advance(cursor);
	}

	/* no match at all: report back to the parent */
	if (!found) {
		WARN_ONCE(cmp == MATCH_ERROR, "Chain comparison error\n");
		return cmp;
	}

	matches = cursor->pos - start;

	/* we match only a part of the node. Split it and add the new chain */
	if (matches < root->val_nr) {
		if (split_add_child(root, cursor, cnode, start, matches,
				    period) < 0)
			return MATCH_ERROR;

		return MATCH_EQ;
	}

	/* we match 100% of the path, increment the hit */
	if (matches == root->val_nr && cursor->pos == cursor->nr) {
		root->hit += period;
		root->count++;
		return MATCH_EQ;
	}

	/* We match the node and still have a part remaining */
	if (append_chain_children(root, cursor, period) < 0)
		return MATCH_ERROR;

	return MATCH_EQ;
}
969 
/*
 * Append the callchain held in @cursor to the radix tree under @root,
 * accounting @period to the matching path and tracking the deepest
 * chain seen. Returns 0 on success (including an empty cursor),
 * -1 on error.
 */
int callchain_append(struct callchain_root *root,
		     struct callchain_cursor *cursor,
		     u64 period)
{
	if (!cursor->nr)
		return 0;

	/* rewind the cursor so iteration starts from the first entry */
	callchain_cursor_commit(cursor);

	if (append_chain_children(&root->node, cursor, period) < 0)
		return -1;

	if (cursor->nr > root->max_depth)
		root->max_depth = cursor->nr;

	return 0;
}
987 
/*
 * Recursively merge @src (and its children) into @dst: @src's value
 * list is replayed onto @cursor and, when @src carried hits, appended
 * into @dst; the entries and child nodes of @src are freed as they
 * are consumed. The cursor length and write pointer are restored on
 * the way out so each sibling starts from the same prefix.
 */
static int
merge_chain_branch(struct callchain_cursor *cursor,
		   struct callchain_node *dst, struct callchain_node *src)
{
	struct callchain_cursor_node **old_last = cursor->last;
	struct callchain_node *child;
	struct callchain_list *list, *next_list;
	struct rb_node *n;
	int old_pos = cursor->nr;
	int err = 0;

	list_for_each_entry_safe(list, next_list, &src->val, list) {
		callchain_cursor_append(cursor, list->ip,
					list->ms.map, list->ms.sym,
					false, NULL, 0, 0, 0, list->srcline);
		list_del(&list->list);
		map__zput(list->ms.map);
		free(list);
	}

	if (src->hit) {
		callchain_cursor_commit(cursor);
		if (append_chain_children(dst, cursor, src->hit) < 0)
			return -1;
	}

	n = rb_first(&src->rb_root_in);
	while (n) {
		child = container_of(n, struct callchain_node, rb_node_in);
		n = rb_next(n);
		rb_erase(&child->rb_node_in, &src->rb_root_in);

		err = merge_chain_branch(cursor, dst, child);
		if (err)
			break;

		free(child);
	}

	/* restore the cursor for the caller's next sibling */
	cursor->nr = old_pos;
	cursor->last = old_last;

	return err;
}
1032 
/*
 * Merge all callchains from @src into @dst, using @cursor as scratch
 * space; @src's nodes and entries are freed in the process.
 */
int callchain_merge(struct callchain_cursor *cursor,
		    struct callchain_root *dst, struct callchain_root *src)
{
	return merge_chain_branch(cursor, &dst->node, &src->node);
}
1038 
1039 int callchain_cursor_append(struct callchain_cursor *cursor,
1040 			    u64 ip, struct map *map, struct symbol *sym,
1041 			    bool branch, struct branch_flags *flags,
1042 			    int nr_loop_iter, u64 iter_cycles, u64 branch_from,
1043 			    const char *srcline)
1044 {
1045 	struct callchain_cursor_node *node = *cursor->last;
1046 
1047 	if (!node) {
1048 		node = calloc(1, sizeof(*node));
1049 		if (!node)
1050 			return -ENOMEM;
1051 
1052 		*cursor->last = node;
1053 	}
1054 
1055 	node->ip = ip;
1056 	map__zput(node->map);
1057 	node->map = map__get(map);
1058 	node->sym = sym;
1059 	node->branch = branch;
1060 	node->nr_loop_iter = nr_loop_iter;
1061 	node->iter_cycles = iter_cycles;
1062 	node->srcline = srcline;
1063 
1064 	if (flags)
1065 		memcpy(&node->branch_flags, flags,
1066 			sizeof(struct branch_flags));
1067 
1068 	node->branch_from = branch_from;
1069 	cursor->nr++;
1070 
1071 	cursor->last = &node->next;
1072 
1073 	return 0;
1074 }
1075 
/*
 * Resolve @sample's callchain into @cursor when anything needs it:
 * report callchains, children accumulation, parent-symbol filtering
 * or branch flag counting. Returns 0 when there is nothing to do,
 * otherwise the result of thread__resolve_callchain().
 */
int sample__resolve_callchain(struct perf_sample *sample,
			      struct callchain_cursor *cursor, struct symbol **parent,
			      struct perf_evsel *evsel, struct addr_location *al,
			      int max_stack)
{
	if (sample->callchain == NULL && !symbol_conf.show_branchflag_count)
		return 0;

	if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain ||
	    perf_hpp_list.parent || symbol_conf.show_branchflag_count) {
		return thread__resolve_callchain(al->thread, cursor, evsel, sample,
						 parent, al, max_stack);
	}
	return 0;
}
1091 
1092 int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample)
1093 {
1094 	if ((!symbol_conf.use_callchain || sample->callchain == NULL) &&
1095 		!symbol_conf.show_branchflag_count)
1096 		return 0;
1097 	return callchain_append(he->callchain, &callchain_cursor, sample->period);
1098 }
1099 
/*
 * Fill @al from a resolved cursor node and classify the address'
 * cpumode and level character from the map it belongs to.
 *
 * Returns 1 when the entry should be displayed, 0 when the symbol is
 * unresolved and @hide_unresolved asked to skip such entries.
 */
int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
			bool hide_unresolved)
{
	al->map = node->map;
	al->sym = node->sym;
	al->srcline = node->srcline;
	al->addr = node->ip;

	if (al->sym == NULL) {
		if (hide_unresolved)
			return 0;
		/* no map either: cpumode cannot be classified */
		if (al->map == NULL)
			goto out;
	}

	/* kernel maps belong to the machine's kmaps group */
	if (al->map->groups == &al->machine->kmaps) {
		if (machine__is_host(al->machine)) {
			al->cpumode = PERF_RECORD_MISC_KERNEL;
			al->level = 'k';
		} else {
			al->cpumode = PERF_RECORD_MISC_GUEST_KERNEL;
			al->level = 'g';
		}
	} else {
		if (machine__is_host(al->machine)) {
			al->cpumode = PERF_RECORD_MISC_USER;
			al->level = '.';
		} else if (perf_guest) {
			al->cpumode = PERF_RECORD_MISC_GUEST_USER;
			al->level = 'u';
		} else {
			al->cpumode = PERF_RECORD_MISC_HYPERVISOR;
			al->level = 'H';
		}
	}

out:
	return 1;
}
1139 
1140 char *callchain_list__sym_name(struct callchain_list *cl,
1141 			       char *bf, size_t bfsize, bool show_dso)
1142 {
1143 	bool show_addr = callchain_param.key == CCKEY_ADDRESS;
1144 	bool show_srcline = show_addr || callchain_param.key == CCKEY_SRCLINE;
1145 	int printed;
1146 
1147 	if (cl->ms.sym) {
1148 		const char *inlined = cl->ms.sym->inlined ? " (inlined)" : "";
1149 
1150 		if (show_srcline && cl->srcline)
1151 			printed = scnprintf(bf, bfsize, "%s %s%s",
1152 					    cl->ms.sym->name, cl->srcline,
1153 					    inlined);
1154 		else
1155 			printed = scnprintf(bf, bfsize, "%s%s",
1156 					    cl->ms.sym->name, inlined);
1157 	} else
1158 		printed = scnprintf(bf, bfsize, "%#" PRIx64, cl->ip);
1159 
1160 	if (show_dso)
1161 		scnprintf(bf + printed, bfsize - printed, " %s",
1162 			  cl->ms.map ?
1163 			  cl->ms.map->dso->short_name :
1164 			  "unknown");
1165 
1166 	return bf;
1167 }
1168 
1169 char *callchain_node__scnprintf_value(struct callchain_node *node,
1170 				      char *bf, size_t bfsize, u64 total)
1171 {
1172 	double percent = 0.0;
1173 	u64 period = callchain_cumul_hits(node);
1174 	unsigned count = callchain_cumul_counts(node);
1175 
1176 	if (callchain_param.mode == CHAIN_FOLDED) {
1177 		period = node->hit;
1178 		count = node->count;
1179 	}
1180 
1181 	switch (callchain_param.value) {
1182 	case CCVAL_PERIOD:
1183 		scnprintf(bf, bfsize, "%"PRIu64, period);
1184 		break;
1185 	case CCVAL_COUNT:
1186 		scnprintf(bf, bfsize, "%u", count);
1187 		break;
1188 	case CCVAL_PERCENT:
1189 	default:
1190 		if (total)
1191 			percent = period * 100.0 / total;
1192 		scnprintf(bf, bfsize, "%.2f%%", percent);
1193 		break;
1194 	}
1195 	return bf;
1196 }
1197 
1198 int callchain_node__fprintf_value(struct callchain_node *node,
1199 				 FILE *fp, u64 total)
1200 {
1201 	double percent = 0.0;
1202 	u64 period = callchain_cumul_hits(node);
1203 	unsigned count = callchain_cumul_counts(node);
1204 
1205 	if (callchain_param.mode == CHAIN_FOLDED) {
1206 		period = node->hit;
1207 		count = node->count;
1208 	}
1209 
1210 	switch (callchain_param.value) {
1211 	case CCVAL_PERIOD:
1212 		return fprintf(fp, "%"PRIu64, period);
1213 	case CCVAL_COUNT:
1214 		return fprintf(fp, "%u", count);
1215 	case CCVAL_PERCENT:
1216 	default:
1217 		if (total)
1218 			percent = period * 100.0 / total;
1219 		return percent_color_fprintf(fp, "%.2f%%", percent);
1220 	}
1221 	return 0;
1222 }
1223 
1224 static void callchain_counts_value(struct callchain_node *node,
1225 				   u64 *branch_count, u64 *predicted_count,
1226 				   u64 *abort_count, u64 *cycles_count)
1227 {
1228 	struct callchain_list *clist;
1229 
1230 	list_for_each_entry(clist, &node->val, list) {
1231 		if (branch_count)
1232 			*branch_count += clist->branch_count;
1233 
1234 		if (predicted_count)
1235 			*predicted_count += clist->predicted_count;
1236 
1237 		if (abort_count)
1238 			*abort_count += clist->abort_count;
1239 
1240 		if (cycles_count)
1241 			*cycles_count += clist->cycles_count;
1242 	}
1243 }
1244 
1245 static int callchain_node_branch_counts_cumul(struct callchain_node *node,
1246 					      u64 *branch_count,
1247 					      u64 *predicted_count,
1248 					      u64 *abort_count,
1249 					      u64 *cycles_count)
1250 {
1251 	struct callchain_node *child;
1252 	struct rb_node *n;
1253 
1254 	n = rb_first(&node->rb_root_in);
1255 	while (n) {
1256 		child = rb_entry(n, struct callchain_node, rb_node_in);
1257 		n = rb_next(n);
1258 
1259 		callchain_node_branch_counts_cumul(child, branch_count,
1260 						   predicted_count,
1261 						   abort_count,
1262 						   cycles_count);
1263 
1264 		callchain_counts_value(child, branch_count,
1265 				       predicted_count, abort_count,
1266 				       cycles_count);
1267 	}
1268 
1269 	return 0;
1270 }
1271 
1272 int callchain_branch_counts(struct callchain_root *root,
1273 			    u64 *branch_count, u64 *predicted_count,
1274 			    u64 *abort_count, u64 *cycles_count)
1275 {
1276 	if (branch_count)
1277 		*branch_count = 0;
1278 
1279 	if (predicted_count)
1280 		*predicted_count = 0;
1281 
1282 	if (abort_count)
1283 		*abort_count = 0;
1284 
1285 	if (cycles_count)
1286 		*cycles_count = 0;
1287 
1288 	return callchain_node_branch_counts_cumul(&root->node,
1289 						  branch_count,
1290 						  predicted_count,
1291 						  abort_count,
1292 						  cycles_count);
1293 }
1294 
1295 static int count_pri64_printf(int idx, const char *str, u64 value, char *bf, int bfsize)
1296 {
1297 	int printed;
1298 
1299 	printed = scnprintf(bf, bfsize, "%s%s:%" PRId64 "", (idx) ? " " : " (", str, value);
1300 
1301 	return printed;
1302 }
1303 
/*
 * Append one "name:XX.X%" annotation item to @bf, using the same
 * " ("-or-space separator convention as count_pri64_printf().
 * Items below a non-zero @threshold are suppressed (returns 0).
 */
static int count_float_printf(int idx, const char *str, float value,
			      char *bf, int bfsize, float threshold)
{
	if (threshold != 0.0 && value < threshold)
		return 0;

	return scnprintf(bf, bfsize, "%s%s:%.1f%%", (idx) ? " " : " (", str, value);
}
1316 
1317 static int branch_to_str(char *bf, int bfsize,
1318 			 u64 branch_count, u64 predicted_count,
1319 			 u64 abort_count,
1320 			 struct branch_type_stat *brtype_stat)
1321 {
1322 	int printed, i = 0;
1323 
1324 	printed = branch_type_str(brtype_stat, bf, bfsize);
1325 	if (printed)
1326 		i++;
1327 
1328 	if (predicted_count < branch_count) {
1329 		printed += count_float_printf(i++, "predicted",
1330 				predicted_count * 100.0 / branch_count,
1331 				bf + printed, bfsize - printed, 0.0);
1332 	}
1333 
1334 	if (abort_count) {
1335 		printed += count_float_printf(i++, "abort",
1336 				abort_count * 100.0 / branch_count,
1337 				bf + printed, bfsize - printed, 0.1);
1338 	}
1339 
1340 	if (i)
1341 		printed += scnprintf(bf + printed, bfsize - printed, ")");
1342 
1343 	return printed;
1344 }
1345 
1346 static int branch_from_str(char *bf, int bfsize,
1347 			   u64 branch_count,
1348 			   u64 cycles_count, u64 iter_count,
1349 			   u64 iter_cycles, u64 from_count)
1350 {
1351 	int printed = 0, i = 0;
1352 	u64 cycles, v = 0;
1353 
1354 	cycles = cycles_count / branch_count;
1355 	if (cycles) {
1356 		printed += count_pri64_printf(i++, "cycles",
1357 				cycles,
1358 				bf + printed, bfsize - printed);
1359 	}
1360 
1361 	if (iter_count && from_count) {
1362 		v = iter_count / from_count;
1363 		if (v) {
1364 			printed += count_pri64_printf(i++, "iter",
1365 					v, bf + printed, bfsize - printed);
1366 
1367 			printed += count_pri64_printf(i++, "avg_cycles",
1368 					iter_cycles / iter_count,
1369 					bf + printed, bfsize - printed);
1370 		}
1371 	}
1372 
1373 	if (i)
1374 		printed += scnprintf(bf + printed, bfsize - printed, ")");
1375 
1376 	return printed;
1377 }
1378 
1379 static int counts_str_build(char *bf, int bfsize,
1380 			     u64 branch_count, u64 predicted_count,
1381 			     u64 abort_count, u64 cycles_count,
1382 			     u64 iter_count, u64 iter_cycles,
1383 			     u64 from_count,
1384 			     struct branch_type_stat *brtype_stat)
1385 {
1386 	int printed;
1387 
1388 	if (branch_count == 0)
1389 		return scnprintf(bf, bfsize, " (calltrace)");
1390 
1391 	if (brtype_stat->branch_to) {
1392 		printed = branch_to_str(bf, bfsize, branch_count,
1393 				predicted_count, abort_count, brtype_stat);
1394 	} else {
1395 		printed = branch_from_str(bf, bfsize, branch_count,
1396 				cycles_count, iter_count, iter_cycles,
1397 				from_count);
1398 	}
1399 
1400 	if (!printed)
1401 		bf[0] = 0;
1402 
1403 	return printed;
1404 }
1405 
1406 static int callchain_counts_printf(FILE *fp, char *bf, int bfsize,
1407 				   u64 branch_count, u64 predicted_count,
1408 				   u64 abort_count, u64 cycles_count,
1409 				   u64 iter_count, u64 iter_cycles,
1410 				   u64 from_count,
1411 				   struct branch_type_stat *brtype_stat)
1412 {
1413 	char str[256];
1414 
1415 	counts_str_build(str, sizeof(str), branch_count,
1416 			 predicted_count, abort_count, cycles_count,
1417 			 iter_count, iter_cycles, from_count, brtype_stat);
1418 
1419 	if (fp)
1420 		return fprintf(fp, "%s", str);
1421 
1422 	return scnprintf(bf, bfsize, "%s", str);
1423 }
1424 
1425 int callchain_list_counts__printf_value(struct callchain_list *clist,
1426 					FILE *fp, char *bf, int bfsize)
1427 {
1428 	u64 branch_count, predicted_count;
1429 	u64 abort_count, cycles_count;
1430 	u64 iter_count, iter_cycles;
1431 	u64 from_count;
1432 
1433 	branch_count = clist->branch_count;
1434 	predicted_count = clist->predicted_count;
1435 	abort_count = clist->abort_count;
1436 	cycles_count = clist->cycles_count;
1437 	iter_count = clist->iter_count;
1438 	iter_cycles = clist->iter_cycles;
1439 	from_count = clist->from_count;
1440 
1441 	return callchain_counts_printf(fp, bf, bfsize, branch_count,
1442 				       predicted_count, abort_count,
1443 				       cycles_count, iter_count, iter_cycles,
1444 				       from_count, &clist->brtype_stat);
1445 }
1446 
/*
 * Recursively release everything hanging off @node: its parent_val
 * and val entry lists (dropping the map reference each entry holds)
 * and all child nodes in the rb tree.  @node itself is not freed;
 * children are freed here after their own recursive teardown.
 */
static void free_callchain_node(struct callchain_node *node)
{
	struct callchain_list *list, *tmp;
	struct callchain_node *child;
	struct rb_node *n;

	list_for_each_entry_safe(list, tmp, &node->parent_val, list) {
		list_del(&list->list);
		map__zput(list->ms.map);	/* drop the entry's map ref */
		free(list);
	}

	list_for_each_entry_safe(list, tmp, &node->val, list) {
		list_del(&list->list);
		map__zput(list->ms.map);
		free(list);
	}

	n = rb_first(&node->rb_root_in);
	while (n) {
		child = container_of(n, struct callchain_node, rb_node_in);
		/* advance before erasing: rb_next() needs a still-linked node */
		n = rb_next(n);
		rb_erase(&child->rb_node_in, &node->rb_root_in);

		free_callchain_node(child);
		free(child);
	}
}
1475 
1476 void free_callchain(struct callchain_root *root)
1477 {
1478 	if (!symbol_conf.use_callchain)
1479 		return;
1480 
1481 	free_callchain_node(&root->node);
1482 }
1483 
1484 static u64 decay_callchain_node(struct callchain_node *node)
1485 {
1486 	struct callchain_node *child;
1487 	struct rb_node *n;
1488 	u64 child_hits = 0;
1489 
1490 	n = rb_first(&node->rb_root_in);
1491 	while (n) {
1492 		child = container_of(n, struct callchain_node, rb_node_in);
1493 
1494 		child_hits += decay_callchain_node(child);
1495 		n = rb_next(n);
1496 	}
1497 
1498 	node->hit = (node->hit * 7) / 8;
1499 	node->children_hit = child_hits;
1500 
1501 	return node->hit;
1502 }
1503 
1504 void decay_callchain(struct callchain_root *root)
1505 {
1506 	if (!symbol_conf.use_callchain)
1507 		return;
1508 
1509 	decay_callchain_node(&root->node);
1510 }
1511 
/*
 * Populate node->parent_val with copies of the callchain_list entries
 * of all of @node's ancestors, so the node can display its full call
 * path.  Each copy takes its own reference on the entry's map.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (the partially
 * built list is torn down and @node is left unchanged).
 */
int callchain_node__make_parent_list(struct callchain_node *node)
{
	struct callchain_node *parent = node->parent;
	struct callchain_list *chain, *new;
	LIST_HEAD(head);

	/*
	 * Walk ancestors nearest-first, copying each parent's entries
	 * in reverse onto @head; the reverse splice below restores the
	 * intended display order on parent_val.
	 */
	while (parent) {
		list_for_each_entry_reverse(chain, &parent->val, list) {
			new = malloc(sizeof(*new));
			if (new == NULL)
				goto out;
			*new = *chain;	/* shallow copy, then take our own map ref */
			new->has_children = false;
			map__get(new->ms.map);
			list_add_tail(&new->list, &head);
		}
		parent = parent->parent;
	}

	list_for_each_entry_safe_reverse(chain, new, &head, list)
		list_move_tail(&chain->list, &node->parent_val);

	if (!list_empty(&node->parent_val)) {
		/* top entry expandable iff the node has rb-tree siblings */
		chain = list_first_entry(&node->parent_val, struct callchain_list, list);
		chain->has_children = rb_prev(&node->rb_node) || rb_next(&node->rb_node);

		/* the node's own first entry becomes an inner row */
		chain = list_first_entry(&node->val, struct callchain_list, list);
		chain->has_children = false;
	}
	return 0;

out:
	/* allocation failed: free the copies and drop their map refs */
	list_for_each_entry_safe(chain, new, &head, list) {
		list_del(&chain->list);
		map__zput(chain->ms.map);
		free(chain);
	}
	return -ENOMEM;
}
1551 
1552 int callchain_cursor__copy(struct callchain_cursor *dst,
1553 			   struct callchain_cursor *src)
1554 {
1555 	int rc = 0;
1556 
1557 	callchain_cursor_reset(dst);
1558 	callchain_cursor_commit(src);
1559 
1560 	while (true) {
1561 		struct callchain_cursor_node *node;
1562 
1563 		node = callchain_cursor_current(src);
1564 		if (node == NULL)
1565 			break;
1566 
1567 		rc = callchain_cursor_append(dst, node->ip, node->map, node->sym,
1568 					     node->branch, &node->branch_flags,
1569 					     node->nr_loop_iter,
1570 					     node->iter_cycles,
1571 					     node->branch_from, node->srcline);
1572 		if (rc)
1573 			break;
1574 
1575 		callchain_cursor_advance(src);
1576 	}
1577 
1578 	return rc;
1579 }
1580