xref: /openbmc/linux/tools/perf/util/callchain.c (revision ecd25094)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2009-2011, Frederic Weisbecker <fweisbec@gmail.com>
4  *
5  * Handle the callchains from the stream in an ad-hoc radix tree and then
6  * sort them in an rbtree.
7  *
8  * Using a radix for code path provides a fast retrieval and factorizes
9  * memory use. Also that lets us use the paths in a hierarchical graph view.
10  *
11  */
12 
#include <inttypes.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>
#include <math.h>
#include <linux/string.h>
#include <linux/zalloc.h>

#include "asm/bug.h"

#include "debug.h"
#include "dso.h"
#include "hist.h"
#include "sort.h"
#include "machine.h"
#include "map.h"
#include "callchain.h"
#include "branch.h"
#include "symbol.h"
#include "../perf.h"
34 
#define CALLCHAIN_PARAM_DEFAULT			\
	.mode		= CHAIN_GRAPH_ABS,	\
	.min_percent	= 0.5,			\
	.order		= ORDER_CALLEE,		\
	.key		= CCKEY_FUNCTION,	\
	.value		= CCVAL_PERCENT,	\

/* Live parameters, mutated by the option/config parsers below. */
struct callchain_param callchain_param = {
	CALLCHAIN_PARAM_DEFAULT
};

/*
 * Are there any events using DWARF callchains?
 *
 * I.e.
 *
 * -e cycles/call-graph=dwarf/
 */
bool dwarf_callchain_users;

/* A second instance that keeps holding the built-in defaults. */
struct callchain_param callchain_param_default = {
	CALLCHAIN_PARAM_DEFAULT
};

/* Per-thread cursor used to accumulate the current sample's callchain. */
__thread struct callchain_cursor callchain_cursor;
60 
/*
 * Parse a record-style --call-graph option string into *param.
 * Thin public wrapper around parse_callchain_record().
 */
int parse_callchain_record_opt(const char *arg, struct callchain_param *param)
{
	return parse_callchain_record(arg, param);
}
65 
66 static int parse_callchain_mode(const char *value)
67 {
68 	if (!strncmp(value, "graph", strlen(value))) {
69 		callchain_param.mode = CHAIN_GRAPH_ABS;
70 		return 0;
71 	}
72 	if (!strncmp(value, "flat", strlen(value))) {
73 		callchain_param.mode = CHAIN_FLAT;
74 		return 0;
75 	}
76 	if (!strncmp(value, "fractal", strlen(value))) {
77 		callchain_param.mode = CHAIN_GRAPH_REL;
78 		return 0;
79 	}
80 	if (!strncmp(value, "folded", strlen(value))) {
81 		callchain_param.mode = CHAIN_FOLDED;
82 		return 0;
83 	}
84 	return -1;
85 }
86 
87 static int parse_callchain_order(const char *value)
88 {
89 	if (!strncmp(value, "caller", strlen(value))) {
90 		callchain_param.order = ORDER_CALLER;
91 		callchain_param.order_set = true;
92 		return 0;
93 	}
94 	if (!strncmp(value, "callee", strlen(value))) {
95 		callchain_param.order = ORDER_CALLEE;
96 		callchain_param.order_set = true;
97 		return 0;
98 	}
99 	return -1;
100 }
101 
102 static int parse_callchain_sort_key(const char *value)
103 {
104 	if (!strncmp(value, "function", strlen(value))) {
105 		callchain_param.key = CCKEY_FUNCTION;
106 		return 0;
107 	}
108 	if (!strncmp(value, "address", strlen(value))) {
109 		callchain_param.key = CCKEY_ADDRESS;
110 		return 0;
111 	}
112 	if (!strncmp(value, "srcline", strlen(value))) {
113 		callchain_param.key = CCKEY_SRCLINE;
114 		return 0;
115 	}
116 	if (!strncmp(value, "branch", strlen(value))) {
117 		callchain_param.branch_callstack = 1;
118 		return 0;
119 	}
120 	return -1;
121 }
122 
123 static int parse_callchain_value(const char *value)
124 {
125 	if (!strncmp(value, "percent", strlen(value))) {
126 		callchain_param.value = CCVAL_PERCENT;
127 		return 0;
128 	}
129 	if (!strncmp(value, "period", strlen(value))) {
130 		callchain_param.value = CCVAL_PERIOD;
131 		return 0;
132 	}
133 	if (!strncmp(value, "count", strlen(value))) {
134 		callchain_param.value = CCVAL_COUNT;
135 		return 0;
136 	}
137 	return -1;
138 }
139 
140 static int get_stack_size(const char *str, unsigned long *_size)
141 {
142 	char *endptr;
143 	unsigned long size;
144 	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));
145 
146 	size = strtoul(str, &endptr, 0);
147 
148 	do {
149 		if (*endptr)
150 			break;
151 
152 		size = round_up(size, sizeof(u64));
153 		if (!size || size > max_size)
154 			break;
155 
156 		*_size = size;
157 		return 0;
158 
159 	} while (0);
160 
161 	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
162 	       max_size, str);
163 	return -1;
164 }
165 
/*
 * Parse a report-style --call-graph string: a comma separated list
 * that may contain a print mode, order, sort key, value style, a
 * minimum percent, a print limit, and — when allow_record_opt is set
 * (perf top) — a record mode plus an optional DWARF stack dump size.
 * Returns 0 on success, -1 on a malformed token or registration
 * failure.
 *
 * NOTE(review): strtok_r() is called on (char *)arg, so the caller's
 * string is modified in place despite the const qualifier.
 */
static int
__parse_callchain_report_opt(const char *arg, bool allow_record_opt)
{
	char *tok;
	char *endptr, *saveptr = NULL;
	bool minpcnt_set = false;
	bool record_opt_set = false;
	bool try_stack_size = false;

	/* enabled by default; the "none" token below turns it back off */
	callchain_param.enabled = true;
	symbol_conf.use_callchain = true;

	if (!arg)
		return 0;

	while ((tok = strtok_r((char *)arg, ",", &saveptr)) != NULL) {
		if (!strncmp(tok, "none", strlen(tok))) {
			callchain_param.mode = CHAIN_NONE;
			callchain_param.enabled = false;
			symbol_conf.use_callchain = false;
			return 0;
		}

		/* keyword tokens first: mode, order, sort key, value style */
		if (!parse_callchain_mode(tok) ||
		    !parse_callchain_order(tok) ||
		    !parse_callchain_sort_key(tok) ||
		    !parse_callchain_value(tok)) {
			/* parsing ok - move on to the next */
			try_stack_size = false;
			goto next;
		} else if (allow_record_opt && !record_opt_set) {
			if (parse_callchain_record(tok, &callchain_param))
				goto try_numbers;

			/* assume that number followed by 'dwarf' is stack size */
			if (callchain_param.record_mode == CALLCHAIN_DWARF)
				try_stack_size = true;

			record_opt_set = true;
			goto next;
		}

try_numbers:
		if (try_stack_size) {
			unsigned long size = 0;

			if (get_stack_size(tok, &size) < 0)
				return -1;
			callchain_param.dump_size = size;
			try_stack_size = false;
		} else if (!minpcnt_set) {
			/* try to get the min percent */
			callchain_param.min_percent = strtod(tok, &endptr);
			if (tok == endptr)
				return -1;
			minpcnt_set = true;
		} else {
			/* try print limit at last */
			callchain_param.print_limit = strtoul(tok, &endptr, 0);
			if (tok == endptr)
				return -1;
		}
next:
		arg = NULL;
	}

	if (callchain_register_param(&callchain_param) < 0) {
		pr_err("Can't register callchain params\n");
		return -1;
	}
	return 0;
}
238 
/* Parse --call-graph for perf report: record options not accepted. */
int parse_callchain_report_opt(const char *arg)
{
	return __parse_callchain_report_opt(arg, false);
}
243 
/* Parse --call-graph for perf top: record options are also accepted. */
int parse_callchain_top_opt(const char *arg)
{
	return __parse_callchain_report_opt(arg, true);
}
248 
249 int parse_callchain_record(const char *arg, struct callchain_param *param)
250 {
251 	char *tok, *name, *saveptr = NULL;
252 	char *buf;
253 	int ret = -1;
254 
255 	/* We need buffer that we know we can write to. */
256 	buf = malloc(strlen(arg) + 1);
257 	if (!buf)
258 		return -ENOMEM;
259 
260 	strcpy(buf, arg);
261 
262 	tok = strtok_r((char *)buf, ",", &saveptr);
263 	name = tok ? : (char *)buf;
264 
265 	do {
266 		/* Framepointer style */
267 		if (!strncmp(name, "fp", sizeof("fp"))) {
268 			if (!strtok_r(NULL, ",", &saveptr)) {
269 				param->record_mode = CALLCHAIN_FP;
270 				ret = 0;
271 			} else
272 				pr_err("callchain: No more arguments "
273 				       "needed for --call-graph fp\n");
274 			break;
275 
276 		/* Dwarf style */
277 		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
278 			const unsigned long default_stack_dump_size = 8192;
279 
280 			ret = 0;
281 			param->record_mode = CALLCHAIN_DWARF;
282 			param->dump_size = default_stack_dump_size;
283 			dwarf_callchain_users = true;
284 
285 			tok = strtok_r(NULL, ",", &saveptr);
286 			if (tok) {
287 				unsigned long size = 0;
288 
289 				ret = get_stack_size(tok, &size);
290 				param->dump_size = size;
291 			}
292 		} else if (!strncmp(name, "lbr", sizeof("lbr"))) {
293 			if (!strtok_r(NULL, ",", &saveptr)) {
294 				param->record_mode = CALLCHAIN_LBR;
295 				ret = 0;
296 			} else
297 				pr_err("callchain: No more arguments "
298 					"needed for --call-graph lbr\n");
299 			break;
300 		} else {
301 			pr_err("callchain: Unknown --call-graph option "
302 			       "value: %s\n", arg);
303 			break;
304 		}
305 
306 	} while (0);
307 
308 	free(buf);
309 	return ret;
310 }
311 
/*
 * Handle "call-graph.*" entries from perfconfig. Returns 0 for keys
 * that are not ours and for successfully parsed values, negative on
 * a malformed value.
 */
int perf_callchain_config(const char *var, const char *value)
{
	char *endptr;

	if (!strstarts(var, "call-graph."))
		return 0;
	/* skip past the "call-graph." prefix */
	var += sizeof("call-graph.") - 1;

	if (!strcmp(var, "record-mode"))
		return parse_callchain_record_opt(value, &callchain_param);
	if (!strcmp(var, "dump-size")) {
		unsigned long size = 0;
		int ret;

		ret = get_stack_size(value, &size);
		callchain_param.dump_size = size;

		return ret;
	}
	if (!strcmp(var, "print-type")){
		int ret;
		ret = parse_callchain_mode(value);
		if (ret == -1)
			pr_err("Invalid callchain mode: %s\n", value);
		return ret;
	}
	if (!strcmp(var, "order")){
		int ret;
		ret = parse_callchain_order(value);
		if (ret == -1)
			pr_err("Invalid callchain order: %s\n", value);
		return ret;
	}
	if (!strcmp(var, "sort-key")){
		int ret;
		ret = parse_callchain_sort_key(value);
		if (ret == -1)
			pr_err("Invalid callchain sort key: %s\n", value);
		return ret;
	}
	if (!strcmp(var, "threshold")) {
		callchain_param.min_percent = strtod(value, &endptr);
		if (value == endptr) {
			pr_err("Invalid callchain threshold: %s\n", value);
			return -1;
		}
	}
	if (!strcmp(var, "print-limit")) {
		/*
		 * NOTE(review): parsed with strtod() and implicitly
		 * converted; __parse_callchain_report_opt() uses strtoul()
		 * for the same field — verify before unifying.
		 */
		callchain_param.print_limit = strtod(value, &endptr);
		if (value == endptr) {
			pr_err("Invalid callchain print limit: %s\n", value);
			return -1;
		}
	}

	return 0;
}
369 
/*
 * Insert @chain into the sorted output rbtree @root. Ordering is
 * descending by self hits for FLAT/FOLDED and by cumulated
 * (self + children) hits for the graph modes. CHAIN_NONE never gets
 * registered for sorting (callchain_register_param() rejects it), so
 * the default arm is not expected to be reached here.
 */
static void
rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
		    enum chain_mode mode)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct callchain_node *rnode;
	u64 chain_cumul = callchain_cumul_hits(chain);

	while (*p) {
		u64 rnode_cumul;

		parent = *p;
		rnode = rb_entry(parent, struct callchain_node, rb_node);
		rnode_cumul = callchain_cumul_hits(rnode);

		switch (mode) {
		case CHAIN_FLAT:
		case CHAIN_FOLDED:
			/* higher self hits sort toward the left */
			if (rnode->hit < chain->hit)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
			break;
		case CHAIN_GRAPH_ABS: /* Falldown */
		case CHAIN_GRAPH_REL:
			/* higher cumulated hits sort toward the left */
			if (rnode_cumul < chain_cumul)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
			break;
		case CHAIN_NONE:
		default:
			break;
		}
	}

	rb_link_node(&chain->rb_node, parent, p);
	rb_insert_color(&chain->rb_node, root);
}
410 
/*
 * Recursively flatten the tree under @node: every node (at any depth)
 * with at least @min_hit self hits is inserted into the single output
 * tree @rb_root, ordered by self hit count (CHAIN_FLAT ordering).
 */
static void
__sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
		  u64 min_hit)
{
	struct rb_node *n;
	struct callchain_node *child;

	n = rb_first(&node->rb_root_in);
	while (n) {
		child = rb_entry(n, struct callchain_node, rb_node_in);
		n = rb_next(n);

		__sort_chain_flat(rb_root, child, min_hit);
	}

	/* only nodes with their own hits are emitted */
	if (node->hit && node->hit >= min_hit)
		rb_insert_callchain(rb_root, node, CHAIN_FLAT);
}
429 
/*
 * Once we get every callchain from the stream, we can now
 * sort them by hit
 */
/* Entry point for CHAIN_FLAT/CHAIN_FOLDED sorting (see rb_insert_callchain). */
static void
sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root,
		u64 min_hit, struct callchain_param *param __maybe_unused)
{
	*rb_root = RB_ROOT;
	__sort_chain_flat(rb_root, &root->node, min_hit);
}
441 
/*
 * Recursively rebuild each node's output tree (rb_root) from its
 * input tree (rb_root_in), keeping only children whose cumulated
 * hits reach the absolute threshold @min_hit, ordered by cumulated
 * hits.
 */
static void __sort_chain_graph_abs(struct callchain_node *node,
				   u64 min_hit)
{
	struct rb_node *n;
	struct callchain_node *child;

	node->rb_root = RB_ROOT;
	n = rb_first(&node->rb_root_in);

	while (n) {
		child = rb_entry(n, struct callchain_node, rb_node_in);
		n = rb_next(n);

		__sort_chain_graph_abs(child, min_hit);
		if (callchain_cumul_hits(child) >= min_hit)
			rb_insert_callchain(&node->rb_root, child,
					    CHAIN_GRAPH_ABS);
	}
}
461 
/*
 * Entry point for CHAIN_GRAPH_ABS sorting: sort the whole tree, then
 * expose the root node's sorted children through @rb_root.
 */
static void
sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root,
		     u64 min_hit, struct callchain_param *param __maybe_unused)
{
	__sort_chain_graph_abs(&chain_root->node, min_hit);
	rb_root->rb_node = chain_root->node.rb_root.rb_node;
}
469 
/*
 * Like __sort_chain_graph_abs(), but the threshold is relative:
 * each node keeps children whose cumulated hits reach @min_percent
 * of that node's children_hit total.
 */
static void __sort_chain_graph_rel(struct callchain_node *node,
				   double min_percent)
{
	struct rb_node *n;
	struct callchain_node *child;
	u64 min_hit;

	node->rb_root = RB_ROOT;
	/* per-node absolute threshold derived from the relative one */
	min_hit = ceil(node->children_hit * min_percent);

	n = rb_first(&node->rb_root_in);
	while (n) {
		child = rb_entry(n, struct callchain_node, rb_node_in);
		n = rb_next(n);

		__sort_chain_graph_rel(child, min_percent);
		if (callchain_cumul_hits(child) >= min_hit)
			rb_insert_callchain(&node->rb_root, child,
					    CHAIN_GRAPH_REL);
	}
}
491 
/*
 * Entry point for CHAIN_GRAPH_REL sorting. param->min_percent is in
 * percent (e.g. 0.5 for 0.5%), hence the division by 100.
 */
static void
sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root,
		     u64 min_hit __maybe_unused, struct callchain_param *param)
{
	__sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0);
	rb_root->rb_node = chain_root->node.rb_root.rb_node;
}
499 
500 int callchain_register_param(struct callchain_param *param)
501 {
502 	switch (param->mode) {
503 	case CHAIN_GRAPH_ABS:
504 		param->sort = sort_chain_graph_abs;
505 		break;
506 	case CHAIN_GRAPH_REL:
507 		param->sort = sort_chain_graph_rel;
508 		break;
509 	case CHAIN_FLAT:
510 	case CHAIN_FOLDED:
511 		param->sort = sort_chain_flat;
512 		break;
513 	case CHAIN_NONE:
514 	default:
515 		return -1;
516 	}
517 	return 0;
518 }
519 
/*
 * Create a child for a parent. If inherit_children, then the new child
 * will become the new parent of its parent's children
 */
static struct callchain_node *
create_child(struct callchain_node *parent, bool inherit_children)
{
	struct callchain_node *new;

	new = zalloc(sizeof(*new));
	if (!new) {
		perror("not enough memory to create child for code path tree");
		return NULL;
	}
	new->parent = parent;
	INIT_LIST_HEAD(&new->val);
	INIT_LIST_HEAD(&new->parent_val);

	if (inherit_children) {
		struct rb_node *n;
		struct callchain_node *child;

		/* steal the parent's whole input tree ... */
		new->rb_root_in = parent->rb_root_in;
		parent->rb_root_in = RB_ROOT;

		/* ... and reparent every stolen child to the new node */
		n = rb_first(&new->rb_root_in);
		while (n) {
			child = rb_entry(n, struct callchain_node, rb_node_in);
			child->parent = new;
			n = rb_next(n);
		}

		/* make it the first child */
		rb_link_node(&new->rb_node_in, NULL, &parent->rb_root_in.rb_node);
		rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
	}

	return new;
}
559 
560 
/*
 * Fill @node's value list with the remaining cursor entries (from the
 * current position to the end), duplicating each one into a freshly
 * allocated callchain_list. Branch statistics are seeded from the
 * cursor entry when it describes a branch. Returns 0 on success, -1
 * on allocation failure (entries added so far stay on the list for
 * the caller to clean up — see add_child()).
 */
static int
fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
{
	struct callchain_cursor_node *cursor_node;

	node->val_nr = cursor->nr - cursor->pos;
	if (!node->val_nr)
		pr_warning("Warning: empty node in callchain tree\n");

	cursor_node = callchain_cursor_current(cursor);

	while (cursor_node) {
		struct callchain_list *call;

		call = zalloc(sizeof(*call));
		if (!call) {
			perror("not enough memory for the code path tree");
			return -1;
		}
		call->ip = cursor_node->ip;
		call->ms.sym = cursor_node->sym;
		/* take our own reference on the map */
		call->ms.map = map__get(cursor_node->map);
		call->srcline = cursor_node->srcline;

		if (cursor_node->branch) {
			call->branch_count = 1;

			if (cursor_node->branch_from) {
				/*
				 * branch_from is set with value somewhere else
				 * to imply it's "to" of a branch.
				 */
				call->brtype_stat.branch_to = true;

				if (cursor_node->branch_flags.predicted)
					call->predicted_count = 1;

				if (cursor_node->branch_flags.abort)
					call->abort_count = 1;

				branch_type_count(&call->brtype_stat,
						  &cursor_node->branch_flags,
						  cursor_node->branch_from,
						  cursor_node->ip);
			} else {
				/*
				 * It's "from" of a branch
				 */
				call->brtype_stat.branch_to = false;
				call->cycles_count =
					cursor_node->branch_flags.cycles;
				call->iter_count = cursor_node->nr_loop_iter;
				call->iter_cycles = cursor_node->iter_cycles;
			}
		}

		list_add_tail(&call->list, &node->val);

		callchain_cursor_advance(cursor);
		cursor_node = callchain_cursor_current(cursor);
	}
	return 0;
}
627 
/*
 * Allocate a new child of @parent and fill it with the rest of the
 * cursor's entries, crediting it with @period self hits. On failure
 * any partially-filled entries are unlinked, their map references
 * dropped and the node freed; returns NULL in that case.
 */
static struct callchain_node *
add_child(struct callchain_node *parent,
	  struct callchain_cursor *cursor,
	  u64 period)
{
	struct callchain_node *new;

	new = create_child(parent, false);
	if (new == NULL)
		return NULL;

	if (fill_node(new, cursor) < 0) {
		struct callchain_list *call, *tmp;

		/* undo the partial fill: drop list entries and map refs */
		list_for_each_entry_safe(call, tmp, &new->val, list) {
			list_del_init(&call->list);
			map__zput(call->ms.map);
			free(call);
		}
		free(new);
		return NULL;
	}

	new->children_hit = 0;
	new->hit = period;
	new->children_count = 0;
	new->count = 1;
	return new;
}
657 
/*
 * Tri-state comparison result for chain entries, also used as the
 * rbtree ordering hint by the callers.
 */
enum match_result {
	MATCH_ERROR  = -1,
	MATCH_EQ,
	MATCH_LT,
	MATCH_GT,
};

/*
 * Order two (possibly NULL) strings. A NULL string sorts after a
 * non-NULL one; two NULL strings cannot be compared and yield
 * MATCH_ERROR.
 */
static enum match_result match_chain_strings(const char *left,
					     const char *right)
{
	int cmp;

	if (!left && !right)
		return MATCH_ERROR;
	if (!left)
		return MATCH_GT;
	if (!right)
		return MATCH_LT;

	cmp = strcmp(left, right);
	if (cmp < 0)
		return MATCH_LT;
	if (cmp > 0)
		return MATCH_GT;
	return MATCH_EQ;
}
685 
686 /*
687  * We need to always use relative addresses because we're aggregating
688  * callchains from multiple threads, i.e. different address spaces, so
689  * comparing absolute addresses make no sense as a symbol in a DSO may end up
690  * in a different address when used in a different binary or even the same
691  * binary but with some sort of address randomization technique, thus we need
692  * to compare just relative addresses. -acme
693  */
694 static enum match_result match_chain_dso_addresses(struct map *left_map, u64 left_ip,
695 						   struct map *right_map, u64 right_ip)
696 {
697 	struct dso *left_dso = left_map ? left_map->dso : NULL;
698 	struct dso *right_dso = right_map ? right_map->dso : NULL;
699 
700 	if (left_dso != right_dso)
701 		return left_dso < right_dso ? MATCH_LT : MATCH_GT;
702 
703 	if (left_ip != right_ip)
704  		return left_ip < right_ip ? MATCH_LT : MATCH_GT;
705 
706 	return MATCH_EQ;
707 }
708 
/*
 * Compare a cursor entry against a stored chain entry according to
 * callchain_param.key, falling back from srcline to symbol to
 * DSO+address as information is missing. When the entries match
 * exactly and the cursor entry describes a branch, fold its branch
 * statistics into @cnode.
 */
static enum match_result match_chain(struct callchain_cursor_node *node,
				     struct callchain_list *cnode)
{
	enum match_result match = MATCH_ERROR;

	switch (callchain_param.key) {
	case CCKEY_SRCLINE:
		match = match_chain_strings(cnode->srcline, node->srcline);
		if (match != MATCH_ERROR)
			break;
		/* otherwise fall-back to symbol-based comparison below */
		__fallthrough;
	case CCKEY_FUNCTION:
		if (node->sym && cnode->ms.sym) {
			/*
			 * Compare inlined frames based on their symbol name
			 * because different inlined frames will have the same
			 * symbol start. Otherwise do a faster comparison based
			 * on the symbol start address.
			 */
			if (cnode->ms.sym->inlined || node->sym->inlined) {
				match = match_chain_strings(cnode->ms.sym->name,
							    node->sym->name);
				if (match != MATCH_ERROR)
					break;
			} else {
				match = match_chain_dso_addresses(cnode->ms.map, cnode->ms.sym->start,
								  node->map, node->sym->start);
				break;
			}
		}
		/* otherwise fall-back to IP-based comparison below */
		__fallthrough;
	case CCKEY_ADDRESS:
	default:
		match = match_chain_dso_addresses(cnode->ms.map, cnode->ip, node->map, node->ip);
		break;
	}

	/* on an exact match, accumulate branch statistics into the node */
	if (match == MATCH_EQ && node->branch) {
		cnode->branch_count++;

		if (node->branch_from) {
			/*
			 * It's "to" of a branch
			 */
			cnode->brtype_stat.branch_to = true;

			if (node->branch_flags.predicted)
				cnode->predicted_count++;

			if (node->branch_flags.abort)
				cnode->abort_count++;

			branch_type_count(&cnode->brtype_stat,
					  &node->branch_flags,
					  node->branch_from,
					  node->ip);
		} else {
			/*
			 * It's "from" of a branch
			 */
			cnode->brtype_stat.branch_to = false;
			cnode->cycles_count += node->branch_flags.cycles;
			cnode->iter_count += node->nr_loop_iter;
			cnode->iter_cycles += node->iter_cycles;
			cnode->from_count++;
		}
	}

	return match;
}
781 
/*
 * Split the parent in two parts (a new child is created) and
 * give a part of its callchain to the created child.
 * Then create another child to host the given callchain of new branch.
 * @idx_parents/@idx_local locate the split point; @period is credited
 * to whichever node ends the new chain. Returns 0 on success, -1 on
 * allocation failure.
 */
static int
split_add_child(struct callchain_node *parent,
		struct callchain_cursor *cursor,
		struct callchain_list *to_split,
		u64 idx_parents, u64 idx_local, u64 period)
{
	struct callchain_node *new;
	struct list_head *old_tail;
	unsigned int idx_total = idx_parents + idx_local;

	/* split: the new child inherits all of parent's children */
	new = create_child(parent, true);
	if (new == NULL)
		return -1;

	/*
	 * split the callchain and move a part to the new child:
	 * splice [to_split .. old tail] out of parent->val and make it
	 * new->val by hand-stitching the list_head pointers
	 */
	old_tail = parent->val.prev;
	list_del_range(&to_split->list, old_tail);
	new->val.next = &to_split->list;
	new->val.prev = old_tail;
	to_split->list.prev = &new->val;
	old_tail->next = &new->val;

	/* split the hits */
	new->hit = parent->hit;
	new->children_hit = parent->children_hit;
	parent->children_hit = callchain_cumul_hits(new);
	new->val_nr = parent->val_nr - idx_local;
	parent->val_nr = idx_local;
	new->count = parent->count;
	new->children_count = parent->children_count;
	parent->children_count = callchain_cumul_counts(new);

	/* create a new child for the new branch if any */
	if (idx_total < cursor->nr) {
		struct callchain_node *first;
		struct callchain_list *cnode;
		struct callchain_cursor_node *node;
		struct rb_node *p, **pp;

		/* parent becomes a pure interior node */
		parent->hit = 0;
		parent->children_hit += period;
		parent->count = 0;
		parent->children_count += 1;

		node = callchain_cursor_current(cursor);
		new = add_child(parent, cursor, period);
		if (new == NULL)
			return -1;

		/*
		 * This is second child since we moved parent's children
		 * to new (first) child above.
		 */
		p = parent->rb_root_in.rb_node;
		first = rb_entry(p, struct callchain_node, rb_node_in);
		cnode = list_first_entry(&first->val, struct callchain_list,
					 list);

		if (match_chain(node, cnode) == MATCH_LT)
			pp = &p->rb_left;
		else
			pp = &p->rb_right;

		rb_link_node(&new->rb_node_in, p, pp);
		rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
	} else {
		/* the chain ends exactly at the split point */
		parent->hit = period;
		parent->count = 1;
	}
	return 0;
}
859 
/* forward declaration: append_chain() and append_chain_children() recurse
 * into each other */
static enum match_result
append_chain(struct callchain_node *root,
	     struct callchain_cursor *cursor,
	     u64 period);
864 
/*
 * Walk @root's children (ordered by match_chain()) looking for one
 * matching the current cursor entry and recurse via append_chain().
 * If no child matches, create a new one from the cursor's remaining
 * entries. Children hit/count accounting for @root happens here on
 * success. Returns 0 on success, -1 on error.
 */
static int
append_chain_children(struct callchain_node *root,
		      struct callchain_cursor *cursor,
		      u64 period)
{
	struct callchain_node *rnode;
	struct callchain_cursor_node *node;
	struct rb_node **p = &root->rb_root_in.rb_node;
	struct rb_node *parent = NULL;

	node = callchain_cursor_current(cursor);
	if (!node)
		return -1;

	/* lookup in children */
	while (*p) {
		enum match_result ret;

		parent = *p;
		rnode = rb_entry(parent, struct callchain_node, rb_node_in);

		/* If at least first entry matches, rely to children */
		ret = append_chain(rnode, cursor, period);
		if (ret == MATCH_EQ)
			goto inc_children_hit;
		if (ret == MATCH_ERROR)
			return -1;

		/* no match: LT/GT tells us which subtree to search next */
		if (ret == MATCH_LT)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	/* nothing in children, add to the current node */
	rnode = add_child(root, cursor, period);
	if (rnode == NULL)
		return -1;

	rb_link_node(&rnode->rb_node_in, parent, p);
	rb_insert_color(&rnode->rb_node_in, &root->rb_root_in);

inc_children_hit:
	root->children_hit += period;
	root->children_count++;
	return 0;
}
911 
/*
 * Try to account the cursor's chain to @root by matching cursor
 * entries against root's own value list. Returns MATCH_EQ when the
 * chain was accounted here (possibly after splitting the node or
 * recursing into children), MATCH_LT/MATCH_GT when root's first
 * entry doesn't match so the caller should keep searching siblings,
 * MATCH_ERROR on failure.
 */
static enum match_result
append_chain(struct callchain_node *root,
	     struct callchain_cursor *cursor,
	     u64 period)
{
	struct callchain_list *cnode;
	u64 start = cursor->pos;
	bool found = false;
	u64 matches;
	enum match_result cmp = MATCH_ERROR;

	/*
	 * Lookup in the current node
	 * If we have a symbol, then compare the start to match
	 * anywhere inside a function, unless function
	 * mode is disabled.
	 */
	list_for_each_entry(cnode, &root->val, list) {
		struct callchain_cursor_node *node;

		node = callchain_cursor_current(cursor);
		if (!node)
			break;

		cmp = match_chain(node, cnode);
		if (cmp != MATCH_EQ)
			break;

		found = true;

		callchain_cursor_advance(cursor);
	}

	/* no match at all: relay the comparison result to the parent */
	if (!found) {
		WARN_ONCE(cmp == MATCH_ERROR, "Chain comparison error\n");
		return cmp;
	}

	matches = cursor->pos - start;

	/* we match only a part of the node. Split it and add the new chain */
	if (matches < root->val_nr) {
		if (split_add_child(root, cursor, cnode, start, matches,
				    period) < 0)
			return MATCH_ERROR;

		return MATCH_EQ;
	}

	/* we match 100% of the path, increment the hit */
	if (matches == root->val_nr && cursor->pos == cursor->nr) {
		root->hit += period;
		root->count++;
		return MATCH_EQ;
	}

	/* We match the node and still have a part remaining */
	if (append_chain_children(root, cursor, period) < 0)
		return MATCH_ERROR;

	return MATCH_EQ;
}
975 
976 int callchain_append(struct callchain_root *root,
977 		     struct callchain_cursor *cursor,
978 		     u64 period)
979 {
980 	if (!cursor->nr)
981 		return 0;
982 
983 	callchain_cursor_commit(cursor);
984 
985 	if (append_chain_children(&root->node, cursor, period) < 0)
986 		return -1;
987 
988 	if (cursor->nr > root->max_depth)
989 		root->max_depth = cursor->nr;
990 
991 	return 0;
992 }
993 
/*
 * Move @src's value list and all of its descendants into @dst by
 * replaying src's entries through the shared @cursor and re-appending
 * them. The cursor position/last pointer are restored on normal exit
 * so the caller's in-flight cursor state survives the recursion.
 * Returns 0 on success, non-zero on error.
 *
 * NOTE(review): the early "return -1" skips the cursor restore, and
 * a failed recursive merge leaves that child unfreed — verify whether
 * the callers tear everything down on error anyway.
 */
static int
merge_chain_branch(struct callchain_cursor *cursor,
		   struct callchain_node *dst, struct callchain_node *src)
{
	struct callchain_cursor_node **old_last = cursor->last;
	struct callchain_node *child;
	struct callchain_list *list, *next_list;
	struct rb_node *n;
	int old_pos = cursor->nr;
	int err = 0;

	/* replay src's own entries into the cursor, consuming the list */
	list_for_each_entry_safe(list, next_list, &src->val, list) {
		callchain_cursor_append(cursor, list->ip,
					list->ms.map, list->ms.sym,
					false, NULL, 0, 0, 0, list->srcline);
		list_del_init(&list->list);
		map__zput(list->ms.map);
		free(list);
	}

	/* re-account src's self hits against dst */
	if (src->hit) {
		callchain_cursor_commit(cursor);
		if (append_chain_children(dst, cursor, src->hit) < 0)
			return -1;
	}

	/* recurse into (and dismantle) src's children */
	n = rb_first(&src->rb_root_in);
	while (n) {
		child = container_of(n, struct callchain_node, rb_node_in);
		n = rb_next(n);
		rb_erase(&child->rb_node_in, &src->rb_root_in);

		err = merge_chain_branch(cursor, dst, child);
		if (err)
			break;

		free(child);
	}

	/* restore the caller's cursor state */
	cursor->nr = old_pos;
	cursor->last = old_last;

	return err;
}
1038 
/* Merge the whole tree under @src into @dst (see merge_chain_branch()). */
int callchain_merge(struct callchain_cursor *cursor,
		    struct callchain_root *dst, struct callchain_root *src)
{
	return merge_chain_branch(cursor, &dst->node, &src->node);
}
1044 
/*
 * Append one entry to @cursor, reusing a previously allocated node
 * when one is already present at the current tail. Takes a new
 * reference on @map (and drops the one held by a reused node).
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int callchain_cursor_append(struct callchain_cursor *cursor,
			    u64 ip, struct map *map, struct symbol *sym,
			    bool branch, struct branch_flags *flags,
			    int nr_loop_iter, u64 iter_cycles, u64 branch_from,
			    const char *srcline)
{
	struct callchain_cursor_node *node = *cursor->last;

	if (!node) {
		node = calloc(1, sizeof(*node));
		if (!node)
			return -ENOMEM;

		*cursor->last = node;
	}

	node->ip = ip;
	/* drop a reused node's old map reference, take one on @map */
	map__zput(node->map);
	node->map = map__get(map);
	node->sym = sym;
	node->branch = branch;
	node->nr_loop_iter = nr_loop_iter;
	node->iter_cycles = iter_cycles;
	node->srcline = srcline;

	if (flags)
		memcpy(&node->branch_flags, flags,
			sizeof(struct branch_flags));

	node->branch_from = branch_from;
	cursor->nr++;

	cursor->last = &node->next;

	return 0;
}
1081 
1082 int sample__resolve_callchain(struct perf_sample *sample,
1083 			      struct callchain_cursor *cursor, struct symbol **parent,
1084 			      struct evsel *evsel, struct addr_location *al,
1085 			      int max_stack)
1086 {
1087 	if (sample->callchain == NULL && !symbol_conf.show_branchflag_count)
1088 		return 0;
1089 
1090 	if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain ||
1091 	    perf_hpp_list.parent || symbol_conf.show_branchflag_count) {
1092 		return thread__resolve_callchain(al->thread, cursor, evsel, sample,
1093 						 parent, al, max_stack);
1094 	}
1095 	return 0;
1096 }
1097 
/*
 * Append the thread-local callchain_cursor contents to @he's chain,
 * weighted by the sample period. No-op (returning 0) unless
 * callchains or branch flag counting are in use.
 */
int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample)
{
	if ((!symbol_conf.use_callchain || sample->callchain == NULL) &&
		!symbol_conf.show_branchflag_count)
		return 0;
	return callchain_append(he->callchain, &callchain_cursor, sample->period);
}
1105 
/*
 * Copy @node's resolution (map/symbol/srcline/addr) into @al and
 * classify the cpumode/level character from the owning map.
 * Returns 1 when the entry should be used, 0 when it is unresolved
 * and @hide_unresolved is set.
 */
int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
			bool hide_unresolved)
{
	al->map = node->map;
	al->sym = node->sym;
	al->srcline = node->srcline;
	al->addr = node->ip;

	if (al->sym == NULL) {
		if (hide_unresolved)
			return 0;
		/* no map either: nothing left to classify */
		if (al->map == NULL)
			goto out;
	}

	/* kernel map: host vs guest kernel */
	if (al->map->groups == &al->machine->kmaps) {
		if (machine__is_host(al->machine)) {
			al->cpumode = PERF_RECORD_MISC_KERNEL;
			al->level = 'k';
		} else {
			al->cpumode = PERF_RECORD_MISC_GUEST_KERNEL;
			al->level = 'g';
		}
	} else {
		/* userspace: host user, guest user or hypervisor */
		if (machine__is_host(al->machine)) {
			al->cpumode = PERF_RECORD_MISC_USER;
			al->level = '.';
		} else if (perf_guest) {
			al->cpumode = PERF_RECORD_MISC_GUEST_USER;
			al->level = 'u';
		} else {
			al->cpumode = PERF_RECORD_MISC_HYPERVISOR;
			al->level = 'H';
		}
	}

out:
	return 1;
}
1145 
/*
 * Format @cl's symbol name — plus optional srcline, "(inlined)"
 * marker and DSO short name — into @bf. Falls back to the raw ip
 * when there is no symbol. Returns @bf.
 */
char *callchain_list__sym_name(struct callchain_list *cl,
			       char *bf, size_t bfsize, bool show_dso)
{
	bool show_addr = callchain_param.key == CCKEY_ADDRESS;
	bool show_srcline = show_addr || callchain_param.key == CCKEY_SRCLINE;
	int printed;

	if (cl->ms.sym) {
		const char *inlined = cl->ms.sym->inlined ? " (inlined)" : "";

		if (show_srcline && cl->srcline)
			printed = scnprintf(bf, bfsize, "%s %s%s",
					    cl->ms.sym->name, cl->srcline,
					    inlined);
		else
			printed = scnprintf(bf, bfsize, "%s%s",
					    cl->ms.sym->name, inlined);
	} else
		printed = scnprintf(bf, bfsize, "%#" PRIx64, cl->ip);

	if (show_dso)
		scnprintf(bf + printed, bfsize - printed, " %s",
			  cl->ms.map ?
			  cl->ms.map->dso->short_name :
			  "unknown");

	return bf;
}
1174 
1175 char *callchain_node__scnprintf_value(struct callchain_node *node,
1176 				      char *bf, size_t bfsize, u64 total)
1177 {
1178 	double percent = 0.0;
1179 	u64 period = callchain_cumul_hits(node);
1180 	unsigned count = callchain_cumul_counts(node);
1181 
1182 	if (callchain_param.mode == CHAIN_FOLDED) {
1183 		period = node->hit;
1184 		count = node->count;
1185 	}
1186 
1187 	switch (callchain_param.value) {
1188 	case CCVAL_PERIOD:
1189 		scnprintf(bf, bfsize, "%"PRIu64, period);
1190 		break;
1191 	case CCVAL_COUNT:
1192 		scnprintf(bf, bfsize, "%u", count);
1193 		break;
1194 	case CCVAL_PERCENT:
1195 	default:
1196 		if (total)
1197 			percent = period * 100.0 / total;
1198 		scnprintf(bf, bfsize, "%.2f%%", percent);
1199 		break;
1200 	}
1201 	return bf;
1202 }
1203 
1204 int callchain_node__fprintf_value(struct callchain_node *node,
1205 				 FILE *fp, u64 total)
1206 {
1207 	double percent = 0.0;
1208 	u64 period = callchain_cumul_hits(node);
1209 	unsigned count = callchain_cumul_counts(node);
1210 
1211 	if (callchain_param.mode == CHAIN_FOLDED) {
1212 		period = node->hit;
1213 		count = node->count;
1214 	}
1215 
1216 	switch (callchain_param.value) {
1217 	case CCVAL_PERIOD:
1218 		return fprintf(fp, "%"PRIu64, period);
1219 	case CCVAL_COUNT:
1220 		return fprintf(fp, "%u", count);
1221 	case CCVAL_PERCENT:
1222 	default:
1223 		if (total)
1224 			percent = period * 100.0 / total;
1225 		return percent_color_fprintf(fp, "%.2f%%", percent);
1226 	}
1227 	return 0;
1228 }
1229 
1230 static void callchain_counts_value(struct callchain_node *node,
1231 				   u64 *branch_count, u64 *predicted_count,
1232 				   u64 *abort_count, u64 *cycles_count)
1233 {
1234 	struct callchain_list *clist;
1235 
1236 	list_for_each_entry(clist, &node->val, list) {
1237 		if (branch_count)
1238 			*branch_count += clist->branch_count;
1239 
1240 		if (predicted_count)
1241 			*predicted_count += clist->predicted_count;
1242 
1243 		if (abort_count)
1244 			*abort_count += clist->abort_count;
1245 
1246 		if (cycles_count)
1247 			*cycles_count += clist->cycles_count;
1248 	}
1249 }
1250 
1251 static int callchain_node_branch_counts_cumul(struct callchain_node *node,
1252 					      u64 *branch_count,
1253 					      u64 *predicted_count,
1254 					      u64 *abort_count,
1255 					      u64 *cycles_count)
1256 {
1257 	struct callchain_node *child;
1258 	struct rb_node *n;
1259 
1260 	n = rb_first(&node->rb_root_in);
1261 	while (n) {
1262 		child = rb_entry(n, struct callchain_node, rb_node_in);
1263 		n = rb_next(n);
1264 
1265 		callchain_node_branch_counts_cumul(child, branch_count,
1266 						   predicted_count,
1267 						   abort_count,
1268 						   cycles_count);
1269 
1270 		callchain_counts_value(child, branch_count,
1271 				       predicted_count, abort_count,
1272 				       cycles_count);
1273 	}
1274 
1275 	return 0;
1276 }
1277 
1278 int callchain_branch_counts(struct callchain_root *root,
1279 			    u64 *branch_count, u64 *predicted_count,
1280 			    u64 *abort_count, u64 *cycles_count)
1281 {
1282 	if (branch_count)
1283 		*branch_count = 0;
1284 
1285 	if (predicted_count)
1286 		*predicted_count = 0;
1287 
1288 	if (abort_count)
1289 		*abort_count = 0;
1290 
1291 	if (cycles_count)
1292 		*cycles_count = 0;
1293 
1294 	return callchain_node_branch_counts_cumul(&root->node,
1295 						  branch_count,
1296 						  predicted_count,
1297 						  abort_count,
1298 						  cycles_count);
1299 }
1300 
1301 static int count_pri64_printf(int idx, const char *str, u64 value, char *bf, int bfsize)
1302 {
1303 	int printed;
1304 
1305 	printed = scnprintf(bf, bfsize, "%s%s:%" PRId64 "", (idx) ? " " : " (", str, value);
1306 
1307 	return printed;
1308 }
1309 
/*
 * Like count_pri64_printf() but for percentage values; items below
 * @threshold are suppressed (a threshold of 0.0 disables filtering).
 * Returns the number of characters written (0 when suppressed).
 */
static int count_float_printf(int idx, const char *str, float value,
			      char *bf, int bfsize, float threshold)
{
	const char *sep = idx ? " " : " (";

	if (threshold != 0.0 && value < threshold)
		return 0;

	return scnprintf(bf, bfsize, "%s%s:%.1f%%", sep, str, value);
}
1322 
1323 static int branch_to_str(char *bf, int bfsize,
1324 			 u64 branch_count, u64 predicted_count,
1325 			 u64 abort_count,
1326 			 struct branch_type_stat *brtype_stat)
1327 {
1328 	int printed, i = 0;
1329 
1330 	printed = branch_type_str(brtype_stat, bf, bfsize);
1331 	if (printed)
1332 		i++;
1333 
1334 	if (predicted_count < branch_count) {
1335 		printed += count_float_printf(i++, "predicted",
1336 				predicted_count * 100.0 / branch_count,
1337 				bf + printed, bfsize - printed, 0.0);
1338 	}
1339 
1340 	if (abort_count) {
1341 		printed += count_float_printf(i++, "abort",
1342 				abort_count * 100.0 / branch_count,
1343 				bf + printed, bfsize - printed, 0.1);
1344 	}
1345 
1346 	if (i)
1347 		printed += scnprintf(bf + printed, bfsize - printed, ")");
1348 
1349 	return printed;
1350 }
1351 
1352 static int branch_from_str(char *bf, int bfsize,
1353 			   u64 branch_count,
1354 			   u64 cycles_count, u64 iter_count,
1355 			   u64 iter_cycles, u64 from_count)
1356 {
1357 	int printed = 0, i = 0;
1358 	u64 cycles, v = 0;
1359 
1360 	cycles = cycles_count / branch_count;
1361 	if (cycles) {
1362 		printed += count_pri64_printf(i++, "cycles",
1363 				cycles,
1364 				bf + printed, bfsize - printed);
1365 	}
1366 
1367 	if (iter_count && from_count) {
1368 		v = iter_count / from_count;
1369 		if (v) {
1370 			printed += count_pri64_printf(i++, "iter",
1371 					v, bf + printed, bfsize - printed);
1372 
1373 			printed += count_pri64_printf(i++, "avg_cycles",
1374 					iter_cycles / iter_count,
1375 					bf + printed, bfsize - printed);
1376 		}
1377 	}
1378 
1379 	if (i)
1380 		printed += scnprintf(bf + printed, bfsize - printed, ")");
1381 
1382 	return printed;
1383 }
1384 
1385 static int counts_str_build(char *bf, int bfsize,
1386 			     u64 branch_count, u64 predicted_count,
1387 			     u64 abort_count, u64 cycles_count,
1388 			     u64 iter_count, u64 iter_cycles,
1389 			     u64 from_count,
1390 			     struct branch_type_stat *brtype_stat)
1391 {
1392 	int printed;
1393 
1394 	if (branch_count == 0)
1395 		return scnprintf(bf, bfsize, " (calltrace)");
1396 
1397 	if (brtype_stat->branch_to) {
1398 		printed = branch_to_str(bf, bfsize, branch_count,
1399 				predicted_count, abort_count, brtype_stat);
1400 	} else {
1401 		printed = branch_from_str(bf, bfsize, branch_count,
1402 				cycles_count, iter_count, iter_cycles,
1403 				from_count);
1404 	}
1405 
1406 	if (!printed)
1407 		bf[0] = 0;
1408 
1409 	return printed;
1410 }
1411 
1412 static int callchain_counts_printf(FILE *fp, char *bf, int bfsize,
1413 				   u64 branch_count, u64 predicted_count,
1414 				   u64 abort_count, u64 cycles_count,
1415 				   u64 iter_count, u64 iter_cycles,
1416 				   u64 from_count,
1417 				   struct branch_type_stat *brtype_stat)
1418 {
1419 	char str[256];
1420 
1421 	counts_str_build(str, sizeof(str), branch_count,
1422 			 predicted_count, abort_count, cycles_count,
1423 			 iter_count, iter_cycles, from_count, brtype_stat);
1424 
1425 	if (fp)
1426 		return fprintf(fp, "%s", str);
1427 
1428 	return scnprintf(bf, bfsize, "%s", str);
1429 }
1430 
1431 int callchain_list_counts__printf_value(struct callchain_list *clist,
1432 					FILE *fp, char *bf, int bfsize)
1433 {
1434 	u64 branch_count, predicted_count;
1435 	u64 abort_count, cycles_count;
1436 	u64 iter_count, iter_cycles;
1437 	u64 from_count;
1438 
1439 	branch_count = clist->branch_count;
1440 	predicted_count = clist->predicted_count;
1441 	abort_count = clist->abort_count;
1442 	cycles_count = clist->cycles_count;
1443 	iter_count = clist->iter_count;
1444 	iter_cycles = clist->iter_cycles;
1445 	from_count = clist->from_count;
1446 
1447 	return callchain_counts_printf(fp, bf, bfsize, branch_count,
1448 				       predicted_count, abort_count,
1449 				       cycles_count, iter_count, iter_cycles,
1450 				       from_count, &clist->brtype_stat);
1451 }
1452 
/*
 * Recursively free a callchain node's payload: the parent_val and val
 * entry lists (dropping the map reference each entry holds) and then
 * every child in the node's rbtree.  The node struct itself is freed
 * by the caller.
 */
static void free_callchain_node(struct callchain_node *node)
{
	struct callchain_list *list, *tmp;
	struct callchain_node *child;
	struct rb_node *n;

	list_for_each_entry_safe(list, tmp, &node->parent_val, list) {
		list_del_init(&list->list);
		map__zput(list->ms.map);	/* release ref taken on insertion */
		free(list);
	}

	list_for_each_entry_safe(list, tmp, &node->val, list) {
		list_del_init(&list->list);
		map__zput(list->ms.map);
		free(list);
	}

	n = rb_first(&node->rb_root_in);
	while (n) {
		child = container_of(n, struct callchain_node, rb_node_in);
		/* advance before erasing so the iterator stays valid */
		n = rb_next(n);
		rb_erase(&child->rb_node_in, &node->rb_root_in);

		free_callchain_node(child);
		free(child);
	}
}
1481 
1482 void free_callchain(struct callchain_root *root)
1483 {
1484 	if (!symbol_conf.use_callchain)
1485 		return;
1486 
1487 	free_callchain_node(&root->node);
1488 }
1489 
1490 static u64 decay_callchain_node(struct callchain_node *node)
1491 {
1492 	struct callchain_node *child;
1493 	struct rb_node *n;
1494 	u64 child_hits = 0;
1495 
1496 	n = rb_first(&node->rb_root_in);
1497 	while (n) {
1498 		child = container_of(n, struct callchain_node, rb_node_in);
1499 
1500 		child_hits += decay_callchain_node(child);
1501 		n = rb_next(n);
1502 	}
1503 
1504 	node->hit = (node->hit * 7) / 8;
1505 	node->children_hit = child_hits;
1506 
1507 	return node->hit;
1508 }
1509 
1510 void decay_callchain(struct callchain_root *root)
1511 {
1512 	if (!symbol_conf.use_callchain)
1513 		return;
1514 
1515 	decay_callchain_node(&root->node);
1516 }
1517 
/*
 * Build node->parent_val: a flat copy of every callchain_list entry of
 * all of @node's ancestors, ordered from the root of the chain down to
 * the immediate parent, so the node can be displayed with its full
 * call path.
 *
 * Returns 0 on success, -ENOMEM if an allocation fails (all partially
 * built entries are freed again in that case).
 */
int callchain_node__make_parent_list(struct callchain_node *node)
{
	struct callchain_node *parent = node->parent;
	struct callchain_list *chain, *new;
	LIST_HEAD(head);

	/*
	 * Walk up the tree, appending each parent's entries in reverse;
	 * combined with the reversed splice below this yields top-down
	 * (root -> parent) order in parent_val.
	 */
	while (parent) {
		list_for_each_entry_reverse(chain, &parent->val, list) {
			new = malloc(sizeof(*new));
			if (new == NULL)
				goto out;
			*new = *chain;
			new->has_children = false;
			map__get(new->ms.map);	/* the copy owns its own map ref */
			list_add_tail(&new->list, &head);
		}
		parent = parent->parent;
	}

	list_for_each_entry_safe_reverse(chain, new, &head, list)
		list_move_tail(&chain->list, &node->parent_val);

	if (!list_empty(&node->parent_val)) {
		/* only the topmost entry may show an expansion marker */
		chain = list_first_entry(&node->parent_val, struct callchain_list, list);
		chain->has_children = rb_prev(&node->rb_node) || rb_next(&node->rb_node);

		chain = list_first_entry(&node->val, struct callchain_list, list);
		chain->has_children = false;
	}
	return 0;

out:
	/* allocation failed: undo everything built so far */
	list_for_each_entry_safe(chain, new, &head, list) {
		list_del_init(&chain->list);
		map__zput(chain->ms.map);
		free(chain);
	}
	return -ENOMEM;
}
1557 
1558 int callchain_cursor__copy(struct callchain_cursor *dst,
1559 			   struct callchain_cursor *src)
1560 {
1561 	int rc = 0;
1562 
1563 	callchain_cursor_reset(dst);
1564 	callchain_cursor_commit(src);
1565 
1566 	while (true) {
1567 		struct callchain_cursor_node *node;
1568 
1569 		node = callchain_cursor_current(src);
1570 		if (node == NULL)
1571 			break;
1572 
1573 		rc = callchain_cursor_append(dst, node->ip, node->map, node->sym,
1574 					     node->branch, &node->branch_flags,
1575 					     node->nr_loop_iter,
1576 					     node->iter_cycles,
1577 					     node->branch_from, node->srcline);
1578 		if (rc)
1579 			break;
1580 
1581 		callchain_cursor_advance(src);
1582 	}
1583 
1584 	return rc;
1585 }
1586 
1587 /*
1588  * Initialize a cursor before adding entries inside, but keep
1589  * the previously allocated entries as a cache.
1590  */
1591 void callchain_cursor_reset(struct callchain_cursor *cursor)
1592 {
1593 	struct callchain_cursor_node *node;
1594 
1595 	cursor->nr = 0;
1596 	cursor->last = &cursor->first;
1597 
1598 	for (node = cursor->first; node != NULL; node = node->next)
1599 		map__zput(node->map);
1600 }
1601