/*
 * thread-stack.c: Synthesize a thread's stack using call / return events
 * Copyright (c) 2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <errno.h>
#include "thread.h"
#include "event.h"
#include "machine.h"
#include "env.h"
#include "util.h"
#include "debug.h"
#include "symbol.h"
#include "comm.h"
#include "call-path.h"
#include "thread-stack.h"

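/* Number of entries by which the stack array grows when more space is needed */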
#define STACK_GROWTH 2048

/*
 * State of retpoline detection.
 *
 * RETPOLINE_NONE: no retpoline detection
 * X86_RETPOLINE_POSSIBLE: x86 retpoline possible
 * X86_RETPOLINE_DETECTED: x86 retpoline detected
 */
enum retpoline_state_t {
	RETPOLINE_NONE,
	X86_RETPOLINE_POSSIBLE,
	X86_RETPOLINE_DETECTED,
};

/**
 * struct thread_stack_entry - thread stack entry.
 * @ret_addr: return address
 * @timestamp: timestamp (if known)
 * @ref: external reference (e.g. db_id of sample)
 * @branch_count: the branch count when the entry was created
 * @db_id: id used for db-export
 * @cp: call path
 * @no_call: a 'call' was not seen
 * @trace_end: a 'call' but trace ended
 * @non_call: a branch but not a 'call' to the start of a different symbol
 */
struct thread_stack_entry {
	u64 ret_addr;
	u64 timestamp;
	u64 ref;
	u64 branch_count;
	u64 db_id;
	struct call_path *cp;
	bool no_call;
	bool trace_end;
	bool non_call;
};

/**
 * struct thread_stack - thread stack constructed from 'call' and 'return'
 *                       branch samples.
 * @stack: array that holds the stack
 * @cnt: number of entries in the stack
 * @sz: current maximum stack size
 * @trace_nr: current trace number
 * @branch_count: running branch count
 * @kernel_start: kernel start address
 * @last_time: last timestamp
 * @crp: call/return processor
 * @comm: current comm
 * @arr_sz: size of array if this is the first element of an array
 * @rstate: used to detect retpolines
 */
struct thread_stack {
	struct thread_stack_entry *stack;
	size_t cnt;
	size_t sz;
	u64 trace_nr;
	u64 branch_count;
	u64 kernel_start;
	u64 last_time;
	struct call_return_processor *crp;
	struct comm *comm;
	unsigned int arr_sz;
	enum retpoline_state_t rstate;
};

/*
 * Assume pid == tid == 0 identifies the idle task as defined by
 * perf_session__register_idle_thread(). The idle task is really 1 task per cpu,
 * and therefore requires a stack for each cpu.
 */
static inline bool thread_stack__per_cpu(struct thread *thread)
{
	return !(thread->tid || thread->pid_);
}

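/* Enlarge the stack array by STACK_GROWTH entries, preserving its contents */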
static int thread_stack__grow(struct thread_stack *ts)
{
	struct thread_stack_entry *new_stack;
	size_t sz, new_sz;

	new_sz = ts->sz + STACK_GROWTH;
	sz = new_sz * sizeof(struct thread_stack_entry);

	new_stack = realloc(ts->stack, sz);
	if (!new_stack)
		return -ENOMEM;

	ts->stack = new_stack;
	ts->sz = new_sz;

	return 0;
}

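/*
 * Allocate the initial stack and record the kernel start address, which
 * determines whether an address belongs to the kernel. On x86, also arm
 * retpoline detection.
 */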
static int thread_stack__init(struct thread_stack *ts, struct thread *thread,
			      struct call_return_processor *crp)
{
	int err;

	err = thread_stack__grow(ts);
	if (err)
		return err;

	if (thread->mg && thread->mg->machine) {
		struct machine *machine = thread->mg->machine;
		const char *arch = perf_env__arch(machine->env);

		ts->kernel_start = machine__kernel_start(machine);
		if (!strcmp(arch, "x86"))
			ts->rstate = X86_RETPOLINE_POSSIBLE;
	} else {
		ts->kernel_start = 1ULL << 63;
	}
	ts->crp = crp;

	return 0;
}

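/*
 * Get a thread's stack, allocating it if necessary. The idle task keeps one
 * stack per cpu, so for it the stack array is grown to a power of two that
 * covers 'cpu'.
 */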
static struct thread_stack *thread_stack__new(struct thread *thread, int cpu,
					      struct call_return_processor *crp)
{
	struct thread_stack *ts = thread->ts, *new_ts;
	unsigned int old_sz = ts ? ts->arr_sz : 0;
	unsigned int new_sz = 1;

	if (thread_stack__per_cpu(thread) && cpu > 0)
		new_sz = roundup_pow_of_two(cpu + 1);

	if (!ts || new_sz > old_sz) {
		new_ts = calloc(new_sz, sizeof(*ts));
		if (!new_ts)
			return NULL;
		if (ts)
			memcpy(new_ts, ts, old_sz * sizeof(*ts));
		new_ts->arr_sz = new_sz;
		zfree(&thread->ts);
		thread->ts = new_ts;
		ts = new_ts;
	}

	if (thread_stack__per_cpu(thread) && cpu > 0 &&
	    (unsigned int)cpu < ts->arr_sz)
		ts += cpu;

	if (!ts->stack &&
	    thread_stack__init(ts, thread, crp))
		return NULL;

	return ts;
}

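/* Get the idle task's stack for the given cpu, or NULL if not allocated yet */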
static struct thread_stack *thread__cpu_stack(struct thread *thread, int cpu)
{
	struct thread_stack *ts = thread->ts;

	if (cpu < 0)
		cpu = 0;

	if (!ts || (unsigned int)cpu >= ts->arr_sz)
		return NULL;

	ts += cpu;

	if (!ts->stack)
		return NULL;

	return ts;
}

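/* Get an existing stack without allocating one: per-cpu for the idle task */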
static inline struct thread_stack *thread__stack(struct thread *thread,
						    int cpu)
{
	if (!thread)
		return NULL;

	if (thread_stack__per_cpu(thread))
		return thread__cpu_stack(thread, cpu);

	return thread->ts;
}

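/*
 * Push a return address. If growing the stack fails, the existing entries are
 * discarded but the new entry is still recorded in the old (still valid)
 * allocation.
 */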
static int thread_stack__push(struct thread_stack *ts, u64 ret_addr,
			      bool trace_end)
{
	int err = 0;

	if (ts->cnt == ts->sz) {
		err = thread_stack__grow(ts);
		if (err) {
			pr_warning("Out of memory: discarding thread stack\n");
			ts->cnt = 0;
		}
	}

	ts->stack[ts->cnt].trace_end = trace_end;
	ts->stack[ts->cnt++].ret_addr = ret_addr;

	return err;
}

static void thread_stack__pop(struct thread_stack *ts, u64 ret_addr)
{
	size_t i;

	/*
	 * In some cases there may be functions which are not seen to return,
	 * for example when setjmp / longjmp has been used, or when the perf
	 * context switch in the kernel does not stop and start tracing in
	 * exactly the same code path.  When that happens the return address
	 * will be further down the stack.  If the return address is not found
	 * at all, we assume the opposite (i.e. this is a return for a call
	 * that wasn't seen for some reason) and leave the stack alone.
	 */
	for (i = ts->cnt; i; ) {
		if (ts->stack[--i].ret_addr == ret_addr) {
			ts->cnt = i;
			return;
		}
	}
}

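/* Pop consecutive 'trace end' entries from the top of the stack */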
static void thread_stack__pop_trace_end(struct thread_stack *ts)
{
	size_t i;

	for (i = ts->cnt; i; ) {
		if (ts->stack[--i].trace_end)
			ts->cnt = i;
		else
			return;
	}
}

static bool thread_stack__in_kernel(struct thread_stack *ts)
{
	if (!ts->cnt)
		return false;

	return ts->stack[ts->cnt - 1].cp->in_kernel;
}

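/*
 * Report the stack entry at 'idx' to the call/return processor. 'no_return'
 * flags a call for which no matching return was seen.
 */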
static int thread_stack__call_return(struct thread *thread,
				     struct thread_stack *ts, size_t idx,
				     u64 timestamp, u64 ref, bool no_return)
{
	struct call_return_processor *crp = ts->crp;
	struct thread_stack_entry *tse;
	struct call_return cr = {
		.thread = thread,
		.comm = ts->comm,
		.db_id = 0,
	};
	u64 *parent_db_id;

	tse = &ts->stack[idx];
	cr.cp = tse->cp;
	cr.call_time = tse->timestamp;
	cr.return_time = timestamp;
	cr.branch_count = ts->branch_count - tse->branch_count;
	cr.db_id = tse->db_id;
	cr.call_ref = tse->ref;
	cr.return_ref = ref;
	if (tse->no_call)
		cr.flags |= CALL_RETURN_NO_CALL;
	if (no_return)
		cr.flags |= CALL_RETURN_NO_RETURN;
	if (tse->non_call)
		cr.flags |= CALL_RETURN_NON_CALL;

	/*
	 * The parent db_id must be assigned before exporting the child. Note
	 * it is not possible to export the parent first, because its
	 * information is incomplete until its 'return' has been processed.
	 */
	parent_db_id = idx ? &(tse - 1)->db_id : NULL;

	return crp->process(&cr, parent_db_id, crp->data);
}

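/*
 * Empty one stack. If a call/return processor is in use, each remaining entry
 * is first reported as a call without a seen return.
 */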
static int __thread_stack__flush(struct thread *thread, struct thread_stack *ts)
{
	struct call_return_processor *crp = ts->crp;
	int err;

	if (!crp) {
		ts->cnt = 0;
		return 0;
	}

	while (ts->cnt) {
		err = thread_stack__call_return(thread, ts, --ts->cnt,
						ts->last_time, 0, true);
		if (err) {
			pr_err("Error flushing thread stack!\n");
			ts->cnt = 0;
			return err;
		}
	}

	return 0;
}

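/* Flush all of a thread's stacks (one per cpu in the case of the idle task) */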
int thread_stack__flush(struct thread *thread)
{
	struct thread_stack *ts = thread->ts;
	unsigned int pos;
	int err = 0;

	if (ts) {
		for (pos = 0; pos < ts->arr_sz; pos++) {
			int ret = __thread_stack__flush(thread, ts + pos);

			if (ret)
				err = ret;
		}
	}

	return err;
}

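/*
 * Update the stack from one branch event: push on 'call', pop on 'return',
 * and flush when the trace number changes because the trace is discontinuous.
 * Does nothing further if thread_stack__process() is in use.
 */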
int thread_stack__event(struct thread *thread, int cpu, u32 flags, u64 from_ip,
			u64 to_ip, u16 insn_len, u64 trace_nr)
{
	struct thread_stack *ts = thread__stack(thread, cpu);

	if (!thread)
		return -EINVAL;

	if (!ts) {
		ts = thread_stack__new(thread, cpu, NULL);
		if (!ts) {
			pr_warning("Out of memory: no thread stack\n");
			return -ENOMEM;
		}
		ts->trace_nr = trace_nr;
	}

	/*
	 * When the trace is discontinuous, the trace_nr changes.  In that case
	 * the stack might be completely invalid.  Better to report nothing than
	 * to report something misleading, so flush the stack.
	 */
	if (trace_nr != ts->trace_nr) {
		if (ts->trace_nr)
			__thread_stack__flush(thread, ts);
		ts->trace_nr = trace_nr;
	}

	/* Stop here if thread_stack__process() is in use */
	if (ts->crp)
		return 0;

	if (flags & PERF_IP_FLAG_CALL) {
		u64 ret_addr;

		if (!to_ip)
			return 0;
		ret_addr = from_ip + insn_len;
		if (ret_addr == to_ip)
			return 0; /* Zero-length calls are excluded */
		return thread_stack__push(ts, ret_addr,
					  flags & PERF_IP_FLAG_TRACE_END);
	} else if (flags & PERF_IP_FLAG_TRACE_BEGIN) {
		/*
		 * If the caller did not change the trace number (which would
		 * have flushed the stack) then try to make sense of the stack.
		 * Possibly, tracing began after returning to the current
		 * address, so try to pop that. Also, a call made when the
		 * trace ended is not expected to return, so pop that too.
		 */
		thread_stack__pop(ts, to_ip);
		thread_stack__pop_trace_end(ts);
	} else if ((flags & PERF_IP_FLAG_RETURN) && from_ip) {
		thread_stack__pop(ts, to_ip);
	}

	return 0;
}

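/* Note a new trace number, flushing the stack if the trace has restarted */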
void thread_stack__set_trace_nr(struct thread *thread, int cpu, u64 trace_nr)
{
	struct thread_stack *ts = thread__stack(thread, cpu);

	if (!ts)
		return;

	if (trace_nr != ts->trace_nr) {
		if (ts->trace_nr)
			__thread_stack__flush(thread, ts);
		ts->trace_nr = trace_nr;
	}
}

static void __thread_stack__free(struct thread *thread, struct thread_stack *ts)
{
	__thread_stack__flush(thread, ts);
	zfree(&ts->stack);
}

static void thread_stack__reset(struct thread *thread, struct thread_stack *ts)
{
	unsigned int arr_sz = ts->arr_sz;

	__thread_stack__free(thread, ts);
	memset(ts, 0, sizeof(*ts));
	ts->arr_sz = arr_sz;
}

void thread_stack__free(struct thread *thread)
{
	struct thread_stack *ts = thread->ts;
	unsigned int pos;

	if (ts) {
		for (pos = 0; pos < ts->arr_sz; pos++)
			__thread_stack__free(thread, ts + pos);
		zfree(&thread->ts);
	}
}

static inline u64 callchain_context(u64 ip, u64 kernel_start)
{
	return ip < kernel_start ? PERF_CONTEXT_USER : PERF_CONTEXT_KERNEL;
}

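/*
 * Synthesize a callchain: the current context and ip first, then the stacked
 * return addresses from innermost to outermost, inserting a new context
 * marker wherever the addresses cross the kernel boundary.
 */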
void thread_stack__sample(struct thread *thread, int cpu,
			  struct ip_callchain *chain,
			  size_t sz, u64 ip, u64 kernel_start)
{
	struct thread_stack *ts = thread__stack(thread, cpu);
	u64 context = callchain_context(ip, kernel_start);
	u64 last_context;
	size_t i, j;

	if (sz < 2) {
		chain->nr = 0;
		return;
	}

	chain->ips[0] = context;
	chain->ips[1] = ip;

	if (!ts) {
		chain->nr = 2;
		return;
	}

	last_context = context;

	for (i = 2, j = 1; i < sz && j <= ts->cnt; i++, j++) {
		ip = ts->stack[ts->cnt - j].ret_addr;
		context = callchain_context(ip, kernel_start);
		if (context != last_context) {
			if (i >= sz - 1)
				break;
			chain->ips[i++] = context;
			last_context = context;
		}
		chain->ips[i] = ip;
	}

	chain->nr = i;
}

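/*
 * Allocate a call/return processor. 'process' is called for each call/return
 * pair, and 'data' is passed through to it.
 */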
struct call_return_processor *
call_return_processor__new(int (*process)(struct call_return *cr, u64 *parent_db_id, void *data),
			   void *data)
{
	struct call_return_processor *crp;

	crp = zalloc(sizeof(struct call_return_processor));
	if (!crp)
		return NULL;
	crp->cpr = call_path_root__new();
	if (!crp->cpr)
		goto out_free;
	crp->process = process;
	crp->data = data;
	return crp;

out_free:
	free(crp);
	return NULL;
}

void call_return_processor__free(struct call_return_processor *crp)
{
	if (crp) {
		call_path_root__free(crp->cpr);
		free(crp);
	}
}

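/* Push an entry with its call path, growing the stack if necessary */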
static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
				 u64 timestamp, u64 ref, struct call_path *cp,
				 bool no_call, bool trace_end)
{
	struct thread_stack_entry *tse;
	int err;

	if (!cp)
		return -ENOMEM;

	if (ts->cnt == ts->sz) {
		err = thread_stack__grow(ts);
		if (err)
			return err;
	}

	tse = &ts->stack[ts->cnt++];
	tse->ret_addr = ret_addr;
	tse->timestamp = timestamp;
	tse->ref = ref;
	tse->branch_count = ts->branch_count;
	tse->cp = cp;
	tse->no_call = no_call;
	tse->trace_end = trace_end;
	tse->non_call = false;
	tse->db_id = 0;

	return 0;
}

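/*
 * Pop entries to match a 'return'. If the return address is found further
 * down the stack, the entries above it are reported as calls with no seen
 * return. Returns 1 if no matching entry is found, otherwise 0 on success or
 * a negative error code.
 */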
static int thread_stack__pop_cp(struct thread *thread, struct thread_stack *ts,
				u64 ret_addr, u64 timestamp, u64 ref,
				struct symbol *sym)
{
	int err;

	if (!ts->cnt)
		return 1;

	if (ts->cnt == 1) {
		struct thread_stack_entry *tse = &ts->stack[0];

		if (tse->cp->sym == sym)
			return thread_stack__call_return(thread, ts, --ts->cnt,
							 timestamp, ref, false);
	}

	if (ts->stack[ts->cnt - 1].ret_addr == ret_addr &&
	    !ts->stack[ts->cnt - 1].non_call) {
		return thread_stack__call_return(thread, ts, --ts->cnt,
						 timestamp, ref, false);
	} else {
		size_t i = ts->cnt - 1;

		while (i--) {
			if (ts->stack[i].ret_addr != ret_addr ||
			    ts->stack[i].non_call)
				continue;
			i += 1;
			while (ts->cnt > i) {
				err = thread_stack__call_return(thread, ts,
								--ts->cnt,
								timestamp, ref,
								true);
				if (err)
					return err;
			}
			return thread_stack__call_return(thread, ts, --ts->cnt,
							 timestamp, ref, false);
		}
	}

	return 1;
}

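/* Put the sample's current symbol on the bottom of an empty stack */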
static int thread_stack__bottom(struct thread_stack *ts,
				struct perf_sample *sample,
				struct addr_location *from_al,
				struct addr_location *to_al, u64 ref)
{
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *cp;
	struct symbol *sym;
	u64 ip;

	if (sample->ip) {
		ip = sample->ip;
		sym = from_al->sym;
	} else if (sample->addr) {
		ip = sample->addr;
		sym = to_al->sym;
	} else {
		return 0;
	}

	cp = call_path__findnew(cpr, &cpr->call_path, sym, ip,
				ts->kernel_start);

	return thread_stack__push_cp(ts, ip, sample->time, ref, cp,
				     true, false);
}

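/*
 * Handle a 'return' for which no matching 'call' was seen, e.g. because
 * tracing began mid-function or because the 'return' is really being used as
 * a jump.
 */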
static int thread_stack__no_call_return(struct thread *thread,
					struct thread_stack *ts,
					struct perf_sample *sample,
					struct addr_location *from_al,
					struct addr_location *to_al, u64 ref)
{
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *root = &cpr->call_path;
	struct symbol *fsym = from_al->sym;
	struct symbol *tsym = to_al->sym;
	struct call_path *cp, *parent;
	u64 ks = ts->kernel_start;
	u64 addr = sample->addr;
	u64 tm = sample->time;
	u64 ip = sample->ip;
	int err;

	if (ip >= ks && addr < ks) {
		/* Return to userspace, so pop all kernel addresses */
		while (thread_stack__in_kernel(ts)) {
			err = thread_stack__call_return(thread, ts, --ts->cnt,
							tm, ref, true);
			if (err)
				return err;
		}

		/* If the stack is empty, push the userspace address */
		if (!ts->cnt) {
			cp = call_path__findnew(cpr, root, tsym, addr, ks);
			return thread_stack__push_cp(ts, 0, tm, ref, cp, true,
						     false);
		}
	} else if (thread_stack__in_kernel(ts) && ip < ks) {
		/* Return to userspace, so pop all kernel addresses */
		while (thread_stack__in_kernel(ts)) {
			err = thread_stack__call_return(thread, ts, --ts->cnt,
							tm, ref, true);
			if (err)
				return err;
		}
	}

	if (ts->cnt)
		parent = ts->stack[ts->cnt - 1].cp;
	else
		parent = root;

	if (parent->sym == from_al->sym) {
		/*
		 * At the bottom of the stack, assume the missing 'call' was
		 * before the trace started. So, pop the current symbol and push
		 * the 'to' symbol.
		 */
		if (ts->cnt == 1) {
			err = thread_stack__call_return(thread, ts, --ts->cnt,
							tm, ref, false);
			if (err)
				return err;
		}

		if (!ts->cnt) {
			cp = call_path__findnew(cpr, root, tsym, addr, ks);

			return thread_stack__push_cp(ts, addr, tm, ref, cp,
						     true, false);
		}

		/*
		 * Otherwise assume the 'return' is being used as a jump (e.g.
		 * retpoline) and just push the 'to' symbol.
		 */
		cp = call_path__findnew(cpr, parent, tsym, addr, ks);

		err = thread_stack__push_cp(ts, 0, tm, ref, cp, true, false);
		if (!err)
			ts->stack[ts->cnt - 1].non_call = true;

		return err;
	}

	/*
	 * Assume 'parent' has not yet returned, so push 'to', and then push and
	 * pop 'from'.
	 */

	cp = call_path__findnew(cpr, parent, tsym, addr, ks);

	err = thread_stack__push_cp(ts, addr, tm, ref, cp, true, false);
	if (err)
		return err;

	cp = call_path__findnew(cpr, cp, fsym, ip, ks);

	err = thread_stack__push_cp(ts, ip, tm, ref, cp, true, false);
	if (err)
		return err;

	return thread_stack__call_return(thread, ts, --ts->cnt, tm, ref, false);
}

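/* On 'trace begin', pop a 'trace end' entry left on top of the stack */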
static int thread_stack__trace_begin(struct thread *thread,
				     struct thread_stack *ts, u64 timestamp,
				     u64 ref)
{
	struct thread_stack_entry *tse;
	int err;

	if (!ts->cnt)
		return 0;

	/* Pop trace end */
	tse = &ts->stack[ts->cnt - 1];
	if (tse->trace_end) {
		err = thread_stack__call_return(thread, ts, --ts->cnt,
						timestamp, ref, false);
		if (err)
			return err;
	}

	return 0;
}

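/* On 'trace end', push a marker entry recording where tracing stopped */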
static int thread_stack__trace_end(struct thread_stack *ts,
				   struct perf_sample *sample, u64 ref)
{
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *cp;
	u64 ret_addr;

	/* No point having 'trace end' on the bottom of the stack */
	if (!ts->cnt || (ts->cnt == 1 && ts->stack[0].ref == ref))
		return 0;

	cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp, NULL, 0,
				ts->kernel_start);

	ret_addr = sample->ip + sample->insn_len;

	return thread_stack__push_cp(ts, ret_addr, sample->time, ref, cp,
				     false, true);
}

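/* Match the symbol names used by x86 indirect-branch (retpoline) thunks */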
static bool is_x86_retpoline(const char *name)
{
	const char *p = strstr(name, "__x86_indirect_thunk_");

	return p == name || !strcmp(name, "__indirect_thunk_start");
}

/*
 * x86 retpoline functions pollute the call graph. This function removes them.
 * This does not handle function return thunks, nor is there any improvement
 * for the handling of inline thunks or extern thunks.
 */
static int thread_stack__x86_retpoline(struct thread_stack *ts,
				       struct perf_sample *sample,
				       struct addr_location *to_al)
{
	struct thread_stack_entry *tse = &ts->stack[ts->cnt - 1];
	struct call_path_root *cpr = ts->crp->cpr;
	struct symbol *sym = tse->cp->sym;
	struct symbol *tsym = to_al->sym;
	struct call_path *cp;

	if (sym && is_x86_retpoline(sym->name)) {
		/*
		 * This is an x86 retpoline function. It pollutes the call
		 * graph by showing up everywhere there is an indirect branch,
		 * but does not itself mean anything. Here the top-of-stack is
		 * removed by decrementing the stack count, and then further
		 * down, the resulting top-of-stack is replaced with the actual
		 * target. The result is that the retpoline functions will no
		 * longer appear in the call graph. Note this only affects the
		 * call graph, since all the original branches are left
		 * unchanged.
		 */
		ts->cnt -= 1;
		sym = ts->stack[ts->cnt - 2].cp->sym;
		if (sym && sym == tsym && to_al->addr != tsym->start) {
			/*
			 * Target is back to the middle of the symbol we came
			 * from, so assume it is an indirect jmp and forget it
			 * altogether.
			 */
			ts->cnt -= 1;
			return 0;
		}
	} else if (sym && sym == tsym) {
		/*
		 * Target is back to the symbol we came from, so assume it is
		 * an indirect jmp and forget it altogether.
		 */
		ts->cnt -= 1;
		return 0;
	}

	cp = call_path__findnew(cpr, ts->stack[ts->cnt - 2].cp, tsym,
				sample->addr, ts->kernel_start);
	if (!cp)
		return -ENOMEM;

	/* Replace the top-of-stack with the actual target */
	ts->stack[ts->cnt - 1].cp = cp;

	return 0;
}

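/*
 * Process one branch sample using a call/return processor, maintaining the
 * call stack and synthesizing call/return information, e.g. for db-export.
 * Supersedes thread_stack__event().
 */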
int thread_stack__process(struct thread *thread, struct comm *comm,
			  struct perf_sample *sample,
			  struct addr_location *from_al,
			  struct addr_location *to_al, u64 ref,
			  struct call_return_processor *crp)
{
	struct thread_stack *ts = thread__stack(thread, sample->cpu);
	enum retpoline_state_t rstate;
	int err = 0;

	if (ts && !ts->crp) {
		/* Supersede thread_stack__event() */
		thread_stack__reset(thread, ts);
		ts = NULL;
	}

	if (!ts) {
		ts = thread_stack__new(thread, sample->cpu, crp);
		if (!ts)
			return -ENOMEM;
		ts->comm = comm;
	}

	rstate = ts->rstate;
	if (rstate == X86_RETPOLINE_DETECTED)
		ts->rstate = X86_RETPOLINE_POSSIBLE;

	/* Flush stack on exec */
	if (ts->comm != comm && thread->pid_ == thread->tid) {
		err = __thread_stack__flush(thread, ts);
		if (err)
			return err;
		ts->comm = comm;
	}

	/* If the stack is empty, put the current symbol on the stack */
	if (!ts->cnt) {
		err = thread_stack__bottom(ts, sample, from_al, to_al, ref);
		if (err)
			return err;
	}

	ts->branch_count += 1;
	ts->last_time = sample->time;

	if (sample->flags & PERF_IP_FLAG_CALL) {
		bool trace_end = sample->flags & PERF_IP_FLAG_TRACE_END;
		struct call_path_root *cpr = ts->crp->cpr;
		struct call_path *cp;
		u64 ret_addr;

		if (!sample->ip || !sample->addr)
			return 0;

		ret_addr = sample->ip + sample->insn_len;
		if (ret_addr == sample->addr)
			return 0; /* Zero-length calls are excluded */

		cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
					to_al->sym, sample->addr,
					ts->kernel_start);
		err = thread_stack__push_cp(ts, ret_addr, sample->time, ref,
					    cp, false, trace_end);

		/*
		 * A call to the same symbol, but not to the start of the
		 * symbol, may be the start of an x86 retpoline.
		 */
		if (!err && rstate == X86_RETPOLINE_POSSIBLE && to_al->sym &&
		    from_al->sym == to_al->sym &&
		    to_al->addr != to_al->sym->start)
			ts->rstate = X86_RETPOLINE_DETECTED;

	} else if (sample->flags & PERF_IP_FLAG_RETURN) {
		if (!sample->ip || !sample->addr)
			return 0;

		/* x86 retpoline 'return' doesn't match the stack */
		if (rstate == X86_RETPOLINE_DETECTED && ts->cnt > 2 &&
		    ts->stack[ts->cnt - 1].ret_addr != sample->addr)
			return thread_stack__x86_retpoline(ts, sample, to_al);

		err = thread_stack__pop_cp(thread, ts, sample->addr,
					   sample->time, ref, from_al->sym);
		if (err) {
			if (err < 0)
				return err;
			err = thread_stack__no_call_return(thread, ts, sample,
							   from_al, to_al, ref);
		}
	} else if (sample->flags & PERF_IP_FLAG_TRACE_BEGIN) {
		err = thread_stack__trace_begin(thread, ts, sample->time, ref);
	} else if (sample->flags & PERF_IP_FLAG_TRACE_END) {
		err = thread_stack__trace_end(ts, sample, ref);
	} else if (sample->flags & PERF_IP_FLAG_BRANCH &&
		   from_al->sym != to_al->sym && to_al->sym &&
		   to_al->addr == to_al->sym->start) {
		struct call_path_root *cpr = ts->crp->cpr;
		struct call_path *cp;

		/*
		 * The compiler might optimize a call/ret combination by making
		 * it a jmp. Make that visible by recording on the stack a
		 * branch to the start of a different symbol. Note that this
		 * means when a ret pops the stack, all jmps must be popped off
		 * first.
		 */
		cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
					to_al->sym, sample->addr,
					ts->kernel_start);
		err = thread_stack__push_cp(ts, 0, sample->time, ref, cp, false,
					    false);
		if (!err)
			ts->stack[ts->cnt - 1].non_call = true;
	}

	return err;
}

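/* Return the current depth of a thread's stack */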
size_t thread_stack__depth(struct thread *thread, int cpu)
{
	struct thread_stack *ts = thread__stack(thread, cpu);

	if (!ts)
		return 0;
	return ts->cnt;
}