xref: /openbmc/linux/arch/powerpc/perf/callchain.c (revision c4c3c32d)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Performance counter callchain support - powerpc architecture code
 *
 * Copyright © 2009 Paul Mackerras, IBM Corporation.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
#include <asm/pte-walk.h>

#include "callchain.h"

/*
 * Is sp valid as the address of the next kernel stack frame after prev_sp?
 * The next frame may be in a different stack area but should not go
 * back down in the same stack area.
 */
static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
{
	if (sp & 0xf)
		return 0;		/* must be 16-byte aligned */
	if (!validate_sp(sp, current))
		return 0;
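	/* A normal next frame is at least one minimum-size frame higher up. */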
	if (sp >= prev_sp + STACK_FRAME_MIN_SIZE)
		return 1;
	/*
	 * sp could decrease when we jump off an interrupt stack
	 * back to the regular process stack.
	 */
	if ((sp & ~(THREAD_SIZE - 1)) != (prev_sp & ~(THREAD_SIZE - 1)))
		return 1;
	return 0;
}

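/*
 * Walk the kernel stack starting from the sampled pt_regs, storing one
 * instruction pointer per frame.  Frames are linked by the back chain
 * word at the bottom of each frame; an interrupt frame is detected by
 * its marker word and restarts the walk at the interrupted context.
 */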
void __no_sanitize_address
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
	unsigned long sp, next_sp;
	unsigned long next_ip;
	unsigned long lr;
	long level = 0;
	unsigned long *fp;

	lr = regs->link;
	sp = regs->gpr[1];
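	/* The first entry is the instruction pointer at the time of the sample. */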
	perf_callchain_store(entry, perf_instruction_pointer(regs));

	if (!validate_sp(sp, current))
		return;

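	/* Follow the back chain: the first word of each frame points to the frame above it. */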
	for (;;) {
		fp = (unsigned long *) sp;
		next_sp = fp[0];

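		/*
		 * An interrupt frame has a fixed size and carries the
		 * STACK_FRAME_REGS_MARKER word at a known offset.
		 */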
		if (next_sp == sp + STACK_INT_FRAME_SIZE &&
		    validate_sp_size(sp, current, STACK_INT_FRAME_SIZE) &&
		    fp[STACK_INT_FRAME_MARKER_LONGS] == STACK_FRAME_REGS_MARKER) {
			/*
			 * This looks like an interrupt frame for an
			 * interrupt that occurred in the kernel
			 */
			regs = (struct pt_regs *)(sp + STACK_INT_FRAME_REGS);
			next_ip = regs->nip;
			lr = regs->link;
			level = 0;
			perf_callchain_store_context(entry, PERF_CONTEXT_KERNEL);

		} else {
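			/*
			 * For the topmost frame the return address is still
			 * in LR; deeper frames take it from the LR save word
			 * in the frame.
			 */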
			if (level == 0)
				next_ip = lr;
			else
				next_ip = fp[STACK_FRAME_LR_SAVE];

			/*
			 * We can't tell which of the first two addresses
			 * we get are valid, but we can filter out the
			 * obviously bogus ones here.  We replace them
			 * with 0 rather than removing them entirely so
			 * that userspace can tell which is which.
			 */
			if ((level == 1 && next_ip == lr) ||
			    (level <= 1 && !kernel_text_address(next_ip)))
				next_ip = 0;

			++level;
		}

		perf_callchain_store(entry, next_ip);
		if (!valid_next_sp(next_sp, sp))
			return;
		sp = next_sp;
	}
}

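/*
 * User stack unwinding differs between 32-bit and 64-bit tasks, so
 * dispatch to the matching walker for the current task.
 */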
void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
	if (!is_32bit_task())
		perf_callchain_user_64(entry, regs);
	else
		perf_callchain_user_32(entry, regs);
}