xref: /openbmc/linux/arch/powerpc/perf/callchain_32.c (revision 55fd7e02)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Performance counter callchain support - powerpc architecture code
4  *
5  * Copyright © 2009 Paul Mackerras, IBM Corporation.
6  */
7 #include <linux/kernel.h>
8 #include <linux/sched.h>
9 #include <linux/perf_event.h>
10 #include <linux/percpu.h>
11 #include <linux/uaccess.h>
12 #include <linux/mm.h>
13 #include <asm/ptrace.h>
14 #include <asm/sigcontext.h>
15 #include <asm/ucontext.h>
16 #include <asm/vdso.h>
17 #include <asm/pte-walk.h>
18 
19 #include "callchain.h"
20 
21 #ifdef CONFIG_PPC64
22 #include "../kernel/ppc32.h"
23 #else  /* CONFIG_PPC64 */
24 
25 #define __SIGNAL_FRAMESIZE32	__SIGNAL_FRAMESIZE
26 #define sigcontext32		sigcontext
27 #define mcontext32		mcontext
28 #define ucontext32		ucontext
29 #define compat_siginfo_t	struct siginfo
30 
31 #endif /* CONFIG_PPC64 */
32 
33 /*
34  * On 32-bit we just access the address and let hash_page create a
35  * HPTE if necessary, so there is no need to fall back to reading
36  * the page tables.  Since this is called at interrupt level,
37  * do_page_fault() won't treat a DSI as a page fault.
38  */
39 static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
40 {
41 	int rc;
42 
43 	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
44 	    ((unsigned long)ptr & 3))
45 		return -EFAULT;
46 
47 	rc = copy_from_user_nofault(ret, ptr, sizeof(*ret));
48 
49 	if (IS_ENABLED(CONFIG_PPC64) && rc)
50 		return read_user_stack_slow(ptr, ret, 4);
51 
52 	return rc;
53 }
54 
/*
 * Layout for non-RT signal frames
 */
struct signal_frame_32 {
	/* __SIGNAL_FRAMESIZE32 bytes skipped below the signal context */
	char			dummy[__SIGNAL_FRAMESIZE32];
	struct sigcontext32	sctx;	/* sctx.regs is expected to point at mctx (see sane_signal_32_frame) */
	struct mcontext32	mctx;	/* saved register state; mc_gregs is read by the unwinder */
	int			abigap[56];	/* trailing gap — NOTE(review): presumably the ABI red zone; confirm */
};
64 
/*
 * Layout for RT signal frames
 */
struct rt_signal_frame_32 {
	/* __SIGNAL_FRAMESIZE32 bytes of frame header plus a 16-byte gap */
	char			dummy[__SIGNAL_FRAMESIZE32 + 16];
	compat_siginfo_t	info;
	struct ucontext32	uc;	/* uc.uc_regs is expected to point at uc.uc_mcontext (see sane_rt_signal_32_frame) */
	int			abigap[56];	/* trailing gap — NOTE(review): presumably the ABI red zone; confirm */
};
74 
75 static int is_sigreturn_32_address(unsigned int nip, unsigned int fp)
76 {
77 	if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad))
78 		return 1;
79 	if (vdso32_sigtramp && current->mm->context.vdso_base &&
80 	    nip == current->mm->context.vdso_base + vdso32_sigtramp)
81 		return 1;
82 	return 0;
83 }
84 
85 static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp)
86 {
87 	if (nip == fp + offsetof(struct rt_signal_frame_32,
88 				 uc.uc_mcontext.mc_pad))
89 		return 1;
90 	if (vdso32_rt_sigtramp && current->mm->context.vdso_base &&
91 	    nip == current->mm->context.vdso_base + vdso32_rt_sigtramp)
92 		return 1;
93 	return 0;
94 }
95 
96 static int sane_signal_32_frame(unsigned int sp)
97 {
98 	struct signal_frame_32 __user *sf;
99 	unsigned int regs;
100 
101 	sf = (struct signal_frame_32 __user *) (unsigned long) sp;
102 	if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, &regs))
103 		return 0;
104 	return regs == (unsigned long) &sf->mctx;
105 }
106 
107 static int sane_rt_signal_32_frame(unsigned int sp)
108 {
109 	struct rt_signal_frame_32 __user *sf;
110 	unsigned int regs;
111 
112 	sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
113 	if (read_user_stack_32((unsigned int __user *) &sf->uc.uc_regs, &regs))
114 		return 0;
115 	return regs == (unsigned long) &sf->uc.uc_mcontext;
116 }
117 
118 static unsigned int __user *signal_frame_32_regs(unsigned int sp,
119 				unsigned int next_sp, unsigned int next_ip)
120 {
121 	struct mcontext32 __user *mctx = NULL;
122 	struct signal_frame_32 __user *sf;
123 	struct rt_signal_frame_32 __user *rt_sf;
124 
125 	/*
126 	 * Note: the next_sp - sp >= signal frame size check
127 	 * is true when next_sp < sp, for example, when
128 	 * transitioning from an alternate signal stack to the
129 	 * normal stack.
130 	 */
131 	if (next_sp - sp >= sizeof(struct signal_frame_32) &&
132 	    is_sigreturn_32_address(next_ip, sp) &&
133 	    sane_signal_32_frame(sp)) {
134 		sf = (struct signal_frame_32 __user *) (unsigned long) sp;
135 		mctx = &sf->mctx;
136 	}
137 
138 	if (!mctx && next_sp - sp >= sizeof(struct rt_signal_frame_32) &&
139 	    is_rt_sigreturn_32_address(next_ip, sp) &&
140 	    sane_rt_signal_32_frame(sp)) {
141 		rt_sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
142 		mctx = &rt_sf->uc.uc_mcontext;
143 	}
144 
145 	if (!mctx)
146 		return NULL;
147 	return mctx->mc_gregs;
148 }
149 
/*
 * Capture a user-space callchain for a 32-bit task by walking the
 * stack back-chain from regs->gpr[1], restarting the walk from the
 * saved register state whenever a signal frame is recognized.
 */
void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
			    struct pt_regs *regs)
{
	unsigned int sp, next_sp;
	unsigned int next_ip;
	unsigned int lr;
	long level = 0;	/* frames walked since the last (re)start */
	unsigned int __user *fp, *uregs;

	next_ip = perf_instruction_pointer(regs);
	lr = regs->link;
	sp = regs->gpr[1];
	perf_callchain_store(entry, next_ip);

	while (entry->nr < entry->max_stack) {
		fp = (unsigned int __user *) (unsigned long) sp;
		if (invalid_user_sp(sp) || read_user_stack_32(fp, &next_sp))
			return;
		/* fp[1] is the saved LR slot; only read it past the first frame. */
		if (level > 0 && read_user_stack_32(&fp[1], &next_ip))
			return;

		/*
		 * For the innermost frames the return address may still be
		 * in the link register rather than on the stack, so retry
		 * the signal-frame match with lr when next_ip misses.
		 */
		uregs = signal_frame_32_regs(sp, next_sp, next_ip);
		if (!uregs && level <= 1)
			uregs = signal_frame_32_regs(sp, next_sp, lr);
		if (uregs) {
			/*
			 * This looks like a signal frame, so restart
			 * the stack trace with the values in it.
			 */
			if (read_user_stack_32(&uregs[PT_NIP], &next_ip) ||
			    read_user_stack_32(&uregs[PT_LNK], &lr) ||
			    read_user_stack_32(&uregs[PT_R1], &sp))
				return;
			level = 0;
			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
			perf_callchain_store(entry, next_ip);
			continue;
		}

		if (level == 0)
			next_ip = lr;	/* first frame: return address is in LR */
		perf_callchain_store(entry, next_ip);
		++level;
		sp = next_sp;
	}
}
196