xref: /openbmc/linux/arch/powerpc/perf/callchain_32.c (revision 15e3ae36)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Performance counter callchain support - powerpc architecture code
4  *
5  * Copyright © 2009 Paul Mackerras, IBM Corporation.
6  */
7 #include <linux/kernel.h>
8 #include <linux/sched.h>
9 #include <linux/perf_event.h>
10 #include <linux/percpu.h>
11 #include <linux/uaccess.h>
12 #include <linux/mm.h>
13 #include <asm/ptrace.h>
14 #include <asm/pgtable.h>
15 #include <asm/sigcontext.h>
16 #include <asm/ucontext.h>
17 #include <asm/vdso.h>
18 #include <asm/pte-walk.h>
19 
20 #include "callchain.h"
21 
22 #ifdef CONFIG_PPC64
23 #include "../kernel/ppc32.h"
24 #else  /* CONFIG_PPC64 */
25 
26 #define __SIGNAL_FRAMESIZE32	__SIGNAL_FRAMESIZE
27 #define sigcontext32		sigcontext
28 #define mcontext32		mcontext
29 #define ucontext32		ucontext
30 #define compat_siginfo_t	struct siginfo
31 
32 #endif /* CONFIG_PPC64 */
33 
34 /*
35  * On 32-bit we just access the address and let hash_page create a
36  * HPTE if necessary, so there is no need to fall back to reading
37  * the page tables.  Since this is called at interrupt level,
38  * do_page_fault() won't treat a DSI as a page fault.
39  */
40 static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
41 {
42 	int rc;
43 
44 	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
45 	    ((unsigned long)ptr & 3))
46 		return -EFAULT;
47 
48 	rc = probe_user_read(ret, ptr, sizeof(*ret));
49 
50 	if (IS_ENABLED(CONFIG_PPC64) && rc)
51 		return read_user_stack_slow(ptr, ret, 4);
52 
53 	return rc;
54 }
55 
/*
 * Layout for non-RT signal frames.
 *
 * NOTE(review): this mirrors the user-visible frame laid out by the
 * 32-bit signal-delivery code; the field sizes and ordering are ABI
 * and must not be changed.
 */
struct signal_frame_32 {
	char			dummy[__SIGNAL_FRAMESIZE32];	/* caller's stack frame / back-chain area */
	struct sigcontext32	sctx;				/* sigcontext; sctx.regs points at mctx below */
	struct mcontext32	mctx;				/* saved user register state */
	int			abigap[56];			/* gap reserved below the frame per the ABI */
};
65 
/*
 * Layout for RT signal frames.
 *
 * NOTE(review): like signal_frame_32, this must match the frame built
 * by the 32-bit rt-signal delivery path byte for byte — it is ABI.
 */
struct rt_signal_frame_32 {
	char			dummy[__SIGNAL_FRAMESIZE32 + 16];	/* stack frame plus extra pad used by RT delivery */
	compat_siginfo_t	info;					/* siginfo passed to the handler */
	struct ucontext32	uc;					/* ucontext; uc.uc_regs points at uc.uc_mcontext */
	int			abigap[56];				/* gap reserved below the frame per the ABI */
};
75 
76 static int is_sigreturn_32_address(unsigned int nip, unsigned int fp)
77 {
78 	if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad))
79 		return 1;
80 	if (vdso32_sigtramp && current->mm->context.vdso_base &&
81 	    nip == current->mm->context.vdso_base + vdso32_sigtramp)
82 		return 1;
83 	return 0;
84 }
85 
86 static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp)
87 {
88 	if (nip == fp + offsetof(struct rt_signal_frame_32,
89 				 uc.uc_mcontext.mc_pad))
90 		return 1;
91 	if (vdso32_rt_sigtramp && current->mm->context.vdso_base &&
92 	    nip == current->mm->context.vdso_base + vdso32_rt_sigtramp)
93 		return 1;
94 	return 0;
95 }
96 
97 static int sane_signal_32_frame(unsigned int sp)
98 {
99 	struct signal_frame_32 __user *sf;
100 	unsigned int regs;
101 
102 	sf = (struct signal_frame_32 __user *) (unsigned long) sp;
103 	if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, &regs))
104 		return 0;
105 	return regs == (unsigned long) &sf->mctx;
106 }
107 
108 static int sane_rt_signal_32_frame(unsigned int sp)
109 {
110 	struct rt_signal_frame_32 __user *sf;
111 	unsigned int regs;
112 
113 	sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
114 	if (read_user_stack_32((unsigned int __user *) &sf->uc.uc_regs, &regs))
115 		return 0;
116 	return regs == (unsigned long) &sf->uc.uc_mcontext;
117 }
118 
119 static unsigned int __user *signal_frame_32_regs(unsigned int sp,
120 				unsigned int next_sp, unsigned int next_ip)
121 {
122 	struct mcontext32 __user *mctx = NULL;
123 	struct signal_frame_32 __user *sf;
124 	struct rt_signal_frame_32 __user *rt_sf;
125 
126 	/*
127 	 * Note: the next_sp - sp >= signal frame size check
128 	 * is true when next_sp < sp, for example, when
129 	 * transitioning from an alternate signal stack to the
130 	 * normal stack.
131 	 */
132 	if (next_sp - sp >= sizeof(struct signal_frame_32) &&
133 	    is_sigreturn_32_address(next_ip, sp) &&
134 	    sane_signal_32_frame(sp)) {
135 		sf = (struct signal_frame_32 __user *) (unsigned long) sp;
136 		mctx = &sf->mctx;
137 	}
138 
139 	if (!mctx && next_sp - sp >= sizeof(struct rt_signal_frame_32) &&
140 	    is_rt_sigreturn_32_address(next_ip, sp) &&
141 	    sane_rt_signal_32_frame(sp)) {
142 		rt_sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
143 		mctx = &rt_sf->uc.uc_mcontext;
144 	}
145 
146 	if (!mctx)
147 		return NULL;
148 	return mctx->mc_gregs;
149 }
150 
/*
 * Record a 32-bit user-mode call chain into @entry by walking the user
 * stack's back-chain starting from regs->gpr[1].  When a frame turns
 * out to be a signal frame, the walk restarts from the register state
 * saved in it, so the chain continues into the interrupted context.
 */
void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
			    struct pt_regs *regs)
{
	unsigned int sp, next_sp;
	unsigned int next_ip;
	unsigned int lr;
	long level = 0;	/* number of frames walked since the last restart */
	unsigned int __user *fp, *uregs;

	next_ip = perf_instruction_pointer(regs);
	lr = regs->link;
	sp = regs->gpr[1];
	perf_callchain_store(entry, next_ip);

	while (entry->nr < entry->max_stack) {
		fp = (unsigned int __user *) (unsigned long) sp;
		/* fp[0] is the back-chain pointer to the caller's frame. */
		if (invalid_user_sp(sp) || read_user_stack_32(fp, &next_sp))
			return;
		/*
		 * fp[1] is the saved LR.  In the newest frame (level 0)
		 * the return address is still in regs->link, not yet
		 * stored on the stack, so only read it for older frames.
		 */
		if (level > 0 && read_user_stack_32(&fp[1], &next_ip))
			return;

		/*
		 * Check whether this is a signal frame, trying the saved
		 * LR from the live registers as well for the two newest
		 * frames, where the on-stack LR slot may not be valid.
		 */
		uregs = signal_frame_32_regs(sp, next_sp, next_ip);
		if (!uregs && level <= 1)
			uregs = signal_frame_32_regs(sp, next_sp, lr);
		if (uregs) {
			/*
			 * This looks like a signal frame, so restart
			 * the stack trace with the values in it.
			 */
			if (read_user_stack_32(&uregs[PT_NIP], &next_ip) ||
			    read_user_stack_32(&uregs[PT_LNK], &lr) ||
			    read_user_stack_32(&uregs[PT_R1], &sp))
				return;
			level = 0;
			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
			perf_callchain_store(entry, next_ip);
			continue;
		}

		if (level == 0)
			next_ip = lr;	/* newest frame returns via regs->link */
		perf_callchain_store(entry, next_ip);
		++level;
		sp = next_sp;
	}
}
197