xref: /openbmc/linux/arch/powerpc/perf/callchain.c (revision d9e32672)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Performance counter callchain support - powerpc architecture code
 *
 * Copyright © 2009 Paul Mackerras, IBM Corporation.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
#ifdef CONFIG_PPC64
#include "../kernel/ppc32.h"
#endif
#include <asm/pte-walk.h>

/*
 * Is sp valid as the address of the next kernel stack frame after prev_sp?
 * The next frame may be in a different stack area but should not go
 * back down in the same stack area.
 */
static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
{
	if (sp & 0xf)
		return 0;		/* must be 16-byte aligned */
	if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
		return 0;
	if (sp >= prev_sp + STACK_FRAME_MIN_SIZE)
		return 1;
	/*
	 * sp could decrease when we jump off an interrupt stack
	 * back to the regular process stack.
	 */
	if ((sp & ~(THREAD_SIZE - 1)) != (prev_sp & ~(THREAD_SIZE - 1)))
		return 1;
	return 0;
}
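
/*
 * For reference, the frame layout the walkers below rely on (standard
 * powerpc ABI, offsets in units of unsigned long):
 *
 *	fp[0]                    back chain, i.e. the caller's stack pointer
 *	fp[STACK_FRAME_LR_SAVE]  saved LR (word 2 on 64-bit, word 1 on 32-bit)
 *
 * Each walker follows the back chain from regs->gpr[1] and picks the
 * return address out of each frame's LR save slot.
 */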

void
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
	unsigned long sp, next_sp;
	unsigned long next_ip;
	unsigned long lr;
	long level = 0;
	unsigned long *fp;

	lr = regs->link;
	sp = regs->gpr[1];
	perf_callchain_store(entry, perf_instruction_pointer(regs));

	if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
		return;

	for (;;) {
		fp = (unsigned long *) sp;
		next_sp = fp[0];

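		/*
		 * Exception entry saves a pt_regs in the frame it
		 * creates and tags it with STACK_FRAME_REGS_MARKER
		 * (ASCII "regshere"); the two checks below together
		 * identify such a frame.
		 */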
		if (next_sp == sp + STACK_INT_FRAME_SIZE &&
		    fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			/*
			 * This looks like an interrupt frame for an
			 * interrupt that occurred in the kernel
			 */
			regs = (struct pt_regs *)(sp + STACK_FRAME_OVERHEAD);
			next_ip = regs->nip;
			lr = regs->link;
			level = 0;
			perf_callchain_store_context(entry, PERF_CONTEXT_KERNEL);

		} else {
			if (level == 0)
				next_ip = lr;
			else
				next_ip = fp[STACK_FRAME_LR_SAVE];

			/*
			 * We can't tell which of the first two addresses
			 * we get are valid, but we can filter out the
			 * obviously bogus ones here.  We replace them
			 * with 0 rather than removing them entirely so
			 * that userspace can tell which is which.
			 */
			if ((level == 1 && next_ip == lr) ||
			    (level <= 1 && !kernel_text_address(next_ip)))
				next_ip = 0;

			++level;
		}

		perf_callchain_store(entry, next_ip);
		if (!valid_next_sp(next_sp, sp))
			return;
		sp = next_sp;
	}
}

#ifdef CONFIG_PPC64
/*
 * On 64-bit we don't want to invoke hash_page on user addresses from
 * interrupt context, so if the access faults, we read the page tables
 * to find which page (if any) is mapped and access it directly.
 */
static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
{
	int ret = -EFAULT;
	pgd_t *pgdir;
	pte_t *ptep, pte;
	unsigned shift;
	unsigned long addr = (unsigned long) ptr;
	unsigned long offset;
	unsigned long pfn, flags;
	void *kaddr;

	pgdir = current->mm->pgd;
	if (!pgdir)
		return -EFAULT;

	local_irq_save(flags);
	ptep = find_current_mm_pte(pgdir, addr, NULL, &shift);
	if (!ptep)
		goto err_out;
	if (!shift)
		shift = PAGE_SHIFT;

	/* offset of the address within the (possibly huge) page */
	offset = addr & ((1UL << shift) - 1);

	pte = READ_ONCE(*ptep);
	if (!pte_present(pte) || !pte_user(pte))
		goto err_out;
	pfn = pte_pfn(pte);
	if (!page_is_ram(pfn))
		goto err_out;

	/* no highmem to worry about here */
	kaddr = pfn_to_kaddr(pfn);
	memcpy(buf, kaddr + offset, nb);
	ret = 0;
err_out:
	local_irq_restore(flags);
	return ret;
}
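
/*
 * Fast path first: try a plain __get_user_inatomic() with page faults
 * disabled, and fall back to the page-table walk above only when that
 * access faults.
 */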

static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
{
	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) ||
	    ((unsigned long)ptr & 7))
		return -EFAULT;

	pagefault_disable();
	if (!__get_user_inatomic(*ret, ptr)) {
		pagefault_enable();
		return 0;
	}
	pagefault_enable();

	return read_user_stack_slow(ptr, ret, 8);
}

static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
{
	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
	    ((unsigned long)ptr & 3))
		return -EFAULT;

	pagefault_disable();
	if (!__get_user_inatomic(*ret, ptr)) {
		pagefault_enable();
		return 0;
	}
	pagefault_enable();

	return read_user_stack_slow(ptr, ret, 4);
}

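/*
 * Reject a null or misaligned stack pointer, and make sure the frame
 * words we are about to read (the back chain and LR save slot) sit
 * below the top of the task's address space.
 */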
static inline int valid_user_sp(unsigned long sp, int is_64)
{
	if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32)
		return 0;
	return 1;
}

/*
 * 64-bit user processes use the same stack frame for RT and non-RT signals.
 */
struct signal_frame_64 {
	char		dummy[__SIGNAL_FRAMESIZE];
	struct ucontext	uc;
	unsigned long	unused[2];
	unsigned int	tramp[6];
	struct siginfo	*pinfo;
	void		*puc;
	struct siginfo	info;
	char		abigap[288];
};
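
/*
 * This mirrors the frame that 64-bit signal delivery lays out on the
 * user stack (see arch/powerpc/kernel/signal_64.c).  The sigreturn
 * trampoline may run either from the frame's tramp[] slot or from the
 * vDSO, so is_sigreturn_64_address() accepts both.
 */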

static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
{
	if (nip == fp + offsetof(struct signal_frame_64, tramp))
		return 1;
	if (vdso64_rt_sigtramp && current->mm->context.vdso_base &&
	    nip == current->mm->context.vdso_base + vdso64_rt_sigtramp)
		return 1;
	return 0;
}

/*
 * Do some sanity checking on the signal frame pointed to by sp.
 * We check the pinfo and puc pointers in the frame.
 */
static int sane_signal_64_frame(unsigned long sp)
{
	struct signal_frame_64 __user *sf;
	unsigned long pinfo, puc;

	sf = (struct signal_frame_64 __user *) sp;
	if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) ||
	    read_user_stack_64((unsigned long __user *) &sf->puc, &puc))
		return 0;
	return pinfo == (unsigned long) &sf->info &&
		puc == (unsigned long) &sf->uc;
}

static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
				   struct pt_regs *regs)
{
	unsigned long sp, next_sp;
	unsigned long next_ip;
	unsigned long lr;
	long level = 0;
	struct signal_frame_64 __user *sigframe;
	unsigned long __user *fp, *uregs;

	next_ip = perf_instruction_pointer(regs);
	lr = regs->link;
	sp = regs->gpr[1];
	perf_callchain_store(entry, next_ip);

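	/* fp[0] is the back chain; fp[2] is the LR save slot of a 64-bit frame. */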
	while (entry->nr < entry->max_stack) {
		fp = (unsigned long __user *) sp;
		if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
			return;
		if (level > 0 && read_user_stack_64(&fp[2], &next_ip))
			return;

		/*
		 * Note: because the arithmetic is unsigned, the
		 * next_sp - sp >= signal frame size check is also
		 * true when next_sp < sp, which can happen when
		 * transitioning from an alternate signal stack to the
		 * normal stack.
		 */
		if (next_sp - sp >= sizeof(struct signal_frame_64) &&
		    (is_sigreturn_64_address(next_ip, sp) ||
		     (level <= 1 && is_sigreturn_64_address(lr, sp))) &&
		    sane_signal_64_frame(sp)) {
			/*
			 * This looks like a signal frame
			 */
			sigframe = (struct signal_frame_64 __user *) sp;
			uregs = sigframe->uc.uc_mcontext.gp_regs;
			if (read_user_stack_64(&uregs[PT_NIP], &next_ip) ||
			    read_user_stack_64(&uregs[PT_LNK], &lr) ||
			    read_user_stack_64(&uregs[PT_R1], &sp))
				return;
			level = 0;
			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
			perf_callchain_store(entry, next_ip);
			continue;
		}

		if (level == 0)
			next_ip = lr;
		perf_callchain_store(entry, next_ip);
		++level;
		sp = next_sp;
	}
}

static inline int current_is_64bit(void)
{
	/*
	 * We can't use test_thread_flag() here because we may be on an
	 * interrupt stack, and the thread flags don't get copied over
	 * from the thread_info on the main stack to the interrupt stack.
	 */
	return !test_ti_thread_flag(task_thread_info(current), TIF_32BIT);
}

#else  /* CONFIG_PPC64 */
/*
 * On 32-bit we just access the address and let hash_page create a
 * HPTE if necessary, so there is no need to fall back to reading
 * the page tables.  Since this is called at interrupt level,
 * do_page_fault() won't treat a DSI as a page fault.
 */
static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
{
	int rc;

	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
	    ((unsigned long)ptr & 3))
		return -EFAULT;

	pagefault_disable();
	rc = __get_user_inatomic(*ret, ptr);
	pagefault_enable();

	return rc;
}

static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
					  struct pt_regs *regs)
{
}

static inline int current_is_64bit(void)
{
	return 0;
}

static inline int valid_user_sp(unsigned long sp, int is_64)
{
	if (!sp || (sp & 7) || sp > TASK_SIZE - 32)
		return 0;
	return 1;
}

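/* On 32-bit kernels the compat "32" names are simply the native ones. */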
#define __SIGNAL_FRAMESIZE32	__SIGNAL_FRAMESIZE
#define sigcontext32		sigcontext
#define mcontext32		mcontext
#define ucontext32		ucontext
#define compat_siginfo_t	struct siginfo

#endif /* CONFIG_PPC64 */

/*
 * Layout for non-RT signal frames
 */
struct signal_frame_32 {
	char			dummy[__SIGNAL_FRAMESIZE32];
	struct sigcontext32	sctx;
	struct mcontext32	mctx;
	int			abigap[56];
};

/*
 * Layout for RT signal frames
 */
struct rt_signal_frame_32 {
	char			dummy[__SIGNAL_FRAMESIZE32 + 16];
	compat_siginfo_t	info;
	struct ucontext32	uc;
	int			abigap[56];
};
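
/*
 * As on 64-bit, these layouts mirror what 32-bit signal delivery puts
 * on the user stack (see arch/powerpc/kernel/signal_32.c), including
 * the sigreturn trampoline in mc_pad[].
 */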

static int is_sigreturn_32_address(unsigned int nip, unsigned int fp)
{
	if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad))
		return 1;
	if (vdso32_sigtramp && current->mm->context.vdso_base &&
	    nip == current->mm->context.vdso_base + vdso32_sigtramp)
		return 1;
	return 0;
}

static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp)
{
	if (nip == fp + offsetof(struct rt_signal_frame_32,
				 uc.uc_mcontext.mc_pad))
		return 1;
	if (vdso32_rt_sigtramp && current->mm->context.vdso_base &&
	    nip == current->mm->context.vdso_base + vdso32_rt_sigtramp)
		return 1;
	return 0;
}

static int sane_signal_32_frame(unsigned int sp)
{
	struct signal_frame_32 __user *sf;
	unsigned int regs;

	sf = (struct signal_frame_32 __user *) (unsigned long) sp;
	if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, &regs))
		return 0;
	return regs == (unsigned long) &sf->mctx;
}

static int sane_rt_signal_32_frame(unsigned int sp)
{
	struct rt_signal_frame_32 __user *sf;
	unsigned int regs;

	sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
	if (read_user_stack_32((unsigned int __user *) &sf->uc.uc_regs, &regs))
		return 0;
	return regs == (unsigned long) &sf->uc.uc_mcontext;
}

static unsigned int __user *signal_frame_32_regs(unsigned int sp,
				unsigned int next_sp, unsigned int next_ip)
{
	struct mcontext32 __user *mctx = NULL;
	struct signal_frame_32 __user *sf;
	struct rt_signal_frame_32 __user *rt_sf;

	/*
	 * Note: because the arithmetic is unsigned, the
	 * next_sp - sp >= signal frame size check is also true
	 * when next_sp < sp, for example, when transitioning from
	 * an alternate signal stack to the normal stack.
	 */
	if (next_sp - sp >= sizeof(struct signal_frame_32) &&
	    is_sigreturn_32_address(next_ip, sp) &&
	    sane_signal_32_frame(sp)) {
		sf = (struct signal_frame_32 __user *) (unsigned long) sp;
		mctx = &sf->mctx;
	}

	if (!mctx && next_sp - sp >= sizeof(struct rt_signal_frame_32) &&
	    is_rt_sigreturn_32_address(next_ip, sp) &&
	    sane_rt_signal_32_frame(sp)) {
		rt_sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
		mctx = &rt_sf->uc.uc_mcontext;
	}

	if (!mctx)
		return NULL;
	return mctx->mc_gregs;
}

static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
				   struct pt_regs *regs)
{
	unsigned int sp, next_sp;
	unsigned int next_ip;
	unsigned int lr;
	long level = 0;
	unsigned int __user *fp, *uregs;

	next_ip = perf_instruction_pointer(regs);
	lr = regs->link;
	sp = regs->gpr[1];
	perf_callchain_store(entry, next_ip);

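	/* Here the LR save slot is fp[1], per the 32-bit ABI, rather than fp[2]. */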
	while (entry->nr < entry->max_stack) {
		fp = (unsigned int __user *) (unsigned long) sp;
		if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
			return;
		if (level > 0 && read_user_stack_32(&fp[1], &next_ip))
			return;

		uregs = signal_frame_32_regs(sp, next_sp, next_ip);
		if (!uregs && level <= 1)
			uregs = signal_frame_32_regs(sp, next_sp, lr);
		if (uregs) {
			/*
			 * This looks like a signal frame, so restart
			 * the stack trace with the values in it.
			 */
			if (read_user_stack_32(&uregs[PT_NIP], &next_ip) ||
			    read_user_stack_32(&uregs[PT_LNK], &lr) ||
			    read_user_stack_32(&uregs[PT_R1], &sp))
				return;
			level = 0;
			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
			perf_callchain_store(entry, next_ip);
			continue;
		}

		if (level == 0)
			next_ip = lr;
		perf_callchain_store(entry, next_ip);
		++level;
		sp = next_sp;
	}
}

void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
	if (current_is_64bit())
		perf_callchain_user_64(entry, regs);
	else
		perf_callchain_user_32(entry, regs);
}
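
/*
 * Both callchain hooks above are invoked by the generic perf core via
 * get_perf_callchain() (kernel/events/callchain.c) when a sample
 * requests PERF_SAMPLE_CALLCHAIN, e.g. "perf record -g".
 */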