xref: /openbmc/linux/arch/powerpc/perf/callchain_64.c (revision e82c878d)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Performance counter callchain support - powerpc architecture code
 *
 * Copyright © 2009 Paul Mackerras, IBM Corporation.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
#include <asm/pte-walk.h>

#include "callchain.h"

/*
 * On 64-bit we don't want to invoke hash_page on user addresses from
 * interrupt context, so if the access faults, we read the page tables
 * to find which page (if any) is mapped and access it directly.
 */
int read_user_stack_slow(void __user *ptr, void *buf, int nb)
{
	int ret = -EFAULT;
	pgd_t *pgdir;
	pte_t *ptep, pte;
	unsigned int shift;
	unsigned long addr = (unsigned long) ptr;
	unsigned long offset;
	unsigned long pfn, flags;
	void *kaddr;

	pgdir = current->mm->pgd;
	if (!pgdir)
		return -EFAULT;

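	/*
	 * Keep interrupts off across the PTE lookup and the copy so the
	 * page table holding the PTE cannot be freed (or a huge page
	 * split) underneath us; find_current_mm_pte() must be called
	 * with interrupts disabled.
	 */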
	local_irq_save(flags);
	ptep = find_current_mm_pte(pgdir, addr, NULL, &shift);
	if (!ptep)
		goto err_out;
	if (!shift)
		shift = PAGE_SHIFT;

	/* extract the byte offset of addr within its (possibly huge) page */
	offset = addr & ((1UL << shift) - 1);
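	/*
	 * Worked example (illustrative values only): with 64K pages
	 * (shift == 16) the mask is 0xffff, so for
	 * addr == 0x7fff1234a6c8 we get offset == 0xa6c8, and the
	 * memcpy() below reads from kaddr + 0xa6c8 inside the mapped
	 * page.
	 */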

	pte = READ_ONCE(*ptep);
	if (!pte_present(pte) || !pte_user(pte))
		goto err_out;
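	/*
	 * pfn_to_kaddr() below yields a linear-mapping address, which
	 * is only meaningful for RAM-backed pages, hence the
	 * page_is_ram() check before touching it.
	 */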
	pfn = pte_pfn(pte);
	if (!page_is_ram(pfn))
		goto err_out;

	/* no highmem to worry about here */
	kaddr = pfn_to_kaddr(pfn);
	memcpy(buf, kaddr + offset, nb);
	ret = 0;
err_out:
	local_irq_restore(flags);
	return ret;
}

static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
{
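	/* the pointer must lie below TASK_SIZE and be 8-byte aligned */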
	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) ||
	    ((unsigned long)ptr & 7))
		return -EFAULT;

	if (!probe_user_read(ret, ptr, sizeof(*ret)))
		return 0;

	return read_user_stack_slow(ptr, ret, 8);
}
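
/*
 * Minimal usage sketch (illustrative only; it mirrors what the walker
 * further below does): per the 64-bit ELF ABI, a frame's back chain
 * lives at fp[0] and the saved LR at fp[2], so a caller would read
 * them like this:
 *
 *	unsigned long next_sp, ret_addr;
 *	unsigned long __user *fp = (unsigned long __user *) sp;
 *
 *	if (read_user_stack_64(&fp[0], &next_sp) ||
 *	    read_user_stack_64(&fp[2], &ret_addr))
 *		return;		(unreadable frame: stop the walk)
 */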

/*
 * 64-bit user processes use the same stack frame for RT and non-RT signals.
 */
struct signal_frame_64 {
	char		dummy[__SIGNAL_FRAMESIZE];
	struct ucontext	uc;
	unsigned long	unused[2];
	unsigned int	tramp[6];
	struct siginfo	*pinfo;
	void		*puc;
	struct siginfo	info;
	char		abigap[288];
};
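
/*
 * Editorial note: the sigreturn trampoline implied by this layout can
 * live in two places, and is_sigreturn_64_address() below checks for
 * both: either copied into the frame itself, at
 *
 *	sp + offsetof(struct signal_frame_64, tramp)
 *
 * or, for processes that map the vDSO, at
 * vdso_base + vdso64_rt_sigtramp.
 */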

static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
{
	if (nip == fp + offsetof(struct signal_frame_64, tramp))
		return 1;
	if (vdso64_rt_sigtramp && current->mm->context.vdso_base &&
	    nip == current->mm->context.vdso_base + vdso64_rt_sigtramp)
		return 1;
	return 0;
}

/*
 * Do some sanity checking on the signal frame pointed to by sp.
 * We check the pinfo and puc pointers in the frame.
 */
static int sane_signal_64_frame(unsigned long sp)
{
	struct signal_frame_64 __user *sf;
	unsigned long pinfo, puc;

	sf = (struct signal_frame_64 __user *) sp;
	if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) ||
	    read_user_stack_64((unsigned long __user *) &sf->puc, &puc))
		return 0;
	return pinfo == (unsigned long) &sf->info &&
		puc == (unsigned long) &sf->uc;
}
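
/*
 * Editorial sketch of the 64-bit ELF ABI stack frame layout that the
 * walker below relies on (the struct and its field names are
 * illustrative, not part of this file):
 *
 *	struct abi_frame_sketch {
 *		unsigned long back_chain;	(fp[0]: caller's SP)
 *		unsigned long cr_save;		(fp[1]: saved CR)
 *		unsigned long lr_save;		(fp[2]: saved LR, i.e.
 *						 the return address)
 *		...
 *	};
 *
 * Hence fp[0] supplies next_sp and fp[2] supplies next_ip in the loop.
 */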

void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
			    struct pt_regs *regs)
{
	unsigned long sp, next_sp;
	unsigned long next_ip;
	unsigned long lr;
	long level = 0;
	struct signal_frame_64 __user *sigframe;
	unsigned long __user *fp, *uregs;

	next_ip = perf_instruction_pointer(regs);
	lr = regs->link;
	sp = regs->gpr[1];
	perf_callchain_store(entry, next_ip);

	while (entry->nr < entry->max_stack) {
		fp = (unsigned long __user *) sp;
		if (invalid_user_sp(sp) || read_user_stack_64(fp, &next_sp))
			return;
		if (level > 0 && read_user_stack_64(&fp[2], &next_ip))
			return;

		/*
		 * Note: since the subtraction is unsigned, the
		 * next_sp - sp >= signal frame size check is also
		 * true when next_sp < sp, which can happen when
		 * transitioning from an alternate signal stack to the
		 * normal stack.
		 */
		if (next_sp - sp >= sizeof(struct signal_frame_64) &&
		    (is_sigreturn_64_address(next_ip, sp) ||
		     (level <= 1 && is_sigreturn_64_address(lr, sp))) &&
		    sane_signal_64_frame(sp)) {
			/*
			 * This looks like a signal frame
			 */
			sigframe = (struct signal_frame_64 __user *) sp;
			uregs = sigframe->uc.uc_mcontext.gp_regs;
			if (read_user_stack_64(&uregs[PT_NIP], &next_ip) ||
			    read_user_stack_64(&uregs[PT_LNK], &lr) ||
			    read_user_stack_64(&uregs[PT_R1], &sp))
				return;
			level = 0;
			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
			perf_callchain_store(entry, next_ip);
			continue;
		}

		if (level == 0)
			next_ip = lr;
		perf_callchain_store(entry, next_ip);
		++level;
		sp = next_sp;
	}
}
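
/*
 * Editorial usage note: this function is not called directly by core
 * perf; the arch dispatch in arch/powerpc/perf/callchain.c selects the
 * 64-bit or 32-bit walker based on the task's personality, roughly:
 *
 *	if (!is_32bit_task())
 *		perf_callchain_user_64(entry, regs);
 *	else
 *		perf_callchain_user_32(entry, regs);
 */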