1 /*
2  * arm64 callchain support
3  *
4  * Copyright (C) 2015 ARM Limited
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18 #include <linux/perf_event.h>
19 #include <linux/uaccess.h>
20 
21 #include <asm/pointer_auth.h>
22 #include <asm/stacktrace.h>
23 
/*
 * Layout of an AArch64 user-space frame record as saved on the stack:
 * the previous frame pointer followed by the saved link register.
 * Packed so it can be copied verbatim from user memory.
 */
struct frame_tail {
	struct frame_tail	__user *fp;	/* pointer to the caller's frame record */
	unsigned long		lr;		/* saved return address for this frame */
} __attribute__((packed));
28 
29 /*
30  * Get the return address for a single stackframe and return a pointer to the
31  * next frame tail.
32  */
33 static struct frame_tail __user *
34 user_backtrace(struct frame_tail __user *tail,
35 	       struct perf_callchain_entry_ctx *entry)
36 {
37 	struct frame_tail buftail;
38 	unsigned long err;
39 	unsigned long lr;
40 
41 	/* Also check accessibility of one struct frame_tail beyond */
42 	if (!access_ok(tail, sizeof(buftail)))
43 		return NULL;
44 
45 	pagefault_disable();
46 	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
47 	pagefault_enable();
48 
49 	if (err)
50 		return NULL;
51 
52 	lr = ptrauth_strip_insn_pac(buftail.lr);
53 
54 	perf_callchain_store(entry, lr);
55 
56 	/*
57 	 * Frame pointers should strictly progress back up the stack
58 	 * (towards higher addresses).
59 	 */
60 	if (tail >= buftail.fp)
61 		return NULL;
62 
63 	return buftail.fp;
64 }
65 
66 #ifdef CONFIG_COMPAT
67 /*
68  * The registers we're interested in are at the end of the variable
69  * length saved register structure. The fp points at the end of this
70  * structure so the address of this struct is:
71  * (struct compat_frame_tail *)(xxx->fp)-1
72  *
73  * This code has been adapted from the ARM OProfile support.
74  */
/*
 * Trailing portion of an AArch32 (compat) saved-register structure;
 * the compat fp points just past this, see the comment above.
 */
struct compat_frame_tail {
	compat_uptr_t	fp; /* a (struct compat_frame_tail *) in compat mode */
	u32		sp; /* saved stack pointer */
	u32		lr; /* saved return address for this frame */
} __attribute__((packed));
80 
81 static struct compat_frame_tail __user *
82 compat_user_backtrace(struct compat_frame_tail __user *tail,
83 		      struct perf_callchain_entry_ctx *entry)
84 {
85 	struct compat_frame_tail buftail;
86 	unsigned long err;
87 
88 	/* Also check accessibility of one struct frame_tail beyond */
89 	if (!access_ok(tail, sizeof(buftail)))
90 		return NULL;
91 
92 	pagefault_disable();
93 	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
94 	pagefault_enable();
95 
96 	if (err)
97 		return NULL;
98 
99 	perf_callchain_store(entry, buftail.lr);
100 
101 	/*
102 	 * Frame pointers should strictly progress back up the stack
103 	 * (towards higher addresses).
104 	 */
105 	if (tail + 1 >= (struct compat_frame_tail __user *)
106 			compat_ptr(buftail.fp))
107 		return NULL;
108 
109 	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
110 }
111 #endif /* CONFIG_COMPAT */
112 
/*
 * Walk the user-space callchain described by @regs, recording the
 * sampled PC and then each return address into @entry.
 */
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
			 struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest os callchain now */
		return;
	}

	perf_callchain_store(entry, regs->pc);

	if (!compat_user_mode(regs)) {
		/* AARCH64 mode */
		struct frame_tail __user *tail;

		tail = (struct frame_tail __user *)regs->regs[29];

		/*
		 * Follow the frame-record chain until it ends, the sample
		 * depth limit is reached, or the pointer is not 16-byte
		 * aligned (required of valid AArch64 frame records).
		 */
		while (entry->nr < entry->max_stack &&
		       tail && !((unsigned long)tail & 0xf))
			tail = user_backtrace(tail, entry);
	} else {
#ifdef CONFIG_COMPAT
		/* AARCH32 compat mode */
		struct compat_frame_tail __user *tail;

		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;

		/* Compat frame records only need 4-byte alignment. */
		while ((entry->nr < entry->max_stack) &&
			tail && !((unsigned long)tail & 0x3))
			tail = compat_user_backtrace(tail, entry);
#endif
	}
}
145 
146 /*
147  * Gets called by walk_stackframe() for every stackframe. This will be called
148  * whist unwinding the stackframe and is like a subroutine return so we use
149  * the PC.
150  */
151 static int callchain_trace(struct stackframe *frame, void *data)
152 {
153 	struct perf_callchain_entry_ctx *entry = data;
154 	perf_callchain_store(entry, frame->pc);
155 	return 0;
156 }
157 
/*
 * Walk the kernel stack described by @regs, recording each frame's PC
 * into @entry via callchain_trace().
 */
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
			   struct pt_regs *regs)
{
	struct stackframe frame;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest os callchain now */
		return;
	}

	/* Seed the unwinder from the sampled frame pointer (x29) and PC. */
	frame.fp = regs->regs[29];
	frame.pc = regs->pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Start at the top of the graph tracer's return-address stack. */
	frame.graph = 0;
#endif

	walk_stackframe(current, &frame, callchain_trace, entry);
}
176 
177 unsigned long perf_instruction_pointer(struct pt_regs *regs)
178 {
179 	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
180 		return perf_guest_cbs->get_guest_ip();
181 
182 	return instruction_pointer(regs);
183 }
184 
185 unsigned long perf_misc_flags(struct pt_regs *regs)
186 {
187 	int misc = 0;
188 
189 	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
190 		if (perf_guest_cbs->is_user_mode())
191 			misc |= PERF_RECORD_MISC_GUEST_USER;
192 		else
193 			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
194 	} else {
195 		if (user_mode(regs))
196 			misc |= PERF_RECORD_MISC_USER;
197 		else
198 			misc |= PERF_RECORD_MISC_KERNEL;
199 	}
200 
201 	return misc;
202 }
203