// SPDX-License-Identifier: GPL-2.0-only
/*
 * arm64 callchain support
 *
 * Copyright (C) 2015 ARM Limited
 */
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>

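/*
 * With the frame-pointer ABI, an AArch64 function that sets up a frame
 * stores a "frame record" at the address held in x29:
 *
 *	x29 -> +-------------------+
 *	       | caller's x29 (fp) |  -> next frame record
 *	       +-------------------+
 *	       | saved x30 (lr)    |  return address
 *	       +-------------------+
 *
 * struct frame_tail mirrors that in-memory layout so one record at a time
 * can be copied from the user stack.
 */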
struct frame_tail {
	struct frame_tail	__user *fp;
	unsigned long		lr;
} __attribute__((packed));

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry_ctx *entry)
{
	struct frame_tail buftail;
	unsigned long err;
	unsigned long lr;

	/* Check that the frame record lies within the user address range */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

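	/*
	 * Callchain sampling runs from the PMU interrupt and may be in a
	 * context where taking a page fault is not allowed, so use a
	 * non-faulting copy; if the record is not resident the copy simply
	 * fails and we stop the walk.
	 */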
	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

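	/*
	 * The saved link register may carry a pointer authentication code in
	 * its upper bits; strip it so that a canonical return address is
	 * reported to perf.
	 */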
	lr = ptrauth_strip_insn_pac(buftail.lr);

	perf_callchain_store(entry, lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp;
}

#ifdef CONFIG_COMPAT
/*
 * The registers we're interested in are at the end of the variable-length
 * saved register structure. The fp points at the end of this structure, so
 * the address of this struct is:
 * (struct compat_frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct compat_frame_tail {
	compat_uptr_t	fp; /* a (struct compat_frame_tail *) in compat mode */
	u32		sp;
	u32		lr;
} __attribute__((packed));

static struct compat_frame_tail __user *
compat_user_backtrace(struct compat_frame_tail __user *tail,
		      struct perf_callchain_entry_ctx *entry)
{
	struct compat_frame_tail buftail;
	unsigned long err;

	/* Check that the frame record lies within the user address range */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

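	/*
	 * AArch32 has no pointer authentication, so the saved lr can be
	 * recorded as-is.
	 */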
	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= (struct compat_frame_tail __user *)
			compat_ptr(buftail.fp))
		return NULL;

	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
}
#endif /* CONFIG_COMPAT */

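/*
 * Record the user-space call chain by walking the frame-pointer chain,
 * starting from the interrupted pc. Guest callchains are not captured here.
 */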
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
			 struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest OS callchains yet */
		return;
	}

	perf_callchain_store(entry, regs->pc);

	if (!compat_user_mode(regs)) {
		/* AARCH64 mode */
		struct frame_tail __user *tail;

		tail = (struct frame_tail __user *)regs->regs[29];

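		/*
		 * Walk the chain of frame records until the entry budget is
		 * exhausted, the chain ends, or the frame pointer is not
		 * 16-byte aligned (only well-aligned frame records are
		 * trusted).
		 */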
		while (entry->nr < entry->max_stack &&
		       tail && !((unsigned long)tail & 0xf))
			tail = user_backtrace(tail, entry);
	} else {
#ifdef CONFIG_COMPAT
		/* AARCH32 compat mode */
		struct compat_frame_tail __user *tail;

		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;

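		/*
		 * regs->compat_fp is the AArch32 frame pointer (r11). The
		 * compat frame tail only needs to be 4-byte aligned, hence
		 * the weaker alignment check below.
		 */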
		while ((entry->nr < entry->max_stack) &&
			tail && !((unsigned long)tail & 0x3))
			tail = compat_user_backtrace(tail, entry);
#endif
	}
}

/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int callchain_trace(struct stackframe *frame, void *data)
{
	struct perf_callchain_entry_ctx *entry = data;

	perf_callchain_store(entry, frame->pc);
	return 0;
}

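/*
 * Record the kernel call chain by walking the current task's kernel stack,
 * starting from the frame pointer and pc captured in the exception regs.
 * callchain_trace() stores the pc of every frame visited.
 */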
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
			   struct pt_regs *regs)
{
	struct stackframe frame;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest OS callchains yet */
		return;
	}

	frame.fp = regs->regs[29];
	frame.pc = regs->pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	frame.graph = 0;
#endif

	walk_stackframe(current, &frame, callchain_trace, entry);
}

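/*
 * Report the instruction pointer for a sample: the guest ip if the PMU
 * interrupt arrived while a guest was running, otherwise the pc from the
 * host pt_regs.
 */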
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		return perf_guest_cbs->get_guest_ip();

	return instruction_pointer(regs);
}

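/*
 * Classify the sample origin (guest vs host, user vs kernel) so that perf
 * can attribute it with the right PERF_RECORD_MISC_* flag.
 */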
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	int misc = 0;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		if (perf_guest_cbs->is_user_mode())
			misc |= PERF_RECORD_MISC_GUEST_USER;
		else
			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (user_mode(regs))
			misc |= PERF_RECORD_MISC_USER;
		else
			misc |= PERF_RECORD_MISC_KERNEL;
	}

	return misc;
}