// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 * Copyright (C) 2017 Andes Technology Corporation
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
#include <asm/patch.h>

#ifdef CONFIG_DYNAMIC_FTRACE
void ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
{
	mutex_lock(&text_mutex);

	/*
	 * The code sequences we use for ftrace can't be patched while the
	 * kernel is running, so we need to use stop_machine() to modify them
	 * for now. This doesn't play nice with text_mutex, so we use this
	 * flag to elide the check.
	 */
	riscv_patch_in_stop_machine = true;
}

void ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
{
	riscv_patch_in_stop_machine = false;
	mutex_unlock(&text_mutex);
}

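/*
 * Check that the instructions at @hook_pos match @expected, or a pair of
 * nops when @expected is NULL.
 */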
static int ftrace_check_current_call(unsigned long hook_pos,
				     unsigned int *expected)
{
	unsigned int replaced[2];
	unsigned int nops[2] = {NOP4, NOP4};

	/* we expect nops at the hook position */
	if (!expected)
		expected = nops;

	/*
	 * Read the text we want to modify;
	 * return must be -EFAULT on read error
	 */
	if (copy_from_kernel_nofault(replaced, (void *)hook_pos,
				     MCOUNT_INSN_SIZE))
		return -EFAULT;

	/*
	 * Make sure it is what we expect it to be;
	 * return must be -EINVAL on failed comparison
	 */
	if (memcmp(expected, replaced, sizeof(replaced))) {
		pr_err("%p: expected (%08x %08x) but got (%08x %08x)\n",
		       (void *)hook_pos, expected[0], expected[1], replaced[0],
		       replaced[1]);
		return -EINVAL;
	}

	return 0;
}

static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
				bool enable, bool ra)
{
	unsigned int call[2];
	unsigned int nops[2] = {NOP4, NOP4};

	if (ra)
		make_call_ra(hook_pos, target, call);
	else
		make_call_t0(hook_pos, target, call);

	/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
	if (patch_insn_write((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}

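/*
 * Turn the call site at rec->ip into an auipc/jalr pair calling @addr,
 * using t0 as the link register so that ra (the caller's return address)
 * is left intact for the tracer.
 */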
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int call[2];

	make_call_t0(rec->ip, addr, call);

	if (patch_insn_write((void *)rec->ip, call, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}

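/* Patch the call site at rec->ip back to two nops, disabling tracing. */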
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned int nops[2] = {NOP4, NOP4};

	if (patch_insn_write((void *)rec->ip, nops, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}

/*
 * This is called early on, and isn't wrapped by
 * ftrace_arch_code_modify_{prepare,post_process}() and therefore doesn't hold
 * text_mutex, which triggers a lockdep failure. SMP isn't running so we could
 * just directly poke the text, but it's simpler to just take the lock
 * ourselves.
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	int out;

	mutex_lock(&text_mutex);
	out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	mutex_unlock(&text_mutex);

	if (!mod)
		local_flush_icache_range(rec->ip, rec->ip + MCOUNT_INSN_SIZE);

	return out;
}

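/* Redirect both the ftrace_call and ftrace_regs_call sites to @func. */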
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
				       (unsigned long)func, true, true);
	if (!ret) {
		ret = __ftrace_modify_call((unsigned long)&ftrace_regs_call,
					   (unsigned long)func, true, true);
	}

	return ret;
}

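/* Argument handed to each CPU by stop_machine() while patching. */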
struct ftrace_modify_param {
	int command;
	atomic_t cpu_count;
};

static int __ftrace_modify_code(void *data)
{
	struct ftrace_modify_param *param = data;

	if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
		ftrace_modify_all_code(param->command);
		/*
		 * Make sure the patching store is effective *before* we
		 * increment the counter which releases all waiting CPUs
		 * by using the release variant of atomic increment. The
		 * release pairs with the call to local_flush_icache_all()
		 * on the waiting CPU.
		 */
		atomic_inc_return_release(&param->cpu_count);
	} else {
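		/*
		 * Wait for the patching CPU to finish: it bumps the
		 * counter past num_online_cpus() once the new text is
		 * in place.
		 */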
		while (atomic_read(&param->cpu_count) <= num_online_cpus())
			cpu_relax();
	}

	local_flush_icache_all();

	return 0;
}

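/* Patch the kernel text on all online CPUs at once via stop_machine(). */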
void arch_ftrace_update_code(int command)
{
	struct ftrace_modify_param param = { command, ATOMIC_INIT(0) };

	stop_machine(__ftrace_modify_code, &param, cpu_online_mask);
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
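/*
 * Verify that the site at rec->ip currently calls @old_addr, then patch it
 * to call @addr instead.
 */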
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned int call[2];
	unsigned long caller = rec->ip;
	int ret;

	make_call_t0(caller, old_addr, call);
	ret = ftrace_check_current_call(caller, call);

	if (ret)
		return ret;

	return __ftrace_modify_call(caller, addr, true, false);
}
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Most of this function is copied from arm64.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * We don't suffer access faults, so no extra fault-recovery assembly
	 * is needed here.
	 */
	old = *parent;

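	/*
	 * If the graph tracer accepts this function, divert its return
	 * through return_to_handler by rewriting the saved return address.
	 */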
	if (!function_graph_enter(old, self_addr, frame_pointer, parent))
		*parent = return_hooker;
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
extern void ftrace_graph_regs_call(void);
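/* Patch both graph-caller sites to call prepare_ftrace_return(). */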
int ftrace_enable_ftrace_graph_caller(void)
{
	int ret;

	ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				   (unsigned long)&prepare_ftrace_return, true, true);
	if (ret)
		return ret;

	return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
				    (unsigned long)&prepare_ftrace_return, true, true);
}

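/* Restore nops at both graph-caller sites, disabling the graph tracer. */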
int ftrace_disable_ftrace_graph_caller(void)
{
	int ret;

	ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				   (unsigned long)&prepare_ftrace_return, false, true);
	if (ret)
		return ret;

	return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
				    (unsigned long)&prepare_ftrace_return, false, true);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */