xref: /openbmc/linux/arch/riscv/kernel/ftrace.c (revision 3286f88f)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 * Copyright (C) 2017 Andes Technology Corporation
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/memory.h>
#include <asm/cacheflush.h>
#include <asm/patch.h>

#ifdef CONFIG_DYNAMIC_FTRACE
void ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
{
	mutex_lock(&text_mutex);

	/*
	 * The code sequences we use for ftrace can't be patched while the
	 * kernel is running, so we need to use stop_machine() to modify them
	 * for now.  This doesn't play nice with text_mutex, so we use this
	 * flag to elide the check.
	 */
	riscv_patch_in_stop_machine = true;
}

void ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
{
	riscv_patch_in_stop_machine = false;
	mutex_unlock(&text_mutex);
}

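/*
 * Check that the two instructions at @hook_pos match @expected, or two
 * 4-byte nops (NOP4) when @expected is NULL.
 */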
static int ftrace_check_current_call(unsigned long hook_pos,
				     unsigned int *expected)
{
	unsigned int replaced[2];
	unsigned int nops[2] = {NOP4, NOP4};

	/* we expect nops at the hook position */
	if (!expected)
		expected = nops;

	/*
	 * Read the text we want to modify;
	 * return must be -EFAULT on read error
	 */
	if (copy_from_kernel_nofault(replaced, (void *)hook_pos,
			MCOUNT_INSN_SIZE))
		return -EFAULT;

	/*
	 * Make sure it is what we expect it to be;
	 * return must be -EINVAL on failed comparison
	 */
	if (memcmp(expected, replaced, sizeof(replaced))) {
		pr_err("%p: expected (%08x %08x) but got (%08x %08x)\n",
		       (void *)hook_pos, expected[0], expected[1], replaced[0],
		       replaced[1]);
		return -EINVAL;
	}

	return 0;
}

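/*
 * Patch the auipc/jalr pair at @hook_pos to call @target when @enable is
 * true, or to two nops when it is false.  @ra selects the link register
 * of the generated call: the ftrace_*call sites inside the trampolines
 * use a plain ra call, while call sites at function entry use t0 so the
 * caller's return address in ra is preserved.
 */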
static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
				bool enable, bool ra)
{
	unsigned int call[2];
	unsigned int nops[2] = {NOP4, NOP4};

	if (ra)
		make_call_ra(hook_pos, target, call);
	else
		make_call_t0(hook_pos, target, call);

	/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
	if (patch_text_nosync((void *)hook_pos, enable ? call : nops,
			      MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}

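/* Turn the two nops at @rec->ip into an auipc/jalr pair calling @addr. */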
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int call[2];

	make_call_t0(rec->ip, addr, call);

	if (patch_text_nosync((void *)rec->ip, call, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}

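/* Replace the auipc/jalr pair at @rec->ip with two nops. */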
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned int nops[2] = {NOP4, NOP4};

	if (patch_text_nosync((void *)rec->ip, nops, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}

/*
 * This is called early on, and isn't wrapped by
 * ftrace_arch_code_modify_{prepare,post_process}() and therefore doesn't hold
 * text_mutex, which triggers a lockdep failure.  SMP isn't running so we could
 * just directly poke the text, but it's simpler to just take the lock
 * ourselves.
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	int out;

	mutex_lock(&text_mutex);
	out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	mutex_unlock(&text_mutex);

	return out;
}

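/*
 * Point both trampoline call sites (ftrace_call and ftrace_regs_call) at
 * the new tracer function @func.
 */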
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
				       (unsigned long)func, true, true);
	if (!ret) {
		ret = __ftrace_modify_call((unsigned long)&ftrace_regs_call,
					   (unsigned long)func, true, true);
	}

	return ret;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
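/*
 * Redirect a live call site from @old_addr to @addr, after verifying that
 * it currently calls @old_addr.
 */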
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned int call[2];
	unsigned long caller = rec->ip;
	int ret;

	make_call_t0(caller, old_addr, call);
	ret = ftrace_check_current_call(caller, call);

	if (ret)
		return ret;

	return __ftrace_modify_call(caller, addr, true, false);
}
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Most of this function is copied from arm64.
 */
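/*
 * Divert the traced function's return path: if function_graph_enter()
 * accepts this call, rewrite *parent so the function returns to
 * return_to_handler instead of its real caller.
 */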
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * We don't suffer access faults, so no extra fault-recovery assembly
	 * is needed here.
	 */
	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer, parent))
		*parent = return_hooker;
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
extern void ftrace_graph_regs_call(void);
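/*
 * Enable the graph tracer by patching the ftrace_graph_call and
 * ftrace_graph_regs_call trampoline sites to call prepare_ftrace_return().
 */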
int ftrace_enable_ftrace_graph_caller(void)
{
	int ret;

	ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				    (unsigned long)&prepare_ftrace_return, true, true);
	if (ret)
		return ret;

	return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
				    (unsigned long)&prepare_ftrace_return, true, true);
}

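/*
 * Disable the graph tracer by turning both graph call sites back into
 * nops.
 */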
int ftrace_disable_ftrace_graph_caller(void)
{
	int ret;

	ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				    (unsigned long)&prepare_ftrace_return, false, true);
	if (ret)
		return ret;

	return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
				    (unsigned long)&prepare_ftrace_return, false, true);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */