xref: /openbmc/linux/arch/riscv/kernel/ftrace.c (revision 844f5ed5)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2013 Linaro Limited
4  * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
5  * Copyright (C) 2017 Andes Technology Corporation
6  */
7 
8 #include <linux/ftrace.h>
9 #include <linux/uaccess.h>
10 #include <linux/memory.h>
11 #include <asm/cacheflush.h>
12 #include <asm/patch.h>
13 
14 #ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Called by the ftrace core before a batch of text modifications.
 * Takes text_mutex so code patching is serialized against other
 * kernel-text writers; released in the post_process hook below.
 */
void ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
{
	mutex_lock(&text_mutex);
}
19 
/*
 * Called by the ftrace core after a batch of text modifications.
 * Drops the text_mutex taken in ftrace_arch_code_modify_prepare().
 */
void ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
{
	mutex_unlock(&text_mutex);
}
24 
25 static int ftrace_check_current_call(unsigned long hook_pos,
26 				     unsigned int *expected)
27 {
28 	unsigned int replaced[2];
29 	unsigned int nops[2] = {NOP4, NOP4};
30 
31 	/* we expect nops at the hook position */
32 	if (!expected)
33 		expected = nops;
34 
35 	/*
36 	 * Read the text we want to modify;
37 	 * return must be -EFAULT on read error
38 	 */
39 	if (copy_from_kernel_nofault(replaced, (void *)hook_pos,
40 			MCOUNT_INSN_SIZE))
41 		return -EFAULT;
42 
43 	/*
44 	 * Make sure it is what we expect it to be;
45 	 * return must be -EINVAL on failed comparison
46 	 */
47 	if (memcmp(expected, replaced, sizeof(replaced))) {
48 		pr_err("%p: expected (%08x %08x) but got (%08x %08x)\n",
49 		       (void *)hook_pos, expected[0], expected[1], replaced[0],
50 		       replaced[1]);
51 		return -EINVAL;
52 	}
53 
54 	return 0;
55 }
56 
57 static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
58 				bool enable, bool ra)
59 {
60 	unsigned int call[2];
61 	unsigned int nops[2] = {NOP4, NOP4};
62 
63 	if (ra)
64 		make_call_ra(hook_pos, target, call);
65 	else
66 		make_call_t0(hook_pos, target, call);
67 
68 	/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
69 	if (patch_text_nosync
70 	    ((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
71 		return -EPERM;
72 
73 	return 0;
74 }
75 
76 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
77 {
78 	unsigned int call[2];
79 
80 	make_call_t0(rec->ip, addr, call);
81 
82 	if (patch_text_nosync((void *)rec->ip, call, MCOUNT_INSN_SIZE))
83 		return -EPERM;
84 
85 	return 0;
86 }
87 
88 int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
89 		    unsigned long addr)
90 {
91 	unsigned int nops[2] = {NOP4, NOP4};
92 
93 	if (patch_text_nosync((void *)rec->ip, nops, MCOUNT_INSN_SIZE))
94 		return -EPERM;
95 
96 	return 0;
97 }
98 
99 /*
100  * This is called early on, and isn't wrapped by
 * ftrace_arch_code_modify_{prepare,post_process}() and therefore doesn't hold
102  * text_mutex, which triggers a lockdep failure.  SMP isn't running so we could
103  * just directly poke the text, but it's simpler to just take the lock
104  * ourselves.
105  */
106 int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
107 {
108 	int out;
109 
110 	ftrace_arch_code_modify_prepare();
111 	out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
112 	ftrace_arch_code_modify_post_process();
113 
114 	return out;
115 }
116 
117 int ftrace_update_ftrace_func(ftrace_func_t func)
118 {
119 	int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
120 				       (unsigned long)func, true, true);
121 	if (!ret) {
122 		ret = __ftrace_modify_call((unsigned long)&ftrace_regs_call,
123 					   (unsigned long)func, true, true);
124 	}
125 
126 	return ret;
127 }
128 #endif
129 
130 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
131 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
132 		       unsigned long addr)
133 {
134 	unsigned int call[2];
135 	unsigned long caller = rec->ip;
136 	int ret;
137 
138 	make_call_t0(caller, old_addr, call);
139 	ret = ftrace_check_current_call(caller, call);
140 
141 	if (ret)
142 		return ret;
143 
144 	return __ftrace_modify_call(caller, addr, true, false);
145 }
146 #endif
147 
148 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
149 /*
150  * Most of this function is copied from arm64.
151  */
152 void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
153 			   unsigned long frame_pointer)
154 {
155 	unsigned long return_hooker = (unsigned long)&return_to_handler;
156 	unsigned long old;
157 
158 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
159 		return;
160 
161 	/*
162 	 * We don't suffer access faults, so no extra fault-recovery assembly
163 	 * is needed here.
164 	 */
165 	old = *parent;
166 
167 	if (!function_graph_enter(old, self_addr, frame_pointer, parent))
168 		*parent = return_hooker;
169 }
170 
171 #ifdef CONFIG_DYNAMIC_FTRACE
172 extern void ftrace_graph_call(void);
173 extern void ftrace_graph_regs_call(void);
174 int ftrace_enable_ftrace_graph_caller(void)
175 {
176 	int ret;
177 
178 	ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
179 				    (unsigned long)&prepare_ftrace_return, true, true);
180 	if (ret)
181 		return ret;
182 
183 	return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
184 				    (unsigned long)&prepare_ftrace_return, true, true);
185 }
186 
187 int ftrace_disable_ftrace_graph_caller(void)
188 {
189 	int ret;
190 
191 	ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
192 				    (unsigned long)&prepare_ftrace_return, false, true);
193 	if (ret)
194 		return ret;
195 
196 	return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
197 				    (unsigned long)&prepare_ftrace_return, false, true);
198 }
199 #endif /* CONFIG_DYNAMIC_FTRACE */
200 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
201