xref: /openbmc/linux/arch/riscv/kernel/ftrace.c (revision 1f9b7512)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (C) 2013 Linaro Limited
4  * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
5  * Copyright (C) 2017 Andes Technology Corporation
6  */
7 
8 #include <linux/ftrace.h>
9 #include <linux/uaccess.h>
10 #include <linux/memory.h>
11 #include <asm/cacheflush.h>
12 #include <asm/patch.h>
13 
14 #ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Called by the ftrace core before any code modification; takes text_mutex
 * so that instruction patching is serialized against other kernel-text
 * writers.  Always returns 0.
 */
int ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
{
	mutex_lock(&text_mutex);
	return 0;
}
20 
/*
 * Called by the ftrace core after code modification is complete; drops the
 * text_mutex taken in ftrace_arch_code_modify_prepare().  Always returns 0.
 */
int ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
{
	mutex_unlock(&text_mutex);
	return 0;
}
26 
27 static int ftrace_check_current_call(unsigned long hook_pos,
28 				     unsigned int *expected)
29 {
30 	unsigned int replaced[2];
31 	unsigned int nops[2] = {NOP4, NOP4};
32 
33 	/* we expect nops at the hook position */
34 	if (!expected)
35 		expected = nops;
36 
37 	/*
38 	 * Read the text we want to modify;
39 	 * return must be -EFAULT on read error
40 	 */
41 	if (probe_kernel_read(replaced, (void *)hook_pos, MCOUNT_INSN_SIZE))
42 		return -EFAULT;
43 
44 	/*
45 	 * Make sure it is what we expect it to be;
46 	 * return must be -EINVAL on failed comparison
47 	 */
48 	if (memcmp(expected, replaced, sizeof(replaced))) {
49 		pr_err("%p: expected (%08x %08x) but got (%08x %08x)\n",
50 		       (void *)hook_pos, expected[0], expected[1], replaced[0],
51 		       replaced[1]);
52 		return -EINVAL;
53 	}
54 
55 	return 0;
56 }
57 
58 static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
59 				bool enable)
60 {
61 	unsigned int call[2];
62 	unsigned int nops[2] = {NOP4, NOP4};
63 
64 	make_call(hook_pos, target, call);
65 
66 	/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
67 	if (patch_text_nosync
68 	    ((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
69 		return -EPERM;
70 
71 	return 0;
72 }
73 
74 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
75 {
76 	int ret = ftrace_check_current_call(rec->ip, NULL);
77 
78 	if (ret)
79 		return ret;
80 
81 	return __ftrace_modify_call(rec->ip, addr, true);
82 }
83 
84 int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
85 		    unsigned long addr)
86 {
87 	unsigned int call[2];
88 	int ret;
89 
90 	make_call(rec->ip, addr, call);
91 	ret = ftrace_check_current_call(rec->ip, call);
92 
93 	if (ret)
94 		return ret;
95 
96 	return __ftrace_modify_call(rec->ip, addr, false);
97 }
98 
99 int ftrace_update_ftrace_func(ftrace_func_t func)
100 {
101 	int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
102 				       (unsigned long)func, true);
103 	if (!ret) {
104 		ret = __ftrace_modify_call((unsigned long)&ftrace_regs_call,
105 					   (unsigned long)func, true);
106 	}
107 
108 	return ret;
109 }
110 
/* No arch-specific initialization is needed for dynamic ftrace on RISC-V. */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
115 #endif
116 
117 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
118 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
119 		       unsigned long addr)
120 {
121 	unsigned int call[2];
122 	int ret;
123 
124 	make_call(rec->ip, old_addr, call);
125 	ret = ftrace_check_current_call(rec->ip, call);
126 
127 	if (ret)
128 		return ret;
129 
130 	return __ftrace_modify_call(rec->ip, addr, true);
131 }
132 #endif
133 
134 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
135 /*
136  * Most of this function is copied from arm64.
137  */
138 void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
139 			   unsigned long frame_pointer)
140 {
141 	unsigned long return_hooker = (unsigned long)&return_to_handler;
142 	unsigned long old;
143 
144 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
145 		return;
146 
147 	/*
148 	 * We don't suffer access faults, so no extra fault-recovery assembly
149 	 * is needed here.
150 	 */
151 	old = *parent;
152 
153 	if (!function_graph_enter(old, self_addr, frame_pointer, parent))
154 		*parent = return_hooker;
155 }
156 
157 #ifdef CONFIG_DYNAMIC_FTRACE
158 extern void ftrace_graph_call(void);
159 int ftrace_enable_ftrace_graph_caller(void)
160 {
161 	unsigned int call[2];
162 	static int init_graph = 1;
163 	int ret;
164 
165 	make_call(&ftrace_graph_call, &ftrace_stub, call);
166 
167 	/*
168 	 * When enabling graph tracer for the first time, ftrace_graph_call
169 	 * should contains a call to ftrace_stub.  Once it has been disabled,
170 	 * the 8-bytes at the position becomes NOPs.
171 	 */
172 	if (init_graph) {
173 		ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
174 						call);
175 		init_graph = 0;
176 	} else {
177 		ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
178 						NULL);
179 	}
180 
181 	if (ret)
182 		return ret;
183 
184 	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
185 				    (unsigned long)&prepare_ftrace_return, true);
186 }
187 
188 int ftrace_disable_ftrace_graph_caller(void)
189 {
190 	unsigned int call[2];
191 	int ret;
192 
193 	make_call(&ftrace_graph_call, &prepare_ftrace_return, call);
194 
195 	/*
196 	 * This is to make sure that ftrace_enable_ftrace_graph_caller
197 	 * did the right thing.
198 	 */
199 	ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
200 					call);
201 
202 	if (ret)
203 		return ret;
204 
205 	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
206 				    (unsigned long)&prepare_ftrace_return, false);
207 }
208 #endif /* CONFIG_DYNAMIC_FTRACE */
209 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
210