xref: /openbmc/linux/arch/riscv/kernel/ftrace.c (revision 29c37341)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 * Copyright (C) 2017 Andes Technology Corporation
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/memory.h>
#include <asm/cacheflush.h>
#include <asm/patch.h>

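/*
 * Dynamic ftrace on RISC-V patches an 8-byte (MCOUNT_INSN_SIZE) slot at each
 * hook position: either an auipc+jalr pair that calls into the ftrace code,
 * or two NOP4 instructions when tracing is disabled.  make_call() builds the
 * auipc+jalr pair for a given caller/callee distance; see asm/ftrace.h for
 * the exact encodings of make_call() and NOP4.
 */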
#ifdef CONFIG_DYNAMIC_FTRACE
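/*
 * The ftrace core calls these two hooks around each batch of code
 * modifications; holding text_mutex serializes ftrace patching against other
 * kernel text modifications.
 */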
int ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
{
	mutex_lock(&text_mutex);
	return 0;
}

int ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
{
	mutex_unlock(&text_mutex);
	return 0;
}

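/*
 * Check that the instruction pair currently at @hook_pos matches what we
 * expect to find there: the auipc+jalr call passed in via @expected, or two
 * NOP4s when @expected is NULL.
 */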
static int ftrace_check_current_call(unsigned long hook_pos,
				     unsigned int *expected)
{
	unsigned int replaced[2];
	unsigned int nops[2] = {NOP4, NOP4};

	/* we expect nops at the hook position */
	if (!expected)
		expected = nops;

	/*
	 * Read the text we want to modify;
	 * return must be -EFAULT on read error
	 */
	if (copy_from_kernel_nofault(replaced, (void *)hook_pos,
			MCOUNT_INSN_SIZE))
		return -EFAULT;

	/*
	 * Make sure it is what we expect it to be;
	 * return must be -EINVAL on failed comparison
	 */
	if (memcmp(expected, replaced, sizeof(replaced))) {
		pr_err("%p: expected (%08x %08x) but got (%08x %08x)\n",
		       (void *)hook_pos, expected[0], expected[1], replaced[0],
		       replaced[1]);
		return -EINVAL;
	}

	return 0;
}

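/*
 * Write either a call to @target (enable) or two NOP4s (disable) at
 * @hook_pos.  patch_text_nosync() writes the instructions and flushes the
 * icache without the stop_machine() serialization used by patch_text();
 * callers rely on the ftrace core's own synchronization and on text_mutex
 * taken above.
 */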
static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
				bool enable)
{
	unsigned int call[2];
	unsigned int nops[2] = {NOP4, NOP4};

	make_call(hook_pos, target, call);

	/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
	if (patch_text_nosync((void *)hook_pos, enable ? call : nops,
			      MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}

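/*
 * Called by the ftrace core to turn a traced call site on: the site must
 * currently hold NOPs, and is rewritten into a call to @addr (normally the
 * ftrace trampoline).
 */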
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	int ret = ftrace_check_current_call(rec->ip, NULL);

	if (ret)
		return ret;

	return __ftrace_modify_call(rec->ip, addr, true);
}

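/*
 * Called by the ftrace core to turn a call site off: the site must currently
 * hold a call to @addr, and is rewritten back into NOPs.  @mod is unused on
 * RISC-V.
 */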
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned int call[2];
	int ret;

	make_call(rec->ip, addr, call);
	ret = ftrace_check_current_call(rec->ip, call);

	if (ret)
		return ret;

	return __ftrace_modify_call(rec->ip, addr, false);
}

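/*
 * Point the ftrace_call and ftrace_regs_call sites inside the ftrace
 * trampolines at the new tracer function @func.
 */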
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
				       (unsigned long)func, true);
	if (!ret) {
		ret = __ftrace_modify_call((unsigned long)&ftrace_regs_call,
					   (unsigned long)func, true);
	}

	return ret;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
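/*
 * With DYNAMIC_FTRACE_WITH_REGS the core may redirect a call site from one
 * target to another (e.g. when switching trampolines): verify the site
 * currently calls @old_addr, then rewrite it to call @addr.
 */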
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned int call[2];
	int ret;

	make_call(rec->ip, old_addr, call);
	ret = ftrace_check_current_call(rec->ip, call);

	if (ret)
		return ret;

	return __ftrace_modify_call(rec->ip, addr, true);
}
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
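/*
 * Hook the return path of a traced function: @parent points at the slot
 * holding the function's return address.  If the graph tracer accepts the
 * entry, the saved return address is replaced with return_to_handler so the
 * function's exit can be recorded as well.
 */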
/*
 * Most of this function is copied from arm64.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * We don't suffer access faults, so no extra fault-recovery assembly
	 * is needed here.
	 */
	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer, parent))
		*parent = return_hooker;
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
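/*
 * ftrace_graph_call is a patchable call site inside the ftrace trampoline.
 * Enabling the graph tracer points it at prepare_ftrace_return(); disabling
 * it replaces the call with NOPs.
 */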
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned int call[2];
	static int init_graph = 1;
	int ret;

	make_call(&ftrace_graph_call, &ftrace_stub, call);

	/*
	 * When enabling the graph tracer for the first time, ftrace_graph_call
	 * should contain a call to ftrace_stub.  Once it has been disabled,
	 * the 8 bytes at that position become NOPs.
	 */
	if (init_graph) {
		ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
						call);
		init_graph = 0;
	} else {
		ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
						NULL);
	}

	if (ret)
		return ret;

	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				    (unsigned long)&prepare_ftrace_return, true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned int call[2];
	int ret;

	make_call(&ftrace_graph_call, &prepare_ftrace_return, call);

	/*
	 * This is to make sure that ftrace_enable_ftrace_graph_caller
	 * did the right thing.
	 */
	ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
					call);

	if (ret)
		return ret;

	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				    (unsigned long)&prepare_ftrace_return, false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */