/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 * Copyright (C) 2017 Andes Technology Corporation
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

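/*
 * With CONFIG_DYNAMIC_FTRACE, each traced function starts with an
 * MCOUNT_INSN_SIZE (8-byte, two-instruction) slot that is patched at
 * runtime: two NOPs while tracing is off, or an auipc/jalr call into
 * the ftrace trampoline while it is on.  The helpers below read,
 * verify, and rewrite that slot.
 */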
#ifdef CONFIG_DYNAMIC_FTRACE
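/*
 * Check that the instructions currently at @hook_pos match @expected
 * (or a NOP pair when @expected is NULL), so we never patch over text
 * we do not recognize.
 */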
static int ftrace_check_current_call(unsigned long hook_pos,
				     unsigned int *expected)
{
	unsigned int replaced[2];
	unsigned int nops[2] = {NOP4, NOP4};

	/* we expect nops at the hook position */
	if (!expected)
		expected = nops;

	/*
	 * Read the text we want to modify;
	 * return must be -EFAULT on read error
	 */
	if (probe_kernel_read(replaced, (void *)hook_pos, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/*
	 * Make sure it is what we expect it to be;
	 * return must be -EINVAL on failed comparison
	 */
	if (memcmp(expected, replaced, sizeof(replaced))) {
		pr_err("%p: expected (%08x %08x) but got (%08x %08x)\n",
		       (void *)hook_pos, expected[0], expected[1], replaced[0],
		       replaced[1]);
		return -EINVAL;
	}

	return 0;
}

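/*
 * Rewrite the slot at @hook_pos: install a call to @target when @enable
 * is true, or restore the two NOPs when it is false.  A patched slot
 * looks roughly like:
 *
 *	auipc	ra, hi20(target - hook_pos)
 *	jalr	ra, lo12(target - hook_pos)(ra)
 */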
static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
				bool enable)
{
	unsigned int call[2];
	unsigned int nops[2] = {NOP4, NOP4};
	int ret = 0;

	make_call(hook_pos, target, call);

	/* replace the auipc-jalr pair at once */
	ret = probe_kernel_write((void *)hook_pos, enable ? call : nops,
				 MCOUNT_INSN_SIZE);
	/* return must be -EPERM on write error */
	if (ret)
		return -EPERM;

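	/*
	 * Order the text write above against the icache flush below, then
	 * flush the instruction cache so the harts fetch the newly written
	 * instructions rather than stale ones.
	 */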
	smp_mb();
	flush_icache_range(hook_pos, hook_pos + MCOUNT_INSN_SIZE);

	return 0;
}

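/*
 * ftrace entry point: turn the NOP slot at @rec->ip into a call to
 * @addr, after verifying that the slot still contains NOPs.
 */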
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	int ret = ftrace_check_current_call(rec->ip, NULL);

	if (ret)
		return ret;

	return __ftrace_modify_call(rec->ip, addr, true);
}

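/*
 * Inverse of ftrace_make_call(): verify that @rec->ip currently calls
 * @addr, then replace the call with NOPs.
 */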
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned int call[2];
	int ret;

	make_call(rec->ip, addr, call);
	ret = ftrace_check_current_call(rec->ip, call);

	if (ret)
		return ret;

	return __ftrace_modify_call(rec->ip, addr, false);
}

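/*
 * Point both ftrace_call and ftrace_regs_call (the patched call sites
 * inside the mcount trampolines) at the new tracer function @func.
 */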
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
				       (unsigned long)func, true);
	if (!ret) {
		ret = __ftrace_modify_call((unsigned long)&ftrace_regs_call,
					   (unsigned long)func, true);
	}

	return ret;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
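/*
 * Redirect an existing call site: verify that @rec->ip currently calls
 * @old_addr, then patch it to call @addr instead.
 */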
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned int call[2];
	int ret;

	make_call(rec->ip, old_addr, call);
	ret = ftrace_check_current_call(rec->ip, call);

	if (ret)
		return ret;

	return __ftrace_modify_call(rec->ip, addr, true);
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Most of this function is copied from arm64.
 */
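/*
 * Called on function entry: record the real return address at *parent,
 * then swap it for return_to_handler so the graph tracer also sees the
 * function return.
 */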
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;
	struct ftrace_graph_ent trace;
	int err;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * We don't suffer access faults, so no extra fault-recovery assembly
	 * is needed here.
	 */
	old = *parent;

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	if (!ftrace_graph_entry(&trace))
		return;

	err = ftrace_push_return_trace(old, self_addr, &trace.depth,
				       frame_pointer, parent);
	if (err == -EBUSY)
		return;
	*parent = return_hooker;
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
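/*
 * Enable the graph caller: patch the ftrace_graph_call slot in the
 * trampoline to call prepare_ftrace_return() instead of ftrace_stub
 * (or instead of the NOPs left behind by a previous disable).
 */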
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned int call[2];
	static int init_graph = 1;
	int ret;

	make_call(&ftrace_graph_call, &ftrace_stub, call);

	/*
	 * When enabling the graph tracer for the first time, ftrace_graph_call
	 * should contain a call to ftrace_stub.  Once it has been disabled,
	 * the 8 bytes at that position become NOPs.
	 */
	if (init_graph) {
		ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
						call);
		init_graph = 0;
	} else {
		ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
						NULL);
	}

	if (ret)
		return ret;

	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				    (unsigned long)&prepare_ftrace_return, true);
}

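/*
 * Disable the graph caller: replace the call to prepare_ftrace_return()
 * at ftrace_graph_call with NOPs.
 */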
int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned int call[2];
	int ret;

	make_call(&ftrace_graph_call, &prepare_ftrace_return, call);

	/*
	 * Make sure the site still holds the call to prepare_ftrace_return
	 * that ftrace_enable_ftrace_graph_caller() installed.
	 */
	ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
					call);

	if (ret)
		return ret;

	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				    (unsigned long)&prepare_ftrace_return, false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */