xref: /openbmc/linux/arch/riscv/kernel/ftrace.c (revision fed8b7e366e7c8f81e957ef91aa8f0a38e038c66)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (C) 2013 Linaro Limited
4  * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
5  * Copyright (C) 2017 Andes Technology Corporation
6  */
7 
8 #include <linux/ftrace.h>
9 #include <linux/uaccess.h>
10 #include <asm/cacheflush.h>
11 
12 #ifdef CONFIG_DYNAMIC_FTRACE
13 static int ftrace_check_current_call(unsigned long hook_pos,
14 				     unsigned int *expected)
15 {
16 	unsigned int replaced[2];
17 	unsigned int nops[2] = {NOP4, NOP4};
18 
19 	/* we expect nops at the hook position */
20 	if (!expected)
21 		expected = nops;
22 
23 	/*
24 	 * Read the text we want to modify;
25 	 * return must be -EFAULT on read error
26 	 */
27 	if (probe_kernel_read(replaced, (void *)hook_pos, MCOUNT_INSN_SIZE))
28 		return -EFAULT;
29 
30 	/*
31 	 * Make sure it is what we expect it to be;
32 	 * return must be -EINVAL on failed comparison
33 	 */
34 	if (memcmp(expected, replaced, sizeof(replaced))) {
35 		pr_err("%p: expected (%08x %08x) but get (%08x %08x)",
36 		       (void *)hook_pos, expected[0], expected[1], replaced[0],
37 		       replaced[1]);
38 		return -EINVAL;
39 	}
40 
41 	return 0;
42 }
43 
44 static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
45 				bool enable)
46 {
47 	unsigned int call[2];
48 	unsigned int nops[2] = {NOP4, NOP4};
49 	int ret = 0;
50 
51 	make_call(hook_pos, target, call);
52 
53 	/* replace the auipc-jalr pair at once */
54 	ret = probe_kernel_write((void *)hook_pos, enable ? call : nops,
55 				 MCOUNT_INSN_SIZE);
56 	/* return must be -EPERM on write error */
57 	if (ret)
58 		return -EPERM;
59 
60 	smp_mb();
61 	flush_icache_range((void *)hook_pos, (void *)hook_pos + MCOUNT_INSN_SIZE);
62 
63 	return 0;
64 }
65 
66 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
67 {
68 	int ret = ftrace_check_current_call(rec->ip, NULL);
69 
70 	if (ret)
71 		return ret;
72 
73 	return __ftrace_modify_call(rec->ip, addr, true);
74 }
75 
76 int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
77 		    unsigned long addr)
78 {
79 	unsigned int call[2];
80 	int ret;
81 
82 	make_call(rec->ip, addr, call);
83 	ret = ftrace_check_current_call(rec->ip, call);
84 
85 	if (ret)
86 		return ret;
87 
88 	return __ftrace_modify_call(rec->ip, addr, false);
89 }
90 
91 int ftrace_update_ftrace_func(ftrace_func_t func)
92 {
93 	int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
94 				       (unsigned long)func, true);
95 	if (!ret) {
96 		ret = __ftrace_modify_call((unsigned long)&ftrace_regs_call,
97 					   (unsigned long)func, true);
98 	}
99 
100 	return ret;
101 }
102 
/* No arch-specific setup is needed for dynamic ftrace on RISC-V. */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
107 #endif
108 
109 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
110 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
111 		       unsigned long addr)
112 {
113 	unsigned int call[2];
114 	int ret;
115 
116 	make_call(rec->ip, old_addr, call);
117 	ret = ftrace_check_current_call(rec->ip, call);
118 
119 	if (ret)
120 		return ret;
121 
122 	return __ftrace_modify_call(rec->ip, addr, true);
123 }
124 #endif
125 
126 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
127 /*
128  * Most of this function is copied from arm64.
129  */
130 void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
131 			   unsigned long frame_pointer)
132 {
133 	unsigned long return_hooker = (unsigned long)&return_to_handler;
134 	unsigned long old;
135 	int err;
136 
137 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
138 		return;
139 
140 	/*
141 	 * We don't suffer access faults, so no extra fault-recovery assembly
142 	 * is needed here.
143 	 */
144 	old = *parent;
145 
146 	if (function_graph_enter(old, self_addr, frame_pointer, parent))
147 		*parent = return_hooker;
148 }
149 
150 #ifdef CONFIG_DYNAMIC_FTRACE
151 extern void ftrace_graph_call(void);
/*
 * Enable function-graph tracing by patching the ftrace_graph_call site
 * to call prepare_ftrace_return().
 *
 * Returns 0 on success, or the error from ftrace_check_current_call() /
 * __ftrace_modify_call() on failure.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned int call[2];
	/* true only until the first enable; tracks the site's initial state */
	static int init_graph = 1;
	int ret;

	/* the call sequence the site holds before it is ever patched */
	make_call(&ftrace_graph_call, &ftrace_stub, call);

	/*
	 * When enabling the graph tracer for the first time, ftrace_graph_call
	 * should contain a call to ftrace_stub.  Once it has been disabled,
	 * the 8 bytes at that position become NOPs.
	 */
	if (init_graph) {
		/* first enable: expect the original call to ftrace_stub */
		ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
						call);
		init_graph = 0;
	} else {
		/* re-enable: expect the NOPs left behind by a prior disable */
		ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
						NULL);
	}

	if (ret)
		return ret;

	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				    (unsigned long)&prepare_ftrace_return, true);
}
180 
181 int ftrace_disable_ftrace_graph_caller(void)
182 {
183 	unsigned int call[2];
184 	int ret;
185 
186 	make_call(&ftrace_graph_call, &prepare_ftrace_return, call);
187 
188 	/*
189 	 * This is to make sure that ftrace_enable_ftrace_graph_caller
190 	 * did the right thing.
191 	 */
192 	ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
193 					call);
194 
195 	if (ret)
196 		return ret;
197 
198 	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
199 				    (unsigned long)&prepare_ftrace_return, false);
200 }
201 #endif /* CONFIG_DYNAMIC_FTRACE */
202 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
203