xref: /openbmc/linux/arch/arm/kernel/ftrace.c (revision 565d76cb)
1 /*
2  * Dynamic function tracing support.
3  *
4  * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
5  * Copyright (C) 2010 Rabin Vincent <rabin@rab.in>
6  *
7  * For licensing details, see COPYING.
8  *
9  * Defines low-level handling of mcount calls when the kernel
10  * is compiled with the -pg flag. When using dynamic ftrace, the
11  * mcount call-sites get patched with NOP till they are enabled.
12  * All code mutation routines here are called under stop_machine().
13  */
14 
15 #include <linux/ftrace.h>
16 #include <linux/uaccess.h>
17 
18 #include <asm/cacheflush.h>
19 #include <asm/ftrace.h>
20 
21 #ifdef CONFIG_THUMB2_KERNEL
22 #define	NOP		0xeb04f85d	/* pop.w {lr} */
23 #else
24 #define	NOP		0xe8bd4000	/* pop {lr} */
25 #endif
26 
27 #ifdef CONFIG_DYNAMIC_FTRACE
28 #ifdef CONFIG_OLD_MCOUNT
29 #define OLD_MCOUNT_ADDR	((unsigned long) mcount)
30 #define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
31 
32 #define	OLD_NOP		0xe1a00000	/* mov r0, r0 */
33 
34 static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
35 {
36 	return rec->arch.old_mcount ? OLD_NOP : NOP;
37 }
38 
39 static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
40 {
41 	if (!rec->arch.old_mcount)
42 		return addr;
43 
44 	if (addr == MCOUNT_ADDR)
45 		addr = OLD_MCOUNT_ADDR;
46 	else if (addr == FTRACE_ADDR)
47 		addr = OLD_FTRACE_ADDR;
48 
49 	return addr;
50 }
51 #else
/* Without CONFIG_OLD_MCOUNT every call site uses the same NOP encoding. */
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return NOP;
}
56 
/* No old-mcount ABI support: trampoline addresses need no remapping. */
static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
	return addr;
}
61 #endif
62 
63 #ifdef CONFIG_THUMB2_KERNEL
64 static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
65 				       bool link)
66 {
67 	unsigned long s, j1, j2, i1, i2, imm10, imm11;
68 	unsigned long first, second;
69 	long offset;
70 
71 	offset = (long)addr - (long)(pc + 4);
72 	if (offset < -16777216 || offset > 16777214) {
73 		WARN_ON_ONCE(1);
74 		return 0;
75 	}
76 
77 	s	= (offset >> 24) & 0x1;
78 	i1	= (offset >> 23) & 0x1;
79 	i2	= (offset >> 22) & 0x1;
80 	imm10	= (offset >> 12) & 0x3ff;
81 	imm11	= (offset >>  1) & 0x7ff;
82 
83 	j1 = (!i1) ^ s;
84 	j2 = (!i2) ^ s;
85 
86 	first = 0xf000 | (s << 10) | imm10;
87 	second = 0x9000 | (j1 << 13) | (j2 << 11) | imm11;
88 	if (link)
89 		second |= 1 << 14;
90 
91 	return (second << 16) | first;
92 }
93 #else
94 static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
95 				       bool link)
96 {
97 	unsigned long opcode = 0xea000000;
98 	long offset;
99 
100 	if (link)
101 		opcode |= 1 << 24;
102 
103 	offset = (long)addr - (long)(pc + 8);
104 	if (unlikely(offset < -33554432 || offset > 33554428)) {
105 		/* Can't generate branches that far (from ARM ARM). Ftrace
106 		 * doesn't generate branches outside of kernel text.
107 		 */
108 		WARN_ON_ONCE(1);
109 		return 0;
110 	}
111 
112 	offset = (offset >> 2) & 0x00ffffff;
113 
114 	return opcode | offset;
115 }
116 #endif
117 
/* Build a BL (branch with link, i.e. a call) from @pc to @addr. */
static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
	return ftrace_gen_branch(pc, addr, true);
}
122 
123 static int ftrace_modify_code(unsigned long pc, unsigned long old,
124 			      unsigned long new)
125 {
126 	unsigned long replaced;
127 
128 	if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
129 		return -EFAULT;
130 
131 	if (replaced != old)
132 		return -EINVAL;
133 
134 	if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE))
135 		return -EPERM;
136 
137 	flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
138 
139 	return 0;
140 }
141 
142 int ftrace_update_ftrace_func(ftrace_func_t func)
143 {
144 	unsigned long pc, old;
145 	unsigned long new;
146 	int ret;
147 
148 	pc = (unsigned long)&ftrace_call;
149 	memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE);
150 	new = ftrace_call_replace(pc, (unsigned long)func);
151 
152 	ret = ftrace_modify_code(pc, old, new);
153 
154 #ifdef CONFIG_OLD_MCOUNT
155 	if (!ret) {
156 		pc = (unsigned long)&ftrace_call_old;
157 		memcpy(&old, &ftrace_call_old, MCOUNT_INSN_SIZE);
158 		new = ftrace_call_replace(pc, (unsigned long)func);
159 
160 		ret = ftrace_modify_code(pc, old, new);
161 	}
162 #endif
163 
164 	return ret;
165 }
166 
167 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
168 {
169 	unsigned long new, old;
170 	unsigned long ip = rec->ip;
171 
172 	old = ftrace_nop_replace(rec);
173 	new = ftrace_call_replace(ip, adjust_address(rec, addr));
174 
175 	return ftrace_modify_code(rec->ip, old, new);
176 }
177 
178 int ftrace_make_nop(struct module *mod,
179 		    struct dyn_ftrace *rec, unsigned long addr)
180 {
181 	unsigned long ip = rec->ip;
182 	unsigned long old;
183 	unsigned long new;
184 	int ret;
185 
186 	old = ftrace_call_replace(ip, adjust_address(rec, addr));
187 	new = ftrace_nop_replace(rec);
188 	ret = ftrace_modify_code(ip, old, new);
189 
190 #ifdef CONFIG_OLD_MCOUNT
191 	if (ret == -EINVAL && addr == MCOUNT_ADDR) {
192 		rec->arch.old_mcount = true;
193 
194 		old = ftrace_call_replace(ip, adjust_address(rec, addr));
195 		new = ftrace_nop_replace(rec);
196 		ret = ftrace_modify_code(ip, old, new);
197 	}
198 #endif
199 
200 	return ret;
201 }
202 
/*
 * Arch-specific dynamic ftrace initialisation: ARM has nothing to set
 * up, so just report success to the core code through *data.
 */
int __init ftrace_dyn_arch_init(void *data)
{
	/* The core ftrace code reads this value; zero means success. */
	*(unsigned long *)data = 0;

	return 0;
}
209 #endif /* CONFIG_DYNAMIC_FTRACE */
210 
211 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return path of the function being entered: swap the saved
 * return address (*parent) for return_to_handler so the graph tracer
 * regains control on return, pushing the original address onto the
 * per-task return stack. Every redirect is undone if the push or the
 * entry callback rejects the trace.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long) &return_to_handler;
	struct ftrace_graph_ent trace;
	unsigned long old;
	int err;

	/* Graph tracing is paused for this task; leave the return alone. */
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;
	*parent = return_hooker;

	err = ftrace_push_return_trace(old, self_addr, &trace.depth,
				       frame_pointer);
	if (err == -EBUSY) {
		/* Return stack is full: undo the redirection. */
		*parent = old;
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		/* Entry rejected: pop what we pushed and restore the return. */
		current->curr_ret_stack--;
		*parent = old;
	}
}
241 
242 #ifdef CONFIG_DYNAMIC_FTRACE
243 extern unsigned long ftrace_graph_call;
244 extern unsigned long ftrace_graph_call_old;
245 extern void ftrace_graph_caller_old(void);
246 
247 static int __ftrace_modify_caller(unsigned long *callsite,
248 				  void (*func) (void), bool enable)
249 {
250 	unsigned long caller_fn = (unsigned long) func;
251 	unsigned long pc = (unsigned long) callsite;
252 	unsigned long branch = ftrace_gen_branch(pc, caller_fn, false);
253 	unsigned long nop = 0xe1a00000;	/* mov r0, r0 */
254 	unsigned long old = enable ? nop : branch;
255 	unsigned long new = enable ? branch : nop;
256 
257 	return ftrace_modify_code(pc, old, new);
258 }
259 
260 static int ftrace_modify_graph_caller(bool enable)
261 {
262 	int ret;
263 
264 	ret = __ftrace_modify_caller(&ftrace_graph_call,
265 				     ftrace_graph_caller,
266 				     enable);
267 
268 #ifdef CONFIG_OLD_MCOUNT
269 	if (!ret)
270 		ret = __ftrace_modify_caller(&ftrace_graph_call_old,
271 					     ftrace_graph_caller_old,
272 					     enable);
273 #endif
274 
275 	return ret;
276 }
277 
/* Patch in the branch that enables function-graph tracing. */
int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}
282 
/* Patch the function-graph caller branch back to a NOP. */
int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
287 #endif /* CONFIG_DYNAMIC_FTRACE */
288 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
289