xref: /openbmc/linux/arch/sh/kernel/ftrace.c (revision e8e0929d)
/*
 * Copyright (C) 2008 Matt Fleming <matt@console-pimps.org>
 * Copyright (C) 2008 Paul Mundt <lethal@linux-sh.org>
 *
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <asm/ftrace.h>
#include <asm/cacheflush.h>
#include <asm/unistd.h>
#include <trace/syscall.h>

#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE];

static unsigned char ftrace_nop[4];
/*
 * If we're trying to nop out a call to a function, we instead
 * write the address of the instruction just past the memory table
 * into the table, so the call branches over _mcount.
 *
 * 8c011060 <a>:
 * 8c011060:       02 d1           mov.l   8c01106c <a+0xc>,r1
 * 8c011062:       22 4f           sts.l   pr,@-r15
 * 8c011064:       02 c7           mova    8c011070 <a+0x10>,r0
 * 8c011066:       2b 41           jmp     @r1
 * 8c011068:       2a 40           lds     r0,pr
 * 8c01106a:       09 00           nop
 * 8c01106c:       68 24           .word 0x2468     <--- ip
 * 8c01106e:       1d 8c           .word 0x8c1d
 * 8c011070:       26 4f           lds.l   @r15+,pr <--- ip + MCOUNT_INSN_SIZE
 *
 * We write 0x8c011070 to 0x8c01106c so that on entry to a() we branch
 * past the _mcount call and continue executing code as normal.
 */
static unsigned char *ftrace_nop_replace(unsigned long ip)
{
	__raw_writel(ip + MCOUNT_INSN_SIZE, ftrace_nop);
	return ftrace_nop;
}
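
/*
 * Worked example using the (hypothetical) addresses from the
 * disassembly above: with ip = 0x8c01106c the memory table initially
 * holds 0x8c1d2468, the call target (presumably _mcount).  Once the
 * nop replacement has been patched in via ftrace_modify_code(), the
 * table holds ip + MCOUNT_INSN_SIZE = 0x8c011070, so the jmp simply
 * lands on the "lds.l @r15+,pr" that follows the table and the tracer
 * is never entered.
 */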

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	/* Place the address in the memory table. */
	__raw_writel(addr, ftrace_replaced_code);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return ftrace_replaced_code;
}

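/*
 * Patch the MCOUNT_INSN_SIZE bytes at @ip from @old_code to @new_code.
 * Returns -EFAULT if the site cannot be read, -EINVAL if it does not
 * currently contain @old_code, and -EPERM if the write fails.
 */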
static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		       unsigned char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: due to modules and __init, code can disappear and
	 * change; we need to protect against faulting as well as code
	 * changing.  We do this by using the probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the text with the new text */
	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
		return -EPERM;

	flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);

	return 0;
}

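/*
 * Retarget the ftrace_call site: rebuild the memory-table word at
 * ftrace_call + MCOUNT_INSN_OFFSET so that it points at @func, and
 * patch it in with ftrace_modify_code().
 */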
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call) + MCOUNT_INSN_OFFSET;
	unsigned char old[MCOUNT_INSN_SIZE], *new;

	memcpy(old, (unsigned char *)ip, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);

	return ftrace_modify_code(ip, old, new);
}

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace(ip);

	return ftrace_modify_code(rec->ip, old, new);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace(ip);
	new = ftrace_call_replace(ip, addr);

	return ftrace_modify_code(rec->ip, old, new);
}

int __init ftrace_dyn_arch_init(void *data)
{
	/* The return code is returned via data */
	__raw_writel(0, (unsigned long)data);

	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

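/*
 * Replace the 32-bit word at @ip with @new_addr.  Refuses to patch
 * (-EINVAL) if the word does not currently hold @old_addr, and returns
 * -EFAULT if it cannot be read at all.
 */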
static int ftrace_mod(unsigned long ip, unsigned long old_addr,
		      unsigned long new_addr)
{
	unsigned char code[MCOUNT_INSN_SIZE];

	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (old_addr != __raw_readl((unsigned long *)code))
		return -EINVAL;

	__raw_writel(new_addr, ip);
	return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip, old_addr, new_addr;

	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
	old_addr = (unsigned long)(&skip_trace);
	new_addr = (unsigned long)(&ftrace_graph_caller);

	return ftrace_mod(ip, old_addr, new_addr);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip, old_addr, new_addr;

	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
	old_addr = (unsigned long)(&ftrace_graph_caller);
	new_addr = (unsigned long)(&skip_trace);

	return ftrace_mod(ip, old_addr, new_addr);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it onto the stack of return
 * addresses in the current thread info.
 *
 * This is the main routine for the function graph tracer. The function
 * graph tracer essentially works like this:
 *
 * parent is the stack address containing self_addr's return address.
 * We pull the real return address out of parent and store it in
 * current's ret_stack. Then, we replace the return address on the stack
 * with the address of return_to_handler. self_addr is the function that
 * called mcount.
 *
 * When self_addr returns, it will jump to return_to_handler which calls
 * ftrace_return_to_handler. ftrace_return_to_handler will pull the real
 * return address off of current's ret_stack and jump to it.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	int faulted, err;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against a fault, even if it shouldn't
	 * happen. This tool is too intrusive to do
	 * without such a protection.
	 */
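	/*
	 * The asm below is effectively:
	 *
	 *	old = *parent;
	 *	*parent = return_hooker;
	 *	faulted = 0;
	 *
	 * with both the load and the store covered by __ex_table
	 * entries, so that a fault branches to the fixup code and sets
	 * faulted = 1 instead of oopsing.
	 */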
	__asm__ __volatile__(
		"1:						\n\t"
		"mov.l		@%2, %0				\n\t"
		"2:						\n\t"
		"mov.l		%3, @%2				\n\t"
		"mov		#0, %1				\n\t"
		"3:						\n\t"
		".section .fixup, \"ax\"			\n\t"
		"4:						\n\t"
		"mov.l		5f, %0				\n\t"
		"jmp		@%0				\n\t"
		" mov		#1, %1				\n\t"
		".balign 4					\n\t"
		"5:	.long 3b				\n\t"
		".previous					\n\t"
		".section __ex_table,\"a\"			\n\t"
		".long 1b, 4b					\n\t"
		".long 2b, 4b					\n\t"
		".previous					\n\t"
		: "=&r" (old), "=r" (faulted)
		: "r" (parent), "r" (return_hooker)
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0);
	if (err == -EBUSY) {
		__raw_writel(old, parent);
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		__raw_writel(old, parent);
	}
}
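
/*
 * Illustrative flow, with foo() standing in for an arbitrary traced
 * function (a sketch of the description above, not extra behaviour):
 * foo()'s mcount entry code calls prepare_ftrace_return(parent, foo),
 * where *parent holds the address foo() would normally return to.
 * That address is pushed on current's ret_stack and *parent is
 * rewritten to return_to_handler, so when foo() returns it enters
 * return_to_handler, which calls ftrace_return_to_handler() to pop the
 * saved address and jump back to foo()'s real caller.
 */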
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_FTRACE_SYSCALLS

extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];
extern unsigned long *sys_call_table;

static struct syscall_metadata **syscalls_metadata;

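/*
 * Map a sys_call_table entry to its syscall_metadata record: resolve
 * the handler address to a symbol name with kallsyms_lookup() and scan
 * the metadata section for an entry with a matching name.
 */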
static struct syscall_metadata *find_syscall_meta(unsigned long *syscall)
{
	struct syscall_metadata *start;
	struct syscall_metadata *stop;
	char str[KSYM_SYMBOL_LEN];

	start = (struct syscall_metadata *)__start_syscalls_metadata;
	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
	kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str);

	for ( ; start < stop; start++) {
		if (start->name && !strcmp(start->name, str))
			return start;
	}

	return NULL;
}

struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= FTRACE_SYSCALL_MAX || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

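/*
 * Build the one-time syscall nr -> metadata table.  The static
 * refcount ensures only the first caller performs the allocation and
 * scan; later callers just drop their reference again.
 */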
void arch_init_ftrace_syscalls(void)
{
	int i;
	struct syscall_metadata *meta;
	unsigned long **psys_syscall_table = &sys_call_table;
	static atomic_t refs;

	if (atomic_inc_return(&refs) != 1)
		goto end;

	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
					FTRACE_SYSCALL_MAX, GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return;
	}

	for (i = 0; i < FTRACE_SYSCALL_MAX; i++) {
		meta = find_syscall_meta(psys_syscall_table[i]);
		syscalls_metadata[i] = meta;
	}
	return;

	/* Paranoid: avoid overflow */
end:
	atomic_dec(&refs);
}
#endif /* CONFIG_FTRACE_SYSCALLS */