/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * Thanks go to Steven Rostedt for writing the original x86 version.
 */

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/uasm.h>

/*
 * If the instruction pointer is in module space (0xc0000000), return true;
 * otherwise it is in kernel space (0x80000000), so return false.
 *
 * FIXME: this will not work when the kernel space and module space are the
 * same. If they are the same, we would need to modify scripts/recordmcount.pl,
 * ftrace_make_nop/call() and the other related parts to ensure that
 * enabling/disabling the call site to _mcount is handled correctly for both
 * the kernel and modules.
 */

static inline int in_module(unsigned long ip)
{
	return ip & 0x40000000;
}
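
/*
 * A worked example of the check above (illustrative addresses only):
 * a module address such as 0xc0123454 has bit 30 set, so
 * 0xc0123454 & 0x40000000 == 0x40000000 (non-zero, true), while a
 * kernel address such as 0x80123454 gives 0 (false).
 */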

#ifdef CONFIG_DYNAMIC_FTRACE

#define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff	/* op_code|addr : 31...26|25 ....0 */

#define INSN_B_1F_4 0x10000004	/* b 1f; offset = 4 */
#define INSN_B_1F_5 0x10000005	/* b 1f; offset = 5 */
#define INSN_NOP 0x00000000	/* nop */
#define INSN_JAL(addr)	\
	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))

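/*
 * Sketch of the encodings above (illustrative, with an assumed target
 * address): 0x1000000N is "beq $0, $0, +N", an unconditional branch
 * whose 16-bit offset is counted in instructions from the delay slot,
 * so offsets 4 and 5 skip the rest of the -mlong-calls call sequence.
 * For INSN_JAL, addr == 0x80123458 gives (addr >> 2) & ADDR_MASK ==
 * 0x00048d16, hence INSN_JAL(addr) == 0x0c048d16: a J-type jal whose
 * low 26 bits hold the word index of the target within the current
 * 256 MB segment.
 */
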
static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;

static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* lui v1, hi16_mcount */
	v1 = 3;
	buf = (u32 *)&insn_lui_v1_hi16_mcount;
	UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);

	/* jal (ftrace_caller + 8), jump over the first two instructions */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8));

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller);
#endif
}

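/*
 * Note on the step above (an assumption about uasm, not stated here):
 * each uasm call emits one encoded instruction into the buffer it is
 * handed, so pointing the buffer at a single u32 variable captures the
 * pre-encoded opcode for the patching paths below to reuse directly.
 */
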
static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
	int faulted;

	/* *(unsigned int *)ip = new_code; */
	safe_store_code(new_code, ip, faulted);

	if (unlikely(faulted))
		return -EFAULT;

	flush_icache_range(ip, ip + 8);

	return 0;
}

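/*
 * Note (not in the original source): safe_store_code() is a
 * fault-tolerant store, so patching a bad text address reports -EFAULT
 * instead of crashing, and the icache flush that follows makes the
 * rewritten instruction visible to instruction fetch before the patched
 * site can run again.
 */
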
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/*
	 * Modules are compiled with -mlong-calls, but the kernel is not,
	 * so the two call-site sequences must be handled separately.
	 */
	if (in_module(ip)) {
#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
		/*
		 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000005)
		 * addiu v1, v1, low_16bit_of_mcount
		 * move at, ra
		 * move $12, ra_address
		 * jalr v1
		 *  sub sp, sp, 8
		 *                                  1: offset = 5 instructions
		 */
		new = INSN_B_1F_5;
#else
		/*
		 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000004)
		 * addiu v1, v1, low_16bit_of_mcount
		 * move at, ra
		 * jalr v1
		 *  nop | move $12, ra_address | sub sp, sp, 8
		 *                                  1: offset = 4 instructions
		 */
		new = INSN_B_1F_4;
#endif
	} else {
		/*
		 * move at, ra
		 * jal _mcount		--> nop
		 */
		new = INSN_NOP;
	}
	return ftrace_modify_code(ip, new);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/* ip, module: 0xc0000000, kernel: 0x80000000 */
	new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller;

	return ftrace_modify_code(ip, new);
}

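/*
 * Note (not in the original source): in the module case only the first
 * instruction of the -mlong-calls sequence (the lui) needs restoring,
 * because ftrace_make_nop() branched over the sequence instead of
 * overwriting it, leaving the remaining instructions intact.
 */
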
#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned int new;

	new = INSN_JAL((unsigned long)func);

	return ftrace_modify_code(FTRACE_CALL_IP, new);
}

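/*
 * Background note (an assumption about the surrounding code, not from
 * this file): ftrace_call is expected to be a label in the arch's
 * mcount.S marking the patchable call slot inside ftrace_caller(), so
 * rewriting that one slot with INSN_JAL(func) redirects every traced
 * function to the new tracer callback.
 */
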
int __init ftrace_dyn_arch_init(void *data)
{
	/* Encode the instructions when booting */
	ftrace_dyn_arch_init_insns();

	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP	((unsigned long)(&ftrace_graph_call))

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
			insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}

#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP	(0xafbf << 16)	/* s{d,w} ra, offset(sp) */
#define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
#define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */

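/*
 * How the masks above match (illustrative encodings): "sw ra, 28(sp)"
 * assembles to 0xafbf001c, whose upper halfword equals S_RA_SP >> 16.
 * S_R_SP only fixes the top bit of the rt field, so the mask-and test
 * below accepts a store of any register $16..$31 to the stack; the
 * 64-bit "sd" opcode sets a superset of these bits, so it matches too.
 */
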
unsigned long ftrace_get_parent_addr(unsigned long self_addr,
				     unsigned long parent,
				     unsigned long parent_addr,
				     unsigned long fp)
{
	unsigned long sp, ip, ra;
	unsigned int code;
	int faulted;

	/*
	 * For a module, move the ip from the calling site of mcount back to
	 * the instruction "lui v1, hi_16bit_of_mcount" (offset is 20); for
	 * the kernel, move it back to the instruction "move at, ra" (offset
	 * is 12).
	 */
	ip = self_addr - (in_module(self_addr) ? 20 : 12);

	/*
	 * Search backwards through the text until we find either a non-store
	 * instruction or the "s{d,w} ra, offset(sp)" instruction.
	 */
	do {
		ip -= 4;

		/* get the code at "ip": code = *(unsigned int *)ip; */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;
		/*
		 * If we hit a non-store instruction before finding where the
		 * ra is stored, then this is a leaf function and it does not
		 * store the ra on the stack.
		 */
		if ((code & S_R_SP) != S_R_SP)
			return parent_addr;

	} while ((code & S_RA_SP) != S_RA_SP);

	sp = fp + (code & OFFSET_MASK);

	/* ra = *(unsigned long *)sp; */
	safe_load_stack(ra, sp, faulted);
	if (unlikely(faulted))
		return 0;

	if (ra == parent)
		return sp;
	return 0;
}

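/*
 * A worked example of the scan above (an assumed prologue, illustrative
 * only): for a non-leaf function beginning
 *
 *	addiu	sp, sp, -32
 *	sw	ra, 28(sp)
 *
 * the loop stops on 0xafbf001c ("sw ra, 28(sp)"), so sp = fp + 28 and,
 * if *(fp + 28) still holds the caller's ra, that slot is returned for
 * prepare_ftrace_return() to overwrite.
 */
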
#endif	/* !KBUILD_MCOUNT_RA_ADDRESS */

/*
 * Hook the return address and push it onto the stack of return addresses
 * in the current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long fp)
{
	unsigned long old;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	int faulted;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * "parent" is the stack address where the return address of the
	 * caller of _mcount is saved.
	 *
	 * If gcc < 4.5, a leaf function does not save the return address
	 * on the stack, so we "emulate" one in _mcount's stack space and
	 * hijack it directly; a non-leaf function saves the return address
	 * in its own stack space, so we cannot hijack it directly and must
	 * find the real stack address instead, which is what
	 * ftrace_get_parent_addr() does.
	 *
	 * If gcc >= 4.5, with the new -mmcount-ra-address option, the
	 * location of the return address of a non-leaf function is saved
	 * in $12 for us, while for a leaf function a zero is put into $12
	 * instead; we do that in ftrace_graph_caller of mcount.S.
	 */

	/* old = *parent; */
	safe_load_stack(old, parent, faulted);
	if (unlikely(faulted))
		goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
	parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old,
			(unsigned long)parent, fp);
	/*
	 * If getting the stack address of the non-leaf function's ra
	 * fails, stop the function graph tracer and return.
	 */
	if (!parent)
		goto out;
#endif
	/* *parent = return_hooker; */
	safe_store_stack(return_hooker, parent, faulted);
	if (unlikely(faulted))
		goto out;

	if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) ==
	    -EBUSY) {
		*parent = old;
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent = old;
	}
	return;
out:
	ftrace_graph_stop();
	WARN_ON(1);
}
#endif	/* CONFIG_FUNCTION_GRAPH_TRACER */