xref: /openbmc/linux/arch/powerpc/kernel/trace/ftrace.c (revision 6355b468)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Code for replacing ftrace calls with jumps.
4  *
5  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
6  *
7  * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
8  *
9  * Added function graph tracer code, taken from x86 that was written
10  * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
11  *
12  */
13 
14 #define pr_fmt(fmt) "ftrace-powerpc: " fmt
15 
16 #include <linux/spinlock.h>
17 #include <linux/hardirq.h>
18 #include <linux/uaccess.h>
19 #include <linux/module.h>
20 #include <linux/ftrace.h>
21 #include <linux/percpu.h>
22 #include <linux/init.h>
23 #include <linux/list.h>
24 
25 #include <asm/cacheflush.h>
26 #include <asm/code-patching.h>
27 #include <asm/ftrace.h>
28 #include <asm/syscall.h>
29 #include <asm/inst.h>
30 #include <asm/sections.h>
31 
/* Slots recording the in-kernel ftrace trampolines (text + init) */
#define	NUM_FTRACE_TRAMPS	2
static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
34 
ftrace_call_adjust(unsigned long addr)35 unsigned long ftrace_call_adjust(unsigned long addr)
36 {
37 	if (addr >= (unsigned long)__exittext_begin && addr < (unsigned long)__exittext_end)
38 		return 0;
39 
40 	if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY))
41 		addr += MCOUNT_INSN_SIZE;
42 
43 	return addr;
44 }
45 
ftrace_create_branch_inst(unsigned long ip,unsigned long addr,int link)46 static ppc_inst_t ftrace_create_branch_inst(unsigned long ip, unsigned long addr, int link)
47 {
48 	ppc_inst_t op;
49 
50 	WARN_ON(!is_offset_in_branch_range(addr - ip));
51 	create_branch(&op, (u32 *)ip, addr, link ? BRANCH_SET_LINK : 0);
52 
53 	return op;
54 }
55 
/* Safely fetch the instruction at @ip into @op; -EFAULT on failure */
static inline int ftrace_read_inst(unsigned long ip, ppc_inst_t *op)
{
	if (!copy_inst_from_kernel_nofault(op, (void *)ip))
		return 0;

	pr_err("0x%lx: fetching instruction failed\n", ip);
	return -EFAULT;
}
65 
/*
 * Check that the instruction at @ip is exactly @inst.
 * Returns 0 on match, -EFAULT on read failure, -EINVAL on mismatch.
 */
static inline int ftrace_validate_inst(unsigned long ip, ppc_inst_t inst)
{
	ppc_inst_t found;
	int err;

	err = ftrace_read_inst(ip, &found);
	if (err)
		return err;

	if (ppc_inst_equal(found, inst))
		return 0;

	pr_err("0x%lx: expected (%08lx) != found (%08lx)\n",
	       ip, ppc_inst_as_ulong(inst), ppc_inst_as_ulong(found));
	return -EINVAL;
}
80 
/* Patch @new at @ip, but only after verifying @old is currently there */
static inline int ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
{
	int err;

	err = ftrace_validate_inst(ip, old);
	if (err)
		return err;

	return patch_instruction((u32 *)ip, new);
}
90 
/* True if @op is a 'bl' instruction (mask off the LI displacement field) */
static int is_bl_op(ppc_inst_t op)
{
	return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BL(0);
}
95 
find_ftrace_tramp(unsigned long ip)96 static unsigned long find_ftrace_tramp(unsigned long ip)
97 {
98 	int i;
99 
100 	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
101 		if (!ftrace_tramps[i])
102 			continue;
103 		else if (is_offset_in_branch_range(ftrace_tramps[i] - ip))
104 			return ftrace_tramps[i];
105 
106 	return 0;
107 }
108 
/*
 * Compute the 'bl' instruction to patch at rec->ip so that it reaches
 * @addr — directly if in range, otherwise via a module stub or one of
 * the in-kernel ftrace trampolines. On success the instruction is
 * stored in @call_inst and 0 is returned; -EINVAL if nothing reachable.
 */
static int ftrace_get_call_inst(struct dyn_ftrace *rec, unsigned long addr, ppc_inst_t *call_inst)
{
	unsigned long ip = rec->ip;
	unsigned long stub;

	if (is_offset_in_branch_range(addr - ip)) {
		/* Within range */
		stub = addr;
#ifdef CONFIG_MODULES
	} else if (rec->arch.mod) {
		/* Module code would be going to one of the module stubs */
		stub = (addr == (unsigned long)ftrace_caller ? rec->arch.mod->arch.tramp :
							       rec->arch.mod->arch.tramp_regs);
#endif
	} else if (core_kernel_text(ip)) {
		/* We would be branching to one of our ftrace stubs */
		stub = find_ftrace_tramp(ip);
		if (!stub) {
			pr_err("0x%lx: No ftrace stubs reachable\n", ip);
			return -EINVAL;
		}
	} else {
		return -EINVAL;
	}

	*call_inst = ftrace_create_branch_inst(ip, stub, 1);
	return 0;
}
137 
138 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
{
	/*
	 * This should never be called since we override ftrace_replace_code();
	 * the definition only exists to satisfy the
	 * DYNAMIC_FTRACE_WITH_REGS interface.
	 */
	WARN_ON(1);
	return -EINVAL;
}
145 #endif
146 
ftrace_make_call(struct dyn_ftrace * rec,unsigned long addr)147 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
148 {
149 	ppc_inst_t old, new;
150 	int ret;
151 
152 	/* This can only ever be called during module load */
153 	if (WARN_ON(!IS_ENABLED(CONFIG_MODULES) || core_kernel_text(rec->ip)))
154 		return -EINVAL;
155 
156 	old = ppc_inst(PPC_RAW_NOP());
157 	ret = ftrace_get_call_inst(rec, addr, &new);
158 	if (ret)
159 		return ret;
160 
161 	return ftrace_modify_code(rec->ip, old, new);
162 }
163 
/* Stub required by the dyn_ftrace interface; patching happens elsewhere */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	/*
	 * This should never be called since we override ftrace_replace_code(),
	 * as well as ftrace_init_nop()
	 */
	WARN_ON(1);
	return -EINVAL;
}
173 
/*
 * Override of the generic ftrace_replace_code(): iterate every ftrace
 * record and patch its call site directly according to what the core
 * update logic asks for (make call / make nop / modify call).
 */
void ftrace_replace_code(int enable)
{
	ppc_inst_t old, new, call_inst, new_call_inst;
	ppc_inst_t nop_inst = ppc_inst(PPC_RAW_NOP());
	unsigned long ip, new_addr, addr;
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	int ret = 0, update;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);
		ip = rec->ip;

		/* Skip records that are disabled and not currently patched in */
		if (rec->flags & FTRACE_FL_DISABLED && !(rec->flags & FTRACE_FL_ENABLED))
			continue;

		/* Current and desired branch targets for this call site */
		addr = ftrace_get_addr_curr(rec);
		new_addr = ftrace_get_addr_new(rec);
		update = ftrace_update_record(rec, enable);

		switch (update) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;
		case FTRACE_UPDATE_MODIFY_CALL:
			/* Swap one 'bl' destination for another */
			ret = ftrace_get_call_inst(rec, new_addr, &new_call_inst);
			ret |= ftrace_get_call_inst(rec, addr, &call_inst);
			old = call_inst;
			new = new_call_inst;
			break;
		case FTRACE_UPDATE_MAKE_NOP:
			/* Replace the existing 'bl' with a nop */
			ret = ftrace_get_call_inst(rec, addr, &call_inst);
			old = call_inst;
			new = nop_inst;
			break;
		case FTRACE_UPDATE_MAKE_CALL:
			/* Replace the nop with a 'bl' to the new target */
			ret = ftrace_get_call_inst(rec, new_addr, &call_inst);
			old = nop_inst;
			new = call_inst;
			break;
		}

		if (!ret)
			ret = ftrace_modify_code(ip, old, new);
		if (ret)
			goto out;
	}

out:
	/* Report the failing record (if any) to the ftrace core */
	if (ret)
		ftrace_bug(ret, rec);
	return;
}
227 
/*
 * One-time initialization of an ftrace location: validate the
 * compiler-generated profiling sequence around rec->ip, remember the
 * owning module for non-kernel text, then convert the call site into
 * its disabled (nop) state.
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long addr, ip = rec->ip;
	ppc_inst_t old, new;
	int ret = 0;

	/* Verify instructions surrounding the ftrace location */
	if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY)) {
		/* Expect nops */
		ret = ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_NOP()));
		if (!ret)
			ret = ftrace_validate_inst(ip, ppc_inst(PPC_RAW_NOP()));
	} else if (IS_ENABLED(CONFIG_PPC32)) {
		/* Expected sequence: 'mflr r0', 'stw r0,4(r1)', 'bl _mcount' */
		ret = ftrace_validate_inst(ip - 8, ppc_inst(PPC_RAW_MFLR(_R0)));
		if (!ret)
			ret = ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_STW(_R0, _R1, 4)));
	} else if (IS_ENABLED(CONFIG_MPROFILE_KERNEL)) {
		/* Expected sequence: 'mflr r0', ['std r0,16(r1)'], 'bl _mcount' */
		ret = ftrace_read_inst(ip - 4, &old);
		if (!ret && !ppc_inst_equal(old, ppc_inst(PPC_RAW_MFLR(_R0)))) {
			/* The 'std' is optional; fall back to the two-insn form */
			ret = ftrace_validate_inst(ip - 8, ppc_inst(PPC_RAW_MFLR(_R0)));
			ret |= ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_STD(_R0, _R1, 16)));
		}
	} else {
		return -EINVAL;
	}

	if (ret)
		return ret;

	/* Record the owning module so later patching can find its stubs */
	if (!core_kernel_text(ip)) {
		if (!mod) {
			pr_err("0x%lx: No module provided for non-kernel address\n", ip);
			return -EFAULT;
		}
		rec->arch.mod = mod;
	}

	/* Nop-out the ftrace location */
	new = ppc_inst(PPC_RAW_NOP());
	addr = MCOUNT_ADDR;
	if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY)) {
		/* we instead patch-in the 'mflr r0' */
		old = ppc_inst(PPC_RAW_NOP());
		new = ppc_inst(PPC_RAW_MFLR(_R0));
		ret = ftrace_modify_code(ip - 4, old, new);
	} else if (is_offset_in_branch_range(addr - ip)) {
		/* Within range */
		old = ftrace_create_branch_inst(ip, addr, 1);
		ret = ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip) || (IS_ENABLED(CONFIG_MODULES) && mod)) {
		/*
		 * We would be branching to a linker-generated stub, or to the module _mcount
		 * stub. Let's just confirm we have a 'bl' here.
		 */
		ret = ftrace_read_inst(ip, &old);
		if (ret)
			return ret;
		if (!is_bl_op(old)) {
			pr_err("0x%lx: expected (bl) != found (%08lx)\n", ip, ppc_inst_as_ulong(old));
			return -EINVAL;
		}
		ret = patch_instruction((u32 *)ip, new);
	} else {
		return -EINVAL;
	}

	return ret;
}
298 
/*
 * Redirect the ftrace_call (and, when configured, ftrace_regs_call)
 * patch sites to branch-and-link to @func.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long target = ppc_function_entry(func);
	unsigned long ip = (unsigned long)(&ftrace_call);
	ppc_inst_t cur, branch;
	int err;

	cur = ppc_inst_read((u32 *)&ftrace_call);
	branch = ftrace_create_branch_inst(ip, target, 1);
	err = ftrace_modify_code(ip, cur, branch);

	/* Also update the regs callback function */
	if (err || !IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		return err;

	ip = (unsigned long)(&ftrace_regs_call);
	cur = ppc_inst_read((u32 *)&ftrace_regs_call);
	branch = ftrace_create_branch_inst(ip, target, 1);
	return ftrace_modify_code(ip, cur, branch);
}
319 
320 /*
321  * Use the default ftrace_modify_all_code, but without
322  * stop_machine().
323  */
void arch_ftrace_update_code(int command)
{
	/* Patch inline — no stop_machine(), per the comment above */
	ftrace_modify_all_code(command);
}
328 
ftrace_free_init_tramp(void)329 void ftrace_free_init_tramp(void)
330 {
331 	int i;
332 
333 	for (i = 0; i < NUM_FTRACE_TRAMPS && ftrace_tramps[i]; i++)
334 		if (ftrace_tramps[i] == (unsigned long)ftrace_tramp_init) {
335 			ftrace_tramps[i] = 0;
336 			return;
337 		}
338 }
339 
add_ftrace_tramp(unsigned long tramp)340 static void __init add_ftrace_tramp(unsigned long tramp)
341 {
342 	int i;
343 
344 	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
345 		if (!ftrace_tramps[i]) {
346 			ftrace_tramps[i] = tramp;
347 			return;
348 		}
349 }
350 
/*
 * Populate the two in-kernel trampolines (ftrace_tramp_text and
 * ftrace_tramp_init) with a stub that loads FTRACE_REGS_ADDR into r12
 * and branches to it via CTR, then register them for lookup by
 * find_ftrace_tramp().
 */
int __init ftrace_dyn_arch_init(void)
{
	unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
	unsigned long addr = FTRACE_REGS_ADDR;
	long reladdr;
	int i;
	/* Stub template; address fields are filled in below */
	u32 stub_insns[] = {
#ifdef CONFIG_PPC_KERNEL_PCREL
		/* pla r12,addr */
		PPC_PREFIX_MLS | __PPC_PRFX_R(1),
		PPC_INST_PADDI | ___PPC_RT(_R12),
		PPC_RAW_MTCTR(_R12),
		PPC_RAW_BCTR()
#elif defined(CONFIG_PPC64)
		/* Build the address as an offset from the kernel TOC */
		PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernel_toc)),
		PPC_RAW_ADDIS(_R12, _R12, 0),
		PPC_RAW_ADDI(_R12, _R12, 0),
		PPC_RAW_MTCTR(_R12),
		PPC_RAW_BCTR()
#else
		/* 32-bit: build the absolute address with lis/addi */
		PPC_RAW_LIS(_R12, 0),
		PPC_RAW_ADDI(_R12, _R12, 0),
		PPC_RAW_MTCTR(_R12),
		PPC_RAW_BCTR()
#endif
	};

	if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
		for (i = 0; i < 2; i++) {
			reladdr = addr - (unsigned long)tramp[i];

			/* paddi reaches +/- 8GB around the trampoline */
			if (reladdr >= (long)SZ_8G || reladdr < -(long)SZ_8G) {
				pr_err("Address of %ps out of range of pcrel address.\n",
					(void *)addr);
				return -1;
			}

			memcpy(tramp[i], stub_insns, sizeof(stub_insns));
			tramp[i][0] |= IMM_H18(reladdr);
			tramp[i][1] |= IMM_L(reladdr);
			add_ftrace_tramp((unsigned long)tramp[i]);
		}
	} else if (IS_ENABLED(CONFIG_PPC64)) {
		reladdr = addr - kernel_toc_addr();

		/* addis/addi reach +/- 2GB around the TOC */
		if (reladdr >= (long)SZ_2G || reladdr < -(long long)SZ_2G) {
			pr_err("Address of %ps out of range of kernel_toc.\n",
				(void *)addr);
			return -1;
		}

		for (i = 0; i < 2; i++) {
			memcpy(tramp[i], stub_insns, sizeof(stub_insns));
			tramp[i][1] |= PPC_HA(reladdr);
			tramp[i][2] |= PPC_LO(reladdr);
			add_ftrace_tramp((unsigned long)tramp[i]);
		}
	} else {
		for (i = 0; i < 2; i++) {
			memcpy(tramp[i], stub_insns, sizeof(stub_insns));
			tramp[i][0] |= PPC_HA(addr);
			tramp[i][1] |= PPC_LO(addr);
			add_ftrace_tramp((unsigned long)tramp[i]);
		}
	}

	return 0;
}
419 
420 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Function-graph entry hook: on success, rewrite the saved link
 * register in @fregs so the traced function returns through
 * return_to_handler instead of its real caller.
 */
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	unsigned long sp = fregs->regs.gpr[1];	/* r1 = stack pointer */
	int bit;

	if (unlikely(ftrace_graph_is_dead()))
		goto out;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;

	/* Guard against recursing into the tracer from within itself */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		goto out;

	/* function_graph_enter() returns 0 when the hook was armed */
	if (!function_graph_enter(parent_ip, ip, 0, (unsigned long *)sp))
		parent_ip = ppc_function_entry(return_to_handler);

	ftrace_test_recursion_unlock(bit);
out:
	/* Write back the (possibly redirected) return address */
	fregs->regs.link = parent_ip;
}
444 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
445