// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/swab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
struct fregs_offset {
	const char *name;
	int offset;
};

#define FREGS_OFFSET(n, field)				\
{							\
	.name = n,					\
	.offset = offsetof(struct ftrace_regs, field),	\
}

static const struct fregs_offset fregs_offsets[] = {
	FREGS_OFFSET("x0", regs[0]),
	FREGS_OFFSET("x1", regs[1]),
	FREGS_OFFSET("x2", regs[2]),
	FREGS_OFFSET("x3", regs[3]),
	FREGS_OFFSET("x4", regs[4]),
	FREGS_OFFSET("x5", regs[5]),
	FREGS_OFFSET("x6", regs[6]),
	FREGS_OFFSET("x7", regs[7]),
	FREGS_OFFSET("x8", regs[8]),

	FREGS_OFFSET("x29", fp),
	FREGS_OFFSET("x30", lr),
	FREGS_OFFSET("lr", lr),

	FREGS_OFFSET("sp", sp),
	FREGS_OFFSET("pc", pc),
};

int ftrace_regs_query_register_offset(const char *name)
{
	for (int i = 0; i < ARRAY_SIZE(fregs_offsets); i++) {
		const struct fregs_offset *roff = &fregs_offsets[i];

		if (!strcmp(roff->name, name))
			return roff->offset;
	}

	return -EINVAL;
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */

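/*
 * Illustrative sketch (not part of the upstream file, kept compiled out): a
 * caller such as the kprobe event argument-fetch code can resolve a register
 * name to a byte offset within struct ftrace_regs and read the saved value
 * through it. The helper name below is hypothetical.
 */
#if 0
static unsigned long example_read_named_reg(struct ftrace_regs *fregs,
					    const char *name)
{
	int offset = ftrace_regs_query_register_offset(name);

	if (offset < 0)
		return 0;	/* unknown register name */

	/* The offset is in bytes from the base of struct ftrace_regs. */
	return *(unsigned long *)((void *)fregs + offset);
}
#endif
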
unsigned long ftrace_call_adjust(unsigned long addr)
{
	/*
	 * When using mcount, addr is the address of the mcount call
	 * instruction, and no adjustment is necessary.
	 */
	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS))
		return addr;

	/*
	 * When using patchable-function-entry without pre-function NOPs, addr
	 * is the address of the first NOP after the function entry point.
	 *
	 * The compiler has either generated:
	 *
	 * addr+00:	func:	NOP		// To be patched to MOV X9, LR
	 * addr+04:		NOP		// To be patched to BL <caller>
	 *
	 * Or:
	 *
	 * addr-04:		BTI	C
	 * addr+00:	func:	NOP		// To be patched to MOV X9, LR
	 * addr+04:		NOP		// To be patched to BL <caller>
	 *
	 * We must adjust addr to the address of the NOP which will be patched
	 * to `BL <caller>`, which is at `addr + 4` bytes in either case.
	 */
	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
		return addr + AARCH64_INSN_SIZE;

	/*
	 * When using patchable-function-entry with pre-function NOPs, addr is
	 * the address of the first pre-function NOP.
	 *
	 * Starting from an 8-byte aligned base, the compiler has either
	 * generated:
	 *
	 * addr+00:		NOP		// Literal (first 32 bits)
	 * addr+04:		NOP		// Literal (last 32 bits)
	 * addr+08:	func:	NOP		// To be patched to MOV X9, LR
	 * addr+12:		NOP		// To be patched to BL <caller>
	 *
	 * Or:
	 *
	 * addr+00:		NOP		// Literal (first 32 bits)
	 * addr+04:		NOP		// Literal (last 32 bits)
	 * addr+08:	func:	BTI	C
	 * addr+12:		NOP		// To be patched to MOV X9, LR
	 * addr+16:		NOP		// To be patched to BL <caller>
	 *
	 * We must adjust addr to the address of the NOP which will be patched
	 * to `BL <caller>`, which is at either addr+12 or addr+16 depending on
	 * whether there is a BTI.
	 */

	if (!IS_ALIGNED(addr, sizeof(unsigned long))) {
		WARN_RATELIMIT(1, "Misaligned patch-site %pS\n",
			       (void *)(addr + 8));
		return 0;
	}

	/* Skip the NOPs placed before the function entry point */
	addr += 2 * AARCH64_INSN_SIZE;

	/* Skip any BTI */
	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)) {
		u32 insn = le32_to_cpu(*(__le32 *)addr);

		if (aarch64_insn_is_bti(insn)) {
			addr += AARCH64_INSN_SIZE;
		} else if (insn != aarch64_insn_gen_nop()) {
			WARN_RATELIMIT(1, "unexpected insn in patch-site %pS: 0x%08x\n",
				       (void *)addr, insn);
		}
	}

	/* Skip the first NOP after function entry */
	addr += AARCH64_INSN_SIZE;

	return addr;
}

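/*
 * Worked example (illustrative, hypothetical addresses): with CALL_OPS and
 * BTI both enabled, a patch-site starting at an 8-byte-aligned addr of
 * 0x1000 looks like:
 *
 *   0x1000:	NOP		// ops pointer literal, first half
 *   0x1004:	NOP		// ops pointer literal, second half
 *   0x1008:	BTI C		// function entry
 *   0x100c:	NOP		// becomes MOV X9, LR
 *   0x1010:	NOP		// becomes BL <caller>
 *
 * ftrace_call_adjust(0x1000) skips the two literal NOPs (+8), the BTI (+4),
 * and the MOV slot (+4), returning 0x1010: the 'BL <caller>' slot, which is
 * what ends up in rec->ip.
 */
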
/*
 * Replace a single instruction, which may be a branch or NOP.
 * If @validate == true, the instruction being replaced is first read back
 * and checked against @old before anything is written.
 */
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
			      bool validate)
{
	u32 replaced;

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with aarch64_insn_*() which uses
	 * probe_kernel_*(), and make sure what we read is what we expected it
	 * to be before modifying it.
	 */
	if (validate) {
		if (aarch64_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}
	if (aarch64_insn_patch_text_nosync((void *)pc, new))
		return -EPERM;

	return 0;
}

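/*
 * Usage sketch (illustrative only, compiled out): enabling a callsite by
 * patching its 'BL <caller>' slot from a NOP to a branch-and-link, with
 * validation that the site still holds the NOP we expect. This mirrors what
 * ftrace_make_call() below does; the helper name is hypothetical.
 */
#if 0
static int example_enable_site(unsigned long pc, unsigned long target)
{
	u32 old = aarch64_insn_gen_nop();
	u32 new = aarch64_insn_gen_branch_imm(pc, target,
					      AARCH64_INSN_BRANCH_LINK);

	/* Fails with -EINVAL if the site no longer contains the old NOP. */
	return ftrace_modify_code(pc, old, new, true);
}
#endif
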
/*
 * Replace tracer function in ftrace_caller()
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	u32 new;

	/*
	 * When using CALL_OPS, the function to call is associated with the
	 * call site, and we don't have a global function pointer to update.
	 */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
		return 0;

	pc = (unsigned long)ftrace_call;
	new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
					  AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, 0, new, false);
}

static struct plt_entry *get_ftrace_plt(struct module *mod)
{
#ifdef CONFIG_MODULES
	struct plt_entry *plt = mod->arch.ftrace_trampolines;

	return &plt[FTRACE_PLT_IDX];
#else
	return NULL;
#endif
}

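/*
 * The ftrace_trampolines array lives in a section reserved at module load
 * time; module_finalize() points the entry at FTRACE_PLT_IDX at the ftrace
 * trampoline (see arch/arm64/kernel/module.c and module-plts.c).
 */
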
static bool reachable_by_bl(unsigned long addr, unsigned long pc)
{
	long offset = (long)addr - (long)pc;

	return offset >= -SZ_128M && offset < SZ_128M;
}

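/*
 * Background (for illustration): an A64 'BL' encodes a 26-bit signed
 * immediate scaled by 4, giving a reach of [pc - 128M, pc + 128M). For
 * example (hypothetical addresses), a callsite at 0xffff800008010000 can
 * reach a target at 0xffff800010000000 (offset +0x7ff0000, just under
 * 128M), while a module trampoline placed gigabytes away cannot be branched
 * to directly and must go via a PLT.
 */
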
/*
 * Find the address the callsite must branch to in order to reach '*addr'.
 *
 * Due to the limited range of 'BL' instructions, modules may be placed too far
 * away to branch directly and must use a PLT.
 *
 * Returns true when '*addr' contains a reachable target address, or has been
 * modified to contain a PLT address. Returns false otherwise.
 */
static bool ftrace_find_callable_addr(struct dyn_ftrace *rec,
				      struct module *mod,
				      unsigned long *addr)
{
	unsigned long pc = rec->ip;
	struct plt_entry *plt;

	/*
	 * If a custom trampoline is unreachable, rely on the ftrace_caller
	 * trampoline which knows how to indirectly reach that trampoline
	 * through ops->direct_call.
	 */
	if (*addr != FTRACE_ADDR && !reachable_by_bl(*addr, pc))
		*addr = FTRACE_ADDR;

	/*
	 * When the target is within range of the 'BL' instruction, use 'addr'
	 * as-is and branch to that directly.
	 */
	if (reachable_by_bl(*addr, pc))
		return true;

	/*
	 * When the target is outside of the range of a 'BL' instruction, we
	 * must use a PLT to reach it. We can only place PLTs for modules, and
	 * only when module PLT support is built-in.
	 */
	if (!IS_ENABLED(CONFIG_MODULES))
		return false;

	/*
	 * 'mod' is only set at module load time, but if we end up
	 * dealing with an out-of-range condition, we can assume it
	 * is due to a module being loaded far away from the kernel.
	 *
	 * NOTE: __module_text_address() must be called with preemption
	 * disabled, but we can rely on ftrace_lock to ensure that 'mod'
	 * retains its validity throughout the remainder of this code.
	 */
	if (!mod) {
		preempt_disable();
		mod = __module_text_address(pc);
		preempt_enable();
	}

	if (WARN_ON(!mod))
		return false;

	plt = get_ftrace_plt(mod);
	if (!plt) {
		pr_err("ftrace: no module PLT for %ps\n", (void *)*addr);
		return false;
	}

	*addr = (unsigned long)plt;
	return true;
}

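/*
 * Example flow (hypothetical addresses): a callsite at rec->ip in a module
 * loaded far above the kernel image wants to call a trampoline several GiB
 * away. The direct BL cannot span that distance, so *addr is first folded
 * back to FTRACE_ADDR; if even that is out of range, the callsite is pointed
 * at the module's own ftrace PLT entry, which materializes the full 64-bit
 * target in a scratch register and branches to it indirectly.
 */
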
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
static const struct ftrace_ops *arm64_rec_get_ops(struct dyn_ftrace *rec)
{
	const struct ftrace_ops *ops = NULL;

	if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
		ops = ftrace_find_unique_ops(rec);
		WARN_ON_ONCE(!ops);
	}

	if (!ops)
		ops = &ftrace_list_ops;

	return ops;
}

static int ftrace_rec_set_ops(const struct dyn_ftrace *rec,
			      const struct ftrace_ops *ops)
{
	unsigned long literal = ALIGN_DOWN(rec->ip - 12, 8);

	return aarch64_insn_write_literal_u64((void *)literal,
					      (unsigned long)ops);
}

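/*
 * Worked example (illustrative): continuing the CALL_OPS layouts shown in
 * ftrace_call_adjust(), rec->ip is the 'BL <caller>' slot. With a BTI that
 * is addr + 16, so ALIGN_DOWN(addr + 16 - 12, 8) == ALIGN_DOWN(addr + 4, 8)
 * == addr; without a BTI it is addr + 12, and ALIGN_DOWN(addr + 0, 8) ==
 * addr as well. Either way the write lands on the two pre-function NOPs
 * reserved for the 64-bit ops pointer literal.
 */
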
static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec)
{
	return ftrace_rec_set_ops(rec, &ftrace_nop_ops);
}

static int ftrace_rec_update_ops(struct dyn_ftrace *rec)
{
	return ftrace_rec_set_ops(rec, arm64_rec_get_ops(rec));
}
#else
static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec) { return 0; }
static int ftrace_rec_update_ops(struct dyn_ftrace *rec) { return 0; }
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS */

/*
 * Turn on the call to ftrace_caller() in instrumented function
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old, new;
	int ret;

	ret = ftrace_rec_update_ops(rec);
	if (ret)
		return ret;

	if (!ftrace_find_callable_addr(rec, NULL, &addr))
		return -EINVAL;

	old = aarch64_insn_gen_nop();
	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old, new;
	int ret;

	ret = ftrace_rec_update_ops(rec);
	if (ret)
		return ret;

	if (!ftrace_find_callable_addr(rec, NULL, &old_addr))
		return -EINVAL;
	if (!ftrace_find_callable_addr(rec, NULL, &addr))
		return -EINVAL;

	old = aarch64_insn_gen_branch_imm(pc, old_addr,
					  AARCH64_INSN_BRANCH_LINK);
	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS */

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
/*
 * The compiler has inserted two NOPs before the regular function prologue.
 * All instrumented functions follow the AAPCS, so x0-x8 and x19-x30 are live,
 * and x9-x18 are free for our use.
 *
 * At runtime we want to be able to swing a single NOP <-> BL to enable or
 * disable the ftrace call. The BL requires us to save the original LR value,
 * so here we insert a <MOV X9, LR> over the first NOP so the instructions
 * before the regular prologue are:
 *
 * | Compiled | Disabled   | Enabled    |
 * +----------+------------+------------+
 * | NOP      | MOV X9, LR | MOV X9, LR |
 * | NOP      | NOP        | BL <entry> |
 *
 * The LR value will be recovered by ftrace_caller, and restored into LR
 * before returning to the regular function prologue. When a function is not
 * being traced, the MOV is not harmful given x9 is not live per the AAPCS.
 *
 * Note: ftrace_process_locs() has pre-adjusted rec->ip to be the address of
 * the BL.
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long pc = rec->ip - AARCH64_INSN_SIZE;
	u32 old, new;
	int ret;

	ret = ftrace_rec_set_nop_ops(rec);
	if (ret)
		return ret;

	old = aarch64_insn_gen_nop();
	new = aarch64_insn_gen_move_reg(AARCH64_INSN_REG_9,
					AARCH64_INSN_REG_LR,
					AARCH64_INSN_VARIANT_64BIT);
	return ftrace_modify_code(pc, old, new, true);
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */

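/*
 * For reference (illustrative encodings): the three instructions involved in
 * the state machine above are
 *
 *   NOP		= 0xd503201f
 *   MOV X9, X30	= 0xaa1e03e9	(alias of ORR X9, XZR, X30)
 *   BL <imm26>		= 0x94000000 | ((offset / 4) & 0x03ffffff)
 *
 * Enabling or disabling tracing only ever swings the second slot between NOP
 * and BL; the MOV written here at init time stays in place thereafter.
 */
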
/*
 * Turn off the call to ftrace_caller() in instrumented function
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old = 0, new;
	int ret;

	new = aarch64_insn_gen_nop();

	ret = ftrace_rec_set_nop_ops(rec);
	if (ret)
		return ret;

	/*
	 * When using mcount, callsites in modules may have been initialized to
	 * call an arbitrary module PLT (which redirects to the _mcount stub)
	 * rather than the ftrace PLT we'll use at runtime (which redirects to
	 * the ftrace trampoline). We can ignore the old PLT when initializing
	 * the callsite.
	 *
	 * Note: 'mod' is only set at module load time.
	 */
	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS) && mod)
		return aarch64_insn_patch_text_nosync((void *)pc, new);

	if (!ftrace_find_callable_addr(rec, mod, &addr))
		return -EINVAL;

	old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}

void arch_ftrace_update_code(int command)
{
	command |= FTRACE_MAY_SLEEP;
	ftrace_modify_all_code(command);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * function_graph tracer expects ftrace_return_to_handler() to be called
 * on the way back to parent. For this purpose, this function is called
 * in _mcount() or ftrace_caller() to replace the return address (*parent)
 * on the call stack with return_to_handler.
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Note:
	 * No protection against faulting at *parent, which may be seen
	 * on other archs. It's unlikely on AArch64.
	 */
	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer,
				  (void *)frame_pointer)) {
		*parent = return_hooker;
	}
}

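/*
 * Illustrative flow: if traced_func() was called from caller(), the stack
 * slot at *parent initially holds caller()'s return address. This function
 * records that address via function_graph_enter() and rewrites *parent so
 * that traced_func() "returns" into return_to_handler, which runs the exit
 * handler and then jumps back to the real return address.
 */
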
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	prepare_ftrace_return(ip, &fregs->lr, fregs->fp);
}
#else
/*
 * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
 * depending on @enable.
 */
static int ftrace_modify_graph_caller(bool enable)
{
	unsigned long pc = (unsigned long)&ftrace_graph_call;
	u32 branch, nop;

	branch = aarch64_insn_gen_branch_imm(pc,
					     (unsigned long)ftrace_graph_caller,
					     AARCH64_INSN_BRANCH_NOLINK);
	nop = aarch64_insn_gen_nop();

	if (enable)
		return ftrace_modify_code(pc, nop, branch, true);
	else
		return ftrace_modify_code(pc, branch, nop, true);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */