xref: /openbmc/linux/arch/arm64/kernel/ftrace.c (revision 1ac731c529cd4d6adbce134754b51ff7d822b145)
1d2912cb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2819e50e2SAKASHI Takahiro /*
3819e50e2SAKASHI Takahiro  * arch/arm64/kernel/ftrace.c
4819e50e2SAKASHI Takahiro  *
5819e50e2SAKASHI Takahiro  * Copyright (C) 2013 Linaro Limited
6819e50e2SAKASHI Takahiro  * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
7819e50e2SAKASHI Takahiro  */
8819e50e2SAKASHI Takahiro 
9819e50e2SAKASHI Takahiro #include <linux/ftrace.h>
10e71a4e1bSArd Biesheuvel #include <linux/module.h>
11819e50e2SAKASHI Takahiro #include <linux/swab.h>
12819e50e2SAKASHI Takahiro #include <linux/uaccess.h>
13819e50e2SAKASHI Takahiro 
14819e50e2SAKASHI Takahiro #include <asm/cacheflush.h>
15e71a4e1bSArd Biesheuvel #include <asm/debug-monitors.h>
16819e50e2SAKASHI Takahiro #include <asm/ftrace.h>
17819e50e2SAKASHI Takahiro #include <asm/insn.h>
1878b92c73SMark Rutland #include <asm/patching.h>
19819e50e2SAKASHI Takahiro 
2026299b3fSMark Rutland #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
2126299b3fSMark Rutland struct fregs_offset {
2226299b3fSMark Rutland 	const char *name;
2326299b3fSMark Rutland 	int offset;
2426299b3fSMark Rutland };
2526299b3fSMark Rutland 
2626299b3fSMark Rutland #define FREGS_OFFSET(n, field)				\
2726299b3fSMark Rutland {							\
2826299b3fSMark Rutland 	.name = n,					\
2926299b3fSMark Rutland 	.offset = offsetof(struct ftrace_regs, field),	\
3026299b3fSMark Rutland }
3126299b3fSMark Rutland 
3226299b3fSMark Rutland static const struct fregs_offset fregs_offsets[] = {
3326299b3fSMark Rutland 	FREGS_OFFSET("x0", regs[0]),
3426299b3fSMark Rutland 	FREGS_OFFSET("x1", regs[1]),
3526299b3fSMark Rutland 	FREGS_OFFSET("x2", regs[2]),
3626299b3fSMark Rutland 	FREGS_OFFSET("x3", regs[3]),
3726299b3fSMark Rutland 	FREGS_OFFSET("x4", regs[4]),
3826299b3fSMark Rutland 	FREGS_OFFSET("x5", regs[5]),
3926299b3fSMark Rutland 	FREGS_OFFSET("x6", regs[6]),
4026299b3fSMark Rutland 	FREGS_OFFSET("x7", regs[7]),
4126299b3fSMark Rutland 	FREGS_OFFSET("x8", regs[8]),
4226299b3fSMark Rutland 
4326299b3fSMark Rutland 	FREGS_OFFSET("x29", fp),
4426299b3fSMark Rutland 	FREGS_OFFSET("x30", lr),
4526299b3fSMark Rutland 	FREGS_OFFSET("lr", lr),
4626299b3fSMark Rutland 
4726299b3fSMark Rutland 	FREGS_OFFSET("sp", sp),
4826299b3fSMark Rutland 	FREGS_OFFSET("pc", pc),
4926299b3fSMark Rutland };
5026299b3fSMark Rutland 
ftrace_regs_query_register_offset(const char * name)5126299b3fSMark Rutland int ftrace_regs_query_register_offset(const char *name)
5226299b3fSMark Rutland {
5326299b3fSMark Rutland 	for (int i = 0; i < ARRAY_SIZE(fregs_offsets); i++) {
5426299b3fSMark Rutland 		const struct fregs_offset *roff = &fregs_offsets[i];
5526299b3fSMark Rutland 		if (!strcmp(roff->name, name))
5626299b3fSMark Rutland 			return roff->offset;
5726299b3fSMark Rutland 	}
5826299b3fSMark Rutland 
5926299b3fSMark Rutland 	return -EINVAL;
6026299b3fSMark Rutland }
6126299b3fSMark Rutland #endif
6226299b3fSMark Rutland 
ftrace_call_adjust(unsigned long addr)63baaf553dSMark Rutland unsigned long ftrace_call_adjust(unsigned long addr)
64baaf553dSMark Rutland {
65baaf553dSMark Rutland 	/*
66baaf553dSMark Rutland 	 * When using mcount, addr is the address of the mcount call
67baaf553dSMark Rutland 	 * instruction, and no adjustment is necessary.
68baaf553dSMark Rutland 	 */
69baaf553dSMark Rutland 	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS))
70baaf553dSMark Rutland 		return addr;
71baaf553dSMark Rutland 
72baaf553dSMark Rutland 	/*
73baaf553dSMark Rutland 	 * When using patchable-function-entry without pre-function NOPS, addr
74baaf553dSMark Rutland 	 * is the address of the first NOP after the function entry point.
75baaf553dSMark Rutland 	 *
76baaf553dSMark Rutland 	 * The compiler has either generated:
77baaf553dSMark Rutland 	 *
78baaf553dSMark Rutland 	 * addr+00:	func:	NOP		// To be patched to MOV X9, LR
79baaf553dSMark Rutland 	 * addr+04:		NOP		// To be patched to BL <caller>
80baaf553dSMark Rutland 	 *
81baaf553dSMark Rutland 	 * Or:
82baaf553dSMark Rutland 	 *
83baaf553dSMark Rutland 	 * addr-04:		BTI	C
84baaf553dSMark Rutland 	 * addr+00:	func:	NOP		// To be patched to MOV X9, LR
85baaf553dSMark Rutland 	 * addr+04:		NOP		// To be patched to BL <caller>
86baaf553dSMark Rutland 	 *
87baaf553dSMark Rutland 	 * We must adjust addr to the address of the NOP which will be patched
88baaf553dSMark Rutland 	 * to `BL <caller>`, which is at `addr + 4` bytes in either case.
89baaf553dSMark Rutland 	 *
90baaf553dSMark Rutland 	 */
91baaf553dSMark Rutland 	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
92baaf553dSMark Rutland 		return addr + AARCH64_INSN_SIZE;
93baaf553dSMark Rutland 
94baaf553dSMark Rutland 	/*
95baaf553dSMark Rutland 	 * When using patchable-function-entry with pre-function NOPs, addr is
96baaf553dSMark Rutland 	 * the address of the first pre-function NOP.
97baaf553dSMark Rutland 	 *
98baaf553dSMark Rutland 	 * Starting from an 8-byte aligned base, the compiler has either
99baaf553dSMark Rutland 	 * generated:
100baaf553dSMark Rutland 	 *
101baaf553dSMark Rutland 	 * addr+00:		NOP		// Literal (first 32 bits)
102baaf553dSMark Rutland 	 * addr+04:		NOP		// Literal (last 32 bits)
103baaf553dSMark Rutland 	 * addr+08:	func:	NOP		// To be patched to MOV X9, LR
104baaf553dSMark Rutland 	 * addr+12:		NOP		// To be patched to BL <caller>
105baaf553dSMark Rutland 	 *
106baaf553dSMark Rutland 	 * Or:
107baaf553dSMark Rutland 	 *
108baaf553dSMark Rutland 	 * addr+00:		NOP		// Literal (first 32 bits)
109baaf553dSMark Rutland 	 * addr+04:		NOP		// Literal (last 32 bits)
110baaf553dSMark Rutland 	 * addr+08:	func:	BTI	C
111baaf553dSMark Rutland 	 * addr+12:		NOP		// To be patched to MOV X9, LR
112baaf553dSMark Rutland 	 * addr+16:		NOP		// To be patched to BL <caller>
113baaf553dSMark Rutland 	 *
114baaf553dSMark Rutland 	 * We must adjust addr to the address of the NOP which will be patched
115baaf553dSMark Rutland 	 * to `BL <caller>`, which is at either addr+12 or addr+16 depending on
116baaf553dSMark Rutland 	 * whether there is a BTI.
117baaf553dSMark Rutland 	 */
118baaf553dSMark Rutland 
119baaf553dSMark Rutland 	if (!IS_ALIGNED(addr, sizeof(unsigned long))) {
120baaf553dSMark Rutland 		WARN_RATELIMIT(1, "Misaligned patch-site %pS\n",
121baaf553dSMark Rutland 			       (void *)(addr + 8));
122baaf553dSMark Rutland 		return 0;
123baaf553dSMark Rutland 	}
124baaf553dSMark Rutland 
125baaf553dSMark Rutland 	/* Skip the NOPs placed before the function entry point */
126baaf553dSMark Rutland 	addr += 2 * AARCH64_INSN_SIZE;
127baaf553dSMark Rutland 
128baaf553dSMark Rutland 	/* Skip any BTI */
129baaf553dSMark Rutland 	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)) {
130baaf553dSMark Rutland 		u32 insn = le32_to_cpu(*(__le32 *)addr);
131baaf553dSMark Rutland 
132baaf553dSMark Rutland 		if (aarch64_insn_is_bti(insn)) {
133baaf553dSMark Rutland 			addr += AARCH64_INSN_SIZE;
134baaf553dSMark Rutland 		} else if (insn != aarch64_insn_gen_nop()) {
135baaf553dSMark Rutland 			WARN_RATELIMIT(1, "unexpected insn in patch-site %pS: 0x%08x\n",
136baaf553dSMark Rutland 				       (void *)addr, insn);
137baaf553dSMark Rutland 		}
138baaf553dSMark Rutland 	}
139baaf553dSMark Rutland 
140baaf553dSMark Rutland 	/* Skip the first NOP after function entry */
141baaf553dSMark Rutland 	addr += AARCH64_INSN_SIZE;
142baaf553dSMark Rutland 
143baaf553dSMark Rutland 	return addr;
144baaf553dSMark Rutland }
145baaf553dSMark Rutland 
146bd7d38dbSAKASHI Takahiro /*
147bd7d38dbSAKASHI Takahiro  * Replace a single instruction, which may be a branch or NOP.
148bd7d38dbSAKASHI Takahiro  * If @validate == true, a replaced instruction is checked against 'old'.
149bd7d38dbSAKASHI Takahiro  */
ftrace_modify_code(unsigned long pc,u32 old,u32 new,bool validate)150bd7d38dbSAKASHI Takahiro static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
151bd7d38dbSAKASHI Takahiro 			      bool validate)
152bd7d38dbSAKASHI Takahiro {
153bd7d38dbSAKASHI Takahiro 	u32 replaced;
154bd7d38dbSAKASHI Takahiro 
155bd7d38dbSAKASHI Takahiro 	/*
156bd7d38dbSAKASHI Takahiro 	 * Note:
157004ab584SLi Bin 	 * We are paranoid about modifying text, as if a bug were to happen, it
158004ab584SLi Bin 	 * could cause us to read or write to someplace that could cause harm.
159004ab584SLi Bin 	 * Carefully read and modify the code with aarch64_insn_*() which uses
160004ab584SLi Bin 	 * probe_kernel_*(), and make sure what we read is what we expected it
161004ab584SLi Bin 	 * to be before modifying it.
162bd7d38dbSAKASHI Takahiro 	 */
163bd7d38dbSAKASHI Takahiro 	if (validate) {
164bd7d38dbSAKASHI Takahiro 		if (aarch64_insn_read((void *)pc, &replaced))
165bd7d38dbSAKASHI Takahiro 			return -EFAULT;
166bd7d38dbSAKASHI Takahiro 
167bd7d38dbSAKASHI Takahiro 		if (replaced != old)
168bd7d38dbSAKASHI Takahiro 			return -EINVAL;
169bd7d38dbSAKASHI Takahiro 	}
170bd7d38dbSAKASHI Takahiro 	if (aarch64_insn_patch_text_nosync((void *)pc, new))
171bd7d38dbSAKASHI Takahiro 		return -EPERM;
172bd7d38dbSAKASHI Takahiro 
173bd7d38dbSAKASHI Takahiro 	return 0;
174bd7d38dbSAKASHI Takahiro }
175bd7d38dbSAKASHI Takahiro 
176bd7d38dbSAKASHI Takahiro /*
177bd7d38dbSAKASHI Takahiro  * Replace tracer function in ftrace_caller()
178bd7d38dbSAKASHI Takahiro  */
ftrace_update_ftrace_func(ftrace_func_t func)179bd7d38dbSAKASHI Takahiro int ftrace_update_ftrace_func(ftrace_func_t func)
180bd7d38dbSAKASHI Takahiro {
181bd7d38dbSAKASHI Takahiro 	unsigned long pc;
182bd7d38dbSAKASHI Takahiro 	u32 new;
183bd7d38dbSAKASHI Takahiro 
184baaf553dSMark Rutland 	/*
185baaf553dSMark Rutland 	 * When using CALL_OPS, the function to call is associated with the
186baaf553dSMark Rutland 	 * call site, and we don't have a global function pointer to update.
187baaf553dSMark Rutland 	 */
188baaf553dSMark Rutland 	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
189baaf553dSMark Rutland 		return 0;
190baaf553dSMark Rutland 
191607289a7SSami Tolvanen 	pc = (unsigned long)ftrace_call;
1929f1ae759SCatalin Marinas 	new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
1939f1ae759SCatalin Marinas 					  AARCH64_INSN_BRANCH_LINK);
194bd7d38dbSAKASHI Takahiro 
195bd7d38dbSAKASHI Takahiro 	return ftrace_modify_code(pc, 0, new, false);
196bd7d38dbSAKASHI Takahiro }
197bd7d38dbSAKASHI Takahiro 
get_ftrace_plt(struct module * mod)198*0f59dca6SFlorent Revest static struct plt_entry *get_ftrace_plt(struct module *mod)
1993b23e499STorsten Duwe {
2007f08ae53SMark Rutland #ifdef CONFIG_MODULES
2013b23e499STorsten Duwe 	struct plt_entry *plt = mod->arch.ftrace_trampolines;
2023b23e499STorsten Duwe 
2033b23e499STorsten Duwe 	return &plt[FTRACE_PLT_IDX];
204*0f59dca6SFlorent Revest #else
2053b23e499STorsten Duwe 	return NULL;
206*0f59dca6SFlorent Revest #endif
2073b23e499STorsten Duwe }
2083b23e499STorsten Duwe 
reachable_by_bl(unsigned long addr,unsigned long pc)2092aa6ac03SFlorent Revest static bool reachable_by_bl(unsigned long addr, unsigned long pc)
2102aa6ac03SFlorent Revest {
2112aa6ac03SFlorent Revest 	long offset = (long)addr - (long)pc;
2122aa6ac03SFlorent Revest 
2132aa6ac03SFlorent Revest 	return offset >= -SZ_128M && offset < SZ_128M;
2142aa6ac03SFlorent Revest }
2152aa6ac03SFlorent Revest 
216bd7d38dbSAKASHI Takahiro /*
217a6253579SMark Rutland  * Find the address the callsite must branch to in order to reach '*addr'.
218a6253579SMark Rutland  *
219a6253579SMark Rutland  * Due to the limited range of 'BL' instructions, modules may be placed too far
220a6253579SMark Rutland  * away to branch directly and must use a PLT.
221a6253579SMark Rutland  *
222a6253579SMark Rutland  * Returns true when '*addr' contains a reachable target address, or has been
223a6253579SMark Rutland  * modified to contain a PLT address. Returns false otherwise.
224a6253579SMark Rutland  */
ftrace_find_callable_addr(struct dyn_ftrace * rec,struct module * mod,unsigned long * addr)225a6253579SMark Rutland static bool ftrace_find_callable_addr(struct dyn_ftrace *rec,
226a6253579SMark Rutland 				      struct module *mod,
227a6253579SMark Rutland 				      unsigned long *addr)
228a6253579SMark Rutland {
229a6253579SMark Rutland 	unsigned long pc = rec->ip;
230a6253579SMark Rutland 	struct plt_entry *plt;
231a6253579SMark Rutland 
232a6253579SMark Rutland 	/*
2332aa6ac03SFlorent Revest 	 * If a custom trampoline is unreachable, rely on the ftrace_caller
2342aa6ac03SFlorent Revest 	 * trampoline which knows how to indirectly reach that trampoline
2352aa6ac03SFlorent Revest 	 * through ops->direct_call.
2362aa6ac03SFlorent Revest 	 */
2372aa6ac03SFlorent Revest 	if (*addr != FTRACE_ADDR && !reachable_by_bl(*addr, pc))
2382aa6ac03SFlorent Revest 		*addr = FTRACE_ADDR;
2392aa6ac03SFlorent Revest 
2402aa6ac03SFlorent Revest 	/*
241a6253579SMark Rutland 	 * When the target is within range of the 'BL' instruction, use 'addr'
242a6253579SMark Rutland 	 * as-is and branch to that directly.
243a6253579SMark Rutland 	 */
2442aa6ac03SFlorent Revest 	if (reachable_by_bl(*addr, pc))
245a6253579SMark Rutland 		return true;
246a6253579SMark Rutland 
247a6253579SMark Rutland 	/*
248a6253579SMark Rutland 	 * When the target is outside of the range of a 'BL' instruction, we
249a6253579SMark Rutland 	 * must use a PLT to reach it. We can only place PLTs for modules, and
250a6253579SMark Rutland 	 * only when module PLT support is built-in.
251a6253579SMark Rutland 	 */
252a6253579SMark Rutland 	if (!IS_ENABLED(CONFIG_MODULES))
253a6253579SMark Rutland 		return false;
254a6253579SMark Rutland 
255a6253579SMark Rutland 	/*
256a6253579SMark Rutland 	 * 'mod' is only set at module load time, but if we end up
257a6253579SMark Rutland 	 * dealing with an out-of-range condition, we can assume it
258a6253579SMark Rutland 	 * is due to a module being loaded far away from the kernel.
259a6253579SMark Rutland 	 *
260a6253579SMark Rutland 	 * NOTE: __module_text_address() must be called with preemption
261a6253579SMark Rutland 	 * disabled, but we can rely on ftrace_lock to ensure that 'mod'
262a6253579SMark Rutland 	 * retains its validity throughout the remainder of this code.
263a6253579SMark Rutland 	 */
264a6253579SMark Rutland 	if (!mod) {
265a6253579SMark Rutland 		preempt_disable();
266a6253579SMark Rutland 		mod = __module_text_address(pc);
267a6253579SMark Rutland 		preempt_enable();
268a6253579SMark Rutland 	}
269a6253579SMark Rutland 
270a6253579SMark Rutland 	if (WARN_ON(!mod))
271a6253579SMark Rutland 		return false;
272a6253579SMark Rutland 
273*0f59dca6SFlorent Revest 	plt = get_ftrace_plt(mod);
274a6253579SMark Rutland 	if (!plt) {
275a6253579SMark Rutland 		pr_err("ftrace: no module PLT for %ps\n", (void *)*addr);
276a6253579SMark Rutland 		return false;
277a6253579SMark Rutland 	}
278a6253579SMark Rutland 
279a6253579SMark Rutland 	*addr = (unsigned long)plt;
280a6253579SMark Rutland 	return true;
281a6253579SMark Rutland }
282a6253579SMark Rutland 
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
/*
 * With CALL_OPS, each patch-site has an ftrace_ops pointer stored in a
 * literal before the function entry. These helpers pick and write the
 * ops pointer for a given record.
 */
static const struct ftrace_ops *arm64_rec_get_ops(struct dyn_ftrace *rec)
{
	const struct ftrace_ops *ops = NULL;

	if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
		ops = ftrace_find_unique_ops(rec);
		WARN_ON_ONCE(!ops);
	}

	/* Fall back to the list dispatcher when no unique ops is in use */
	return ops ?: &ftrace_list_ops;
}

static int ftrace_rec_set_ops(const struct dyn_ftrace *rec,
			      const struct ftrace_ops *ops)
{
	/* The ops literal lives in the 8-byte-aligned slot before the BL */
	unsigned long literal = ALIGN_DOWN(rec->ip - 12, 8);

	return aarch64_insn_write_literal_u64((void *)literal,
					      (unsigned long)ops);
}

static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec)
{
	return ftrace_rec_set_ops(rec, &ftrace_nop_ops);
}

static int ftrace_rec_update_ops(struct dyn_ftrace *rec)
{
	return ftrace_rec_set_ops(rec, arm64_rec_get_ops(rec));
}
#else
static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec) { return 0; }
static int ftrace_rec_update_ops(struct dyn_ftrace *rec) { return 0; }
#endif
320baaf553dSMark Rutland 
321a6253579SMark Rutland /*
322bd7d38dbSAKASHI Takahiro  * Turn on the call to ftrace_caller() in instrumented function
323bd7d38dbSAKASHI Takahiro  */
ftrace_make_call(struct dyn_ftrace * rec,unsigned long addr)324bd7d38dbSAKASHI Takahiro int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
325bd7d38dbSAKASHI Takahiro {
326bd7d38dbSAKASHI Takahiro 	unsigned long pc = rec->ip;
327bd7d38dbSAKASHI Takahiro 	u32 old, new;
328baaf553dSMark Rutland 	int ret;
329baaf553dSMark Rutland 
330baaf553dSMark Rutland 	ret = ftrace_rec_update_ops(rec);
331baaf553dSMark Rutland 	if (ret)
332baaf553dSMark Rutland 		return ret;
33368764420SWill Deacon 
334a6253579SMark Rutland 	if (!ftrace_find_callable_addr(rec, NULL, &addr))
3357f08ae53SMark Rutland 		return -EINVAL;
3367f08ae53SMark Rutland 
337bd7d38dbSAKASHI Takahiro 	old = aarch64_insn_gen_nop();
3389f1ae759SCatalin Marinas 	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
339bd7d38dbSAKASHI Takahiro 
340bd7d38dbSAKASHI Takahiro 	return ftrace_modify_code(pc, old, new, true);
341bd7d38dbSAKASHI Takahiro }
342bd7d38dbSAKASHI Takahiro 
343baaf553dSMark Rutland #ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
ftrace_modify_call(struct dyn_ftrace * rec,unsigned long old_addr,unsigned long addr)344baaf553dSMark Rutland int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
345baaf553dSMark Rutland 		       unsigned long addr)
346baaf553dSMark Rutland {
3472aa6ac03SFlorent Revest 	unsigned long pc = rec->ip;
3482aa6ac03SFlorent Revest 	u32 old, new;
3492aa6ac03SFlorent Revest 	int ret;
3502aa6ac03SFlorent Revest 
3512aa6ac03SFlorent Revest 	ret = ftrace_rec_set_ops(rec, arm64_rec_get_ops(rec));
3522aa6ac03SFlorent Revest 	if (ret)
3532aa6ac03SFlorent Revest 		return ret;
3542aa6ac03SFlorent Revest 
3552aa6ac03SFlorent Revest 	if (!ftrace_find_callable_addr(rec, NULL, &old_addr))
356baaf553dSMark Rutland 		return -EINVAL;
3572aa6ac03SFlorent Revest 	if (!ftrace_find_callable_addr(rec, NULL, &addr))
358baaf553dSMark Rutland 		return -EINVAL;
359baaf553dSMark Rutland 
3602aa6ac03SFlorent Revest 	old = aarch64_insn_gen_branch_imm(pc, old_addr,
3612aa6ac03SFlorent Revest 					  AARCH64_INSN_BRANCH_LINK);
3622aa6ac03SFlorent Revest 	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
3632aa6ac03SFlorent Revest 
3642aa6ac03SFlorent Revest 	return ftrace_modify_code(pc, old, new, true);
365baaf553dSMark Rutland }
366baaf553dSMark Rutland #endif
367baaf553dSMark Rutland 
36826299b3fSMark Rutland #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
3693b23e499STorsten Duwe /*
3703b23e499STorsten Duwe  * The compiler has inserted two NOPs before the regular function prologue.
3713b23e499STorsten Duwe  * All instrumented functions follow the AAPCS, so x0-x8 and x19-x30 are live,
3723b23e499STorsten Duwe  * and x9-x18 are free for our use.
3733b23e499STorsten Duwe  *
3743b23e499STorsten Duwe  * At runtime we want to be able to swing a single NOP <-> BL to enable or
3753b23e499STorsten Duwe  * disable the ftrace call. The BL requires us to save the original LR value,
3763b23e499STorsten Duwe  * so here we insert a <MOV X9, LR> over the first NOP so the instructions
3773b23e499STorsten Duwe  * before the regular prologue are:
3783b23e499STorsten Duwe  *
3793b23e499STorsten Duwe  * | Compiled | Disabled   | Enabled    |
3803b23e499STorsten Duwe  * +----------+------------+------------+
3813b23e499STorsten Duwe  * | NOP      | MOV X9, LR | MOV X9, LR |
3823b23e499STorsten Duwe  * | NOP      | NOP        | BL <entry> |
3833b23e499STorsten Duwe  *
38490955d77SMark Rutland  * The LR value will be recovered by ftrace_caller, and restored into LR
3853b23e499STorsten Duwe  * before returning to the regular function prologue. When a function is not
3863b23e499STorsten Duwe  * being traced, the MOV is not harmful given x9 is not live per the AAPCS.
3873b23e499STorsten Duwe  *
3883b23e499STorsten Duwe  * Note: ftrace_process_locs() has pre-adjusted rec->ip to be the address of
3893b23e499STorsten Duwe  * the BL.
3903b23e499STorsten Duwe  */
ftrace_init_nop(struct module * mod,struct dyn_ftrace * rec)3913b23e499STorsten Duwe int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
3923b23e499STorsten Duwe {
3933b23e499STorsten Duwe 	unsigned long pc = rec->ip - AARCH64_INSN_SIZE;
3943b23e499STorsten Duwe 	u32 old, new;
395baaf553dSMark Rutland 	int ret;
396baaf553dSMark Rutland 
397baaf553dSMark Rutland 	ret = ftrace_rec_set_nop_ops(rec);
398baaf553dSMark Rutland 	if (ret)
399baaf553dSMark Rutland 		return ret;
4003b23e499STorsten Duwe 
4013b23e499STorsten Duwe 	old = aarch64_insn_gen_nop();
4023b23e499STorsten Duwe 	new = aarch64_insn_gen_move_reg(AARCH64_INSN_REG_9,
4033b23e499STorsten Duwe 					AARCH64_INSN_REG_LR,
4043b23e499STorsten Duwe 					AARCH64_INSN_VARIANT_64BIT);
4053b23e499STorsten Duwe 	return ftrace_modify_code(pc, old, new, true);
4063b23e499STorsten Duwe }
4073b23e499STorsten Duwe #endif
4083b23e499STorsten Duwe 
409bd7d38dbSAKASHI Takahiro /*
410bd7d38dbSAKASHI Takahiro  * Turn off the call to ftrace_caller() in instrumented function
411bd7d38dbSAKASHI Takahiro  */
ftrace_make_nop(struct module * mod,struct dyn_ftrace * rec,unsigned long addr)412bd7d38dbSAKASHI Takahiro int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
413bd7d38dbSAKASHI Takahiro 		    unsigned long addr)
414bd7d38dbSAKASHI Takahiro {
415bd7d38dbSAKASHI Takahiro 	unsigned long pc = rec->ip;
416f8af0b36SArd Biesheuvel 	u32 old = 0, new;
417baaf553dSMark Rutland 	int ret;
41868764420SWill Deacon 
4198cfb0857SMark Rutland 	new = aarch64_insn_gen_nop();
4208cfb0857SMark Rutland 
421baaf553dSMark Rutland 	ret = ftrace_rec_set_nop_ops(rec);
422baaf553dSMark Rutland 	if (ret)
423baaf553dSMark Rutland 		return ret;
424baaf553dSMark Rutland 
4258cfb0857SMark Rutland 	/*
4268cfb0857SMark Rutland 	 * When using mcount, callsites in modules may have been initalized to
4278cfb0857SMark Rutland 	 * call an arbitrary module PLT (which redirects to the _mcount stub)
4288cfb0857SMark Rutland 	 * rather than the ftrace PLT we'll use at runtime (which redirects to
4298cfb0857SMark Rutland 	 * the ftrace trampoline). We can ignore the old PLT when initializing
4308cfb0857SMark Rutland 	 * the callsite.
4318cfb0857SMark Rutland 	 *
4328cfb0857SMark Rutland 	 * Note: 'mod' is only set at module load time.
4338cfb0857SMark Rutland 	 */
43426299b3fSMark Rutland 	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS) && mod)
4358cfb0857SMark Rutland 		return aarch64_insn_patch_text_nosync((void *)pc, new);
4368cfb0857SMark Rutland 
4378cfb0857SMark Rutland 	if (!ftrace_find_callable_addr(rec, mod, &addr))
4388cfb0857SMark Rutland 		return -EINVAL;
439a6253579SMark Rutland 
4407f08ae53SMark Rutland 	old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
4417f08ae53SMark Rutland 
442a6253579SMark Rutland 	return ftrace_modify_code(pc, old, new, true);
443bd7d38dbSAKASHI Takahiro }
444a6253579SMark Rutland 
arch_ftrace_update_code(int command)445bd7d38dbSAKASHI Takahiro void arch_ftrace_update_code(int command)
446bd7d38dbSAKASHI Takahiro {
44781a6a146SLi Bin 	command |= FTRACE_MAY_SLEEP;
44881a6a146SLi Bin 	ftrace_modify_all_code(command);
449e4c07bf9SSteven Rostedt (VMware) }
45081a6a146SLi Bin 
45181a6a146SLi Bin #ifdef CONFIG_FUNCTION_GRAPH_TRACER
452bd7d38dbSAKASHI Takahiro /*
453819e50e2SAKASHI Takahiro  * function_graph tracer expects ftrace_return_to_handler() to be called
454819e50e2SAKASHI Takahiro  * on the way back to parent. For this purpose, this function is called
455819e50e2SAKASHI Takahiro  * in _mcount() or ftrace_caller() to replace return address (*parent) on
456819e50e2SAKASHI Takahiro  * the call stack to return_to_handler.
457819e50e2SAKASHI Takahiro  */
prepare_ftrace_return(unsigned long self_addr,unsigned long * parent,unsigned long frame_pointer)458819e50e2SAKASHI Takahiro void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
459819e50e2SAKASHI Takahiro 			   unsigned long frame_pointer)
4607dc48bf9SMark Rutland {
461819e50e2SAKASHI Takahiro 	unsigned long return_hooker = (unsigned long)&return_to_handler;
462819e50e2SAKASHI Takahiro 	unsigned long old;
463819e50e2SAKASHI Takahiro 
464819e50e2SAKASHI Takahiro 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
465819e50e2SAKASHI Takahiro 		return;
466819e50e2SAKASHI Takahiro 
467819e50e2SAKASHI Takahiro 	/*
468819e50e2SAKASHI Takahiro 	 * Note:
469819e50e2SAKASHI Takahiro 	 * No protection against faulting at *parent, which may be seen
470819e50e2SAKASHI Takahiro 	 * on other archs. It's unlikely on AArch64.
471819e50e2SAKASHI Takahiro 	 */
472819e50e2SAKASHI Takahiro 	old = *parent;
473819e50e2SAKASHI Takahiro 
474819e50e2SAKASHI Takahiro 	if (!function_graph_enter(old, self_addr, frame_pointer,
475819e50e2SAKASHI Takahiro 	    (void *)frame_pointer)) {
476c6d3cd32SMark Rutland 		*parent = return_hooker;
477c6d3cd32SMark Rutland 	}
47879fdee9bSAKASHI Takahiro }
479819e50e2SAKASHI Takahiro 
480c6d3cd32SMark Rutland #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
ftrace_graph_func(unsigned long ip,unsigned long parent_ip,struct ftrace_ops * op,struct ftrace_regs * fregs)481bd7d38dbSAKASHI Takahiro void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
48226299b3fSMark Rutland 		       struct ftrace_ops *op, struct ftrace_regs *fregs)
483c4a0ebf8SChengming Zhou {
484c4a0ebf8SChengming Zhou 	prepare_ftrace_return(ip, &fregs->lr, fregs->fp);
485c4a0ebf8SChengming Zhou }
48626299b3fSMark Rutland #else
487c4a0ebf8SChengming Zhou /*
488c4a0ebf8SChengming Zhou  * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
489bd7d38dbSAKASHI Takahiro  * depending on @enable.
490bd7d38dbSAKASHI Takahiro  */
ftrace_modify_graph_caller(bool enable)491bd7d38dbSAKASHI Takahiro static int ftrace_modify_graph_caller(bool enable)
492bd7d38dbSAKASHI Takahiro {
493bd7d38dbSAKASHI Takahiro 	unsigned long pc = (unsigned long)&ftrace_graph_call;
494bd7d38dbSAKASHI Takahiro 	u32 branch, nop;
495bd7d38dbSAKASHI Takahiro 
496bd7d38dbSAKASHI Takahiro 	branch = aarch64_insn_gen_branch_imm(pc,
497bd7d38dbSAKASHI Takahiro 					     (unsigned long)ftrace_graph_caller,
498bd7d38dbSAKASHI Takahiro 					     AARCH64_INSN_BRANCH_NOLINK);
4999f1ae759SCatalin Marinas 	nop = aarch64_insn_gen_nop();
500d0d62230SPratyush Anand 
501bd7d38dbSAKASHI Takahiro 	if (enable)
502bd7d38dbSAKASHI Takahiro 		return ftrace_modify_code(pc, nop, branch, true);
503bd7d38dbSAKASHI Takahiro 	else
504bd7d38dbSAKASHI Takahiro 		return ftrace_modify_code(pc, branch, nop, true);
505bd7d38dbSAKASHI Takahiro }
506bd7d38dbSAKASHI Takahiro 
ftrace_enable_ftrace_graph_caller(void)507bd7d38dbSAKASHI Takahiro int ftrace_enable_ftrace_graph_caller(void)
508bd7d38dbSAKASHI Takahiro {
509bd7d38dbSAKASHI Takahiro 	return ftrace_modify_graph_caller(true);
510bd7d38dbSAKASHI Takahiro }
511bd7d38dbSAKASHI Takahiro 
ftrace_disable_ftrace_graph_caller(void)512bd7d38dbSAKASHI Takahiro int ftrace_disable_ftrace_graph_caller(void)
513bd7d38dbSAKASHI Takahiro {
514bd7d38dbSAKASHI Takahiro 	return ftrace_modify_graph_caller(false);
515bd7d38dbSAKASHI Takahiro }
516bd7d38dbSAKASHI Takahiro #endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
517bd7d38dbSAKASHI Takahiro #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
51826299b3fSMark Rutland