/* xref: /openbmc/linux/arch/arm64/kernel/entry-ftrace.S (revision 9d5dbfe0) */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm64/kernel/entry-ftrace.S
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */

#include <linux/linkage.h>
#include <linux/cfi_types.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/ftrace.h>
#include <asm/insn.h>

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
/*
 * Due to -fpatchable-function-entry=2, the compiler has placed two NOPs before
 * the regular function prologue. For an enabled callsite, ftrace_init_nop() and
 * ftrace_make_call() have patched those NOPs to:
 *
 *	MOV	X9, LR
 *	BL	ftrace_caller
 *
 * Each instrumented function follows the AAPCS, so here x0-x8 and x18-x30 are
 * live (x18 holds the Shadow Call Stack pointer), and x9-x17 are safe to
 * clobber.
 *
 * We save the callsite's context into a struct ftrace_regs before invoking any
 * ftrace callbacks. So that we can get a sensible backtrace, we create frame
 * records for the callsite and the ftrace entry assembly. This is not
 * sufficient for reliable stacktrace: until we create the callsite stack
 * record, its caller is missing from the LR and existing chain of frame
 * records.
 */
SYM_CODE_START(ftrace_caller)
	bti	c

	/* Save original SP */
	mov	x10, sp

	/* Make room for ftrace regs, plus two frame records */
	sub	sp, sp, #(FREGS_SIZE + 32)

	/* Save function arguments */
	stp	x0, x1, [sp, #FREGS_X0]
	stp	x2, x3, [sp, #FREGS_X2]
	stp	x4, x5, [sp, #FREGS_X4]
	stp	x6, x7, [sp, #FREGS_X6]
	str	x8,     [sp, #FREGS_X8]

	/* Save the callsite's FP, LR, SP (x9 holds the callsite's LR, see above) */
	str	x29, [sp, #FREGS_FP]
	str	x9,  [sp, #FREGS_LR]
	str	x10, [sp, #FREGS_SP]

	/* Save the PC after the ftrace callsite */
	str	x30, [sp, #FREGS_PC]

	/* Create a frame record for the callsite above the ftrace regs */
	stp	x29, x9, [sp, #FREGS_SIZE + 16]
	add	x29, sp, #FREGS_SIZE + 16

	/* Create our frame record above the ftrace regs */
	stp	x29, x30, [sp, #FREGS_SIZE]
	add	x29, sp, #FREGS_SIZE

	sub	x0, x30, #AARCH64_INSN_SIZE	// ip (callsite's BL insn)
	mov	x1, x9				// parent_ip (callsite's LR)
	ldr_l	x2, function_trace_op		// op
	mov	x3, sp				// regs

SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
	bl	ftrace_stub			// patched at runtime to the active tracer

/*
 * At the callsite x0-x8 and x19-x30 were live. Any C code will have preserved
 * x19-x29 per the AAPCS, and we created frame records upon entry, so we need
 * to restore x0-x8, x29, and x30.
 */
	/* Restore function arguments */
	ldp	x0, x1, [sp, #FREGS_X0]
	ldp	x2, x3, [sp, #FREGS_X2]
	ldp	x4, x5, [sp, #FREGS_X4]
	ldp	x6, x7, [sp, #FREGS_X6]
	ldr	x8,     [sp, #FREGS_X8]

	/* Restore the callsite's FP, LR, PC */
	ldr	x29, [sp, #FREGS_FP]
	ldr	x30, [sp, #FREGS_LR]
	ldr	x9,  [sp, #FREGS_PC]

	/* Restore the callsite's SP */
	add	sp, sp, #FREGS_SIZE + 32

	/* Return to the instrumented function's body via the saved PC */
	ret	x9
SYM_CODE_END(ftrace_caller)

#else /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */

/*
 * Gcc with -pg will put the following code in the beginning of each function:
 *      mov x0, x30
 *      bl _mcount
 *	[function's body ...]
 * "bl _mcount" may be replaced with "bl ftrace_caller" or NOP if dynamic
 * ftrace is enabled.
 *
 * Please note that x0 as an argument will not be used here because we can
 * get lr(x30) of the instrumented function at any time by winding up the call
 * stack as long as the kernel is compiled without -fomit-frame-pointer.
 * (or CONFIG_FRAME_POINTER, this is forced on arm64)
 *
 * stack layout after mcount_enter in _mcount():
 *
 * current sp/fp =>  0:+-----+
 * in _mcount()        | x29 | -> instrumented function's fp
 *                     +-----+
 *                     | x30 | -> _mcount()'s lr (= instrumented function's pc)
 * old sp       => +16:+-----+
 * when instrumented   |     |
 * function calls      | ... |
 * _mcount()           |     |
 *                     |     |
 * instrumented => +xx:+-----+
 * function's fp       | x29 | -> parent's fp
 *                     +-----+
 *                     | x30 | -> instrumented function's lr (= parent's pc)
 *                     +-----+
 *                     | ... |
 */

	/* create a frame record for _mcount() itself (see layout above) */
	.macro mcount_enter
	stp	x29, x30, [sp, #-16]!
	mov	x29, sp
	.endm

	/* unwind _mcount()'s frame record and return to the caller */
	.macro mcount_exit
	ldp	x29, x30, [sp], #16
	ret
	.endm

	/* step back from a return address to the branch instruction itself */
	.macro mcount_adjust_addr rd, rn
	sub	\rd, \rn, #AARCH64_INSN_SIZE
	.endm

	/* for instrumented function's parent */
	.macro mcount_get_parent_fp reg
	ldr	\reg, [x29]
	ldr	\reg, [\reg]
	.endm

	/* for instrumented function */
	.macro mcount_get_pc0 reg
	mcount_adjust_addr	\reg, x30
	.endm

	.macro mcount_get_pc reg
	ldr	\reg, [x29, #8]
	mcount_adjust_addr	\reg, \reg
	.endm

	.macro mcount_get_lr reg
	ldr	\reg, [x29]
	ldr	\reg, [\reg, #8]
	.endm

	.macro mcount_get_lr_addr reg
	ldr	\reg, [x29]
	add	\reg, \reg, #8
	.endm

/*
 * _mcount() is used to build the kernel with the -pg option, but all the
 * branch instructions to _mcount() are replaced with NOPs initially at kernel
 * start up, and later on, a NOP becomes a branch to ftrace_caller() when
 * enabled, or back to a NOP when disabled, on a per-function basis.
 */
SYM_FUNC_START(_mcount)
	ret
SYM_FUNC_END(_mcount)
EXPORT_SYMBOL(_mcount)
NOKPROBE(_mcount)

/*
 * void ftrace_caller(unsigned long return_address)
 * @return_address: return address to instrumented function
 *
 * This function is a counterpart of _mcount() in 'static' ftrace, and
 * makes calls to:
 *     - tracer function to probe instrumented function's entry,
 *     - ftrace_graph_caller to set up an exit hook
 */
SYM_FUNC_START(ftrace_caller)
	mcount_enter

	mcount_get_pc0	x0		//     function's pc
	mcount_get_lr	x1		//     function's lr

SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)	// tracer(pc, lr);
	nop				// This will be replaced with "bl xxx"
					// where xxx can be any kind of tracer.

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
	nop				// If enabled, this will be replaced with
					// "b ftrace_graph_caller"
#endif

	mcount_exit
SYM_FUNC_END(ftrace_caller)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * void ftrace_graph_caller(void)
 *
 * Called from _mcount() or ftrace_caller() when the function_graph tracer is
 * selected.
 * This function w/ prepare_ftrace_return() fakes the link register's value on
 * the call stack in order to intercept the instrumented function's return path
 * and run return_to_handler() later on its exit.
 */
SYM_FUNC_START(ftrace_graph_caller)
	mcount_get_pc		  x0	//     function's pc
	mcount_get_lr_addr	  x1	//     pointer to function's saved lr
	mcount_get_parent_fp	  x2	//     parent's fp
	bl	prepare_ftrace_return	// prepare_ftrace_return(pc, &lr, fp)

	mcount_exit
SYM_FUNC_END(ftrace_graph_caller)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */

/* Default no-op target for the patchable "bl ftrace_stub" callsite above. */
SYM_TYPED_FUNC_START(ftrace_stub)
	ret
SYM_FUNC_END(ftrace_stub)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Graph-tracer counterpart of ftrace_stub: a no-op default target. */
SYM_TYPED_FUNC_START(ftrace_stub_graph)
	ret
SYM_FUNC_END(ftrace_stub_graph)

/*
 * void return_to_handler(void)
 *
 * Run ftrace_return_to_handler() before going back to parent.
 * @fp is checked against the value passed by ftrace_graph_caller().
 */
SYM_CODE_START(return_to_handler)
	/* save return value regs */
	sub	sp, sp, #64
	stp	x0, x1, [sp]
	stp	x2, x3, [sp, #16]
	stp	x4, x5, [sp, #32]
	stp	x6, x7, [sp, #48]

	mov	x0, x29			//     parent's fp
	bl	ftrace_return_to_handler// addr = ftrace_return_to_handler(fp);
	mov	x30, x0			// restore the original return address

	/* restore return value regs */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #16]
	ldp	x4, x5, [sp, #32]
	ldp	x6, x7, [sp, #48]
	add	sp, sp, #64

	ret
SYM_CODE_END(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
