/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  Copyright (C) 2017  Steven Rostedt, VMware Inc.
 */

#include <linux/linkage.h>
#include <asm/page_types.h>
#include <asm/segment.h>
#include <asm/export.h>
#include <asm/ftrace.h>

#ifdef CC_USING_FENTRY
# define function_hook	__fentry__
EXPORT_SYMBOL(__fentry__)
#else
# define function_hook	mcount
EXPORT_SYMBOL(mcount)
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

/* mcount uses a frame pointer even if CONFIG_FRAME_POINTER is not set */
#if !defined(CC_USING_FENTRY) || defined(CONFIG_FRAME_POINTER)
# define USING_FRAME_POINTER
#endif

#ifdef USING_FRAME_POINTER
# define MCOUNT_FRAME			1	/* using frame = true  */
#else
# define MCOUNT_FRAME			0	/* using frame = false */
#endif

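/*
 * With CONFIG_DYNAMIC_FTRACE the compiler-generated mcount/fentry call
 * sites are patched at runtime, so the default hook below is just a
 * return; sites that are actually traced are redirected to
 * ftrace_caller or ftrace_regs_caller.
 */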
ENTRY(function_hook)
	ret
END(function_hook)

ENTRY(ftrace_caller)

#ifdef USING_FRAME_POINTER
# ifdef CC_USING_FENTRY
	/*
	 * A stack frame is built by pushing the return ip and then the
	 * saved bp.  Since the fentry call happens before the traced
	 * function sets up its own frame, only function-ip and parent-ip
	 * are on the stack.  We need to add a frame with parent-ip
	 * followed by ebp.
	 */
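	/*
	 * Sketch of the stack after the pushes below plus the common
	 * pushl %ebp/movl %esp, %ebp that follows the # endif
	 * (top of stack first):
	 *
	 *	saved %ebp	<- new %ebp, frame for this call
	 *	function-ip
	 *	original %ebp	<- fake frame for the traced function
	 *	parent-ip
	 *	function-ip	<- original return address of the fentry call
	 *	parent-ip	<- original return address of the function
	 */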
	pushl	4(%esp)				/* parent ip */
	pushl	%ebp
	movl	%esp, %ebp
	pushl	2*4(%esp)			/* function ip */
# endif
	/* For mcount, the function ip is directly above */
	pushl	%ebp
	movl	%esp, %ebp
#endif
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	$0				/* Pass NULL as regs pointer */

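	/*
	 * Stack at this point, from %esp up:
	 *	 0(%esp)	NULL regs pointer
	 *	 4(%esp)	saved %edx
	 *	 8(%esp)	saved %ecx
	 *	12(%esp)	saved %eax
	 *	16(%esp)	saved %ebp (frame case) or the function ip
	 *	(MCOUNT_FRAME+4)*4(%esp)	the function ip
	 */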
#ifdef USING_FRAME_POINTER
	/* Load the saved ebp (frame pointer) into edx */
	movl	4*4(%esp), %edx
#else
	/* There's no frame pointer; load the appropriate stack address instead */
	lea	4*4(%esp), %edx
#endif

	movl	(MCOUNT_FRAME+4)*4(%esp), %eax	/* load the function ip */
	/* Get the parent ip */
	movl	4(%edx), %edx			/* edx had ebp; now the parent ip */

	movl	function_trace_op, %ecx
	subl	$MCOUNT_INSN_SIZE, %eax

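	/*
	 * With the 32-bit kernel's regparm(3) convention the handler
	 * called through the ftrace_call site below receives:
	 * %eax = ip, %edx = parent ip, %ecx = ftrace_ops, and the NULL
	 * pushed above as the pt_regs pointer on the stack.
	 */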
.globl ftrace_call
ftrace_call:
	call	ftrace_stub

	addl	$4, %esp			/* skip NULL pointer */
	popl	%edx
	popl	%ecx
	popl	%eax
#ifdef USING_FRAME_POINTER
	popl	%ebp
# ifdef CC_USING_FENTRY
	addl	$4, %esp			/* skip function ip */
	popl	%ebp				/* this is the orig bp */
	addl	$4, %esp			/* skip parent ip */
# endif
#endif
.Lftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp	ftrace_stub
#endif

/* This is weak to keep gas from relaxing the jumps */
WEAK(ftrace_stub)
	ret
END(ftrace_caller)

ENTRY(ftrace_regs_caller)
	/*
	 * i386 does not save SS and ESP when coming from kernel.
	 * Instead, to get sp, &regs->sp is used (see ptrace.h).
	 * Unfortunately, that means eflags must be saved in the slot
	 * currently occupied by the return ip.  We copy the return ip
	 * into the regs->ip slot and put the flags into the old
	 * return-ip slot.
	 */
	pushl	$__KERNEL_CS
	pushl	4(%esp)				/* Save the return ip */
	pushl	$0				/* Load 0 into orig_ax */
	pushl	%gs
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax

	/* Get flags and place them into the return ip slot */
	pushf
	popl	%eax
	movl	%eax, 8*4(%esp)

	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
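	/*
	 * The frame now matches struct pt_regs (lowest address first):
	 * bx, cx, dx, si, di, bp, ax, ds, es, fs, gs, orig_ax, ip, cs,
	 * flags.  The flags live in the slot that originally held the
	 * return ip, so &regs->sp points at the rest of the old stack.
	 */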

	movl	12*4(%esp), %eax		/* Load ip (1st parameter) */
	subl	$MCOUNT_INSN_SIZE, %eax		/* Adjust ip */
#ifdef CC_USING_FENTRY
	movl	15*4(%esp), %edx		/* Load parent ip (2nd parameter) */
#else
	movl	0x4(%ebp), %edx			/* Load parent ip (2nd parameter) */
#endif
	movl	function_trace_op, %ecx		/* Save ftrace_ops in 3rd parameter */
	pushl	%esp				/* Save pt_regs as 4th parameter */

GLOBAL(ftrace_regs_call)
	call	ftrace_stub

	addl	$4, %esp			/* Skip pt_regs */

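	/*
	 * %esp points at pt_regs again: 12*4(%esp) is regs->ip and
	 * 14*4(%esp) is regs->flags, which is also the slot that
	 * originally held the return ip.
	 */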
	/* restore flags */
	pushl	14*4(%esp)
	popf

	/* Move return ip back to its original location */
	movl	12*4(%esp), %eax
	movl	%eax, 14*4(%esp)

	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
	popl	%ds
	popl	%es
	popl	%fs
	popl	%gs

	/* use lea to not affect flags */
	lea	3*4(%esp), %esp			/* Skip orig_ax, ip and cs */

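	/*
	 * %esp now points at the old return-ip slot, which again holds
	 * the (possibly updated) return ip, so the ret that ends this
	 * path (ftrace_stub or ftrace_graph_caller) returns into the
	 * traced function.
	 */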
	jmp	.Lftrace_ret
#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(function_hook)
	cmpl	$__PAGE_OFFSET, %esp
	jb	ftrace_stub			/* Paging not enabled yet? */

	cmpl	$ftrace_stub, ftrace_trace_function
	jnz	.Ltrace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl	$ftrace_stub, ftrace_graph_return
	jnz	ftrace_graph_caller

	cmpl	$ftrace_graph_entry_stub, ftrace_graph_entry
	jnz	ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
	ret

	/* taken from glibc */
.Ltrace:
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	movl	0xc(%esp), %eax
	movl	0x4(%ebp), %edx
	subl	$MCOUNT_INSN_SIZE, %eax

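	/*
	 * Under regparm(3), %eax (ip) and %edx (parent ip) are the two
	 * arguments the ftrace_trace_function callback takes.
	 */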
	call	*ftrace_trace_function

	popl	%edx
	popl	%ecx
	popl	%eax
	jmp	ftrace_stub
END(function_hook)
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	movl	3*4(%esp), %eax
	/* Even with frame pointers, there is no frame yet at the fentry call site */
#ifdef CC_USING_FENTRY
	lea	4*4(%esp), %edx
	movl	$0, %ecx
#else
	lea	0x4(%ebp), %edx
	movl	(%ebp), %ecx
#endif
	subl	$MCOUNT_INSN_SIZE, %eax
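	/*
	 * Under regparm(3): %eax = adjusted function ip, %edx = address
	 * of the return-address slot, %ecx = frame pointer (0 for
	 * fentry); these are the three parameters of
	 * prepare_ftrace_return().
	 */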
	call	prepare_ftrace_return
	popl	%edx
	popl	%ecx
	popl	%eax
	ret
END(ftrace_graph_caller)

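/*
 * prepare_ftrace_return() replaced the traced function's return address
 * with this stub.  ftrace_return_to_handler() hands back the original
 * return address, which we then jump to.  %eax and %edx are preserved
 * around the call because they hold the function's return value.
 */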
.globl return_to_handler
return_to_handler:
	pushl	%eax
	pushl	%edx
#ifdef CC_USING_FENTRY
	movl	$0, %eax
#else
	movl	%ebp, %eax
#endif
	call	ftrace_return_to_handler
	movl	%eax, %ecx
	popl	%edx
	popl	%eax
	jmp	*%ecx
#endif
