/*
 * Function calling ABI conversion from Linux to EFI for x86_64
 *
 * Copyright (C) 2007 Intel Corp
 *	Bibo Mao <bibo.mao@intel.com>
 *	Huang Ying <ying.huang@intel.com>
 */

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/msr.h>
#include <asm/processor-flags.h>
#include <asm/page_types.h>

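/*
 * SAVE_XMM/RESTORE_XMM bracket the firmware call: SAVE_XMM remembers the
 * original %rsp and CR0 at the base of a 16-byte aligned scratch area
 * (movaps requires the alignment), clears CR0.TS so the SSE accesses cannot
 * fault, and stores %xmm0-%xmm5, which the EFI (MS x64) calling convention
 * treats as volatile.  RESTORE_XMM undoes all of it, including the saved
 * CR0 and stack pointer.
 */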
#define SAVE_XMM			\
	mov %rsp, %rax;			\
	subq $0x70, %rsp;		\
	and $~0xf, %rsp;		\
	mov %rax, (%rsp);		\
	mov %cr0, %rax;			\
	clts;				\
	mov %rax, 0x8(%rsp);		\
	movaps %xmm0, 0x60(%rsp);	\
	movaps %xmm1, 0x50(%rsp);	\
	movaps %xmm2, 0x40(%rsp);	\
	movaps %xmm3, 0x30(%rsp);	\
	movaps %xmm4, 0x20(%rsp);	\
	movaps %xmm5, 0x10(%rsp)

#define RESTORE_XMM			\
	movaps 0x60(%rsp), %xmm0;	\
	movaps 0x50(%rsp), %xmm1;	\
	movaps 0x40(%rsp), %xmm2;	\
	movaps 0x30(%rsp), %xmm3;	\
	movaps 0x20(%rsp), %xmm4;	\
	movaps 0x10(%rsp), %xmm5;	\
	mov 0x8(%rsp), %rsi;		\
	mov %rsi, %cr0;			\
	mov (%rsp), %rsp

	/* Flush the whole TLB by toggling CR4.PGE (bit 7) */
	.macro FLUSH_TLB_ALL
	movq %r15, efi_scratch(%rip)
	movq %r14, efi_scratch+8(%rip)
	movq %cr4, %r15
	movq %r15, %r14
	andb $0x7f, %r14b
	movq %r14, %cr4
	movq %r15, %cr4
	movq efi_scratch+8(%rip), %r14
	movq efi_scratch(%rip), %r15
	.endm

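	/*
	 * SWITCH_PGT/RESTORE_PGT switch to the dedicated EFI page table and
	 * back when efi_scratch+24 (a flag filled in by the EFI setup code)
	 * is non-zero.  efi_scratch offsets used here:
	 *   +0  scratch slot for %r15
	 *   +8  previous CR3
	 *   +16 CR3 value of the EFI page table
	 *   +24 flag byte
	 */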
	.macro SWITCH_PGT
	cmpb $0, efi_scratch+24(%rip)
	je 1f
	movq %r15, efi_scratch(%rip)		# r15
	# save previous CR3
	movq %cr3, %r15
	movq %r15, efi_scratch+8(%rip)		# prev_cr3
	movq efi_scratch+16(%rip), %r15		# EFI pgt
	movq %r15, %cr3
	1:
	.endm

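	/* Restore the previous CR3 saved by SWITCH_PGT and flush the TLB. */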
	.macro RESTORE_PGT
	cmpb $0, efi_scratch+24(%rip)
	je 2f
	movq efi_scratch+8(%rip), %r15
	movq %r15, %cr3
	movq efi_scratch(%rip), %r15
	FLUSH_TLB_ALL
	2:
	.endm

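/*
 * efi_call(fp, ...): call the EFI function at fp with up to six arguments,
 * converting from the SysV register convention to the Microsoft x64
 * convention used by EFI: the first four arguments move from
 * %rsi/%rdx/%rcx/%r8 into %rcx/%rdx/%r8/%r9, the fifth and sixth go onto
 * the stack above 32 bytes of shadow space, and the sixth is fetched from
 * the caller's stack frame through the %rsp value saved by SAVE_XMM.  The
 * matching C prototype is expected to look like
 * "extern u64 efi_call(void *fp, ...);" (asm/efi.h).
 */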
ENTRY(efi_call)
	SAVE_XMM
	mov (%rsp), %rax
	mov 8(%rax), %rax
	subq $48, %rsp
	mov %r9, 32(%rsp)
	mov %rax, 40(%rsp)
	mov %r8, %r9
	mov %rcx, %r8
	mov %rsi, %rcx
	SWITCH_PGT
	call *%rdi
	RESTORE_PGT
	addq $48, %rsp
	RESTORE_XMM
	ret
ENDPROC(efi_call)

#ifdef CONFIG_EFI_MIXED

/*
 * We run this function from the 1:1 mapping.
 *
 * This function must be invoked with a 1:1 mapped stack, because the code
 * below drops out of long mode and (in efi_enter32) briefly disables
 * paging, so every address it touches, code, stack and GDT, must also be
 * valid as a physical address.
 */
ENTRY(__efi64_thunk)
	movl	%ds, %eax
	push	%rax
	movl	%es, %eax
	push	%rax
	movl	%ss, %eax
	push	%rax

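	/*
	 * Stash the five 32-bit argument values at the top of the stack.
	 * After the far jump below, efi_enter32 issues "call *%edi" with
	 * %esp pointing right here, so these slots become the stack
	 * arguments of the 32-bit firmware call.
	 */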
	subq	$32, %rsp
	movl	%esi, 0x0(%rsp)
	movl	%edx, 0x4(%rsp)
	movl	%ecx, 0x8(%rsp)
	movq	%r8, %rsi
	movl	%esi, 0xc(%rsp)
	movq	%r9, %rsi
	movl	%esi, 0x10(%rsp)

	sgdt	save_gdt(%rip)

	leaq	1f(%rip), %rbx
	movq	%rbx, func_rt_ptr(%rip)

	/* Switch to gdt with 32-bit segments */
	movl	64(%rsp), %eax
	lgdt	(%rax)

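	/*
	 * Far-jump into efi_enter32.  The GDT loaded above comes from the
	 * first stack argument (64(%rsp)); its __KERNEL_CS slot is expected
	 * to describe a 32-bit code segment, since efi_enter32 is .code32.
	 */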
	leaq	efi_enter32(%rip), %rax
	pushq	$__KERNEL_CS
	pushq	%rax
	lretq

1:	addq	$32, %rsp

	lgdt	save_gdt(%rip)

	pop	%rbx
	movl	%ebx, %ss
	pop	%rbx
	movl	%ebx, %es
	pop	%rbx
	movl	%ebx, %ds

	/*
	 * Convert the 32-bit status code into its 64-bit form: EFI keeps the
	 * error/class bits in the top nibble, so move bits 28-31 of the
	 * 32-bit value up to bits 60-63 and keep the low 28 bits in place.
	 */
	test	%rax, %rax
	jz	1f
	movl	%eax, %ecx
	andl	$0x0fffffff, %ecx
	andl	$0xf0000000, %eax
	shl	$32, %rax
	or	%rcx, %rax
1:
	ret
ENDPROC(__efi64_thunk)

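/*
 * 64-bit landing pad for the return from 32-bit code (its address is
 * expected to be the target of the lret at the end of efi_enter32): the
 * 32-bit status arrives in %rdi, is moved into %rax, and control jumps to
 * the address saved in func_rt_ptr, i.e. the "1:" label in __efi64_thunk.
 */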
ENTRY(efi_exit32)
	movq	func_rt_ptr(%rip), %rax
	push	%rax
	mov	%rdi, %rax
	ret
ENDPROC(efi_exit32)

	.code32
/*
 * EFI service pointer must be in %edi.
 *
 * The stack must already be laid out for the 32-bit calling convention,
 * i.e. the arguments for the call through %edi sit at the top of the stack.
 */
ENTRY(efi_enter32)
	movl	$__KERNEL_DS, %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %ss

	/* Reload pgtables */
	movl	%cr3, %eax
	movl	%eax, %cr3

	/* Disable paging */
	movl	%cr0, %eax
	btrl	$X86_CR0_PG_BIT, %eax
	movl	%eax, %cr0

	/* Disable long mode via EFER */
	movl	$MSR_EFER, %ecx
	rdmsr
	btrl	$_EFER_LME, %eax
	wrmsr

	call	*%edi

	/* We must preserve return value */
	movl	%eax, %edi

	/*
	 * Some firmware will return with interrupts enabled. Be sure to
	 * disable them before we switch GDTs.
	 */
	cli

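	/*
	 * Reload the 64-bit GDT: 68(%esp) is expected to point at its
	 * descriptor (presumably efi_gdt64 below); patch the base field at
	 * offset 2 with the descriptor's own address before loading it.
	 */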
	movl	68(%esp), %eax
	movl	%eax, 2(%eax)
	lgdtl	(%eax)

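	/*
	 * Re-enable PAE, reload CR3 and set EFER.LME so that turning CR0.PG
	 * back on below re-activates long mode (compatibility mode until the
	 * far return below reloads a 64-bit %cs).
	 */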
	movl	%cr4, %eax
	btsl	$(X86_CR4_PAE_BIT), %eax
	movl	%eax, %cr4

	movl	%cr3, %eax
	movl	%eax, %cr3

	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_LME, %eax
	wrmsr

	xorl	%eax, %eax
	lldt	%ax

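	/*
	 * Queue up the far return to 64-bit code: 72(%esp) is a caller
	 * provided slot that is expected to hold the 64-bit resume address
	 * (efi_exit32), reached through __KERNEL_CS of the GDT loaded above.
	 */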
	movl	72(%esp), %eax
	pushl	$__KERNEL_CS
	pushl	%eax

	/* Enable paging */
	movl	%cr0, %eax
	btsl	$X86_CR0_PG_BIT, %eax
	movl	%eax, %cr0
	lret
ENDPROC(efi_enter32)

	.data
	.balign	8
	.global	efi32_boot_gdt
efi32_boot_gdt:	.word	0
		.quad	0

save_gdt:	.word	0
		.quad	0
func_rt_ptr:	.quad	0

	.global efi_gdt64
efi_gdt64:
	.word	efi_gdt64_end - efi_gdt64
	.long	0			/* Filled out by user */
	.word	0
	.quad	0x0000000000000000	/* NULL descriptor */
	.quad	0x00af9a000000ffff	/* __KERNEL_CS */
	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
	.quad	0x0080890000000000	/* TS descriptor */
	.quad	0x0000000000000000	/* TS continued */
efi_gdt64_end:
#endif /* CONFIG_EFI_MIXED */

	.data
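/*
 * Scratch area used by the page table switching macros above; the EFI page
 * table pointer and the flag byte are filled in elsewhere (the C side).
 * Offsets:
 *   +0  temporary save slot for %r15
 *   +8  previous CR3
 *   +16 CR3 value of the EFI page table
 *   +24 flag byte: non-zero when the EFI page table should be used
 * The trailing quad is not referenced in this file.
 */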
ENTRY(efi_scratch)
	.fill 3,8,0
	.byte 0
	.quad 0