/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/linkage.h>

#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/asm-offsets.h>

	.text
	.code32
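/*
 * get_sev_encryption_bit - Get the C-bit position
 *
 * Returns the page table bit position used to mark encrypted pages in
 * %eax, or 0 if SEV is not active. Runs as 32-bit code so it can be
 * shared between the 32-bit and 64-bit boot paths.
 */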
SYM_FUNC_START(get_sev_encryption_bit)
	xor	%eax, %eax

#ifdef CONFIG_AMD_MEM_ENCRYPT
	push	%ebx
	push	%ecx
	push	%edx

	movl	$0x80000000, %eax	/* CPUID to check the highest extended leaf */
	cpuid
	cmpl	$0x8000001f, %eax	/* See if 0x8000001f is available */
	jb	.Lno_sev

	/*
	 * Check for the SEV feature:
	 *   CPUID Fn8000_001F[EAX] - Bit 1
	 *   CPUID Fn8000_001F[EBX] - Bits 5:0
	 *     Pagetable bit position used to indicate encryption
	 */
	movl	$0x8000001f, %eax
	cpuid
	bt	$1, %eax		/* Check if SEV is available */
	jnc	.Lno_sev

	movl	$MSR_AMD64_SEV, %ecx	/* Read the SEV MSR */
	rdmsr
	bt	$MSR_AMD64_SEV_ENABLED_BIT, %eax	/* Check if SEV is active */
	jnc	.Lno_sev

	movl	%ebx, %eax
	andl	$0x3f, %eax		/* Return the encryption bit location */
	jmp	.Lsev_exit

.Lno_sev:
	xor	%eax, %eax

.Lsev_exit:
	pop	%edx
	pop	%ecx
	pop	%ebx

#endif	/* CONFIG_AMD_MEM_ENCRYPT */

	ret
SYM_FUNC_END(get_sev_encryption_bit)

/**
 * sev_es_req_cpuid - Request a CPUID value from the Hypervisor using
 *		      the GHCB MSR protocol
 *
 * @%eax:	Register to request (0=EAX, 1=EBX, 2=ECX, 3=EDX)
 * @%edx:	CPUID function
 *
 * Returns 0 in %eax on success, -1 in %eax on failure
 * %edx returns CPUID value on success
 */
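/*
 * GHCB MSR protocol layout used below, as defined by the GHCB
 * specification:
 *
 *   GHCBData[63:32] - CPUID function (request) / register value (response)
 *   GHCBData[31:30] - Requested register
 *   GHCBData[29:12] - Reserved, must be zero in the response
 *   GHCBData[11: 0] - 0x004 (CPUID request), 0x005 (CPUID response)
 */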
SYM_CODE_START_LOCAL(sev_es_req_cpuid)
	shll	$30, %eax
	orl	$0x00000004, %eax
	movl	$MSR_AMD64_SEV_ES_GHCB, %ecx
	wrmsr
	rep; vmmcall		# VMGEXIT
	rdmsr

	/* Check response */
	movl	%eax, %ecx
	andl	$0x3ffff000, %ecx	# Bits [29:12] MBZ
	jnz	2f

	/* Check return code */
	andl	$0xfff, %eax
	cmpl	$5, %eax
	jne	2f

	/* All good - return success */
	xorl	%eax, %eax
1:
	ret
2:
	movl	$-1, %eax
	jmp	1b
SYM_CODE_END(sev_es_req_cpuid)

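/*
 * startup32_vc_handler - 32-bit #VC exception handler for early boot
 *
 * Under SEV-ES, intercepted instructions raise a #VC exception. At this
 * stage of boot only CPUID has to be supported: the CPUID function is
 * taken from %eax and each result register is fetched from the
 * Hypervisor via the GHCB MSR protocol. Any other exit reason, and any
 * protocol failure, terminates the guest.
 */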
SYM_CODE_START(startup32_vc_handler)
	pushl	%eax
	pushl	%ebx
	pushl	%ecx
	pushl	%edx
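
	/*
	 * Stack layout after the pushes above, on top of the hardware
	 * exception frame:
	 *
	 *    (%esp) - saved EDX (receives CPUID[fn].EDX)
	 *   4(%esp) - saved ECX (receives CPUID[fn].ECX)
	 *   8(%esp) - saved EBX (receives CPUID[fn].EBX)
	 *  12(%esp) - saved EAX (receives CPUID[fn].EAX)
	 *  16(%esp) - error code pushed by the #VC exception
	 *  20(%esp) - return EIP
	 */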

	/* Keep CPUID function in %ebx */
	movl	%eax, %ebx

	/* Check if error-code == SVM_EXIT_CPUID */
	cmpl	$0x72, 16(%esp)
	jne	.Lfail

	movl	$0, %eax		# Request CPUID[fn].EAX
	movl	%ebx, %edx		# CPUID fn
	call	sev_es_req_cpuid	# Call helper
	testl	%eax, %eax		# Check return code
	jnz	.Lfail
	movl	%edx, 12(%esp)		# Store result

	movl	$1, %eax		# Request CPUID[fn].EBX
	movl	%ebx, %edx		# CPUID fn
	call	sev_es_req_cpuid	# Call helper
	testl	%eax, %eax		# Check return code
	jnz	.Lfail
	movl	%edx, 8(%esp)		# Store result

	movl	$2, %eax		# Request CPUID[fn].ECX
	movl	%ebx, %edx		# CPUID fn
	call	sev_es_req_cpuid	# Call helper
	testl	%eax, %eax		# Check return code
	jnz	.Lfail
	movl	%edx, 4(%esp)		# Store result

	movl	$3, %eax		# Request CPUID[fn].EDX
	movl	%ebx, %edx		# CPUID fn
	call	sev_es_req_cpuid	# Call helper
	testl	%eax, %eax		# Check return code
	jnz	.Lfail
	movl	%edx, 0(%esp)		# Store result
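
	/*
	 * The results were written into the saved register slots, so the
	 * pops in .Ldone below hand them to the interrupted context as if
	 * CPUID had executed natively.
	 */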

	/*
	 * Sanity check CPUID results from the Hypervisor. See comment in
	 * do_vc_no_ghcb() for more details on why this is necessary: an
	 * untrusted Hypervisor could otherwise pretend that SEV is not
	 * supported and trick the guest into running unencrypted.
	 */

	/* Fail if SEV leaf not available in CPUID[0x80000000].EAX */
	cmpl	$0x80000000, %ebx
	jne	.Lcheck_sev
	cmpl	$0x8000001f, 12(%esp)
	jb	.Lfail
	jmp	.Ldone

.Lcheck_sev:
	/* Fail if SEV bit not set in CPUID[0x8000001f].EAX[1] */
	cmpl	$0x8000001f, %ebx
	jne	.Ldone
	btl	$1, 12(%esp)
	jnc	.Lfail

.Ldone:
	popl	%edx
	popl	%ecx
	popl	%ebx
	popl	%eax

	/* Remove error code */
	addl	$4, %esp

	/* Jump over the 2-byte CPUID instruction */
	addl	$2, (%esp)

	iret
.Lfail:
	/* Send terminate request (0x100) to the Hypervisor */
	movl	$0x100, %eax
	xorl	%edx, %edx
	movl	$MSR_AMD64_SEV_ES_GHCB, %ecx
	wrmsr
	rep; vmmcall

	/* If the request fails, go to a hlt loop */
	hlt
	jmp .Lfail
SYM_CODE_END(startup32_vc_handler)

	.code64

#include "../../kernel/sev_verify_cbit.S"
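
/*
 * set_sev_encryption_mask - Set up sme_me_mask and sev_status
 *
 * 64-bit only: determines the C-bit position via
 * get_sev_encryption_bit(), sets it in sme_me_mask and caches the
 * MSR_AMD64_SEV value in sev_status for later use. Always returns 0.
 */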
SYM_FUNC_START(set_sev_encryption_mask)
#ifdef CONFIG_AMD_MEM_ENCRYPT
	push	%rbp
	push	%rdx

	movq	%rsp, %rbp		/* Save current stack pointer */

	call	get_sev_encryption_bit	/* Get the encryption bit position */
	testl	%eax, %eax
	jz	.Lno_sev_mask

	bts	%rax, sme_me_mask(%rip)	/* Create the encryption mask */

	/*
	 * Read MSR_AMD64_SEV again and store it to sev_status. This can't
	 * be done in get_sev_encryption_bit() because that function is
	 * 32-bit code shared between the 64-bit and 32-bit boot paths.
	 */
	movl	$MSR_AMD64_SEV, %ecx	/* Read the SEV MSR */
	rdmsr

	/* Store the 64-bit MSR value (EDX:EAX) in sev_status */
	shlq	$32, %rdx
	orq	%rdx, %rax
	movq	%rax, sev_status(%rip)

.Lno_sev_mask:
	movq	%rbp, %rsp		/* Restore original stack pointer */

	pop	%rdx
	pop	%rbp
#endif

	xor	%rax, %rax
	ret
SYM_FUNC_END(set_sev_encryption_mask)

	.data

#ifdef CONFIG_AMD_MEM_ENCRYPT
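	/*
	 * sme_me_mask and sev_status are filled in by
	 * set_sev_encryption_mask() above; sev_check_data is scratch
	 * space used by the included sev_verify_cbit.S helper.
	 */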
	.balign	8
SYM_DATA(sme_me_mask,		.quad 0)
SYM_DATA(sev_status,		.quad 0)
SYM_DATA(sev_check_data,	.quad 0)
#endif