xref: /openbmc/linux/arch/x86/kernel/sev-shared.c (revision b9cb9c45)
1e759959fSBrijesh Singh // SPDX-License-Identifier: GPL-2.0
2e759959fSBrijesh Singh /*
3e759959fSBrijesh Singh  * AMD Encrypted Register State Support
4e759959fSBrijesh Singh  *
5e759959fSBrijesh Singh  * Author: Joerg Roedel <jroedel@suse.de>
6e759959fSBrijesh Singh  *
7e759959fSBrijesh Singh  * This file is not compiled stand-alone. It contains code shared
8e759959fSBrijesh Singh  * between the pre-decompression boot code and the running Linux kernel
9e759959fSBrijesh Singh  * and is included directly into both code-bases.
10e759959fSBrijesh Singh  */
11e759959fSBrijesh Singh 
12e759959fSBrijesh Singh #ifndef __BOOT_COMPRESSED
13e759959fSBrijesh Singh #define error(v)	pr_err(v)
14e759959fSBrijesh Singh #define has_cpuflag(f)	boot_cpu_has(f)
156c321179STom Lendacky #else
166c321179STom Lendacky #undef WARN
176c321179STom Lendacky #define WARN(condition, format...) (!!(condition))
18e759959fSBrijesh Singh #endif
19e759959fSBrijesh Singh 
/* I/O parameters for CPUID-related helpers */
struct cpuid_leaf {
	u32 fn;		/* in: CPUID function (leaf) number */
	u32 subfn;	/* in: CPUID sub-function (index in ECX) */
	u32 eax;	/* out: register values returned for fn/subfn */
	u32 ebx;
	u32 ecx;
	u32 edx;
};
29801baa69SMichael Roth 
/*
 * Individual entries of the SNP CPUID table, as defined by the SNP
 * Firmware ABI, Revision 0.9, Section 7.1, Table 14.
 */
struct snp_cpuid_fn {
	u32 eax_in;	/* CPUID function this entry describes */
	u32 ecx_in;	/* CPUID sub-function this entry describes */
	u64 xcr0_in;	/* XCR0 value this entry corresponds to (0xD leaves) */
	u64 xss_in;	/* XSS value this entry corresponds to (0xD leaves) */
	u32 eax;	/* register values to return for this fn/subfn */
	u32 ebx;
	u32 ecx;
	u32 edx;
	u64 __reserved;
} __packed;
45ee0bfa08SMichael Roth 
/*
 * SNP CPUID table, as defined by the SNP Firmware ABI, Revision 0.9,
 * Section 8.14.2.6. Also noted there is the SNP firmware-enforced limit
 * of 64 entries per CPUID table.
 */
#define SNP_CPUID_COUNT_MAX 64

struct snp_cpuid_table {
	u32 count;	/* number of valid entries in fn[] */
	u32 __reserved1;
	u64 __reserved2;
	struct snp_cpuid_fn fn[SNP_CPUID_COUNT_MAX];
} __packed;
59ee0bfa08SMichael Roth 
/*
 * Since feature negotiation related variables are set early in the boot
 * process they must reside in the .data section so as not to be zeroed
 * out when the .bss section is later cleared (__ro_after_init places
 * them in .data and write-protects them once init completes).
 *
 * GHCB protocol version negotiated with the hypervisor.
 */
static u16 ghcb_version __ro_after_init;

/* Copy of the SNP firmware's CPUID page. */
static struct snp_cpuid_table cpuid_table_copy __ro_after_init;

/*
 * These will be initialized based on CPUID table so that non-present
 * all-zero leaves (for sparse tables) can be differentiated from
 * invalid/out-of-range leaves. This is needed since all-zero leaves
 * still need to be post-processed.
 */
static u32 cpuid_std_range_max __ro_after_init;
static u32 cpuid_hyp_range_max __ro_after_init;
static u32 cpuid_ext_range_max __ro_after_init;
81ee0bfa08SMichael Roth 
82e759959fSBrijesh Singh static bool __init sev_es_check_cpu_features(void)
83e759959fSBrijesh Singh {
84e759959fSBrijesh Singh 	if (!has_cpuflag(X86_FEATURE_RDRAND)) {
85e759959fSBrijesh Singh 		error("RDRAND instruction not supported - no trusted source of randomness available\n");
86e759959fSBrijesh Singh 		return false;
87e759959fSBrijesh Singh 	}
88e759959fSBrijesh Singh 
89e759959fSBrijesh Singh 	return true;
90e759959fSBrijesh Singh }
91e759959fSBrijesh Singh 
/*
 * Request guest termination via the GHCB MSR protocol, passing a reason
 * set/code so the hypervisor can report why the guest gave up, then halt
 * forever. Never returns.
 */
static void __noreturn sev_es_terminate(unsigned int set, unsigned int reason)
{
	u64 val = GHCB_MSR_TERM_REQ;

	/* Tell the hypervisor what went wrong. */
	val |= GHCB_SEV_TERM_REASON(set, reason);

	/* Request Guest Termination from Hypervisor */
	sev_es_wr_ghcb_msr(val);
	VMGEXIT();

	/* Should not come back; halt in a loop in case we do. */
	while (true)
		asm volatile("hlt\n" : : : "memory");
}
106e759959fSBrijesh Singh 
/*
 * The hypervisor features are available from GHCB version 2 onward.
 *
 * Query them via the GHCB MSR protocol. Returns the feature bitmap, or
 * 0 if the negotiated protocol is too old or the response is malformed.
 */
static u64 get_hv_features(void)
{
	u64 val;

	if (ghcb_version < 2)
		return 0;

	sev_es_wr_ghcb_msr(GHCB_MSR_HV_FT_REQ);
	VMGEXIT();

	val = sev_es_rd_ghcb_msr();
	if (GHCB_RESP_CODE(val) != GHCB_MSR_HV_FT_RESP)
		return 0;

	return GHCB_MSR_HV_FT_RESP_VAL(val);
}
126cbd3d4f7SBrijesh Singh 
/*
 * Register the GHCB's guest physical address with the hypervisor using
 * the GHCB MSR protocol. Terminates the guest if the hypervisor does
 * not acknowledge the exact PFN that was requested.
 */
static void snp_register_ghcb_early(unsigned long paddr)
{
	unsigned long pfn = paddr >> PAGE_SHIFT;
	u64 val;

	sev_es_wr_ghcb_msr(GHCB_MSR_REG_GPA_REQ_VAL(pfn));
	VMGEXIT();

	val = sev_es_rd_ghcb_msr();

	/* If the response GPA is not ours then abort the guest */
	if ((GHCB_RESP_CODE(val) != GHCB_MSR_REG_GPA_RESP) ||
	    (GHCB_MSR_REG_GPA_RESP_VAL(val) != pfn))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_REGISTER);
}
14287294bdbSBrijesh Singh 
/*
 * Negotiate the GHCB protocol version with the hypervisor via the GHCB
 * MSR protocol and cache the result in ghcb_version.
 *
 * Returns false if the response is malformed or if the version ranges
 * supported by guest and hypervisor do not overlap.
 */
static bool sev_es_negotiate_protocol(void)
{
	u64 val;

	/* Do the GHCB protocol version negotiation */
	sev_es_wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
	VMGEXIT();
	val = sev_es_rd_ghcb_msr();

	if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
		return false;

	/* The supported version ranges must overlap. */
	if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTOCOL_MIN ||
	    GHCB_MSR_PROTO_MIN(val) > GHCB_PROTOCOL_MAX)
		return false;

	/* Use the highest version supported by both sides. */
	ghcb_version = min_t(size_t, GHCB_MSR_PROTO_MAX(val), GHCB_PROTOCOL_MAX);

	return true;
}
163e759959fSBrijesh Singh 
/*
 * Invalidate the GHCB before reuse: clear the exit code and the valid
 * bitmap so stale state from a previous exit cannot be consumed.
 * NOTE(review): __builtin_memset rather than memset is presumably to
 * avoid an instrumented/out-of-line memset in this shared early code --
 * confirm before changing.
 */
static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb)
{
	ghcb->save.sw_exit_code = 0;
	__builtin_memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}
169e759959fSBrijesh Singh 
170e759959fSBrijesh Singh static bool vc_decoding_needed(unsigned long exit_code)
171e759959fSBrijesh Singh {
172e759959fSBrijesh Singh 	/* Exceptions don't require to decode the instruction */
173e759959fSBrijesh Singh 	return !(exit_code >= SVM_EXIT_EXCP_BASE &&
174e759959fSBrijesh Singh 		 exit_code <= SVM_EXIT_LAST_EXCP);
175e759959fSBrijesh Singh }
176e759959fSBrijesh Singh 
177e759959fSBrijesh Singh static enum es_result vc_init_em_ctxt(struct es_em_ctxt *ctxt,
178e759959fSBrijesh Singh 				      struct pt_regs *regs,
179e759959fSBrijesh Singh 				      unsigned long exit_code)
180e759959fSBrijesh Singh {
181e759959fSBrijesh Singh 	enum es_result ret = ES_OK;
182e759959fSBrijesh Singh 
183e759959fSBrijesh Singh 	memset(ctxt, 0, sizeof(*ctxt));
184e759959fSBrijesh Singh 	ctxt->regs = regs;
185e759959fSBrijesh Singh 
186e759959fSBrijesh Singh 	if (vc_decoding_needed(exit_code))
187e759959fSBrijesh Singh 		ret = vc_decode_insn(ctxt);
188e759959fSBrijesh Singh 
189e759959fSBrijesh Singh 	return ret;
190e759959fSBrijesh Singh }
191e759959fSBrijesh Singh 
/* Advance the saved RIP past the instruction that was just emulated. */
static void vc_finish_insn(struct es_em_ctxt *ctxt)
{
	ctxt->regs->ip += ctxt->insn.length;
}
196e759959fSBrijesh Singh 
197c688bd5dSBorislav Petkov static enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
198e759959fSBrijesh Singh {
199c688bd5dSBorislav Petkov 	u32 ret;
200e759959fSBrijesh Singh 
201c688bd5dSBorislav Petkov 	ret = ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0);
202c688bd5dSBorislav Petkov 	if (!ret)
203c688bd5dSBorislav Petkov 		return ES_OK;
204e759959fSBrijesh Singh 
205c688bd5dSBorislav Petkov 	if (ret == 1) {
206e759959fSBrijesh Singh 		u64 info = ghcb->save.sw_exit_info_2;
2070621210aSColin Ian King 		unsigned long v = info & SVM_EVTINJ_VEC_MASK;
208e759959fSBrijesh Singh 
209e759959fSBrijesh Singh 		/* Check if exception information from hypervisor is sane. */
210e759959fSBrijesh Singh 		if ((info & SVM_EVTINJ_VALID) &&
211e759959fSBrijesh Singh 		    ((v == X86_TRAP_GP) || (v == X86_TRAP_UD)) &&
212e759959fSBrijesh Singh 		    ((info & SVM_EVTINJ_TYPE_MASK) == SVM_EVTINJ_TYPE_EXEPT)) {
213e759959fSBrijesh Singh 			ctxt->fi.vector = v;
214c688bd5dSBorislav Petkov 
215e759959fSBrijesh Singh 			if (info & SVM_EVTINJ_VALID_ERR)
216e759959fSBrijesh Singh 				ctxt->fi.error_code = info >> 32;
217c688bd5dSBorislav Petkov 
218c688bd5dSBorislav Petkov 			return ES_EXCEPTION;
219e759959fSBrijesh Singh 		}
220e759959fSBrijesh Singh 	}
221e759959fSBrijesh Singh 
222c688bd5dSBorislav Petkov 	return ES_VMM_ERROR;
223c688bd5dSBorislav Petkov }
224c688bd5dSBorislav Petkov 
/*
 * Issue a VMGEXIT using the shared GHCB page: fill in the negotiated
 * protocol version and format, set the exit code/info registers, write
 * the GHCB's physical address to the GHCB MSR and exit to the
 * hypervisor. The hypervisor's reply is validated by
 * verify_exception_info() and its result returned.
 */
static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
					  struct es_em_ctxt *ctxt,
					  u64 exit_code, u64 exit_info_1,
					  u64 exit_info_2)
{
	/* Fill in protocol and format specifiers */
	ghcb->protocol_version = ghcb_version;
	ghcb->ghcb_usage       = GHCB_DEFAULT_USAGE;

	ghcb_set_sw_exit_code(ghcb, exit_code);
	ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
	ghcb_set_sw_exit_info_2(ghcb, exit_info_2);

	sev_es_wr_ghcb_msr(__pa(ghcb));
	VMGEXIT();

	return verify_exception_info(ghcb, ctxt);
}
243e759959fSBrijesh Singh 
/*
 * Fetch one CPUID output register for function @fn via the GHCB MSR
 * protocol. @reg_idx selects which register (EAX..EDX) the hypervisor
 * should return; the value arrives in the upper 32 bits of the response.
 * Returns 0 on success, -EIO on a malformed response.
 */
static int __sev_cpuid_hv(u32 fn, int reg_idx, u32 *reg)
{
	u64 val;

	sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, reg_idx));
	VMGEXIT();
	val = sev_es_rd_ghcb_msr();
	if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
		return -EIO;

	*reg = (val >> 32);

	return 0;
}
258801baa69SMichael Roth 
259801baa69SMichael Roth static int sev_cpuid_hv(struct cpuid_leaf *leaf)
260801baa69SMichael Roth {
261801baa69SMichael Roth 	int ret;
262801baa69SMichael Roth 
263801baa69SMichael Roth 	/*
264801baa69SMichael Roth 	 * MSR protocol does not support fetching non-zero subfunctions, but is
265801baa69SMichael Roth 	 * sufficient to handle current early-boot cases. Should that change,
266801baa69SMichael Roth 	 * make sure to report an error rather than ignoring the index and
267801baa69SMichael Roth 	 * grabbing random values. If this issue arises in the future, handling
268801baa69SMichael Roth 	 * can be added here to use GHCB-page protocol for cases that occur late
269801baa69SMichael Roth 	 * enough in boot that GHCB page is available.
270801baa69SMichael Roth 	 */
271801baa69SMichael Roth 	if (cpuid_function_is_indexed(leaf->fn) && leaf->subfn)
272801baa69SMichael Roth 		return -EINVAL;
273801baa69SMichael Roth 
274801baa69SMichael Roth 	ret =         __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_EAX, &leaf->eax);
275801baa69SMichael Roth 	ret = ret ? : __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_EBX, &leaf->ebx);
276801baa69SMichael Roth 	ret = ret ? : __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_ECX, &leaf->ecx);
277801baa69SMichael Roth 	ret = ret ? : __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_EDX, &leaf->edx);
278801baa69SMichael Roth 
279801baa69SMichael Roth 	return ret;
280801baa69SMichael Roth }
281801baa69SMichael Roth 
/*
 * This may be called early while still running on the initial identity
 * mapping. Use RIP-relative addressing to obtain the correct address
 * while running with the initial identity mapping as well as the
 * switch-over to kernel virtual addresses later.
 */
static const struct snp_cpuid_table *snp_cpuid_get_table(void)
{
	void *ptr;

	/*
	 * The "p" input keeps cpuid_table_copy alive for the compiler; the
	 * actual address is computed RIP-relative by the lea.
	 */
	asm ("lea cpuid_table_copy(%%rip), %0"
	     : "=r" (ptr)
	     : "p" (&cpuid_table_copy));

	return ptr;
}
298ee0bfa08SMichael Roth 
299ee0bfa08SMichael Roth /*
300ee0bfa08SMichael Roth  * The SNP Firmware ABI, Revision 0.9, Section 7.1, details the use of
301ee0bfa08SMichael Roth  * XCR0_IN and XSS_IN to encode multiple versions of 0xD subfunctions 0
302ee0bfa08SMichael Roth  * and 1 based on the corresponding features enabled by a particular
303ee0bfa08SMichael Roth  * combination of XCR0 and XSS registers so that a guest can look up the
304ee0bfa08SMichael Roth  * version corresponding to the features currently enabled in its XCR0/XSS
305ee0bfa08SMichael Roth  * registers. The only values that differ between these versions/table
306ee0bfa08SMichael Roth  * entries is the enabled XSAVE area size advertised via EBX.
307ee0bfa08SMichael Roth  *
308ee0bfa08SMichael Roth  * While hypervisors may choose to make use of this support, it is more
309ee0bfa08SMichael Roth  * robust/secure for a guest to simply find the entry corresponding to the
310ee0bfa08SMichael Roth  * base/legacy XSAVE area size (XCR0=1 or XCR0=3), and then calculate the
311ee0bfa08SMichael Roth  * XSAVE area size using subfunctions 2 through 64, as documented in APM
312ee0bfa08SMichael Roth  * Volume 3, Rev 3.31, Appendix E.3.8, which is what is done here.
313ee0bfa08SMichael Roth  *
314ee0bfa08SMichael Roth  * Since base/legacy XSAVE area size is documented as 0x240, use that value
315ee0bfa08SMichael Roth  * directly rather than relying on the base size in the CPUID table.
316ee0bfa08SMichael Roth  *
317ee0bfa08SMichael Roth  * Return: XSAVE area size on success, 0 otherwise.
318ee0bfa08SMichael Roth  */
319ee0bfa08SMichael Roth static u32 snp_cpuid_calc_xsave_size(u64 xfeatures_en, bool compacted)
320ee0bfa08SMichael Roth {
321ee0bfa08SMichael Roth 	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
322ee0bfa08SMichael Roth 	u64 xfeatures_found = 0;
323ee0bfa08SMichael Roth 	u32 xsave_size = 0x240;
324ee0bfa08SMichael Roth 	int i;
325ee0bfa08SMichael Roth 
326ee0bfa08SMichael Roth 	for (i = 0; i < cpuid_table->count; i++) {
327ee0bfa08SMichael Roth 		const struct snp_cpuid_fn *e = &cpuid_table->fn[i];
328ee0bfa08SMichael Roth 
329ee0bfa08SMichael Roth 		if (!(e->eax_in == 0xD && e->ecx_in > 1 && e->ecx_in < 64))
330ee0bfa08SMichael Roth 			continue;
331ee0bfa08SMichael Roth 		if (!(xfeatures_en & (BIT_ULL(e->ecx_in))))
332ee0bfa08SMichael Roth 			continue;
333ee0bfa08SMichael Roth 		if (xfeatures_found & (BIT_ULL(e->ecx_in)))
334ee0bfa08SMichael Roth 			continue;
335ee0bfa08SMichael Roth 
336ee0bfa08SMichael Roth 		xfeatures_found |= (BIT_ULL(e->ecx_in));
337ee0bfa08SMichael Roth 
338ee0bfa08SMichael Roth 		if (compacted)
339ee0bfa08SMichael Roth 			xsave_size += e->eax;
340ee0bfa08SMichael Roth 		else
341ee0bfa08SMichael Roth 			xsave_size = max(xsave_size, e->eax + e->ebx);
342ee0bfa08SMichael Roth 	}
343ee0bfa08SMichael Roth 
344ee0bfa08SMichael Roth 	/*
345ee0bfa08SMichael Roth 	 * Either the guest set unsupported XCR0/XSS bits, or the corresponding
346ee0bfa08SMichael Roth 	 * entries in the CPUID table were not present. This is not a valid
347ee0bfa08SMichael Roth 	 * state to be in.
348ee0bfa08SMichael Roth 	 */
349ee0bfa08SMichael Roth 	if (xfeatures_found != (xfeatures_en & GENMASK_ULL(63, 2)))
350ee0bfa08SMichael Roth 		return 0;
351ee0bfa08SMichael Roth 
352ee0bfa08SMichael Roth 	return xsave_size;
353ee0bfa08SMichael Roth }
354ee0bfa08SMichael Roth 
355ee0bfa08SMichael Roth static bool
356ee0bfa08SMichael Roth snp_cpuid_get_validated_func(struct cpuid_leaf *leaf)
357ee0bfa08SMichael Roth {
358ee0bfa08SMichael Roth 	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
359ee0bfa08SMichael Roth 	int i;
360ee0bfa08SMichael Roth 
361ee0bfa08SMichael Roth 	for (i = 0; i < cpuid_table->count; i++) {
362ee0bfa08SMichael Roth 		const struct snp_cpuid_fn *e = &cpuid_table->fn[i];
363ee0bfa08SMichael Roth 
364ee0bfa08SMichael Roth 		if (e->eax_in != leaf->fn)
365ee0bfa08SMichael Roth 			continue;
366ee0bfa08SMichael Roth 
367ee0bfa08SMichael Roth 		if (cpuid_function_is_indexed(leaf->fn) && e->ecx_in != leaf->subfn)
368ee0bfa08SMichael Roth 			continue;
369ee0bfa08SMichael Roth 
370ee0bfa08SMichael Roth 		/*
371ee0bfa08SMichael Roth 		 * For 0xD subfunctions 0 and 1, only use the entry corresponding
372ee0bfa08SMichael Roth 		 * to the base/legacy XSAVE area size (XCR0=1 or XCR0=3, XSS=0).
373ee0bfa08SMichael Roth 		 * See the comments above snp_cpuid_calc_xsave_size() for more
374ee0bfa08SMichael Roth 		 * details.
375ee0bfa08SMichael Roth 		 */
376ee0bfa08SMichael Roth 		if (e->eax_in == 0xD && (e->ecx_in == 0 || e->ecx_in == 1))
377ee0bfa08SMichael Roth 			if (!(e->xcr0_in == 1 || e->xcr0_in == 3) || e->xss_in)
378ee0bfa08SMichael Roth 				continue;
379ee0bfa08SMichael Roth 
380ee0bfa08SMichael Roth 		leaf->eax = e->eax;
381ee0bfa08SMichael Roth 		leaf->ebx = e->ebx;
382ee0bfa08SMichael Roth 		leaf->ecx = e->ecx;
383ee0bfa08SMichael Roth 		leaf->edx = e->edx;
384ee0bfa08SMichael Roth 
385ee0bfa08SMichael Roth 		return true;
386ee0bfa08SMichael Roth 	}
387ee0bfa08SMichael Roth 
388ee0bfa08SMichael Roth 	return false;
389ee0bfa08SMichael Roth }
390ee0bfa08SMichael Roth 
/*
 * Fetch @leaf from the hypervisor; a failure here means CPUID state
 * cannot be trusted, so terminate the guest rather than continue.
 */
static void snp_cpuid_hv(struct cpuid_leaf *leaf)
{
	if (sev_cpuid_hv(leaf))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID_HV);
}
396ee0bfa08SMichael Roth 
/*
 * Post-process a CPUID leaf obtained from the SNP CPUID table. The table
 * is only a template: some fields (APIC IDs, OSXSAVE/OSPKE state, XSAVE
 * sizes) depend on the current CPU or control-register state and must be
 * filled in or recomputed here, in some cases by consulting the
 * hypervisor for the CPU-specific portion.
 *
 * Returns 0 on success, -EINVAL for invalid 0xD XSAVE configurations.
 */
static int snp_cpuid_postprocess(struct cpuid_leaf *leaf)
{
	/* Scratch copy for the leaves that need hypervisor-supplied bits. */
	struct cpuid_leaf leaf_hv = *leaf;

	switch (leaf->fn) {
	case 0x1:
		snp_cpuid_hv(&leaf_hv);

		/* initial APIC ID */
		leaf->ebx = (leaf_hv.ebx & GENMASK(31, 24)) | (leaf->ebx & GENMASK(23, 0));
		/* APIC enabled bit */
		leaf->edx = (leaf_hv.edx & BIT(9)) | (leaf->edx & ~BIT(9));

		/* OSXSAVE enabled bit */
		if (native_read_cr4() & X86_CR4_OSXSAVE)
			leaf->ecx |= BIT(27);
		break;
	case 0x7:
		/* OSPKE enabled bit */
		leaf->ecx &= ~BIT(4);
		if (native_read_cr4() & X86_CR4_PKE)
			leaf->ecx |= BIT(4);
		break;
	case 0xB:
		leaf_hv.subfn = 0;
		snp_cpuid_hv(&leaf_hv);

		/* extended APIC ID */
		leaf->edx = leaf_hv.edx;
		break;
	case 0xD: {
		bool compacted = false;
		u64 xcr0 = 1, xss = 0;
		u32 xsave_size;

		/* Only subfunctions 0 and 1 report XSAVE sizes. */
		if (leaf->subfn != 0 && leaf->subfn != 1)
			return 0;

		if (native_read_cr4() & X86_CR4_OSXSAVE)
			xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
		if (leaf->subfn == 1) {
			/* Get XSS value if XSAVES is enabled. */
			if (leaf->eax & BIT(3)) {
				unsigned long lo, hi;

				asm volatile("rdmsr" : "=a" (lo), "=d" (hi)
						     : "c" (MSR_IA32_XSS));
				xss = (hi << 32) | lo;
			}

			/*
			 * The PPR and APM aren't clear on what size should be
			 * encoded in 0xD:0x1:EBX when compaction is not enabled
			 * by either XSAVEC (feature bit 1) or XSAVES (feature
			 * bit 3) since SNP-capable hardware has these feature
			 * bits fixed as 1. KVM sets it to 0 in this case, but
			 * to avoid this becoming an issue it's safer to simply
			 * treat this as unsupported for SNP guests.
			 */
			if (!(leaf->eax & (BIT(1) | BIT(3))))
				return -EINVAL;

			compacted = true;
		}

		xsave_size = snp_cpuid_calc_xsave_size(xcr0 | xss, compacted);
		if (!xsave_size)
			return -EINVAL;

		leaf->ebx = xsave_size;
		}
		break;
	case 0x8000001E:
		snp_cpuid_hv(&leaf_hv);

		/* extended APIC ID */
		leaf->eax = leaf_hv.eax;
		/* compute ID */
		leaf->ebx = (leaf->ebx & GENMASK(31, 8)) | (leaf_hv.ebx & GENMASK(7, 0));
		/* node ID */
		leaf->ecx = (leaf->ecx & GENMASK(31, 8)) | (leaf_hv.ecx & GENMASK(7, 0));
		break;
	default:
		/* No fix-ups needed, use values as-is. */
		break;
	}

	return 0;
}
486ee0bfa08SMichael Roth 
/*
 * Resolve a CPUID request from the SNP CPUID table, post-processing the
 * result as needed.
 *
 * Returns -EOPNOTSUPP if feature not enabled (empty table). Any other
 * non-zero return value should be treated as fatal by caller.
 */
static int snp_cpuid(struct cpuid_leaf *leaf)
{
	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();

	if (!cpuid_table->count)
		return -EOPNOTSUPP;

	if (!snp_cpuid_get_validated_func(leaf)) {
		/*
		 * Some hypervisors will avoid keeping track of CPUID entries
		 * where all values are zero, since they can be handled the
		 * same as out-of-range values (all-zero). This is useful here
		 * as well as it allows virtually all guest configurations to
		 * work using a single SNP CPUID table.
		 *
		 * To allow for this, there is a need to distinguish between
		 * out-of-range entries and in-range zero entries, since the
		 * CPUID table entries are only a template that may need to be
		 * augmented with additional values for things like
		 * CPU-specific information during post-processing. So if it's
		 * not in the table, set the values to zero. Then, if they are
		 * within a valid CPUID range, proceed with post-processing
		 * using zeros as the initial values. Otherwise, skip
		 * post-processing and just return zeros immediately.
		 */
		leaf->eax = leaf->ebx = leaf->ecx = leaf->edx = 0;

		/* Skip post-processing for out-of-range zero leafs. */
		if (!(leaf->fn <= cpuid_std_range_max ||
		      (leaf->fn >= 0x40000000 && leaf->fn <= cpuid_hyp_range_max) ||
		      (leaf->fn >= 0x80000000 && leaf->fn <= cpuid_ext_range_max)))
			return 0;
	}

	return snp_cpuid_postprocess(leaf);
}
527ee0bfa08SMichael Roth 
/*
 * Boot VC Handler - This is the first VC handler during boot, there is no GHCB
 * page yet, so it only supports the MSR based communication with the
 * hypervisor and only the CPUID exit-code.
 */
void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
{
	unsigned int subfn = lower_bits(regs->cx, 32);
	unsigned int fn = lower_bits(regs->ax, 32);
	struct cpuid_leaf leaf;
	int ret;

	/* Only CPUID is supported via MSR protocol */
	if (exit_code != SVM_EXIT_CPUID)
		goto fail;

	leaf.fn = fn;
	leaf.subfn = subfn;

	/* Prefer the SNP CPUID table; fall back to the hypervisor below. */
	ret = snp_cpuid(&leaf);
	if (!ret)
		goto cpuid_done;

	if (ret != -EOPNOTSUPP)
		goto fail;

	if (sev_cpuid_hv(&leaf))
		goto fail;

cpuid_done:
	regs->ax = leaf.eax;
	regs->bx = leaf.ebx;
	regs->cx = leaf.ecx;
	regs->dx = leaf.edx;

	/*
	 * This is a VC handler and the #VC is only raised when SEV-ES is
	 * active, which means SEV must be active too. Do sanity checks on the
	 * CPUID results to make sure the hypervisor does not trick the kernel
	 * into the no-sev path. This could map sensitive data unencrypted and
	 * make it accessible to the hypervisor.
	 *
	 * In particular, check for:
	 *	- Availability of CPUID leaf 0x8000001f
	 *	- SEV CPUID bit.
	 *
	 * The hypervisor might still report the wrong C-bit position, but this
	 * can't be checked here.
	 */

	if (fn == 0x80000000 && (regs->ax < 0x8000001f))
		/* SEV leaf check */
		goto fail;
	else if ((fn == 0x8000001f && !(regs->ax & BIT(1))))
		/* SEV bit */
		goto fail;

	/* Skip over the CPUID two-byte opcode */
	regs->ip += 2;

	return;

fail:
	/* Terminate the guest */
	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
}
594e759959fSBrijesh Singh 
595e759959fSBrijesh Singh static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
596e759959fSBrijesh Singh 					  void *src, char *buf,
597e759959fSBrijesh Singh 					  unsigned int data_size,
598e759959fSBrijesh Singh 					  unsigned int count,
599e759959fSBrijesh Singh 					  bool backwards)
600e759959fSBrijesh Singh {
601e759959fSBrijesh Singh 	int i, b = backwards ? -1 : 1;
602e759959fSBrijesh Singh 	enum es_result ret = ES_OK;
603e759959fSBrijesh Singh 
604e759959fSBrijesh Singh 	for (i = 0; i < count; i++) {
605e759959fSBrijesh Singh 		void *s = src + (i * data_size * b);
606e759959fSBrijesh Singh 		char *d = buf + (i * data_size);
607e759959fSBrijesh Singh 
608e759959fSBrijesh Singh 		ret = vc_read_mem(ctxt, s, d, data_size);
609e759959fSBrijesh Singh 		if (ret != ES_OK)
610e759959fSBrijesh Singh 			break;
611e759959fSBrijesh Singh 	}
612e759959fSBrijesh Singh 
613e759959fSBrijesh Singh 	return ret;
614e759959fSBrijesh Singh }
615e759959fSBrijesh Singh 
616e759959fSBrijesh Singh static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt,
617e759959fSBrijesh Singh 					   void *dst, char *buf,
618e759959fSBrijesh Singh 					   unsigned int data_size,
619e759959fSBrijesh Singh 					   unsigned int count,
620e759959fSBrijesh Singh 					   bool backwards)
621e759959fSBrijesh Singh {
622e759959fSBrijesh Singh 	int i, s = backwards ? -1 : 1;
623e759959fSBrijesh Singh 	enum es_result ret = ES_OK;
624e759959fSBrijesh Singh 
625e759959fSBrijesh Singh 	for (i = 0; i < count; i++) {
626e759959fSBrijesh Singh 		void *d = dst + (i * data_size * s);
627e759959fSBrijesh Singh 		char *b = buf + (i * data_size);
628e759959fSBrijesh Singh 
629e759959fSBrijesh Singh 		ret = vc_write_mem(ctxt, d, b, data_size);
630e759959fSBrijesh Singh 		if (ret != ES_OK)
631e759959fSBrijesh Singh 			break;
632e759959fSBrijesh Singh 	}
633e759959fSBrijesh Singh 
634e759959fSBrijesh Singh 	return ret;
635e759959fSBrijesh Singh }
636e759959fSBrijesh Singh 
/*
 * Bit encoding of the SW_EXITINFO1 value for the GHCB IOIO event, built by
 * vc_ioio_exitinfo(). Bits 31:16 additionally carry the 16-bit port number.
 */

/* Transfer type: direction bit plus string-variant bit */
#define IOIO_TYPE_STR  BIT(2)
#define IOIO_TYPE_IN   1
#define IOIO_TYPE_INS  (IOIO_TYPE_IN | IOIO_TYPE_STR)
#define IOIO_TYPE_OUT  0
#define IOIO_TYPE_OUTS (IOIO_TYPE_OUT | IOIO_TYPE_STR)

/* Instruction carried a REP prefix */
#define IOIO_REP       BIT(3)

/* Address size of the instruction - exactly one is set */
#define IOIO_ADDR_64   BIT(9)
#define IOIO_ADDR_32   BIT(8)
#define IOIO_ADDR_16   BIT(7)

/* Operand (element) size - exactly one is set */
#define IOIO_DATA_32   BIT(6)
#define IOIO_DATA_16   BIT(5)
#define IOIO_DATA_8    BIT(4)

/* Effective segment of the memory operand for the string variants */
#define IOIO_SEG_ES    (0 << 10)
#define IOIO_SEG_DS    (3 << 10)
655e759959fSBrijesh Singh 
/*
 * Decode the I/O instruction at ctxt->insn and build the SW_EXITINFO1 value
 * for the GHCB IOIO event in *exitinfo: transfer type, segment, port number
 * (bits 31:16), operand size, address size and REP prefix, using the IOIO_*
 * encoding defined above.
 *
 * Returns ES_DECODE_FAILED if the opcode is not a recognized IN/OUT/INS/OUTS
 * form, otherwise the result of vc_ioio_check() on the decoded port and
 * access size (vc_ioio_check() is presumably provided by the code-base that
 * includes this file).
 */
static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
{
	struct insn *insn = &ctxt->insn;
	size_t size;
	u64 port;

	*exitinfo = 0;

	switch (insn->opcode.bytes[0]) {
	/* INS opcodes */
	case 0x6c:
	case 0x6d:
		*exitinfo |= IOIO_TYPE_INS;
		*exitinfo |= IOIO_SEG_ES;
		port	   = ctxt->regs->dx & 0xffff;
		break;

	/* OUTS opcodes */
	case 0x6e:
	case 0x6f:
		*exitinfo |= IOIO_TYPE_OUTS;
		*exitinfo |= IOIO_SEG_DS;
		port	   = ctxt->regs->dx & 0xffff;
		break;

	/* IN immediate opcodes */
	case 0xe4:
	case 0xe5:
		*exitinfo |= IOIO_TYPE_IN;
		port	   = (u8)insn->immediate.value & 0xffff;
		break;

	/* OUT immediate opcodes */
	case 0xe6:
	case 0xe7:
		*exitinfo |= IOIO_TYPE_OUT;
		port	   = (u8)insn->immediate.value & 0xffff;
		break;

	/* IN register opcodes */
	case 0xec:
	case 0xed:
		*exitinfo |= IOIO_TYPE_IN;
		port	   = ctxt->regs->dx & 0xffff;
		break;

	/* OUT register opcodes */
	case 0xee:
	case 0xef:
		*exitinfo |= IOIO_TYPE_OUT;
		port	   = ctxt->regs->dx & 0xffff;
		break;

	default:
		return ES_DECODE_FAILED;
	}

	/* The 16-bit port number lives in bits 31:16 of SW_EXITINFO1 */
	*exitinfo |= port << 16;

	switch (insn->opcode.bytes[0]) {
	case 0x6c:
	case 0x6e:
	case 0xe4:
	case 0xe6:
	case 0xec:
	case 0xee:
		/* Single byte opcodes */
		*exitinfo |= IOIO_DATA_8;
		size       = 1;
		break;
	default:
		/* Length determined by instruction parsing */
		*exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16
						     : IOIO_DATA_32;
		size       = (insn->opnd_bytes == 2) ? 2 : 4;
	}

	/* Address size as decoded from prefixes/mode by the insn parser */
	switch (insn->addr_bytes) {
	case 2:
		*exitinfo |= IOIO_ADDR_16;
		break;
	case 4:
		*exitinfo |= IOIO_ADDR_32;
		break;
	case 8:
		*exitinfo |= IOIO_ADDR_64;
		break;
	}

	if (insn_has_rep_prefix(insn))
		*exitinfo |= IOIO_REP;

	return vc_ioio_check(ctxt, (u16)port, size);
}
750e759959fSBrijesh Singh 
/*
 * Handle a #VC exception for an IN/OUT/INS/OUTS instruction.
 *
 * The instruction is decoded into a GHCB IOIO event by vc_ioio_exitinfo().
 * Register forms exchange data through ghcb->save.rax; string forms stage
 * data through the GHCB shared buffer in bounded chunks and update
 * rSI/rDI/rCX as the hardware instruction would, returning ES_RETRY until
 * the whole REP count has been transferred.
 */
static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	struct pt_regs *regs = ctxt->regs;
	u64 exit_info_1, exit_info_2;
	enum es_result ret;

	ret = vc_ioio_exitinfo(ctxt, &exit_info_1);
	if (ret != ES_OK)
		return ret;

	if (exit_info_1 & IOIO_TYPE_STR) {

		/* (REP) INS/OUTS */

		bool df = ((regs->flags & X86_EFLAGS_DF) == X86_EFLAGS_DF);
		unsigned int io_bytes, exit_bytes;
		unsigned int ghcb_count, op_count;
		unsigned long es_base;
		u64 sw_scratch;

		/*
		 * For the string variants with rep prefix the amount of in/out
		 * operations per #VC exception is limited so that the kernel
		 * has a chance to take interrupts and re-schedule while the
		 * instruction is emulated.
		 */
		io_bytes   = (exit_info_1 >> 4) & 0x7; /* IOIO_DATA_* bit value == element size: 1, 2 or 4 */
		ghcb_count = sizeof(ghcb->shared_buffer) / io_bytes;

		op_count    = (exit_info_1 & IOIO_REP) ? regs->cx : 1;
		exit_info_2 = min(op_count, ghcb_count); /* elements handled this iteration */
		exit_bytes  = exit_info_2 * io_bytes;

		es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);

		/* Read bytes of OUTS into the shared buffer */
		if (!(exit_info_1 & IOIO_TYPE_IN)) {
			/*
			 * NOTE(review): the OUTS source is read relative to the
			 * ES base although the exitinfo advertises DS as the
			 * source segment; harmless when both bases are 0 (long
			 * mode) - confirm this is intentional.
			 */
			ret = vc_insn_string_read(ctxt,
					       (void *)(es_base + regs->si),
					       ghcb->shared_buffer, io_bytes,
					       exit_info_2, df);
			if (ret)
				return ret;
		}

		/*
		 * Issue an VMGEXIT to the HV to consume the bytes from the
		 * shared buffer or to have it write them into the shared buffer
		 * depending on the instruction: OUTS or INS.
		 */
		sw_scratch = __pa(ghcb) + offsetof(struct ghcb, shared_buffer);
		ghcb_set_sw_scratch(ghcb, sw_scratch);
		ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO,
					  exit_info_1, exit_info_2);
		if (ret != ES_OK)
			return ret;

		/* Read bytes from shared buffer into the guest's destination. */
		if (exit_info_1 & IOIO_TYPE_IN) {
			ret = vc_insn_string_write(ctxt,
						   (void *)(es_base + regs->di),
						   ghcb->shared_buffer, io_bytes,
						   exit_info_2, df);
			if (ret)
				return ret;

			/* Advance rDI as the INS instruction would (DF decides direction) */
			if (df)
				regs->di -= exit_bytes;
			else
				regs->di += exit_bytes;
		} else {
			/* Advance rSI as the OUTS instruction would */
			if (df)
				regs->si -= exit_bytes;
			else
				regs->si += exit_bytes;
		}

		if (exit_info_1 & IOIO_REP)
			regs->cx -= exit_info_2;

		/*
		 * NOTE(review): for a non-REP string op rCX is not decremented,
		 * so a stale non-zero rCX still yields ES_RETRY here - confirm
		 * callers cannot loop on this.
		 */
		ret = regs->cx ? ES_RETRY : ES_OK;

	} else {

		/* IN/OUT into/from rAX */

		/* IOIO_DATA_* bit (0x10/0x20/0x40) >> 1 == operand width in bits: 8/16/32 */
		int bits = (exit_info_1 & 0x70) >> 1;
		u64 rax = 0;

		if (!(exit_info_1 & IOIO_TYPE_IN))
			rax = lower_bits(regs->ax, bits);

		ghcb_set_rax(ghcb, rax);

		ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO, exit_info_1, 0);
		if (ret != ES_OK)
			return ret;

		if (exit_info_1 & IOIO_TYPE_IN) {
			/* The HV must have filled in RAX for an IN */
			if (!ghcb_rax_is_valid(ghcb))
				return ES_VMM_ERROR;
			regs->ax = lower_bits(ghcb->save.rax, bits);
		}
	}

	return ret;
}
858e759959fSBrijesh Singh 
859ee0bfa08SMichael Roth static int vc_handle_cpuid_snp(struct pt_regs *regs)
860ee0bfa08SMichael Roth {
861ee0bfa08SMichael Roth 	struct cpuid_leaf leaf;
862ee0bfa08SMichael Roth 	int ret;
863ee0bfa08SMichael Roth 
864ee0bfa08SMichael Roth 	leaf.fn = regs->ax;
865ee0bfa08SMichael Roth 	leaf.subfn = regs->cx;
866ee0bfa08SMichael Roth 	ret = snp_cpuid(&leaf);
867ee0bfa08SMichael Roth 	if (!ret) {
868ee0bfa08SMichael Roth 		regs->ax = leaf.eax;
869ee0bfa08SMichael Roth 		regs->bx = leaf.ebx;
870ee0bfa08SMichael Roth 		regs->cx = leaf.ecx;
871ee0bfa08SMichael Roth 		regs->dx = leaf.edx;
872ee0bfa08SMichael Roth 	}
873ee0bfa08SMichael Roth 
874ee0bfa08SMichael Roth 	return ret;
875ee0bfa08SMichael Roth }
876ee0bfa08SMichael Roth 
/*
 * Handle a #VC exception for CPUID.
 *
 * The leaf is first looked up via vc_handle_cpuid_snp(): a return of 0 means
 * the SNP CPUID table answered the request, -EOPNOTSUPP falls back to asking
 * the hypervisor through the GHCB, and any other error is fatal
 * (ES_VMM_ERROR).
 */
static enum es_result vc_handle_cpuid(struct ghcb *ghcb,
				      struct es_em_ctxt *ctxt)
{
	struct pt_regs *regs = ctxt->regs;
	u32 cr4 = native_read_cr4();
	enum es_result ret;
	int snp_cpuid_ret;

	snp_cpuid_ret = vc_handle_cpuid_snp(regs);
	if (!snp_cpuid_ret)
		return ES_OK;
	if (snp_cpuid_ret != -EOPNOTSUPP)
		return ES_VMM_ERROR;

	/* Forward the request to the hypervisor via the GHCB */
	ghcb_set_rax(ghcb, regs->ax);
	ghcb_set_rcx(ghcb, regs->cx);

	if (cr4 & X86_CR4_OSXSAVE)
		/* Safe to read xcr0 */
		ghcb_set_xcr0(ghcb, xgetbv(XCR_XFEATURE_ENABLED_MASK));
	else
		/* xgetbv will cause #GP - use reset value for xcr0 */
		ghcb_set_xcr0(ghcb, 1);

	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0);
	if (ret != ES_OK)
		return ret;

	/* All four CPUID output registers must be marked valid by the HV */
	if (!(ghcb_rax_is_valid(ghcb) &&
	      ghcb_rbx_is_valid(ghcb) &&
	      ghcb_rcx_is_valid(ghcb) &&
	      ghcb_rdx_is_valid(ghcb)))
		return ES_VMM_ERROR;

	regs->ax = ghcb->save.rax;
	regs->bx = ghcb->save.rbx;
	regs->cx = ghcb->save.rcx;
	regs->dx = ghcb->save.rdx;

	return ES_OK;
}
918e759959fSBrijesh Singh 
919e759959fSBrijesh Singh static enum es_result vc_handle_rdtsc(struct ghcb *ghcb,
920e759959fSBrijesh Singh 				      struct es_em_ctxt *ctxt,
921e759959fSBrijesh Singh 				      unsigned long exit_code)
922e759959fSBrijesh Singh {
923e759959fSBrijesh Singh 	bool rdtscp = (exit_code == SVM_EXIT_RDTSCP);
924e759959fSBrijesh Singh 	enum es_result ret;
925e759959fSBrijesh Singh 
9265bb6c1d1SBorislav Petkov 	ret = sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, 0, 0);
927e759959fSBrijesh Singh 	if (ret != ES_OK)
928e759959fSBrijesh Singh 		return ret;
929e759959fSBrijesh Singh 
930e759959fSBrijesh Singh 	if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb) &&
931e759959fSBrijesh Singh 	     (!rdtscp || ghcb_rcx_is_valid(ghcb))))
932e759959fSBrijesh Singh 		return ES_VMM_ERROR;
933e759959fSBrijesh Singh 
934e759959fSBrijesh Singh 	ctxt->regs->ax = ghcb->save.rax;
935e759959fSBrijesh Singh 	ctxt->regs->dx = ghcb->save.rdx;
936e759959fSBrijesh Singh 	if (rdtscp)
937e759959fSBrijesh Singh 		ctxt->regs->cx = ghcb->save.rcx;
938e759959fSBrijesh Singh 
939e759959fSBrijesh Singh 	return ES_OK;
940e759959fSBrijesh Singh }
941b190a043SMichael Roth 
/*
 * Linux Boot Protocol setup_data entry (type SETUP_CC_BLOB) carrying the
 * 32-bit address of the Confidential Computing blob; the address is cast
 * directly to a struct cc_blob_sev_info pointer in find_cc_blob_setup_data().
 */
struct cc_setup_data {
	struct setup_data header;
	u32 cc_blob_address;
};
946b190a043SMichael Roth 
947b190a043SMichael Roth /*
948b190a043SMichael Roth  * Search for a Confidential Computing blob passed in as a setup_data entry
949b190a043SMichael Roth  * via the Linux Boot Protocol.
950b190a043SMichael Roth  */
951b190a043SMichael Roth static struct cc_blob_sev_info *find_cc_blob_setup_data(struct boot_params *bp)
952b190a043SMichael Roth {
953b190a043SMichael Roth 	struct cc_setup_data *sd = NULL;
954b190a043SMichael Roth 	struct setup_data *hdr;
955b190a043SMichael Roth 
956b190a043SMichael Roth 	hdr = (struct setup_data *)bp->hdr.setup_data;
957b190a043SMichael Roth 
958b190a043SMichael Roth 	while (hdr) {
959b190a043SMichael Roth 		if (hdr->type == SETUP_CC_BLOB) {
960b190a043SMichael Roth 			sd = (struct cc_setup_data *)hdr;
961b190a043SMichael Roth 			return (struct cc_blob_sev_info *)(unsigned long)sd->cc_blob_address;
962b190a043SMichael Roth 		}
963b190a043SMichael Roth 		hdr = (struct setup_data *)hdr->next;
964b190a043SMichael Roth 	}
965b190a043SMichael Roth 
966b190a043SMichael Roth 	return NULL;
967b190a043SMichael Roth }
96830612045SMichael Roth 
96930612045SMichael Roth /*
97030612045SMichael Roth  * Initialize the kernel's copy of the SNP CPUID table, and set up the
97130612045SMichael Roth  * pointer that will be used to access it.
97230612045SMichael Roth  *
97330612045SMichael Roth  * Maintaining a direct mapping of the SNP CPUID table used by firmware would
97430612045SMichael Roth  * be possible as an alternative, but the approach is brittle since the
97530612045SMichael Roth  * mapping needs to be updated in sync with all the changes to virtual memory
97630612045SMichael Roth  * layout and related mapping facilities throughout the boot process.
97730612045SMichael Roth  */
97830612045SMichael Roth static void __init setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
97930612045SMichael Roth {
98030612045SMichael Roth 	const struct snp_cpuid_table *cpuid_table_fw, *cpuid_table;
98130612045SMichael Roth 	int i;
98230612045SMichael Roth 
98330612045SMichael Roth 	if (!cc_info || !cc_info->cpuid_phys || cc_info->cpuid_len < PAGE_SIZE)
98430612045SMichael Roth 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID);
98530612045SMichael Roth 
98630612045SMichael Roth 	cpuid_table_fw = (const struct snp_cpuid_table *)cc_info->cpuid_phys;
98730612045SMichael Roth 	if (!cpuid_table_fw->count || cpuid_table_fw->count > SNP_CPUID_COUNT_MAX)
98830612045SMichael Roth 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID);
98930612045SMichael Roth 
99030612045SMichael Roth 	cpuid_table = snp_cpuid_get_table();
99130612045SMichael Roth 	memcpy((void *)cpuid_table, cpuid_table_fw, sizeof(*cpuid_table));
99230612045SMichael Roth 
99330612045SMichael Roth 	/* Initialize CPUID ranges for range-checking. */
99430612045SMichael Roth 	for (i = 0; i < cpuid_table->count; i++) {
99530612045SMichael Roth 		const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];
99630612045SMichael Roth 
99730612045SMichael Roth 		if (fn->eax_in == 0x0)
99830612045SMichael Roth 			cpuid_std_range_max = fn->eax;
99930612045SMichael Roth 		else if (fn->eax_in == 0x40000000)
100030612045SMichael Roth 			cpuid_hyp_range_max = fn->eax;
100130612045SMichael Roth 		else if (fn->eax_in == 0x80000000)
100230612045SMichael Roth 			cpuid_ext_range_max = fn->eax;
100330612045SMichael Roth 	}
100430612045SMichael Roth }
10056c321179STom Lendacky 
/*
 * PVALIDATE every page described by a Page State Change descriptor.
 *
 * Pages whose operation is SNP_PAGE_STATE_PRIVATE are validated, all others
 * have their validation revoked. If a 2M PVALIDATE fails with
 * FAIL_SIZEMISMATCH - presumably because the range is backed by 4K RMP
 * entries - the 2M range is retried in 4K steps. Any remaining failure
 * terminates the guest.
 */
static void pvalidate_pages(struct snp_psc_desc *desc)
{
	struct psc_entry *e;
	unsigned long vaddr;
	unsigned int size;
	unsigned int i;
	bool validate;
	int rc;

	for (i = 0; i <= desc->hdr.end_entry; i++) {
		e = &desc->entries[i];

		vaddr = (unsigned long)pfn_to_kaddr(e->gfn);
		size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
		validate = e->operation == SNP_PAGE_STATE_PRIVATE;

		rc = pvalidate(vaddr, size, validate);
		if (rc == PVALIDATE_FAIL_SIZEMISMATCH && size == RMP_PG_SIZE_2M) {
			unsigned long vaddr_end = vaddr + PMD_SIZE;

			/* Fall back to 4K granularity for the whole 2M range */
			for (; vaddr < vaddr_end; vaddr += PAGE_SIZE) {
				rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
				if (rc)
					break;
			}
		}

		/* rc here is either the 2M result or the first failing 4K result */
		if (rc) {
			WARN(1, "Failed to validate address 0x%lx ret %d", vaddr, rc);
			sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
		}
	}
}
10396c321179STom Lendacky 
/*
 * Submit a Page State Change request described by @desc to the hypervisor.
 *
 * The descriptor is copied into the GHCB shared buffer and handed over with
 * the SVM_VMGEXIT_PSC call; the hypervisor updates that copy in place to
 * report progress, and the call is repeated until all entries are processed.
 *
 * Returns 0 on success, 1 on any failure (a failed call, an error in
 * exit_info_2, or an inconsistent header written by the hypervisor).
 */
static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)
{
	int cur_entry, end_entry, ret = 0;
	struct snp_psc_desc *data;
	struct es_em_ctxt ctxt;

	vc_ghcb_invalidate(ghcb);

	/* Copy the input desc into GHCB shared buffer */
	data = (struct snp_psc_desc *)ghcb->shared_buffer;
	memcpy(ghcb->shared_buffer, desc, min_t(int, GHCB_SHARED_BUF_SIZE, sizeof(*desc)));

	/*
	 * As per the GHCB specification, the hypervisor can resume the guest
	 * before processing all the entries. Check whether all the entries
	 * are processed. If not, then keep retrying. Note, the hypervisor
	 * will update the data memory directly to indicate the status, so
	 * reference the data->hdr everywhere.
	 *
	 * The strategy here is to wait for the hypervisor to change the page
	 * state in the RMP table before guest accesses the memory pages. If the
	 * page state change was not successful, then later memory access will
	 * result in a crash.
	 */
	cur_entry = data->hdr.cur_entry;
	end_entry = data->hdr.end_entry;

	while (data->hdr.cur_entry <= data->hdr.end_entry) {
		ghcb_set_sw_scratch(ghcb, (u64)__pa(data));

		/* This will advance the shared buffer data points to. */
		ret = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_PSC, 0, 0);

		/*
		 * Page State Change VMGEXIT can pass error code through
		 * exit_info_2.
		 */
		if (WARN(ret || ghcb->save.sw_exit_info_2,
			 "SNP: PSC failed ret=%d exit_info_2=%llx\n",
			 ret, ghcb->save.sw_exit_info_2)) {
			ret = 1;
			goto out;
		}

		/* Verify that reserved bit is not set */
		if (WARN(data->hdr.reserved, "Reserved bit is set in the PSC header\n")) {
			ret = 1;
			goto out;
		}

		/*
		 * Sanity check that entry processing is not going backwards.
		 * This will happen only if hypervisor is tricking us.
		 */
		if (WARN(data->hdr.end_entry > end_entry || cur_entry > data->hdr.cur_entry,
"SNP: PSC processing going backward, end_entry %d (got %d) cur_entry %d (got %d)\n",
			 end_entry, data->hdr.end_entry, cur_entry, data->hdr.cur_entry)) {
			ret = 1;
			goto out;
		}
	}

out:
	return ret;
}
1105