// SPDX-License-Identifier: GPL-2.0
/*
 * AMD Encrypted Register State Support
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This file is not compiled stand-alone. It contains code shared
 * between the pre-decompression boot code and the running Linux kernel
 * and is included directly into both code-bases.
 */

#ifndef __BOOT_COMPRESSED
#define error(v)	pr_err(v)
#define has_cpuflag(f)	boot_cpu_has(f)
#else
#undef WARN
#define WARN(condition, format...) (!!(condition))
#endif

/* I/O parameters for CPUID-related helpers */
struct cpuid_leaf {
	u32 fn;
	u32 subfn;
	u32 eax;
	u32 ebx;
	u32 ecx;
	u32 edx;
};

/*
 * Individual entries of the SNP CPUID table, as defined by the SNP
 * Firmware ABI, Revision 0.9, Section 7.1, Table 14.
 */
struct snp_cpuid_fn {
	u32 eax_in;
	u32 ecx_in;
	u64 xcr0_in;
	u64 xss_in;
	u32 eax;
	u32 ebx;
	u32 ecx;
	u32 edx;
	u64 __reserved;
} __packed;

/*
 * SNP CPUID table, as defined by the SNP Firmware ABI, Revision 0.9,
 * Section 8.14.2.6. Also noted there is the SNP firmware-enforced limit
 * of 64 entries per CPUID table.
 */
#define SNP_CPUID_COUNT_MAX 64

struct snp_cpuid_table {
	u32 count;
	u32 __reserved1;
	u64 __reserved2;
	struct snp_cpuid_fn fn[SNP_CPUID_COUNT_MAX];
} __packed;

/*
 * Since feature negotiation related variables are set early in the boot
 * process, they must reside in the .data section so as not to be zeroed
 * out when the .bss section is later cleared.
 *
 * GHCB protocol version negotiated with the hypervisor.
 */
static u16 ghcb_version __ro_after_init;

/* Copy of the SNP firmware's CPUID page. */
static struct snp_cpuid_table cpuid_table_copy __ro_after_init;

/*
 * These will be initialized based on CPUID table so that non-present
 * all-zero leaves (for sparse tables) can be differentiated from
 * invalid/out-of-range leaves. This is needed since all-zero leaves
 * still need to be post-processed.
 */
static u32 cpuid_std_range_max __ro_after_init;
static u32 cpuid_hyp_range_max __ro_after_init;
static u32 cpuid_ext_range_max __ro_after_init;

static bool __init sev_es_check_cpu_features(void)
{
	if (!has_cpuflag(X86_FEATURE_RDRAND)) {
		error("RDRAND instruction not supported - no trusted source of randomness available\n");
		return false;
	}

	return true;
}

static void __noreturn sev_es_terminate(unsigned int set, unsigned int reason)
{
	u64 val = GHCB_MSR_TERM_REQ;

	/* Tell the hypervisor what went wrong. */
	val |= GHCB_SEV_TERM_REASON(set, reason);

	/* Request Guest Termination from the Hypervisor */
	sev_es_wr_ghcb_msr(val);
	VMGEXIT();

	while (true)
		asm volatile("hlt\n" : : : "memory");
}

/*
 * The hypervisor features are available from GHCB version 2 onward.
 */
static u64 get_hv_features(void)
{
	u64 val;

	if (ghcb_version < 2)
		return 0;

	sev_es_wr_ghcb_msr(GHCB_MSR_HV_FT_REQ);
	VMGEXIT();

	val = sev_es_rd_ghcb_msr();
	if (GHCB_RESP_CODE(val) != GHCB_MSR_HV_FT_RESP)
		return 0;

	return GHCB_MSR_HV_FT_RESP_VAL(val);
}

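/*
 * Register the GHCB GPA with the hypervisor via the MSR protocol. The
 * response must echo the same PFN back; anything else means registration
 * failed and the only safe option is to terminate the guest.
 */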
static void snp_register_ghcb_early(unsigned long paddr)
{
	unsigned long pfn = paddr >> PAGE_SHIFT;
	u64 val;

	sev_es_wr_ghcb_msr(GHCB_MSR_REG_GPA_REQ_VAL(pfn));
	VMGEXIT();

	val = sev_es_rd_ghcb_msr();

	/* If the response GPA is not ours then abort the guest */
	if ((GHCB_RESP_CODE(val) != GHCB_MSR_REG_GPA_RESP) ||
	    (GHCB_MSR_REG_GPA_RESP_VAL(val) != pfn))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_REGISTER);
}

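/*
 * Negotiate the GHCB protocol version: the SEV_INFO response carries the
 * hypervisor's supported [min, max] protocol range. Fail if that range does
 * not overlap [GHCB_PROTOCOL_MIN, GHCB_PROTOCOL_MAX], otherwise use the
 * highest version supported by both sides.
 */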
static bool sev_es_negotiate_protocol(void)
{
	u64 val;

	/* Do the GHCB protocol version negotiation */
	sev_es_wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
	VMGEXIT();
	val = sev_es_rd_ghcb_msr();

	if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
		return false;

	if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTOCOL_MIN ||
	    GHCB_MSR_PROTO_MIN(val) > GHCB_PROTOCOL_MAX)
		return false;

	ghcb_version = min_t(size_t, GHCB_MSR_PROTO_MAX(val), GHCB_PROTOCOL_MAX);

	return true;
}

static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb)
{
	ghcb->save.sw_exit_code = 0;
	__builtin_memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}

static bool vc_decoding_needed(unsigned long exit_code)
{
	/* Exceptions don't require decoding the instruction */
	return !(exit_code >= SVM_EXIT_EXCP_BASE &&
		 exit_code <= SVM_EXIT_LAST_EXCP);
}

static enum es_result vc_init_em_ctxt(struct es_em_ctxt *ctxt,
				      struct pt_regs *regs,
				      unsigned long exit_code)
{
	enum es_result ret = ES_OK;

	memset(ctxt, 0, sizeof(*ctxt));
	ctxt->regs = regs;

	if (vc_decoding_needed(exit_code))
		ret = vc_decode_insn(ctxt);

	return ret;
}

static void vc_finish_insn(struct es_em_ctxt *ctxt)
{
	ctxt->regs->ip += ctxt->insn.length;
}

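/*
 * Evaluate the result of a GHCB call as reported in sw_exit_info_1: 0 means
 * success, 1 means the hypervisor passed exception information (vector and
 * error code) in sw_exit_info_2, of which only sane #GP/#UD events are
 * accepted, and anything else is treated as a hypervisor error.
 */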
static enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	u32 ret;

	ret = ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0);
	if (!ret)
		return ES_OK;

	if (ret == 1) {
		u64 info = ghcb->save.sw_exit_info_2;
		unsigned long v = info & SVM_EVTINJ_VEC_MASK;

		/* Check if exception information from hypervisor is sane. */
		if ((info & SVM_EVTINJ_VALID) &&
		    ((v == X86_TRAP_GP) || (v == X86_TRAP_UD)) &&
		    ((info & SVM_EVTINJ_TYPE_MASK) == SVM_EVTINJ_TYPE_EXEPT)) {
			ctxt->fi.vector = v;

			if (info & SVM_EVTINJ_VALID_ERR)
				ctxt->fi.error_code = info >> 32;

			return ES_EXCEPTION;
		}
	}

	return ES_VMM_ERROR;
}

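/*
 * Issue a VMGEXIT using the GHCB page: fill in the protocol/usage fields and
 * the exit code/info, point the GHCB MSR at the page, exit to the hypervisor
 * and then validate the result it reported back.
 */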
static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
					  struct es_em_ctxt *ctxt,
					  u64 exit_code, u64 exit_info_1,
					  u64 exit_info_2)
{
	/* Fill in protocol and format specifiers */
	ghcb->protocol_version = ghcb_version;
	ghcb->ghcb_usage       = GHCB_DEFAULT_USAGE;

	ghcb_set_sw_exit_code(ghcb, exit_code);
	ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
	ghcb_set_sw_exit_info_2(ghcb, exit_info_2);

	sev_es_wr_ghcb_msr(__pa(ghcb));
	VMGEXIT();

	return verify_exception_info(ghcb, ctxt);
}

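/*
 * Fetch a single CPUID register via the GHCB MSR protocol: the register
 * index is encoded in the request and the value comes back in the upper
 * 32 bits of the response, so one VMGEXIT is needed per register.
 */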
static int __sev_cpuid_hv(u32 fn, int reg_idx, u32 *reg)
{
	u64 val;

	sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, reg_idx));
	VMGEXIT();
	val = sev_es_rd_ghcb_msr();
	if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
		return -EIO;

	*reg = (val >> 32);

	return 0;
}

static int sev_cpuid_hv(struct cpuid_leaf *leaf)
{
	int ret;

	/*
	 * The MSR protocol does not support fetching non-zero subfunctions, but
	 * is sufficient to handle current early-boot cases. Should that change,
	 * make sure to report an error rather than ignoring the index and
	 * grabbing random values. If this issue arises in the future, handling
	 * can be added here to use the GHCB-page protocol for cases that occur
	 * late enough in boot that the GHCB page is available.
	 */
	if (cpuid_function_is_indexed(leaf->fn) && leaf->subfn)
		return -EINVAL;

	ret =         __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_EAX, &leaf->eax);
	ret = ret ? : __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_EBX, &leaf->ebx);
	ret = ret ? : __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_ECX, &leaf->ecx);
	ret = ret ? : __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_EDX, &leaf->edx);

	return ret;
}

/*
 * This may be called early while still running on the initial identity
 * mapping. Use RIP-relative addressing to obtain the correct address both
 * while running on the initial identity mapping and after the switch-over
 * to kernel virtual addresses later in boot.
 */
static const struct snp_cpuid_table *snp_cpuid_get_table(void)
{
	void *ptr;

	asm ("lea cpuid_table_copy(%%rip), %0"
	     : "=r" (ptr)
	     : "p" (&cpuid_table_copy));

	return ptr;
}

/*
 * The SNP Firmware ABI, Revision 0.9, Section 7.1, details the use of
 * XCR0_IN and XSS_IN to encode multiple versions of 0xD subfunctions 0
 * and 1 based on the corresponding features enabled by a particular
 * combination of XCR0 and XSS registers so that a guest can look up the
 * version corresponding to the features currently enabled in its XCR0/XSS
 * registers. The only value that differs between these versions/table
 * entries is the enabled XSAVE area size advertised via EBX.
 *
 * While hypervisors may choose to make use of this support, it is more
 * robust/secure for a guest to simply find the entry corresponding to the
 * base/legacy XSAVE area size (XCR0=1 or XCR0=3), and then calculate the
 * XSAVE area size using subfunctions 2 through 64, as documented in APM
 * Volume 3, Rev 3.31, Appendix E.3.8, which is what is done here.
 *
 * Since base/legacy XSAVE area size is documented as 0x240, use that value
 * directly rather than relying on the base size in the CPUID table.
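 *
 * As an example of the calculation below: with XCR0=7 (x87/SSE/AVX enabled)
 * and the compacted format, only the AVX state entry (subfunction 2) is
 * added on top of the 0x240 base size.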
 *
 * Return: XSAVE area size on success, 0 otherwise.
 */
static u32 snp_cpuid_calc_xsave_size(u64 xfeatures_en, bool compacted)
{
	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
	u64 xfeatures_found = 0;
	u32 xsave_size = 0x240;
	int i;

	for (i = 0; i < cpuid_table->count; i++) {
		const struct snp_cpuid_fn *e = &cpuid_table->fn[i];

		if (!(e->eax_in == 0xD && e->ecx_in > 1 && e->ecx_in < 64))
			continue;
		if (!(xfeatures_en & (BIT_ULL(e->ecx_in))))
			continue;
		if (xfeatures_found & (BIT_ULL(e->ecx_in)))
			continue;

		xfeatures_found |= (BIT_ULL(e->ecx_in));

		if (compacted)
			xsave_size += e->eax;
		else
			xsave_size = max(xsave_size, e->eax + e->ebx);
	}

	/*
	 * Either the guest set unsupported XCR0/XSS bits, or the corresponding
	 * entries in the CPUID table were not present. This is not a valid
	 * state to be in.
	 */
	if (xfeatures_found != (xfeatures_en & GENMASK_ULL(63, 2)))
		return 0;

	return xsave_size;
}

static bool
snp_cpuid_get_validated_func(struct cpuid_leaf *leaf)
{
	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
	int i;

	for (i = 0; i < cpuid_table->count; i++) {
		const struct snp_cpuid_fn *e = &cpuid_table->fn[i];

		if (e->eax_in != leaf->fn)
			continue;

		if (cpuid_function_is_indexed(leaf->fn) && e->ecx_in != leaf->subfn)
			continue;

		/*
		 * For 0xD subfunctions 0 and 1, only use the entry corresponding
		 * to the base/legacy XSAVE area size (XCR0=1 or XCR0=3, XSS=0).
		 * See the comments above snp_cpuid_calc_xsave_size() for more
		 * details.
		 */
		if (e->eax_in == 0xD && (e->ecx_in == 0 || e->ecx_in == 1))
			if (!(e->xcr0_in == 1 || e->xcr0_in == 3) || e->xss_in)
				continue;

		leaf->eax = e->eax;
		leaf->ebx = e->ebx;
		leaf->ecx = e->ecx;
		leaf->edx = e->edx;

		return true;
	}

	return false;
}

static void snp_cpuid_hv(struct cpuid_leaf *leaf)
{
	if (sev_cpuid_hv(leaf))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID_HV);
}

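/*
 * Fix up leaves whose values cannot come from the static CPUID table alone:
 * APIC IDs and topology information are re-read from the hypervisor, while
 * CR4/XCR0/XSS-dependent bits and the XSAVE area sizes are computed locally.
 */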
static int snp_cpuid_postprocess(struct cpuid_leaf *leaf)
{
	struct cpuid_leaf leaf_hv = *leaf;

	switch (leaf->fn) {
	case 0x1:
		snp_cpuid_hv(&leaf_hv);

		/* initial APIC ID */
		leaf->ebx = (leaf_hv.ebx & GENMASK(31, 24)) | (leaf->ebx & GENMASK(23, 0));
		/* APIC enabled bit */
		leaf->edx = (leaf_hv.edx & BIT(9)) | (leaf->edx & ~BIT(9));

		/* OSXSAVE enabled bit */
		if (native_read_cr4() & X86_CR4_OSXSAVE)
			leaf->ecx |= BIT(27);
		break;
	case 0x7:
		/* OSPKE enabled bit */
		leaf->ecx &= ~BIT(4);
		if (native_read_cr4() & X86_CR4_PKE)
			leaf->ecx |= BIT(4);
		break;
	case 0xB:
		leaf_hv.subfn = 0;
		snp_cpuid_hv(&leaf_hv);

		/* extended APIC ID */
		leaf->edx = leaf_hv.edx;
		break;
	case 0xD: {
		bool compacted = false;
		u64 xcr0 = 1, xss = 0;
		u32 xsave_size;

		if (leaf->subfn != 0 && leaf->subfn != 1)
			return 0;

		if (native_read_cr4() & X86_CR4_OSXSAVE)
			xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
		if (leaf->subfn == 1) {
			/* Get XSS value if XSAVES is enabled. */
			if (leaf->eax & BIT(3)) {
				unsigned long lo, hi;

				asm volatile("rdmsr" : "=a" (lo), "=d" (hi)
						     : "c" (MSR_IA32_XSS));
				xss = (hi << 32) | lo;
			}

			/*
			 * The PPR and APM aren't clear on what size should be
			 * encoded in 0xD:0x1:EBX when compaction is not enabled
			 * by either XSAVEC (feature bit 1) or XSAVES (feature
			 * bit 3) since SNP-capable hardware has these feature
			 * bits fixed as 1. KVM sets it to 0 in this case, but
			 * to avoid this becoming an issue it's safer to simply
			 * treat this as unsupported for SNP guests.
			 */
			if (!(leaf->eax & (BIT(1) | BIT(3))))
				return -EINVAL;

			compacted = true;
		}

		xsave_size = snp_cpuid_calc_xsave_size(xcr0 | xss, compacted);
		if (!xsave_size)
			return -EINVAL;

		leaf->ebx = xsave_size;
		}
		break;
	case 0x8000001E:
		snp_cpuid_hv(&leaf_hv);

		/* extended APIC ID */
		leaf->eax = leaf_hv.eax;
		/* compute ID */
		leaf->ebx = (leaf->ebx & GENMASK(31, 8)) | (leaf_hv.ebx & GENMASK(7, 0));
		/* node ID */
		leaf->ecx = (leaf->ecx & GENMASK(31, 8)) | (leaf_hv.ecx & GENMASK(7, 0));
		break;
	default:
		/* No fix-ups needed, use values as-is. */
		break;
	}

	return 0;
}

/*
 * Returns -EOPNOTSUPP if feature not enabled. Any other non-zero return value
 * should be treated as fatal by caller.
 */
static int snp_cpuid(struct cpuid_leaf *leaf)
{
	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();

	if (!cpuid_table->count)
		return -EOPNOTSUPP;

	if (!snp_cpuid_get_validated_func(leaf)) {
		/*
		 * Some hypervisors will avoid keeping track of CPUID entries
		 * where all values are zero, since they can be handled the
		 * same as out-of-range values (all-zero). This is useful here
		 * as well, as it allows virtually all guest configurations to
		 * work using a single SNP CPUID table.
		 *
		 * To allow for this, there is a need to distinguish between
		 * out-of-range entries and in-range zero entries, since the
		 * CPUID table entries are only a template that may need to be
		 * augmented with additional values for things like
		 * CPU-specific information during post-processing. So if it's
		 * not in the table, set the values to zero. Then, if they are
		 * within a valid CPUID range, proceed with post-processing
		 * using zeros as the initial values. Otherwise, skip
		 * post-processing and just return zeros immediately.
		 */
		leaf->eax = leaf->ebx = leaf->ecx = leaf->edx = 0;

		/* Skip post-processing for out-of-range zero leaves. */
		if (!(leaf->fn <= cpuid_std_range_max ||
		      (leaf->fn >= 0x40000000 && leaf->fn <= cpuid_hyp_range_max) ||
		      (leaf->fn >= 0x80000000 && leaf->fn <= cpuid_ext_range_max)))
			return 0;
	}

	return snp_cpuid_postprocess(leaf);
}

/*
 * Boot #VC Handler - This is the first #VC handler during boot; there is no
 * GHCB page yet, so it only supports MSR-based communication with the
 * hypervisor and only the CPUID exit code.
 */
void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
{
	unsigned int subfn = lower_bits(regs->cx, 32);
	unsigned int fn = lower_bits(regs->ax, 32);
	struct cpuid_leaf leaf;
	int ret;

	/* Only CPUID is supported via MSR protocol */
	if (exit_code != SVM_EXIT_CPUID)
		goto fail;

	leaf.fn = fn;
	leaf.subfn = subfn;

	ret = snp_cpuid(&leaf);
	if (!ret)
		goto cpuid_done;

	if (ret != -EOPNOTSUPP)
		goto fail;

	if (sev_cpuid_hv(&leaf))
		goto fail;

cpuid_done:
	regs->ax = leaf.eax;
	regs->bx = leaf.ebx;
	regs->cx = leaf.ecx;
	regs->dx = leaf.edx;

	/*
	 * This is a VC handler and the #VC is only raised when SEV-ES is
	 * active, which means SEV must be active too. Do sanity checks on the
	 * CPUID results to make sure the hypervisor does not trick the kernel
	 * into the no-sev path. This could map sensitive data unencrypted and
	 * make it accessible to the hypervisor.
	 *
	 * In particular, check for:
	 *	- Availability of CPUID leaf 0x8000001f
	 *	- SEV CPUID bit.
	 *
	 * The hypervisor might still report the wrong C-bit position, but this
	 * can't be checked here.
	 */

	if (fn == 0x80000000 && (regs->ax < 0x8000001f))
		/* SEV leaf check */
		goto fail;
	else if ((fn == 0x8000001f && !(regs->ax & BIT(1))))
		/* SEV bit */
		goto fail;

	/* Skip over the CPUID two-byte opcode */
	regs->ip += 2;

	return;

fail:
	/* Terminate the guest */
	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
}

static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
					  void *src, char *buf,
					  unsigned int data_size,
					  unsigned int count,
					  bool backwards)
{
	int i, b = backwards ? -1 : 1;
	enum es_result ret = ES_OK;

	for (i = 0; i < count; i++) {
		void *s = src + (i * data_size * b);
		char *d = buf + (i * data_size);

		ret = vc_read_mem(ctxt, s, d, data_size);
		if (ret != ES_OK)
			break;
	}

	return ret;
}

static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt,
					   void *dst, char *buf,
					   unsigned int data_size,
					   unsigned int count,
					   bool backwards)
{
	int i, s = backwards ? -1 : 1;
	enum es_result ret = ES_OK;

	for (i = 0; i < count; i++) {
		void *d = dst + (i * data_size * s);
		char *b = buf + (i * data_size);

		ret = vc_write_mem(ctxt, d, b, data_size);
		if (ret != ES_OK)
			break;
	}

	return ret;
}

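/*
 * I/O exit information as encoded in EXITINFO1 of SVM_EXIT_IOIO and built by
 * vc_ioio_exitinfo(): bit 0 selects IN vs. OUT, bit 2 marks string
 * instructions, bit 3 a REP prefix, bits 4-6 the operand size, bits 7-9 the
 * address size, bits 10-12 the effective segment and bits 16-31 the port.
 */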
#define IOIO_TYPE_STR  BIT(2)
#define IOIO_TYPE_IN   1
#define IOIO_TYPE_INS  (IOIO_TYPE_IN | IOIO_TYPE_STR)
#define IOIO_TYPE_OUT  0
#define IOIO_TYPE_OUTS (IOIO_TYPE_OUT | IOIO_TYPE_STR)

#define IOIO_REP       BIT(3)

#define IOIO_ADDR_64   BIT(9)
#define IOIO_ADDR_32   BIT(8)
#define IOIO_ADDR_16   BIT(7)

#define IOIO_DATA_32   BIT(6)
#define IOIO_DATA_16   BIT(5)
#define IOIO_DATA_8    BIT(4)

#define IOIO_SEG_ES    (0 << 10)
#define IOIO_SEG_DS    (3 << 10)

static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
{
	struct insn *insn = &ctxt->insn;
	*exitinfo = 0;

	switch (insn->opcode.bytes[0]) {
	/* INS opcodes */
	case 0x6c:
	case 0x6d:
		*exitinfo |= IOIO_TYPE_INS;
		*exitinfo |= IOIO_SEG_ES;
		*exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
		break;

	/* OUTS opcodes */
	case 0x6e:
	case 0x6f:
		*exitinfo |= IOIO_TYPE_OUTS;
		*exitinfo |= IOIO_SEG_DS;
		*exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
		break;

	/* IN immediate opcodes */
	case 0xe4:
	case 0xe5:
		*exitinfo |= IOIO_TYPE_IN;
		*exitinfo |= (u8)insn->immediate.value << 16;
		break;

	/* OUT immediate opcodes */
	case 0xe6:
	case 0xe7:
		*exitinfo |= IOIO_TYPE_OUT;
		*exitinfo |= (u8)insn->immediate.value << 16;
		break;

	/* IN register opcodes */
	case 0xec:
	case 0xed:
		*exitinfo |= IOIO_TYPE_IN;
		*exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
		break;

	/* OUT register opcodes */
	case 0xee:
	case 0xef:
		*exitinfo |= IOIO_TYPE_OUT;
		*exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
		break;

	default:
		return ES_DECODE_FAILED;
	}

	switch (insn->opcode.bytes[0]) {
	case 0x6c:
	case 0x6e:
	case 0xe4:
	case 0xe6:
	case 0xec:
	case 0xee:
		/* Single byte opcodes */
		*exitinfo |= IOIO_DATA_8;
		break;
	default:
		/* Length determined by instruction parsing */
		*exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16
						     : IOIO_DATA_32;
	}
	switch (insn->addr_bytes) {
	case 2:
		*exitinfo |= IOIO_ADDR_16;
		break;
	case 4:
		*exitinfo |= IOIO_ADDR_32;
		break;
	case 8:
		*exitinfo |= IOIO_ADDR_64;
		break;
	}

	if (insn_has_rep_prefix(insn))
		*exitinfo |= IOIO_REP;

	return ES_OK;
}

static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	struct pt_regs *regs = ctxt->regs;
	u64 exit_info_1, exit_info_2;
	enum es_result ret;

	ret = vc_ioio_exitinfo(ctxt, &exit_info_1);
	if (ret != ES_OK)
		return ret;

	if (exit_info_1 & IOIO_TYPE_STR) {

		/* (REP) INS/OUTS */

		bool df = ((regs->flags & X86_EFLAGS_DF) == X86_EFLAGS_DF);
		unsigned int io_bytes, exit_bytes;
		unsigned int ghcb_count, op_count;
		unsigned long es_base;
		u64 sw_scratch;

		/*
		 * For the string variants with a REP prefix, the number of in/out
		 * operations per #VC exception is limited so that the kernel
		 * has a chance to take interrupts and re-schedule while the
		 * instruction is emulated.
		 */
		io_bytes   = (exit_info_1 >> 4) & 0x7;
		ghcb_count = sizeof(ghcb->shared_buffer) / io_bytes;

		op_count    = (exit_info_1 & IOIO_REP) ? regs->cx : 1;
		exit_info_2 = min(op_count, ghcb_count);
		exit_bytes  = exit_info_2 * io_bytes;

		es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);

		/* Read bytes of OUTS into the shared buffer */
		if (!(exit_info_1 & IOIO_TYPE_IN)) {
			ret = vc_insn_string_read(ctxt,
					       (void *)(es_base + regs->si),
					       ghcb->shared_buffer, io_bytes,
					       exit_info_2, df);
			if (ret)
				return ret;
		}

		/*
		 * Issue a VMGEXIT to the HV to consume the bytes from the
		 * shared buffer or to have it write them into the shared buffer,
		 * depending on the instruction: OUTS or INS.
		 */
		sw_scratch = __pa(ghcb) + offsetof(struct ghcb, shared_buffer);
		ghcb_set_sw_scratch(ghcb, sw_scratch);
		ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO,
					  exit_info_1, exit_info_2);
		if (ret != ES_OK)
			return ret;

		/* Read bytes from shared buffer into the guest's destination. */
		if (exit_info_1 & IOIO_TYPE_IN) {
			ret = vc_insn_string_write(ctxt,
						   (void *)(es_base + regs->di),
						   ghcb->shared_buffer, io_bytes,
						   exit_info_2, df);
			if (ret)
				return ret;

			if (df)
				regs->di -= exit_bytes;
			else
				regs->di += exit_bytes;
		} else {
			if (df)
				regs->si -= exit_bytes;
			else
				regs->si += exit_bytes;
		}

		if (exit_info_1 & IOIO_REP)
			regs->cx -= exit_info_2;

		ret = regs->cx ? ES_RETRY : ES_OK;

	} else {

		/* IN/OUT into/from rAX */

		int bits = (exit_info_1 & 0x70) >> 1;
		u64 rax = 0;

		if (!(exit_info_1 & IOIO_TYPE_IN))
			rax = lower_bits(regs->ax, bits);

		ghcb_set_rax(ghcb, rax);

		ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO, exit_info_1, 0);
		if (ret != ES_OK)
			return ret;

		if (exit_info_1 & IOIO_TYPE_IN) {
			if (!ghcb_rax_is_valid(ghcb))
				return ES_VMM_ERROR;
			regs->ax = lower_bits(ghcb->save.rax, bits);
		}
	}

	return ret;
}

static int vc_handle_cpuid_snp(struct pt_regs *regs)
{
	struct cpuid_leaf leaf;
	int ret;

	leaf.fn = regs->ax;
	leaf.subfn = regs->cx;
	ret = snp_cpuid(&leaf);
	if (!ret) {
		regs->ax = leaf.eax;
		regs->bx = leaf.ebx;
		regs->cx = leaf.ecx;
		regs->dx = leaf.edx;
	}

	return ret;
}

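/*
 * Handle a CPUID #VC: try the SNP CPUID table first and fall back to asking
 * the hypervisor through the GHCB page only when no table is available, i.e.
 * for non-SNP SEV-ES guests.
 */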
static enum es_result vc_handle_cpuid(struct ghcb *ghcb,
				      struct es_em_ctxt *ctxt)
{
	struct pt_regs *regs = ctxt->regs;
	u32 cr4 = native_read_cr4();
	enum es_result ret;
	int snp_cpuid_ret;

	snp_cpuid_ret = vc_handle_cpuid_snp(regs);
	if (!snp_cpuid_ret)
		return ES_OK;
	if (snp_cpuid_ret != -EOPNOTSUPP)
		return ES_VMM_ERROR;

	ghcb_set_rax(ghcb, regs->ax);
	ghcb_set_rcx(ghcb, regs->cx);

	if (cr4 & X86_CR4_OSXSAVE)
		/* Safe to read xcr0 */
		ghcb_set_xcr0(ghcb, xgetbv(XCR_XFEATURE_ENABLED_MASK));
	else
		/* xgetbv will cause #GP - use reset value for xcr0 */
		ghcb_set_xcr0(ghcb, 1);

	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0);
	if (ret != ES_OK)
		return ret;

	if (!(ghcb_rax_is_valid(ghcb) &&
	      ghcb_rbx_is_valid(ghcb) &&
	      ghcb_rcx_is_valid(ghcb) &&
	      ghcb_rdx_is_valid(ghcb)))
		return ES_VMM_ERROR;

	regs->ax = ghcb->save.rax;
	regs->bx = ghcb->save.rbx;
	regs->cx = ghcb->save.rcx;
	regs->dx = ghcb->save.rdx;

	return ES_OK;
}

static enum es_result vc_handle_rdtsc(struct ghcb *ghcb,
				      struct es_em_ctxt *ctxt,
				      unsigned long exit_code)
{
	bool rdtscp = (exit_code == SVM_EXIT_RDTSCP);
	enum es_result ret;

	ret = sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, 0, 0);
	if (ret != ES_OK)
		return ret;

	if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb) &&
	     (!rdtscp || ghcb_rcx_is_valid(ghcb))))
		return ES_VMM_ERROR;

	ctxt->regs->ax = ghcb->save.rax;
	ctxt->regs->dx = ghcb->save.rdx;
	if (rdtscp)
		ctxt->regs->cx = ghcb->save.rcx;

	return ES_OK;
}

struct cc_setup_data {
	struct setup_data header;
	u32 cc_blob_address;
};

/*
 * Search for a Confidential Computing blob passed in as a setup_data entry
 * via the Linux Boot Protocol.
 */
static struct cc_blob_sev_info *find_cc_blob_setup_data(struct boot_params *bp)
{
	struct cc_setup_data *sd = NULL;
	struct setup_data *hdr;

	hdr = (struct setup_data *)bp->hdr.setup_data;

	while (hdr) {
		if (hdr->type == SETUP_CC_BLOB) {
			sd = (struct cc_setup_data *)hdr;
			return (struct cc_blob_sev_info *)(unsigned long)sd->cc_blob_address;
		}
		hdr = (struct setup_data *)hdr->next;
	}

	return NULL;
}

/*
 * Initialize the kernel's copy of the SNP CPUID table, and set up the
 * pointer that will be used to access it.
 *
 * Maintaining a direct mapping of the SNP CPUID table used by firmware would
 * be possible as an alternative, but the approach is brittle since the
 * mapping needs to be updated in sync with all the changes to virtual memory
 * layout and related mapping facilities throughout the boot process.
 */
static void __init setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
{
	const struct snp_cpuid_table *cpuid_table_fw, *cpuid_table;
	int i;

	if (!cc_info || !cc_info->cpuid_phys || cc_info->cpuid_len < PAGE_SIZE)
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID);

	cpuid_table_fw = (const struct snp_cpuid_table *)cc_info->cpuid_phys;
	if (!cpuid_table_fw->count || cpuid_table_fw->count > SNP_CPUID_COUNT_MAX)
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID);

	cpuid_table = snp_cpuid_get_table();
	memcpy((void *)cpuid_table, cpuid_table_fw, sizeof(*cpuid_table));

	/* Initialize CPUID ranges for range-checking. */
	for (i = 0; i < cpuid_table->count; i++) {
		const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];

		if (fn->eax_in == 0x0)
			cpuid_std_range_max = fn->eax;
		else if (fn->eax_in == 0x40000000)
			cpuid_hyp_range_max = fn->eax;
		else if (fn->eax_in == 0x80000000)
			cpuid_ext_range_max = fn->eax;
	}
}

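/*
 * Validate or rescind validation of the pages described by the PSC
 * descriptor. If PVALIDATE of a 2M range fails with a size mismatch, retry
 * the range one 4K page at a time; any remaining failure terminates the
 * guest.
 */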
static void pvalidate_pages(struct snp_psc_desc *desc)
{
	struct psc_entry *e;
	unsigned long vaddr;
	unsigned int size;
	unsigned int i;
	bool validate;
	int rc;

	for (i = 0; i <= desc->hdr.end_entry; i++) {
		e = &desc->entries[i];

		vaddr = (unsigned long)pfn_to_kaddr(e->gfn);
		size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
		validate = e->operation == SNP_PAGE_STATE_PRIVATE;

		rc = pvalidate(vaddr, size, validate);
		if (rc == PVALIDATE_FAIL_SIZEMISMATCH && size == RMP_PG_SIZE_2M) {
			unsigned long vaddr_end = vaddr + PMD_SIZE;

			for (; vaddr < vaddr_end; vaddr += PAGE_SIZE) {
				rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
				if (rc)
					break;
			}
		}

		if (rc) {
			WARN(1, "Failed to validate address 0x%lx ret %d", vaddr, rc);
			sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
		}
	}
}

static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)
{
	int cur_entry, end_entry, ret = 0;
	struct snp_psc_desc *data;
	struct es_em_ctxt ctxt;

	vc_ghcb_invalidate(ghcb);

	/* Copy the input desc into GHCB shared buffer */
	data = (struct snp_psc_desc *)ghcb->shared_buffer;
	memcpy(ghcb->shared_buffer, desc, min_t(int, GHCB_SHARED_BUF_SIZE, sizeof(*desc)));

	/*
	 * As per the GHCB specification, the hypervisor can resume the guest
	 * before processing all the entries. Check whether all the entries
	 * have been processed; if not, keep retrying. Note, the hypervisor
	 * will update the data memory directly to indicate the status, so
	 * reference the data->hdr everywhere.
	 *
	 * The strategy here is to wait for the hypervisor to change the page
	 * state in the RMP table before the guest accesses the memory pages.
	 * If the page state change was not successful, later memory accesses
	 * will result in a crash.
	 */
	cur_entry = data->hdr.cur_entry;
	end_entry = data->hdr.end_entry;

	while (data->hdr.cur_entry <= data->hdr.end_entry) {
		ghcb_set_sw_scratch(ghcb, (u64)__pa(data));

		/* This call advances data->hdr.cur_entry as entries are processed. */
		ret = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_PSC, 0, 0);

		/*
		 * Page State Change VMGEXIT can pass error code through
		 * exit_info_2.
		 */
		if (WARN(ret || ghcb->save.sw_exit_info_2,
			 "SNP: PSC failed ret=%d exit_info_2=%llx\n",
			 ret, ghcb->save.sw_exit_info_2)) {
			ret = 1;
			goto out;
		}

		/* Verify that reserved bit is not set */
		if (WARN(data->hdr.reserved, "Reserved bit is set in the PSC header\n")) {
			ret = 1;
			goto out;
		}

		/*
		 * Sanity check that entry processing is not going backwards.
		 * This will happen only if the hypervisor is tricking us.
		 */
		if (WARN(data->hdr.end_entry > end_entry || cur_entry > data->hdr.cur_entry,
"SNP: PSC processing going backward, end_entry %d (got %d) cur_entry %d (got %d)\n",
			 end_entry, data->hdr.end_entry, cur_entry, data->hdr.cur_entry)) {
			ret = 1;
			goto out;
		}
	}

out:
	return ret;
}