xref: /openbmc/linux/arch/x86/kernel/sev.c (revision 52451502)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * AMD Memory Encryption Support
4  *
5  * Copyright (C) 2019 SUSE
6  *
7  * Author: Joerg Roedel <jroedel@suse.de>
8  */
9 
10 #define pr_fmt(fmt)	"SEV: " fmt
11 
12 #include <linux/sched/debug.h>	/* For show_regs() */
13 #include <linux/percpu-defs.h>
14 #include <linux/cc_platform.h>
15 #include <linux/printk.h>
16 #include <linux/mm_types.h>
17 #include <linux/set_memory.h>
18 #include <linux/memblock.h>
19 #include <linux/kernel.h>
20 #include <linux/mm.h>
21 #include <linux/cpumask.h>
22 #include <linux/efi.h>
23 #include <linux/platform_device.h>
24 #include <linux/io.h>
25 #include <linux/psp-sev.h>
26 #include <uapi/linux/sev-guest.h>
27 
28 #include <asm/cpu_entry_area.h>
29 #include <asm/stacktrace.h>
30 #include <asm/sev.h>
31 #include <asm/insn-eval.h>
32 #include <asm/fpu/xcr.h>
33 #include <asm/processor.h>
34 #include <asm/realmode.h>
35 #include <asm/setup.h>
36 #include <asm/traps.h>
37 #include <asm/svm.h>
38 #include <asm/smp.h>
39 #include <asm/cpu.h>
40 #include <asm/apic.h>
41 #include <asm/cpuid.h>
42 #include <asm/cmdline.h>
43 
44 #define DR7_RESET_VALUE        0x400
45 
46 /* AP INIT values as documented in the APM2 section "Processor Initialization State" */
47 #define AP_INIT_CS_LIMIT		0xffff
48 #define AP_INIT_DS_LIMIT		0xffff
49 #define AP_INIT_LDTR_LIMIT		0xffff
50 #define AP_INIT_GDTR_LIMIT		0xffff
51 #define AP_INIT_IDTR_LIMIT		0xffff
52 #define AP_INIT_TR_LIMIT		0xffff
53 #define AP_INIT_RFLAGS_DEFAULT		0x2
54 #define AP_INIT_DR6_DEFAULT		0xffff0ff0
55 #define AP_INIT_GPAT_DEFAULT		0x0007040600070406ULL
56 #define AP_INIT_XCR0_DEFAULT		0x1
57 #define AP_INIT_X87_FTW_DEFAULT		0x5555
58 #define AP_INIT_X87_FCW_DEFAULT		0x0040
59 #define AP_INIT_CR0_DEFAULT		0x60000010
60 #define AP_INIT_MXCSR_DEFAULT		0x1f80
61 
62 /* For early boot hypervisor communication in SEV-ES enabled guests */
63 static struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE);
64 
65 /*
66  * Needs to be in the .data section because we need it NULL before bss is
67  * cleared
68  */
69 static struct ghcb *boot_ghcb __section(".data");
70 
71 /* Bitmap of SEV features supported by the hypervisor */
72 static u64 sev_hv_features __ro_after_init;
73 
74 /* #VC handler runtime per-CPU data */
75 struct sev_es_runtime_data {
76 	struct ghcb ghcb_page;
77 
78 	/*
79 	 * Reserve one page per CPU as backup storage for the unencrypted GHCB.
80 	 * It is needed when an NMI happens while the #VC handler uses the real
81 	 * GHCB, and the NMI handler itself triggers another #VC exception. In
82 	 * that case the GHCB content of the first handler needs to be backed up
83 	 * and restored.
84 	 */
85 	struct ghcb backup_ghcb;
86 
87 	/*
88 	 * Mark the per-cpu GHCBs as in-use to detect nested #VC exceptions.
89 	 * There is no need for it to be atomic, because nothing is written to
90 	 * the GHCB between the read and the write of ghcb_active. So it is safe
91 	 * to use it when a nested #VC exception happens before the write.
92 	 *
93 	 * This is necessary for example in the #VC->NMI->#VC case when the NMI
94 	 * happens while the first #VC handler uses the GHCB. When the NMI code
95 	 * raises a second #VC exception, its handler might overwrite the
96 	 * contents of the GHCB written by the first handler. To avoid this,
97 	 * the contents of the GHCB are saved and restored when the GHCB is
98 	 * detected to be in use already.
99 	 */
100 	bool ghcb_active;
101 	bool backup_ghcb_active;
102 
103 	/*
104 	 * Cached DR7 value - write it on DR7 writes and return it on reads.
105 	 * That value will never make it to the real hardware DR7 as debugging
106 	 * is currently unsupported in SEV-ES guests.
107 	 */
108 	unsigned long dr7;
109 };
110 
111 struct ghcb_state {
112 	struct ghcb *ghcb;
113 };
114 
115 static DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
116 static DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa);
117 
118 struct sev_config {
119 	__u64 debug		: 1,
120 
121 	      /*
122 	       * A flag used by __set_pages_state() that indicates when the
123 	       * per-CPU GHCB has been created and registered and thus can be
124 	       * used by the BSP instead of the early boot GHCB.
125 	       *
126 	       * For APs, the per-CPU GHCB is created before they are started
127 	       * and registered upon startup, so this flag can be used globally
128 	       * for the BSP and APs.
129 	       */
130 	      ghcbs_initialized	: 1,
131 
132 	      __reserved	: 62;
133 };
134 
135 static struct sev_config sev_cfg __read_mostly;
136 
137 static __always_inline bool on_vc_stack(struct pt_regs *regs)
138 {
139 	unsigned long sp = regs->sp;
140 
141 	/* User-mode RSP is not trusted */
142 	if (user_mode(regs))
143 		return false;
144 
145 	/* SYSCALL gap still has user-mode RSP */
146 	if (ip_within_syscall_gap(regs))
147 		return false;
148 
149 	return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
150 }
151 
152 /*
153  * This function handles the case when an NMI is raised in the #VC
154  * exception handler entry code, before the #VC handler has switched off
155  * its IST stack. In this case, the IST entry for #VC must be adjusted,
156  * so that any nested #VC exception will not overwrite the stack
157  * contents of the interrupted #VC handler.
158  *
159  * The IST entry is adjusted unconditionally so that it can also be
160  * unconditionally adjusted back in __sev_es_ist_exit(). Otherwise a
161  * nested sev_es_ist_exit() call may adjust back the IST entry too
162  * early.
163  *
164  * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run
165  * on the NMI IST stack, as they are only called from NMI handling code
166  * right now.
167  */
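/*
 * Illustrative sketch (not part of the original source) of the stack
 * situation the adjustment below creates when the NMI interrupted a
 * running #VC handler:
 *
 *	            +---------------------+ <-- __this_cpu_ist_top_va(VC)
 *	            |  interrupted #VC    |
 *	            |  handler frame      |
 *	            +---------------------+ <-- regs->sp
 *	            |  saved old IST      |
 *	new IST --> +---------------------+
 *
 * A nested #VC exception then starts below the saved value and cannot
 * corrupt the interrupted frame.
 */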
168 void noinstr __sev_es_ist_enter(struct pt_regs *regs)
169 {
170 	unsigned long old_ist, new_ist;
171 
172 	/* Read old IST entry */
173 	new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
174 
175 	/*
176 	 * If NMI happened while on the #VC IST stack, set the new IST
177 	 * value below regs->sp, so that the interrupted stack frame is
178 	 * not overwritten by subsequent #VC exceptions.
179 	 */
180 	if (on_vc_stack(regs))
181 		new_ist = regs->sp;
182 
183 	/*
184 	 * Reserve additional 8 bytes and store old IST value so this
185 	 * adjustment can be unrolled in __sev_es_ist_exit().
186 	 */
187 	new_ist -= sizeof(old_ist);
188 	*(unsigned long *)new_ist = old_ist;
189 
190 	/* Set new IST entry */
191 	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
192 }
193 
194 void noinstr __sev_es_ist_exit(void)
195 {
196 	unsigned long ist;
197 
198 	/* Read IST entry */
199 	ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
200 
201 	if (WARN_ON(ist == __this_cpu_ist_top_va(VC)))
202 		return;
203 
204 	/* Read back old IST entry and write it to the TSS */
205 	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
206 }
207 
208 /*
209  * Nothing shall interrupt this code path while holding the per-CPU
210  * GHCB. The backup GHCB is only for NMIs interrupting this path.
211  *
212  * Callers must disable local interrupts around it.
213  */
214 static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
215 {
216 	struct sev_es_runtime_data *data;
217 	struct ghcb *ghcb;
218 
219 	WARN_ON(!irqs_disabled());
220 
221 	data = this_cpu_read(runtime_data);
222 	ghcb = &data->ghcb_page;
223 
224 	if (unlikely(data->ghcb_active)) {
225 		/* GHCB is already in use - save its contents */
226 
227 		if (unlikely(data->backup_ghcb_active)) {
228 			/*
229 			 * Backup-GHCB is also already in use. There is no way
230 			 * to continue here so just kill the machine. To make
231 			 * panic() work, mark GHCBs inactive so that messages
232 			 * can be printed out.
233 			 */
234 			data->ghcb_active        = false;
235 			data->backup_ghcb_active = false;
236 
237 			instrumentation_begin();
238 			panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
239 			instrumentation_end();
240 		}
241 
242 		/* Mark backup_ghcb active before writing to it */
243 		data->backup_ghcb_active = true;
244 
245 		state->ghcb = &data->backup_ghcb;
246 
247 		/* Backup GHCB content */
248 		*state->ghcb = *ghcb;
249 	} else {
250 		state->ghcb = NULL;
251 		data->ghcb_active = true;
252 	}
253 
254 	return ghcb;
255 }
256 
257 static inline u64 sev_es_rd_ghcb_msr(void)
258 {
259 	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
260 }
261 
262 static __always_inline void sev_es_wr_ghcb_msr(u64 val)
263 {
264 	u32 low, high;
265 
266 	low  = (u32)(val);
267 	high = (u32)(val >> 32);
268 
269 	native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
270 }
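/*
 * Minimal usage sketch for the two helpers above (illustrative, not part
 * of the original source): one round-trip of the GHCB MSR protocol, e.g.
 * requesting the SEV information from the hypervisor:
 *
 *	sev_es_wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
 *	VMGEXIT();
 *	val = sev_es_rd_ghcb_msr();
 *
 * The low 12 bits of the MSR value carry the request/response code
 * (GHCBInfo), the remaining bits carry the payload (GHCBData).
 */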
271 
272 static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
273 				unsigned char *buffer)
274 {
275 	return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
276 }
277 
278 static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt)
279 {
280 	char buffer[MAX_INSN_SIZE];
281 	int insn_bytes;
282 
283 	insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
284 	if (insn_bytes == 0) {
285 		/* Nothing could be copied */
286 		ctxt->fi.vector     = X86_TRAP_PF;
287 		ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
288 		ctxt->fi.cr2        = ctxt->regs->ip;
289 		return ES_EXCEPTION;
290 	} else if (insn_bytes == -EINVAL) {
291 		/* Effective RIP could not be calculated */
292 		ctxt->fi.vector     = X86_TRAP_GP;
293 		ctxt->fi.error_code = 0;
294 		ctxt->fi.cr2        = 0;
295 		return ES_EXCEPTION;
296 	}
297 
298 	if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, insn_bytes))
299 		return ES_DECODE_FAILED;
300 
301 	if (ctxt->insn.immediate.got)
302 		return ES_OK;
303 	else
304 		return ES_DECODE_FAILED;
305 }
306 
307 static enum es_result __vc_decode_kern_insn(struct es_em_ctxt *ctxt)
308 {
309 	char buffer[MAX_INSN_SIZE];
310 	int res, ret;
311 
312 	res = vc_fetch_insn_kernel(ctxt, buffer);
313 	if (res) {
314 		ctxt->fi.vector     = X86_TRAP_PF;
315 		ctxt->fi.error_code = X86_PF_INSTR;
316 		ctxt->fi.cr2        = ctxt->regs->ip;
317 		return ES_EXCEPTION;
318 	}
319 
320 	ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
321 	if (ret < 0)
322 		return ES_DECODE_FAILED;
323 	else
324 		return ES_OK;
325 }
326 
327 static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
328 {
329 	if (user_mode(ctxt->regs))
330 		return __vc_decode_user_insn(ctxt);
331 	else
332 		return __vc_decode_kern_insn(ctxt);
333 }
334 
335 static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
336 				   char *dst, char *buf, size_t size)
337 {
338 	unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;
339 
340 	/*
341 	 * This function uses __put_user() independent of whether kernel or user
342 	 * memory is accessed. This works fine because __put_user() does no
343 	 * sanity checks of the pointer being accessed. All it does is report
344 	 * when the access fails.
345 	 *
346 	 * Also, this function runs in atomic context, so __put_user() is not
347 	 * allowed to sleep. The page-fault handler detects that it is running
348 	 * in atomic context and will not try to take mmap_sem and handle the
349 	 * fault, so additional pagefault_enable()/disable() calls are not
350 	 * needed.
351 	 *
352 	 * The access can't be done via copy_to_user() here because
353 	 * vc_write_mem() must not use string instructions to access unsafe
354 	 * memory. The reason is that MOVS is emulated by the #VC handler by
355 	 * splitting the move up into a read and a write and taking a nested #VC
356 	 * exception on whichever of them is the MMIO access. Using string
357 	 * instructions here would cause infinite nesting.
358 	 */
359 	switch (size) {
360 	case 1: {
361 		u8 d1;
362 		u8 __user *target = (u8 __user *)dst;
363 
364 		memcpy(&d1, buf, 1);
365 		if (__put_user(d1, target))
366 			goto fault;
367 		break;
368 	}
369 	case 2: {
370 		u16 d2;
371 		u16 __user *target = (u16 __user *)dst;
372 
373 		memcpy(&d2, buf, 2);
374 		if (__put_user(d2, target))
375 			goto fault;
376 		break;
377 	}
378 	case 4: {
379 		u32 d4;
380 		u32 __user *target = (u32 __user *)dst;
381 
382 		memcpy(&d4, buf, 4);
383 		if (__put_user(d4, target))
384 			goto fault;
385 		break;
386 	}
387 	case 8: {
388 		u64 d8;
389 		u64 __user *target = (u64 __user *)dst;
390 
391 		memcpy(&d8, buf, 8);
392 		if (__put_user(d8, target))
393 			goto fault;
394 		break;
395 	}
396 	default:
397 		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
398 		return ES_UNSUPPORTED;
399 	}
400 
401 	return ES_OK;
402 
403 fault:
404 	if (user_mode(ctxt->regs))
405 		error_code |= X86_PF_USER;
406 
407 	ctxt->fi.vector = X86_TRAP_PF;
408 	ctxt->fi.error_code = error_code;
409 	ctxt->fi.cr2 = (unsigned long)dst;
410 
411 	return ES_EXCEPTION;
412 }
413 
414 static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
415 				  char *src, char *buf, size_t size)
416 {
417 	unsigned long error_code = X86_PF_PROT;
418 
419 	/*
420 	 * This function uses __get_user() independent of whether kernel or user
421 	 * memory is accessed. This works fine because __get_user() does no
422 	 * sanity checks of the pointer being accessed. All it does is report
423 	 * when the access fails.
424 	 *
425 	 * Also, this function runs in atomic context, so __get_user() is not
426 	 * allowed to sleep. The page-fault handler detects that it is running
427 	 * in atomic context and will not try to take mmap_sem and handle the
428 	 * fault, so additional pagefault_enable()/disable() calls are not
429 	 * needed.
430 	 *
431 	 * The access can't be done via copy_from_user() here because
432 	 * vc_read_mem() must not use string instructions to access unsafe
433 	 * memory. The reason is that MOVS is emulated by the #VC handler by
434 	 * splitting the move up into a read and a write and taking a nested #VC
435 	 * exception on whichever of them is the MMIO access. Using string
436 	 * instructions here would cause infinite nesting.
437 	 */
438 	switch (size) {
439 	case 1: {
440 		u8 d1;
441 		u8 __user *s = (u8 __user *)src;
442 
443 		if (__get_user(d1, s))
444 			goto fault;
445 		memcpy(buf, &d1, 1);
446 		break;
447 	}
448 	case 2: {
449 		u16 d2;
450 		u16 __user *s = (u16 __user *)src;
451 
452 		if (__get_user(d2, s))
453 			goto fault;
454 		memcpy(buf, &d2, 2);
455 		break;
456 	}
457 	case 4: {
458 		u32 d4;
459 		u32 __user *s = (u32 __user *)src;
460 
461 		if (__get_user(d4, s))
462 			goto fault;
463 		memcpy(buf, &d4, 4);
464 		break;
465 	}
466 	case 8: {
467 		u64 d8;
468 		u64 __user *s = (u64 __user *)src;
469 		if (__get_user(d8, s))
470 			goto fault;
471 		memcpy(buf, &d8, 8);
472 		break;
473 	}
474 	default:
475 		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
476 		return ES_UNSUPPORTED;
477 	}
478 
479 	return ES_OK;
480 
481 fault:
482 	if (user_mode(ctxt->regs))
483 		error_code |= X86_PF_USER;
484 
485 	ctxt->fi.vector = X86_TRAP_PF;
486 	ctxt->fi.error_code = error_code;
487 	ctxt->fi.cr2 = (unsigned long)src;
488 
489 	return ES_EXCEPTION;
490 }
491 
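/*
 * Translate a virtual address to a physical one by walking the page
 * tables by hand. __pa() cannot be used here because the address may
 * live in the vmalloc or ioremap ranges, hence the "slow" in the name.
 */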
492 static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
493 					   unsigned long vaddr, phys_addr_t *paddr)
494 {
495 	unsigned long va = (unsigned long)vaddr;
496 	unsigned int level;
497 	phys_addr_t pa;
498 	pgd_t *pgd;
499 	pte_t *pte;
500 
501 	pgd = __va(read_cr3_pa());
502 	pgd = &pgd[pgd_index(va)];
503 	pte = lookup_address_in_pgd(pgd, va, &level);
504 	if (!pte) {
505 		ctxt->fi.vector     = X86_TRAP_PF;
506 		ctxt->fi.cr2        = vaddr;
507 		ctxt->fi.error_code = 0;
508 
509 		if (user_mode(ctxt->regs))
510 			ctxt->fi.error_code |= X86_PF_USER;
511 
512 		return ES_EXCEPTION;
513 	}
514 
515 	if (WARN_ON_ONCE(pte_val(*pte) & _PAGE_ENC))
516 		/* Emulated MMIO to/from encrypted memory not supported */
517 		return ES_UNSUPPORTED;
518 
519 	pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
520 	pa |= va & ~page_level_mask(level);
521 
522 	*paddr = pa;
523 
524 	return ES_OK;
525 }
526 
527 /* Include code shared with pre-decompression boot stage */
528 #include "sev-shared.c"
529 
530 static noinstr void __sev_put_ghcb(struct ghcb_state *state)
531 {
532 	struct sev_es_runtime_data *data;
533 	struct ghcb *ghcb;
534 
535 	WARN_ON(!irqs_disabled());
536 
537 	data = this_cpu_read(runtime_data);
538 	ghcb = &data->ghcb_page;
539 
540 	if (state->ghcb) {
541 		/* Restore GHCB from Backup */
542 		*ghcb = *state->ghcb;
543 		data->backup_ghcb_active = false;
544 		state->ghcb = NULL;
545 	} else {
546 		/*
547 		 * Invalidate the GHCB so a VMGEXIT instruction issued
548 		 * from userspace won't appear to be valid.
549 		 */
550 		vc_ghcb_invalidate(ghcb);
551 		data->ghcb_active = false;
552 	}
553 }
554 
555 void noinstr __sev_es_nmi_complete(void)
556 {
557 	struct ghcb_state state;
558 	struct ghcb *ghcb;
559 
560 	ghcb = __sev_get_ghcb(&state);
561 
562 	vc_ghcb_invalidate(ghcb);
563 	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
564 	ghcb_set_sw_exit_info_1(ghcb, 0);
565 	ghcb_set_sw_exit_info_2(ghcb, 0);
566 
567 	sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
568 	VMGEXIT();
569 
570 	__sev_put_ghcb(&state);
571 }
572 
573 static u64 __init get_secrets_page(void)
574 {
575 	u64 pa_data = boot_params.cc_blob_address;
576 	struct cc_blob_sev_info info;
577 	void *map;
578 
579 	/*
580 	 * The CC blob contains the address of the secrets page; check if the
581 	 * blob is present.
582 	 */
583 	if (!pa_data)
584 		return 0;
585 
586 	map = early_memremap(pa_data, sizeof(info));
587 	if (!map) {
588 		pr_err("Unable to locate SNP secrets page: failed to map the Confidential Computing blob.\n");
589 		return 0;
590 	}
591 	memcpy(&info, map, sizeof(info));
592 	early_memunmap(map, sizeof(info));
593 
594 	/* smoke-test the secrets page passed */
595 	if (!info.secrets_phys || info.secrets_len != PAGE_SIZE)
596 		return 0;
597 
598 	return info.secrets_phys;
599 }
600 
601 static u64 __init get_snp_jump_table_addr(void)
602 {
603 	struct snp_secrets_page_layout *layout;
604 	void __iomem *mem;
605 	u64 pa, addr;
606 
607 	pa = get_secrets_page();
608 	if (!pa)
609 		return 0;
610 
611 	mem = ioremap_encrypted(pa, PAGE_SIZE);
612 	if (!mem) {
613 		pr_err("Unable to locate AP jump table address: failed to map the SNP secrets page.\n");
614 		return 0;
615 	}
616 
617 	layout = (__force struct snp_secrets_page_layout *)mem;
618 
619 	addr = layout->os_area.ap_jump_table_pa;
620 	iounmap(mem);
621 
622 	return addr;
623 }
624 
625 static u64 __init get_jump_table_addr(void)
626 {
627 	struct ghcb_state state;
628 	unsigned long flags;
629 	struct ghcb *ghcb;
630 	u64 ret = 0;
631 
632 	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
633 		return get_snp_jump_table_addr();
634 
635 	local_irq_save(flags);
636 
637 	ghcb = __sev_get_ghcb(&state);
638 
639 	vc_ghcb_invalidate(ghcb);
640 	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
641 	ghcb_set_sw_exit_info_1(ghcb, SVM_VMGEXIT_GET_AP_JUMP_TABLE);
642 	ghcb_set_sw_exit_info_2(ghcb, 0);
643 
644 	sev_es_wr_ghcb_msr(__pa(ghcb));
645 	VMGEXIT();
646 
647 	if (ghcb_sw_exit_info_1_is_valid(ghcb) &&
648 	    ghcb_sw_exit_info_2_is_valid(ghcb))
649 		ret = ghcb->save.sw_exit_info_2;
650 
651 	__sev_put_ghcb(&state);
652 
653 	local_irq_restore(flags);
654 
655 	return ret;
656 }
657 
658 static void early_set_pages_state(unsigned long vaddr, unsigned long paddr,
659 				  unsigned long npages, enum psc_op op)
660 {
661 	unsigned long paddr_end;
662 	u64 val;
663 	int ret;
664 
665 	vaddr = vaddr & PAGE_MASK;
666 
667 	paddr = paddr & PAGE_MASK;
668 	paddr_end = paddr + (npages << PAGE_SHIFT);
669 
670 	while (paddr < paddr_end) {
671 		if (op == SNP_PAGE_STATE_SHARED) {
672 			/* Page validation must be rescinded before changing to shared */
673 			ret = pvalidate(vaddr, RMP_PG_SIZE_4K, false);
674 			if (WARN(ret, "Failed to validate address 0x%lx ret %d", paddr, ret))
675 				goto e_term;
676 		}
677 
678 		/*
679 		 * Use the MSR protocol because this function can be called before
680 		 * the GHCB is established.
681 		 */
682 		sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
683 		VMGEXIT();
684 
685 		val = sev_es_rd_ghcb_msr();
686 
687 		if (WARN(GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP,
688 			 "Wrong PSC response code: 0x%x\n",
689 			 (unsigned int)GHCB_RESP_CODE(val)))
690 			goto e_term;
691 
692 		if (WARN(GHCB_MSR_PSC_RESP_VAL(val),
693 			 "Failed to change page state to '%s' paddr 0x%lx error 0x%llx\n",
694 			 op == SNP_PAGE_STATE_PRIVATE ? "private" : "shared",
695 			 paddr, GHCB_MSR_PSC_RESP_VAL(val)))
696 			goto e_term;
697 
698 		if (op == SNP_PAGE_STATE_PRIVATE) {
699 			/* Page validation must be performed after changing to private */
700 			ret = pvalidate(vaddr, RMP_PG_SIZE_4K, true);
701 			if (WARN(ret, "Failed to validate address 0x%lx ret %d", paddr, ret))
702 				goto e_term;
703 		}
704 
705 		vaddr += PAGE_SIZE;
706 		paddr += PAGE_SIZE;
707 	}
708 
709 	return;
710 
711 e_term:
712 	sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
713 }
714 
715 void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
716 					 unsigned long npages)
717 {
718 	/*
719 	 * This can be invoked in early boot while running identity mapped, so
720 	 * use an open-coded check for SNP instead of using cc_platform_has().
721 	 * This eliminates worries about jump tables or checking boot_cpu_data
722 	 * in the cc_platform_has() function.
723 	 */
724 	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
725 		return;
726 
727 	/*
728 	 * Ask the hypervisor to mark the memory pages as private in the RMP
729 	 * table.
730 	 */
731 	early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_PRIVATE);
732 }
733 
734 void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
735 					unsigned long npages)
736 {
737 	/*
738 	 * This can be invoked in early boot while running identity mapped, so
739 	 * use an open-coded check for SNP instead of using cc_platform_has().
740 	 * This eliminates worries about jump tables or checking boot_cpu_data
741 	 * in the cc_platform_has() function.
742 	 */
743 	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
744 		return;
745 
746 	/* Ask the hypervisor to mark the memory pages as shared in the RMP table. */
747 	early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_SHARED);
748 }
749 
750 void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op)
751 {
752 	unsigned long vaddr, npages;
753 
754 	vaddr = (unsigned long)__va(paddr);
755 	npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
756 
757 	if (op == SNP_PAGE_STATE_PRIVATE)
758 		early_snp_set_memory_private(vaddr, paddr, npages);
759 	else if (op == SNP_PAGE_STATE_SHARED)
760 		early_snp_set_memory_shared(vaddr, paddr, npages);
761 	else
762 		WARN(1, "invalid memory op %d\n", op);
763 }
764 
765 static unsigned long __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
766 				       unsigned long vaddr_end, int op)
767 {
768 	struct ghcb_state state;
769 	bool use_large_entry;
770 	struct psc_hdr *hdr;
771 	struct psc_entry *e;
772 	unsigned long flags;
773 	unsigned long pfn;
774 	struct ghcb *ghcb;
775 	int i;
776 
777 	hdr = &data->hdr;
778 	e = data->entries;
779 
780 	memset(data, 0, sizeof(*data));
781 	i = 0;
782 
783 	while (vaddr < vaddr_end && i < ARRAY_SIZE(data->entries)) {
784 		hdr->end_entry = i;
785 
786 		if (is_vmalloc_addr((void *)vaddr)) {
787 			pfn = vmalloc_to_pfn((void *)vaddr);
788 			use_large_entry = false;
789 		} else {
790 			pfn = __pa(vaddr) >> PAGE_SHIFT;
791 			use_large_entry = true;
792 		}
793 
794 		e->gfn = pfn;
795 		e->operation = op;
796 
797 		if (use_large_entry && IS_ALIGNED(vaddr, PMD_SIZE) &&
798 		    (vaddr_end - vaddr) >= PMD_SIZE) {
799 			e->pagesize = RMP_PG_SIZE_2M;
800 			vaddr += PMD_SIZE;
801 		} else {
802 			e->pagesize = RMP_PG_SIZE_4K;
803 			vaddr += PAGE_SIZE;
804 		}
805 
806 		e++;
807 		i++;
808 	}
809 
810 	/* Page validation must be rescinded before changing to shared */
811 	if (op == SNP_PAGE_STATE_SHARED)
812 		pvalidate_pages(data);
813 
814 	local_irq_save(flags);
815 
816 	if (sev_cfg.ghcbs_initialized)
817 		ghcb = __sev_get_ghcb(&state);
818 	else
819 		ghcb = boot_ghcb;
820 
821 	/* Invoke the hypervisor to perform the page state changes */
822 	if (!ghcb || vmgexit_psc(ghcb, data))
823 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
824 
825 	if (sev_cfg.ghcbs_initialized)
826 		__sev_put_ghcb(&state);
827 
828 	local_irq_restore(flags);
829 
830 	/* Page validation must be performed after changing to private */
831 	if (op == SNP_PAGE_STATE_PRIVATE)
832 		pvalidate_pages(data);
833 
834 	return vaddr;
835 }
836 
837 static void set_pages_state(unsigned long vaddr, unsigned long npages, int op)
838 {
839 	struct snp_psc_desc desc;
840 	unsigned long vaddr_end;
841 
842 	/* Use the MSR protocol when a GHCB is not available. */
843 	if (!boot_ghcb)
844 		return early_set_pages_state(vaddr, __pa(vaddr), npages, op);
845 
846 	vaddr = vaddr & PAGE_MASK;
847 	vaddr_end = vaddr + (npages << PAGE_SHIFT);
848 
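	/*
	 * Each __set_pages_state() call fills one PSC descriptor (up to
	 * VMGEXIT_PSC_MAX_ENTRY entries) and returns the first unprocessed
	 * address, so loop until the whole range is covered.
	 */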
849 	while (vaddr < vaddr_end)
850 		vaddr = __set_pages_state(&desc, vaddr, vaddr_end, op);
851 }
852 
853 void snp_set_memory_shared(unsigned long vaddr, unsigned long npages)
854 {
855 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
856 		return;
857 
858 	set_pages_state(vaddr, npages, SNP_PAGE_STATE_SHARED);
859 }
860 
861 void snp_set_memory_private(unsigned long vaddr, unsigned long npages)
862 {
863 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
864 		return;
865 
866 	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
867 }
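/*
 * Illustrative usage (a sketch, not part of the original source): these
 * helpers are reached via the x86 CC hooks when the encryption attribute
 * of a mapping changes, e.g.:
 *
 *	set_memory_decrypted(vaddr, npages);	// RMP: private -> shared
 *	...do unencrypted DMA...
 *	set_memory_encrypted(vaddr, npages);	// RMP: shared -> private
 *
 * The RMP state should never be flipped without also updating the
 * page-table encryption bit, which is what those wrappers take care of.
 */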
868 
869 void snp_accept_memory(phys_addr_t start, phys_addr_t end)
870 {
871 	unsigned long vaddr, npages;
872 
873 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
874 		return;
875 
876 	vaddr = (unsigned long)__va(start);
877 	npages = (end - start) >> PAGE_SHIFT;
878 
879 	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
880 }
881 
882 static int snp_set_vmsa(void *va, bool vmsa)
883 {
884 	u64 attrs;
885 
886 	/*
887 	 * Running at VMPL0 allows the kernel to change the VMSA bit for a page
888 	 * using the RMPADJUST instruction. However, for the instruction to
889 	 * succeed it must target the permissions of a lesser privileged
890 	 * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
891 	 * instruction in the AMD64 APM Volume 3).
892 	 */
893 	attrs = 1;
894 	if (vmsa)
895 		attrs |= RMPADJUST_VMSA_PAGE_BIT;
896 
897 	return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
898 }
899 
900 #define __ATTR_BASE		(SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK)
901 #define INIT_CS_ATTRIBS		(__ATTR_BASE | SVM_SELECTOR_READ_MASK | SVM_SELECTOR_CODE_MASK)
902 #define INIT_DS_ATTRIBS		(__ATTR_BASE | SVM_SELECTOR_WRITE_MASK)
903 
904 #define INIT_LDTR_ATTRIBS	(SVM_SELECTOR_P_MASK | 2)
905 #define INIT_TR_ATTRIBS		(SVM_SELECTOR_P_MASK | 3)
906 
907 static void *snp_alloc_vmsa_page(void)
908 {
909 	struct page *p;
910 
911 	/*
912 	 * Allocate VMSA page to work around the SNP erratum where the CPU will
913 	 * incorrectly signal an RMP violation #PF if a large page (2MB or 1GB)
914 	 * collides with the RMP entry of a VMSA page. The recommended workaround
915 	 * is to not use a large page.
916 	 *
917 	 * Allocate an 8k page which is also 8k-aligned.
918 	 */
919 	p = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO, 1);
920 	if (!p)
921 		return NULL;
922 
923 	split_page(p, 1);
924 
925 	/* Free the first 4k. This page may be 2M/1G aligned and cannot be used. */
926 	__free_page(p);
927 
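	/*
	 * p + 1 sits 4K past an 8K-aligned address and therefore can never
	 * itself be 2M/1G aligned.
	 */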
928 	return page_address(p + 1);
929 }
930 
931 static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
932 {
933 	int err;
934 
935 	err = snp_set_vmsa(vmsa, false);
936 	if (err)
937 		pr_err("clear VMSA page failed (%u), leaking page\n", err);
938 	else
939 		free_page((unsigned long)vmsa);
940 }
941 
942 static int wakeup_cpu_via_vmgexit(int apic_id, unsigned long start_ip)
943 {
944 	struct sev_es_save_area *cur_vmsa, *vmsa;
945 	struct ghcb_state state;
946 	unsigned long flags;
947 	struct ghcb *ghcb;
948 	u8 sipi_vector;
949 	int cpu, ret;
950 	u64 cr4;
951 
952 	/*
953 	 * The hypervisor SNP feature support check has happened earlier; just check
954 	 * the AP_CREATION one here.
955 	 */
956 	if (!(sev_hv_features & GHCB_HV_FT_SNP_AP_CREATION))
957 		return -EOPNOTSUPP;
958 
959 	/*
960 	 * Verify the desired start IP against the known trampoline start IP
961 	 * to catch any new trampolines that may be introduced that
962 	 * would require a new protected guest entry point.
963 	 */
964 	if (WARN_ONCE(start_ip != real_mode_header->trampoline_start,
965 		      "Unsupported SNP start_ip: %lx\n", start_ip))
966 		return -EINVAL;
967 
968 	/* Override start_ip with known protected guest start IP */
969 	start_ip = real_mode_header->sev_es_trampoline_start;
970 
971 	/* Find the logical CPU for the APIC ID */
972 	for_each_present_cpu(cpu) {
973 		if (arch_match_cpu_phys_id(cpu, apic_id))
974 			break;
975 	}
976 	if (cpu >= nr_cpu_ids)
977 		return -EINVAL;
978 
979 	cur_vmsa = per_cpu(sev_vmsa, cpu);
980 
981 	/*
982 	 * A new VMSA is created each time because there is no guarantee that
983 	 * the current VMSA is the kernel's or that the vCPU is not running. If
984 	 * an attempt was made to use the current VMSA with a running vCPU, a
985 	 * #VMEXIT of that vCPU would wipe out all of the settings being done
986 	 * here.
987 	 */
988 	vmsa = (struct sev_es_save_area *)snp_alloc_vmsa_page();
989 	if (!vmsa)
990 		return -ENOMEM;
991 
992 	/* CR4 should maintain the MCE value */
993 	cr4 = native_read_cr4() & X86_CR4_MCE;
994 
995 	/* Set the CS value based on the start_ip converted to a SIPI vector */
996 	sipi_vector		= (start_ip >> 12);
997 	vmsa->cs.base		= sipi_vector << 12;
998 	vmsa->cs.limit		= AP_INIT_CS_LIMIT;
999 	vmsa->cs.attrib		= INIT_CS_ATTRIBS;
1000 	vmsa->cs.selector	= sipi_vector << 8;
1001 
1002 	/* Set the RIP value based on start_ip */
1003 	vmsa->rip		= start_ip & 0xfff;
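	/*
	 * Worked example (illustrative): start_ip == 0x9c000 gives SIPI
	 * vector 0x9c, CS.base 0x9c000, CS.selector 0x9c00 and RIP 0x000,
	 * i.e. the real-mode far pointer 9c00:0000.
	 */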
1004 
1005 	/* Set AP INIT defaults as documented in the APM */
1006 	vmsa->ds.limit		= AP_INIT_DS_LIMIT;
1007 	vmsa->ds.attrib		= INIT_DS_ATTRIBS;
1008 	vmsa->es		= vmsa->ds;
1009 	vmsa->fs		= vmsa->ds;
1010 	vmsa->gs		= vmsa->ds;
1011 	vmsa->ss		= vmsa->ds;
1012 
1013 	vmsa->gdtr.limit	= AP_INIT_GDTR_LIMIT;
1014 	vmsa->ldtr.limit	= AP_INIT_LDTR_LIMIT;
1015 	vmsa->ldtr.attrib	= INIT_LDTR_ATTRIBS;
1016 	vmsa->idtr.limit	= AP_INIT_IDTR_LIMIT;
1017 	vmsa->tr.limit		= AP_INIT_TR_LIMIT;
1018 	vmsa->tr.attrib		= INIT_TR_ATTRIBS;
1019 
1020 	vmsa->cr4		= cr4;
1021 	vmsa->cr0		= AP_INIT_CR0_DEFAULT;
1022 	vmsa->dr7		= DR7_RESET_VALUE;
1023 	vmsa->dr6		= AP_INIT_DR6_DEFAULT;
1024 	vmsa->rflags		= AP_INIT_RFLAGS_DEFAULT;
1025 	vmsa->g_pat		= AP_INIT_GPAT_DEFAULT;
1026 	vmsa->xcr0		= AP_INIT_XCR0_DEFAULT;
1027 	vmsa->mxcsr		= AP_INIT_MXCSR_DEFAULT;
1028 	vmsa->x87_ftw		= AP_INIT_X87_FTW_DEFAULT;
1029 	vmsa->x87_fcw		= AP_INIT_X87_FCW_DEFAULT;
1030 
1031 	/* SVME must be set. */
1032 	vmsa->efer		= EFER_SVME;
1033 
1034 	/*
1035 	 * Set the SNP-specific fields for this VMSA:
1036 	 *   VMPL level
1037 	 *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
1038 	 */
1039 	vmsa->vmpl		= 0;
1040 	vmsa->sev_features	= sev_status >> 2;
1041 
1042 	/* Switch the page over to a VMSA page now that it is initialized */
1043 	ret = snp_set_vmsa(vmsa, true);
1044 	if (ret) {
1045 		pr_err("set VMSA page failed (%u)\n", ret);
1046 		free_page((unsigned long)vmsa);
1047 
1048 		return -EINVAL;
1049 	}
1050 
1051 	/* Issue VMGEXIT AP Creation NAE event */
1052 	local_irq_save(flags);
1053 
1054 	ghcb = __sev_get_ghcb(&state);
1055 
1056 	vc_ghcb_invalidate(ghcb);
1057 	ghcb_set_rax(ghcb, vmsa->sev_features);
1058 	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_CREATION);
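	/* exit_info_1: target APIC ID in bits 63:32, request type in bits 31:0 */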
1059 	ghcb_set_sw_exit_info_1(ghcb, ((u64)apic_id << 32) | SVM_VMGEXIT_AP_CREATE);
1060 	ghcb_set_sw_exit_info_2(ghcb, __pa(vmsa));
1061 
1062 	sev_es_wr_ghcb_msr(__pa(ghcb));
1063 	VMGEXIT();
1064 
1065 	if (!ghcb_sw_exit_info_1_is_valid(ghcb) ||
1066 	    lower_32_bits(ghcb->save.sw_exit_info_1)) {
1067 		pr_err("SNP AP Creation error\n");
1068 		ret = -EINVAL;
1069 	}
1070 
1071 	__sev_put_ghcb(&state);
1072 
1073 	local_irq_restore(flags);
1074 
1075 	/* Perform cleanup if there was an error */
1076 	if (ret) {
1077 		snp_cleanup_vmsa(vmsa);
1078 		vmsa = NULL;
1079 	}
1080 
1081 	/* Free up any previous VMSA page */
1082 	if (cur_vmsa)
1083 		snp_cleanup_vmsa(cur_vmsa);
1084 
1085 	/* Record the current VMSA page */
1086 	per_cpu(sev_vmsa, cpu) = vmsa;
1087 
1088 	return ret;
1089 }
1090 
1091 void __init snp_set_wakeup_secondary_cpu(void)
1092 {
1093 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
1094 		return;
1095 
1096 	/*
1097 	 * Always set this override if SNP is enabled. This makes it the
1098 	 * required method to start APs under SNP. If the hypervisor does
1099 	 * not support AP creation, then no APs will be started.
1100 	 */
1101 	apic_update_callback(wakeup_secondary_cpu, wakeup_cpu_via_vmgexit);
1102 }
1103 
1104 int __init sev_es_setup_ap_jump_table(struct real_mode_header *rmh)
1105 {
1106 	u16 startup_cs, startup_ip;
1107 	phys_addr_t jump_table_pa;
1108 	u64 jump_table_addr;
1109 	u16 __iomem *jump_table;
1110 
1111 	jump_table_addr = get_jump_table_addr();
1112 
1113 	/* On UP guests there is no jump table so this is not a failure */
1114 	if (!jump_table_addr)
1115 		return 0;
1116 
1117 	/* Check if AP Jump Table is page-aligned */
1118 	if (jump_table_addr & ~PAGE_MASK)
1119 		return -EINVAL;
1120 
1121 	jump_table_pa = jump_table_addr & PAGE_MASK;
1122 
1123 	startup_cs = (u16)(rmh->trampoline_start >> 4);
1124 	startup_ip = (u16)(rmh->sev_es_trampoline_start -
1125 			   rmh->trampoline_start);
1126 
1127 	jump_table = ioremap_encrypted(jump_table_pa, PAGE_SIZE);
1128 	if (!jump_table)
1129 		return -EIO;
1130 
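	/* The jump table starts with two u16 slots: [0] = IP, [1] = CS */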
1131 	writew(startup_ip, &jump_table[0]);
1132 	writew(startup_cs, &jump_table[1]);
1133 
1134 	iounmap(jump_table);
1135 
1136 	return 0;
1137 }
1138 
1139 /*
1140  * This is needed by the OVMF UEFI firmware which will use whatever it finds in
1141  * the GHCB MSR as its GHCB to talk to the hypervisor. So make sure the per-cpu
1142  * runtime GHCBs used by the kernel are also mapped in the EFI page-table.
1143  */
1144 int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
1145 {
1146 	struct sev_es_runtime_data *data;
1147 	unsigned long address, pflags;
1148 	int cpu;
1149 	u64 pfn;
1150 
1151 	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
1152 		return 0;
1153 
1154 	pflags = _PAGE_NX | _PAGE_RW;
1155 
1156 	for_each_possible_cpu(cpu) {
1157 		data = per_cpu(runtime_data, cpu);
1158 
1159 		address = __pa(&data->ghcb_page);
1160 		pfn = address >> PAGE_SHIFT;
1161 
1162 		if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags))
1163 			return 1;
1164 	}
1165 
1166 	return 0;
1167 }
1168 
1169 static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
1170 {
1171 	struct pt_regs *regs = ctxt->regs;
1172 	enum es_result ret;
1173 	u64 exit_info_1;
1174 
1175 	/* Is it a WRMSR? */
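	/* (opcode 0f 30 is WRMSR, 0f 32 is RDMSR) */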
1176 	exit_info_1 = (ctxt->insn.opcode.bytes[1] == 0x30) ? 1 : 0;
1177 
1178 	ghcb_set_rcx(ghcb, regs->cx);
1179 	if (exit_info_1) {
1180 		ghcb_set_rax(ghcb, regs->ax);
1181 		ghcb_set_rdx(ghcb, regs->dx);
1182 	}
1183 
1184 	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, exit_info_1, 0);
1185 
1186 	if ((ret == ES_OK) && (!exit_info_1)) {
1187 		regs->ax = ghcb->save.rax;
1188 		regs->dx = ghcb->save.rdx;
1189 	}
1190 
1191 	return ret;
1192 }
1193 
1194 static void snp_register_per_cpu_ghcb(void)
1195 {
1196 	struct sev_es_runtime_data *data;
1197 	struct ghcb *ghcb;
1198 
1199 	data = this_cpu_read(runtime_data);
1200 	ghcb = &data->ghcb_page;
1201 
1202 	snp_register_ghcb_early(__pa(ghcb));
1203 }
1204 
1205 void setup_ghcb(void)
1206 {
1207 	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
1208 		return;
1209 
1210 	/* First make sure the hypervisor talks a supported protocol. */
1211 	if (!sev_es_negotiate_protocol())
1212 		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
1213 
1214 	/*
1215 	 * Check whether the runtime #VC exception handler is active. It uses
1216 	 * the per-CPU GHCB page which is set up by sev_es_init_vc_handling().
1217 	 *
1218 	 * If SNP is active, register the per-CPU GHCB page so that the runtime
1219 	 * exception handler can use it.
1220 	 */
1221 	if (initial_vc_handler == (unsigned long)kernel_exc_vmm_communication) {
1222 		if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
1223 			snp_register_per_cpu_ghcb();
1224 
1225 		sev_cfg.ghcbs_initialized = true;
1226 
1227 		return;
1228 	}
1229 
1230 	/*
1231 	 * Clear the boot_ghcb. The first exception comes in before the bss
1232 	 * section is cleared.
1233 	 */
1234 	memset(&boot_ghcb_page, 0, PAGE_SIZE);
1235 
1236 	/* Alright - Make the boot-ghcb public */
1237 	boot_ghcb = &boot_ghcb_page;
1238 
1239 	/* SNP guest requires that GHCB GPA must be registered. */
1240 	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
1241 		snp_register_ghcb_early(__pa(&boot_ghcb_page));
1242 }
1243 
1244 #ifdef CONFIG_HOTPLUG_CPU
1245 static void sev_es_ap_hlt_loop(void)
1246 {
1247 	struct ghcb_state state;
1248 	struct ghcb *ghcb;
1249 
1250 	ghcb = __sev_get_ghcb(&state);
1251 
1252 	while (true) {
1253 		vc_ghcb_invalidate(ghcb);
1254 		ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_HLT_LOOP);
1255 		ghcb_set_sw_exit_info_1(ghcb, 0);
1256 		ghcb_set_sw_exit_info_2(ghcb, 0);
1257 
1258 		sev_es_wr_ghcb_msr(__pa(ghcb));
1259 		VMGEXIT();
1260 
1261 		/* Wakeup signal? */
1262 		if (ghcb_sw_exit_info_2_is_valid(ghcb) &&
1263 		    ghcb->save.sw_exit_info_2)
1264 			break;
1265 	}
1266 
1267 	__sev_put_ghcb(&state);
1268 }
1269 
1270 /*
1271  * Play_dead handler when running under SEV-ES. This is needed because
1272  * the hypervisor can't deliver a SIPI request to restart the AP.
1273  * Instead the kernel has to issue a VMGEXIT to halt the VCPU until the
1274  * hypervisor wakes it up again.
1275  */
1276 static void sev_es_play_dead(void)
1277 {
1278 	play_dead_common();
1279 
1280 	/* IRQs now disabled */
1281 
1282 	sev_es_ap_hlt_loop();
1283 
1284 	/*
1285 	 * If we get here, the VCPU was woken up again. Jump to CPU
1286 	 * startup code to get it back online.
1287 	 */
1288 	soft_restart_cpu();
1289 }
1290 #else  /* CONFIG_HOTPLUG_CPU */
1291 #define sev_es_play_dead	native_play_dead
1292 #endif /* CONFIG_HOTPLUG_CPU */
1293 
1294 #ifdef CONFIG_SMP
1295 static void __init sev_es_setup_play_dead(void)
1296 {
1297 	smp_ops.play_dead = sev_es_play_dead;
1298 }
1299 #else
1300 static inline void sev_es_setup_play_dead(void) { }
1301 #endif
1302 
1303 static void __init alloc_runtime_data(int cpu)
1304 {
1305 	struct sev_es_runtime_data *data;
1306 
1307 	data = memblock_alloc(sizeof(*data), PAGE_SIZE);
1308 	if (!data)
1309 		panic("Can't allocate SEV-ES runtime data");
1310 
1311 	per_cpu(runtime_data, cpu) = data;
1312 }
1313 
1314 static void __init init_ghcb(int cpu)
1315 {
1316 	struct sev_es_runtime_data *data;
1317 	int err;
1318 
1319 	data = per_cpu(runtime_data, cpu);
1320 
1321 	err = early_set_memory_decrypted((unsigned long)&data->ghcb_page,
1322 					 sizeof(data->ghcb_page));
1323 	if (err)
1324 		panic("Can't map GHCBs unencrypted");
1325 
1326 	memset(&data->ghcb_page, 0, sizeof(data->ghcb_page));
1327 
1328 	data->ghcb_active = false;
1329 	data->backup_ghcb_active = false;
1330 }
1331 
1332 void __init sev_es_init_vc_handling(void)
1333 {
1334 	int cpu;
1335 
1336 	BUILD_BUG_ON(offsetof(struct sev_es_runtime_data, ghcb_page) % PAGE_SIZE);
1337 
1338 	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
1339 		return;
1340 
1341 	if (!sev_es_check_cpu_features())
1342 		panic("SEV-ES CPU Features missing");
1343 
1344 	/*
1345 	 * SNP is supported in v2 of the GHCB spec which mandates support for HV
1346 	 * features.
1347 	 */
1348 	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) {
1349 		sev_hv_features = get_hv_features();
1350 
1351 		if (!(sev_hv_features & GHCB_HV_FT_SNP))
1352 			sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
1353 	}
1354 
1355 	/* Initialize per-cpu GHCB pages */
1356 	for_each_possible_cpu(cpu) {
1357 		alloc_runtime_data(cpu);
1358 		init_ghcb(cpu);
1359 	}
1360 
1361 	sev_es_setup_play_dead();
1362 
1363 	/* Secondary CPUs use the runtime #VC handler */
1364 	initial_vc_handler = (unsigned long)kernel_exc_vmm_communication;
1365 }
1366 
1367 static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
1368 {
1369 	int trapnr = ctxt->fi.vector;
1370 
1371 	if (trapnr == X86_TRAP_PF)
1372 		native_write_cr2(ctxt->fi.cr2);
1373 
1374 	ctxt->regs->orig_ax = ctxt->fi.error_code;
1375 	do_early_exception(ctxt->regs, trapnr);
1376 }
1377 
1378 static long *vc_insn_get_rm(struct es_em_ctxt *ctxt)
1379 {
1380 	long *reg_array;
1381 	int offset;
1382 
1383 	reg_array = (long *)ctxt->regs;
1384 	offset    = insn_get_modrm_rm_off(&ctxt->insn, ctxt->regs);
1385 
1386 	if (offset < 0)
1387 		return NULL;
1388 
1389 	offset /= sizeof(long);
1390 
1391 	return reg_array + offset;
1392 }
1393 static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
1394 				 unsigned int bytes, bool read)
1395 {
1396 	u64 exit_code, exit_info_1, exit_info_2;
1397 	unsigned long ghcb_pa = __pa(ghcb);
1398 	enum es_result res;
1399 	phys_addr_t paddr;
1400 	void __user *ref;
1401 
1402 	ref = insn_get_addr_ref(&ctxt->insn, ctxt->regs);
1403 	if (ref == (void __user *)-1L)
1404 		return ES_UNSUPPORTED;
1405 
1406 	exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE;
1407 
1408 	res = vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr);
1409 	if (res != ES_OK) {
1410 		if (res == ES_EXCEPTION && !read)
1411 			ctxt->fi.error_code |= X86_PF_WRITE;
1412 
1413 		return res;
1414 	}
1415 
1416 	exit_info_1 = paddr;
1417 	/* Can never be greater than 8 */
1418 	exit_info_2 = bytes;
1419 
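	/* Data is exchanged via the GHCB's shared buffer; sw_scratch points at it */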
1420 	ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer));
1421 
1422 	return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2);
1423 }
1424 
1425 /*
1426  * The MOVS instruction has two memory operands, which raises the
1427  * problem that it is not known whether the access to the source or the
1428  * destination caused the #VC exception (and hence whether an MMIO read
1429  * or write operation needs to be emulated).
1430  *
1431  * Instead of playing games with walking page-tables and trying to guess
1432  * whether the source or destination is an MMIO range, split the move
1433  * into two operations, a read and a write with only one memory operand.
1434  * This will cause a nested #VC exception on the MMIO address which can
1435  * then be handled.
1436  *
1437  * This implementation has the benefit that it also supports MOVS where
1438  * source _and_ destination are MMIO regions.
1439  *
1440  * It will slow MOVS on MMIO down a lot, but in SEV-ES guests it is a
1441  * rare operation. If it turns out to be a performance problem the split
1442  * operations can be moved to memcpy_fromio() and memcpy_toio().
1443  */
1444 static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt,
1445 					  unsigned int bytes)
1446 {
1447 	unsigned long ds_base, es_base;
1448 	unsigned char *src, *dst;
1449 	unsigned char buffer[8];
1450 	enum es_result ret;
1451 	bool rep;
1452 	int off;
1453 
1454 	ds_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_DS);
1455 	es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);
1456 
1457 	if (ds_base == -1L || es_base == -1L) {
1458 		ctxt->fi.vector = X86_TRAP_GP;
1459 		ctxt->fi.error_code = 0;
1460 		return ES_EXCEPTION;
1461 	}
1462 
1463 	src = ds_base + (unsigned char *)ctxt->regs->si;
1464 	dst = es_base + (unsigned char *)ctxt->regs->di;
1465 
1466 	ret = vc_read_mem(ctxt, src, buffer, bytes);
1467 	if (ret != ES_OK)
1468 		return ret;
1469 
1470 	ret = vc_write_mem(ctxt, dst, buffer, bytes);
1471 	if (ret != ES_OK)
1472 		return ret;
1473 
1474 	if (ctxt->regs->flags & X86_EFLAGS_DF)
1475 		off = -bytes;
1476 	else
1477 		off =  bytes;
1478 
1479 	ctxt->regs->si += off;
1480 	ctxt->regs->di += off;
1481 
1482 	rep = insn_has_rep_prefix(&ctxt->insn);
1483 	if (rep)
1484 		ctxt->regs->cx -= 1;
1485 
1486 	if (!rep || ctxt->regs->cx == 0)
1487 		return ES_OK;
1488 	else
1489 		return ES_RETRY;
1490 }
1491 
1492 static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
1493 {
1494 	struct insn *insn = &ctxt->insn;
1495 	enum insn_mmio_type mmio;
1496 	unsigned int bytes = 0;
1497 	enum es_result ret;
1498 	u8 sign_byte;
1499 	long *reg_data;
1500 
1501 	mmio = insn_decode_mmio(insn, &bytes);
1502 	if (mmio == INSN_MMIO_DECODE_FAILED)
1503 		return ES_DECODE_FAILED;
1504 
1505 	if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
1506 		reg_data = insn_get_modrm_reg_ptr(insn, ctxt->regs);
1507 		if (!reg_data)
1508 			return ES_DECODE_FAILED;
1509 	}
1510 
1511 	switch (mmio) {
1512 	case INSN_MMIO_WRITE:
1513 		memcpy(ghcb->shared_buffer, reg_data, bytes);
1514 		ret = vc_do_mmio(ghcb, ctxt, bytes, false);
1515 		break;
1516 	case INSN_MMIO_WRITE_IMM:
1517 		memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes);
1518 		ret = vc_do_mmio(ghcb, ctxt, bytes, false);
1519 		break;
1520 	case INSN_MMIO_READ:
1521 		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
1522 		if (ret)
1523 			break;
1524 
1525 		/* Zero-extend for 32-bit operation */
1526 		if (bytes == 4)
1527 			*reg_data = 0;
1528 
1529 		memcpy(reg_data, ghcb->shared_buffer, bytes);
1530 		break;
1531 	case INSN_MMIO_READ_ZERO_EXTEND:
1532 		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
1533 		if (ret)
1534 			break;
1535 
1536 		/* Zero extend based on operand size */
1537 		memset(reg_data, 0, insn->opnd_bytes);
1538 		memcpy(reg_data, ghcb->shared_buffer, bytes);
1539 		break;
1540 	case INSN_MMIO_READ_SIGN_EXTEND:
1541 		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
1542 		if (ret)
1543 			break;
1544 
1545 		if (bytes == 1) {
1546 			u8 *val = (u8 *)ghcb->shared_buffer;
1547 
1548 			sign_byte = (*val & 0x80) ? 0xff : 0x00;
1549 		} else {
1550 			u16 *val = (u16 *)ghcb->shared_buffer;
1551 
1552 			sign_byte = (*val & 0x8000) ? 0xff : 0x00;
1553 		}
1554 
1555 		/* Sign extend based on operand size */
1556 		memset(reg_data, sign_byte, insn->opnd_bytes);
1557 		memcpy(reg_data, ghcb->shared_buffer, bytes);
1558 		break;
1559 	case INSN_MMIO_MOVS:
1560 		ret = vc_handle_mmio_movs(ctxt, bytes);
1561 		break;
1562 	default:
1563 		ret = ES_UNSUPPORTED;
1564 		break;
1565 	}
1566 
1567 	return ret;
1568 }
1569 
1570 static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,
1571 					  struct es_em_ctxt *ctxt)
1572 {
1573 	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
1574 	long val, *reg = vc_insn_get_rm(ctxt);
1575 	enum es_result ret;
1576 
1577 	if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP)
1578 		return ES_VMM_ERROR;
1579 
1580 	if (!reg)
1581 		return ES_DECODE_FAILED;
1582 
1583 	val = *reg;
1584 
1585 	/* Upper 32 bits must be written as zeroes */
1586 	if (val >> 32) {
1587 		ctxt->fi.vector = X86_TRAP_GP;
1588 		ctxt->fi.error_code = 0;
1589 		return ES_EXCEPTION;
1590 	}
1591 
1592 	/* Clear out other reserved bits and set bit 10 */
1593 	val = (val & 0xffff23ffL) | BIT(10);
1594 
1595 	/* Early non-zero writes to DR7 are not supported */
1596 	if (!data && (val & ~DR7_RESET_VALUE))
1597 		return ES_UNSUPPORTED;
1598 
1599 	/* Using a value of 0 for ExitInfo1 means RAX holds the value */
1600 	ghcb_set_rax(ghcb, val);
1601 	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
1602 	if (ret != ES_OK)
1603 		return ret;
1604 
1605 	if (data)
1606 		data->dr7 = val;
1607 
1608 	return ES_OK;
1609 }
1610 
1611 static enum es_result vc_handle_dr7_read(struct ghcb *ghcb,
1612 					 struct es_em_ctxt *ctxt)
1613 {
1614 	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
1615 	long *reg = vc_insn_get_rm(ctxt);
1616 
1617 	if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP)
1618 		return ES_VMM_ERROR;
1619 
1620 	if (!reg)
1621 		return ES_DECODE_FAILED;
1622 
1623 	if (data)
1624 		*reg = data->dr7;
1625 	else
1626 		*reg = DR7_RESET_VALUE;
1627 
1628 	return ES_OK;
1629 }
1630 
1631 static enum es_result vc_handle_wbinvd(struct ghcb *ghcb,
1632 				       struct es_em_ctxt *ctxt)
1633 {
1634 	return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0);
1635 }
1636 
1637 static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
1638 {
1639 	enum es_result ret;
1640 
1641 	ghcb_set_rcx(ghcb, ctxt->regs->cx);
1642 
1643 	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0);
1644 	if (ret != ES_OK)
1645 		return ret;
1646 
1647 	if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb)))
1648 		return ES_VMM_ERROR;
1649 
1650 	ctxt->regs->ax = ghcb->save.rax;
1651 	ctxt->regs->dx = ghcb->save.rdx;
1652 
1653 	return ES_OK;
1654 }
1655 
1656 static enum es_result vc_handle_monitor(struct ghcb *ghcb,
1657 					struct es_em_ctxt *ctxt)
1658 {
1659 	/*
1660 	 * Treat it as a NOP and do not leak a physical address to the
1661 	 * hypervisor.
1662 	 */
1663 	return ES_OK;
1664 }
1665 
1666 static enum es_result vc_handle_mwait(struct ghcb *ghcb,
1667 				      struct es_em_ctxt *ctxt)
1668 {
1669 	/* Treat the same as MONITOR/MONITORX */
1670 	return ES_OK;
1671 }
1672 
1673 static enum es_result vc_handle_vmmcall(struct ghcb *ghcb,
1674 					struct es_em_ctxt *ctxt)
1675 {
1676 	enum es_result ret;
1677 
1678 	ghcb_set_rax(ghcb, ctxt->regs->ax);
1679 	ghcb_set_cpl(ghcb, user_mode(ctxt->regs) ? 3 : 0);
1680 
1681 	if (x86_platform.hyper.sev_es_hcall_prepare)
1682 		x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs);
1683 
1684 	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0);
1685 	if (ret != ES_OK)
1686 		return ret;
1687 
1688 	if (!ghcb_rax_is_valid(ghcb))
1689 		return ES_VMM_ERROR;
1690 
1691 	ctxt->regs->ax = ghcb->save.rax;
1692 
1693 	/*
1694 	 * Call sev_es_hcall_finish() after regs->ax is already set.
1695 	 * This allows the hypervisor handler to overwrite it again if
1696 	 * necessary.
1697 	 */
1698 	if (x86_platform.hyper.sev_es_hcall_finish &&
1699 	    !x86_platform.hyper.sev_es_hcall_finish(ghcb, ctxt->regs))
1700 		return ES_VMM_ERROR;
1701 
1702 	return ES_OK;
1703 }
1704 
1705 static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
1706 					struct es_em_ctxt *ctxt)
1707 {
1708 	/*
1709 	 * Calling exc_alignment_check() directly does not work, because it
1710 	 * enables IRQs and the GHCB is active. Forward the exception and call
1711 	 * it later from vc_forward_exception().
1712 	 */
1713 	ctxt->fi.vector = X86_TRAP_AC;
1714 	ctxt->fi.error_code = 0;
1715 	return ES_EXCEPTION;
1716 }
1717 
1718 static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
1719 					 struct ghcb *ghcb,
1720 					 unsigned long exit_code)
1721 {
1722 	enum es_result result;
1723 
1724 	switch (exit_code) {
1725 	case SVM_EXIT_READ_DR7:
1726 		result = vc_handle_dr7_read(ghcb, ctxt);
1727 		break;
1728 	case SVM_EXIT_WRITE_DR7:
1729 		result = vc_handle_dr7_write(ghcb, ctxt);
1730 		break;
1731 	case SVM_EXIT_EXCP_BASE + X86_TRAP_AC:
1732 		result = vc_handle_trap_ac(ghcb, ctxt);
1733 		break;
1734 	case SVM_EXIT_RDTSC:
1735 	case SVM_EXIT_RDTSCP:
1736 		result = vc_handle_rdtsc(ghcb, ctxt, exit_code);
1737 		break;
1738 	case SVM_EXIT_RDPMC:
1739 		result = vc_handle_rdpmc(ghcb, ctxt);
1740 		break;
1741 	case SVM_EXIT_INVD:
1742 		pr_err_ratelimited("#VC exception for INVD??? Seriously???\n");
1743 		result = ES_UNSUPPORTED;
1744 		break;
1745 	case SVM_EXIT_CPUID:
1746 		result = vc_handle_cpuid(ghcb, ctxt);
1747 		break;
1748 	case SVM_EXIT_IOIO:
1749 		result = vc_handle_ioio(ghcb, ctxt);
1750 		break;
1751 	case SVM_EXIT_MSR:
1752 		result = vc_handle_msr(ghcb, ctxt);
1753 		break;
1754 	case SVM_EXIT_VMMCALL:
1755 		result = vc_handle_vmmcall(ghcb, ctxt);
1756 		break;
1757 	case SVM_EXIT_WBINVD:
1758 		result = vc_handle_wbinvd(ghcb, ctxt);
1759 		break;
1760 	case SVM_EXIT_MONITOR:
1761 		result = vc_handle_monitor(ghcb, ctxt);
1762 		break;
1763 	case SVM_EXIT_MWAIT:
1764 		result = vc_handle_mwait(ghcb, ctxt);
1765 		break;
1766 	case SVM_EXIT_NPF:
1767 		result = vc_handle_mmio(ghcb, ctxt);
1768 		break;
1769 	default:
1770 		/*
1771 		 * Unexpected #VC exception
1772 		 */
1773 		result = ES_UNSUPPORTED;
1774 	}
1775 
1776 	return result;
1777 }
1778 
1779 static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
1780 {
1781 	long error_code = ctxt->fi.error_code;
1782 	int trapnr = ctxt->fi.vector;
1783 
1784 	ctxt->regs->orig_ax = ctxt->fi.error_code;
1785 
1786 	switch (trapnr) {
1787 	case X86_TRAP_GP:
1788 		exc_general_protection(ctxt->regs, error_code);
1789 		break;
1790 	case X86_TRAP_UD:
1791 		exc_invalid_op(ctxt->regs);
1792 		break;
1793 	case X86_TRAP_PF:
1794 		write_cr2(ctxt->fi.cr2);
1795 		exc_page_fault(ctxt->regs, error_code);
1796 		break;
1797 	case X86_TRAP_AC:
1798 		exc_alignment_check(ctxt->regs, error_code);
1799 		break;
1800 	default:
1801 		pr_emerg("Unsupported exception in #VC instruction emulation - can't continue\n");
1802 		BUG();
1803 	}
1804 }
1805 
1806 static __always_inline bool is_vc2_stack(unsigned long sp)
1807 {
1808 	return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
1809 }
1810 
1811 static __always_inline bool vc_from_invalid_context(struct pt_regs *regs)
1812 {
1813 	unsigned long sp, prev_sp;
1814 
1815 	sp      = (unsigned long)regs;
1816 	prev_sp = regs->sp;
1817 
1818 	/*
1819 	 * If the code was already executing on the VC2 stack when the #VC
1820 	 * happened, let it proceed to the normal handling routine. This way the
1821 	 * code executing on the VC2 stack can cause #VC exceptions to get handled.
1822 	 */
1823 	return is_vc2_stack(sp) && !is_vc2_stack(prev_sp);
1824 }
1825 
1826 static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code)
1827 {
1828 	struct ghcb_state state;
1829 	struct es_em_ctxt ctxt;
1830 	enum es_result result;
1831 	struct ghcb *ghcb;
1832 	bool ret = true;
1833 
1834 	ghcb = __sev_get_ghcb(&state);
1835 
1836 	vc_ghcb_invalidate(ghcb);
1837 	result = vc_init_em_ctxt(&ctxt, regs, error_code);
1838 
1839 	if (result == ES_OK)
1840 		result = vc_handle_exitcode(&ctxt, ghcb, error_code);
1841 
1842 	__sev_put_ghcb(&state);
1843 
1844 	/* Done - now check the result */
1845 	switch (result) {
1846 	case ES_OK:
1847 		vc_finish_insn(&ctxt);
1848 		break;
1849 	case ES_UNSUPPORTED:
1850 		pr_err_ratelimited("Unsupported exit-code 0x%02lx in #VC exception (IP: 0x%lx)\n",
1851 				   error_code, regs->ip);
1852 		ret = false;
1853 		break;
1854 	case ES_VMM_ERROR:
1855 		pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
1856 				   error_code, regs->ip);
1857 		ret = false;
1858 		break;
1859 	case ES_DECODE_FAILED:
1860 		pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
1861 				   error_code, regs->ip);
1862 		ret = false;
1863 		break;
1864 	case ES_EXCEPTION:
1865 		vc_forward_exception(&ctxt);
1866 		break;
1867 	case ES_RETRY:
1868 		/* Nothing to do */
1869 		break;
1870 	default:
1871 		pr_emerg("Unknown result in %s():%d\n", __func__, result);
1872 		/*
1873 		 * Emulating the instruction which caused the #VC exception
1874 		 * failed - can't continue so print debug information
1875 		 */
1876 		BUG();
1877 	}
1878 
1879 	return ret;
1880 }
1881 
1882 static __always_inline bool vc_is_db(unsigned long error_code)
1883 {
1884 	return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB;
1885 }
1886 
1887 /*
1888  * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode
1889  * and will panic when an error happens.
1890  */
1891 DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
1892 {
1893 	irqentry_state_t irq_state;
1894 
1895 	/*
1896 	 * With the current implementation it is always possible to switch to a
1897 	 * safe stack because #VC exceptions only happen at known places, like
1898 	 * intercepted instructions or accesses to MMIO areas/IO ports. They can
1899 	 * also happen with code instrumentation when the hypervisor intercepts
1900 	 * #DB, but the critical paths are forbidden to be instrumented, so #DB
1901 	 * exceptions currently also only happen in safe places.
1902 	 *
1903 	 * But keep this here in case the noinstr annotations are violated due
1904 	 * to a bug elsewhere.
1905 	 */
1906 	if (unlikely(vc_from_invalid_context(regs))) {
1907 		instrumentation_begin();
1908 		panic("Can't handle #VC exception from unsupported context\n");
1909 		instrumentation_end();
1910 	}
1911 
1912 	/*
1913 	 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
1914 	 */
1915 	if (vc_is_db(error_code)) {
1916 		exc_debug(regs);
1917 		return;
1918 	}
1919 
1920 	irq_state = irqentry_nmi_enter(regs);
1921 
1922 	instrumentation_begin();
1923 
1924 	if (!vc_raw_handle_exception(regs, error_code)) {
1925 		/* Show some debug info */
1926 		show_regs(regs);
1927 
1928 		/* Ask the hypervisor to terminate the guest */
1929 		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
1930 
1931 		/* If that fails and we get here - just panic */
1932 		panic("Returned from Terminate-Request to Hypervisor\n");
1933 	}
1934 
1935 	instrumentation_end();
1936 	irqentry_nmi_exit(regs, irq_state);
1937 }
1938 
1939 /*
1940  * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode
1941  * and will kill the current task with SIGBUS when an error happens.
1942  */
1943 DEFINE_IDTENTRY_VC_USER(exc_vmm_communication)
1944 {
1945 	/*
1946 	 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
1947 	 */
1948 	if (vc_is_db(error_code)) {
1949 		noist_exc_debug(regs);
1950 		return;
1951 	}
1952 
1953 	irqentry_enter_from_user_mode(regs);
1954 	instrumentation_begin();
1955 
1956 	if (!vc_raw_handle_exception(regs, error_code)) {
1957 		/*
1958 		 * Do not kill the machine if user-space triggered the
1959 		 * exception. Send SIGBUS instead and let user-space deal with
1960 		 * it.
1961 		 */
1962 		force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
1963 	}
1964 
1965 	instrumentation_end();
1966 	irqentry_exit_to_user_mode(regs);
1967 }
1968 
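/*
 * Handle a #VC exception raised during early boot using the boot GHCB.
 * Returns true when the exception was handled; otherwise the guest is
 * terminated.
 */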
1969 bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
1970 {
1971 	unsigned long exit_code = regs->orig_ax;
1972 	struct es_em_ctxt ctxt;
1973 	enum es_result result;
1974 
1975 	vc_ghcb_invalidate(boot_ghcb);
1976 
1977 	result = vc_init_em_ctxt(&ctxt, regs, exit_code);
1978 	if (result == ES_OK)
1979 		result = vc_handle_exitcode(&ctxt, boot_ghcb, exit_code);
1980 
1981 	/* Done - now check the result */
1982 	switch (result) {
1983 	case ES_OK:
1984 		vc_finish_insn(&ctxt);
1985 		break;
1986 	case ES_UNSUPPORTED:
1987 		early_printk("PANIC: Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
1988 				exit_code, regs->ip);
1989 		goto fail;
1990 	case ES_VMM_ERROR:
1991 		early_printk("PANIC: Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
1992 				exit_code, regs->ip);
1993 		goto fail;
1994 	case ES_DECODE_FAILED:
1995 		early_printk("PANIC: Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
1996 				exit_code, regs->ip);
1997 		goto fail;
1998 	case ES_EXCEPTION:
1999 		vc_early_forward_exception(&ctxt);
2000 		break;
2001 	case ES_RETRY:
2002 		/* Nothing to do */
2003 		break;
2004 	default:
2005 		BUG();
2006 	}
2007 
2008 	return true;
2009 
2010 fail:
2011 	show_regs(regs);
2012 
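	/* Request termination from the hypervisor - sev_es_terminate() does not return. */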
2013 	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
2014 }
2015 
2016 /*
2017  * Initial set up of SNP relies on information provided by the
2018  * Confidential Computing blob, which can be passed to the kernel
2019  * in the following ways, depending on how it is booted:
2020  *
2021  * - when booted via the boot/decompress kernel:
2022  *   - via boot_params
2023  *
2024  * - when booted directly by firmware/bootloader (e.g. CONFIG_PVH):
2025  *   - via a setup_data entry, as defined by the Linux Boot Protocol
2026  *
2027  * Scan for the blob in that order.
2028  */
2029 static __init struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
2030 {
2031 	struct cc_blob_sev_info *cc_info;
2032 
2033 	/* The boot kernel would have passed the CC blob via boot_params. */
2034 	if (bp->cc_blob_address) {
2035 		cc_info = (struct cc_blob_sev_info *)(unsigned long)bp->cc_blob_address;
2036 		goto found_cc_info;
2037 	}
2038 
2039 	/*
2040 	 * If kernel was booted directly, without the use of the
2041 	 * boot/decompression kernel, the CC blob may have been passed via
2042 	 * setup_data instead.
2043 	 */
2044 	cc_info = find_cc_blob_setup_data(bp);
2045 	if (!cc_info)
2046 		return NULL;
2047 
2048 found_cc_info:
2049 	if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
2050 		snp_abort();
2051 
2052 	return cc_info;
2053 }
2054 
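/*
 * Initial SEV-SNP setup: locate the Confidential Computing blob, set up the
 * SNP CPUID table from it and cache the blob address in boot_params for
 * later use. Returns false when no CC blob can be found.
 */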
2055 bool __init snp_init(struct boot_params *bp)
2056 {
2057 	struct cc_blob_sev_info *cc_info;
2058 
2059 	if (!bp)
2060 		return false;
2061 
2062 	cc_info = find_cc_blob(bp);
2063 	if (!cc_info)
2064 		return false;
2065 
2066 	setup_cpuid_table(cc_info);
2067 
2068 	/*
2069 	 * The CC blob will be used later to access the secrets page. Cache
2070 	 * it here like the boot kernel does.
2071 	 */
2072 	bp->cc_blob_address = (u32)(unsigned long)cc_info;
2073 
2074 	return true;
2075 }
2076 
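/* Terminate the guest with the "SNP unsupported" reason code. */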
2077 void __init __noreturn snp_abort(void)
2078 {
2079 	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
2080 }
2081 
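/* Dump all entries of the SNP CPUID table for debugging. */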
2082 static void dump_cpuid_table(void)
2083 {
2084 	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
2085 	int i;
2086 
2087 	pr_info("count=%d reserved=0x%x reserved2=0x%llx\n",
2088 		cpuid_table->count, cpuid_table->__reserved1, cpuid_table->__reserved2);
2089 
2090 	for (i = 0; i < SNP_CPUID_COUNT_MAX; i++) {
2091 		const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];
2092 
2093 		pr_info("index=%3d fn=0x%08x subfn=0x%08x: eax=0x%08x ebx=0x%08x ecx=0x%08x edx=0x%08x xcr0_in=0x%016llx xss_in=0x%016llx reserved=0x%016llx\n",
2094 			i, fn->eax_in, fn->ecx_in, fn->eax, fn->ebx, fn->ecx,
2095 			fn->edx, fn->xcr0_in, fn->xss_in, fn->__reserved);
2096 	}
2097 }
2098 
2099 /*
2100  * From an auditing/testing perspective it is useful to give the guest
2101  * owner an easy way to verify that the CPUID table has been initialized
2102  * as expected. That initialization happens too early in boot to print
2103  * any sort of indicator, and there is no better place to do it later,
2104  * so do it here.
2105  */
2106 static int __init report_cpuid_table(void)
2107 {
2108 	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
2109 
2110 	if (!cpuid_table->count)
2111 		return 0;
2112 
2113 	pr_info("Using SNP CPUID table, %d entries present.\n",
2114 		cpuid_table->count);
2115 
2116 	if (sev_cfg.debug)
2117 		dump_cpuid_table();
2118 
2119 	return 0;
2120 }
2121 arch_initcall(report_cpuid_table);
2122 
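/* Parse the "sev=" command line option, e.g. "sev=debug". */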
2123 static int __init init_sev_config(char *str)
2124 {
2125 	char *s;
2126 
2127 	while ((s = strsep(&str, ","))) {
2128 		if (!strcmp(s, "debug")) {
2129 			sev_cfg.debug = true;
2130 			continue;
2131 		}
2132 
2133 		pr_info("SEV command-line option '%s' was not recognized\n", s);
2134 	}
2135 
2136 	return 1;
2137 }
2138 __setup("sev=", init_sev_config);
2139 
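/*
 * Issue a Guest Request or Extended Guest Request VMGEXIT to the
 * hypervisor. @input carries the GPAs of the request and response pages;
 * the firmware's result is returned in @rio->exitinfo2.
 */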
2140 int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio)
2141 {
2142 	struct ghcb_state state;
2143 	struct es_em_ctxt ctxt;
2144 	unsigned long flags;
2145 	struct ghcb *ghcb;
2146 	int ret;
2147 
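	/*
	 * Default to SEV_RET_NO_FW_CALL so that callers can tell when the
	 * request never reached the SEV firmware.
	 */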
2148 	rio->exitinfo2 = SEV_RET_NO_FW_CALL;
2149 
2150 	/*
2151 	 * __sev_get_ghcb() needs to run with IRQs disabled because it is using
2152 	 * a per-CPU GHCB.
2153 	 */
2154 	local_irq_save(flags);
2155 
2156 	ghcb = __sev_get_ghcb(&state);
2157 	if (!ghcb) {
2158 		ret = -EIO;
2159 		goto e_restore_irq;
2160 	}
2161 
2162 	vc_ghcb_invalidate(ghcb);
2163 
2164 	if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
2165 		ghcb_set_rax(ghcb, input->data_gpa);
2166 		ghcb_set_rbx(ghcb, input->data_npages);
2167 	}
2168 
2169 	ret = sev_es_ghcb_hv_call(ghcb, &ctxt, exit_code, input->req_gpa, input->resp_gpa);
2170 	if (ret)
2171 		goto e_put;
2172 
2173 	rio->exitinfo2 = ghcb->save.sw_exit_info_2;
2174 	switch (rio->exitinfo2) {
2175 	case 0:
2176 		break;
2177 
2178 	case SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_BUSY):
2179 		ret = -EAGAIN;
2180 		break;
2181 
2182 	case SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN):
2183 		/* The number of expected pages is returned in RBX */
2184 		if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
2185 			input->data_npages = ghcb_get_rbx(ghcb);
2186 			ret = -ENOSPC;
2187 			break;
2188 		}
2189 		fallthrough;
2190 	default:
2191 		ret = -EIO;
2192 		break;
2193 	}
2194 
2195 e_put:
2196 	__sev_put_ghcb(&state);
2197 e_restore_irq:
2198 	local_irq_restore(flags);
2199 
2200 	return ret;
2201 }
2202 EXPORT_SYMBOL_GPL(snp_issue_guest_request);
2203 
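/*
 * Platform device the sev-guest driver binds to in order to expose the SNP
 * guest request interface (e.g. attestation reports) to user space.
 */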
2204 static struct platform_device sev_guest_device = {
2205 	.name		= "sev-guest",
2206 	.id		= -1,
2207 };
2208 
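/*
 * Register the sev-guest platform device for SNP guests and pass it the
 * GPA of the secrets page needed by the driver.
 */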
2209 static int __init snp_init_platform_device(void)
2210 {
2211 	struct sev_guest_platform_data data;
2212 	u64 gpa;
2213 
2214 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
2215 		return -ENODEV;
2216 
2217 	gpa = get_secrets_page();
2218 	if (!gpa)
2219 		return -ENODEV;
2220 
2221 	data.secrets_gpa = gpa;
2222 	if (platform_device_add_data(&sev_guest_device, &data, sizeof(data)))
2223 		return -ENODEV;
2224 
2225 	if (platform_device_register(&sev_guest_device))
2226 		return -ENODEV;
2227 
2228 	pr_info("SNP guest platform device initialized.\n");
2229 	return 0;
2230 }
2231 device_initcall(snp_init_platform_device);
2232