// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2019 SUSE
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"SEV: " fmt

#include <linux/sched/debug.h>	/* For show_regs() */
#include <linux/percpu-defs.h>
#include <linux/cc_platform.h>
#include <linux/printk.h>
#include <linux/mm_types.h>
#include <linux/set_memory.h>
#include <linux/memblock.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/efi.h>
#include <linux/platform_device.h>
#include <linux/io.h>

#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/sev.h>
#include <asm/insn-eval.h>
#include <asm/fpu/xcr.h>
#include <asm/processor.h>
#include <asm/realmode.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/svm.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/cpuid.h>
#include <asm/cmdline.h>

#define DR7_RESET_VALUE        0x400

/* AP INIT values as documented in the APM2 section "Processor Initialization State" */
#define AP_INIT_CS_LIMIT		0xffff
#define AP_INIT_DS_LIMIT		0xffff
#define AP_INIT_LDTR_LIMIT		0xffff
#define AP_INIT_GDTR_LIMIT		0xffff
#define AP_INIT_IDTR_LIMIT		0xffff
#define AP_INIT_TR_LIMIT		0xffff
#define AP_INIT_RFLAGS_DEFAULT		0x2
#define AP_INIT_DR6_DEFAULT		0xffff0ff0
#define AP_INIT_GPAT_DEFAULT		0x0007040600070406ULL
#define AP_INIT_XCR0_DEFAULT		0x1
#define AP_INIT_X87_FTW_DEFAULT		0x5555
#define AP_INIT_X87_FCW_DEFAULT		0x0040
#define AP_INIT_CR0_DEFAULT		0x60000010
#define AP_INIT_MXCSR_DEFAULT		0x1f80

/* For early boot hypervisor communication in SEV-ES enabled guests */
static struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE);

/*
 * Needs to be in the .data section because we need it NULL before bss is
 * cleared
 */
static struct ghcb *boot_ghcb __section(".data");

/* Bitmap of SEV features supported by the hypervisor */
static u64 sev_hv_features __ro_after_init;

/* #VC handler runtime per-CPU data */
struct sev_es_runtime_data {
	struct ghcb ghcb_page;

	/*
	 * Reserve one page per CPU as backup storage for the unencrypted GHCB.
	 * It is needed when an NMI happens while the #VC handler uses the real
	 * GHCB, and the NMI handler itself is causing another #VC exception. In
	 * that case the GHCB content of the first handler needs to be backed up
	 * and restored.
	 */
	struct ghcb backup_ghcb;

	/*
	 * Mark the per-cpu GHCBs as in-use to detect nested #VC exceptions.
	 * There is no need for it to be atomic, because nothing is written to
	 * the GHCB between the read and the write of ghcb_active. So it is safe
	 * to use it when a nested #VC exception happens before the write.
	 *
	 * This is necessary for example in the #VC->NMI->#VC case when the NMI
	 * happens while the first #VC handler uses the GHCB. When the NMI code
	 * raises a second #VC handler it might overwrite the contents of the
	 * GHCB written by the first handler. To avoid this the content of the
	 * GHCB is saved and restored when the GHCB is detected to be in use
	 * already.
	 */
	bool ghcb_active;
	bool backup_ghcb_active;

	/*
	 * Cached DR7 value - write it on DR7 writes and return it on reads.
	 * That value will never make it to the real hardware DR7 as debugging
	 * is currently unsupported in SEV-ES guests.
	 */
	unsigned long dr7;
};

struct ghcb_state {
	struct ghcb *ghcb;
};

static DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
DEFINE_STATIC_KEY_FALSE(sev_es_enable_key);

static DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa);

struct sev_config {
	__u64 debug		: 1,
	      __reserved	: 63;
};

static struct sev_config sev_cfg __read_mostly;

static __always_inline bool on_vc_stack(struct pt_regs *regs)
{
	unsigned long sp = regs->sp;

	/* User-mode RSP is not trusted */
	if (user_mode(regs))
		return false;

	/* SYSCALL gap still has user-mode RSP */
	if (ip_within_syscall_gap(regs))
		return false;

	return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
}

/*
 * This function handles the case when an NMI is raised in the #VC
 * exception handler entry code, before the #VC handler has switched off
 * its IST stack. In this case, the IST entry for #VC must be adjusted,
 * so that any nested #VC exception will not overwrite the stack
 * contents of the interrupted #VC handler.
 *
 * The IST entry is adjusted unconditionally so that it can also be
 * unconditionally adjusted back in __sev_es_ist_exit(). Otherwise a
 * nested sev_es_ist_exit() call may adjust back the IST entry too
 * early.
 *
 * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run
 * on the NMI IST stack, as they are only called from NMI handling code
 * right now.
 */
void noinstr __sev_es_ist_enter(struct pt_regs *regs)
{
	unsigned long old_ist, new_ist;

	/* Read old IST entry */
	new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

	/*
	 * If NMI happened while on the #VC IST stack, set the new IST
	 * value below regs->sp, so that the interrupted stack frame is
	 * not overwritten by subsequent #VC exceptions.
	 */
	if (on_vc_stack(regs))
		new_ist = regs->sp;

	/*
	 * Reserve additional 8 bytes and store old IST value so this
	 * adjustment can be unrolled in __sev_es_ist_exit().
	 */
	new_ist -= sizeof(old_ist);
	*(unsigned long *)new_ist = old_ist;

	/* Set new IST entry */
	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
}

void noinstr __sev_es_ist_exit(void)
{
	unsigned long ist;

	/* Read IST entry */
	ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

	if (WARN_ON(ist == __this_cpu_ist_top_va(VC)))
		return;

	/* Read back old IST entry and write it to the TSS */
	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
}

/*
 * Nothing shall interrupt this code path while holding the per-CPU
 * GHCB. The backup GHCB is only for NMIs interrupting this path.
 *
 * Callers must disable local interrupts around it.
 */
static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	WARN_ON(!irqs_disabled());

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (unlikely(data->ghcb_active)) {
		/* GHCB is already in use - save its contents */

		if (unlikely(data->backup_ghcb_active)) {
			/*
			 * Backup-GHCB is also already in use. There is no way
			 * to continue here so just kill the machine. To make
			 * panic() work, mark GHCBs inactive so that messages
			 * can be printed out.
			 */
			data->ghcb_active = false;
			data->backup_ghcb_active = false;

			instrumentation_begin();
			panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
			instrumentation_end();
		}

		/* Mark backup_ghcb active before writing to it */
		data->backup_ghcb_active = true;

		state->ghcb = &data->backup_ghcb;

		/* Backup GHCB content */
		*state->ghcb = *ghcb;
	} else {
		state->ghcb = NULL;
		data->ghcb_active = true;
	}

	return ghcb;
}

static inline u64 sev_es_rd_ghcb_msr(void)
{
	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
}

static __always_inline void sev_es_wr_ghcb_msr(u64 val)
{
	u32 low, high;

	low  = (u32)(val);
	high = (u32)(val >> 32);

	native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
}

static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
				unsigned char *buffer)
{
	return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
}

static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt)
{
	char buffer[MAX_INSN_SIZE];
	int insn_bytes;

	insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
	if (insn_bytes == 0) {
		/* Nothing could be copied */
		ctxt->fi.vector = X86_TRAP_PF;
		ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
		ctxt->fi.cr2 = ctxt->regs->ip;
		return ES_EXCEPTION;
	} else if (insn_bytes == -EINVAL) {
		/* Effective RIP could not be calculated */
		ctxt->fi.vector = X86_TRAP_GP;
		ctxt->fi.error_code = 0;
		ctxt->fi.cr2 = 0;
		return ES_EXCEPTION;
	}

	if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, insn_bytes))
		return ES_DECODE_FAILED;

	if (ctxt->insn.immediate.got)
		return ES_OK;
	else
		return ES_DECODE_FAILED;
}

static enum es_result __vc_decode_kern_insn(struct es_em_ctxt *ctxt)
{
	char buffer[MAX_INSN_SIZE];
	int res, ret;

	res = vc_fetch_insn_kernel(ctxt, buffer);
	if (res) {
		ctxt->fi.vector = X86_TRAP_PF;
		ctxt->fi.error_code = X86_PF_INSTR;
		ctxt->fi.cr2 = ctxt->regs->ip;
		return ES_EXCEPTION;
	}

	ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
	if (ret < 0)
		return ES_DECODE_FAILED;
	else
		return ES_OK;
}

static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
{
	if (user_mode(ctxt->regs))
		return __vc_decode_user_insn(ctxt);
	else
		return __vc_decode_kern_insn(ctxt);
}

static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
				   char *dst, char *buf, size_t size)
{
	unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;

	/*
	 * This function uses __put_user() independent of whether kernel or user
	 * memory is accessed. This works fine because __put_user() does no
	 * sanity checks of the pointer being accessed. All that it does is
	 * to report when the access failed.
	 *
	 * Also, this function runs in atomic context, so __put_user() is not
	 * allowed to sleep. The page-fault handler detects that it is running
	 * in atomic context and will not try to take mmap_sem and handle the
	 * fault, so additional pagefault_enable()/disable() calls are not
	 * needed.
	 *
	 * The access can't be done via copy_to_user() here because
	 * vc_write_mem() must not use string instructions to access unsafe
	 * memory. The reason is that MOVS is emulated by the #VC handler by
	 * splitting the move up into a read and a write and taking a nested #VC
	 * exception on whatever of them is the MMIO access. Using string
	 * instructions here would cause infinite nesting.
	 */
	switch (size) {
	case 1: {
		u8 d1;
		u8 __user *target = (u8 __user *)dst;

		memcpy(&d1, buf, 1);
		if (__put_user(d1, target))
			goto fault;
		break;
	}
	case 2: {
		u16 d2;
		u16 __user *target = (u16 __user *)dst;

		memcpy(&d2, buf, 2);
		if (__put_user(d2, target))
			goto fault;
		break;
	}
	case 4: {
		u32 d4;
		u32 __user *target = (u32 __user *)dst;

		memcpy(&d4, buf, 4);
		if (__put_user(d4, target))
			goto fault;
		break;
	}
	case 8: {
		u64 d8;
		u64 __user *target = (u64 __user *)dst;

		memcpy(&d8, buf, 8);
		if (__put_user(d8, target))
			goto fault;
		break;
	}
	default:
		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
		return ES_UNSUPPORTED;
	}

	return ES_OK;

fault:
	if (user_mode(ctxt->regs))
		error_code |= X86_PF_USER;

	ctxt->fi.vector = X86_TRAP_PF;
	ctxt->fi.error_code = error_code;
	ctxt->fi.cr2 = (unsigned long)dst;

	return ES_EXCEPTION;
}

static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
				  char *src, char *buf, size_t size)
{
	unsigned long error_code = X86_PF_PROT;

	/*
	 * This function uses __get_user() independent of whether kernel or user
	 * memory is accessed. This works fine because __get_user() does no
	 * sanity checks of the pointer being accessed. All that it does is
	 * to report when the access failed.
	 *
	 * Also, this function runs in atomic context, so __get_user() is not
	 * allowed to sleep. The page-fault handler detects that it is running
	 * in atomic context and will not try to take mmap_sem and handle the
	 * fault, so additional pagefault_enable()/disable() calls are not
	 * needed.
	 *
	 * The access can't be done via copy_from_user() here because
	 * vc_read_mem() must not use string instructions to access unsafe
	 * memory. The reason is that MOVS is emulated by the #VC handler by
	 * splitting the move up into a read and a write and taking a nested #VC
	 * exception on whatever of them is the MMIO access. Using string
	 * instructions here would cause infinite nesting.
	 */
	switch (size) {
	case 1: {
		u8 d1;
		u8 __user *s = (u8 __user *)src;

		if (__get_user(d1, s))
			goto fault;
		memcpy(buf, &d1, 1);
		break;
	}
	case 2: {
		u16 d2;
		u16 __user *s = (u16 __user *)src;

		if (__get_user(d2, s))
			goto fault;
		memcpy(buf, &d2, 2);
		break;
	}
	case 4: {
		u32 d4;
		u32 __user *s = (u32 __user *)src;

		if (__get_user(d4, s))
			goto fault;
		memcpy(buf, &d4, 4);
		break;
	}
	case 8: {
		u64 d8;
		u64 __user *s = (u64 __user *)src;

		if (__get_user(d8, s))
			goto fault;
		memcpy(buf, &d8, 8);
		break;
	}
	default:
		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
		return ES_UNSUPPORTED;
	}

	return ES_OK;

fault:
	if (user_mode(ctxt->regs))
		error_code |= X86_PF_USER;

	ctxt->fi.vector = X86_TRAP_PF;
	ctxt->fi.error_code = error_code;
	ctxt->fi.cr2 = (unsigned long)src;

	return ES_EXCEPTION;
}

static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
					   unsigned long vaddr, phys_addr_t *paddr)
{
	unsigned long va = (unsigned long)vaddr;
	unsigned int level;
	phys_addr_t pa;
	pgd_t *pgd;
	pte_t *pte;

	pgd = __va(read_cr3_pa());
	pgd = &pgd[pgd_index(va)];
	pte = lookup_address_in_pgd(pgd, va, &level);
	if (!pte) {
		ctxt->fi.vector = X86_TRAP_PF;
		ctxt->fi.cr2 = vaddr;
		ctxt->fi.error_code = 0;

		if (user_mode(ctxt->regs))
			ctxt->fi.error_code |= X86_PF_USER;

		return ES_EXCEPTION;
	}

	if (WARN_ON_ONCE(pte_val(*pte) & _PAGE_ENC))
		/* Emulated MMIO to/from encrypted memory not supported */
		return ES_UNSUPPORTED;

	pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
	pa |= va & ~page_level_mask(level);

	*paddr = pa;

	return ES_OK;
}

/* Include code shared with pre-decompression boot stage */
#include "sev-shared.c"

static noinstr void __sev_put_ghcb(struct ghcb_state *state)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	WARN_ON(!irqs_disabled());

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (state->ghcb) {
		/* Restore GHCB from Backup */
		*ghcb = *state->ghcb;
		data->backup_ghcb_active = false;
		state->ghcb = NULL;
	} else {
		/*
		 * Invalidate the GHCB so a VMGEXIT instruction issued
		 * from userspace won't appear to be valid.
		 */
		vc_ghcb_invalidate(ghcb);
		data->ghcb_active = false;
	}
}

void noinstr __sev_es_nmi_complete(void)
{
	struct ghcb_state state;
	struct ghcb *ghcb;

	ghcb = __sev_get_ghcb(&state);

	vc_ghcb_invalidate(ghcb);
	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
	ghcb_set_sw_exit_info_1(ghcb, 0);
	ghcb_set_sw_exit_info_2(ghcb, 0);

	sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
	VMGEXIT();

	__sev_put_ghcb(&state);
}

static u64 __init get_secrets_page(void)
{
	u64 pa_data = boot_params.cc_blob_address;
	struct cc_blob_sev_info info;
	void *map;

	/*
	 * The CC blob contains the address of the secrets page, check if the
	 * blob is present.
	 */
	if (!pa_data)
		return 0;

	map = early_memremap(pa_data, sizeof(info));
	if (!map) {
		pr_err("Unable to locate SNP secrets page: failed to map the Confidential Computing blob.\n");
		return 0;
	}
	memcpy(&info, map, sizeof(info));
	early_memunmap(map, sizeof(info));

	/* smoke-test the secrets page passed */
	if (!info.secrets_phys || info.secrets_len != PAGE_SIZE)
		return 0;

	return info.secrets_phys;
}

static u64 __init get_snp_jump_table_addr(void)
{
	struct snp_secrets_page_layout *layout;
	void __iomem *mem;
	u64 pa, addr;

	pa = get_secrets_page();
	if (!pa)
		return 0;

	mem = ioremap_encrypted(pa, PAGE_SIZE);
	if (!mem) {
		pr_err("Unable to locate AP jump table address: failed to map the SNP secrets page.\n");
		return 0;
	}

	layout = (__force struct snp_secrets_page_layout *)mem;

	addr = layout->os_area.ap_jump_table_pa;
	iounmap(mem);

	return addr;
}

static u64 __init get_jump_table_addr(void)
{
	struct ghcb_state state;
	unsigned long flags;
	struct ghcb *ghcb;
	u64 ret = 0;

	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return get_snp_jump_table_addr();

	local_irq_save(flags);

	ghcb = __sev_get_ghcb(&state);

	vc_ghcb_invalidate(ghcb);
	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
	ghcb_set_sw_exit_info_1(ghcb, SVM_VMGEXIT_GET_AP_JUMP_TABLE);
	ghcb_set_sw_exit_info_2(ghcb, 0);

	sev_es_wr_ghcb_msr(__pa(ghcb));
	VMGEXIT();

	if (ghcb_sw_exit_info_1_is_valid(ghcb) &&
	    ghcb_sw_exit_info_2_is_valid(ghcb))
		ret = ghcb->save.sw_exit_info_2;

	__sev_put_ghcb(&state);

	local_irq_restore(flags);

	return ret;
}

static void pvalidate_pages(unsigned long vaddr, unsigned int npages, bool validate)
{
	unsigned long vaddr_end;
	int rc;

	vaddr = vaddr & PAGE_MASK;
	vaddr_end = vaddr + (npages << PAGE_SHIFT);

	while (vaddr < vaddr_end) {
		rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
		if (WARN(rc, "Failed to validate address 0x%lx ret %d", vaddr, rc))
			sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);

		vaddr = vaddr + PAGE_SIZE;
	}
}

static void __init early_set_pages_state(unsigned long paddr, unsigned int npages, enum psc_op op)
{
	unsigned long paddr_end;
	u64 val;

	paddr = paddr & PAGE_MASK;
	paddr_end = paddr + (npages << PAGE_SHIFT);

	while (paddr < paddr_end) {
		/*
		 * Use the MSR protocol because this function can be called before
		 * the GHCB is established.
		 */
		sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
		VMGEXIT();

		val = sev_es_rd_ghcb_msr();

		if (WARN(GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP,
			 "Wrong PSC response code: 0x%x\n",
			 (unsigned int)GHCB_RESP_CODE(val)))
			goto e_term;

		if (WARN(GHCB_MSR_PSC_RESP_VAL(val),
			 "Failed to change page state to '%s' paddr 0x%lx error 0x%llx\n",
			 op == SNP_PAGE_STATE_PRIVATE ? "private" : "shared",
			 paddr, GHCB_MSR_PSC_RESP_VAL(val)))
			goto e_term;

		paddr = paddr + PAGE_SIZE;
	}

	return;

e_term:
	sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
}

void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
					 unsigned int npages)
{
	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return;

	/*
	 * Ask the hypervisor to mark the memory pages as private in the RMP
	 * table.
	 */
	early_set_pages_state(paddr, npages, SNP_PAGE_STATE_PRIVATE);

	/* Validate the memory pages after they've been added in the RMP table. */
	pvalidate_pages(vaddr, npages, true);
}

void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
					unsigned int npages)
{
	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return;

	/* Invalidate the memory pages before they are marked shared in the RMP table. */
	pvalidate_pages(vaddr, npages, false);

	/* Ask hypervisor to mark the memory pages shared in the RMP table. */
	early_set_pages_state(paddr, npages, SNP_PAGE_STATE_SHARED);
}

void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op)
{
	unsigned long vaddr, npages;

	vaddr = (unsigned long)__va(paddr);
	npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;

	if (op == SNP_PAGE_STATE_PRIVATE)
		early_snp_set_memory_private(vaddr, paddr, npages);
	else if (op == SNP_PAGE_STATE_SHARED)
		early_snp_set_memory_shared(vaddr, paddr, npages);
	else
		WARN(1, "invalid memory op %d\n", op);
}

static int vmgexit_psc(struct snp_psc_desc *desc)
{
	int cur_entry, end_entry, ret = 0;
	struct snp_psc_desc *data;
	struct ghcb_state state;
	struct es_em_ctxt ctxt;
	unsigned long flags;
	struct ghcb *ghcb;

	/*
	 * __sev_get_ghcb() needs to run with IRQs disabled because it is using
	 * a per-CPU GHCB.
	 */
	local_irq_save(flags);

	ghcb = __sev_get_ghcb(&state);
	if (!ghcb) {
		ret = 1;
		goto out_unlock;
	}

	/* Copy the input desc into GHCB shared buffer */
	data = (struct snp_psc_desc *)ghcb->shared_buffer;
	memcpy(ghcb->shared_buffer, desc, min_t(int, GHCB_SHARED_BUF_SIZE, sizeof(*desc)));

	/*
	 * As per the GHCB specification, the hypervisor can resume the guest
	 * before processing all the entries. Check whether all the entries
	 * are processed. If not, then keep retrying. Note, the hypervisor
	 * will update the data memory directly to indicate the status, so
	 * reference the data->hdr everywhere.
	 *
	 * The strategy here is to wait for the hypervisor to change the page
	 * state in the RMP table before the guest accesses the memory pages. If the
	 * page state change was not successful, then later memory access will
	 * result in a crash.
	 */
	cur_entry = data->hdr.cur_entry;
	end_entry = data->hdr.end_entry;

	while (data->hdr.cur_entry <= data->hdr.end_entry) {
		ghcb_set_sw_scratch(ghcb, (u64)__pa(data));

		/* This will advance the shared buffer data points to. */
		ret = sev_es_ghcb_hv_call(ghcb, true, &ctxt, SVM_VMGEXIT_PSC, 0, 0);

		/*
		 * Page State Change VMGEXIT can pass error code through
		 * exit_info_2.
		 */
		if (WARN(ret || ghcb->save.sw_exit_info_2,
			 "SNP: PSC failed ret=%d exit_info_2=%llx\n",
			 ret, ghcb->save.sw_exit_info_2)) {
			ret = 1;
			goto out;
		}

		/* Verify that reserved bit is not set */
		if (WARN(data->hdr.reserved, "Reserved bit is set in the PSC header\n")) {
			ret = 1;
			goto out;
		}

		/*
		 * Sanity check that entry processing is not going backwards.
		 * This will happen only if the hypervisor is tricking us.
		 */
		if (WARN(data->hdr.end_entry > end_entry || cur_entry > data->hdr.cur_entry,
			 "SNP: PSC processing going backward, end_entry %d (got %d) cur_entry %d (got %d)\n",
			 end_entry, data->hdr.end_entry, cur_entry, data->hdr.cur_entry)) {
			ret = 1;
			goto out;
		}
	}

out:
	__sev_put_ghcb(&state);

out_unlock:
	local_irq_restore(flags);

	return ret;
}

static void __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
			      unsigned long vaddr_end, int op)
{
	struct psc_hdr *hdr;
	struct psc_entry *e;
	unsigned long pfn;
	int i;

	hdr = &data->hdr;
	e = data->entries;

	memset(data, 0, sizeof(*data));
	i = 0;

	while (vaddr < vaddr_end) {
		if (is_vmalloc_addr((void *)vaddr))
			pfn = vmalloc_to_pfn((void *)vaddr);
		else
			pfn = __pa(vaddr) >> PAGE_SHIFT;

		e->gfn = pfn;
		e->operation = op;
		hdr->end_entry = i;

		/*
		 * Current SNP implementation doesn't keep track of the RMP page
		 * size so use 4K for simplicity.
		 */
		e->pagesize = RMP_PG_SIZE_4K;

		vaddr = vaddr + PAGE_SIZE;
		e++;
		i++;
	}

	if (vmgexit_psc(data))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
}

static void set_pages_state(unsigned long vaddr, unsigned int npages, int op)
{
	unsigned long vaddr_end, next_vaddr;
	struct snp_psc_desc *desc;

	desc = kmalloc(sizeof(*desc), GFP_KERNEL_ACCOUNT);
	if (!desc)
		panic("SNP: failed to allocate memory for PSC descriptor\n");

	vaddr = vaddr & PAGE_MASK;
	vaddr_end = vaddr + (npages << PAGE_SHIFT);

	while (vaddr < vaddr_end) {
		/* Calculate the last vaddr that fits in one struct snp_psc_desc. */
		next_vaddr = min_t(unsigned long, vaddr_end,
				   (VMGEXIT_PSC_MAX_ENTRY * PAGE_SIZE) + vaddr);

		__set_pages_state(desc, vaddr, next_vaddr, op);

		vaddr = next_vaddr;
	}

	kfree(desc);
}

void snp_set_memory_shared(unsigned long vaddr, unsigned int npages)
{
	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return;

	pvalidate_pages(vaddr, npages, false);

	set_pages_state(vaddr, npages, SNP_PAGE_STATE_SHARED);
}

void snp_set_memory_private(unsigned long vaddr, unsigned int npages)
{
	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return;

	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);

	pvalidate_pages(vaddr, npages, true);
}

static int snp_set_vmsa(void *va, bool vmsa)
{
	u64 attrs;

	/*
	 * Running at VMPL0 allows the kernel to change the VMSA bit for a page
	 * using the RMPADJUST instruction. However, for the instruction to
	 * succeed it must target the permissions of a lesser privileged
	 * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
	 * instruction in the AMD64 APM Volume 3).
	 */
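	/*
	 * Note on the encoding used below: attrs == 1 selects VMPL1 as the
	 * RMPADJUST target, and RMPADJUST_VMSA_PAGE_BIT is ORed in to mark
	 * the page as a VMSA page (or left clear to convert it back into a
	 * regular guest page).
	 */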
	attrs = 1;
	if (vmsa)
		attrs |= RMPADJUST_VMSA_PAGE_BIT;

	return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
}

#define __ATTR_BASE		(SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK)
#define INIT_CS_ATTRIBS		(__ATTR_BASE | SVM_SELECTOR_READ_MASK | SVM_SELECTOR_CODE_MASK)
#define INIT_DS_ATTRIBS		(__ATTR_BASE | SVM_SELECTOR_WRITE_MASK)

#define INIT_LDTR_ATTRIBS	(SVM_SELECTOR_P_MASK | 2)
#define INIT_TR_ATTRIBS		(SVM_SELECTOR_P_MASK | 3)

static void *snp_alloc_vmsa_page(void)
{
	struct page *p;

	/*
	 * Allocate VMSA page to work around the SNP erratum where the CPU will
	 * incorrectly signal an RMP violation #PF if a large page (2MB or 1GB)
	 * collides with the RMP entry of VMSA page. The recommended workaround
	 * is to not use a large page.
	 *
	 * Allocate an 8k page which is also 8k-aligned.
	 */
	p = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO, 1);
	if (!p)
		return NULL;

	split_page(p, 1);

	/* Free the first 4k. This page may be 2M/1G aligned and cannot be used. */
	__free_page(p);

	return page_address(p + 1);
}

static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
{
	int err;

	err = snp_set_vmsa(vmsa, false);
	if (err)
		pr_err("clear VMSA page failed (%u), leaking page\n", err);
	else
		free_page((unsigned long)vmsa);
}

static int wakeup_cpu_via_vmgexit(int apic_id, unsigned long start_ip)
{
	struct sev_es_save_area *cur_vmsa, *vmsa;
	struct ghcb_state state;
	unsigned long flags;
	struct ghcb *ghcb;
	u8 sipi_vector;
	int cpu, ret;
	u64 cr4;

	/*
	 * The hypervisor SNP feature support check has happened earlier, just check
	 * the AP_CREATION one here.
	 */
	if (!(sev_hv_features & GHCB_HV_FT_SNP_AP_CREATION))
		return -EOPNOTSUPP;

	/*
	 * Verify the desired start IP against the known trampoline start IP
	 * to catch any future new trampolines that may be introduced that
	 * would require a new protected guest entry point.
	 */
	if (WARN_ONCE(start_ip != real_mode_header->trampoline_start,
		      "Unsupported SNP start_ip: %lx\n", start_ip))
		return -EINVAL;

	/* Override start_ip with known protected guest start IP */
	start_ip = real_mode_header->sev_es_trampoline_start;

	/* Find the logical CPU for the APIC ID */
	for_each_present_cpu(cpu) {
		if (arch_match_cpu_phys_id(cpu, apic_id))
			break;
	}
	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	cur_vmsa = per_cpu(sev_vmsa, cpu);

	/*
	 * A new VMSA is created each time because there is no guarantee that
	 * the current VMSA is the kernel's or that the vCPU is not running. If
	 * an attempt was done to use the current VMSA with a running vCPU, a
	 * #VMEXIT of that vCPU would wipe out all of the settings being done
	 * here.
	 */
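	/*
	 * In short, the rest of this function: allocate a fresh VMSA page,
	 * fill it with the AP INIT register state defined at the top of this
	 * file, convert the page to a VMSA page with RMPADJUST, and then ask
	 * the hypervisor to create the vCPU via an SVM_VMGEXIT_AP_CREATION
	 * event that carries the VMSA's physical address.
	 */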
	vmsa = (struct sev_es_save_area *)snp_alloc_vmsa_page();
	if (!vmsa)
		return -ENOMEM;

	/* CR4 should maintain the MCE value */
	cr4 = native_read_cr4() & X86_CR4_MCE;

	/* Set the CS value based on the start_ip converted to a SIPI vector */
	sipi_vector = (start_ip >> 12);
	vmsa->cs.base = sipi_vector << 12;
	vmsa->cs.limit = AP_INIT_CS_LIMIT;
	vmsa->cs.attrib = INIT_CS_ATTRIBS;
	vmsa->cs.selector = sipi_vector << 8;

	/* Set the RIP value based on start_ip */
	vmsa->rip = start_ip & 0xfff;

	/* Set AP INIT defaults as documented in the APM */
	vmsa->ds.limit = AP_INIT_DS_LIMIT;
	vmsa->ds.attrib = INIT_DS_ATTRIBS;
	vmsa->es = vmsa->ds;
	vmsa->fs = vmsa->ds;
	vmsa->gs = vmsa->ds;
	vmsa->ss = vmsa->ds;

	vmsa->gdtr.limit = AP_INIT_GDTR_LIMIT;
	vmsa->ldtr.limit = AP_INIT_LDTR_LIMIT;
	vmsa->ldtr.attrib = INIT_LDTR_ATTRIBS;
	vmsa->idtr.limit = AP_INIT_IDTR_LIMIT;
	vmsa->tr.limit = AP_INIT_TR_LIMIT;
	vmsa->tr.attrib = INIT_TR_ATTRIBS;

	vmsa->cr4 = cr4;
	vmsa->cr0 = AP_INIT_CR0_DEFAULT;
	vmsa->dr7 = DR7_RESET_VALUE;
	vmsa->dr6 = AP_INIT_DR6_DEFAULT;
	vmsa->rflags = AP_INIT_RFLAGS_DEFAULT;
	vmsa->g_pat = AP_INIT_GPAT_DEFAULT;
	vmsa->xcr0 = AP_INIT_XCR0_DEFAULT;
	vmsa->mxcsr = AP_INIT_MXCSR_DEFAULT;
	vmsa->x87_ftw = AP_INIT_X87_FTW_DEFAULT;
	vmsa->x87_fcw = AP_INIT_X87_FCW_DEFAULT;

	/* SVME must be set. */
	vmsa->efer = EFER_SVME;

	/*
	 * Set the SNP-specific fields for this VMSA:
	 *   VMPL level
	 *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
	 */
	vmsa->vmpl = 0;
	vmsa->sev_features = sev_status >> 2;

	/* Switch the page over to a VMSA page now that it is initialized */
	ret = snp_set_vmsa(vmsa, true);
	if (ret) {
		pr_err("set VMSA page failed (%u)\n", ret);
		free_page((unsigned long)vmsa);

		return -EINVAL;
	}

	/* Issue VMGEXIT AP Creation NAE event */
	local_irq_save(flags);

	ghcb = __sev_get_ghcb(&state);

	vc_ghcb_invalidate(ghcb);
	ghcb_set_rax(ghcb, vmsa->sev_features);
	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_CREATION);
	ghcb_set_sw_exit_info_1(ghcb, ((u64)apic_id << 32) | SVM_VMGEXIT_AP_CREATE);
	ghcb_set_sw_exit_info_2(ghcb, __pa(vmsa));

	sev_es_wr_ghcb_msr(__pa(ghcb));
	VMGEXIT();

	if (!ghcb_sw_exit_info_1_is_valid(ghcb) ||
	    lower_32_bits(ghcb->save.sw_exit_info_1)) {
		pr_err("SNP AP Creation error\n");
		ret = -EINVAL;
	}

	__sev_put_ghcb(&state);

	local_irq_restore(flags);

	/* Perform cleanup if there was an error */
	if (ret) {
		snp_cleanup_vmsa(vmsa);
		vmsa = NULL;
	}

	/* Free up any previous VMSA page */
	if (cur_vmsa)
		snp_cleanup_vmsa(cur_vmsa);

	/* Record the current VMSA page */
	per_cpu(sev_vmsa, cpu) = vmsa;

	return ret;
}

void snp_set_wakeup_secondary_cpu(void)
{
	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return;

	/*
	 * Always set this override if SNP is enabled. This makes it the
	 * required method to start APs under SNP. If the hypervisor does
	 * not support AP creation, then no APs will be started.
	 */
	apic->wakeup_secondary_cpu = wakeup_cpu_via_vmgexit;
}

int __init sev_es_setup_ap_jump_table(struct real_mode_header *rmh)
{
	u16 startup_cs, startup_ip;
	phys_addr_t jump_table_pa;
	u64 jump_table_addr;
	u16 __iomem *jump_table;

	jump_table_addr = get_jump_table_addr();

	/* On UP guests there is no jump table so this is not a failure */
	if (!jump_table_addr)
		return 0;

	/* Check if AP Jump Table is page-aligned */
	if (jump_table_addr & ~PAGE_MASK)
		return -EINVAL;

	jump_table_pa = jump_table_addr & PAGE_MASK;

	startup_cs = (u16)(rmh->trampoline_start >> 4);
	startup_ip = (u16)(rmh->sev_es_trampoline_start -
			   rmh->trampoline_start);

	jump_table = ioremap_encrypted(jump_table_pa, PAGE_SIZE);
	if (!jump_table)
		return -EIO;

	writew(startup_ip, &jump_table[0]);
	writew(startup_cs, &jump_table[1]);

	iounmap(jump_table);

	return 0;
}

/*
 * This is needed by the OVMF UEFI firmware which will use whatever it finds in
 * the GHCB MSR as its GHCB to talk to the hypervisor. So make sure the per-cpu
 * runtime GHCBs used by the kernel are also mapped in the EFI page-table.
 */
int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
{
	struct sev_es_runtime_data *data;
	unsigned long address, pflags;
	int cpu;
	u64 pfn;

	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		return 0;

	pflags = _PAGE_NX | _PAGE_RW;

	for_each_possible_cpu(cpu) {
		data = per_cpu(runtime_data, cpu);

		address = __pa(&data->ghcb_page);
		pfn = address >> PAGE_SHIFT;

		if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags))
			return 1;
	}

	return 0;
}

static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	struct pt_regs *regs = ctxt->regs;
	enum es_result ret;
	u64 exit_info_1;

	/* Is it a WRMSR? */
	exit_info_1 = (ctxt->insn.opcode.bytes[1] == 0x30) ? 1 : 0;

	ghcb_set_rcx(ghcb, regs->cx);
	if (exit_info_1) {
		ghcb_set_rax(ghcb, regs->ax);
		ghcb_set_rdx(ghcb, regs->dx);
	}

	ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_MSR,
				  exit_info_1, 0);

	if ((ret == ES_OK) && (!exit_info_1)) {
		regs->ax = ghcb->save.rax;
		regs->dx = ghcb->save.rdx;
	}

	return ret;
}

static void snp_register_per_cpu_ghcb(void)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	snp_register_ghcb_early(__pa(ghcb));
}

void setup_ghcb(void)
{
	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		return;

	/* First make sure the hypervisor talks a supported protocol. */
	if (!sev_es_negotiate_protocol())
		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);

	/*
	 * Check whether the runtime #VC exception handler is active. It uses
	 * the per-CPU GHCB page which is set up by sev_es_init_vc_handling().
	 *
	 * If SNP is active, register the per-CPU GHCB page so that the runtime
	 * exception handler can use it.
	 */
	if (initial_vc_handler == (unsigned long)kernel_exc_vmm_communication) {
		if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
			snp_register_per_cpu_ghcb();

		return;
	}

	/*
	 * Clear the boot_ghcb. The first exception comes in before the bss
	 * section is cleared.
	 */
	memset(&boot_ghcb_page, 0, PAGE_SIZE);

	/* Alright - Make the boot-ghcb public */
	boot_ghcb = &boot_ghcb_page;

	/* SNP guest requires that GHCB GPA must be registered. */
	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		snp_register_ghcb_early(__pa(&boot_ghcb_page));
}

#ifdef CONFIG_HOTPLUG_CPU
static void sev_es_ap_hlt_loop(void)
{
	struct ghcb_state state;
	struct ghcb *ghcb;

	ghcb = __sev_get_ghcb(&state);

	while (true) {
		vc_ghcb_invalidate(ghcb);
		ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_HLT_LOOP);
		ghcb_set_sw_exit_info_1(ghcb, 0);
		ghcb_set_sw_exit_info_2(ghcb, 0);

		sev_es_wr_ghcb_msr(__pa(ghcb));
		VMGEXIT();

		/* Wakeup signal? */
		if (ghcb_sw_exit_info_2_is_valid(ghcb) &&
		    ghcb->save.sw_exit_info_2)
			break;
	}

	__sev_put_ghcb(&state);
}

/*
 * Play_dead handler when running under SEV-ES. This is needed because
 * the hypervisor can't deliver an SIPI request to restart the AP.
 * Instead the kernel has to issue a VMGEXIT to halt the VCPU until the
 * hypervisor wakes it up again.
 */
static void sev_es_play_dead(void)
{
	play_dead_common();

	/* IRQs now disabled */

	sev_es_ap_hlt_loop();

	/*
	 * If we get here, the VCPU was woken up again. Jump to CPU
	 * startup code to get it back online.
	 */
	start_cpu0();
}
#else  /* CONFIG_HOTPLUG_CPU */
#define sev_es_play_dead	native_play_dead
#endif /* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_SMP
static void __init sev_es_setup_play_dead(void)
{
	smp_ops.play_dead = sev_es_play_dead;
}
#else
static inline void sev_es_setup_play_dead(void) { }
#endif

static void __init alloc_runtime_data(int cpu)
{
	struct sev_es_runtime_data *data;

	data = memblock_alloc(sizeof(*data), PAGE_SIZE);
	if (!data)
		panic("Can't allocate SEV-ES runtime data");

	per_cpu(runtime_data, cpu) = data;
}

static void __init init_ghcb(int cpu)
{
	struct sev_es_runtime_data *data;
	int err;

	data = per_cpu(runtime_data, cpu);

	err = early_set_memory_decrypted((unsigned long)&data->ghcb_page,
					 sizeof(data->ghcb_page));
	if (err)
		panic("Can't map GHCBs unencrypted");

	memset(&data->ghcb_page, 0, sizeof(data->ghcb_page));

	data->ghcb_active = false;
	data->backup_ghcb_active = false;
}

void __init sev_es_init_vc_handling(void)
{
	int cpu;

	BUILD_BUG_ON(offsetof(struct sev_es_runtime_data, ghcb_page) % PAGE_SIZE);

	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		return;

	if (!sev_es_check_cpu_features())
		panic("SEV-ES CPU Features missing");

	/*
	 * SNP is supported in v2 of the GHCB spec which mandates support for HV
	 * features.
	 */
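	/*
	 * get_hv_features() (shared code in sev-shared.c) queries the
	 * hypervisor's advertised feature bitmap via the GHCB MSR protocol;
	 * an SNP guest cannot continue unless GHCB_HV_FT_SNP is set there.
	 */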
	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) {
		sev_hv_features = get_hv_features();

		if (!(sev_hv_features & GHCB_HV_FT_SNP))
			sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
	}

	/* Enable SEV-ES special handling */
	static_branch_enable(&sev_es_enable_key);

	/* Initialize per-cpu GHCB pages */
	for_each_possible_cpu(cpu) {
		alloc_runtime_data(cpu);
		init_ghcb(cpu);
	}

	sev_es_setup_play_dead();

	/* Secondary CPUs use the runtime #VC handler */
	initial_vc_handler = (unsigned long)kernel_exc_vmm_communication;
}

static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
{
	int trapnr = ctxt->fi.vector;

	if (trapnr == X86_TRAP_PF)
		native_write_cr2(ctxt->fi.cr2);

	ctxt->regs->orig_ax = ctxt->fi.error_code;
	do_early_exception(ctxt->regs, trapnr);
}

static long *vc_insn_get_rm(struct es_em_ctxt *ctxt)
{
	long *reg_array;
	int offset;

	reg_array = (long *)ctxt->regs;
	offset = insn_get_modrm_rm_off(&ctxt->insn, ctxt->regs);

	if (offset < 0)
		return NULL;

	offset /= sizeof(long);

	return reg_array + offset;
}

static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
				 unsigned int bytes, bool read)
{
	u64 exit_code, exit_info_1, exit_info_2;
	unsigned long ghcb_pa = __pa(ghcb);
	enum es_result res;
	phys_addr_t paddr;
	void __user *ref;

	ref = insn_get_addr_ref(&ctxt->insn, ctxt->regs);
	if (ref == (void __user *)-1L)
		return ES_UNSUPPORTED;

	exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE;

	res = vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr);
	if (res != ES_OK) {
		if (res == ES_EXCEPTION && !read)
			ctxt->fi.error_code |= X86_PF_WRITE;

		return res;
	}

	exit_info_1 = paddr;
	/* Can never be greater than 8 */
	exit_info_2 = bytes;

	ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer));

	return sev_es_ghcb_hv_call(ghcb, true, ctxt, exit_code, exit_info_1, exit_info_2);
}

/*
 * The MOVS instruction has two memory operands, which raises the
 * problem that it is not known whether the access to the source or the
 * destination caused the #VC exception (and hence whether an MMIO read
 * or write operation needs to be emulated).
 *
 * Instead of playing games with walking page-tables and trying to guess
 * whether the source or destination is an MMIO range, split the move
 * into two operations, a read and a write with only one memory operand.
 * This will cause a nested #VC exception on the MMIO address which can
 * then be handled.
 *
 * This implementation has the benefit that it also supports MOVS where
 * source _and_ destination are MMIO regions.
 *
 * It will slow MOVS on MMIO down a lot, but in SEV-ES guests it is a
 * rare operation. If it turns out to be a performance problem the split
 * operations can be moved to memcpy_fromio() and memcpy_toio().
 */
static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt,
					  unsigned int bytes)
{
	unsigned long ds_base, es_base;
	unsigned char *src, *dst;
	unsigned char buffer[8];
	enum es_result ret;
	bool rep;
	int off;

	ds_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_DS);
	es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);

	if (ds_base == -1L || es_base == -1L) {
		ctxt->fi.vector = X86_TRAP_GP;
		ctxt->fi.error_code = 0;
		return ES_EXCEPTION;
	}

	src = ds_base + (unsigned char *)ctxt->regs->si;
	dst = es_base + (unsigned char *)ctxt->regs->di;

	ret = vc_read_mem(ctxt, src, buffer, bytes);
	if (ret != ES_OK)
		return ret;

	ret = vc_write_mem(ctxt, dst, buffer, bytes);
	if (ret != ES_OK)
		return ret;

	if (ctxt->regs->flags & X86_EFLAGS_DF)
		off = -bytes;
	else
		off = bytes;

	ctxt->regs->si += off;
	ctxt->regs->di += off;

	rep = insn_has_rep_prefix(&ctxt->insn);
	if (rep)
		ctxt->regs->cx -= 1;

	if (!rep || ctxt->regs->cx == 0)
		return ES_OK;
	else
		return ES_RETRY;
}

static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	struct insn *insn = &ctxt->insn;
	unsigned int bytes = 0;
	enum mmio_type mmio;
	enum es_result ret;
	u8 sign_byte;
	long *reg_data;

	mmio = insn_decode_mmio(insn, &bytes);
	if (mmio == MMIO_DECODE_FAILED)
		return ES_DECODE_FAILED;

	if (mmio != MMIO_WRITE_IMM && mmio != MMIO_MOVS) {
		reg_data = insn_get_modrm_reg_ptr(insn, ctxt->regs);
		if (!reg_data)
			return ES_DECODE_FAILED;
	}

	switch (mmio) {
	case MMIO_WRITE:
		memcpy(ghcb->shared_buffer, reg_data, bytes);
		ret = vc_do_mmio(ghcb, ctxt, bytes, false);
		break;
	case MMIO_WRITE_IMM:
		memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes);
		ret = vc_do_mmio(ghcb, ctxt, bytes, false);
		break;
	case MMIO_READ:
		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
		if (ret)
			break;

		/* Zero-extend for 32-bit operation */
		if (bytes == 4)
			*reg_data = 0;

		memcpy(reg_data, ghcb->shared_buffer, bytes);
		break;
	case MMIO_READ_ZERO_EXTEND:
		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
		if (ret)
			break;

		/* Zero extend based on operand size */
		memset(reg_data, 0, insn->opnd_bytes);
		memcpy(reg_data, ghcb->shared_buffer, bytes);
		break;
	case MMIO_READ_SIGN_EXTEND:
		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
		if (ret)
			break;

		if (bytes == 1) {
			u8 *val = (u8 *)ghcb->shared_buffer;

			sign_byte = (*val & 0x80) ? 0xff : 0x00;
		} else {
			u16 *val = (u16 *)ghcb->shared_buffer;

			sign_byte = (*val & 0x8000) ? 0xff : 0x00;
		}

		/* Sign extend based on operand size */
		memset(reg_data, sign_byte, insn->opnd_bytes);
		memcpy(reg_data, ghcb->shared_buffer, bytes);
		break;
	case MMIO_MOVS:
		ret = vc_handle_mmio_movs(ctxt, bytes);
		break;
	default:
		ret = ES_UNSUPPORTED;
		break;
	}

	return ret;
}

static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,
					  struct es_em_ctxt *ctxt)
{
	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
	long val, *reg = vc_insn_get_rm(ctxt);
	enum es_result ret;

	if (!reg)
		return ES_DECODE_FAILED;

	val = *reg;

	/* Upper 32 bits must be written as zeroes */
	if (val >> 32) {
		ctxt->fi.vector = X86_TRAP_GP;
		ctxt->fi.error_code = 0;
		return ES_EXCEPTION;
	}

	/* Clear out other reserved bits and set bit 10 */
	val = (val & 0xffff23ffL) | BIT(10);

	/* Early non-zero writes to DR7 are not supported */
	if (!data && (val & ~DR7_RESET_VALUE))
		return ES_UNSUPPORTED;

	/* Using a value of 0 for ExitInfo1 means RAX holds the value */
	ghcb_set_rax(ghcb, val);
	ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
	if (ret != ES_OK)
		return ret;

	if (data)
		data->dr7 = val;

	return ES_OK;
}

static enum es_result vc_handle_dr7_read(struct ghcb *ghcb,
					 struct es_em_ctxt *ctxt)
{
	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
	long *reg = vc_insn_get_rm(ctxt);

	if (!reg)
		return ES_DECODE_FAILED;

	if (data)
		*reg = data->dr7;
	else
		*reg = DR7_RESET_VALUE;

	return ES_OK;
}

static enum es_result vc_handle_wbinvd(struct ghcb *ghcb,
				       struct es_em_ctxt *ctxt)
{
	return sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_WBINVD, 0, 0);
}

static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	enum es_result ret;

	ghcb_set_rcx(ghcb, ctxt->regs->cx);

	ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_RDPMC, 0, 0);
	if (ret != ES_OK)
		return ret;

	if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb)))
		return ES_VMM_ERROR;

	ctxt->regs->ax = ghcb->save.rax;
	ctxt->regs->dx = ghcb->save.rdx;

	return ES_OK;
}

static enum es_result vc_handle_monitor(struct ghcb *ghcb,
					struct es_em_ctxt *ctxt)
{
	/*
	 * Treat it as a NOP and do not leak a physical address to the
	 * hypervisor.
	 */
	return ES_OK;
}

static enum es_result vc_handle_mwait(struct ghcb *ghcb,
				      struct es_em_ctxt *ctxt)
{
	/* Treat the same as MONITOR/MONITORX */
	return ES_OK;
}

static enum es_result vc_handle_vmmcall(struct ghcb *ghcb,
					struct es_em_ctxt *ctxt)
{
	enum es_result ret;

	ghcb_set_rax(ghcb, ctxt->regs->ax);
	ghcb_set_cpl(ghcb, user_mode(ctxt->regs) ? 3 : 0);

	if (x86_platform.hyper.sev_es_hcall_prepare)
		x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs);

	ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_VMMCALL, 0, 0);
	if (ret != ES_OK)
		return ret;

	if (!ghcb_rax_is_valid(ghcb))
		return ES_VMM_ERROR;

	ctxt->regs->ax = ghcb->save.rax;

	/*
	 * Call sev_es_hcall_finish() after regs->ax is already set.
	 * This allows the hypervisor handler to overwrite it again if
	 * necessary.
	 */
	if (x86_platform.hyper.sev_es_hcall_finish &&
	    !x86_platform.hyper.sev_es_hcall_finish(ghcb, ctxt->regs))
		return ES_VMM_ERROR;

	return ES_OK;
}

static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
					struct es_em_ctxt *ctxt)
{
	/*
	 * Calling exc_alignment_check() directly does not work, because it
	 * enables IRQs and the GHCB is active. Forward the exception and call
	 * it later from vc_forward_exception().
	 */
	ctxt->fi.vector = X86_TRAP_AC;
	ctxt->fi.error_code = 0;
	return ES_EXCEPTION;
}

static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
					 struct ghcb *ghcb,
					 unsigned long exit_code)
{
	enum es_result result;

	switch (exit_code) {
	case SVM_EXIT_READ_DR7:
		result = vc_handle_dr7_read(ghcb, ctxt);
		break;
	case SVM_EXIT_WRITE_DR7:
		result = vc_handle_dr7_write(ghcb, ctxt);
		break;
	case SVM_EXIT_EXCP_BASE + X86_TRAP_AC:
		result = vc_handle_trap_ac(ghcb, ctxt);
		break;
	case SVM_EXIT_RDTSC:
	case SVM_EXIT_RDTSCP:
		result = vc_handle_rdtsc(ghcb, ctxt, exit_code);
		break;
	case SVM_EXIT_RDPMC:
		result = vc_handle_rdpmc(ghcb, ctxt);
		break;
	case SVM_EXIT_INVD:
		pr_err_ratelimited("#VC exception for INVD??? Seriously???\n");
		result = ES_UNSUPPORTED;
		break;
	case SVM_EXIT_CPUID:
		result = vc_handle_cpuid(ghcb, ctxt);
		break;
	case SVM_EXIT_IOIO:
		result = vc_handle_ioio(ghcb, ctxt);
		break;
	case SVM_EXIT_MSR:
		result = vc_handle_msr(ghcb, ctxt);
		break;
	case SVM_EXIT_VMMCALL:
		result = vc_handle_vmmcall(ghcb, ctxt);
		break;
	case SVM_EXIT_WBINVD:
		result = vc_handle_wbinvd(ghcb, ctxt);
		break;
	case SVM_EXIT_MONITOR:
		result = vc_handle_monitor(ghcb, ctxt);
		break;
	case SVM_EXIT_MWAIT:
		result = vc_handle_mwait(ghcb, ctxt);
		break;
	case SVM_EXIT_NPF:
		result = vc_handle_mmio(ghcb, ctxt);
		break;
	default:
		/*
		 * Unexpected #VC exception
		 */
		result = ES_UNSUPPORTED;
	}

	return result;
}

static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
{
	long error_code = ctxt->fi.error_code;
	int trapnr = ctxt->fi.vector;

	ctxt->regs->orig_ax = ctxt->fi.error_code;

	switch (trapnr) {
	case X86_TRAP_GP:
		exc_general_protection(ctxt->regs, error_code);
		break;
	case X86_TRAP_UD:
		exc_invalid_op(ctxt->regs);
		break;
	case X86_TRAP_PF:
		write_cr2(ctxt->fi.cr2);
		exc_page_fault(ctxt->regs, error_code);
		break;
	case X86_TRAP_AC:
		exc_alignment_check(ctxt->regs, error_code);
		break;
	default:
		pr_emerg("Unsupported exception in #VC instruction emulation - can't continue\n");
		BUG();
	}
}

static __always_inline bool is_vc2_stack(unsigned long sp)
{
	return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
}

static __always_inline bool vc_from_invalid_context(struct pt_regs *regs)
{
	unsigned long sp, prev_sp;

	sp	= (unsigned long)regs;
	prev_sp	= regs->sp;

	/*
	 * If the code was already executing on the VC2 stack when the #VC
	 * happened, let it proceed to the normal handling routine. This way the
	 * code executing on the VC2 stack can cause #VC exceptions to get handled.
	 */
	return is_vc2_stack(sp) && !is_vc2_stack(prev_sp);
}

static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code)
{
	struct ghcb_state state;
	struct es_em_ctxt ctxt;
	enum es_result result;
	struct ghcb *ghcb;
	bool ret = true;

	ghcb = __sev_get_ghcb(&state);

	vc_ghcb_invalidate(ghcb);
	result = vc_init_em_ctxt(&ctxt, regs, error_code);

	if (result == ES_OK)
		result = vc_handle_exitcode(&ctxt, ghcb, error_code);

	__sev_put_ghcb(&state);

	/* Done - now check the result */
	switch (result) {
	case ES_OK:
		vc_finish_insn(&ctxt);
		break;
	case ES_UNSUPPORTED:
		pr_err_ratelimited("Unsupported exit-code 0x%02lx in #VC exception (IP: 0x%lx)\n",
				   error_code, regs->ip);
		ret = false;
		break;
	case ES_VMM_ERROR:
		pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
				   error_code, regs->ip);
		ret = false;
		break;
	case ES_DECODE_FAILED:
		pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
				   error_code, regs->ip);
		ret = false;
		break;
	case ES_EXCEPTION:
		vc_forward_exception(&ctxt);
		break;
	case ES_RETRY:
		/* Nothing to do */
		break;
	default:
		pr_emerg("Unknown result in %s():%d\n", __func__, result);
		/*
		 * Emulating the instruction which caused the #VC exception
		 * failed - can't continue so print debug information
		 */
		BUG();
	}

	return ret;
}

static __always_inline bool vc_is_db(unsigned long error_code)
{
	return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB;
}

/*
 * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode
 * and will panic when an error happens.
 */
DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
{
	irqentry_state_t irq_state;

	/*
	 * With the current implementation it is always possible to switch to a
	 * safe stack because #VC exceptions only happen at known places, like
	 * intercepted instructions or accesses to MMIO areas/IO ports. They can
	 * also happen with code instrumentation when the hypervisor intercepts
	 * #DB, but the critical paths are forbidden to be instrumented, so #DB
	 * exceptions currently also only happen in safe places.
	 *
	 * But keep this here in case the noinstr annotations are violated due
	 * to a bug elsewhere.
	 */
	if (unlikely(vc_from_invalid_context(regs))) {
		instrumentation_begin();
		panic("Can't handle #VC exception from unsupported context\n");
		instrumentation_end();
	}

	/*
	 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
	 */
	if (vc_is_db(error_code)) {
		exc_debug(regs);
		return;
	}

	irq_state = irqentry_nmi_enter(regs);

	instrumentation_begin();

	if (!vc_raw_handle_exception(regs, error_code)) {
		/* Show some debug info */
		show_regs(regs);

		/* Ask hypervisor to sev_es_terminate */
		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);

		/* If that fails and we get here - just panic */
		panic("Returned from Terminate-Request to Hypervisor\n");
	}

	instrumentation_end();
	irqentry_nmi_exit(regs, irq_state);
}

/*
 * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode
 * and will kill the current task with SIGBUS when an error happens.
 */
DEFINE_IDTENTRY_VC_USER(exc_vmm_communication)
{
	/*
	 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
	 */
	if (vc_is_db(error_code)) {
		noist_exc_debug(regs);
		return;
	}

	irqentry_enter_from_user_mode(regs);
	instrumentation_begin();

	if (!vc_raw_handle_exception(regs, error_code)) {
		/*
		 * Do not kill the machine if user-space triggered the
		 * exception. Send SIGBUS instead and let user-space deal with
		 * it.
		 */
		force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
	}

	instrumentation_end();
	irqentry_exit_to_user_mode(regs);
}

bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
{
	unsigned long exit_code = regs->orig_ax;
	struct es_em_ctxt ctxt;
	enum es_result result;

	vc_ghcb_invalidate(boot_ghcb);

	result = vc_init_em_ctxt(&ctxt, regs, exit_code);
	if (result == ES_OK)
		result = vc_handle_exitcode(&ctxt, boot_ghcb, exit_code);

	/* Done - now check the result */
	switch (result) {
	case ES_OK:
		vc_finish_insn(&ctxt);
		break;
	case ES_UNSUPPORTED:
		early_printk("PANIC: Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
				exit_code, regs->ip);
		goto fail;
	case ES_VMM_ERROR:
		early_printk("PANIC: Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
				exit_code, regs->ip);
		goto fail;
	case ES_DECODE_FAILED:
		early_printk("PANIC: Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
				exit_code, regs->ip);
		goto fail;
	case ES_EXCEPTION:
		vc_early_forward_exception(&ctxt);
		break;
	case ES_RETRY:
		/* Nothing to do */
		break;
	default:
		BUG();
	}

	return true;

fail:
	show_regs(regs);

	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
}

/*
 * Initial set up of SNP relies on information provided by the
 * Confidential Computing blob, which can be passed to the kernel
 * in the following ways, depending on how it is booted:
 *
 * - when booted via the boot/decompress kernel:
 *   - via boot_params
 *
 * - when booted directly by firmware/bootloader (e.g. CONFIG_PVH):
 *   - via a setup_data entry, as defined by the Linux Boot Protocol
 *
 * Scan for the blob in that order.
 */
static __init struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
{
	struct cc_blob_sev_info *cc_info;

	/* Boot kernel would have passed the CC blob via boot_params. */
	if (bp->cc_blob_address) {
		cc_info = (struct cc_blob_sev_info *)(unsigned long)bp->cc_blob_address;
		goto found_cc_info;
	}

	/*
	 * If kernel was booted directly, without the use of the
	 * boot/decompression kernel, the CC blob may have been passed via
	 * setup_data instead.
	 */
	cc_info = find_cc_blob_setup_data(bp);
	if (!cc_info)
		return NULL;

found_cc_info:
	if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
		snp_abort();

	return cc_info;
}

bool __init snp_init(struct boot_params *bp)
{
	struct cc_blob_sev_info *cc_info;

	if (!bp)
		return false;

	cc_info = find_cc_blob(bp);
	if (!cc_info)
		return false;

	setup_cpuid_table(cc_info);

	/*
	 * The CC blob will be used later to access the secrets page. Cache
	 * it here like the boot kernel does.
	 */
	bp->cc_blob_address = (u32)(unsigned long)cc_info;

	return true;
}

void __init snp_abort(void)
{
	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
}

static void dump_cpuid_table(void)
{
	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
	int i = 0;

	pr_info("count=%d reserved=0x%x reserved2=0x%llx\n",
		cpuid_table->count, cpuid_table->__reserved1, cpuid_table->__reserved2);

	for (i = 0; i < SNP_CPUID_COUNT_MAX; i++) {
		const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];

		pr_info("index=%3d fn=0x%08x subfn=0x%08x: eax=0x%08x ebx=0x%08x ecx=0x%08x edx=0x%08x xcr0_in=0x%016llx xss_in=0x%016llx reserved=0x%016llx\n",
			i, fn->eax_in, fn->ecx_in, fn->eax, fn->ebx, fn->ecx,
			fn->edx, fn->xcr0_in, fn->xss_in, fn->__reserved);
	}
}

/*
 * It is useful from an auditing/testing perspective to provide an easy way
 * for the guest owner to know that the CPUID table has been initialized as
 * expected, but that initialization happens too early in boot to print any
 * sort of indicator, and there's not really any other good place to do it,
 * so do it here.
 */
static int __init report_cpuid_table(void)
{
	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();

	if (!cpuid_table->count)
		return 0;

	pr_info("Using SNP CPUID table, %d entries present.\n",
		cpuid_table->count);

	if (sev_cfg.debug)
		dump_cpuid_table();

	return 0;
}
arch_initcall(report_cpuid_table);

static int __init init_sev_config(char *str)
{
	char *s;

	while ((s = strsep(&str, ","))) {
		if (!strcmp(s, "debug")) {
			sev_cfg.debug = true;
			continue;
		}

		pr_info("SEV command-line option '%s' was not recognized\n", s);
	}

	return 1;
}
__setup("sev=", init_sev_config);

int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned long *fw_err)
{
	struct ghcb_state state;
	struct es_em_ctxt ctxt;
	unsigned long flags;
	struct ghcb *ghcb;
	int ret;

	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return -ENODEV;

	if (!fw_err)
		return -EINVAL;

	/*
	 * __sev_get_ghcb() needs to run with IRQs disabled because it is using
	 * a per-CPU GHCB.
	 */
	local_irq_save(flags);

	ghcb = __sev_get_ghcb(&state);
	if (!ghcb) {
		ret = -EIO;
		goto e_restore_irq;
	}

	vc_ghcb_invalidate(ghcb);

	if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
		ghcb_set_rax(ghcb, input->data_gpa);
		ghcb_set_rbx(ghcb, input->data_npages);
	}

	ret = sev_es_ghcb_hv_call(ghcb, true, &ctxt, exit_code, input->req_gpa, input->resp_gpa);
	if (ret)
		goto e_put;

	if (ghcb->save.sw_exit_info_2) {
		/* Number of expected pages are returned in RBX */
		if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST &&
		    ghcb->save.sw_exit_info_2 == SNP_GUEST_REQ_INVALID_LEN)
			input->data_npages = ghcb_get_rbx(ghcb);

		*fw_err = ghcb->save.sw_exit_info_2;

		ret = -EIO;
	}

e_put:
	__sev_put_ghcb(&state);
e_restore_irq:
	local_irq_restore(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(snp_issue_guest_request);

static struct platform_device sev_guest_device = {
	.name		= "sev-guest",
	.id		= -1,
};

static int __init snp_init_platform_device(void)
{
	struct sev_guest_platform_data data;
	u64 gpa;

	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return -ENODEV;

	gpa = get_secrets_page();
	if (!gpa)
		return -ENODEV;

	data.secrets_gpa = gpa;
	if (platform_device_add_data(&sev_guest_device, &data, sizeof(data)))
		return -ENODEV;

	if (platform_device_register(&sev_guest_device))
		return -ENODEV;

	pr_info("SNP guest platform device initialized.\n");
	return 0;
}
device_initcall(snp_init_platform_device);