// SPDX-License-Identifier: GPL-2.0
/*
 * AMD Encrypted Register State Support
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This file is not compiled stand-alone. It contains code shared
 * between the pre-decompression boot code and the running Linux kernel
 * and is included directly into both code-bases.
 */

#ifndef __BOOT_COMPRESSED
#define error(v)	pr_err(v)
#define has_cpuflag(f)	boot_cpu_has(f)
#endif

static bool __init sev_es_check_cpu_features(void)
{
	if (!has_cpuflag(X86_FEATURE_RDRAND)) {
		error("RDRAND instruction not supported - no trusted source of randomness available\n");
		return false;
	}

	return true;
}

static void __noreturn sev_es_terminate(unsigned int reason)
{
	u64 val = GHCB_MSR_TERM_REQ;

	/*
	 * Tell the hypervisor what went wrong - only reason-set 0 is
	 * currently supported.
	 */
	val |= GHCB_SEV_TERM_REASON(0, reason);

	/* Request Guest Termination from Hypervisor */
	sev_es_wr_ghcb_msr(val);
	VMGEXIT();

	while (true)
		asm volatile("hlt\n" : : : "memory");
}

static bool sev_es_negotiate_protocol(void)
{
	u64 val;

	/* Do the GHCB protocol version negotiation */
	sev_es_wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
	VMGEXIT();
	val = sev_es_rd_ghcb_msr();

	if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
		return false;

	if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTO_OUR ||
	    GHCB_MSR_PROTO_MIN(val) > GHCB_PROTO_OUR)
		return false;

	return true;
}
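
/*
 * Sketch of the negotiation above. The exact bit placement is defined
 * by the GHCB specification and the GHCB_MSR_* accessor macros, not
 * here: the guest writes the SEV information request to the GHCB MSR,
 * and the hypervisor's response carries the supported protocol range:
 *
 *	sev_es_wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
 *	VMGEXIT();
 *	val = sev_es_rd_ghcb_msr();
 *	max = GHCB_MSR_PROTO_MAX(val);	// highest version the HV speaks
 *	min = GHCB_MSR_PROTO_MIN(val);	// lowest version the HV speaks
 *
 * Negotiation succeeds iff min <= GHCB_PROTO_OUR <= max.
 */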

static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb)
{
	ghcb->save.sw_exit_code = 0;
	__builtin_memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}

static bool vc_decoding_needed(unsigned long exit_code)
{
	/* Exceptions don't require decoding of the instruction */
	return !(exit_code >= SVM_EXIT_EXCP_BASE &&
		 exit_code <= SVM_EXIT_LAST_EXCP);
}

static enum es_result vc_init_em_ctxt(struct es_em_ctxt *ctxt,
				      struct pt_regs *regs,
				      unsigned long exit_code)
{
	enum es_result ret = ES_OK;

	memset(ctxt, 0, sizeof(*ctxt));
	ctxt->regs = regs;

	if (vc_decoding_needed(exit_code))
		ret = vc_decode_insn(ctxt);

	return ret;
}

static void vc_finish_insn(struct es_em_ctxt *ctxt)
{
	ctxt->regs->ip += ctxt->insn.length;
}

static enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	u32 ret;

	ret = ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0);
	if (!ret)
		return ES_OK;

	if (ret == 1) {
		u64 info = ghcb->save.sw_exit_info_2;
		unsigned long v = info & SVM_EVTINJ_VEC_MASK;

		/* Check if exception information from hypervisor is sane. */
		if ((info & SVM_EVTINJ_VALID) &&
		    ((v == X86_TRAP_GP) || (v == X86_TRAP_UD)) &&
		    ((info & SVM_EVTINJ_TYPE_MASK) == SVM_EVTINJ_TYPE_EXEPT)) {
			ctxt->fi.vector = v;

			if (info & SVM_EVTINJ_VALID_ERR)
				ctxt->fi.error_code = info >> 32;

			return ES_EXCEPTION;
		}
	}

	return ES_VMM_ERROR;
}

enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb, bool set_ghcb_msr,
				   struct es_em_ctxt *ctxt, u64 exit_code,
				   u64 exit_info_1, u64 exit_info_2)
{
	/* Fill in protocol and format specifiers */
	ghcb->protocol_version = GHCB_PROTOCOL_MAX;
	ghcb->ghcb_usage       = GHCB_DEFAULT_USAGE;

	ghcb_set_sw_exit_code(ghcb, exit_code);
	ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
	ghcb_set_sw_exit_info_2(ghcb, exit_info_2);

	/*
	 * Hyper-V unenlightened guests use a paravisor for communicating and
	 * GHCB pages are being allocated and set up by that paravisor. Linux
	 * should not change the GHCB page's physical address.
	 */
	if (set_ghcb_msr)
		sev_es_wr_ghcb_msr(__pa(ghcb));

	VMGEXIT();

	return verify_exception_info(ghcb, ctxt);
}
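
/*
 * Typical call pattern from a #VC exit-code handler (a sketch only;
 * vc_handle_rdtsc() below is a real instance): load the input GPRs the
 * hypervisor needs into the GHCB, issue the call, then copy validated
 * results back:
 *
 *	ghcb_set_rax(ghcb, regs->ax);
 *	ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, exit_code, 0, 0);
 *	if (ret != ES_OK)
 *		return ret;
 *	if (!ghcb_rax_is_valid(ghcb))
 *		return ES_VMM_ERROR;
 *	regs->ax = ghcb->save.rax;
 *
 * The *_is_valid() checks guard against a hypervisor that returns
 * without filling in the registers the handler expects.
 */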

/*
 * Boot VC Handler - This is the first VC handler during boot, there is no GHCB
 * page yet, so it only supports the MSR based communication with the
 * hypervisor and only the CPUID exit-code.
 */
void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
{
	unsigned int fn = lower_bits(regs->ax, 32);
	unsigned long val;

	/* Only CPUID is supported via MSR protocol */
	if (exit_code != SVM_EXIT_CPUID)
		goto fail;

	sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EAX));
	VMGEXIT();
	val = sev_es_rd_ghcb_msr();
	if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
		goto fail;
	regs->ax = val >> 32;

	sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EBX));
	VMGEXIT();
	val = sev_es_rd_ghcb_msr();
	if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
		goto fail;
	regs->bx = val >> 32;

	sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_ECX));
	VMGEXIT();
	val = sev_es_rd_ghcb_msr();
	if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
		goto fail;
	regs->cx = val >> 32;

	sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EDX));
	VMGEXIT();
	val = sev_es_rd_ghcb_msr();
	if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
		goto fail;
	regs->dx = val >> 32;

	/*
	 * This is a VC handler and the #VC is only raised when SEV-ES is
	 * active, which means SEV must be active too. Do sanity checks on the
	 * CPUID results to make sure the hypervisor does not trick the kernel
	 * into the no-sev path. This could map sensitive data unencrypted and
	 * make it accessible to the hypervisor.
	 *
	 * In particular, check for:
	 *	- Availability of CPUID leaf 0x8000001f
	 *	- SEV CPUID bit.
	 *
	 * The hypervisor might still report the wrong C-bit position, but this
	 * can't be checked here.
	 */
	if (fn == 0x80000000 && (regs->ax < 0x8000001f))
		/* SEV leaf check */
		goto fail;
	else if (fn == 0x8000001f && !(regs->ax & BIT(1)))
		/* SEV bit */
		goto fail;

	/* Skip over the CPUID two-byte opcode */
	regs->ip += 2;

	return;

fail:
	/* Terminate the guest */
	sev_es_terminate(GHCB_SEV_ES_GEN_REQ);
}

static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
					  void *src, char *buf,
					  unsigned int data_size,
					  unsigned int count,
					  bool backwards)
{
	int i, b = backwards ? -1 : 1;
	enum es_result ret = ES_OK;

	for (i = 0; i < count; i++) {
		void *s = src + (i * data_size * b);
		char *d = buf + (i * data_size);

		ret = vc_read_mem(ctxt, s, d, data_size);
		if (ret != ES_OK)
			break;
	}

	return ret;
}

static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt,
					   void *dst, char *buf,
					   unsigned int data_size,
					   unsigned int count,
					   bool backwards)
{
	int i, s = backwards ? -1 : 1;
	enum es_result ret = ES_OK;

	for (i = 0; i < count; i++) {
		void *d = dst + (i * data_size * s);
		char *b = buf + (i * data_size);

		ret = vc_write_mem(ctxt, d, b, data_size);
		if (ret != ES_OK)
			break;
	}

	return ret;
}

#define IOIO_TYPE_STR	BIT(2)
#define IOIO_TYPE_IN	1
#define IOIO_TYPE_INS	(IOIO_TYPE_IN | IOIO_TYPE_STR)
#define IOIO_TYPE_OUT	0
#define IOIO_TYPE_OUTS	(IOIO_TYPE_OUT | IOIO_TYPE_STR)

#define IOIO_REP	BIT(3)

#define IOIO_ADDR_64	BIT(9)
#define IOIO_ADDR_32	BIT(8)
#define IOIO_ADDR_16	BIT(7)

#define IOIO_DATA_32	BIT(6)
#define IOIO_DATA_16	BIT(5)
#define IOIO_DATA_8	BIT(4)

#define IOIO_SEG_ES	(0 << 10)
#define IOIO_SEG_DS	(3 << 10)
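
/*
 * Resulting SVM_EXIT_IOIO exit_info_1 layout, as built from the macros
 * above and from vc_ioio_exitinfo() below (a summary, not normative -
 * the authoritative encoding is in the APM/GHCB specification):
 *
 *	bit  0      - direction (1 = IN, 0 = OUT)
 *	bit  2      - string instruction (INS/OUTS)
 *	bit  3      - REP prefix present
 *	bits 4-6    - operand size, one-hot (8/16/32 bit)
 *	bits 7-9    - address size, one-hot (16/32/64 bit)
 *	bits 10-12  - effective segment (ES for INS, DS for OUTS)
 *	bits 16-31  - port number
 */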

static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
{
	struct insn *insn = &ctxt->insn;
	*exitinfo = 0;

	switch (insn->opcode.bytes[0]) {
	/* INS opcodes */
	case 0x6c:
	case 0x6d:
		*exitinfo |= IOIO_TYPE_INS;
		*exitinfo |= IOIO_SEG_ES;
		*exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
		break;

	/* OUTS opcodes */
	case 0x6e:
	case 0x6f:
		*exitinfo |= IOIO_TYPE_OUTS;
		*exitinfo |= IOIO_SEG_DS;
		*exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
		break;

	/* IN immediate opcodes */
	case 0xe4:
	case 0xe5:
		*exitinfo |= IOIO_TYPE_IN;
		*exitinfo |= (u8)insn->immediate.value << 16;
		break;

	/* OUT immediate opcodes */
	case 0xe6:
	case 0xe7:
		*exitinfo |= IOIO_TYPE_OUT;
		*exitinfo |= (u8)insn->immediate.value << 16;
		break;

	/* IN register opcodes */
	case 0xec:
	case 0xed:
		*exitinfo |= IOIO_TYPE_IN;
		*exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
		break;

	/* OUT register opcodes */
	case 0xee:
	case 0xef:
		*exitinfo |= IOIO_TYPE_OUT;
		*exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
		break;

	default:
		return ES_DECODE_FAILED;
	}

	switch (insn->opcode.bytes[0]) {
	case 0x6c:
	case 0x6e:
	case 0xe4:
	case 0xe6:
	case 0xec:
	case 0xee:
		/* Single byte opcodes */
		*exitinfo |= IOIO_DATA_8;
		break;
	default:
		/* Length determined by instruction parsing */
		*exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16
						     : IOIO_DATA_32;
	}

	switch (insn->addr_bytes) {
	case 2:
		*exitinfo |= IOIO_ADDR_16;
		break;
	case 4:
		*exitinfo |= IOIO_ADDR_32;
		break;
	case 8:
		*exitinfo |= IOIO_ADDR_64;
		break;
	}

	if (insn_has_rep_prefix(insn))
		*exitinfo |= IOIO_REP;

	return ES_OK;
}

static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	struct pt_regs *regs = ctxt->regs;
	u64 exit_info_1, exit_info_2;
	enum es_result ret;

	ret = vc_ioio_exitinfo(ctxt, &exit_info_1);
	if (ret != ES_OK)
		return ret;

	if (exit_info_1 & IOIO_TYPE_STR) {

		/* (REP) INS/OUTS */

		bool df = ((regs->flags & X86_EFLAGS_DF) == X86_EFLAGS_DF);
		unsigned int io_bytes, exit_bytes;
		unsigned int ghcb_count, op_count;
		unsigned long es_base;
		u64 sw_scratch;

		/*
		 * For the string variants with rep prefix the number of in/out
		 * operations per #VC exception is limited so that the kernel
		 * has a chance to take interrupts and re-schedule while the
		 * instruction is emulated.
		 */
		io_bytes   = (exit_info_1 >> 4) & 0x7;
		ghcb_count = sizeof(ghcb->shared_buffer) / io_bytes;

		op_count    = (exit_info_1 & IOIO_REP) ? regs->cx : 1;
		exit_info_2 = min(op_count, ghcb_count);
		exit_bytes  = exit_info_2 * io_bytes;

		es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);

		/* Read bytes of OUTS into the shared buffer */
		if (!(exit_info_1 & IOIO_TYPE_IN)) {
			ret = vc_insn_string_read(ctxt,
					       (void *)(es_base + regs->si),
					       ghcb->shared_buffer, io_bytes,
					       exit_info_2, df);
			if (ret)
				return ret;
		}

		/*
		 * Issue a VMGEXIT to the HV to consume the bytes from the
		 * shared buffer or to have it write them into the shared buffer
		 * depending on the instruction: OUTS or INS.
		 */
		sw_scratch = __pa(ghcb) + offsetof(struct ghcb, shared_buffer);
		ghcb_set_sw_scratch(ghcb, sw_scratch);
		ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_IOIO,
					  exit_info_1, exit_info_2);
		if (ret != ES_OK)
			return ret;

		/* Read bytes from shared buffer into the guest's destination. */
		if (exit_info_1 & IOIO_TYPE_IN) {
			ret = vc_insn_string_write(ctxt,
						   (void *)(es_base + regs->di),
						   ghcb->shared_buffer, io_bytes,
						   exit_info_2, df);
			if (ret)
				return ret;

			if (df)
				regs->di -= exit_bytes;
			else
				regs->di += exit_bytes;
		} else {
			if (df)
				regs->si -= exit_bytes;
			else
				regs->si += exit_bytes;
		}

		if (exit_info_1 & IOIO_REP)
			regs->cx -= exit_info_2;

		ret = regs->cx ? ES_RETRY : ES_OK;

	} else {

		/* IN/OUT into/from rAX */

		int bits = (exit_info_1 & 0x70) >> 1;
		u64 rax = 0;

		if (!(exit_info_1 & IOIO_TYPE_IN))
			rax = lower_bits(regs->ax, bits);

		ghcb_set_rax(ghcb, rax);

		ret = sev_es_ghcb_hv_call(ghcb, true, ctxt,
					  SVM_EXIT_IOIO, exit_info_1, 0);
		if (ret != ES_OK)
			return ret;

		if (exit_info_1 & IOIO_TYPE_IN) {
			if (!ghcb_rax_is_valid(ghcb))
				return ES_VMM_ERROR;
			regs->ax = lower_bits(ghcb->save.rax, bits);
		}
	}

	return ret;
}
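
/*
 * Worked example for the string-IO chunking above (buffer size is
 * illustrative; the real size comes from sizeof(ghcb->shared_buffer)):
 * a "rep outsw" (io_bytes = 2) with rcx = 4096 against a 2032-byte
 * shared buffer gives ghcb_count = 2032 / 2 = 1016, so exit_info_2 =
 * min(4096, 1016) = 1016 operations are emulated in this #VC, rcx
 * drops to 3080, and ES_RETRY re-runs the instruction for the rest.
 */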

static enum es_result vc_handle_cpuid(struct ghcb *ghcb,
				      struct es_em_ctxt *ctxt)
{
	struct pt_regs *regs = ctxt->regs;
	u32 cr4 = native_read_cr4();
	enum es_result ret;

	ghcb_set_rax(ghcb, regs->ax);
	ghcb_set_rcx(ghcb, regs->cx);

	if (cr4 & X86_CR4_OSXSAVE)
		/* Safe to read xcr0 */
		ghcb_set_xcr0(ghcb, xgetbv(XCR_XFEATURE_ENABLED_MASK));
	else
		/* xgetbv will cause #GP - use reset value for xcr0 */
		ghcb_set_xcr0(ghcb, 1);

	ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_CPUID, 0, 0);
	if (ret != ES_OK)
		return ret;

	if (!(ghcb_rax_is_valid(ghcb) &&
	      ghcb_rbx_is_valid(ghcb) &&
	      ghcb_rcx_is_valid(ghcb) &&
	      ghcb_rdx_is_valid(ghcb)))
		return ES_VMM_ERROR;

	regs->ax = ghcb->save.rax;
	regs->bx = ghcb->save.rbx;
	regs->cx = ghcb->save.rcx;
	regs->dx = ghcb->save.rdx;

	return ES_OK;
}

static enum es_result vc_handle_rdtsc(struct ghcb *ghcb,
				      struct es_em_ctxt *ctxt,
				      unsigned long exit_code)
{
	bool rdtscp = (exit_code == SVM_EXIT_RDTSCP);
	enum es_result ret;

	ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, exit_code, 0, 0);
	if (ret != ES_OK)
		return ret;

	if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb) &&
	      (!rdtscp || ghcb_rcx_is_valid(ghcb))))
		return ES_VMM_ERROR;

	ctxt->regs->ax = ghcb->save.rax;
	ctxt->regs->dx = ghcb->save.rdx;
	if (rdtscp)
		ctxt->regs->cx = ghcb->save.rcx;

	return ES_OK;
}
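
/*
 * Note on vc_handle_rdtsc(): RDTSC(P) returns the 64-bit TSC split
 * across EDX:EAX (plus TSC_AUX in ECX for RDTSCP), so the interrupted
 * guest code reassembles the value as usual, e.g.:
 *
 *	tsc = ((u64)edx << 32) | eax;
 *
 * which is why only the low 32 bits of the rax/rdx values copied back
 * here need to be meaningful.
 */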