// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Secure Encrypted Virtualization (SEV) interface
 *
 * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
 *
 * Author: Brijesh Singh <brijesh.singh@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/ccp.h>
#include <linux/firmware.h>
#include <linux/gfp.h>
#include <linux/cpufeature.h>

#include <asm/smp.h>

#include "psp-dev.h"
#include "sev-dev.h"

#define DEVICE_NAME		"sev"
#define SEV_FW_FILE		"amd/sev.fw"
#define SEV_FW_NAME_SIZE	64

static DEFINE_MUTEX(sev_cmd_mutex);
static struct sev_misc_dev *misc_dev;

static int psp_cmd_timeout = 100;
module_param(psp_cmd_timeout, int, 0644);
MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands");

static int psp_probe_timeout = 5;
module_param(psp_probe_timeout, int, 0644);
MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");

static bool psp_dead;
static int psp_timeout;

/* Trusted Memory Region (TMR):
 *   The TMR is a 1MB area that must be 1MB aligned. Use the page allocator
 *   to allocate the memory, which will return aligned memory for the
 *   specified allocation order.
 */
#define SEV_ES_TMR_SIZE	(1024 * 1024)
static void *sev_es_tmr;

static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
{
	struct sev_device *sev = psp_master->sev_data;

	if (sev->api_major > maj)
		return true;

	if (sev->api_major == maj && sev->api_minor >= min)
		return true;

	return false;
}

static void sev_irq_handler(int irq, void *data, unsigned int status)
{
	struct sev_device *sev = data;
	int reg;

	/* Check if it is command completion: */
	if (!(status & SEV_CMD_COMPLETE))
		return;

	/* Check if it is SEV command completion: */
	reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
	if (reg & PSP_CMDRESP_RESP) {
		sev->int_rcvd = 1;
		wake_up(&sev->int_queue);
	}
}

static int sev_wait_cmd_ioc(struct sev_device *sev,
			    unsigned int *reg, unsigned int timeout)
{
	int ret;

	ret = wait_event_timeout(sev->int_queue,
				 sev->int_rcvd, timeout * HZ);
	if (!ret)
		return -ETIMEDOUT;

	*reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);

	return 0;
}
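/*
 * Return the size of the parameter buffer that firmware expects for @cmd,
 * or 0 for commands that take no parameter buffer.
 */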
static int sev_cmd_buffer_len(int cmd)
{
	switch (cmd) {
	case SEV_CMD_INIT:			return sizeof(struct sev_data_init);
	case SEV_CMD_PLATFORM_STATUS:		return sizeof(struct sev_user_data_status);
	case SEV_CMD_PEK_CSR:			return sizeof(struct sev_data_pek_csr);
	case SEV_CMD_PEK_CERT_IMPORT:		return sizeof(struct sev_data_pek_cert_import);
	case SEV_CMD_PDH_CERT_EXPORT:		return sizeof(struct sev_data_pdh_cert_export);
	case SEV_CMD_LAUNCH_START:		return sizeof(struct sev_data_launch_start);
	case SEV_CMD_LAUNCH_UPDATE_DATA:	return sizeof(struct sev_data_launch_update_data);
	case SEV_CMD_LAUNCH_UPDATE_VMSA:	return sizeof(struct sev_data_launch_update_vmsa);
	case SEV_CMD_LAUNCH_FINISH:		return sizeof(struct sev_data_launch_finish);
	case SEV_CMD_LAUNCH_MEASURE:		return sizeof(struct sev_data_launch_measure);
	case SEV_CMD_ACTIVATE:			return sizeof(struct sev_data_activate);
	case SEV_CMD_DEACTIVATE:		return sizeof(struct sev_data_deactivate);
	case SEV_CMD_DECOMMISSION:		return sizeof(struct sev_data_decommission);
	case SEV_CMD_GUEST_STATUS:		return sizeof(struct sev_data_guest_status);
	case SEV_CMD_DBG_DECRYPT:		return sizeof(struct sev_data_dbg);
	case SEV_CMD_DBG_ENCRYPT:		return sizeof(struct sev_data_dbg);
	case SEV_CMD_SEND_START:		return sizeof(struct sev_data_send_start);
	case SEV_CMD_SEND_UPDATE_DATA:		return sizeof(struct sev_data_send_update_data);
	case SEV_CMD_SEND_UPDATE_VMSA:		return sizeof(struct sev_data_send_update_vmsa);
	case SEV_CMD_SEND_FINISH:		return sizeof(struct sev_data_send_finish);
	case SEV_CMD_RECEIVE_START:		return sizeof(struct sev_data_receive_start);
	case SEV_CMD_RECEIVE_FINISH:		return sizeof(struct sev_data_receive_finish);
	case SEV_CMD_RECEIVE_UPDATE_DATA:	return sizeof(struct sev_data_receive_update_data);
	case SEV_CMD_RECEIVE_UPDATE_VMSA:	return sizeof(struct sev_data_receive_update_vmsa);
	case SEV_CMD_LAUNCH_UPDATE_SECRET:	return sizeof(struct sev_data_launch_secret);
	case SEV_CMD_DOWNLOAD_FIRMWARE:		return sizeof(struct sev_data_download_firmware);
	case SEV_CMD_GET_ID:			return sizeof(struct sev_data_get_id);
	case SEV_CMD_ATTESTATION_REPORT:	return sizeof(struct sev_data_attestation_report);
	case SEV_CMD_SEND_CANCEL:		return sizeof(struct sev_data_send_cancel);
	default:				return 0;
	}

	return 0;
}
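/*
 * Issue a single SEV command to the PSP and wait for it to complete.
 * Must be called with sev_cmd_mutex held. If @psp_ret is non-NULL it
 * receives the firmware status code (0 on timeout).
 */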
static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
{
	struct psp_device *psp = psp_master;
	struct sev_device *sev;
	unsigned int phys_lsb, phys_msb;
	unsigned int reg, ret = 0;
	int buf_len;

	if (!psp || !psp->sev_data)
		return -ENODEV;

	if (psp_dead)
		return -EBUSY;

	sev = psp->sev_data;

	buf_len = sev_cmd_buffer_len(cmd);
	if (WARN_ON_ONCE(!data != !buf_len))
		return -EINVAL;

	/*
	 * Copy the incoming data to driver's scratch buffer as __pa() will not
	 * work for some memory, e.g. vmalloc'd addresses, and @data may not be
	 * physically contiguous.
	 */
	if (data)
		memcpy(sev->cmd_buf, data, buf_len);

	/* Get the physical address of the command buffer */
	phys_lsb = data ? lower_32_bits(__psp_pa(sev->cmd_buf)) : 0;
	phys_msb = data ? upper_32_bits(__psp_pa(sev->cmd_buf)) : 0;

	dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
		cmd, phys_msb, phys_lsb, psp_timeout);

	print_hex_dump_debug("(in):  ", DUMP_PREFIX_OFFSET, 16, 2, data,
			     buf_len, false);

	iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg);
	iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg);

	sev->int_rcvd = 0;

	reg = cmd;
	reg <<= SEV_CMDRESP_CMD_SHIFT;
	reg |= SEV_CMDRESP_IOC;
	iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg);

	/* wait for command completion */
	ret = sev_wait_cmd_ioc(sev, &reg, psp_timeout);
	if (ret) {
		if (psp_ret)
			*psp_ret = 0;

		dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd);
		psp_dead = true;

		return ret;
	}

	psp_timeout = psp_cmd_timeout;

	if (psp_ret)
		*psp_ret = reg & PSP_CMDRESP_ERR_MASK;

	if (reg & PSP_CMDRESP_ERR_MASK) {
		dev_dbg(sev->dev, "sev command %#x failed (%#010x)\n",
			cmd, reg & PSP_CMDRESP_ERR_MASK);
		ret = -EIO;
	}

	print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
			     buf_len, false);

	/*
	 * Copy potential output from the PSP back to data. Do this even on
	 * failure in case the caller wants to glean something from the error.
	 */
	if (data)
		memcpy(data, sev->cmd_buf, buf_len);

	return ret;
}

static int sev_do_cmd(int cmd, void *data, int *psp_ret)
{
	int rc;

	mutex_lock(&sev_cmd_mutex);
	rc = __sev_do_cmd_locked(cmd, data, psp_ret);
	mutex_unlock(&sev_cmd_mutex);

	return rc;
}
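/*
 * Transition the platform to the INIT state, registering the SEV-ES TMR
 * with firmware if one was allocated. Must be called with sev_cmd_mutex held.
 */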
static int __sev_platform_init_locked(int *error)
{
	struct psp_device *psp = psp_master;
	struct sev_data_init data;
	struct sev_device *sev;
	int rc = 0;

	if (!psp || !psp->sev_data)
		return -ENODEV;

	sev = psp->sev_data;

	if (sev->state == SEV_STATE_INIT)
		return 0;

	memset(&data, 0, sizeof(data));
	if (sev_es_tmr) {
		u64 tmr_pa;

		/*
		 * Do not include the encryption mask on the physical
		 * address of the TMR (firmware should clear it anyway).
		 */
		tmr_pa = __pa(sev_es_tmr);

		data.flags |= SEV_INIT_FLAGS_SEV_ES;
		data.tmr_address = tmr_pa;
		data.tmr_len = SEV_ES_TMR_SIZE;
	}

	rc = __sev_do_cmd_locked(SEV_CMD_INIT, &data, error);
	if (rc)
		return rc;

	sev->state = SEV_STATE_INIT;

	/* Prepare for first SEV guest launch after INIT */
	wbinvd_on_all_cpus();
	rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error);
	if (rc)
		return rc;

	dev_dbg(sev->dev, "SEV firmware initialized\n");

	return rc;
}

int sev_platform_init(int *error)
{
	int rc;

	mutex_lock(&sev_cmd_mutex);
	rc = __sev_platform_init_locked(error);
	mutex_unlock(&sev_cmd_mutex);

	return rc;
}
EXPORT_SYMBOL_GPL(sev_platform_init);

static int __sev_platform_shutdown_locked(int *error)
{
	struct sev_device *sev = psp_master->sev_data;
	int ret;

	ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
	if (ret)
		return ret;

	sev->state = SEV_STATE_UNINIT;
	dev_dbg(sev->dev, "SEV firmware shutdown\n");

	return ret;
}

static int sev_platform_shutdown(int *error)
{
	int rc;

	mutex_lock(&sev_cmd_mutex);
	rc = __sev_platform_shutdown_locked(error);
	mutex_unlock(&sev_cmd_mutex);

	return rc;
}

static int sev_get_platform_state(int *state, int *error)
{
	struct sev_user_data_status data;
	int rc;

	rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, error);
	if (rc)
		return rc;

	*state = data.state;
	return rc;
}

static int sev_ioctl_do_reset(struct sev_issue_cmd *argp, bool writable)
{
	int state, rc;

	if (!writable)
		return -EPERM;

	/*
	 * The SEV spec requires that FACTORY_RESET must be issued in the
	 * UNINIT state. Before we go further, check whether any guest is
	 * active.
	 *
	 * If FW is in the WORKING state then deny the request, otherwise
	 * issue the SHUTDOWN command to transition from INIT to UNINIT
	 * before issuing the FACTORY_RESET.
	 */
	rc = sev_get_platform_state(&state, &argp->error);
	if (rc)
		return rc;

	if (state == SEV_STATE_WORKING)
		return -EBUSY;

	if (state == SEV_STATE_INIT) {
		rc = __sev_platform_shutdown_locked(&argp->error);
		if (rc)
			return rc;
	}

	return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error);
}

static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
{
	struct sev_user_data_status data;
	int ret;

	ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, &argp->error);
	if (ret)
		return ret;

	if (copy_to_user((void __user *)argp->data, &data, sizeof(data)))
		ret = -EFAULT;

	return ret;
}

static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp, bool writable)
{
	struct sev_device *sev = psp_master->sev_data;
	int rc;

	if (!writable)
		return -EPERM;

	if (sev->state == SEV_STATE_UNINIT) {
		rc = __sev_platform_init_locked(&argp->error);
		if (rc)
			return rc;
	}

	return __sev_do_cmd_locked(cmd, NULL, &argp->error);
}
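/*
 * Generate a PEK certificate signing request for userspace. If no output
 * buffer is supplied, only the required CSR length is returned.
 */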
static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
{
	struct sev_device *sev = psp_master->sev_data;
	struct sev_user_data_pek_csr input;
	struct sev_data_pek_csr data;
	void __user *input_address;
	void *blob = NULL;
	int ret;

	if (!writable)
		return -EPERM;

	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
		return -EFAULT;

	memset(&data, 0, sizeof(data));

	/* userspace wants to query CSR length */
	if (!input.address || !input.length)
		goto cmd;

	/* allocate a physically contiguous buffer to store the CSR blob */
	input_address = (void __user *)input.address;
	if (input.length > SEV_FW_BLOB_MAX_SIZE)
		return -EFAULT;

	blob = kmalloc(input.length, GFP_KERNEL);
	if (!blob)
		return -ENOMEM;

	data.address = __psp_pa(blob);
	data.len = input.length;

cmd:
	if (sev->state == SEV_STATE_UNINIT) {
		ret = __sev_platform_init_locked(&argp->error);
		if (ret)
			goto e_free_blob;
	}

	ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, &data, &argp->error);

	 /* If we query the CSR length, FW responded with expected data. */
	input.length = data.len;

	if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
		ret = -EFAULT;
		goto e_free_blob;
	}

	if (blob) {
		if (copy_to_user(input_address, blob, input.length))
			ret = -EFAULT;
	}

e_free_blob:
	kfree(blob);
	return ret;
}

void *psp_copy_user_blob(u64 uaddr, u32 len)
{
	if (!uaddr || !len)
		return ERR_PTR(-EINVAL);

	/* verify that blob length does not exceed our limit */
	if (len > SEV_FW_BLOB_MAX_SIZE)
		return ERR_PTR(-EINVAL);

	return memdup_user((void __user *)uaddr, len);
}
EXPORT_SYMBOL_GPL(psp_copy_user_blob);

static int sev_get_api_version(void)
{
	struct sev_device *sev = psp_master->sev_data;
	struct sev_user_data_status status;
	int error = 0, ret;

	ret = sev_platform_status(&status, &error);
	if (ret) {
		dev_err(sev->dev,
			"SEV: failed to get status. Error: %#x\n", error);
		return 1;
	}

	sev->api_major = status.api_major;
	sev->api_minor = status.api_minor;
	sev->build = status.build;
	sev->state = status.state;

	return 0;
}
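/*
 * Look up an SEV firmware image for this processor, trying the most
 * specific filename first (see the naming scheme described below).
 */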
static int sev_get_firmware(struct device *dev,
			    const struct firmware **firmware)
{
	char fw_name_specific[SEV_FW_NAME_SIZE];
	char fw_name_subset[SEV_FW_NAME_SIZE];

	snprintf(fw_name_specific, sizeof(fw_name_specific),
		 "amd/amd_sev_fam%.2xh_model%.2xh.sbin",
		 boot_cpu_data.x86, boot_cpu_data.x86_model);

	snprintf(fw_name_subset, sizeof(fw_name_subset),
		 "amd/amd_sev_fam%.2xh_model%.1xxh.sbin",
		 boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4);

	/* Check for SEV FW for a particular model.
	 * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h
	 *
	 * or
	 *
	 * Check for SEV FW common to a subset of models.
	 * Ex. amd_sev_fam17h_model0xh.sbin for
	 *     Family 17h Model 00h -- Family 17h Model 0Fh
	 *
	 * or
	 *
	 * Fall-back to using generic name: sev.fw
	 */
	if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) ||
	    (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) ||
	    (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0))
		return 0;

	return -ENOENT;
}

/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
static int sev_update_firmware(struct device *dev)
{
	struct sev_data_download_firmware *data;
	const struct firmware *firmware;
	int ret, error, order;
	struct page *p;
	u64 data_size;

	if (sev_get_firmware(dev, &firmware) == -ENOENT) {
		dev_dbg(dev, "No SEV firmware file present\n");
		return -1;
	}

	/*
	 * SEV FW expects the physical address given to it to be 32
	 * byte aligned. Memory allocated has structure placed at the
	 * beginning followed by the firmware being passed to the SEV
	 * FW. Allocate enough memory for data structure + alignment
	 * padding + SEV FW.
	 */
	data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32);

	order = get_order(firmware->size + data_size);
	p = alloc_pages(GFP_KERNEL, order);
	if (!p) {
		ret = -1;
		goto fw_err;
	}

	/*
	 * Copy firmware data to a kernel allocated contiguous
	 * memory region.
	 */
	data = page_address(p);
	memcpy(page_address(p) + data_size, firmware->data, firmware->size);

	data->address = __psp_pa(page_address(p) + data_size);
	data->len = firmware->size;

	ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
	if (ret)
		dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
	else
		dev_info(dev, "SEV firmware update successful\n");

	__free_pages(p, order);

fw_err:
	release_firmware(firmware);

	return ret;
}
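/* Import the PEK and OCA certificates supplied by userspace. */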
static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
{
	struct sev_device *sev = psp_master->sev_data;
	struct sev_user_data_pek_cert_import input;
	struct sev_data_pek_cert_import data;
	void *pek_blob, *oca_blob;
	int ret;

	if (!writable)
		return -EPERM;

	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
		return -EFAULT;

	/* copy PEK certificate blob from userspace */
	pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len);
	if (IS_ERR(pek_blob))
		return PTR_ERR(pek_blob);

	data.reserved = 0;
	data.pek_cert_address = __psp_pa(pek_blob);
	data.pek_cert_len = input.pek_cert_len;

	/* copy OCA certificate blob from userspace */
	oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len);
	if (IS_ERR(oca_blob)) {
		ret = PTR_ERR(oca_blob);
		goto e_free_pek;
	}

	data.oca_cert_address = __psp_pa(oca_blob);
	data.oca_cert_len = input.oca_cert_len;

	/* If platform is not in INIT state then transition it to INIT */
	if (sev->state != SEV_STATE_INIT) {
		ret = __sev_platform_init_locked(&argp->error);
		if (ret)
			goto e_free_oca;
	}

	ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, &data, &argp->error);

e_free_oca:
	kfree(oca_blob);
e_free_pek:
	kfree(pek_blob);
	return ret;
}

static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
{
	struct sev_user_data_get_id2 input;
	struct sev_data_get_id data;
	void __user *input_address;
	void *id_blob = NULL;
	int ret;

	/* SEV GET_ID is available from SEV API v0.16 and up */
	if (!sev_version_greater_or_equal(0, 16))
		return -ENOTSUPP;

	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
		return -EFAULT;

	input_address = (void __user *)input.address;

	if (input.address && input.length) {
		id_blob = kmalloc(input.length, GFP_KERNEL);
		if (!id_blob)
			return -ENOMEM;

		data.address = __psp_pa(id_blob);
		data.len = input.length;
	} else {
		data.address = 0;
		data.len = 0;
	}

	ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, &data, &argp->error);

	/*
	 * Firmware will return the length of the ID value (either the minimum
	 * required length or the actual length written), return it to the user.
	 */
	input.length = data.len;

	if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
		ret = -EFAULT;
		goto e_free;
	}

	if (id_blob) {
		if (copy_to_user(input_address, id_blob, data.len)) {
			ret = -EFAULT;
			goto e_free;
		}
	}

e_free:
	kfree(id_blob);

	return ret;
}
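/*
 * Legacy SEV_GET_ID handler: the ID is returned in a fixed-size
 * sev_user_data_get_id buffer rather than a caller-sized one.
 */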
static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
{
	struct sev_data_get_id *data;
	u64 data_size, user_size;
	void *id_blob, *mem;
	int ret;

	/* SEV GET_ID available from SEV API v0.16 and up */
	if (!sev_version_greater_or_equal(0, 16))
		return -ENOTSUPP;

	/* SEV FW expects the buffer it fills with the ID to be
	 * 8-byte aligned. Memory allocated should be enough to
	 * hold data structure + alignment padding + memory
	 * where SEV FW writes the ID.
	 */
	data_size = ALIGN(sizeof(struct sev_data_get_id), 8);
	user_size = sizeof(struct sev_user_data_get_id);

	mem = kzalloc(data_size + user_size, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	data = mem;
	id_blob = mem + data_size;

	data->address = __psp_pa(id_blob);
	data->len = user_size;

	ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
	if (!ret) {
		if (copy_to_user((void __user *)argp->data, id_blob, data->len))
			ret = -EFAULT;
	}

	kfree(mem);

	return ret;
}

static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
{
	struct sev_device *sev = psp_master->sev_data;
	struct sev_user_data_pdh_cert_export input;
	void *pdh_blob = NULL, *cert_blob = NULL;
	struct sev_data_pdh_cert_export data;
	void __user *input_cert_chain_address;
	void __user *input_pdh_cert_address;
	int ret;

	/* If platform is not in INIT state then transition it to INIT. */
	if (sev->state != SEV_STATE_INIT) {
		if (!writable)
			return -EPERM;

		ret = __sev_platform_init_locked(&argp->error);
		if (ret)
			return ret;
	}

	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
		return -EFAULT;

	memset(&data, 0, sizeof(data));

	/* Userspace wants to query the certificate length. */
	if (!input.pdh_cert_address ||
	    !input.pdh_cert_len ||
	    !input.cert_chain_address)
		goto cmd;

	input_pdh_cert_address = (void __user *)input.pdh_cert_address;
	input_cert_chain_address = (void __user *)input.cert_chain_address;

	/* Allocate a physically contiguous buffer to store the PDH blob. */
	if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE)
		return -EFAULT;

	/* Allocate a physically contiguous buffer to store the cert chain blob. */
	if (input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE)
		return -EFAULT;

	pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL);
	if (!pdh_blob)
		return -ENOMEM;

	data.pdh_cert_address = __psp_pa(pdh_blob);
	data.pdh_cert_len = input.pdh_cert_len;

	cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL);
	if (!cert_blob) {
		ret = -ENOMEM;
		goto e_free_pdh;
	}

	data.cert_chain_address = __psp_pa(cert_blob);
	data.cert_chain_len = input.cert_chain_len;

cmd:
	ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, &data, &argp->error);

	/* If we query the length, FW responded with expected data. */
	input.cert_chain_len = data.cert_chain_len;
	input.pdh_cert_len = data.pdh_cert_len;

	if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
		ret = -EFAULT;
		goto e_free_cert;
	}

	if (pdh_blob) {
		if (copy_to_user(input_pdh_cert_address,
				 pdh_blob, input.pdh_cert_len)) {
			ret = -EFAULT;
			goto e_free_cert;
		}
	}

	if (cert_blob) {
		if (copy_to_user(input_cert_chain_address,
				 cert_blob, input.cert_chain_len))
			ret = -EFAULT;
	}

e_free_cert:
	kfree(cert_blob);
e_free_pdh:
	kfree(pdh_blob);
	return ret;
}
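/* Dispatch SEV_ISSUE_CMD ioctls from /dev/sev to the individual handlers. */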
static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sev_issue_cmd input;
	int ret = -EFAULT;
	bool writable = file->f_mode & FMODE_WRITE;

	if (!psp_master || !psp_master->sev_data)
		return -ENODEV;

	if (ioctl != SEV_ISSUE_CMD)
		return -EINVAL;

	if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd)))
		return -EFAULT;

	if (input.cmd > SEV_MAX)
		return -EINVAL;

	mutex_lock(&sev_cmd_mutex);

	switch (input.cmd) {
	case SEV_FACTORY_RESET:
		ret = sev_ioctl_do_reset(&input, writable);
		break;
	case SEV_PLATFORM_STATUS:
		ret = sev_ioctl_do_platform_status(&input);
		break;
	case SEV_PEK_GEN:
		ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input, writable);
		break;
	case SEV_PDH_GEN:
		ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input, writable);
		break;
	case SEV_PEK_CSR:
		ret = sev_ioctl_do_pek_csr(&input, writable);
		break;
	case SEV_PEK_CERT_IMPORT:
		ret = sev_ioctl_do_pek_import(&input, writable);
		break;
	case SEV_PDH_CERT_EXPORT:
		ret = sev_ioctl_do_pdh_export(&input, writable);
		break;
	case SEV_GET_ID:
		pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n");
		ret = sev_ioctl_do_get_id(&input);
		break;
	case SEV_GET_ID2:
		ret = sev_ioctl_do_get_id2(&input);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd)))
		ret = -EFAULT;
out:
	mutex_unlock(&sev_cmd_mutex);

	return ret;
}

static const struct file_operations sev_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= sev_ioctl,
};

int sev_platform_status(struct sev_user_data_status *data, int *error)
{
	return sev_do_cmd(SEV_CMD_PLATFORM_STATUS, data, error);
}
EXPORT_SYMBOL_GPL(sev_platform_status);

int sev_guest_deactivate(struct sev_data_deactivate *data, int *error)
{
	return sev_do_cmd(SEV_CMD_DEACTIVATE, data, error);
}
EXPORT_SYMBOL_GPL(sev_guest_deactivate);

int sev_guest_activate(struct sev_data_activate *data, int *error)
{
	return sev_do_cmd(SEV_CMD_ACTIVATE, data, error);
}
EXPORT_SYMBOL_GPL(sev_guest_activate);

int sev_guest_decommission(struct sev_data_decommission *data, int *error)
{
	return sev_do_cmd(SEV_CMD_DECOMMISSION, data, error);
}
EXPORT_SYMBOL_GPL(sev_guest_decommission);

int sev_guest_df_flush(int *error)
{
	return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error);
}
EXPORT_SYMBOL_GPL(sev_guest_df_flush);

static void sev_exit(struct kref *ref)
{
	misc_deregister(&misc_dev->misc);
	kfree(misc_dev);
	misc_dev = NULL;
}
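/* Create the /dev/sev misc device (once) and attach it to this SEV device. */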
static int sev_misc_init(struct sev_device *sev)
{
	struct device *dev = sev->dev;
	int ret;

	/*
	 * SEV feature support can be detected on multiple devices but the SEV
	 * FW commands must be issued on the master. During probe, we do not
	 * know the master hence we create /dev/sev on the first device probe.
	 * sev_do_cmd() finds the right master device on which to issue the
	 * command to the firmware.
	 */
	if (!misc_dev) {
		struct miscdevice *misc;

		misc_dev = kzalloc(sizeof(*misc_dev), GFP_KERNEL);
		if (!misc_dev)
			return -ENOMEM;

		misc = &misc_dev->misc;
		misc->minor = MISC_DYNAMIC_MINOR;
		misc->name = DEVICE_NAME;
		misc->fops = &sev_fops;

		ret = misc_register(misc);
		if (ret)
			return ret;

		kref_init(&misc_dev->refcount);
	} else {
		kref_get(&misc_dev->refcount);
	}

	init_waitqueue_head(&sev->int_queue);
	sev->misc = misc_dev;
	dev_dbg(dev, "registered SEV device\n");

	return 0;
}

int sev_dev_init(struct psp_device *psp)
{
	struct device *dev = psp->dev;
	struct sev_device *sev;
	int ret = -ENOMEM;

	if (!boot_cpu_has(X86_FEATURE_SEV)) {
		dev_info_once(dev, "SEV: memory encryption not enabled by BIOS\n");
		return 0;
	}

	sev = devm_kzalloc(dev, sizeof(*sev), GFP_KERNEL);
	if (!sev)
		goto e_err;

	sev->cmd_buf = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0);
	if (!sev->cmd_buf)
		goto e_sev;

	psp->sev_data = sev;

	sev->dev = dev;
	sev->psp = psp;

	sev->io_regs = psp->io_regs;

	sev->vdata = (struct sev_vdata *)psp->vdata->sev;
	if (!sev->vdata) {
		ret = -ENODEV;
		dev_err(dev, "sev: missing driver data\n");
		goto e_buf;
	}

	psp_set_sev_irq_handler(psp, sev_irq_handler, sev);

	ret = sev_misc_init(sev);
	if (ret)
		goto e_irq;

	dev_notice(dev, "sev enabled\n");

	return 0;

e_irq:
	psp_clear_sev_irq_handler(psp);
e_buf:
	devm_free_pages(dev, (unsigned long)sev->cmd_buf);
e_sev:
	devm_kfree(dev, sev);
e_err:
	psp->sev_data = NULL;

	dev_notice(dev, "sev initialization failed\n");

	return ret;
}

void sev_dev_destroy(struct psp_device *psp)
{
	struct sev_device *sev = psp->sev_data;

	if (!sev)
		return;

	if (sev->misc)
		kref_put(&misc_dev->refcount, sev_exit);

	psp_clear_sev_irq_handler(psp);
}

int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
				void *data, int *error)
{
	if (!filep || filep->f_op != &sev_fops)
		return -EBADF;

	return sev_do_cmd(cmd, data, error);
}
EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
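/*
 * Late PSP/SEV setup: query the firmware version, optionally update the
 * firmware, allocate the SEV-ES TMR and issue the platform INIT command.
 */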
void sev_pci_init(void)
{
	struct sev_device *sev = psp_master->sev_data;
	struct page *tmr_page;
	int error, rc;

	if (!sev)
		return;

	psp_timeout = psp_probe_timeout;

	if (sev_get_api_version())
		goto err;

	/*
	 * If the platform is not in the UNINIT state then firmware upgrade
	 * and/or the platform INIT command will fail. These commands require
	 * the UNINIT state.
	 *
	 * In a normal boot we should never run into a case where the firmware
	 * is not in the UNINIT state on boot. But in case of kexec boot, a
	 * reboot may not go through a typical shutdown sequence and may leave
	 * the firmware in the INIT or WORKING state.
	 */
	if (sev->state != SEV_STATE_UNINIT) {
		sev_platform_shutdown(NULL);
		sev->state = SEV_STATE_UNINIT;
	}

	if (sev_version_greater_or_equal(0, 15) &&
	    sev_update_firmware(sev->dev) == 0)
		sev_get_api_version();

	/* Obtain the TMR memory area for SEV-ES use */
	tmr_page = alloc_pages(GFP_KERNEL, get_order(SEV_ES_TMR_SIZE));
	if (tmr_page) {
		sev_es_tmr = page_address(tmr_page);
	} else {
		sev_es_tmr = NULL;
		dev_warn(sev->dev,
			 "SEV: TMR allocation failed, SEV-ES support unavailable\n");
	}

	/* Initialize the platform */
	rc = sev_platform_init(&error);
	if (rc && (error == SEV_RET_SECURE_DATA_INVALID)) {
		/*
		 * INIT command returned an integrity check failure
		 * status code, meaning that firmware load and
		 * validation of SEV related persistent data has
		 * failed and persistent state has been erased.
		 * Retrying INIT command here should succeed.
		 */
		dev_dbg(sev->dev, "SEV: retrying INIT command");
		rc = sev_platform_init(&error);
	}

	if (rc) {
		dev_err(sev->dev, "SEV: failed to INIT error %#x\n", error);
		return;
	}

	dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major,
		 sev->api_minor, sev->build);

	return;

err:
	psp_master->sev_data = NULL;
}

void sev_pci_exit(void)
{
	if (!psp_master->sev_data)
		return;

	sev_platform_shutdown(NULL);

	if (sev_es_tmr) {
		/* The TMR area was encrypted, flush it from the cache */
		wbinvd_on_all_cpus();

		free_pages((unsigned long)sev_es_tmr,
			   get_order(SEV_ES_TMR_SIZE));
		sev_es_tmr = NULL;
	}
}