// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Secure Encrypted Virtualization (SEV) guest driver interface
 *
 * Copyright (C) 2021 Advanced Micro Devices, Inc.
 *
 * Author: Brijesh Singh <brijesh.singh@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/set_memory.h>
#include <linux/fs.h>
#include <crypto/aead.h>
#include <linux/scatterlist.h>
#include <linux/psp-sev.h>
#include <uapi/linux/sev-guest.h>
#include <uapi/linux/psp-sev.h>

#include <asm/svm.h>
#include <asm/sev.h>

#include "sev-guest.h"

#define DEVICE_NAME	"sev-guest"
/* Length of the AAD (the message header fields starting at hdr->algo). */
#define AAD_LEN		48
#define MSG_HDR_VER	1

/*
 * AES-GCM transform plus scratch buffers used to encrypt/decrypt
 * guest<->ASP messages with the VMPCK.
 */
struct snp_guest_crypto {
	struct crypto_aead *tfm;
	u8 *iv, *authtag;		/* scratch buffers sized iv_len / a_len */
	int iv_len, a_len;
};

/*
 * Per-device state: shared (decrypted) request/response pages and certs
 * blob buffer, the crypto context, and pointers into the SNP secrets page
 * (message sequence counter and the selected VMPCK key).
 */
struct snp_guest_dev {
	struct device *dev;
	struct miscdevice misc;

	void *certs_data;
	struct snp_guest_crypto *crypto;
	struct snp_guest_msg *request, *response;
	struct snp_secrets_page_layout *layout;
	struct snp_req_data input;
	u32 *os_area_msg_seqno;		/* sequence counter in the secrets page OS area */
	u8 *vmpck;			/* NULL once the key has been disabled */
};

static u32 vmpck_id;
module_param(vmpck_id, uint, 0444);
MODULE_PARM_DESC(vmpck_id, "The VMPCK ID to use when communicating with the PSP.");

/* Mutex to serialize the shared buffer access and command handling. */
static DEFINE_MUTEX(snp_cmd_mutex);

/*
 * Return true if there is no usable VMPCK: either it was never assigned,
 * or it has been wiped (all zeros) after an unrecoverable error.
 */
static bool is_vmpck_empty(struct snp_guest_dev *snp_dev)
{
	char zero_key[VMPCK_KEY_LEN] = {0};

	if (snp_dev->vmpck)
		return !memcmp(snp_dev->vmpck, zero_key, VMPCK_KEY_LEN);

	return true;
}

/*
 * If an error is received from the host or AMD Secure Processor (ASP) there
 * are two options. Either retry the exact same encrypted request or discontinue
 * using the VMPCK.
74 * 75 * This is because in the current encryption scheme GHCB v2 uses AES-GCM to 76 * encrypt the requests. The IV for this scheme is the sequence number. GCM 77 * cannot tolerate IV reuse. 78 * 79 * The ASP FW v1.51 only increments the sequence numbers on a successful 80 * guest<->ASP back and forth and only accepts messages at its exact sequence 81 * number. 82 * 83 * So if the sequence number were to be reused the encryption scheme is 84 * vulnerable. If the sequence number were incremented for a fresh IV the ASP 85 * will reject the request. 86 */ 87 static void snp_disable_vmpck(struct snp_guest_dev *snp_dev) 88 { 89 dev_alert(snp_dev->dev, "Disabling vmpck_id %d to prevent IV reuse.\n", 90 vmpck_id); 91 memzero_explicit(snp_dev->vmpck, VMPCK_KEY_LEN); 92 snp_dev->vmpck = NULL; 93 } 94 95 static inline u64 __snp_get_msg_seqno(struct snp_guest_dev *snp_dev) 96 { 97 u64 count; 98 99 lockdep_assert_held(&snp_cmd_mutex); 100 101 /* Read the current message sequence counter from secrets pages */ 102 count = *snp_dev->os_area_msg_seqno; 103 104 return count + 1; 105 } 106 107 /* Return a non-zero on success */ 108 static u64 snp_get_msg_seqno(struct snp_guest_dev *snp_dev) 109 { 110 u64 count = __snp_get_msg_seqno(snp_dev); 111 112 /* 113 * The message sequence counter for the SNP guest request is a 64-bit 114 * value but the version 2 of GHCB specification defines a 32-bit storage 115 * for it. If the counter exceeds the 32-bit value then return zero. 116 * The caller should check the return value, but if the caller happens to 117 * not check the value and use it, then the firmware treats zero as an 118 * invalid number and will fail the message request. 
119 */ 120 if (count >= UINT_MAX) { 121 dev_err(snp_dev->dev, "request message sequence counter overflow\n"); 122 return 0; 123 } 124 125 return count; 126 } 127 128 static void snp_inc_msg_seqno(struct snp_guest_dev *snp_dev) 129 { 130 /* 131 * The counter is also incremented by the PSP, so increment it by 2 132 * and save in secrets page. 133 */ 134 *snp_dev->os_area_msg_seqno += 2; 135 } 136 137 static inline struct snp_guest_dev *to_snp_dev(struct file *file) 138 { 139 struct miscdevice *dev = file->private_data; 140 141 return container_of(dev, struct snp_guest_dev, misc); 142 } 143 144 static struct snp_guest_crypto *init_crypto(struct snp_guest_dev *snp_dev, u8 *key, size_t keylen) 145 { 146 struct snp_guest_crypto *crypto; 147 148 crypto = kzalloc(sizeof(*crypto), GFP_KERNEL_ACCOUNT); 149 if (!crypto) 150 return NULL; 151 152 crypto->tfm = crypto_alloc_aead("gcm(aes)", 0, 0); 153 if (IS_ERR(crypto->tfm)) 154 goto e_free; 155 156 if (crypto_aead_setkey(crypto->tfm, key, keylen)) 157 goto e_free_crypto; 158 159 crypto->iv_len = crypto_aead_ivsize(crypto->tfm); 160 crypto->iv = kmalloc(crypto->iv_len, GFP_KERNEL_ACCOUNT); 161 if (!crypto->iv) 162 goto e_free_crypto; 163 164 if (crypto_aead_authsize(crypto->tfm) > MAX_AUTHTAG_LEN) { 165 if (crypto_aead_setauthsize(crypto->tfm, MAX_AUTHTAG_LEN)) { 166 dev_err(snp_dev->dev, "failed to set authsize to %d\n", MAX_AUTHTAG_LEN); 167 goto e_free_iv; 168 } 169 } 170 171 crypto->a_len = crypto_aead_authsize(crypto->tfm); 172 crypto->authtag = kmalloc(crypto->a_len, GFP_KERNEL_ACCOUNT); 173 if (!crypto->authtag) 174 goto e_free_iv; 175 176 return crypto; 177 178 e_free_iv: 179 kfree(crypto->iv); 180 e_free_crypto: 181 crypto_free_aead(crypto->tfm); 182 e_free: 183 kfree(crypto); 184 185 return NULL; 186 } 187 188 static void deinit_crypto(struct snp_guest_crypto *crypto) 189 { 190 crypto_free_aead(crypto->tfm); 191 kfree(crypto->iv); 192 kfree(crypto->authtag); 193 kfree(crypto); 194 } 195 196 static int 
enc_dec_message(struct snp_guest_crypto *crypto, struct snp_guest_msg *msg, 197 u8 *src_buf, u8 *dst_buf, size_t len, bool enc) 198 { 199 struct snp_guest_msg_hdr *hdr = &msg->hdr; 200 struct scatterlist src[3], dst[3]; 201 DECLARE_CRYPTO_WAIT(wait); 202 struct aead_request *req; 203 int ret; 204 205 req = aead_request_alloc(crypto->tfm, GFP_KERNEL); 206 if (!req) 207 return -ENOMEM; 208 209 /* 210 * AEAD memory operations: 211 * +------ AAD -------+------- DATA -----+---- AUTHTAG----+ 212 * | msg header | plaintext | hdr->authtag | 213 * | bytes 30h - 5Fh | or | | 214 * | | cipher | | 215 * +------------------+------------------+----------------+ 216 */ 217 sg_init_table(src, 3); 218 sg_set_buf(&src[0], &hdr->algo, AAD_LEN); 219 sg_set_buf(&src[1], src_buf, hdr->msg_sz); 220 sg_set_buf(&src[2], hdr->authtag, crypto->a_len); 221 222 sg_init_table(dst, 3); 223 sg_set_buf(&dst[0], &hdr->algo, AAD_LEN); 224 sg_set_buf(&dst[1], dst_buf, hdr->msg_sz); 225 sg_set_buf(&dst[2], hdr->authtag, crypto->a_len); 226 227 aead_request_set_ad(req, AAD_LEN); 228 aead_request_set_tfm(req, crypto->tfm); 229 aead_request_set_callback(req, 0, crypto_req_done, &wait); 230 231 aead_request_set_crypt(req, src, dst, len, crypto->iv); 232 ret = crypto_wait_req(enc ? 
crypto_aead_encrypt(req) : crypto_aead_decrypt(req), &wait); 233 234 aead_request_free(req); 235 return ret; 236 } 237 238 static int __enc_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg, 239 void *plaintext, size_t len) 240 { 241 struct snp_guest_crypto *crypto = snp_dev->crypto; 242 struct snp_guest_msg_hdr *hdr = &msg->hdr; 243 244 memset(crypto->iv, 0, crypto->iv_len); 245 memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno)); 246 247 return enc_dec_message(crypto, msg, plaintext, msg->payload, len, true); 248 } 249 250 static int dec_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg, 251 void *plaintext, size_t len) 252 { 253 struct snp_guest_crypto *crypto = snp_dev->crypto; 254 struct snp_guest_msg_hdr *hdr = &msg->hdr; 255 256 /* Build IV with response buffer sequence number */ 257 memset(crypto->iv, 0, crypto->iv_len); 258 memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno)); 259 260 return enc_dec_message(crypto, msg, msg->payload, plaintext, len, false); 261 } 262 263 static int verify_and_dec_payload(struct snp_guest_dev *snp_dev, void *payload, u32 sz) 264 { 265 struct snp_guest_crypto *crypto = snp_dev->crypto; 266 struct snp_guest_msg *resp = snp_dev->response; 267 struct snp_guest_msg *req = snp_dev->request; 268 struct snp_guest_msg_hdr *req_hdr = &req->hdr; 269 struct snp_guest_msg_hdr *resp_hdr = &resp->hdr; 270 271 dev_dbg(snp_dev->dev, "response [seqno %lld type %d version %d sz %d]\n", 272 resp_hdr->msg_seqno, resp_hdr->msg_type, resp_hdr->msg_version, resp_hdr->msg_sz); 273 274 /* Verify that the sequence counter is incremented by 1 */ 275 if (unlikely(resp_hdr->msg_seqno != (req_hdr->msg_seqno + 1))) 276 return -EBADMSG; 277 278 /* Verify response message type and version number. 
*/ 279 if (resp_hdr->msg_type != (req_hdr->msg_type + 1) || 280 resp_hdr->msg_version != req_hdr->msg_version) 281 return -EBADMSG; 282 283 /* 284 * If the message size is greater than our buffer length then return 285 * an error. 286 */ 287 if (unlikely((resp_hdr->msg_sz + crypto->a_len) > sz)) 288 return -EBADMSG; 289 290 /* Decrypt the payload */ 291 return dec_payload(snp_dev, resp, payload, resp_hdr->msg_sz + crypto->a_len); 292 } 293 294 static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8 type, 295 void *payload, size_t sz) 296 { 297 struct snp_guest_msg *req = snp_dev->request; 298 struct snp_guest_msg_hdr *hdr = &req->hdr; 299 300 memset(req, 0, sizeof(*req)); 301 302 hdr->algo = SNP_AEAD_AES_256_GCM; 303 hdr->hdr_version = MSG_HDR_VER; 304 hdr->hdr_sz = sizeof(*hdr); 305 hdr->msg_type = type; 306 hdr->msg_version = version; 307 hdr->msg_seqno = seqno; 308 hdr->msg_vmpck = vmpck_id; 309 hdr->msg_sz = sz; 310 311 /* Verify the sequence number is non-zero */ 312 if (!hdr->msg_seqno) 313 return -ENOSR; 314 315 dev_dbg(snp_dev->dev, "request [seqno %lld type %d version %d sz %d]\n", 316 hdr->msg_seqno, hdr->msg_type, hdr->msg_version, hdr->msg_sz); 317 318 return __enc_payload(snp_dev, req, payload, sz); 319 } 320 321 static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, int msg_ver, 322 u8 type, void *req_buf, size_t req_sz, void *resp_buf, 323 u32 resp_sz, __u64 *fw_err) 324 { 325 unsigned long err; 326 u64 seqno; 327 int rc; 328 329 /* Get message sequence and verify that its a non-zero */ 330 seqno = snp_get_msg_seqno(snp_dev); 331 if (!seqno) 332 return -EIO; 333 334 memset(snp_dev->response, 0, sizeof(struct snp_guest_msg)); 335 336 /* Encrypt the userspace provided payload */ 337 rc = enc_payload(snp_dev, seqno, msg_ver, type, req_buf, req_sz); 338 if (rc) 339 return rc; 340 341 /* 342 * Call firmware to process the request. 
In this function the encrypted 343 * message enters shared memory with the host. So after this call the 344 * sequence number must be incremented or the VMPCK must be deleted to 345 * prevent reuse of the IV. 346 */ 347 rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err); 348 349 /* 350 * If the extended guest request fails due to having too small of a 351 * certificate data buffer, retry the same guest request without the 352 * extended data request in order to increment the sequence number 353 * and thus avoid IV reuse. 354 */ 355 if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST && 356 err == SNP_GUEST_REQ_INVALID_LEN) { 357 const unsigned int certs_npages = snp_dev->input.data_npages; 358 359 exit_code = SVM_VMGEXIT_GUEST_REQUEST; 360 361 /* 362 * If this call to the firmware succeeds, the sequence number can 363 * be incremented allowing for continued use of the VMPCK. If 364 * there is an error reflected in the return value, this value 365 * is checked further down and the result will be the deletion 366 * of the VMPCK and the error code being propagated back to the 367 * user as an ioctl() return code. 368 */ 369 rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err); 370 371 /* 372 * Override the error to inform callers the given extended 373 * request buffer size was too small and give the caller the 374 * required buffer size. 375 */ 376 err = SNP_GUEST_REQ_INVALID_LEN; 377 snp_dev->input.data_npages = certs_npages; 378 } 379 380 /* 381 * Increment the message sequence number. There is no harm in doing 382 * this now because decryption uses the value stored in the response 383 * structure and any failure will wipe the VMPCK, preventing further 384 * use anyway. 385 */ 386 snp_inc_msg_seqno(snp_dev); 387 388 if (fw_err) 389 *fw_err = err; 390 391 /* 392 * If an extended guest request was issued and the supplied certificate 393 * buffer was not large enough, a standard guest request was issued to 394 * prevent IV reuse. 
If the standard request was successful, return -EIO 395 * back to the caller as would have originally been returned. 396 */ 397 if (!rc && err == SNP_GUEST_REQ_INVALID_LEN) 398 return -EIO; 399 400 if (rc) { 401 dev_alert(snp_dev->dev, 402 "Detected error from ASP request. rc: %d, fw_err: %llu\n", 403 rc, *fw_err); 404 goto disable_vmpck; 405 } 406 407 rc = verify_and_dec_payload(snp_dev, resp_buf, resp_sz); 408 if (rc) { 409 dev_alert(snp_dev->dev, 410 "Detected unexpected decode failure from ASP. rc: %d\n", 411 rc); 412 goto disable_vmpck; 413 } 414 415 return 0; 416 417 disable_vmpck: 418 snp_disable_vmpck(snp_dev); 419 return rc; 420 } 421 422 static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg) 423 { 424 struct snp_guest_crypto *crypto = snp_dev->crypto; 425 struct snp_report_resp *resp; 426 struct snp_report_req req; 427 int rc, resp_len; 428 429 lockdep_assert_held(&snp_cmd_mutex); 430 431 if (!arg->req_data || !arg->resp_data) 432 return -EINVAL; 433 434 if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req))) 435 return -EFAULT; 436 437 /* 438 * The intermediate response buffer is used while decrypting the 439 * response payload. Make sure that it has enough space to cover the 440 * authtag. 
441 */ 442 resp_len = sizeof(resp->data) + crypto->a_len; 443 resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT); 444 if (!resp) 445 return -ENOMEM; 446 447 rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg->msg_version, 448 SNP_MSG_REPORT_REQ, &req, sizeof(req), resp->data, 449 resp_len, &arg->fw_err); 450 if (rc) 451 goto e_free; 452 453 if (copy_to_user((void __user *)arg->resp_data, resp, sizeof(*resp))) 454 rc = -EFAULT; 455 456 e_free: 457 kfree(resp); 458 return rc; 459 } 460 461 static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg) 462 { 463 struct snp_guest_crypto *crypto = snp_dev->crypto; 464 struct snp_derived_key_resp resp = {0}; 465 struct snp_derived_key_req req; 466 int rc, resp_len; 467 /* Response data is 64 bytes and max authsize for GCM is 16 bytes. */ 468 u8 buf[64 + 16]; 469 470 lockdep_assert_held(&snp_cmd_mutex); 471 472 if (!arg->req_data || !arg->resp_data) 473 return -EINVAL; 474 475 /* 476 * The intermediate response buffer is used while decrypting the 477 * response payload. Make sure that it has enough space to cover the 478 * authtag. 479 */ 480 resp_len = sizeof(resp.data) + crypto->a_len; 481 if (sizeof(buf) < resp_len) 482 return -ENOMEM; 483 484 if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req))) 485 return -EFAULT; 486 487 rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg->msg_version, 488 SNP_MSG_KEY_REQ, &req, sizeof(req), buf, resp_len, 489 &arg->fw_err); 490 if (rc) 491 return rc; 492 493 memcpy(resp.data, buf, sizeof(resp.data)); 494 if (copy_to_user((void __user *)arg->resp_data, &resp, sizeof(resp))) 495 rc = -EFAULT; 496 497 /* The response buffer contains the sensitive data, explicitly clear it. 
*/ 498 memzero_explicit(buf, sizeof(buf)); 499 memzero_explicit(&resp, sizeof(resp)); 500 return rc; 501 } 502 503 static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg) 504 { 505 struct snp_guest_crypto *crypto = snp_dev->crypto; 506 struct snp_ext_report_req req; 507 struct snp_report_resp *resp; 508 int ret, npages = 0, resp_len; 509 510 lockdep_assert_held(&snp_cmd_mutex); 511 512 if (!arg->req_data || !arg->resp_data) 513 return -EINVAL; 514 515 if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req))) 516 return -EFAULT; 517 518 /* userspace does not want certificate data */ 519 if (!req.certs_len || !req.certs_address) 520 goto cmd; 521 522 if (req.certs_len > SEV_FW_BLOB_MAX_SIZE || 523 !IS_ALIGNED(req.certs_len, PAGE_SIZE)) 524 return -EINVAL; 525 526 if (!access_ok((const void __user *)req.certs_address, req.certs_len)) 527 return -EFAULT; 528 529 /* 530 * Initialize the intermediate buffer with all zeros. This buffer 531 * is used in the guest request message to get the certs blob from 532 * the host. If host does not supply any certs in it, then copy 533 * zeros to indicate that certificate data was not provided. 534 */ 535 memset(snp_dev->certs_data, 0, req.certs_len); 536 npages = req.certs_len >> PAGE_SHIFT; 537 cmd: 538 /* 539 * The intermediate response buffer is used while decrypting the 540 * response payload. Make sure that it has enough space to cover the 541 * authtag. 
542 */ 543 resp_len = sizeof(resp->data) + crypto->a_len; 544 resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT); 545 if (!resp) 546 return -ENOMEM; 547 548 snp_dev->input.data_npages = npages; 549 ret = handle_guest_request(snp_dev, SVM_VMGEXIT_EXT_GUEST_REQUEST, arg->msg_version, 550 SNP_MSG_REPORT_REQ, &req.data, 551 sizeof(req.data), resp->data, resp_len, &arg->fw_err); 552 553 /* If certs length is invalid then copy the returned length */ 554 if (arg->fw_err == SNP_GUEST_REQ_INVALID_LEN) { 555 req.certs_len = snp_dev->input.data_npages << PAGE_SHIFT; 556 557 if (copy_to_user((void __user *)arg->req_data, &req, sizeof(req))) 558 ret = -EFAULT; 559 } 560 561 if (ret) 562 goto e_free; 563 564 if (npages && 565 copy_to_user((void __user *)req.certs_address, snp_dev->certs_data, 566 req.certs_len)) { 567 ret = -EFAULT; 568 goto e_free; 569 } 570 571 if (copy_to_user((void __user *)arg->resp_data, resp, sizeof(*resp))) 572 ret = -EFAULT; 573 574 e_free: 575 kfree(resp); 576 return ret; 577 } 578 579 static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) 580 { 581 struct snp_guest_dev *snp_dev = to_snp_dev(file); 582 void __user *argp = (void __user *)arg; 583 struct snp_guest_request_ioctl input; 584 int ret = -ENOTTY; 585 586 if (copy_from_user(&input, argp, sizeof(input))) 587 return -EFAULT; 588 589 input.fw_err = 0xff; 590 591 /* Message version must be non-zero */ 592 if (!input.msg_version) 593 return -EINVAL; 594 595 mutex_lock(&snp_cmd_mutex); 596 597 /* Check if the VMPCK is not empty */ 598 if (is_vmpck_empty(snp_dev)) { 599 dev_err_ratelimited(snp_dev->dev, "VMPCK is disabled\n"); 600 mutex_unlock(&snp_cmd_mutex); 601 return -ENOTTY; 602 } 603 604 switch (ioctl) { 605 case SNP_GET_REPORT: 606 ret = get_report(snp_dev, &input); 607 break; 608 case SNP_GET_DERIVED_KEY: 609 ret = get_derived_key(snp_dev, &input); 610 break; 611 case SNP_GET_EXT_REPORT: 612 ret = get_ext_report(snp_dev, &input); 613 break; 614 default: 615 break; 616 
} 617 618 mutex_unlock(&snp_cmd_mutex); 619 620 if (input.fw_err && copy_to_user(argp, &input, sizeof(input))) 621 return -EFAULT; 622 623 return ret; 624 } 625 626 static void free_shared_pages(void *buf, size_t sz) 627 { 628 unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT; 629 int ret; 630 631 if (!buf) 632 return; 633 634 ret = set_memory_encrypted((unsigned long)buf, npages); 635 if (ret) { 636 WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n"); 637 return; 638 } 639 640 __free_pages(virt_to_page(buf), get_order(sz)); 641 } 642 643 static void *alloc_shared_pages(struct device *dev, size_t sz) 644 { 645 unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT; 646 struct page *page; 647 int ret; 648 649 page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(sz)); 650 if (!page) 651 return NULL; 652 653 ret = set_memory_decrypted((unsigned long)page_address(page), npages); 654 if (ret) { 655 dev_err(dev, "failed to mark page shared, ret=%d\n", ret); 656 __free_pages(page, get_order(sz)); 657 return NULL; 658 } 659 660 return page_address(page); 661 } 662 663 static const struct file_operations snp_guest_fops = { 664 .owner = THIS_MODULE, 665 .unlocked_ioctl = snp_guest_ioctl, 666 }; 667 668 static u8 *get_vmpck(int id, struct snp_secrets_page_layout *layout, u32 **seqno) 669 { 670 u8 *key = NULL; 671 672 switch (id) { 673 case 0: 674 *seqno = &layout->os_area.msg_seqno_0; 675 key = layout->vmpck0; 676 break; 677 case 1: 678 *seqno = &layout->os_area.msg_seqno_1; 679 key = layout->vmpck1; 680 break; 681 case 2: 682 *seqno = &layout->os_area.msg_seqno_2; 683 key = layout->vmpck2; 684 break; 685 case 3: 686 *seqno = &layout->os_area.msg_seqno_3; 687 key = layout->vmpck3; 688 break; 689 default: 690 break; 691 } 692 693 return key; 694 } 695 696 static int __init sev_guest_probe(struct platform_device *pdev) 697 { 698 struct snp_secrets_page_layout *layout; 699 struct sev_guest_platform_data *data; 700 struct device *dev = &pdev->dev; 701 struct snp_guest_dev 
*snp_dev; 702 struct miscdevice *misc; 703 void __iomem *mapping; 704 int ret; 705 706 if (!dev->platform_data) 707 return -ENODEV; 708 709 data = (struct sev_guest_platform_data *)dev->platform_data; 710 mapping = ioremap_encrypted(data->secrets_gpa, PAGE_SIZE); 711 if (!mapping) 712 return -ENODEV; 713 714 layout = (__force void *)mapping; 715 716 ret = -ENOMEM; 717 snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL); 718 if (!snp_dev) 719 goto e_unmap; 720 721 ret = -EINVAL; 722 snp_dev->vmpck = get_vmpck(vmpck_id, layout, &snp_dev->os_area_msg_seqno); 723 if (!snp_dev->vmpck) { 724 dev_err(dev, "invalid vmpck id %d\n", vmpck_id); 725 goto e_unmap; 726 } 727 728 /* Verify that VMPCK is not zero. */ 729 if (is_vmpck_empty(snp_dev)) { 730 dev_err(dev, "vmpck id %d is null\n", vmpck_id); 731 goto e_unmap; 732 } 733 734 platform_set_drvdata(pdev, snp_dev); 735 snp_dev->dev = dev; 736 snp_dev->layout = layout; 737 738 /* Allocate the shared page used for the request and response message. 
*/ 739 snp_dev->request = alloc_shared_pages(dev, sizeof(struct snp_guest_msg)); 740 if (!snp_dev->request) 741 goto e_unmap; 742 743 snp_dev->response = alloc_shared_pages(dev, sizeof(struct snp_guest_msg)); 744 if (!snp_dev->response) 745 goto e_free_request; 746 747 snp_dev->certs_data = alloc_shared_pages(dev, SEV_FW_BLOB_MAX_SIZE); 748 if (!snp_dev->certs_data) 749 goto e_free_response; 750 751 ret = -EIO; 752 snp_dev->crypto = init_crypto(snp_dev, snp_dev->vmpck, VMPCK_KEY_LEN); 753 if (!snp_dev->crypto) 754 goto e_free_cert_data; 755 756 misc = &snp_dev->misc; 757 misc->minor = MISC_DYNAMIC_MINOR; 758 misc->name = DEVICE_NAME; 759 misc->fops = &snp_guest_fops; 760 761 /* initial the input address for guest request */ 762 snp_dev->input.req_gpa = __pa(snp_dev->request); 763 snp_dev->input.resp_gpa = __pa(snp_dev->response); 764 snp_dev->input.data_gpa = __pa(snp_dev->certs_data); 765 766 ret = misc_register(misc); 767 if (ret) 768 goto e_free_cert_data; 769 770 dev_info(dev, "Initialized SEV guest driver (using vmpck_id %d)\n", vmpck_id); 771 return 0; 772 773 e_free_cert_data: 774 free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE); 775 e_free_response: 776 free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg)); 777 e_free_request: 778 free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg)); 779 e_unmap: 780 iounmap(mapping); 781 return ret; 782 } 783 784 static int __exit sev_guest_remove(struct platform_device *pdev) 785 { 786 struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev); 787 788 free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE); 789 free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg)); 790 free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg)); 791 deinit_crypto(snp_dev->crypto); 792 misc_deregister(&snp_dev->misc); 793 794 return 0; 795 } 796 797 /* 798 * This driver is meant to be a common SEV guest interface driver and to 799 * support any SEV guest API. 
 * As such, even though it has been introduced
 * with the SEV-SNP support, it is named "sev-guest".
 */
static struct platform_driver sev_guest_driver = {
	/* sev_guest_remove() is __exit, so wrap it with __exit_p(). */
	.remove		= __exit_p(sev_guest_remove),
	.driver		= {
		.name = "sev-guest",
	},
};

module_platform_driver_probe(sev_guest_driver, sev_guest_probe);

MODULE_AUTHOR("Brijesh Singh <brijesh.singh@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD SEV Guest Driver");
MODULE_ALIAS("platform:sev-guest");