// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015, 2016 IBM Corporation
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Stefan Berger <stefanb@us.ibm.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for vTPM (vTPM proxy driver)
 */

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/miscdevice.h>
#include <linux/vtpm_proxy.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/poll.h>
#include <linux/compat.h>

#include "tpm.h"

#define VTPM_PROXY_REQ_COMPLETE_FLAG	BIT(0)

struct proxy_dev {
	struct tpm_chip *chip;

	u32 flags;			/* public API flags */

	wait_queue_head_t wq;

	struct mutex buf_lock;		/* protect buffer and flags */

	long state;			/* internal state */
#define STATE_OPENED_FLAG	 BIT(0)
#define STATE_WAIT_RESPONSE_FLAG BIT(1)	/* waiting for emulator response */
#define STATE_REGISTERED_FLAG	 BIT(2)
#define STATE_DRIVER_COMMAND	 BIT(3)	/* sending a driver specific command */

	size_t req_len;			/* length of queued TPM request */
	size_t resp_len;		/* length of queued TPM response */
	u8 buffer[TPM_BUFSIZE];		/* request/response buffer */

	struct work_struct work;	/* task that retrieves TPM timeouts */
};

/* all supported flags */
#define VTPM_PROXY_FLAGS_ALL  (VTPM_PROXY_FLAG_TPM2)

static struct workqueue_struct *workqueue;

static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev);

/*
 * Functions related to 'server side'
 */

/**
 * vtpm_proxy_fops_read - Read TPM commands on 'server side'
 *
 * @filp: file pointer
 * @buf: read buffer
 * @count: number of bytes to read
 * @off: offset
 *
 * Return:
 *	Number of bytes read or negative error code
 */
static ssize_t vtpm_proxy_fops_read(struct file *filp, char __user *buf,
				    size_t count, loff_t *off)
{
	struct proxy_dev *proxy_dev = filp->private_data;
	size_t len;
	int sig, rc;

	sig = wait_event_interruptible(proxy_dev->wq,
				       proxy_dev->req_len != 0 ||
				       !(proxy_dev->state & STATE_OPENED_FLAG));
	if (sig)
		return -EINTR;

	mutex_lock(&proxy_dev->buf_lock);

	if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
		mutex_unlock(&proxy_dev->buf_lock);
		return -EPIPE;
	}

	len = proxy_dev->req_len;

	if (count < len) {
		mutex_unlock(&proxy_dev->buf_lock);
		pr_debug("Invalid size in recv: count=%zd, req_len=%zd\n",
			 count, len);
		return -EIO;
	}

	rc = copy_to_user(buf, proxy_dev->buffer, len);
	memset(proxy_dev->buffer, 0, len);
	proxy_dev->req_len = 0;

	if (!rc)
		proxy_dev->state |= STATE_WAIT_RESPONSE_FLAG;

	mutex_unlock(&proxy_dev->buf_lock);

	if (rc)
		return -EFAULT;

	return len;
}

/**
 * vtpm_proxy_fops_write - Write TPM responses on 'server side'
 *
 * @filp: file pointer
 * @buf: write buffer
 * @count: number of bytes to write
 * @off: offset
 *
 * Return:
 *	Number of bytes written or negative error value
 */
static ssize_t vtpm_proxy_fops_write(struct file *filp, const char __user *buf,
				     size_t count, loff_t *off)
{
	struct proxy_dev *proxy_dev = filp->private_data;

	mutex_lock(&proxy_dev->buf_lock);

	if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
		mutex_unlock(&proxy_dev->buf_lock);
		return -EPIPE;
	}

	if (count > sizeof(proxy_dev->buffer) ||
	    !(proxy_dev->state & STATE_WAIT_RESPONSE_FLAG)) {
		mutex_unlock(&proxy_dev->buf_lock);
		return -EIO;
	}

	proxy_dev->state &= ~STATE_WAIT_RESPONSE_FLAG;

	proxy_dev->req_len = 0;

	if (copy_from_user(proxy_dev->buffer, buf, count)) {
		mutex_unlock(&proxy_dev->buf_lock);
		return -EFAULT;
	}

	proxy_dev->resp_len = count;

	mutex_unlock(&proxy_dev->buf_lock);

	wake_up_interruptible(&proxy_dev->wq);

	return count;
}

/*
 * vtpm_proxy_fops_poll - Poll status on 'server side'
 *
 * @filp: file pointer
 * @wait: poll table
 *
 * Return: Poll flags
 */
static __poll_t vtpm_proxy_fops_poll(struct file *filp, poll_table *wait)
{
	struct proxy_dev *proxy_dev = filp->private_data;
	__poll_t ret;

	poll_wait(filp, &proxy_dev->wq, wait);

	ret = EPOLLOUT;

	mutex_lock(&proxy_dev->buf_lock);

	if (proxy_dev->req_len)
		ret |= EPOLLIN | EPOLLRDNORM;

	if (!(proxy_dev->state & STATE_OPENED_FLAG))
		ret |= EPOLLHUP;

	mutex_unlock(&proxy_dev->buf_lock);

	return ret;
}

/*
 * vtpm_proxy_fops_open - Open vTPM device on 'server side'
 *
 * @filp: file pointer
 *
 * Called when setting up the anonymous file descriptor
 */
static void vtpm_proxy_fops_open(struct file *filp)
{
	struct proxy_dev *proxy_dev = filp->private_data;

	proxy_dev->state |= STATE_OPENED_FLAG;
}

/**
 * vtpm_proxy_fops_undo_open - counter-part to vtpm_proxy_fops_open
 *	Call to undo vtpm_proxy_fops_open
 *
 * @proxy_dev: tpm proxy device
 */
static void vtpm_proxy_fops_undo_open(struct proxy_dev *proxy_dev)
{
	mutex_lock(&proxy_dev->buf_lock);

	proxy_dev->state &= ~STATE_OPENED_FLAG;

	mutex_unlock(&proxy_dev->buf_lock);

	/* no more TPM responses -- wake up anyone waiting for them */
	wake_up_interruptible(&proxy_dev->wq);
}

/*
 * vtpm_proxy_fops_release - Close 'server side'
 *
 * @inode: inode
 * @filp: file pointer
 * Return:
 *	Always returns 0.
 */
static int vtpm_proxy_fops_release(struct inode *inode, struct file *filp)
{
	struct proxy_dev *proxy_dev = filp->private_data;

	filp->private_data = NULL;

	vtpm_proxy_delete_device(proxy_dev);

	return 0;
}

static const struct file_operations vtpm_proxy_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.read = vtpm_proxy_fops_read,
	.write = vtpm_proxy_fops_write,
	.poll = vtpm_proxy_fops_poll,
	.release = vtpm_proxy_fops_release,
};
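/*
 * Illustrative sketch (not part of the driver): a user-space TPM emulator
 * holding the anonymous fd returned by VTPM_PROXY_IOC_NEW_DEV might drive
 * the file operations above in a loop roughly like this. The names vtpm_fd
 * and emulate_tpm_command() are hypothetical and error handling is omitted;
 * the buffers are sized to match TPM_BUFSIZE:
 *
 *	struct pollfd pfd = { .fd = vtpm_fd, .events = POLLIN };
 *	unsigned char cmd[4096], rsp[4096];
 *	ssize_t n, rsp_len;
 *
 *	for (;;) {
 *		poll(&pfd, 1, -1);
 *		if (pfd.revents & POLLHUP)
 *			break;				// device pair was torn down
 *		n = read(vtpm_fd, cmd, sizeof(cmd));	// fetch queued TPM command
 *		if (n <= 0)
 *			continue;
 *		rsp_len = emulate_tpm_command(cmd, n, rsp, sizeof(rsp));
 *		write(vtpm_fd, rsp, rsp_len);		// queue the TPM response
 *	}
 */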
/*
 * Functions invoked by the core TPM driver to send TPM commands to
 * 'server side' and receive responses from there.
 */

/*
 * Called when core TPM driver reads TPM responses from 'server side'
 *
 * @chip: tpm chip to use
 * @buf: receive buffer
 * @count: bytes to read
 * Return:
 *	Number of TPM response bytes read, negative error value otherwise
 */
static int vtpm_proxy_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
	size_t len;

	/* process gone ? */
	mutex_lock(&proxy_dev->buf_lock);

	if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
		mutex_unlock(&proxy_dev->buf_lock);
		return -EPIPE;
	}

	len = proxy_dev->resp_len;
	if (count < len) {
		dev_err(&chip->dev,
			"Invalid size in recv: count=%zd, resp_len=%zd\n",
			count, len);
		len = -EIO;
		goto out;
	}

	memcpy(buf, proxy_dev->buffer, len);
	proxy_dev->resp_len = 0;

out:
	mutex_unlock(&proxy_dev->buf_lock);

	return len;
}

static int vtpm_proxy_is_driver_command(struct tpm_chip *chip,
					u8 *buf, size_t count)
{
	struct tpm_header *hdr = (struct tpm_header *)buf;

	if (count < sizeof(struct tpm_header))
		return 0;

	if (chip->flags & TPM_CHIP_FLAG_TPM2) {
		switch (be32_to_cpu(hdr->ordinal)) {
		case TPM2_CC_SET_LOCALITY:
			return 1;
		}
	} else {
		switch (be32_to_cpu(hdr->ordinal)) {
		case TPM_ORD_SET_LOCALITY:
			return 1;
		}
	}
	return 0;
}

/*
 * Called when core TPM driver forwards TPM requests to 'server side'.
 *
 * @chip: tpm chip to use
 * @buf: send buffer
 * @count: bytes to send
 *
 * Return:
 *	0 in case of success, negative error value otherwise.
 */
static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);

	if (count > sizeof(proxy_dev->buffer)) {
		dev_err(&chip->dev,
			"Invalid size in send: count=%zd, buffer size=%zd\n",
			count, sizeof(proxy_dev->buffer));
		return -EIO;
	}

	if (!(proxy_dev->state & STATE_DRIVER_COMMAND) &&
	    vtpm_proxy_is_driver_command(chip, buf, count))
		return -EFAULT;

	mutex_lock(&proxy_dev->buf_lock);

	if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
		mutex_unlock(&proxy_dev->buf_lock);
		return -EPIPE;
	}

	proxy_dev->resp_len = 0;

	proxy_dev->req_len = count;
	memcpy(proxy_dev->buffer, buf, count);

	proxy_dev->state &= ~STATE_WAIT_RESPONSE_FLAG;

	mutex_unlock(&proxy_dev->buf_lock);

	wake_up_interruptible(&proxy_dev->wq);

	return 0;
}

static void vtpm_proxy_tpm_op_cancel(struct tpm_chip *chip)
{
	/* not supported */
}

static u8 vtpm_proxy_tpm_op_status(struct tpm_chip *chip)
{
	struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);

	if (proxy_dev->resp_len)
		return VTPM_PROXY_REQ_COMPLETE_FLAG;

	return 0;
}

static bool vtpm_proxy_tpm_req_canceled(struct tpm_chip *chip, u8 status)
{
	struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
	bool ret;

	mutex_lock(&proxy_dev->buf_lock);

	ret = !(proxy_dev->state & STATE_OPENED_FLAG);

	mutex_unlock(&proxy_dev->buf_lock);

	return ret;
}

static int vtpm_proxy_request_locality(struct tpm_chip *chip, int locality)
{
	struct tpm_buf buf;
	int rc;
	const struct tpm_header *header;
	struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);

	if (chip->flags & TPM_CHIP_FLAG_TPM2)
		rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS,
				  TPM2_CC_SET_LOCALITY);
	else
		rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND,
				  TPM_ORD_SET_LOCALITY);
	if (rc)
		return rc;
	tpm_buf_append_u8(&buf, locality);

	proxy_dev->state |= STATE_DRIVER_COMMAND;

	rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to set locality");

	proxy_dev->state &= ~STATE_DRIVER_COMMAND;

	if (rc < 0) {
		locality = rc;
		goto out;
	}

	header = (const struct tpm_header *)buf.data;
	rc = be32_to_cpu(header->return_code);
	if (rc)
		locality = -1;

out:
	tpm_buf_destroy(&buf);

	return locality;
}

static const struct tpm_class_ops vtpm_proxy_tpm_ops = {
	.flags = TPM_OPS_AUTO_STARTUP,
	.recv = vtpm_proxy_tpm_op_recv,
	.send = vtpm_proxy_tpm_op_send,
	.cancel = vtpm_proxy_tpm_op_cancel,
	.status = vtpm_proxy_tpm_op_status,
	.req_complete_mask = VTPM_PROXY_REQ_COMPLETE_FLAG,
	.req_complete_val = VTPM_PROXY_REQ_COMPLETE_FLAG,
	.req_canceled = vtpm_proxy_tpm_req_canceled,
	.request_locality = vtpm_proxy_request_locality,
};

/*
 * Code related to the startup of the TPM 2 and startup of TPM 1.2 +
 * retrieval of timeouts and durations.
 */

static void vtpm_proxy_work(struct work_struct *work)
{
	struct proxy_dev *proxy_dev = container_of(work, struct proxy_dev,
						   work);
	int rc;

	rc = tpm_chip_register(proxy_dev->chip);
	if (rc)
		vtpm_proxy_fops_undo_open(proxy_dev);
	else
		proxy_dev->state |= STATE_REGISTERED_FLAG;
}

/*
 * vtpm_proxy_work_stop: make sure the work has finished
 *
 * This function is useful when user space closed the fd
 * while the driver still determines timeouts.
 */
static void vtpm_proxy_work_stop(struct proxy_dev *proxy_dev)
{
	vtpm_proxy_fops_undo_open(proxy_dev);
	flush_work(&proxy_dev->work);
}

/*
 * vtpm_proxy_work_start: Schedule the work for TPM 1.2 & 2 initialization
 */
static inline void vtpm_proxy_work_start(struct proxy_dev *proxy_dev)
{
	queue_work(workqueue, &proxy_dev->work);
}

/*
 * Code related to creation and deletion of device pairs
 */
static struct proxy_dev *vtpm_proxy_create_proxy_dev(void)
{
	struct proxy_dev *proxy_dev;
	struct tpm_chip *chip;
	int err;

	proxy_dev = kzalloc(sizeof(*proxy_dev), GFP_KERNEL);
	if (proxy_dev == NULL)
		return ERR_PTR(-ENOMEM);

	init_waitqueue_head(&proxy_dev->wq);
	mutex_init(&proxy_dev->buf_lock);
	INIT_WORK(&proxy_dev->work, vtpm_proxy_work);

	chip = tpm_chip_alloc(NULL, &vtpm_proxy_tpm_ops);
	if (IS_ERR(chip)) {
		err = PTR_ERR(chip);
		goto err_proxy_dev_free;
	}
	dev_set_drvdata(&chip->dev, proxy_dev);

	proxy_dev->chip = chip;

	return proxy_dev;

err_proxy_dev_free:
	kfree(proxy_dev);

	return ERR_PTR(err);
}

/*
 * Undo what has been done in vtpm_proxy_create_proxy_dev
 */
static inline void vtpm_proxy_delete_proxy_dev(struct proxy_dev *proxy_dev)
{
	put_device(&proxy_dev->chip->dev); /* frees chip */
	kfree(proxy_dev);
}

/*
 * Create a /dev/tpm%d and 'server side' file descriptor pair
 *
 * Return:
 *	Returns file pointer on success, an error value otherwise
 */
static struct file *vtpm_proxy_create_device(
				 struct vtpm_proxy_new_dev *vtpm_new_dev)
{
	struct proxy_dev *proxy_dev;
	int rc, fd;
	struct file *file;

	if (vtpm_new_dev->flags & ~VTPM_PROXY_FLAGS_ALL)
		return ERR_PTR(-EOPNOTSUPP);

	proxy_dev = vtpm_proxy_create_proxy_dev();
	if (IS_ERR(proxy_dev))
		return ERR_CAST(proxy_dev);

	proxy_dev->flags = vtpm_new_dev->flags;

	/* setup an anonymous file for the server-side */
	fd = get_unused_fd_flags(O_RDWR);
	if (fd < 0) {
		rc = fd;
		goto err_delete_proxy_dev;
	}

	file = anon_inode_getfile("[vtpms]", &vtpm_proxy_fops, proxy_dev,
				  O_RDWR);
	if (IS_ERR(file)) {
		rc = PTR_ERR(file);
		goto err_put_unused_fd;
	}

	/* from now on we can unwind with put_unused_fd() + fput() */
	/* simulate an open() on the server side */
	vtpm_proxy_fops_open(file);

	if (proxy_dev->flags & VTPM_PROXY_FLAG_TPM2)
		proxy_dev->chip->flags |= TPM_CHIP_FLAG_TPM2;

	vtpm_proxy_work_start(proxy_dev);

	vtpm_new_dev->fd = fd;
	vtpm_new_dev->major = MAJOR(proxy_dev->chip->dev.devt);
	vtpm_new_dev->minor = MINOR(proxy_dev->chip->dev.devt);
	vtpm_new_dev->tpm_num = proxy_dev->chip->dev_num;

	return file;

err_put_unused_fd:
	put_unused_fd(fd);

err_delete_proxy_dev:
	vtpm_proxy_delete_proxy_dev(proxy_dev);

	return ERR_PTR(rc);
}

/*
 * Counterpart to vtpm_proxy_create_device.
 */
static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev)
{
	vtpm_proxy_work_stop(proxy_dev);

	/*
	 * A client may hold the 'ops' lock, so let it know that the server
	 * side shuts down before we try to grab the 'ops' lock when
	 * unregistering the chip.
	 */
	vtpm_proxy_fops_undo_open(proxy_dev);

	if (proxy_dev->state & STATE_REGISTERED_FLAG)
		tpm_chip_unregister(proxy_dev->chip);

	vtpm_proxy_delete_proxy_dev(proxy_dev);
}

/*
 * Code related to the control device /dev/vtpmx
 */

/**
 * vtpmx_ioc_new_dev - handler for the %VTPM_PROXY_IOC_NEW_DEV ioctl
 * @file:	/dev/vtpmx
 * @ioctl:	the ioctl number
 * @arg:	pointer to the struct vtpm_proxy_new_dev
 *
 * Creates an anonymous file that is used by the process acting as a TPM to
 * communicate with the client processes. The function will also add a new TPM
 * device through which data is proxied to this TPM acting process. The caller
 * will be provided with a file descriptor to communicate with the clients and
 * major and minor numbers for the TPM device.
 */
static long vtpmx_ioc_new_dev(struct file *file, unsigned int ioctl,
			      unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct vtpm_proxy_new_dev __user *vtpm_new_dev_p;
	struct vtpm_proxy_new_dev vtpm_new_dev;
	struct file *vtpm_file;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	vtpm_new_dev_p = argp;

	if (copy_from_user(&vtpm_new_dev, vtpm_new_dev_p,
			   sizeof(vtpm_new_dev)))
		return -EFAULT;

	vtpm_file = vtpm_proxy_create_device(&vtpm_new_dev);
	if (IS_ERR(vtpm_file))
		return PTR_ERR(vtpm_file);

	if (copy_to_user(vtpm_new_dev_p, &vtpm_new_dev,
			 sizeof(vtpm_new_dev))) {
		put_unused_fd(vtpm_new_dev.fd);
		fput(vtpm_file);
		return -EFAULT;
	}

	fd_install(vtpm_new_dev.fd, vtpm_file);
	return 0;
}
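/*
 * Illustrative sketch (not part of the driver): how a privileged user-space
 * process might request a new device pair through the ioctl above, assuming
 * CAP_SYS_ADMIN and the uapi header linux/vtpm_proxy.h. The names vtpmx_fd
 * and new_dev are hypothetical and error handling is omitted:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/vtpm_proxy.h>
 *
 *	struct vtpm_proxy_new_dev new_dev = {
 *		.flags = VTPM_PROXY_FLAG_TPM2,	// or 0 for a TPM 1.2 device
 *	};
 *	int vtpmx_fd = open("/dev/vtpmx", O_RDWR);
 *
 *	if (ioctl(vtpmx_fd, VTPM_PROXY_IOC_NEW_DEV, &new_dev) == 0) {
 *		// new_dev.fd:          'server side' fd for the emulator
 *		// new_dev.major/minor: char device numbers of the new TPM device
 *		// new_dev.tpm_num:     the %d in /dev/tpm%d
 *	}
 */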
/*
 * vtpmx_fops_ioctl: ioctl on /dev/vtpmx
 *
 * Return:
 *	Returns 0 on success, a negative error code otherwise.
 */
static long vtpmx_fops_ioctl(struct file *f, unsigned int ioctl,
			     unsigned long arg)
{
	switch (ioctl) {
	case VTPM_PROXY_IOC_NEW_DEV:
		return vtpmx_ioc_new_dev(f, ioctl, arg);
	default:
		return -ENOIOCTLCMD;
	}
}

static const struct file_operations vtpmx_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = vtpmx_fops_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.llseek = noop_llseek,
};

static struct miscdevice vtpmx_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "vtpmx",
	.fops = &vtpmx_fops,
};

static int vtpmx_init(void)
{
	return misc_register(&vtpmx_miscdev);
}

static void vtpmx_cleanup(void)
{
	misc_deregister(&vtpmx_miscdev);
}

static int __init vtpm_module_init(void)
{
	int rc;

	rc = vtpmx_init();
	if (rc) {
		pr_err("couldn't create vtpmx device\n");
		return rc;
	}

	workqueue = create_workqueue("tpm-vtpm");
	if (!workqueue) {
		pr_err("couldn't create workqueue\n");
		rc = -ENOMEM;
		goto err_vtpmx_cleanup;
	}

	return 0;

err_vtpmx_cleanup:
	vtpmx_cleanup();

	return rc;
}

static void __exit vtpm_module_exit(void)
{
	destroy_workqueue(workqueue);
	vtpmx_cleanup();
}

module_init(vtpm_module_init);
module_exit(vtpm_module_exit);

MODULE_AUTHOR("Stefan Berger (stefanb@us.ibm.com)");
MODULE_DESCRIPTION("vTPM Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");