/*
 * Copyright (C) 2015, 2016 IBM Corporation
 *
 * Author: Stefan Berger <stefanb@us.ibm.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for vTPM (vTPM proxy driver)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 */

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/miscdevice.h>
#include <linux/vtpm_proxy.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/poll.h>
#include <linux/compat.h>

#include "tpm.h"

#define VTPM_PROXY_REQ_COMPLETE_FLAG	BIT(0)

struct proxy_dev {
	struct tpm_chip *chip;

	u32 flags;			/* public API flags */

	wait_queue_head_t wq;

	struct mutex buf_lock;		/* protect buffer and flags */

	long state;			/* internal state */
#define STATE_OPENED_FLAG	 BIT(0)
#define STATE_WAIT_RESPONSE_FLAG BIT(1)	/* waiting for emulator response */

	size_t req_len;			/* length of queued TPM request */
	size_t resp_len;		/* length of queued TPM response */
	u8 buffer[TPM_BUFSIZE];		/* request/response buffer */

	struct work_struct work;	/* task that retrieves TPM timeouts */
};

/* all supported flags */
#define VTPM_PROXY_FLAGS_ALL  (VTPM_PROXY_FLAG_TPM2)

static struct workqueue_struct *workqueue;

static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev);

/*
 * Functions related to 'server side'
 */

/**
 * vtpm_proxy_fops_read - Read TPM commands on 'server side'
 *
 * Return value:
 *	Number of bytes read or negative error code
 */
static ssize_t vtpm_proxy_fops_read(struct file *filp, char __user *buf,
				    size_t count, loff_t *off)
{
	struct proxy_dev *proxy_dev = filp->private_data;
	size_t len;
	int sig, rc;

	sig = wait_event_interruptible(proxy_dev->wq,
				       proxy_dev->req_len != 0 ||
				       !(proxy_dev->state & STATE_OPENED_FLAG));
	if (sig)
		return -EINTR;

	mutex_lock(&proxy_dev->buf_lock);

	if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
		mutex_unlock(&proxy_dev->buf_lock);
		return -EPIPE;
	}

	len = proxy_dev->req_len;

	if (count < len) {
		mutex_unlock(&proxy_dev->buf_lock);
		pr_debug("Invalid size in recv: count=%zd, req_len=%zd\n",
			 count, len);
		return -EIO;
	}

	rc = copy_to_user(buf, proxy_dev->buffer, len);
	memset(proxy_dev->buffer, 0, len);
	proxy_dev->req_len = 0;

	if (!rc)
		proxy_dev->state |= STATE_WAIT_RESPONSE_FLAG;

	mutex_unlock(&proxy_dev->buf_lock);

	if (rc)
		return -EFAULT;

	return len;
}

/**
 * vtpm_proxy_fops_write - Write TPM responses on 'server side'
 *
 * Return value:
 *	Number of bytes written or negative error value
 */
static ssize_t vtpm_proxy_fops_write(struct file *filp, const char __user *buf,
				     size_t count, loff_t *off)
{
	struct proxy_dev *proxy_dev = filp->private_data;

	mutex_lock(&proxy_dev->buf_lock);

	if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
		mutex_unlock(&proxy_dev->buf_lock);
		return -EPIPE;
	}

	if (count > sizeof(proxy_dev->buffer) ||
	    !(proxy_dev->state & STATE_WAIT_RESPONSE_FLAG)) {
		mutex_unlock(&proxy_dev->buf_lock);
		return -EIO;
	}

	proxy_dev->state &= ~STATE_WAIT_RESPONSE_FLAG;

	proxy_dev->req_len = 0;

	if (copy_from_user(proxy_dev->buffer, buf, count)) {
		mutex_unlock(&proxy_dev->buf_lock);
		return -EFAULT;
	}

	proxy_dev->resp_len = count;

	mutex_unlock(&proxy_dev->buf_lock);

	wake_up_interruptible(&proxy_dev->wq);

	return count;
}
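/*
 * The read/write handlers above implement the 'server side' half of a
 * simple request/response handshake: the emulator reads one complete TPM
 * command, processes it, and writes back exactly one response.  A minimal
 * sketch of the expected user-space loop (illustrative only; the buffer
 * size, helper name and error handling are assumptions, not part of this
 * driver):
 *
 *	struct pollfd pfd = { .fd = vtpm_fd, .events = POLLIN };
 *	unsigned char cmd[4096], rsp[4096];
 *	ssize_t n, rsp_len;
 *
 *	while (poll(&pfd, 1, -1) > 0 && !(pfd.revents & POLLHUP)) {
 *		n = read(vtpm_fd, cmd, sizeof(cmd));
 *		if (n <= 0)
 *			break;
 *		rsp_len = emulate_tpm_command(cmd, n, rsp); // hypothetical
 *		write(vtpm_fd, rsp, rsp_len);
 *	}
 */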
/*
 * vtpm_proxy_fops_poll: Poll status on 'server side'
 *
 * Return value:
 *	Poll flags
 */
static unsigned int vtpm_proxy_fops_poll(struct file *filp, poll_table *wait)
{
	struct proxy_dev *proxy_dev = filp->private_data;
	unsigned ret;

	poll_wait(filp, &proxy_dev->wq, wait);

	ret = POLLOUT;

	mutex_lock(&proxy_dev->buf_lock);

	if (proxy_dev->req_len)
		ret |= POLLIN | POLLRDNORM;

	if (!(proxy_dev->state & STATE_OPENED_FLAG))
		ret |= POLLHUP;

	mutex_unlock(&proxy_dev->buf_lock);

	return ret;
}

/*
 * vtpm_proxy_fops_open - Open vTPM device on 'server side'
 *
 * Called when setting up the anonymous file descriptor
 */
static void vtpm_proxy_fops_open(struct file *filp)
{
	struct proxy_dev *proxy_dev = filp->private_data;

	proxy_dev->state |= STATE_OPENED_FLAG;
}

/**
 * vtpm_proxy_fops_undo_open - counter-part to vtpm_proxy_fops_open
 *
 * Call to undo vtpm_proxy_fops_open
 */
static void vtpm_proxy_fops_undo_open(struct proxy_dev *proxy_dev)
{
	mutex_lock(&proxy_dev->buf_lock);

	proxy_dev->state &= ~STATE_OPENED_FLAG;

	mutex_unlock(&proxy_dev->buf_lock);

	/* no more TPM responses -- wake up anyone waiting for them */
	wake_up_interruptible(&proxy_dev->wq);
}

/*
 * vtpm_proxy_fops_release: Close 'server side'
 *
 * Return value:
 *	Always returns 0.
 */
static int vtpm_proxy_fops_release(struct inode *inode, struct file *filp)
{
	struct proxy_dev *proxy_dev = filp->private_data;

	filp->private_data = NULL;

	vtpm_proxy_delete_device(proxy_dev);

	return 0;
}

static const struct file_operations vtpm_proxy_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.read = vtpm_proxy_fops_read,
	.write = vtpm_proxy_fops_write,
	.poll = vtpm_proxy_fops_poll,
	.release = vtpm_proxy_fops_release,
};

/*
 * Functions invoked by the core TPM driver to send TPM commands to
 * 'server side' and receive responses from there.
 */

/*
 * Called when core TPM driver reads TPM responses from 'server side'
 *
 * Return value:
 *	Number of TPM response bytes read, negative error value otherwise
 */
static int vtpm_proxy_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
	size_t len;

	/* process gone ? */
	mutex_lock(&proxy_dev->buf_lock);

	if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
		mutex_unlock(&proxy_dev->buf_lock);
		return -EPIPE;
	}

	len = proxy_dev->resp_len;
	if (count < len) {
		dev_err(&chip->dev,
			"Invalid size in recv: count=%zd, resp_len=%zd\n",
			count, len);
		len = -EIO;
		goto out;
	}

	memcpy(buf, proxy_dev->buffer, len);
	proxy_dev->resp_len = 0;

out:
	mutex_unlock(&proxy_dev->buf_lock);

	return len;
}
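/*
 * Note on the handshake between the two halves (a summary of the code in
 * this file, not additional behaviour): .send() below queues a request and
 * wakes the server, the server's read() hands the request out and sets
 * STATE_WAIT_RESPONSE_FLAG, the server's write() stores the response and
 * clears that flag, and .recv() above copies the response back to the core
 * TPM driver.  A non-zero resp_len is what .status() reports as "request
 * complete".
 */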
/*
 * Called when core TPM driver forwards TPM requests to 'server side'.
 *
 * Return value:
 *	0 in case of success, negative error value otherwise.
 */
static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
	int rc = 0;

	if (count > sizeof(proxy_dev->buffer)) {
		dev_err(&chip->dev,
			"Invalid size in send: count=%zd, buffer size=%zd\n",
			count, sizeof(proxy_dev->buffer));
		return -EIO;
	}

	mutex_lock(&proxy_dev->buf_lock);

	if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
		mutex_unlock(&proxy_dev->buf_lock);
		return -EPIPE;
	}

	proxy_dev->resp_len = 0;

	proxy_dev->req_len = count;
	memcpy(proxy_dev->buffer, buf, count);

	proxy_dev->state &= ~STATE_WAIT_RESPONSE_FLAG;

	mutex_unlock(&proxy_dev->buf_lock);

	wake_up_interruptible(&proxy_dev->wq);

	return rc;
}

static void vtpm_proxy_tpm_op_cancel(struct tpm_chip *chip)
{
	/* not supported */
}

static u8 vtpm_proxy_tpm_op_status(struct tpm_chip *chip)
{
	struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);

	if (proxy_dev->resp_len)
		return VTPM_PROXY_REQ_COMPLETE_FLAG;

	return 0;
}

static bool vtpm_proxy_tpm_req_canceled(struct tpm_chip *chip, u8 status)
{
	struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
	bool ret;

	mutex_lock(&proxy_dev->buf_lock);

	ret = !(proxy_dev->state & STATE_OPENED_FLAG);

	mutex_unlock(&proxy_dev->buf_lock);

	return ret;
}

static const struct tpm_class_ops vtpm_proxy_tpm_ops = {
	.flags = TPM_OPS_AUTO_STARTUP,
	.recv = vtpm_proxy_tpm_op_recv,
	.send = vtpm_proxy_tpm_op_send,
	.cancel = vtpm_proxy_tpm_op_cancel,
	.status = vtpm_proxy_tpm_op_status,
	.req_complete_mask = VTPM_PROXY_REQ_COMPLETE_FLAG,
	.req_complete_val = VTPM_PROXY_REQ_COMPLETE_FLAG,
	.req_canceled = vtpm_proxy_tpm_req_canceled,
};

/*
 * Code related to the startup of the TPM 2 and startup of TPM 1.2 +
 * retrieval of timeouts and durations.
 */

static void vtpm_proxy_work(struct work_struct *work)
{
	struct proxy_dev *proxy_dev = container_of(work, struct proxy_dev,
						   work);
	int rc;

	rc = tpm_chip_register(proxy_dev->chip);
	if (rc)
		goto err;

	return;

err:
	vtpm_proxy_fops_undo_open(proxy_dev);
}
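/*
 * Note: tpm_chip_register() runs from the work item above rather than from
 * the VTPM_PROXY_IOC_NEW_DEV ioctl path because registration issues TPM
 * commands (auto startup, timeout/duration retrieval) through the ops
 * above, and those commands can only be answered once the ioctl has
 * returned the anonymous fd to user space and the emulator is servicing
 * read()/write() on it.
 */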
/*
 * vtpm_proxy_work_stop: make sure the work has finished
 *
 * This function is useful when user space closed the fd
 * while the driver still determines timeouts.
 */
static void vtpm_proxy_work_stop(struct proxy_dev *proxy_dev)
{
	vtpm_proxy_fops_undo_open(proxy_dev);
	flush_work(&proxy_dev->work);
}

/*
 * vtpm_proxy_work_start: Schedule the work for TPM 1.2 & 2 initialization
 */
static inline void vtpm_proxy_work_start(struct proxy_dev *proxy_dev)
{
	queue_work(workqueue, &proxy_dev->work);
}

/*
 * Code related to creation and deletion of device pairs
 */
static struct proxy_dev *vtpm_proxy_create_proxy_dev(void)
{
	struct proxy_dev *proxy_dev;
	struct tpm_chip *chip;
	int err;

	proxy_dev = kzalloc(sizeof(*proxy_dev), GFP_KERNEL);
	if (proxy_dev == NULL)
		return ERR_PTR(-ENOMEM);

	init_waitqueue_head(&proxy_dev->wq);
	mutex_init(&proxy_dev->buf_lock);
	INIT_WORK(&proxy_dev->work, vtpm_proxy_work);

	chip = tpm_chip_alloc(NULL, &vtpm_proxy_tpm_ops);
	if (IS_ERR(chip)) {
		err = PTR_ERR(chip);
		goto err_proxy_dev_free;
	}
	dev_set_drvdata(&chip->dev, proxy_dev);

	proxy_dev->chip = chip;

	return proxy_dev;

err_proxy_dev_free:
	kfree(proxy_dev);

	return ERR_PTR(err);
}

/*
 * Undo what has been done in vtpm_proxy_create_proxy_dev
 */
static inline void vtpm_proxy_delete_proxy_dev(struct proxy_dev *proxy_dev)
{
	put_device(&proxy_dev->chip->dev); /* frees chip */
	kfree(proxy_dev);
}

/*
 * Create a /dev/tpm%d and 'server side' file descriptor pair
 *
 * Return value:
 *	Returns file pointer on success, an error value otherwise
 */
static struct file *vtpm_proxy_create_device(
				 struct vtpm_proxy_new_dev *vtpm_new_dev)
{
	struct proxy_dev *proxy_dev;
	int rc, fd;
	struct file *file;

	if (vtpm_new_dev->flags & ~VTPM_PROXY_FLAGS_ALL)
		return ERR_PTR(-EOPNOTSUPP);

	proxy_dev = vtpm_proxy_create_proxy_dev();
	if (IS_ERR(proxy_dev))
		return ERR_CAST(proxy_dev);

	proxy_dev->flags = vtpm_new_dev->flags;

	/* setup an anonymous file for the server-side */
	fd = get_unused_fd_flags(O_RDWR);
	if (fd < 0) {
		rc = fd;
		goto err_delete_proxy_dev;
	}

	file = anon_inode_getfile("[vtpms]", &vtpm_proxy_fops, proxy_dev,
				  O_RDWR);
	if (IS_ERR(file)) {
		rc = PTR_ERR(file);
		goto err_put_unused_fd;
	}

	/* from now on we can unwind with put_unused_fd() + fput() */
	/* simulate an open() on the server side */
	vtpm_proxy_fops_open(file);

	if (proxy_dev->flags & VTPM_PROXY_FLAG_TPM2)
		proxy_dev->chip->flags |= TPM_CHIP_FLAG_TPM2;

	vtpm_proxy_work_start(proxy_dev);

	vtpm_new_dev->fd = fd;
	vtpm_new_dev->major = MAJOR(proxy_dev->chip->dev.devt);
	vtpm_new_dev->minor = MINOR(proxy_dev->chip->dev.devt);
	vtpm_new_dev->tpm_num = proxy_dev->chip->dev_num;

	return file;

err_put_unused_fd:
	put_unused_fd(fd);

err_delete_proxy_dev:
	vtpm_proxy_delete_proxy_dev(proxy_dev);

	return ERR_PTR(rc);
}

/*
 * Counter part to vtpm_proxy_create_device.
 */
static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev)
{
	vtpm_proxy_work_stop(proxy_dev);

	/*
	 * A client may hold the 'ops' lock, so let it know that the server
	 * side shuts down before we try to grab the 'ops' lock when
	 * unregistering the chip.
	 */
	vtpm_proxy_fops_undo_open(proxy_dev);

	tpm_chip_unregister(proxy_dev->chip);

	vtpm_proxy_delete_proxy_dev(proxy_dev);
}

/*
 * Code related to the control device /dev/vtpmx
 */

/*
 * vtpmx_fops_ioctl: ioctl on /dev/vtpmx
 *
 * Return value:
 *	Returns 0 on success, a negative error code otherwise.
 */
static long vtpmx_fops_ioctl(struct file *f, unsigned int ioctl,
			     unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct vtpm_proxy_new_dev __user *vtpm_new_dev_p;
	struct vtpm_proxy_new_dev vtpm_new_dev;
	struct file *file;

	switch (ioctl) {
	case VTPM_PROXY_IOC_NEW_DEV:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		vtpm_new_dev_p = argp;
		if (copy_from_user(&vtpm_new_dev, vtpm_new_dev_p,
				   sizeof(vtpm_new_dev)))
			return -EFAULT;
		file = vtpm_proxy_create_device(&vtpm_new_dev);
		if (IS_ERR(file))
			return PTR_ERR(file);
		if (copy_to_user(vtpm_new_dev_p, &vtpm_new_dev,
				 sizeof(vtpm_new_dev))) {
			put_unused_fd(vtpm_new_dev.fd);
			fput(file);
			return -EFAULT;
		}

		fd_install(vtpm_new_dev.fd, file);
		return 0;

	default:
		return -ENOIOCTLCMD;
	}
}
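/*
 * A minimal sketch of how a TPM emulator is expected to use /dev/vtpmx
 * (illustrative only; error handling omitted, CAP_SYS_ADMIN is required):
 *
 *	struct vtpm_proxy_new_dev new_dev = {
 *		.flags = VTPM_PROXY_FLAG_TPM2,	// or 0 for a TPM 1.2
 *	};
 *	int ctrl_fd = open("/dev/vtpmx", O_RDWR);
 *
 *	if (ioctl(ctrl_fd, VTPM_PROXY_IOC_NEW_DEV, &new_dev) == 0) {
 *		// new_dev.fd is the 'server side' descriptor to read TPM
 *		// commands from and write responses to; the client side
 *		// appears as /dev/tpm<new_dev.tpm_num>.
 *	}
 */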
#ifdef CONFIG_COMPAT
static long vtpmx_fops_compat_ioctl(struct file *f, unsigned int ioctl,
				    unsigned long arg)
{
	return vtpmx_fops_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vtpmx_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = vtpmx_fops_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = vtpmx_fops_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct miscdevice vtpmx_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "vtpmx",
	.fops = &vtpmx_fops,
};

static int vtpmx_init(void)
{
	return misc_register(&vtpmx_miscdev);
}

static void vtpmx_cleanup(void)
{
	misc_deregister(&vtpmx_miscdev);
}

static int __init vtpm_module_init(void)
{
	int rc;

	rc = vtpmx_init();
	if (rc) {
		pr_err("couldn't create vtpmx device\n");
		return rc;
	}

	workqueue = create_workqueue("tpm-vtpm");
	if (!workqueue) {
		pr_err("couldn't create workqueue\n");
		rc = -ENOMEM;
		goto err_vtpmx_cleanup;
	}

	return 0;

err_vtpmx_cleanup:
	vtpmx_cleanup();

	return rc;
}

static void __exit vtpm_module_exit(void)
{
	destroy_workqueue(workqueue);
	vtpmx_cleanup();
}

module_init(vtpm_module_init);
module_exit(vtpm_module_exit);

MODULE_AUTHOR("Stefan Berger (stefanb@us.ibm.com)");
MODULE_DESCRIPTION("vTPM Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");