1 /* 2 * Copyright (C) 2012 IBM Corporation 3 * 4 * Author: Ashley Lai <ashleydlai@gmail.com> 5 * 6 * Maintained by: <tpmdd-devel@lists.sourceforge.net> 7 * 8 * Device driver for TCG/TCPA TPM (trusted platform module). 9 * Specifications at www.trustedcomputinggroup.org 10 * 11 * This program is free software; you can redistribute it and/or 12 * modify it under the terms of the GNU General Public License as 13 * published by the Free Software Foundation, version 2 of the 14 * License. 15 * 16 */ 17 18 #include <linux/dma-mapping.h> 19 #include <linux/dmapool.h> 20 #include <linux/slab.h> 21 #include <asm/vio.h> 22 #include <asm/irq.h> 23 #include <linux/types.h> 24 #include <linux/list.h> 25 #include <linux/spinlock.h> 26 #include <linux/interrupt.h> 27 #include <linux/wait.h> 28 #include <asm/prom.h> 29 30 #include "tpm.h" 31 #include "tpm_ibmvtpm.h" 32 33 static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm"; 34 35 static struct vio_device_id tpm_ibmvtpm_device_table[] = { 36 { "IBM,vtpm", "IBM,vtpm"}, 37 { "", "" } 38 }; 39 MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table); 40 41 /** 42 * ibmvtpm_send_crq - Send a CRQ request 43 * 44 * @vdev: vio device struct 45 * @w1: first word 46 * @w2: second word 47 * 48 * Return: 49 * 0 -Sucess 50 * Non-zero - Failure 51 */ 52 static int ibmvtpm_send_crq(struct vio_dev *vdev, u64 w1, u64 w2) 53 { 54 return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, w2); 55 } 56 57 /** 58 * tpm_ibmvtpm_recv - Receive data after send 59 * 60 * @chip: tpm chip struct 61 * @buf: buffer to read 62 * @count: size of buffer 63 * 64 * Return: 65 * Number of bytes read 66 */ 67 static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) 68 { 69 struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); 70 u16 len; 71 int sig; 72 73 if (!ibmvtpm->rtce_buf) { 74 dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n"); 75 return 0; 76 } 77 78 sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd); 79 
if (sig) 80 return -EINTR; 81 82 len = ibmvtpm->res_len; 83 84 if (count < len) { 85 dev_err(ibmvtpm->dev, 86 "Invalid size in recv: count=%zd, crq_size=%d\n", 87 count, len); 88 return -EIO; 89 } 90 91 spin_lock(&ibmvtpm->rtce_lock); 92 memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len); 93 memset(ibmvtpm->rtce_buf, 0, len); 94 ibmvtpm->res_len = 0; 95 spin_unlock(&ibmvtpm->rtce_lock); 96 return len; 97 } 98 99 /** 100 * tpm_ibmvtpm_send - Send tpm request 101 * 102 * @chip: tpm chip struct 103 * @buf: buffer contains data to send 104 * @count: size of buffer 105 * 106 * Return: 107 * Number of bytes sent or < 0 on error. 108 */ 109 static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) 110 { 111 struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); 112 struct ibmvtpm_crq crq; 113 __be64 *word = (__be64 *)&crq; 114 int rc, sig; 115 116 if (!ibmvtpm->rtce_buf) { 117 dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n"); 118 return 0; 119 } 120 121 if (count > ibmvtpm->rtce_size) { 122 dev_err(ibmvtpm->dev, 123 "Invalid size in send: count=%zd, rtce_size=%d\n", 124 count, ibmvtpm->rtce_size); 125 return -EIO; 126 } 127 128 if (ibmvtpm->tpm_processing_cmd) { 129 dev_info(ibmvtpm->dev, 130 "Need to wait for TPM to finish\n"); 131 /* wait for previous command to finish */ 132 sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd); 133 if (sig) 134 return -EINTR; 135 } 136 137 spin_lock(&ibmvtpm->rtce_lock); 138 ibmvtpm->res_len = 0; 139 memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count); 140 crq.valid = (u8)IBMVTPM_VALID_CMD; 141 crq.msg = (u8)VTPM_TPM_COMMAND; 142 crq.len = cpu_to_be16(count); 143 crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle); 144 145 /* 146 * set the processing flag before the Hcall, since we may get the 147 * result (interrupt) before even being able to check rc. 
148 */ 149 ibmvtpm->tpm_processing_cmd = true; 150 151 rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]), 152 be64_to_cpu(word[1])); 153 if (rc != H_SUCCESS) { 154 dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc); 155 rc = 0; 156 ibmvtpm->tpm_processing_cmd = false; 157 } else 158 rc = count; 159 160 spin_unlock(&ibmvtpm->rtce_lock); 161 return rc; 162 } 163 164 static void tpm_ibmvtpm_cancel(struct tpm_chip *chip) 165 { 166 return; 167 } 168 169 static u8 tpm_ibmvtpm_status(struct tpm_chip *chip) 170 { 171 return 0; 172 } 173 174 /** 175 * ibmvtpm_crq_get_rtce_size - Send a CRQ request to get rtce size 176 * 177 * @ibmvtpm: vtpm device struct 178 * 179 * Return: 180 * 0 on success. 181 * Non-zero on failure. 182 */ 183 static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm) 184 { 185 struct ibmvtpm_crq crq; 186 u64 *buf = (u64 *) &crq; 187 int rc; 188 189 crq.valid = (u8)IBMVTPM_VALID_CMD; 190 crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE; 191 192 rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]), 193 cpu_to_be64(buf[1])); 194 if (rc != H_SUCCESS) 195 dev_err(ibmvtpm->dev, 196 "ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc); 197 198 return rc; 199 } 200 201 /** 202 * ibmvtpm_crq_get_version - Send a CRQ request to get vtpm version 203 * - Note that this is vtpm version and not tpm version 204 * 205 * @ibmvtpm: vtpm device struct 206 * 207 * Return: 208 * 0 on success. 209 * Non-zero on failure. 
210 */ 211 static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm) 212 { 213 struct ibmvtpm_crq crq; 214 u64 *buf = (u64 *) &crq; 215 int rc; 216 217 crq.valid = (u8)IBMVTPM_VALID_CMD; 218 crq.msg = (u8)VTPM_GET_VERSION; 219 220 rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]), 221 cpu_to_be64(buf[1])); 222 if (rc != H_SUCCESS) 223 dev_err(ibmvtpm->dev, 224 "ibmvtpm_crq_get_version failed rc=%d\n", rc); 225 226 return rc; 227 } 228 229 /** 230 * ibmvtpm_crq_send_init_complete - Send a CRQ initialize complete message 231 * @ibmvtpm: vtpm device struct 232 * 233 * Return: 234 * 0 on success. 235 * Non-zero on failure. 236 */ 237 static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm) 238 { 239 int rc; 240 241 rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_COMP_CMD, 0); 242 if (rc != H_SUCCESS) 243 dev_err(ibmvtpm->dev, 244 "ibmvtpm_crq_send_init_complete failed rc=%d\n", rc); 245 246 return rc; 247 } 248 249 /** 250 * ibmvtpm_crq_send_init - Send a CRQ initialize message 251 * @ibmvtpm: vtpm device struct 252 * 253 * Return: 254 * 0 on success. 255 * Non-zero on failure. 256 */ 257 static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm) 258 { 259 int rc; 260 261 rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_CMD, 0); 262 if (rc != H_SUCCESS) 263 dev_err(ibmvtpm->dev, 264 "ibmvtpm_crq_send_init failed rc=%d\n", rc); 265 266 return rc; 267 } 268 269 /** 270 * tpm_ibmvtpm_remove - ibm vtpm remove entry point 271 * @vdev: vio device struct 272 * 273 * Return: Always 0. 
 */
static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
{
	struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	int rc = 0;

	/* Stop new requests before tearing down the transport. */
	tpm_chip_unregister(chip);

	free_irq(vdev->irq, ibmvtpm);

	/* Retry H_FREE_CRQ while the hypervisor reports it is busy. */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle,
			 CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)ibmvtpm->crq_queue.crq_addr);

	/* rtce_buf may never have been allocated if CRQ init did not finish. */
	if (ibmvtpm->rtce_buf) {
		dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle,
				 ibmvtpm->rtce_size, DMA_BIDIRECTIONAL);
		kfree(ibmvtpm->rtce_buf);
	}

	kfree(ibmvtpm);
	/* For tpm_ibmvtpm_get_desired_dma */
	dev_set_drvdata(&vdev->dev, NULL);

	return 0;
}

/**
 * tpm_ibmvtpm_get_desired_dma - Get DMA size needed by this driver
 * @vdev: vio device struct
 *
 * Return:
 *	Number of bytes the driver needs to DMA map.
 */
static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
{
	struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
	struct ibmvtpm_dev *ibmvtpm;

	/*
	 * ibmvtpm initializes at probe time, so the data we are
	 * asking for may not be set yet. Estimate that 4K required
	 * for TCE-mapped buffer in addition to CRQ.
	 */
	if (chip)
		ibmvtpm = dev_get_drvdata(&chip->dev);
	else
		return CRQ_RES_BUF_SIZE + PAGE_SIZE;

	return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
}

/**
 * tpm_ibmvtpm_suspend - Suspend
 * @dev: device struct
 *
 * Return: 0 on success, non-zero on hcall failure.
338 */ 339 static int tpm_ibmvtpm_suspend(struct device *dev) 340 { 341 struct tpm_chip *chip = dev_get_drvdata(dev); 342 struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); 343 struct ibmvtpm_crq crq; 344 u64 *buf = (u64 *) &crq; 345 int rc = 0; 346 347 crq.valid = (u8)IBMVTPM_VALID_CMD; 348 crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND; 349 350 rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]), 351 cpu_to_be64(buf[1])); 352 if (rc != H_SUCCESS) 353 dev_err(ibmvtpm->dev, 354 "tpm_ibmvtpm_suspend failed rc=%d\n", rc); 355 356 return rc; 357 } 358 359 /** 360 * ibmvtpm_reset_crq - Reset CRQ 361 * 362 * @ibmvtpm: ibm vtpm struct 363 * 364 * Return: 365 * 0 on success. 366 * Non-zero on failure. 367 */ 368 static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm) 369 { 370 int rc = 0; 371 372 do { 373 if (rc) 374 msleep(100); 375 rc = plpar_hcall_norets(H_FREE_CRQ, 376 ibmvtpm->vdev->unit_address); 377 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 378 379 memset(ibmvtpm->crq_queue.crq_addr, 0, CRQ_RES_BUF_SIZE); 380 ibmvtpm->crq_queue.index = 0; 381 382 return plpar_hcall_norets(H_REG_CRQ, ibmvtpm->vdev->unit_address, 383 ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE); 384 } 385 386 /** 387 * tpm_ibmvtpm_resume - Resume from suspend 388 * 389 * @dev: device struct 390 * 391 * Return: Always 0. 
 */
static int tpm_ibmvtpm_resume(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	int rc = 0;

	/* Re-enable the CRQ, retrying while the hypervisor is busy. */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_ENABLE_CRQ,
					ibmvtpm->vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc) {
		dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
		return rc;
	}

	rc = vio_enable_interrupts(ibmvtpm->vdev);
	if (rc) {
		dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
		return rc;
	}

	/* Re-run the CRQ init handshake with the hypervisor. */
	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		dev_err(dev, "Error send_init rc=%d\n", rc);

	return rc;
}

/*
 * tpm_ibmvtpm_status() above always reports 0, so a zero status is
 * what this backend treats as "request canceled".
 */
static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
{
	return (status == 0);
}

static const struct tpm_class_ops tpm_ibmvtpm = {
	.recv = tpm_ibmvtpm_recv,
	.send = tpm_ibmvtpm_send,
	.cancel = tpm_ibmvtpm_cancel,
	.status = tpm_ibmvtpm_status,
	.req_complete_mask = 0,
	.req_complete_val = 0,
	.req_canceled = tpm_ibmvtpm_req_canceled,
};

static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = {
	.suspend = tpm_ibmvtpm_suspend,
	.resume = tpm_ibmvtpm_resume,
};

/**
 * ibmvtpm_crq_get_next - Get next responded crq
 *
 * @ibmvtpm: vtpm device struct
 *
 * Return: vtpm crq pointer or NULL.
 */
static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
{
	struct ibmvtpm_crq_queue *crq_q = &ibmvtpm->crq_queue;
	struct ibmvtpm_crq *crq = &crq_q->crq_addr[crq_q->index];

	if (crq->valid & VTPM_MSG_RES) {
		/* Advance the ring index, wrapping at the end of the queue. */
		if (++crq_q->index == crq_q->num_entry)
			crq_q->index = 0;
		/*
		 * Read barrier after the valid check — presumably so the
		 * caller's reads of the CRQ payload cannot be reordered
		 * before the check; pairs with the publisher of the entry.
		 */
		smp_rmb();
	} else
		crq = NULL;
	return crq;
}

/**
 * ibmvtpm_crq_process - Process responded crq
 *
 * @crq: crq to be processed
 * @ibmvtpm: vtpm device struct
 *
 * Dispatches on the CRQ's valid/msg fields: handles the two-step CRQ init
 * handshake, the rtce buffer-size response (allocating and DMA-mapping the
 * transfer buffer), the version response, and TPM command completion (which
 * wakes any waiter in recv/send).
 */
static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
				struct ibmvtpm_dev *ibmvtpm)
{
	int rc = 0;

	switch (crq->valid) {
	case VALID_INIT_CRQ:
		switch (crq->msg) {
		case INIT_CRQ_RES:
			/* First half of the handshake done; complete it. */
			dev_info(ibmvtpm->dev, "CRQ initialized\n");
			rc = ibmvtpm_crq_send_init_complete(ibmvtpm);
			if (rc)
				dev_err(ibmvtpm->dev, "Unable to send CRQ init complete rc=%d\n", rc);
			return;
		case INIT_CRQ_COMP_RES:
			dev_info(ibmvtpm->dev,
				 "CRQ initialization completed\n");
			return;
		default:
			dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg);
			return;
		}
	case IBMVTPM_VALID_CMD:
		switch (crq->msg) {
		case VTPM_GET_RTCE_BUFFER_SIZE_RES:
			/* len is u16 after conversion, so <= 0 means == 0. */
			if (be16_to_cpu(crq->len) <= 0) {
				dev_err(ibmvtpm->dev, "Invalid rtce size\n");
				return;
			}
			ibmvtpm->rtce_size = be16_to_cpu(crq->len);
			/* GFP_ATOMIC: this runs from the interrupt handler. */
			ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
						    GFP_ATOMIC);
			if (!ibmvtpm->rtce_buf) {
				dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
				return;
			}

			ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev,
				ibmvtpm->rtce_buf, ibmvtpm->rtce_size,
				DMA_BIDIRECTIONAL);

			if (dma_mapping_error(ibmvtpm->dev,
					      ibmvtpm->rtce_dma_handle)) {
				/* Leave rtce_buf NULL: the device stays "not ready". */
				kfree(ibmvtpm->rtce_buf);
				ibmvtpm->rtce_buf = NULL;
				dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n");
			}

			return;
		case VTPM_GET_VERSION_RES:
			ibmvtpm->vtpm_version = be32_to_cpu(crq->data);
			return;
		case VTPM_TPM_COMMAND_RES:
			/* len of the data in rtce buffer */
			ibmvtpm->res_len = be16_to_cpu(crq->len);
			/* Clear the flag, then wake the waiter in recv/send. */
			ibmvtpm->tpm_processing_cmd = false;
			wake_up_interruptible(&ibmvtpm->wq);
			return;
		default:
			return;
		}
	}
	return;
}

/**
 * ibmvtpm_interrupt - Interrupt handler
 *
 * @irq: irq number to handle
 * @vtpm_instance: vtpm that received interrupt
 *
 * Returns:
 *	IRQ_HANDLED
 **/
static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
{
	struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance;
	struct ibmvtpm_crq *crq;

	/* while loop is needed for initial setup (get version and
	 * get rtce_size). There should be only one tpm request at any
	 * given time.
	 */
	while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
		ibmvtpm_crq_process(crq, ibmvtpm);
		/*
		 * Mark the slot free; the write barrier orders the clear
		 * before any later stores — NOTE(review): presumably so the
		 * hypervisor cannot see a stale valid flag; confirm against
		 * the CRQ protocol spec.
		 */
		crq->valid = 0;
		smp_wmb();
	}

	return IRQ_HANDLED;
}

/**
 * tpm_ibmvtpm_probe - ibm vtpm initialize entry point
 *
 * @vio_dev: vio device struct
 * @id: vio device id struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
573 */ 574 static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, 575 const struct vio_device_id *id) 576 { 577 struct ibmvtpm_dev *ibmvtpm; 578 struct device *dev = &vio_dev->dev; 579 struct ibmvtpm_crq_queue *crq_q; 580 struct tpm_chip *chip; 581 int rc = -ENOMEM, rc1; 582 583 chip = tpmm_chip_alloc(dev, &tpm_ibmvtpm); 584 if (IS_ERR(chip)) 585 return PTR_ERR(chip); 586 587 ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL); 588 if (!ibmvtpm) { 589 dev_err(dev, "kzalloc for ibmvtpm failed\n"); 590 goto cleanup; 591 } 592 593 ibmvtpm->dev = dev; 594 ibmvtpm->vdev = vio_dev; 595 596 crq_q = &ibmvtpm->crq_queue; 597 crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL); 598 if (!crq_q->crq_addr) { 599 dev_err(dev, "Unable to allocate memory for crq_addr\n"); 600 goto cleanup; 601 } 602 603 crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr); 604 ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr, 605 CRQ_RES_BUF_SIZE, 606 DMA_BIDIRECTIONAL); 607 608 if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) { 609 dev_err(dev, "dma mapping failed\n"); 610 goto cleanup; 611 } 612 613 rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address, 614 ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE); 615 if (rc == H_RESOURCE) 616 rc = ibmvtpm_reset_crq(ibmvtpm); 617 618 if (rc) { 619 dev_err(dev, "Unable to register CRQ rc=%d\n", rc); 620 goto reg_crq_cleanup; 621 } 622 623 rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0, 624 tpm_ibmvtpm_driver_name, ibmvtpm); 625 if (rc) { 626 dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq); 627 goto init_irq_cleanup; 628 } 629 630 rc = vio_enable_interrupts(vio_dev); 631 if (rc) { 632 dev_err(dev, "Error %d enabling interrupts\n", rc); 633 goto init_irq_cleanup; 634 } 635 636 init_waitqueue_head(&ibmvtpm->wq); 637 638 crq_q->index = 0; 639 640 dev_set_drvdata(&chip->dev, ibmvtpm); 641 642 spin_lock_init(&ibmvtpm->rtce_lock); 643 644 rc = ibmvtpm_crq_send_init(ibmvtpm); 645 if (rc) 646 goto 
init_irq_cleanup; 647 648 rc = ibmvtpm_crq_get_version(ibmvtpm); 649 if (rc) 650 goto init_irq_cleanup; 651 652 rc = ibmvtpm_crq_get_rtce_size(ibmvtpm); 653 if (rc) 654 goto init_irq_cleanup; 655 656 return tpm_chip_register(chip); 657 init_irq_cleanup: 658 do { 659 rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address); 660 } while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1)); 661 reg_crq_cleanup: 662 dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE, 663 DMA_BIDIRECTIONAL); 664 cleanup: 665 if (ibmvtpm) { 666 if (crq_q->crq_addr) 667 free_page((unsigned long)crq_q->crq_addr); 668 kfree(ibmvtpm); 669 } 670 671 return rc; 672 } 673 674 static struct vio_driver ibmvtpm_driver = { 675 .id_table = tpm_ibmvtpm_device_table, 676 .probe = tpm_ibmvtpm_probe, 677 .remove = tpm_ibmvtpm_remove, 678 .get_desired_dma = tpm_ibmvtpm_get_desired_dma, 679 .name = tpm_ibmvtpm_driver_name, 680 .pm = &tpm_ibmvtpm_pm_ops, 681 }; 682 683 /** 684 * ibmvtpm_module_init - Initialize ibm vtpm module. 685 * 686 * 687 * Return: 688 * 0 on success. 689 * Non-zero on failure. 690 */ 691 static int __init ibmvtpm_module_init(void) 692 { 693 return vio_register_driver(&ibmvtpm_driver); 694 } 695 696 /** 697 * ibmvtpm_module_exit - Tear down ibm vtpm module. 698 */ 699 static void __exit ibmvtpm_module_exit(void) 700 { 701 vio_unregister_driver(&ibmvtpm_driver); 702 } 703 704 module_init(ibmvtpm_module_init); 705 module_exit(ibmvtpm_module_exit); 706 707 MODULE_AUTHOR("adlai@us.ibm.com"); 708 MODULE_DESCRIPTION("IBM vTPM Driver"); 709 MODULE_VERSION("1.0"); 710 MODULE_LICENSE("GPL"); 711