/*
 * Copyright (C) 2005, 2006 IBM Corporation
 *
 * Authors:
 * Leendert van Doorn <leendert@watson.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 *
 * This device driver implements the TPM interface as defined in
 * the TCG TPM Interface Spec version 1.2, revision 1.0.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pnp.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include "tpm.h"

#define TPM_HEADER_SIZE 10

enum tis_access {
	TPM_ACCESS_VALID = 0x80,
	TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
	TPM_ACCESS_REQUEST_PENDING = 0x04,
	TPM_ACCESS_REQUEST_USE = 0x02,
};

enum tis_status {
	TPM_STS_VALID = 0x80,
	TPM_STS_COMMAND_READY = 0x40,
	TPM_STS_GO = 0x20,
	TPM_STS_DATA_AVAIL = 0x10,
	TPM_STS_DATA_EXPECT = 0x08,
};

enum tis_int_flags {
	TPM_GLOBAL_INT_ENABLE = 0x80000000,
	TPM_INTF_BURST_COUNT_STATIC = 0x100,
	TPM_INTF_CMD_READY_INT = 0x080,
	TPM_INTF_INT_EDGE_FALLING = 0x040,
	TPM_INTF_INT_EDGE_RISING = 0x020,
	TPM_INTF_INT_LEVEL_LOW = 0x010,
	TPM_INTF_INT_LEVEL_HIGH = 0x008,
	TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
	TPM_INTF_STS_VALID_INT = 0x002,
	TPM_INTF_DATA_AVAIL_INT = 0x001,
};

enum tis_defaults {
	TIS_MEM_BASE = 0xFED40000,
	TIS_MEM_LEN = 0x5000,
	TIS_SHORT_TIMEOUT = 750,	/* ms */
	TIS_LONG_TIMEOUT = 2000,	/* 2 sec */
};

#define TPM_ACCESS(l)			(0x0000 | ((l) << 12))
#define TPM_INT_ENABLE(l)		(0x0008 | ((l) << 12))
#define TPM_INT_VECTOR(l)		(0x000C | ((l) << 12))
#define TPM_INT_STATUS(l)		(0x0010 | ((l) << 12))
#define TPM_INTF_CAPS(l)		(0x0014 | ((l) << 12))
#define TPM_STS(l)			(0x0018 | ((l) << 12))
#define TPM_DATA_FIFO(l)		(0x0024 | ((l) << 12))

#define TPM_DID_VID(l)			(0x0F00 | ((l) << 12))
#define TPM_RID(l)			(0x0F04 | ((l) << 12))
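
/*
 * Register layout note: in the TIS memory map each locality occupies its
 * own 4 KiB page, so a register's offset from iobase is the architectural
 * offset OR'ed with (locality << 12).  For example, TPM_STS(2) resolves to
 * offset 0x2018, i.e. physical address 0xFED42018 with the default
 * TIS_MEM_BASE.
 */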

static LIST_HEAD(tis_chips);
static DEFINE_SPINLOCK(tis_lock);

static int check_locality(struct tpm_chip *chip, int l)
{
	if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
	     (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
	    (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
		return chip->vendor.locality = l;

	return -1;
}

static void release_locality(struct tpm_chip *chip, int l, int force)
{
	if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
		      (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
	    (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))
		iowrite8(TPM_ACCESS_ACTIVE_LOCALITY,
			 chip->vendor.iobase + TPM_ACCESS(l));
}

static int request_locality(struct tpm_chip *chip, int l)
{
	unsigned long stop;
	long rc;

	if (check_locality(chip, l) >= 0)
		return l;

	iowrite8(TPM_ACCESS_REQUEST_USE,
		 chip->vendor.iobase + TPM_ACCESS(l));

	if (chip->vendor.irq) {
		rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
						      (check_locality
						       (chip, l) >= 0),
						      chip->vendor.timeout_a);
		if (rc > 0)
			return l;

	} else {
		/* poll until the locality is granted */
		stop = jiffies + chip->vendor.timeout_a;
		do {
			if (check_locality(chip, l) >= 0)
				return l;
			msleep(TPM_TIMEOUT);
		} while (time_before(jiffies, stop));
	}
	return -1;
}

static u8 tpm_tis_status(struct tpm_chip *chip)
{
	return ioread8(chip->vendor.iobase +
		       TPM_STS(chip->vendor.locality));
}

static void tpm_tis_ready(struct tpm_chip *chip)
{
	/* this causes the current command to be aborted */
	iowrite8(TPM_STS_COMMAND_READY,
		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
}
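
/*
 * The burst count in TPM_STS (bits 8-23, read here as two byte accesses at
 * offsets +1 and +2) tells us how many bytes the TPM can accept or return
 * through the FIFO without inserting wait states, so the transfer loops
 * below re-read it between bursts instead of polling after every byte.
 */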
static int get_burstcount(struct tpm_chip *chip)
{
	unsigned long stop;
	int burstcnt;

	/* wait for burstcount */
	/* which timeout value, spec has 2 answers (c & d) */
	stop = jiffies + chip->vendor.timeout_d;
	do {
		burstcnt = ioread8(chip->vendor.iobase +
				   TPM_STS(chip->vendor.locality) + 1);
		burstcnt += ioread8(chip->vendor.iobase +
				    TPM_STS(chip->vendor.locality) +
				    2) << 8;
		if (burstcnt)
			return burstcnt;
		msleep(TPM_TIMEOUT);
	} while (time_before(jiffies, stop));
	return -EBUSY;
}

static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
			 wait_queue_head_t *queue)
{
	unsigned long stop;
	long rc;
	u8 status;

	/* check current status */
	status = tpm_tis_status(chip);
	if ((status & mask) == mask)
		return 0;

	if (chip->vendor.irq) {
		rc = wait_event_interruptible_timeout(*queue,
						      ((tpm_tis_status
							(chip) & mask) ==
						       mask), timeout);
		if (rc > 0)
			return 0;
	} else {
		stop = jiffies + timeout;
		do {
			msleep(TPM_TIMEOUT);
			status = tpm_tis_status(chip);
			if ((status & mask) == mask)
				return 0;
		} while (time_before(jiffies, stop));
	}
	return -ETIME;
}

static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
{
	int size = 0, burstcnt;

	while (size < count &&
	       wait_for_stat(chip,
			     TPM_STS_DATA_AVAIL | TPM_STS_VALID,
			     chip->vendor.timeout_c,
			     &chip->vendor.read_queue) == 0) {
		burstcnt = get_burstcount(chip);
		for (; burstcnt > 0 && size < count; burstcnt--)
			buf[size++] = ioread8(chip->vendor.iobase +
					      TPM_DATA_FIFO(chip->vendor.
							    locality));
	}
	return size;
}
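
/*
 * Every TPM 1.2 response starts with a 10-byte header (TPM_HEADER_SIZE):
 * a 2-byte tag, a 4-byte big-endian paramSize giving the total length of
 * the response including the header, and a 4-byte return code.  The recv
 * path below reads the header first, then uses paramSize to know how many
 * more bytes to pull from the FIFO.
 */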
static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	int size = 0;
	int expected, status;

	if (count < TPM_HEADER_SIZE) {
		size = -EIO;
		goto out;
	}

	/* read first 10 bytes, including tag, paramsize, and result */
	size = recv_data(chip, buf, TPM_HEADER_SIZE);
	if (size < TPM_HEADER_SIZE) {
		dev_err(chip->dev, "Unable to read header\n");
		goto out;
	}

	expected = be32_to_cpu(*(__be32 *) (buf + 2));
	if (expected > count) {
		size = -EIO;
		goto out;
	}

	size += recv_data(chip, &buf[TPM_HEADER_SIZE],
			  expected - TPM_HEADER_SIZE);
	if (size < expected) {
		dev_err(chip->dev, "Unable to read remainder of result\n");
		size = -ETIME;
		goto out;
	}

	wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
		      &chip->vendor.int_queue);
	status = tpm_tis_status(chip);
	if (status & TPM_STS_DATA_AVAIL) {	/* retry? */
		dev_err(chip->dev, "Error left over data\n");
		size = -EIO;
		goto out;
	}

out:
	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);
	return size;
}

static int itpm;
module_param(itpm, bool, 0444);
MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");

/*
 * If interrupts are used (signaled by an irq set in the vendor structure),
 * tpm.c can skip polling for the data to be available, as the interrupt is
 * waited for here.
 */
static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
	int rc, status, burstcnt;
	size_t count = 0;
	u32 ordinal;

	if (request_locality(chip, 0) < 0)
		return -EBUSY;

	status = tpm_tis_status(chip);
	if ((status & TPM_STS_COMMAND_READY) == 0) {
		tpm_tis_ready(chip);
		if (wait_for_stat
		    (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
		     &chip->vendor.int_queue) < 0) {
			rc = -ETIME;
			goto out_err;
		}
	}

	while (count < len - 1) {
		burstcnt = get_burstcount(chip);
		for (; burstcnt > 0 && count < len - 1; burstcnt--) {
			iowrite8(buf[count], chip->vendor.iobase +
				 TPM_DATA_FIFO(chip->vendor.locality));
			count++;
		}

		wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
			      &chip->vendor.int_queue);
		status = tpm_tis_status(chip);
		if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
			rc = -EIO;
			goto out_err;
		}
	}

	/* write last byte */
	iowrite8(buf[count],
		 chip->vendor.iobase +
		 TPM_DATA_FIFO(chip->vendor.locality));
	wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
		      &chip->vendor.int_queue);
	status = tpm_tis_status(chip);
	if ((status & TPM_STS_DATA_EXPECT) != 0) {
		rc = -EIO;
		goto out_err;
	}

	/* go and do it */
	iowrite8(TPM_STS_GO,
		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));

	if (chip->vendor.irq) {
		ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
		if (wait_for_stat
		    (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
		     tpm_calc_ordinal_duration(chip, ordinal),
		     &chip->vendor.read_queue) < 0) {
			rc = -ETIME;
			goto out_err;
		}
	}
	return len;
out_err:
	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);
	return rc;
}

static const struct file_operations tis_ops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.open = tpm_open,
	.read = tpm_read,
	.write = tpm_write,
	.release = tpm_release,
};

static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
		   NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);

static struct attribute *tis_attrs[] = {
	&dev_attr_pubek.attr,
	&dev_attr_pcrs.attr,
	&dev_attr_enabled.attr,
	&dev_attr_active.attr,
	&dev_attr_owned.attr,
	&dev_attr_temp_deactivated.attr,
	&dev_attr_caps.attr,
	&dev_attr_cancel.attr, NULL,
};

static struct attribute_group tis_attr_grp = {
	.attrs = tis_attrs
};

static struct tpm_vendor_specific tpm_tis = {
	.status = tpm_tis_status,
	.recv = tpm_tis_recv,
	.send = tpm_tis_send,
	.cancel = tpm_tis_ready,
	.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
	.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
	.req_canceled = TPM_STS_COMMAND_READY,
	.attr_group = &tis_attr_grp,
	.miscdev = {
		    .fops = &tis_ops,},
};
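
/*
 * Two interrupt handlers are used below: tis_int_probe is attached only
 * while tpm_tis_init auto-probes for a working interrupt vector (it simply
 * records which irq fired and acknowledges it), while tis_int_handler
 * services interrupts during normal operation by waking the read/int wait
 * queues and acknowledging the cause bits in TPM_INT_STATUS.
 */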
static irqreturn_t tis_int_probe(int irq, void *dev_id)
{
	struct tpm_chip *chip = dev_id;
	u32 interrupt;

	interrupt = ioread32(chip->vendor.iobase +
			     TPM_INT_STATUS(chip->vendor.locality));

	if (interrupt == 0)
		return IRQ_NONE;

	chip->vendor.irq = irq;

	/* Clear interrupts handled with TPM_EOI */
	iowrite32(interrupt,
		  chip->vendor.iobase +
		  TPM_INT_STATUS(chip->vendor.locality));
	return IRQ_HANDLED;
}

static irqreturn_t tis_int_handler(int dummy, void *dev_id)
{
	struct tpm_chip *chip = dev_id;
	u32 interrupt;
	int i;

	interrupt = ioread32(chip->vendor.iobase +
			     TPM_INT_STATUS(chip->vendor.locality));

	if (interrupt == 0)
		return IRQ_NONE;

	if (interrupt & TPM_INTF_DATA_AVAIL_INT)
		wake_up_interruptible(&chip->vendor.read_queue);
	if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
		for (i = 0; i < 5; i++)
			if (check_locality(chip, i) >= 0)
				break;
	if (interrupt &
	    (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
	     TPM_INTF_CMD_READY_INT))
		wake_up_interruptible(&chip->vendor.int_queue);

	/* Clear interrupts handled with TPM_EOI */
	iowrite32(interrupt,
		  chip->vendor.iobase +
		  TPM_INT_STATUS(chip->vendor.locality));
	ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));
	return IRQ_HANDLED;
}

static int interrupts = 1;
module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");

static int tpm_tis_init(struct device *dev, resource_size_t start,
			resource_size_t len, unsigned int irq)
{
	u32 vendor, intfcaps, intmask;
	int rc, i;
	struct tpm_chip *chip;

	chip = tpm_register_hardware(dev, &tpm_tis);
	if (!chip)
		return -ENODEV;

	chip->vendor.iobase = ioremap(start, len);
	if (!chip->vendor.iobase) {
		rc = -EIO;
		goto out_err;
	}

	/* Default timeouts */
	chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
	chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
	chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
	chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);

	if (request_locality(chip, 0) != 0) {
		rc = -ENODEV;
		goto out_err;
	}

	vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));

	dev_info(dev,
		 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
		 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));

	if (itpm)
		dev_info(dev, "Intel iTPM workaround enabled\n");

	/* Figure out the capabilities */
	intfcaps =
	    ioread32(chip->vendor.iobase +
		     TPM_INTF_CAPS(chip->vendor.locality));
	dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
		intfcaps);
	if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
		dev_dbg(dev, "\tBurst Count Static\n");
	if (intfcaps & TPM_INTF_CMD_READY_INT)
		dev_dbg(dev, "\tCommand Ready Int Support\n");
	if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
		dev_dbg(dev, "\tInterrupt Edge Falling\n");
	if (intfcaps & TPM_INTF_INT_EDGE_RISING)
		dev_dbg(dev, "\tInterrupt Edge Rising\n");
	if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
		dev_dbg(dev, "\tInterrupt Level Low\n");
	if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
		dev_dbg(dev, "\tInterrupt Level High\n");
	if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
		dev_dbg(dev, "\tLocality Change Int Support\n");
	if (intfcaps & TPM_INTF_STS_VALID_INT)
		dev_dbg(dev, "\tSts Valid Int Support\n");
	if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
		dev_dbg(dev, "\tData Avail Int Support\n");

	/* INTERRUPT Setup */
	init_waitqueue_head(&chip->vendor.read_queue);
	init_waitqueue_head(&chip->vendor.int_queue);

	intmask =
	    ioread32(chip->vendor.iobase +
		     TPM_INT_ENABLE(chip->vendor.locality));

	intmask |= TPM_INTF_CMD_READY_INT
	    | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
	    | TPM_INTF_STS_VALID_INT;

	iowrite32(intmask,
		  chip->vendor.iobase +
		  TPM_INT_ENABLE(chip->vendor.locality));
	if (interrupts)
		chip->vendor.irq = irq;
	if (interrupts && !chip->vendor.irq) {
		chip->vendor.irq =
		    ioread8(chip->vendor.iobase +
			    TPM_INT_VECTOR(chip->vendor.locality));

		for (i = 3; i < 16 && chip->vendor.irq == 0; i++) {
			iowrite8(i, chip->vendor.iobase +
				 TPM_INT_VECTOR(chip->vendor.locality));
			if (request_irq
			    (i, tis_int_probe, IRQF_SHARED,
			     chip->vendor.miscdev.name, chip) != 0) {
				dev_info(chip->dev,
					 "Unable to request irq: %d for probe\n",
					 i);
				continue;
			}

			/* Clear all existing */
			iowrite32(ioread32
				  (chip->vendor.iobase +
				   TPM_INT_STATUS(chip->vendor.locality)),
				  chip->vendor.iobase +
				  TPM_INT_STATUS(chip->vendor.locality));

			/* Turn on */
			iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
				  chip->vendor.iobase +
				  TPM_INT_ENABLE(chip->vendor.locality));

			/* Generate Interrupts */
			tpm_gen_interrupt(chip);

			/* Turn off */
			iowrite32(intmask,
				  chip->vendor.iobase +
				  TPM_INT_ENABLE(chip->vendor.locality));
			free_irq(i, chip);
		}
	}
	if (chip->vendor.irq) {
		iowrite8(chip->vendor.irq,
			 chip->vendor.iobase +
			 TPM_INT_VECTOR(chip->vendor.locality));
		if (request_irq
		    (chip->vendor.irq, tis_int_handler, IRQF_SHARED,
		     chip->vendor.miscdev.name, chip) != 0) {
			dev_info(chip->dev,
				 "Unable to request irq: %d for use\n",
				 chip->vendor.irq);
			chip->vendor.irq = 0;
		} else {
			/* Clear all existing */
			iowrite32(ioread32
				  (chip->vendor.iobase +
				   TPM_INT_STATUS(chip->vendor.locality)),
				  chip->vendor.iobase +
				  TPM_INT_STATUS(chip->vendor.locality));

			/* Turn on */
			iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
				  chip->vendor.iobase +
				  TPM_INT_ENABLE(chip->vendor.locality));
		}
	}

	INIT_LIST_HEAD(&chip->vendor.list);
	spin_lock(&tis_lock);
	list_add(&chip->vendor.list, &tis_chips);
	spin_unlock(&tis_lock);

	tpm_get_timeouts(chip);
	tpm_continue_selftest(chip);

	return 0;
out_err:
	if (chip->vendor.iobase)
		iounmap(chip->vendor.iobase);
	tpm_remove_hardware(chip->dev);
	return rc;
}

static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
				      const struct pnp_device_id *pnp_id)
{
	resource_size_t start, len;
	unsigned int irq = 0;

	start = pnp_mem_start(pnp_dev, 0);
	len = pnp_mem_len(pnp_dev, 0);

	if (pnp_irq_valid(pnp_dev, 0))
		irq = pnp_irq(pnp_dev, 0);
	else
		interrupts = 0;

	return tpm_tis_init(&pnp_dev->dev, start, len, irq);
}

static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
{
	return tpm_pm_suspend(&dev->dev, msg);
}

static int tpm_tis_pnp_resume(struct pnp_dev *dev)
{
	return tpm_pm_resume(&dev->dev);
}

static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
	{"PNP0C31", 0},		/* TPM */
	{"ATM1200", 0},		/* Atmel */
	{"IFX0102", 0},		/* Infineon */
	{"BCM0101", 0},		/* Broadcom */
	{"BCM0102", 0},		/* Broadcom */
	{"NSC1200", 0},		/* National */
	{"ICO0102", 0},		/* Intel */
	/* Add new here */
	{"", 0},		/* User Specified */
	{"", 0}			/* Terminator */
};
MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);

static __devexit void tpm_tis_pnp_remove(struct pnp_dev *dev)
{
	struct tpm_chip *chip = pnp_get_drvdata(dev);

	tpm_dev_vendor_release(chip);

	kfree(chip);
}

static struct pnp_driver tis_pnp_driver = {
	.name = "tpm_tis",
	.id_table = tpm_pnp_tbl,
	.probe = tpm_tis_pnp_init,
	.suspend = tpm_tis_pnp_suspend,
	.resume = tpm_tis_pnp_resume,
	.remove = tpm_tis_pnp_remove,
};
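
/*
 * TIS_HID_USR_IDX is the index of the empty "User Specified" entry just
 * before the terminator in tpm_pnp_tbl; the "hid" module parameter below
 * writes an additional PNP HID into that slot at load time, so the driver
 * can also bind to a device ID that is not listed in the table.
 */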
#define TIS_HID_USR_IDX (sizeof(tpm_pnp_tbl) / sizeof(struct pnp_device_id) - 2)
module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
		    sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");

static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg)
{
	return tpm_pm_suspend(&dev->dev, msg);
}

static int tpm_tis_resume(struct platform_device *dev)
{
	return tpm_pm_resume(&dev->dev);
}

static struct platform_driver tis_drv = {
	.driver = {
		   .name = "tpm_tis",
		   .owner = THIS_MODULE,
		   },
	.suspend = tpm_tis_suspend,
	.resume = tpm_tis_resume,
};

static struct platform_device *pdev;

static int force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");

static int __init init_tis(void)
{
	int rc;

	if (force) {
		rc = platform_driver_register(&tis_drv);
		if (rc < 0)
			return rc;
		pdev = platform_device_register_simple("tpm_tis", -1, NULL, 0);
		if (IS_ERR(pdev)) {
			platform_driver_unregister(&tis_drv);
			return PTR_ERR(pdev);
		}
		rc = tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0);
		if (rc != 0) {
			platform_device_unregister(pdev);
			platform_driver_unregister(&tis_drv);
		}
		return rc;
	}

	return pnp_register_driver(&tis_pnp_driver);
}

static void __exit cleanup_tis(void)
{
	struct tpm_vendor_specific *i, *j;
	struct tpm_chip *chip;

	spin_lock(&tis_lock);
	list_for_each_entry_safe(i, j, &tis_chips, list) {
		chip = to_tpm_chip(i);
		tpm_remove_hardware(chip->dev);
		iowrite32(~TPM_GLOBAL_INT_ENABLE &
			  ioread32(chip->vendor.iobase +
				   TPM_INT_ENABLE(chip->vendor.
						  locality)),
			  chip->vendor.iobase +
			  TPM_INT_ENABLE(chip->vendor.locality));
		release_locality(chip, chip->vendor.locality, 1);
		if (chip->vendor.irq)
			free_irq(chip->vendor.irq, chip);
		iounmap(i->iobase);
		list_del(&i->list);
	}
	spin_unlock(&tis_lock);

	if (force) {
		platform_device_unregister(pdev);
		platform_driver_unregister(&tis_drv);
	} else
		pnp_unregister_driver(&tis_pnp_driver);
}

module_init(init_tis);
module_exit(cleanup_tis);
MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
MODULE_DESCRIPTION("TPM Driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");