/*
 * Copyright (C) 2005, 2006 IBM Corporation
 *
 * Authors:
 * Leendert van Doorn <leendert@watson.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 *
 * This device driver implements the TPM interface as defined in
 * the TCG TPM Interface Spec version 1.2, revision 1.0.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/acpi.h>
#include <linux/freezer.h>
#include "tpm.h"

enum tis_access {
	TPM_ACCESS_VALID = 0x80,
	TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
	TPM_ACCESS_REQUEST_PENDING = 0x04,
	TPM_ACCESS_REQUEST_USE = 0x02,
};

enum tis_status {
	TPM_STS_VALID = 0x80,
	TPM_STS_COMMAND_READY = 0x40,
	TPM_STS_GO = 0x20,
	TPM_STS_DATA_AVAIL = 0x10,
	TPM_STS_DATA_EXPECT = 0x08,
};

enum tis_int_flags {
	TPM_GLOBAL_INT_ENABLE = 0x80000000,
	TPM_INTF_BURST_COUNT_STATIC = 0x100,
	TPM_INTF_CMD_READY_INT = 0x080,
	TPM_INTF_INT_EDGE_FALLING = 0x040,
	TPM_INTF_INT_EDGE_RISING = 0x020,
	TPM_INTF_INT_LEVEL_LOW = 0x010,
	TPM_INTF_INT_LEVEL_HIGH = 0x008,
	TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
	TPM_INTF_STS_VALID_INT = 0x002,
	TPM_INTF_DATA_AVAIL_INT = 0x001,
};

enum tis_defaults {
	TIS_MEM_BASE = 0xFED40000,
	TIS_MEM_LEN = 0x5000,
	TIS_SHORT_TIMEOUT = 750,	/* ms */
	TIS_LONG_TIMEOUT = 2000,	/* 2 sec */
};

#define	TPM_ACCESS(l)		(0x0000 | ((l) << 12))
#define	TPM_INT_ENABLE(l)	(0x0008 | ((l) << 12))
#define	TPM_INT_VECTOR(l)	(0x000C | ((l) << 12))
#define	TPM_INT_STATUS(l)	(0x0010 | ((l) << 12))
#define	TPM_INTF_CAPS(l)	(0x0014 | ((l) << 12))
#define	TPM_STS(l)		(0x0018 | ((l) << 12))
#define	TPM_DATA_FIFO(l)	(0x0024 | ((l) << 12))

#define	TPM_DID_VID(l)		(0x0F00 | ((l) << 12))
#define	TPM_RID(l)		(0x0F04 | ((l) << 12))

static LIST_HEAD(tis_chips);
static DEFINE_MUTEX(tis_lock);

#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
static int is_itpm(struct pnp_dev *dev)
{
	struct acpi_device *acpi = pnp_acpi_device(dev);
	struct acpi_hardware_id *id;

	if (!acpi)
		return 0;

	list_for_each_entry(id, &acpi->pnp.ids, list) {
		if (!strcmp("INTC0102", id->id))
			return 1;
	}

	return 0;
}
#else
static inline int is_itpm(struct pnp_dev *dev)
{
	return 0;
}
#endif

/* Before we attempt to access the TPM we must see that the valid bit is set.
 * The specification says that this bit is 0 at reset and remains 0 until the
 * 'TPM has gone through its self test and initialization and has established
 * correct values in the other bits.' */
static int wait_startup(struct tpm_chip *chip, int l)
{
	unsigned long stop = jiffies + chip->vendor.timeout_a;
	do {
		if (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
		    TPM_ACCESS_VALID)
			return 0;
		msleep(TPM_TIMEOUT);
	} while (time_before(jiffies, stop));
	return -1;
}

static int check_locality(struct tpm_chip *chip, int l)
{
	if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
	     (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
	    (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
		return chip->vendor.locality = l;

	return -1;
}

static void release_locality(struct tpm_chip *chip, int l, int force)
{
	if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
		      (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
	    (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))
		iowrite8(TPM_ACCESS_ACTIVE_LOCALITY,
			 chip->vendor.iobase + TPM_ACCESS(l));
}

static int request_locality(struct tpm_chip *chip, int l)
{
	unsigned long stop, timeout;
	long rc;

	if (check_locality(chip, l) >= 0)
		return l;

	iowrite8(TPM_ACCESS_REQUEST_USE,
		 chip->vendor.iobase + TPM_ACCESS(l));

	stop = jiffies + chip->vendor.timeout_a;

	if (chip->vendor.irq) {
again:
		timeout = stop - jiffies;
		if ((long)timeout <= 0)
			return -1;
		rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
						      (check_locality
						       (chip, l) >= 0),
						      timeout);
		if (rc > 0)
			return l;
		if (rc == -ERESTARTSYS && freezing(current)) {
			clear_thread_flag(TIF_SIGPENDING);
			goto again;
		}
	} else {
		/* wait for the locality to be granted */
		do {
			if (check_locality(chip, l) >= 0)
				return l;
			msleep(TPM_TIMEOUT);
		} while (time_before(jiffies, stop));
	}
	return -1;
}

static u8 tpm_tis_status(struct tpm_chip *chip)
{
	return ioread8(chip->vendor.iobase +
		       TPM_STS(chip->vendor.locality));
}

static void tpm_tis_ready(struct tpm_chip *chip)
{
	/* this causes the current command to be aborted */
	iowrite8(TPM_STS_COMMAND_READY,
		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
}

static int get_burstcount(struct tpm_chip *chip)
{
	unsigned long stop;
	int burstcnt;

	/* wait for burstcount */
	/* which timeout value, spec has 2 answers (c & d) */
	stop = jiffies + chip->vendor.timeout_d;
	do {
		burstcnt = ioread8(chip->vendor.iobase +
				   TPM_STS(chip->vendor.locality) + 1);
		burstcnt += ioread8(chip->vendor.iobase +
				    TPM_STS(chip->vendor.locality) +
				    2) << 8;
		if (burstcnt)
			return burstcnt;
		msleep(TPM_TIMEOUT);
	} while (time_before(jiffies, stop));
	return -EBUSY;
}

static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
{
	int size = 0, burstcnt;
	while (size < count &&
	       wait_for_tpm_stat(chip,
				 TPM_STS_DATA_AVAIL | TPM_STS_VALID,
				 chip->vendor.timeout_c,
				 &chip->vendor.read_queue, true)
	       == 0) {
		burstcnt = get_burstcount(chip);
		for (; burstcnt > 0 && size < count; burstcnt--)
			buf[size++] = ioread8(chip->vendor.iobase +
					      TPM_DATA_FIFO(chip->vendor.locality));
	}
	return size;
}

static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	int size = 0;
	int expected, status;

	if (count < TPM_HEADER_SIZE) {
		size = -EIO;
		goto out;
	}

	/* read first 10 bytes, including tag, paramsize, and result */
	if ((size =
	     recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) {
		dev_err(chip->dev, "Unable to read header\n");
		goto out;
	}

	expected = be32_to_cpu(*(__be32 *) (buf + 2));
	if (expected > count) {
		size = -EIO;
		goto out;
	}

	if ((size +=
	     recv_data(chip, &buf[TPM_HEADER_SIZE],
		       expected - TPM_HEADER_SIZE)) < expected) {
		dev_err(chip->dev, "Unable to read remainder of result\n");
		size = -ETIME;
		goto out;
	}

	wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
			  &chip->vendor.int_queue, false);
	status = tpm_tis_status(chip);
	if (status & TPM_STS_DATA_AVAIL) {	/* retry? */
		dev_err(chip->dev, "Error left over data\n");
		size = -EIO;
		goto out;
	}

out:
	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);
	return size;
}

static bool itpm;
module_param(itpm, bool, 0444);
MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");

/*
 * If interrupts are used (signaled by an irq set in the vendor structure)
 * tpm.c can skip polling for the data to be available as the interrupt is
 * waited for here
 */
static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
{
	int rc, status, burstcnt;
	size_t count = 0;

	if (request_locality(chip, 0) < 0)
		return -EBUSY;

	status = tpm_tis_status(chip);
	if ((status & TPM_STS_COMMAND_READY) == 0) {
		tpm_tis_ready(chip);
		if (wait_for_tpm_stat
		    (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
		     &chip->vendor.int_queue, false) < 0) {
			rc = -ETIME;
			goto out_err;
		}
	}

	while (count < len - 1) {
		burstcnt = get_burstcount(chip);
		for (; burstcnt > 0 && count < len - 1; burstcnt--) {
			iowrite8(buf[count], chip->vendor.iobase +
				 TPM_DATA_FIFO(chip->vendor.locality));
			count++;
		}

		wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
				  &chip->vendor.int_queue, false);
		status = tpm_tis_status(chip);
		if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
			rc = -EIO;
			goto out_err;
		}
	}

	/* write last byte */
	iowrite8(buf[count],
		 chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality));
	wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
			  &chip->vendor.int_queue, false);
	status = tpm_tis_status(chip);
	if ((status & TPM_STS_DATA_EXPECT) != 0) {
		rc = -EIO;
		goto out_err;
	}

	return 0;

out_err:
	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);
	return rc;
}

/*
 * If interrupts are used (signaled by an irq set in the vendor structure)
 * tpm.c can skip polling for the data to be available as the interrupt is
 * waited for here
 */
static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
	int rc;
	u32 ordinal;

	rc = tpm_tis_send_data(chip, buf, len);
	if (rc < 0)
		return rc;

	/* go and do it */
	iowrite8(TPM_STS_GO,
		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));

	if (chip->vendor.irq) {
		ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
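		/*
		 * With a working IRQ, sleep on read_queue until the
		 * data-available interrupt fires (bounded by the
		 * ordinal-specific duration) instead of polling the
		 * status register.
		 */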
		if (wait_for_tpm_stat
		    (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
		     tpm_calc_ordinal_duration(chip, ordinal),
		     &chip->vendor.read_queue, false) < 0) {
			rc = -ETIME;
			goto out_err;
		}
	}
	return len;
out_err:
	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);
	return rc;
}

struct tis_vendor_timeout_override {
	u32 did_vid;
	unsigned long timeout_us[4];
};

static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = {
	/* Atmel 3204 */
	{ 0x32041114, { (TIS_SHORT_TIMEOUT*1000), (TIS_LONG_TIMEOUT*1000),
			(TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } },
};

static bool tpm_tis_update_timeouts(struct tpm_chip *chip,
				    unsigned long *timeout_cap)
{
	int i;
	u32 did_vid;

	did_vid = ioread32(chip->vendor.iobase + TPM_DID_VID(0));

	for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
		if (vendor_timeout_overrides[i].did_vid != did_vid)
			continue;
		memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us,
		       sizeof(vendor_timeout_overrides[i].timeout_us));
		return true;
	}

	return false;
}

/*
 * Early probing for iTPM with STS_DATA_EXPECT flaw.
 * Try sending command without itpm flag set and if that
 * fails, repeat with itpm flag set.
 */
static int probe_itpm(struct tpm_chip *chip)
{
	int rc = 0;
	u8 cmd_getticks[] = {
		0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
		0x00, 0x00, 0x00, 0xf1
	};
	size_t len = sizeof(cmd_getticks);
	bool rem_itpm = itpm;
	u16 vendor = ioread16(chip->vendor.iobase + TPM_DID_VID(0));

	/* probe only iTPMs */
	if (vendor != TPM_VID_INTEL)
		return 0;

	itpm = false;

	rc = tpm_tis_send_data(chip, cmd_getticks, len);
	if (rc == 0)
		goto out;

	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);

	itpm = true;

	rc = tpm_tis_send_data(chip, cmd_getticks, len);
	if (rc == 0) {
		dev_info(chip->dev, "Detected an iTPM.\n");
		rc = 1;
	} else
		rc = -EFAULT;

out:
	itpm = rem_itpm;
	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);

	return rc;
}

static bool tpm_tis_req_canceled(struct tpm_chip *chip, u8 status)
{
	switch (chip->vendor.manufacturer_id) {
	case TPM_VID_WINBOND:
		return ((status == TPM_STS_VALID) ||
			(status == (TPM_STS_VALID | TPM_STS_COMMAND_READY)));
	case TPM_VID_STM:
		return (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY));
	default:
		return (status == TPM_STS_COMMAND_READY);
	}
}

static const struct tpm_class_ops tpm_tis = {
	.status = tpm_tis_status,
	.recv = tpm_tis_recv,
	.send = tpm_tis_send,
	.cancel = tpm_tis_ready,
	.update_timeouts = tpm_tis_update_timeouts,
	.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
	.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
	.req_canceled = tpm_tis_req_canceled,
};

static irqreturn_t tis_int_probe(int irq, void *dev_id)
{
	struct tpm_chip *chip = dev_id;
	u32 interrupt;

	interrupt = ioread32(chip->vendor.iobase +
			     TPM_INT_STATUS(chip->vendor.locality));

	if (interrupt == 0)
		return IRQ_NONE;

	chip->vendor.probed_irq = irq;

	/* Clear interrupts handled with TPM_EOI */
	iowrite32(interrupt,
		  chip->vendor.iobase +
		  TPM_INT_STATUS(chip->vendor.locality));
	return IRQ_HANDLED;
}
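
/*
 * Interrupt handler used once a working IRQ has been found: wake whichever
 * waiter the status bits indicate (read_queue for data-available, int_queue
 * for the other events), then acknowledge by writing the status bits back
 * and reading TPM_INT_STATUS so the write reaches the device before return.
 */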
static irqreturn_t tis_int_handler(int dummy, void *dev_id)
{
	struct tpm_chip *chip = dev_id;
	u32 interrupt;
	int i;

	interrupt = ioread32(chip->vendor.iobase +
			     TPM_INT_STATUS(chip->vendor.locality));

	if (interrupt == 0)
		return IRQ_NONE;

	if (interrupt & TPM_INTF_DATA_AVAIL_INT)
		wake_up_interruptible(&chip->vendor.read_queue);
	if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
		for (i = 0; i < 5; i++)
			if (check_locality(chip, i) >= 0)
				break;
	if (interrupt &
	    (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
	     TPM_INTF_CMD_READY_INT))
		wake_up_interruptible(&chip->vendor.int_queue);

	/* Clear interrupts handled with TPM_EOI */
	iowrite32(interrupt,
		  chip->vendor.iobase +
		  TPM_INT_STATUS(chip->vendor.locality));
	ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));
	return IRQ_HANDLED;
}

static bool interrupts = true;
module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");

static int tpm_tis_init(struct device *dev, resource_size_t start,
			resource_size_t len, unsigned int irq)
{
	u32 vendor, intfcaps, intmask;
	int rc, i, irq_s, irq_e, probe;
	struct tpm_chip *chip;

	if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
		return -ENODEV;

	chip->vendor.iobase = ioremap(start, len);
	if (!chip->vendor.iobase) {
		rc = -EIO;
		goto out_err;
	}

	/* Default timeouts */
	chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
	chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
	chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
	chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);

	if (wait_startup(chip, 0) != 0) {
		rc = -ENODEV;
		goto out_err;
	}

	if (request_locality(chip, 0) != 0) {
		rc = -ENODEV;
		goto out_err;
	}

	vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
	chip->vendor.manufacturer_id = vendor;

	dev_info(dev,
		 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
		 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));

	if (!itpm) {
		probe = probe_itpm(chip);
		if (probe < 0) {
			rc = -ENODEV;
			goto out_err;
		}
		itpm = !!probe;
	}

	if (itpm)
		dev_info(dev, "Intel iTPM workaround enabled\n");

	/* Figure out the capabilities */
	intfcaps =
	    ioread32(chip->vendor.iobase +
		     TPM_INTF_CAPS(chip->vendor.locality));
	dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
		intfcaps);
	if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
		dev_dbg(dev, "\tBurst Count Static\n");
	if (intfcaps & TPM_INTF_CMD_READY_INT)
		dev_dbg(dev, "\tCommand Ready Int Support\n");
	if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
		dev_dbg(dev, "\tInterrupt Edge Falling\n");
	if (intfcaps & TPM_INTF_INT_EDGE_RISING)
		dev_dbg(dev, "\tInterrupt Edge Rising\n");
	if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
		dev_dbg(dev, "\tInterrupt Level Low\n");
	if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
		dev_dbg(dev, "\tInterrupt Level High\n");
	if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
		dev_dbg(dev, "\tLocality Change Int Support\n");
	if (intfcaps & TPM_INTF_STS_VALID_INT)
		dev_dbg(dev, "\tSts Valid Int Support\n");
	if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
		dev_dbg(dev, "\tData Avail Int Support\n");

	/* get the timeouts before testing for irqs */
	if (tpm_get_timeouts(chip)) {
		dev_err(dev,
"Could not get TPM timeouts and durations\n"); 611 rc = -ENODEV; 612 goto out_err; 613 } 614 615 if (tpm_do_selftest(chip)) { 616 dev_err(dev, "TPM self test failed\n"); 617 rc = -ENODEV; 618 goto out_err; 619 } 620 621 /* INTERRUPT Setup */ 622 init_waitqueue_head(&chip->vendor.read_queue); 623 init_waitqueue_head(&chip->vendor.int_queue); 624 625 intmask = 626 ioread32(chip->vendor.iobase + 627 TPM_INT_ENABLE(chip->vendor.locality)); 628 629 intmask |= TPM_INTF_CMD_READY_INT 630 | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT 631 | TPM_INTF_STS_VALID_INT; 632 633 iowrite32(intmask, 634 chip->vendor.iobase + 635 TPM_INT_ENABLE(chip->vendor.locality)); 636 if (interrupts) 637 chip->vendor.irq = irq; 638 if (interrupts && !chip->vendor.irq) { 639 irq_s = 640 ioread8(chip->vendor.iobase + 641 TPM_INT_VECTOR(chip->vendor.locality)); 642 if (irq_s) { 643 irq_e = irq_s; 644 } else { 645 irq_s = 3; 646 irq_e = 15; 647 } 648 649 for (i = irq_s; i <= irq_e && chip->vendor.irq == 0; i++) { 650 iowrite8(i, chip->vendor.iobase + 651 TPM_INT_VECTOR(chip->vendor.locality)); 652 if (request_irq 653 (i, tis_int_probe, IRQF_SHARED, 654 chip->vendor.miscdev.name, chip) != 0) { 655 dev_info(chip->dev, 656 "Unable to request irq: %d for probe\n", 657 i); 658 continue; 659 } 660 661 /* Clear all existing */ 662 iowrite32(ioread32 663 (chip->vendor.iobase + 664 TPM_INT_STATUS(chip->vendor.locality)), 665 chip->vendor.iobase + 666 TPM_INT_STATUS(chip->vendor.locality)); 667 668 /* Turn on */ 669 iowrite32(intmask | TPM_GLOBAL_INT_ENABLE, 670 chip->vendor.iobase + 671 TPM_INT_ENABLE(chip->vendor.locality)); 672 673 chip->vendor.probed_irq = 0; 674 675 /* Generate Interrupts */ 676 tpm_gen_interrupt(chip); 677 678 chip->vendor.irq = chip->vendor.probed_irq; 679 680 /* free_irq will call into tis_int_probe; 681 clear all irqs we haven't seen while doing 682 tpm_gen_interrupt */ 683 iowrite32(ioread32 684 (chip->vendor.iobase + 685 TPM_INT_STATUS(chip->vendor.locality)), 686 chip->vendor.iobase + 687 TPM_INT_STATUS(chip->vendor.locality)); 688 689 /* Turn off */ 690 iowrite32(intmask, 691 chip->vendor.iobase + 692 TPM_INT_ENABLE(chip->vendor.locality)); 693 free_irq(i, chip); 694 } 695 } 696 if (chip->vendor.irq) { 697 iowrite8(chip->vendor.irq, 698 chip->vendor.iobase + 699 TPM_INT_VECTOR(chip->vendor.locality)); 700 if (request_irq 701 (chip->vendor.irq, tis_int_handler, IRQF_SHARED, 702 chip->vendor.miscdev.name, chip) != 0) { 703 dev_info(chip->dev, 704 "Unable to request irq: %d for use\n", 705 chip->vendor.irq); 706 chip->vendor.irq = 0; 707 } else { 708 /* Clear all existing */ 709 iowrite32(ioread32 710 (chip->vendor.iobase + 711 TPM_INT_STATUS(chip->vendor.locality)), 712 chip->vendor.iobase + 713 TPM_INT_STATUS(chip->vendor.locality)); 714 715 /* Turn on */ 716 iowrite32(intmask | TPM_GLOBAL_INT_ENABLE, 717 chip->vendor.iobase + 718 TPM_INT_ENABLE(chip->vendor.locality)); 719 } 720 } 721 722 INIT_LIST_HEAD(&chip->vendor.list); 723 mutex_lock(&tis_lock); 724 list_add(&chip->vendor.list, &tis_chips); 725 mutex_unlock(&tis_lock); 726 727 728 return 0; 729 out_err: 730 if (chip->vendor.iobase) 731 iounmap(chip->vendor.iobase); 732 tpm_remove_hardware(chip->dev); 733 return rc; 734 } 735 736 #ifdef CONFIG_PM_SLEEP 737 static void tpm_tis_reenable_interrupts(struct tpm_chip *chip) 738 { 739 u32 intmask; 740 741 /* reenable interrupts that device may have lost or 742 BIOS/firmware may have disabled */ 743 iowrite8(chip->vendor.irq, chip->vendor.iobase + 744 TPM_INT_VECTOR(chip->vendor.locality)); 745 
	intmask =
	    ioread32(chip->vendor.iobase +
		     TPM_INT_ENABLE(chip->vendor.locality));

	intmask |= TPM_INTF_CMD_READY_INT
	    | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
	    | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;

	iowrite32(intmask,
		  chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
}

static int tpm_tis_resume(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	int ret;

	if (chip->vendor.irq)
		tpm_tis_reenable_interrupts(chip);

	ret = tpm_pm_resume(dev);
	if (!ret)
		tpm_do_selftest(chip);

	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);

#ifdef CONFIG_PNP
static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
			    const struct pnp_device_id *pnp_id)
{
	resource_size_t start, len;
	unsigned int irq = 0;

	start = pnp_mem_start(pnp_dev, 0);
	len = pnp_mem_len(pnp_dev, 0);

	if (pnp_irq_valid(pnp_dev, 0))
		irq = pnp_irq(pnp_dev, 0);
	else
		interrupts = false;

	if (is_itpm(pnp_dev))
		itpm = true;

	return tpm_tis_init(&pnp_dev->dev, start, len, irq);
}

static struct pnp_device_id tpm_pnp_tbl[] = {
	{"PNP0C31", 0},		/* TPM */
	{"ATM1200", 0},		/* Atmel */
	{"IFX0102", 0},		/* Infineon */
	{"BCM0101", 0},		/* Broadcom */
	{"BCM0102", 0},		/* Broadcom */
	{"NSC1200", 0},		/* National */
	{"ICO0102", 0},		/* Intel */
	/* Add new here */
	{"", 0},		/* User Specified */
	{"", 0}			/* Terminator */
};
MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);

static void tpm_tis_pnp_remove(struct pnp_dev *dev)
{
	struct tpm_chip *chip = pnp_get_drvdata(dev);

	tpm_dev_vendor_release(chip);

	kfree(chip);
}

static struct pnp_driver tis_pnp_driver = {
	.name = "tpm_tis",
	.id_table = tpm_pnp_tbl,
	.probe = tpm_tis_pnp_init,
	.remove = tpm_tis_pnp_remove,
	.driver	= {
		.pm = &tpm_tis_pm,
	},
};

#define TIS_HID_USR_IDX sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) -2
module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
		    sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
#endif

static struct platform_driver tis_drv = {
	.driver = {
		.name = "tpm_tis",
		.owner = THIS_MODULE,
		.pm = &tpm_tis_pm,
	},
};

static struct platform_device *pdev;

static bool force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
static int __init init_tis(void)
{
	int rc;
#ifdef CONFIG_PNP
	if (!force)
		return pnp_register_driver(&tis_pnp_driver);
#endif

	rc = platform_driver_register(&tis_drv);
	if (rc < 0)
		return rc;
	pdev = platform_device_register_simple("tpm_tis", -1, NULL, 0);
	if (IS_ERR(pdev)) {
		rc = PTR_ERR(pdev);
		goto err_dev;
	}
	rc = tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0);
	if (rc)
		goto err_init;
	return 0;
err_init:
	platform_device_unregister(pdev);
err_dev:
	platform_driver_unregister(&tis_drv);
	return rc;
}

static void __exit cleanup_tis(void)
{
	struct tpm_vendor_specific *i, *j;
	struct tpm_chip *chip;
	mutex_lock(&tis_lock);
	list_for_each_entry_safe(i, j, &tis_chips, list) {
		chip = to_tpm_chip(i);
		tpm_remove_hardware(chip->dev);
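		/* Mask the global interrupt enable for this locality before
		   releasing it and freeing the IRQ. */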
		iowrite32(~TPM_GLOBAL_INT_ENABLE &
			  ioread32(chip->vendor.iobase +
				   TPM_INT_ENABLE(chip->vendor.locality)),
			  chip->vendor.iobase +
			  TPM_INT_ENABLE(chip->vendor.locality));
		release_locality(chip, chip->vendor.locality, 1);
		if (chip->vendor.irq)
			free_irq(chip->vendor.irq, chip);
		iounmap(i->iobase);
		list_del(&i->list);
	}
	mutex_unlock(&tis_lock);
#ifdef CONFIG_PNP
	if (!force) {
		pnp_unregister_driver(&tis_pnp_driver);
		return;
	}
#endif
	platform_device_unregister(pdev);
	platform_driver_unregister(&tis_drv);
}

module_init(init_tis);
module_exit(cleanup_tis);
MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
MODULE_DESCRIPTION("TPM Driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");