/*
 * Copyright (C) 2005, 2006 IBM Corporation
 * Copyright (C) 2014 Intel Corporation
 *
 * Authors:
 * Leendert van Doorn <leendert@watson.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 *
 * This device driver implements the TPM interface as defined in
 * the TCG TPM Interface Spec version 1.2, revision 1.0.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/acpi.h>
#include <linux/freezer.h>
#include "tpm.h"

enum tis_access {
	TPM_ACCESS_VALID = 0x80,
	TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
	TPM_ACCESS_REQUEST_PENDING = 0x04,
	TPM_ACCESS_REQUEST_USE = 0x02,
};

enum tis_status {
	TPM_STS_VALID = 0x80,
	TPM_STS_COMMAND_READY = 0x40,
	TPM_STS_GO = 0x20,
	TPM_STS_DATA_AVAIL = 0x10,
	TPM_STS_DATA_EXPECT = 0x08,
};

enum tis_int_flags {
	TPM_GLOBAL_INT_ENABLE = 0x80000000,
	TPM_INTF_BURST_COUNT_STATIC = 0x100,
	TPM_INTF_CMD_READY_INT = 0x080,
	TPM_INTF_INT_EDGE_FALLING = 0x040,
	TPM_INTF_INT_EDGE_RISING = 0x020,
	TPM_INTF_INT_LEVEL_LOW = 0x010,
	TPM_INTF_INT_LEVEL_HIGH = 0x008,
	TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
	TPM_INTF_STS_VALID_INT = 0x002,
	TPM_INTF_DATA_AVAIL_INT = 0x001,
};

enum tis_defaults {
	TIS_MEM_BASE = 0xFED40000,
	TIS_MEM_LEN = 0x5000,
	TIS_SHORT_TIMEOUT = 750,	/* ms */
	TIS_LONG_TIMEOUT = 2000,	/* 2 sec */
};

/* Some timeout values are needed before it is known whether the chip is
 * TPM 1.0 or TPM 2.0.
 */
#define TIS_TIMEOUT_A_MAX	max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_A)
#define TIS_TIMEOUT_B_MAX	max(TIS_LONG_TIMEOUT, TPM2_TIMEOUT_B)
#define TIS_TIMEOUT_C_MAX	max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_C)
#define TIS_TIMEOUT_D_MAX	max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_D)

#define TPM_ACCESS(l)		(0x0000 | ((l) << 12))
#define TPM_INT_ENABLE(l)	(0x0008 | ((l) << 12))
#define TPM_INT_VECTOR(l)	(0x000C | ((l) << 12))
#define TPM_INT_STATUS(l)	(0x0010 | ((l) << 12))
#define TPM_INTF_CAPS(l)	(0x0014 | ((l) << 12))
#define TPM_STS(l)		(0x0018 | ((l) << 12))
#define TPM_STS3(l)		(0x001b | ((l) << 12))
#define TPM_DATA_FIFO(l)	(0x0024 | ((l) << 12))

#define TPM_DID_VID(l)		(0x0F00 | ((l) << 12))
#define TPM_RID(l)		(0x0F04 | ((l) << 12))

struct priv_data {
	bool irq_tested;
};

#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
static int is_itpm(struct pnp_dev *dev)
{
	struct acpi_device *acpi = pnp_acpi_device(dev);
	struct acpi_hardware_id *id;

	if (!acpi)
		return 0;

	list_for_each_entry(id, &acpi->pnp.ids, list) {
		if (!strcmp("INTC0102", id->id))
			return 1;
	}

	return 0;
}
#else
static inline int is_itpm(struct pnp_dev *dev)
{
	return 0;
}
#endif

/* Before we attempt to access the TPM we must see that the valid bit is set.
 * The specification says that this bit is 0 at reset and remains 0 until the
 * 'TPM has gone through its self test and initialization and has established
 * correct values in the other bits.'
 */
static int wait_startup(struct tpm_chip *chip, int l)
{
	unsigned long stop = jiffies + chip->vendor.timeout_a;

	do {
		if (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
		    TPM_ACCESS_VALID)
			return 0;
		msleep(TPM_TIMEOUT);
	} while (time_before(jiffies, stop));
	return -1;
}

static int check_locality(struct tpm_chip *chip, int l)
{
	if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
	     (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
	    (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
		return chip->vendor.locality = l;

	return -1;
}

static void release_locality(struct tpm_chip *chip, int l, int force)
{
	if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
		      (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
	    (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))
		iowrite8(TPM_ACCESS_ACTIVE_LOCALITY,
			 chip->vendor.iobase + TPM_ACCESS(l));
}

static int request_locality(struct tpm_chip *chip, int l)
{
	unsigned long stop, timeout;
	long rc;

	if (check_locality(chip, l) >= 0)
		return l;

	iowrite8(TPM_ACCESS_REQUEST_USE,
		 chip->vendor.iobase + TPM_ACCESS(l));

	stop = jiffies + chip->vendor.timeout_a;

	if (chip->vendor.irq) {
again:
		timeout = stop - jiffies;
		if ((long)timeout <= 0)
			return -1;
		rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
						      check_locality(chip, l) >= 0,
						      timeout);
		if (rc > 0)
			return l;
		if (rc == -ERESTARTSYS && freezing(current)) {
			clear_thread_flag(TIF_SIGPENDING);
			goto again;
		}
	} else {
		/* wait for burstcount */
		do {
			if (check_locality(chip, l) >= 0)
				return l;
			msleep(TPM_TIMEOUT);
		} while (time_before(jiffies, stop));
	}
	return -1;
}

static u8 tpm_tis_status(struct tpm_chip *chip)
{
	return ioread8(chip->vendor.iobase +
		       TPM_STS(chip->vendor.locality));
}

static void tpm_tis_ready(struct tpm_chip *chip)
{
	/* this causes the current command to be aborted */
	iowrite8(TPM_STS_COMMAND_READY,
		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
}

static int get_burstcount(struct tpm_chip *chip)
{
	unsigned long stop;
	int burstcnt;

	/* wait for burstcount */
	/* which timeout value, spec has 2 answers (c & d) */
	stop = jiffies + chip->vendor.timeout_d;
	do {
		burstcnt = ioread8(chip->vendor.iobase +
				   TPM_STS(chip->vendor.locality) + 1);
		burstcnt += ioread8(chip->vendor.iobase +
				    TPM_STS(chip->vendor.locality) + 2) << 8;
		if (burstcnt)
			return burstcnt;
		msleep(TPM_TIMEOUT);
	} while (time_before(jiffies, stop));
	return -EBUSY;
}

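/* Pull up to 'count' bytes out of the data FIFO, reading at most one burst
 * between each wait for STS.dataAvail; returns the number of bytes read.
 */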
static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
{
	int size = 0, burstcnt;

	while (size < count &&
	       wait_for_tpm_stat(chip,
				 TPM_STS_DATA_AVAIL | TPM_STS_VALID,
				 chip->vendor.timeout_c,
				 &chip->vendor.read_queue, true) == 0) {
		burstcnt = get_burstcount(chip);
		for (; burstcnt > 0 && size < count; burstcnt--)
			buf[size++] = ioread8(chip->vendor.iobase +
					      TPM_DATA_FIFO(chip->vendor.locality));
	}
	return size;
}

static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	int size = 0;
	int expected, status;

	if (count < TPM_HEADER_SIZE) {
		size = -EIO;
		goto out;
	}

	/* read first 10 bytes, including tag, paramsize, and result */
	size = recv_data(chip, buf, TPM_HEADER_SIZE);
	if (size < TPM_HEADER_SIZE) {
		dev_err(chip->pdev, "Unable to read header\n");
		goto out;
	}

	expected = be32_to_cpu(*(__be32 *) (buf + 2));
	if (expected > count) {
		size = -EIO;
		goto out;
	}

	size += recv_data(chip, &buf[TPM_HEADER_SIZE],
			  expected - TPM_HEADER_SIZE);
	if (size < expected) {
		dev_err(chip->pdev, "Unable to read remainder of result\n");
		size = -ETIME;
		goto out;
	}

	wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
			  &chip->vendor.int_queue, false);
	status = tpm_tis_status(chip);
	if (status & TPM_STS_DATA_AVAIL) {	/* retry? */
		dev_err(chip->pdev, "Error left over data\n");
		size = -EIO;
		goto out;
	}

out:
	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);
	return size;
}

static bool itpm;
module_param(itpm, bool, 0444);
MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");

/*
 * If interrupts are used (signaled by an irq set in the vendor structure)
 * tpm.c can skip polling for the data to be available as the interrupt is
 * waited for here
 */
static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
{
	int rc, status, burstcnt;
	size_t count = 0;

	if (request_locality(chip, 0) < 0)
		return -EBUSY;

	status = tpm_tis_status(chip);
	if ((status & TPM_STS_COMMAND_READY) == 0) {
		tpm_tis_ready(chip);
		if (wait_for_tpm_stat(chip, TPM_STS_COMMAND_READY,
				      chip->vendor.timeout_b,
				      &chip->vendor.int_queue, false) < 0) {
			rc = -ETIME;
			goto out_err;
		}
	}

	while (count < len - 1) {
		burstcnt = get_burstcount(chip);
		for (; burstcnt > 0 && count < len - 1; burstcnt--) {
			iowrite8(buf[count], chip->vendor.iobase +
				 TPM_DATA_FIFO(chip->vendor.locality));
			count++;
		}

		wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
				  &chip->vendor.int_queue, false);
		status = tpm_tis_status(chip);
		if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
			rc = -EIO;
			goto out_err;
		}
	}

	/* write last byte */
	iowrite8(buf[count],
		 chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality));
	wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
			  &chip->vendor.int_queue, false);
	status = tpm_tis_status(chip);
	if ((status & TPM_STS_DATA_EXPECT) != 0) {
		rc = -EIO;
		goto out_err;
	}

	return 0;

out_err:
	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);
	return rc;
}

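/* Mask the global interrupt enable and give the IRQ back; used when the
 * interrupt self-test in tpm_tis_send() finds that the line never fires.
 */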
static void disable_interrupts(struct tpm_chip *chip)
{
	u32 intmask;

	intmask = ioread32(chip->vendor.iobase +
			   TPM_INT_ENABLE(chip->vendor.locality));
	intmask &= ~TPM_GLOBAL_INT_ENABLE;
	iowrite32(intmask,
		  chip->vendor.iobase +
		  TPM_INT_ENABLE(chip->vendor.locality));
	free_irq(chip->vendor.irq, chip);
	chip->vendor.irq = 0;
}

/*
 * If interrupts are used (signaled by an irq set in the vendor structure)
 * tpm.c can skip polling for the data to be available as the interrupt is
 * waited for here
 */
static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len)
{
	int rc;
	u32 ordinal;
	unsigned long dur;

	rc = tpm_tis_send_data(chip, buf, len);
	if (rc < 0)
		return rc;

	/* go and do it */
	iowrite8(TPM_STS_GO,
		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));

	if (chip->vendor.irq) {
		ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));

		if (chip->flags & TPM_CHIP_FLAG_TPM2)
			dur = tpm2_calc_ordinal_duration(chip, ordinal);
		else
			dur = tpm_calc_ordinal_duration(chip, ordinal);

		if (wait_for_tpm_stat(chip,
				      TPM_STS_DATA_AVAIL | TPM_STS_VALID, dur,
				      &chip->vendor.read_queue, false) < 0) {
			rc = -ETIME;
			goto out_err;
		}
	}
	return len;
out_err:
	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);
	return rc;
}

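/* On the first interrupt-driven command the IRQ is temporarily cleared so the
 * core polls for completion; if the handler still has not set irq_tested by
 * the time the command finishes, the interrupt is considered broken and the
 * driver drops back to polling for good.
 */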
static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
	int rc, irq;
	struct priv_data *priv = chip->vendor.priv;

	if (!chip->vendor.irq || priv->irq_tested)
		return tpm_tis_send_main(chip, buf, len);

	/* Verify receipt of the expected IRQ */
	irq = chip->vendor.irq;
	chip->vendor.irq = 0;
	rc = tpm_tis_send_main(chip, buf, len);
	chip->vendor.irq = irq;
	if (!priv->irq_tested)
		msleep(1);
	if (!priv->irq_tested) {
		disable_interrupts(chip);
		dev_err(chip->pdev,
			FW_BUG "TPM interrupt not working, polling instead\n");
	}
	priv->irq_tested = true;
	return rc;
}

struct tis_vendor_timeout_override {
	u32 did_vid;
	unsigned long timeout_us[4];
};

static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = {
	/* Atmel 3204 */
	{ 0x32041114, { (TIS_SHORT_TIMEOUT * 1000), (TIS_LONG_TIMEOUT * 1000),
			(TIS_SHORT_TIMEOUT * 1000), (TIS_SHORT_TIMEOUT * 1000) } },
};

static bool tpm_tis_update_timeouts(struct tpm_chip *chip,
				    unsigned long *timeout_cap)
{
	int i;
	u32 did_vid;

	did_vid = ioread32(chip->vendor.iobase + TPM_DID_VID(0));

	for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
		if (vendor_timeout_overrides[i].did_vid != did_vid)
			continue;
		memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us,
		       sizeof(vendor_timeout_overrides[i].timeout_us));
		return true;
	}

	return false;
}

/*
 * Early probing for iTPM with STS_DATA_EXPECT flaw.
 * Try sending command without itpm flag set and if that
 * fails, repeat with itpm flag set.
 */
static int probe_itpm(struct tpm_chip *chip)
{
	int rc = 0;
	u8 cmd_getticks[] = {
		0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
		0x00, 0x00, 0x00, 0xf1
	};
	size_t len = sizeof(cmd_getticks);
	bool rem_itpm = itpm;
	u16 vendor = ioread16(chip->vendor.iobase + TPM_DID_VID(0));

	/* probe only iTPMs */
	if (vendor != TPM_VID_INTEL)
		return 0;

	itpm = false;

	rc = tpm_tis_send_data(chip, cmd_getticks, len);
	if (rc == 0)
		goto out;

	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);

	itpm = true;

	rc = tpm_tis_send_data(chip, cmd_getticks, len);
	if (rc == 0) {
		dev_info(chip->pdev, "Detected an iTPM.\n");
		rc = 1;
	} else
		rc = -EFAULT;

out:
	itpm = rem_itpm;
	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);

	return rc;
}

static bool tpm_tis_req_canceled(struct tpm_chip *chip, u8 status)
{
	switch (chip->vendor.manufacturer_id) {
	case TPM_VID_WINBOND:
		return ((status == TPM_STS_VALID) ||
			(status == (TPM_STS_VALID | TPM_STS_COMMAND_READY)));
	case TPM_VID_STM:
		return (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY));
	default:
		return (status == TPM_STS_COMMAND_READY);
	}
}

static const struct tpm_class_ops tpm_tis = {
	.status = tpm_tis_status,
	.recv = tpm_tis_recv,
	.send = tpm_tis_send,
	.cancel = tpm_tis_ready,
	.update_timeouts = tpm_tis_update_timeouts,
	.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
	.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
	.req_canceled = tpm_tis_req_canceled,
};

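/* Minimal handler installed only while auto-probing the interrupt line: it
 * records the vector that fired in vendor.probed_irq and acknowledges it.
 */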
static irqreturn_t tis_int_probe(int irq, void *dev_id)
{
	struct tpm_chip *chip = dev_id;
	u32 interrupt;

	interrupt = ioread32(chip->vendor.iobase +
			     TPM_INT_STATUS(chip->vendor.locality));

	if (interrupt == 0)
		return IRQ_NONE;

	chip->vendor.probed_irq = irq;

	/* Clear interrupts handled with TPM_EOI */
	iowrite32(interrupt,
		  chip->vendor.iobase +
		  TPM_INT_STATUS(chip->vendor.locality));
	return IRQ_HANDLED;
}

static irqreturn_t tis_int_handler(int dummy, void *dev_id)
{
	struct tpm_chip *chip = dev_id;
	u32 interrupt;
	int i;

	interrupt = ioread32(chip->vendor.iobase +
			     TPM_INT_STATUS(chip->vendor.locality));

	if (interrupt == 0)
		return IRQ_NONE;

	((struct priv_data *)chip->vendor.priv)->irq_tested = true;
	if (interrupt & TPM_INTF_DATA_AVAIL_INT)
		wake_up_interruptible(&chip->vendor.read_queue);
	if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
		for (i = 0; i < 5; i++)
			if (check_locality(chip, i) >= 0)
				break;
	if (interrupt &
	    (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
	     TPM_INTF_CMD_READY_INT))
		wake_up_interruptible(&chip->vendor.int_queue);

	/* Clear interrupts handled with TPM_EOI */
	iowrite32(interrupt,
		  chip->vendor.iobase +
		  TPM_INT_STATUS(chip->vendor.locality));
	ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));
	return IRQ_HANDLED;
}

static bool interrupts = true;
module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");

static void tpm_tis_remove(struct tpm_chip *chip)
{
	if (chip->flags & TPM_CHIP_FLAG_TPM2)
		tpm2_shutdown(chip, TPM2_SU_CLEAR);

	iowrite32(~TPM_GLOBAL_INT_ENABLE &
		  ioread32(chip->vendor.iobase +
			   TPM_INT_ENABLE(chip->vendor.locality)),
		  chip->vendor.iobase +
		  TPM_INT_ENABLE(chip->vendor.locality));
	release_locality(chip, chip->vendor.locality, 1);
}

static int tpm_tis_init(struct device *dev, acpi_handle acpi_dev_handle,
			resource_size_t start, resource_size_t len,
			unsigned int irq)
{
	u32 vendor, intfcaps, intmask;
	int rc, i, irq_s, irq_e, probe;
	struct tpm_chip *chip;
	struct priv_data *priv;

	priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL);
	if (priv == NULL)
		return -ENOMEM;

	chip = tpmm_chip_alloc(dev, &tpm_tis);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	chip->vendor.priv = priv;
#ifdef CONFIG_ACPI
	chip->acpi_dev_handle = acpi_dev_handle;
#endif

	chip->vendor.iobase = devm_ioremap(dev, start, len);
	if (!chip->vendor.iobase)
		return -EIO;

	/* Maximum timeouts */
	chip->vendor.timeout_a = TIS_TIMEOUT_A_MAX;
	chip->vendor.timeout_b = TIS_TIMEOUT_B_MAX;
	chip->vendor.timeout_c = TIS_TIMEOUT_C_MAX;
	chip->vendor.timeout_d = TIS_TIMEOUT_D_MAX;

	if (wait_startup(chip, 0) != 0) {
		rc = -ENODEV;
		goto out_err;
	}

	if (request_locality(chip, 0) != 0) {
		rc = -ENODEV;
		goto out_err;
	}

	rc = tpm2_probe(chip);
	if (rc)
		goto out_err;

	vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
	chip->vendor.manufacturer_id = vendor;

	dev_info(dev, "%s TPM (device-id 0x%X, rev-id %d)\n",
		 (chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2",
		 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));

	if (!itpm) {
		probe = probe_itpm(chip);
		if (probe < 0) {
			rc = -ENODEV;
			goto out_err;
		}
		itpm = !!probe;
	}

	if (itpm)
		dev_info(dev, "Intel iTPM workaround enabled\n");

	/* Figure out the capabilities */
	intfcaps = ioread32(chip->vendor.iobase +
			    TPM_INTF_CAPS(chip->vendor.locality));
	dev_dbg(dev, "TPM interface capabilities (0x%x):\n", intfcaps);
	if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
		dev_dbg(dev, "\tBurst Count Static\n");
	if (intfcaps & TPM_INTF_CMD_READY_INT)
		dev_dbg(dev, "\tCommand Ready Int Support\n");
	if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
		dev_dbg(dev, "\tInterrupt Edge Falling\n");
	if (intfcaps & TPM_INTF_INT_EDGE_RISING)
		dev_dbg(dev, "\tInterrupt Edge Rising\n");
	if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
		dev_dbg(dev, "\tInterrupt Level Low\n");
	if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
		dev_dbg(dev, "\tInterrupt Level High\n");
	if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
		dev_dbg(dev, "\tLocality Change Int Support\n");
	if (intfcaps & TPM_INTF_STS_VALID_INT)
		dev_dbg(dev, "\tSts Valid Int Support\n");
	if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
		dev_dbg(dev, "\tData Avail Int Support\n");

	/* INTERRUPT Setup */
	init_waitqueue_head(&chip->vendor.read_queue);
	init_waitqueue_head(&chip->vendor.int_queue);

	intmask = ioread32(chip->vendor.iobase +
			   TPM_INT_ENABLE(chip->vendor.locality));

	intmask |= TPM_INTF_CMD_READY_INT
	    | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
	    | TPM_INTF_STS_VALID_INT;

	iowrite32(intmask,
		  chip->vendor.iobase +
		  TPM_INT_ENABLE(chip->vendor.locality));
	if (interrupts)
		chip->vendor.irq = irq;
	if (interrupts && !chip->vendor.irq) {
		irq_s = ioread8(chip->vendor.iobase +
				TPM_INT_VECTOR(chip->vendor.locality));
		if (irq_s) {
			irq_e = irq_s;
		} else {
			irq_s = 3;
			irq_e = 15;
		}

		for (i = irq_s; i <= irq_e && chip->vendor.irq == 0; i++) {
			iowrite8(i, chip->vendor.iobase +
				 TPM_INT_VECTOR(chip->vendor.locality));
			if (devm_request_irq
			    (dev, i, tis_int_probe, IRQF_SHARED,
			     chip->devname, chip) != 0) {
				dev_info(chip->pdev,
					 "Unable to request irq: %d for probe\n",
					 i);
				continue;
			}

			/* Clear all existing */
			iowrite32(ioread32
				  (chip->vendor.iobase +
				   TPM_INT_STATUS(chip->vendor.locality)),
				  chip->vendor.iobase +
				  TPM_INT_STATUS(chip->vendor.locality));

			/* Turn on */
			iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
				  chip->vendor.iobase +
				  TPM_INT_ENABLE(chip->vendor.locality));

			chip->vendor.probed_irq = 0;

			/* Generate Interrupts */
			if (chip->flags & TPM_CHIP_FLAG_TPM2)
				tpm2_gen_interrupt(chip);
			else
				tpm_gen_interrupt(chip);

			chip->vendor.irq = chip->vendor.probed_irq;

			/* free_irq will call into tis_int_probe;
			   clear all irqs we haven't seen while doing
			   tpm_gen_interrupt */
			iowrite32(ioread32
				  (chip->vendor.iobase +
				   TPM_INT_STATUS(chip->vendor.locality)),
				  chip->vendor.iobase +
				  TPM_INT_STATUS(chip->vendor.locality));

			/* Turn off */
			iowrite32(intmask,
				  chip->vendor.iobase +
				  TPM_INT_ENABLE(chip->vendor.locality));
		}
	}
	if (chip->vendor.irq) {
		iowrite8(chip->vendor.irq,
			 chip->vendor.iobase +
			 TPM_INT_VECTOR(chip->vendor.locality));
		if (devm_request_irq
		    (dev, chip->vendor.irq, tis_int_handler, IRQF_SHARED,
		     chip->devname, chip) != 0) {
			dev_info(chip->pdev,
				 "Unable to request irq: %d for use\n",
				 chip->vendor.irq);
			chip->vendor.irq = 0;
		} else {
			/* Clear all existing */
			iowrite32(ioread32
				  (chip->vendor.iobase +
				   TPM_INT_STATUS(chip->vendor.locality)),
				  chip->vendor.iobase +
				  TPM_INT_STATUS(chip->vendor.locality));

			/* Turn on */
			iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
				  chip->vendor.iobase +
				  TPM_INT_ENABLE(chip->vendor.locality));
		}
	}

	if (chip->flags & TPM_CHIP_FLAG_TPM2) {
		chip->vendor.timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
		chip->vendor.timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B);
		chip->vendor.timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C);
		chip->vendor.timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D);
		chip->vendor.duration[TPM_SHORT] =
			msecs_to_jiffies(TPM2_DURATION_SHORT);
		chip->vendor.duration[TPM_MEDIUM] =
			msecs_to_jiffies(TPM2_DURATION_MEDIUM);
		chip->vendor.duration[TPM_LONG] =
			msecs_to_jiffies(TPM2_DURATION_LONG);

		rc = tpm2_do_selftest(chip);
		if (rc == TPM2_RC_INITIALIZE) {
			dev_warn(dev, "Firmware has not started TPM\n");
			rc = tpm2_startup(chip, TPM2_SU_CLEAR);
			if (!rc)
				rc = tpm2_do_selftest(chip);
		}

		if (rc) {
			dev_err(dev, "TPM self test failed\n");
			if (rc > 0)
				rc = -ENODEV;
			goto out_err;
		}
	} else {
		if (tpm_get_timeouts(chip)) {
			dev_err(dev, "Could not get TPM timeouts and durations\n");
			rc = -ENODEV;
			goto out_err;
		}

		if (tpm_do_selftest(chip)) {
			dev_err(dev, "TPM self test failed\n");
			rc = -ENODEV;
			goto out_err;
		}
	}

	return tpm_chip_register(chip);
out_err:
	tpm_tis_remove(chip);
	return rc;
}

#ifdef CONFIG_PM_SLEEP
static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
{
	u32 intmask;

	/* reenable interrupts that device may have lost or
	   BIOS/firmware may have disabled */
	iowrite8(chip->vendor.irq, chip->vendor.iobase +
		 TPM_INT_VECTOR(chip->vendor.locality));

	intmask = ioread32(chip->vendor.iobase +
			   TPM_INT_ENABLE(chip->vendor.locality));

	intmask |= TPM_INTF_CMD_READY_INT
	    | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
	    | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;

	iowrite32(intmask,
		  chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
}

static int tpm_tis_resume(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	int ret;

	if (chip->vendor.irq)
		tpm_tis_reenable_interrupts(chip);

	ret = tpm_pm_resume(dev);
	if (ret)
		return ret;

	/* TPM 1.2 requires self-test on resume. This function actually returns
	 * an error code but for unknown reason it isn't handled.
	 */
	if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
		tpm_do_selftest(chip);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);

#ifdef CONFIG_PNP
static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
			    const struct pnp_device_id *pnp_id)
{
	resource_size_t start, len;
	unsigned int irq = 0;
	acpi_handle acpi_dev_handle = NULL;

	start = pnp_mem_start(pnp_dev, 0);
	len = pnp_mem_len(pnp_dev, 0);

	if (pnp_irq_valid(pnp_dev, 0))
		irq = pnp_irq(pnp_dev, 0);
	else
		interrupts = false;

	if (is_itpm(pnp_dev))
		itpm = true;

#ifdef CONFIG_ACPI
	if (pnp_acpi_device(pnp_dev))
		acpi_dev_handle = pnp_acpi_device(pnp_dev)->handle;
#endif

	return tpm_tis_init(&pnp_dev->dev, acpi_dev_handle, start, len, irq);
}

static struct pnp_device_id tpm_pnp_tbl[] = {
	{"PNP0C31", 0},		/* TPM */
	{"ATM1200", 0},		/* Atmel */
	{"IFX0102", 0},		/* Infineon */
	{"BCM0101", 0},		/* Broadcom */
	{"BCM0102", 0},		/* Broadcom */
	{"NSC1200", 0},		/* National */
	{"ICO0102", 0},		/* Intel */
	/* Add new here */
	{"", 0},		/* User Specified */
	{"", 0}			/* Terminator */
};
MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);

static void tpm_tis_pnp_remove(struct pnp_dev *dev)
{
	struct tpm_chip *chip = pnp_get_drvdata(dev);

	tpm_chip_unregister(chip);
	tpm_tis_remove(chip);
}

static struct pnp_driver tis_pnp_driver = {
	.name = "tpm_tis",
	.id_table = tpm_pnp_tbl,
	.probe = tpm_tis_pnp_init,
	.remove = tpm_tis_pnp_remove,
	.driver = {
		.pm = &tpm_tis_pm,
	},
};

#define TIS_HID_USR_IDX (sizeof(tpm_pnp_tbl) / sizeof(struct pnp_device_id) - 2)
module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
		    sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
#endif

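/* Platform-device path used with force=1: PNP/ACPI enumeration is skipped and
 * the fixed TIS MMIO window at TIS_MEM_BASE is mapped instead.
 */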
static struct platform_driver tis_drv = {
	.driver = {
		.name = "tpm_tis",
		.pm = &tpm_tis_pm,
	},
};

static struct platform_device *pdev;

static bool force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");

static int __init init_tis(void)
{
	int rc;
#ifdef CONFIG_PNP
	if (!force)
		return pnp_register_driver(&tis_pnp_driver);
#endif

	rc = platform_driver_register(&tis_drv);
	if (rc < 0)
		return rc;
	pdev = platform_device_register_simple("tpm_tis", -1, NULL, 0);
	if (IS_ERR(pdev)) {
		rc = PTR_ERR(pdev);
		goto err_dev;
	}
	rc = tpm_tis_init(&pdev->dev, NULL, TIS_MEM_BASE, TIS_MEM_LEN, 0);
	if (rc)
		goto err_init;
	return 0;
err_init:
	platform_device_unregister(pdev);
err_dev:
	platform_driver_unregister(&tis_drv);
	return rc;
}

static void __exit cleanup_tis(void)
{
	struct tpm_chip *chip;
#ifdef CONFIG_PNP
	if (!force) {
		pnp_unregister_driver(&tis_pnp_driver);
		return;
	}
#endif
	chip = dev_get_drvdata(&pdev->dev);
	tpm_chip_unregister(chip);
	tpm_tis_remove(chip);
	platform_device_unregister(pdev);
	platform_driver_unregister(&tis_drv);
}

module_init(init_tis);
module_exit(cleanup_tis);
MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
MODULE_DESCRIPTION("TPM Driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");