xref: /openbmc/linux/drivers/char/tpm/tpm_tis.c (revision 4dc7ccf7)
1 /*
2  * Copyright (C) 2005, 2006 IBM Corporation
3  *
4  * Authors:
5  * Leendert van Doorn <leendert@watson.ibm.com>
6  * Kylene Hall <kjhall@us.ibm.com>
7  *
8  * Maintained by: <tpmdd-devel@lists.sourceforge.net>
9  *
10  * Device driver for TCG/TCPA TPM (trusted platform module).
11  * Specifications at www.trustedcomputinggroup.org
12  *
13  * This device driver implements the TPM interface as defined in
14  * the TCG TPM Interface Spec version 1.2, revision 1.0.
15  *
16  * This program is free software; you can redistribute it and/or
17  * modify it under the terms of the GNU General Public License as
18  * published by the Free Software Foundation, version 2 of the
19  * License.
20  */
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/pnp.h>
25 #include <linux/slab.h>
26 #include <linux/interrupt.h>
27 #include <linux/wait.h>
28 #include "tpm.h"
29 
/* Every TPM command/response starts with a 10-byte header. */
#define TPM_HEADER_SIZE 10

/* TPM_ACCESS register bits (locality arbitration). */
enum tis_access {
	TPM_ACCESS_VALID = 0x80,
	TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
	TPM_ACCESS_REQUEST_PENDING = 0x04,
	TPM_ACCESS_REQUEST_USE = 0x02,
};

/* TPM_STS register bits. */
enum tis_status {
	TPM_STS_VALID = 0x80,
	TPM_STS_COMMAND_READY = 0x40,
	TPM_STS_GO = 0x20,
	TPM_STS_DATA_AVAIL = 0x10,
	TPM_STS_DATA_EXPECT = 0x08,
};

/* Bits shared by TPM_INT_ENABLE / TPM_INT_STATUS / TPM_INTF_CAPS. */
enum tis_int_flags {
	TPM_GLOBAL_INT_ENABLE = 0x80000000,
	TPM_INTF_BURST_COUNT_STATIC = 0x100,
	TPM_INTF_CMD_READY_INT = 0x080,
	TPM_INTF_INT_EDGE_FALLING = 0x040,
	TPM_INTF_INT_EDGE_RISING = 0x020,
	TPM_INTF_INT_LEVEL_LOW = 0x010,
	TPM_INTF_INT_LEVEL_HIGH = 0x008,
	TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
	TPM_INTF_STS_VALID_INT = 0x002,
	TPM_INTF_DATA_AVAIL_INT = 0x001,
};

/* Defaults used by init_tis() when probing with force=1 (no PnP data). */
enum tis_defaults {
	TIS_MEM_BASE = 0xFED40000,
	TIS_MEM_LEN = 0x5000,
	TIS_SHORT_TIMEOUT = 750,	/* ms */
	TIS_LONG_TIMEOUT = 2000,	/* 2 sec */
};

/* Register offsets; each locality 'l' occupies its own 4K page. */
#define	TPM_ACCESS(l)			(0x0000 | ((l) << 12))
#define	TPM_INT_ENABLE(l)		(0x0008 | ((l) << 12))
#define	TPM_INT_VECTOR(l)		(0x000C | ((l) << 12))
#define	TPM_INT_STATUS(l)		(0x0010 | ((l) << 12))
#define	TPM_INTF_CAPS(l)		(0x0014 | ((l) << 12))
#define	TPM_STS(l)			(0x0018 | ((l) << 12))
#define	TPM_DATA_FIFO(l)		(0x0024 | ((l) << 12))

#define	TPM_DID_VID(l)			(0x0F00 | ((l) << 12))
#define	TPM_RID(l)			(0x0F04 | ((l) << 12))

/* All chips registered by this driver; walked by cleanup_tis(). */
static LIST_HEAD(tis_chips);
static DEFINE_SPINLOCK(tis_lock);
80 
81 static int check_locality(struct tpm_chip *chip, int l)
82 {
83 	if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
84 	     (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
85 	    (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
86 		return chip->vendor.locality = l;
87 
88 	return -1;
89 }
90 
91 static void release_locality(struct tpm_chip *chip, int l, int force)
92 {
93 	if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
94 		      (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
95 	    (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))
96 		iowrite8(TPM_ACCESS_ACTIVE_LOCALITY,
97 			 chip->vendor.iobase + TPM_ACCESS(l));
98 }
99 
100 static int request_locality(struct tpm_chip *chip, int l)
101 {
102 	unsigned long stop;
103 	long rc;
104 
105 	if (check_locality(chip, l) >= 0)
106 		return l;
107 
108 	iowrite8(TPM_ACCESS_REQUEST_USE,
109 		 chip->vendor.iobase + TPM_ACCESS(l));
110 
111 	if (chip->vendor.irq) {
112 		rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
113 						      (check_locality
114 						       (chip, l) >= 0),
115 						      chip->vendor.timeout_a);
116 		if (rc > 0)
117 			return l;
118 
119 	} else {
120 		/* wait for burstcount */
121 		stop = jiffies + chip->vendor.timeout_a;
122 		do {
123 			if (check_locality(chip, l) >= 0)
124 				return l;
125 			msleep(TPM_TIMEOUT);
126 		}
127 		while (time_before(jiffies, stop));
128 	}
129 	return -1;
130 }
131 
132 static u8 tpm_tis_status(struct tpm_chip *chip)
133 {
134 	return ioread8(chip->vendor.iobase +
135 		       TPM_STS(chip->vendor.locality));
136 }
137 
/*
 * Abort any command in progress by writing commandReady to the status
 * register.  Also installed as the vendor .cancel hook in 'tpm_tis'.
 */
static void tpm_tis_ready(struct tpm_chip *chip)
{
	/* this causes the current command to be aborted */
	iowrite8(TPM_STS_COMMAND_READY,
		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
}
144 
145 static int get_burstcount(struct tpm_chip *chip)
146 {
147 	unsigned long stop;
148 	int burstcnt;
149 
150 	/* wait for burstcount */
151 	/* which timeout value, spec has 2 answers (c & d) */
152 	stop = jiffies + chip->vendor.timeout_d;
153 	do {
154 		burstcnt = ioread8(chip->vendor.iobase +
155 				   TPM_STS(chip->vendor.locality) + 1);
156 		burstcnt += ioread8(chip->vendor.iobase +
157 				    TPM_STS(chip->vendor.locality) +
158 				    2) << 8;
159 		if (burstcnt)
160 			return burstcnt;
161 		msleep(TPM_TIMEOUT);
162 	} while (time_before(jiffies, stop));
163 	return -EBUSY;
164 }
165 
/*
 * Wait until every bit in 'mask' is set in the status register, for at
 * most 'timeout' jiffies.  With an IRQ configured the caller's 'queue' is
 * slept on (woken by tis_int_handler()); otherwise the register is polled
 * every TPM_TIMEOUT ms.  Returns 0 on success, -ETIME on timeout (or if
 * the interruptible sleep was cut short).
 */
static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
			 wait_queue_head_t *queue)
{
	unsigned long stop;
	long rc;
	u8 status;

	/* check current status */
	status = tpm_tis_status(chip);
	if ((status & mask) == mask)
		return 0;

	if (chip->vendor.irq) {
		/* the wait macro re-evaluates the condition on each wakeup */
		rc = wait_event_interruptible_timeout(*queue,
						      ((tpm_tis_status
							(chip) & mask) ==
						       mask), timeout);
		if (rc > 0)
			return 0;
	} else {
		/* polling fallback when no interrupt line is in use */
		stop = jiffies + timeout;
		do {
			msleep(TPM_TIMEOUT);
			status = tpm_tis_status(chip);
			if ((status & mask) == mask)
				return 0;
		} while (time_before(jiffies, stop));
	}
	return -ETIME;
}
196 
197 static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
198 {
199 	int size = 0, burstcnt;
200 	while (size < count &&
201 	       wait_for_stat(chip,
202 			     TPM_STS_DATA_AVAIL | TPM_STS_VALID,
203 			     chip->vendor.timeout_c,
204 			     &chip->vendor.read_queue)
205 	       == 0) {
206 		burstcnt = get_burstcount(chip);
207 		for (; burstcnt > 0 && size < count; burstcnt--)
208 			buf[size++] = ioread8(chip->vendor.iobase +
209 					      TPM_DATA_FIFO(chip->vendor.
210 							    locality));
211 	}
212 	return size;
213 }
214 
215 static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
216 {
217 	int size = 0;
218 	int expected, status;
219 
220 	if (count < TPM_HEADER_SIZE) {
221 		size = -EIO;
222 		goto out;
223 	}
224 
225 	/* read first 10 bytes, including tag, paramsize, and result */
226 	if ((size =
227 	     recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) {
228 		dev_err(chip->dev, "Unable to read header\n");
229 		goto out;
230 	}
231 
232 	expected = be32_to_cpu(*(__be32 *) (buf + 2));
233 	if (expected > count) {
234 		size = -EIO;
235 		goto out;
236 	}
237 
238 	if ((size +=
239 	     recv_data(chip, &buf[TPM_HEADER_SIZE],
240 		       expected - TPM_HEADER_SIZE)) < expected) {
241 		dev_err(chip->dev, "Unable to read remainder of result\n");
242 		size = -ETIME;
243 		goto out;
244 	}
245 
246 	wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
247 		      &chip->vendor.int_queue);
248 	status = tpm_tis_status(chip);
249 	if (status & TPM_STS_DATA_AVAIL) {	/* retry? */
250 		dev_err(chip->dev, "Error left over data\n");
251 		size = -EIO;
252 		goto out;
253 	}
254 
255 out:
256 	tpm_tis_ready(chip);
257 	release_locality(chip, chip->vendor.locality, 0);
258 	return size;
259 }
260 
/*
 * itpm=1 makes tpm_tis_send() skip the TPM_STS_DATA_EXPECT check while the
 * command body is being written (see the '!itpm &&' test there); used as a
 * workaround for Intel iTPM parts.
 */
static int itpm;
module_param(itpm, bool, 0444);
MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");
264 
265 /*
266  * If interrupts are used (signaled by an irq set in the vendor structure)
267  * tpm.c can skip polling for the data to be available as the interrupt is
268  * waited for here
269  */
270 static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
271 {
272 	int rc, status, burstcnt;
273 	size_t count = 0;
274 	u32 ordinal;
275 
276 	if (request_locality(chip, 0) < 0)
277 		return -EBUSY;
278 
279 	status = tpm_tis_status(chip);
280 	if ((status & TPM_STS_COMMAND_READY) == 0) {
281 		tpm_tis_ready(chip);
282 		if (wait_for_stat
283 		    (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
284 		     &chip->vendor.int_queue) < 0) {
285 			rc = -ETIME;
286 			goto out_err;
287 		}
288 	}
289 
290 	while (count < len - 1) {
291 		burstcnt = get_burstcount(chip);
292 		for (; burstcnt > 0 && count < len - 1; burstcnt--) {
293 			iowrite8(buf[count], chip->vendor.iobase +
294 				 TPM_DATA_FIFO(chip->vendor.locality));
295 			count++;
296 		}
297 
298 		wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
299 			      &chip->vendor.int_queue);
300 		status = tpm_tis_status(chip);
301 		if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
302 			rc = -EIO;
303 			goto out_err;
304 		}
305 	}
306 
307 	/* write last byte */
308 	iowrite8(buf[count],
309 		 chip->vendor.iobase +
310 		 TPM_DATA_FIFO(chip->vendor.locality));
311 	wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
312 		      &chip->vendor.int_queue);
313 	status = tpm_tis_status(chip);
314 	if ((status & TPM_STS_DATA_EXPECT) != 0) {
315 		rc = -EIO;
316 		goto out_err;
317 	}
318 
319 	/* go and do it */
320 	iowrite8(TPM_STS_GO,
321 		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
322 
323 	if (chip->vendor.irq) {
324 		ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
325 		if (wait_for_stat
326 		    (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
327 		     tpm_calc_ordinal_duration(chip, ordinal),
328 		     &chip->vendor.read_queue) < 0) {
329 			rc = -ETIME;
330 			goto out_err;
331 		}
332 	}
333 	return len;
334 out_err:
335 	tpm_tis_ready(chip);
336 	release_locality(chip, chip->vendor.locality, 0);
337 	return rc;
338 }
339 
/* /dev/tpm0 entry points; the generic handlers in tpm.c do the work. */
static const struct file_operations tis_ops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.open = tpm_open,
	.read = tpm_read,
	.write = tpm_write,
	.release = tpm_release,
};

/* Per-chip sysfs attributes, all backed by the generic tpm.c helpers. */
static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
		   NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);

static struct attribute *tis_attrs[] = {
	&dev_attr_pubek.attr,
	&dev_attr_pcrs.attr,
	&dev_attr_enabled.attr,
	&dev_attr_active.attr,
	&dev_attr_owned.attr,
	&dev_attr_temp_deactivated.attr,
	&dev_attr_caps.attr,
	&dev_attr_cancel.attr, NULL,
};

static struct attribute_group tis_attr_grp = {
	.attrs = tis_attrs
};

/* Vendor hook table handed to tpm_register_hardware() in tpm_tis_init(). */
static struct tpm_vendor_specific tpm_tis = {
	.status = tpm_tis_status,
	.recv = tpm_tis_recv,
	.send = tpm_tis_send,
	.cancel = tpm_tis_ready,
	.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
	.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
	.req_canceled = TPM_STS_COMMAND_READY,
	.attr_group = &tis_attr_grp,
	.miscdev = {
		    .fops = &tis_ops,},
};
386 
387 static irqreturn_t tis_int_probe(int irq, void *dev_id)
388 {
389 	struct tpm_chip *chip = dev_id;
390 	u32 interrupt;
391 
392 	interrupt = ioread32(chip->vendor.iobase +
393 			     TPM_INT_STATUS(chip->vendor.locality));
394 
395 	if (interrupt == 0)
396 		return IRQ_NONE;
397 
398 	chip->vendor.irq = irq;
399 
400 	/* Clear interrupts handled with TPM_EOI */
401 	iowrite32(interrupt,
402 		  chip->vendor.iobase +
403 		  TPM_INT_STATUS(chip->vendor.locality));
404 	return IRQ_HANDLED;
405 }
406 
/*
 * Main interrupt handler once a vector is hooked up.  Wakes the waiter
 * matching each pending cause, re-discovers the active locality on a
 * locality change, acks all causes, then reads the status register back
 * (presumably to flush the posted write — NOTE(review): confirm intent).
 */
static irqreturn_t tis_int_handler(int dummy, void *dev_id)
{
	struct tpm_chip *chip = dev_id;
	u32 interrupt;
	int i;

	interrupt = ioread32(chip->vendor.iobase +
			     TPM_INT_STATUS(chip->vendor.locality));

	if (interrupt == 0)
		return IRQ_NONE;

	/* response data arrived: wake recv_data()/tpm_tis_send() waiters */
	if (interrupt & TPM_INTF_DATA_AVAIL_INT)
		wake_up_interruptible(&chip->vendor.read_queue);
	/* locality changed: find which of the 5 localities we now hold */
	if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
		for (i = 0; i < 5; i++)
			if (check_locality(chip, i) >= 0)
				break;
	if (interrupt &
	    (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
	     TPM_INTF_CMD_READY_INT))
		wake_up_interruptible(&chip->vendor.int_queue);

	/* Clear interrupts handled with TPM_EOI */
	iowrite32(interrupt,
		  chip->vendor.iobase +
		  TPM_INT_STATUS(chip->vendor.locality));
	ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));
	return IRQ_HANDLED;
}
437 
/* interrupts=0 forces polled operation even when an IRQ is available. */
static int interrupts = 1;
module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");
441 
/*
 * Core probe: register the chip with the TPM core, map the TIS register
 * window at [start, start+len), claim locality 0, report the device ID,
 * log interface capabilities, set up (and if necessary auto-probe) the
 * interrupt vector, and add the chip to tis_chips.  Returns 0 on success
 * or a negative errno; on failure the mapping and registration are undone.
 */
static int tpm_tis_init(struct device *dev, resource_size_t start,
			resource_size_t len, unsigned int irq)
{
	u32 vendor, intfcaps, intmask;
	int rc, i;
	struct tpm_chip *chip;

	if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
		return -ENODEV;

	chip->vendor.iobase = ioremap(start, len);
	if (!chip->vendor.iobase) {
		rc = -EIO;
		goto out_err;
	}

	/* Default timeouts */
	chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
	chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
	chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
	chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);

	/* request_locality() returns the locality on success, i.e. 0 here */
	if (request_locality(chip, 0) != 0) {
		rc = -ENODEV;
		goto out_err;
	}

	vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));

	dev_info(dev,
		 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
		 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));

	if (itpm)
		dev_info(dev, "Intel iTPM workaround enabled\n");


	/* Figure out the capabilities */
	intfcaps =
	    ioread32(chip->vendor.iobase +
		     TPM_INTF_CAPS(chip->vendor.locality));
	dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
		intfcaps);
	if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
		dev_dbg(dev, "\tBurst Count Static\n");
	if (intfcaps & TPM_INTF_CMD_READY_INT)
		dev_dbg(dev, "\tCommand Ready Int Support\n");
	if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
		dev_dbg(dev, "\tInterrupt Edge Falling\n");
	if (intfcaps & TPM_INTF_INT_EDGE_RISING)
		dev_dbg(dev, "\tInterrupt Edge Rising\n");
	if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
		dev_dbg(dev, "\tInterrupt Level Low\n");
	if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
		dev_dbg(dev, "\tInterrupt Level High\n");
	if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
		dev_dbg(dev, "\tLocality Change Int Support\n");
	if (intfcaps & TPM_INTF_STS_VALID_INT)
		dev_dbg(dev, "\tSts Valid Int Support\n");
	if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
		dev_dbg(dev, "\tData Avail Int Support\n");

	/* INTERRUPT Setup */
	init_waitqueue_head(&chip->vendor.read_queue);
	init_waitqueue_head(&chip->vendor.int_queue);

	intmask =
	    ioread32(chip->vendor.iobase +
		     TPM_INT_ENABLE(chip->vendor.locality));

	intmask |= TPM_INTF_CMD_READY_INT
	    | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
	    | TPM_INTF_STS_VALID_INT;

	/* enable the causes now; the global enable bit is set later */
	iowrite32(intmask,
		  chip->vendor.iobase +
		  TPM_INT_ENABLE(chip->vendor.locality));
	if (interrupts)
		chip->vendor.irq = irq;
	if (interrupts && !chip->vendor.irq) {
		/* no vector supplied: try the one the firmware programmed */
		chip->vendor.irq =
		    ioread8(chip->vendor.iobase +
			    TPM_INT_VECTOR(chip->vendor.locality));

		/*
		 * Still unknown: walk vectors 3..15.  For each, install
		 * tis_int_probe() and make the TPM generate an interrupt;
		 * the probe handler stores the vector in chip->vendor.irq
		 * when it fires, which also terminates this loop.
		 */
		for (i = 3; i < 16 && chip->vendor.irq == 0; i++) {
			iowrite8(i, chip->vendor.iobase +
				    TPM_INT_VECTOR(chip->vendor.locality));
			if (request_irq
			    (i, tis_int_probe, IRQF_SHARED,
			     chip->vendor.miscdev.name, chip) != 0) {
				dev_info(chip->dev,
					 "Unable to request irq: %d for probe\n",
					 i);
				continue;
			}

			/* Clear all existing */
			iowrite32(ioread32
				  (chip->vendor.iobase +
				   TPM_INT_STATUS(chip->vendor.locality)),
				  chip->vendor.iobase +
				  TPM_INT_STATUS(chip->vendor.locality));

			/* Turn on */
			iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
				  chip->vendor.iobase +
				  TPM_INT_ENABLE(chip->vendor.locality));

			/* Generate Interrupts */
			tpm_gen_interrupt(chip);

			/* Turn off */
			iowrite32(intmask,
				  chip->vendor.iobase +
				  TPM_INT_ENABLE(chip->vendor.locality));
			free_irq(i, chip);
		}
	}
	if (chip->vendor.irq) {
		/* hook up the real handler on the discovered vector */
		iowrite8(chip->vendor.irq,
			 chip->vendor.iobase +
			 TPM_INT_VECTOR(chip->vendor.locality));
		if (request_irq
		    (chip->vendor.irq, tis_int_handler, IRQF_SHARED,
		     chip->vendor.miscdev.name, chip) != 0) {
			dev_info(chip->dev,
				 "Unable to request irq: %d for use\n",
				 chip->vendor.irq);
			/* fall back to polled mode */
			chip->vendor.irq = 0;
		} else {
			/* Clear all existing */
			iowrite32(ioread32
				  (chip->vendor.iobase +
				   TPM_INT_STATUS(chip->vendor.locality)),
				  chip->vendor.iobase +
				  TPM_INT_STATUS(chip->vendor.locality));

			/* Turn on */
			iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
				  chip->vendor.iobase +
				  TPM_INT_ENABLE(chip->vendor.locality));
		}
	}

	/* make the chip visible to cleanup_tis() */
	INIT_LIST_HEAD(&chip->vendor.list);
	spin_lock(&tis_lock);
	list_add(&chip->vendor.list, &tis_chips);
	spin_unlock(&tis_lock);

	tpm_get_timeouts(chip);
	tpm_continue_selftest(chip);

	return 0;
out_err:
	if (chip->vendor.iobase)
		iounmap(chip->vendor.iobase);
	tpm_remove_hardware(chip->dev);
	return rc;
}
601 
602 static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
603 				      const struct pnp_device_id *pnp_id)
604 {
605 	resource_size_t start, len;
606 	unsigned int irq = 0;
607 
608 	start = pnp_mem_start(pnp_dev, 0);
609 	len = pnp_mem_len(pnp_dev, 0);
610 
611 	if (pnp_irq_valid(pnp_dev, 0))
612 		irq = pnp_irq(pnp_dev, 0);
613 	else
614 		interrupts = 0;
615 
616 	return tpm_tis_init(&pnp_dev->dev, start, len, irq);
617 }
618 
/* PnP suspend: forward to the generic TPM power-management helper. */
static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
{
	return tpm_pm_suspend(&dev->dev, msg);
}
623 
/* PnP resume: forward to the generic TPM power-management helper. */
static int tpm_tis_pnp_resume(struct pnp_dev *dev)
{
	return tpm_pm_resume(&dev->dev);
}
628 
/*
 * PnP/ACPI IDs this driver binds to.  The penultimate empty slot can be
 * filled at load time via the 'hid' module parameter (see below).
 */
static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
	{"PNP0C31", 0},		/* TPM */
	{"ATM1200", 0},		/* Atmel */
	{"IFX0102", 0},		/* Infineon */
	{"BCM0101", 0},		/* Broadcom */
	{"BCM0102", 0},		/* Broadcom */
	{"NSC1200", 0},		/* National */
	{"ICO0102", 0},		/* Intel */
	/* Add new here */
	{"", 0},		/* User Specified */
	{"", 0}			/* Terminator */
};
MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
641 MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
642 
/*
 * PnP removal: release the vendor-specific resources and free the chip
 * structure.
 * NOTE(review): the MMIO mapping, IRQ, locality and tis_chips list entry
 * appear to be torn down only in cleanup_tis(), not here — confirm that
 * per-device hot removal is not expected before relying on this path.
 */
static __devexit void tpm_tis_pnp_remove(struct pnp_dev *dev)
{
	struct tpm_chip *chip = pnp_get_drvdata(dev);

	tpm_dev_vendor_release(chip);

	kfree(chip);
}
651 
652 
/* PnP driver used in the normal (force=0) path of init_tis(). */
static struct pnp_driver tis_pnp_driver = {
	.name = "tpm_tis",
	.id_table = tpm_pnp_tbl,
	.probe = tpm_tis_pnp_init,
	.suspend = tpm_tis_pnp_suspend,
	.resume = tpm_tis_pnp_resume,
	.remove = tpm_tis_pnp_remove,
};
661 
662 #define TIS_HID_USR_IDX sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) -2
663 module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
664 		    sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
665 MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
666 
/* Platform (force=1) suspend: forward to the generic TPM PM helper. */
static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg)
{
	return tpm_pm_suspend(&dev->dev, msg);
}
671 
/* Platform (force=1) resume: forward to the generic TPM PM helper. */
static int tpm_tis_resume(struct platform_device *dev)
{
	return tpm_pm_resume(&dev->dev);
}
/* Platform driver used only when probing is forced (force=1). */
static struct platform_driver tis_drv = {
	.driver = {
		.name = "tpm_tis",
		.owner		= THIS_MODULE,
	},
	.suspend = tpm_tis_suspend,
	.resume = tpm_tis_resume,
};
684 
/* Platform device created by init_tis() in the force=1 path. */
static struct platform_device *pdev;

/* force=1 probes the fixed TIS_MEM_BASE window instead of using PnP/ACPI. */
static int force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
690 static int __init init_tis(void)
691 {
692 	int rc;
693 
694 	if (force) {
695 		rc = platform_driver_register(&tis_drv);
696 		if (rc < 0)
697 			return rc;
698 		if (IS_ERR(pdev=platform_device_register_simple("tpm_tis", -1, NULL, 0)))
699 			return PTR_ERR(pdev);
700 		if((rc=tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0)) != 0) {
701 			platform_device_unregister(pdev);
702 			platform_driver_unregister(&tis_drv);
703 		}
704 		return rc;
705 	}
706 
707 	return pnp_register_driver(&tis_pnp_driver);
708 }
709 
/*
 * Module exit.  For every registered chip: unregister it from the TPM
 * core, mask the global interrupt enable, force-release the locality,
 * free the IRQ, unmap the registers and drop it from tis_chips; finally
 * unregister whichever driver init_tis() registered.
 * NOTE(review): tpm_remove_hardware()/free_irq() are called while holding
 * the tis_lock spinlock; free_irq() can sleep — verify this is safe on
 * the kernels this revision targets.
 */
static void __exit cleanup_tis(void)
{
	struct tpm_vendor_specific *i, *j;
	struct tpm_chip *chip;
	spin_lock(&tis_lock);
	list_for_each_entry_safe(i, j, &tis_chips, list) {
		chip = to_tpm_chip(i);
		tpm_remove_hardware(chip->dev);
		/* clear only TPM_GLOBAL_INT_ENABLE, keep the cause bits */
		iowrite32(~TPM_GLOBAL_INT_ENABLE &
			  ioread32(chip->vendor.iobase +
				   TPM_INT_ENABLE(chip->vendor.
						  locality)),
			  chip->vendor.iobase +
			  TPM_INT_ENABLE(chip->vendor.locality));
		/* force=1: drop the locality unconditionally */
		release_locality(chip, chip->vendor.locality, 1);
		if (chip->vendor.irq)
			free_irq(chip->vendor.irq, chip);
		iounmap(i->iobase);
		list_del(&i->list);
	}
	spin_unlock(&tis_lock);

	if (force) {
		platform_device_unregister(pdev);
		platform_driver_unregister(&tis_drv);
	} else
		pnp_unregister_driver(&tis_pnp_driver);
}
738 
/* Module entry/exit points and metadata. */
module_init(init_tis);
module_exit(cleanup_tis);
MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
MODULE_DESCRIPTION("TPM Driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");
745