/*
 * Copyright (C) 2012 IBM Corporation
 *
 * Author: Ashley Lai <ashleydlai@gmail.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 */

#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <asm/vio.h>
#include <asm/irq.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <asm/prom.h>

#include "tpm.h"
#include "tpm_ibmvtpm.h"

static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";

static const struct vio_device_id tpm_ibmvtpm_device_table[] = {
	{ "IBM,vtpm", "IBM,vtpm"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);
/**
 * ibmvtpm_send_crq_word() - Send a CRQ request
 * @vdev:	vio device struct
 * @w1:		pre-constructed first word of tpm crq (second word is reserved)
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_send_crq_word(struct vio_dev *vdev, u64 w1)
{
	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, 0);
}

/**
 * ibmvtpm_send_crq() - Send a CRQ request
 *
 * @vdev:	vio device struct
 * @valid:	Valid field
 * @msg:	Type field
 * @len:	Length field
 * @data:	Data field
 *
 * The ibmvtpm crq is defined as follows:
 *
 * Byte  |   0   |   1   |   2   |   3   |   4   |   5   |   6   |   7
 * -----------------------------------------------------------------------
 * Word0 | Valid | Type  |     Length    |              Data
 * -----------------------------------------------------------------------
 * Word1 |                Reserved
 * -----------------------------------------------------------------------
 *
 * Which matches the following structure (on a big-endian host):
 *
 * struct ibmvtpm_crq {
 *         u8 valid;
 *         u8 msg;
 *         __be16 len;
 *         __be32 data;
 *         __be64 reserved;
 * } __attribute__((packed, aligned(8)));
 *
 * However, the value is passed in a register, so just compute the numeric
 * value to load into the register, avoiding byteswaps altogether. Endianness
 * only affects memory loads and stores - registers are represented the same
 * way internally.
 *
 * Return:
 *	0 (H_SUCCESS) - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_send_crq(struct vio_dev *vdev,
		u8 valid, u8 msg, u16 len, u32 data)
{
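	/*
	 * Worked example with illustrative values only (the real constants
	 * come from tpm_ibmvtpm.h): assuming valid = 0x80 and msg = 0x01,
	 * a request with len = 0x000a and data = 0x12345678 packs into
	 *
	 *   w1 = 0x8001000a12345678
	 *
	 * i.e. valid in the top byte, msg next, len in bytes 2-3 and data
	 * in the low 32 bits, matching the big-endian CRQ layout above.
	 */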
	u64 w1 = ((u64)valid << 56) | ((u64)msg << 48) | ((u64)len << 32) |
		(u64)data;
	return ibmvtpm_send_crq_word(vdev, w1);
}

/**
 * tpm_ibmvtpm_recv - Receive data after send
 *
 * @chip:	tpm chip struct
 * @buf:	buffer to read the response into
 * @count:	size of buffer
 *
 * Return:
 *	Number of bytes read
 */
static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	u16 len;
	int sig;

	if (!ibmvtpm->rtce_buf) {
		dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
		return 0;
	}

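	/*
	 * tpm_processing_cmd is cleared and the wait queue woken by the CRQ
	 * interrupt handler when the TPM command response arrives.
	 */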
	sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
	if (sig)
		return -EINTR;

	len = ibmvtpm->res_len;

	if (count < len) {
		dev_err(ibmvtpm->dev,
			"Invalid size in recv: count=%zd, crq_size=%d\n",
			count, len);
		return -EIO;
	}

	spin_lock(&ibmvtpm->rtce_lock);
	memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len);
	memset(ibmvtpm->rtce_buf, 0, len);
	ibmvtpm->res_len = 0;
	spin_unlock(&ibmvtpm->rtce_lock);
	return len;
}

/**
 * tpm_ibmvtpm_send() - Send a TPM command
 * @chip:	tpm chip struct
 * @buf:	buffer containing the data to send
 * @count:	size of buffer
 *
 * Return:
 *   0 on success,
 *   -errno on error
 */
static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	int rc, sig;

	if (!ibmvtpm->rtce_buf) {
		dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
		return 0;
	}

	if (count > ibmvtpm->rtce_size) {
		dev_err(ibmvtpm->dev,
			"Invalid size in send: count=%zd, rtce_size=%d\n",
			count, ibmvtpm->rtce_size);
		return -EIO;
	}

	if (ibmvtpm->tpm_processing_cmd) {
		dev_info(ibmvtpm->dev,
		         "Need to wait for TPM to finish\n");
		/* wait for previous command to finish */
		sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
		if (sig)
			return -EINTR;
	}

	spin_lock(&ibmvtpm->rtce_lock);
	ibmvtpm->res_len = 0;
	memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);

	/*
	 * set the processing flag before the Hcall, since we may get the
	 * result (interrupt) before even being able to check rc.
	 */
	ibmvtpm->tpm_processing_cmd = true;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev,
			IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND,
			count, ibmvtpm->rtce_dma_handle);
	if (rc != H_SUCCESS) {
		dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
		rc = 0;
		ibmvtpm->tpm_processing_cmd = false;
	} else
		rc = 0;

	spin_unlock(&ibmvtpm->rtce_lock);
	return rc;
}

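/*
 * The CRQ transport exposes neither a cancel operation nor a status register,
 * so the cancel and status callbacks required by tpm_class_ops are stubs.
 */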
static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
{
	return;
}

static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
{
	return 0;
}

/**
 * ibmvtpm_crq_get_rtce_size - Send a CRQ request to get rtce size
 *
 * @ibmvtpm:	vtpm device struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
{
	int rc;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev,
			IBMVTPM_VALID_CMD, VTPM_GET_RTCE_BUFFER_SIZE, 0, 0);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);

	return rc;
}

/**
 * ibmvtpm_crq_get_version - Send a CRQ request to get the vtpm version
 *			     Note that this is the vtpm version, not the TPM version.
 *
 * @ibmvtpm:	vtpm device struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
{
	int rc;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev,
			IBMVTPM_VALID_CMD, VTPM_GET_VERSION, 0, 0);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_get_version failed rc=%d\n", rc);

	return rc;
}

/**
 * ibmvtpm_crq_send_init_complete - Send a CRQ initialize complete message
 * @ibmvtpm:	vtpm device struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
{
	int rc;

	rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_COMP_CMD);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_send_init_complete failed rc=%d\n", rc);

	return rc;
}

/**
 * ibmvtpm_crq_send_init - Send a CRQ initialize message
 * @ibmvtpm:	vtpm device struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
{
	int rc;

	rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_send_init failed rc=%d\n", rc);

	return rc;
}

/**
 * tpm_ibmvtpm_remove - ibm vtpm remove entry point
 * @vdev:	vio device struct
 *
 * Return: Always 0.
 */
static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
{
	struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	int rc = 0;

	tpm_chip_unregister(chip);

	free_irq(vdev->irq, ibmvtpm);

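	/* H_FREE_CRQ can report busy; back off and retry until it succeeds. */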
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle,
			 CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)ibmvtpm->crq_queue.crq_addr);

	if (ibmvtpm->rtce_buf) {
		dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle,
				 ibmvtpm->rtce_size, DMA_BIDIRECTIONAL);
		kfree(ibmvtpm->rtce_buf);
	}

	kfree(ibmvtpm);
	/* For tpm_ibmvtpm_get_desired_dma */
	dev_set_drvdata(&vdev->dev, NULL);

	return 0;
}

/**
 * tpm_ibmvtpm_get_desired_dma - Get DMA size needed by this driver
 * @vdev:	vio device struct
 *
 * Return:
 *	Number of bytes the driver needs to DMA map.
 */
static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
{
	struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
	struct ibmvtpm_dev *ibmvtpm;

	/*
	 * ibmvtpm initializes at probe time, so the data we are asking
	 * for may not be set yet. Estimate that 4K is required for the
	 * TCE-mapped buffer in addition to the CRQ.
	 */
	if (chip)
		ibmvtpm = dev_get_drvdata(&chip->dev);
	else
		return CRQ_RES_BUF_SIZE + PAGE_SIZE;

	return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
}

/**
 * tpm_ibmvtpm_suspend - Suspend
 * @dev:	device struct
 *
 * Return: 0 on success, non-zero on failure.
 */
static int tpm_ibmvtpm_suspend(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	int rc = 0;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev,
			IBMVTPM_VALID_CMD, VTPM_PREPARE_TO_SUSPEND, 0, 0);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"tpm_ibmvtpm_suspend failed rc=%d\n", rc);

	return rc;
}

/**
 * ibmvtpm_reset_crq - Reset CRQ
 *
 * @ibmvtpm:	ibm vtpm struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
{
	int rc = 0;

	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ,
					ibmvtpm->vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	memset(ibmvtpm->crq_queue.crq_addr, 0, CRQ_RES_BUF_SIZE);
	ibmvtpm->crq_queue.index = 0;

	return plpar_hcall_norets(H_REG_CRQ, ibmvtpm->vdev->unit_address,
				  ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
}

/**
 * tpm_ibmvtpm_resume - Resume from suspend
 *
 * @dev:	device struct
 *
 * Return: 0 on success, non-zero on failure.
 */
static int tpm_ibmvtpm_resume(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	int rc = 0;

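	/* H_ENABLE_CRQ can report busy or in-progress; back off and retry. */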
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_ENABLE_CRQ,
					ibmvtpm->vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc) {
		dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
		return rc;
	}

	rc = vio_enable_interrupts(ibmvtpm->vdev);
	if (rc) {
		dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
		return rc;
	}

	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		dev_err(dev, "Error send_init rc=%d\n", rc);

	return rc;
}

static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
{
	return (status == 0);
}

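/*
 * With req_complete_mask and req_complete_val both zero, the core's
 * (status & mask) == val test always indicates completion; the real wait
 * for a response happens in the blocking tpm_ibmvtpm_recv().
 */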
static const struct tpm_class_ops tpm_ibmvtpm = {
	.recv = tpm_ibmvtpm_recv,
	.send = tpm_ibmvtpm_send,
	.cancel = tpm_ibmvtpm_cancel,
	.status = tpm_ibmvtpm_status,
	.req_complete_mask = 0,
	.req_complete_val = 0,
	.req_canceled = tpm_ibmvtpm_req_canceled,
};

static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = {
	.suspend = tpm_ibmvtpm_suspend,
	.resume = tpm_ibmvtpm_resume,
};

/**
 * ibmvtpm_crq_get_next - Get next responded crq
 *
 * @ibmvtpm:	vtpm device struct
 *
 * Return: vtpm crq pointer or NULL.
 */
static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
{
	struct ibmvtpm_crq_queue *crq_q = &ibmvtpm->crq_queue;
	struct ibmvtpm_crq *crq = &crq_q->crq_addr[crq_q->index];

	if (crq->valid & VTPM_MSG_RES) {
		if (++crq_q->index == crq_q->num_entry)
			crq_q->index = 0;
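		/*
		 * Read the rest of the CRQ entry only after the valid flag
		 * has been observed.
		 */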
		smp_rmb();
	} else
		crq = NULL;
	return crq;
}

/**
 * ibmvtpm_crq_process - Process responded crq
 *
 * @crq:	crq to be processed
 * @ibmvtpm:	vtpm device struct
 */
static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
				struct ibmvtpm_dev *ibmvtpm)
{
	int rc = 0;

	switch (crq->valid) {
	case VALID_INIT_CRQ:
		switch (crq->msg) {
		case INIT_CRQ_RES:
			dev_info(ibmvtpm->dev, "CRQ initialized\n");
			rc = ibmvtpm_crq_send_init_complete(ibmvtpm);
			if (rc)
				dev_err(ibmvtpm->dev, "Unable to send CRQ init complete rc=%d\n", rc);
			return;
		case INIT_CRQ_COMP_RES:
			dev_info(ibmvtpm->dev,
				 "CRQ initialization completed\n");
			return;
		default:
			dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg);
			return;
		}
	case IBMVTPM_VALID_CMD:
		switch (crq->msg) {
		case VTPM_GET_RTCE_BUFFER_SIZE_RES:
			if (be16_to_cpu(crq->len) <= 0) {
				dev_err(ibmvtpm->dev, "Invalid rtce size\n");
				return;
			}
			ibmvtpm->rtce_size = be16_to_cpu(crq->len);
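			/* Runs from the CRQ interrupt handler, so the allocation must not sleep. */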
			ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
						    GFP_ATOMIC);
			if (!ibmvtpm->rtce_buf) {
				dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
				return;
			}

			ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev,
				ibmvtpm->rtce_buf, ibmvtpm->rtce_size,
				DMA_BIDIRECTIONAL);

			if (dma_mapping_error(ibmvtpm->dev,
					      ibmvtpm->rtce_dma_handle)) {
				kfree(ibmvtpm->rtce_buf);
				ibmvtpm->rtce_buf = NULL;
				dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n");
			}

			return;
		case VTPM_GET_VERSION_RES:
			ibmvtpm->vtpm_version = be32_to_cpu(crq->data);
			return;
		case VTPM_TPM_COMMAND_RES:
			/* len of the data in rtce buffer */
			ibmvtpm->res_len = be16_to_cpu(crq->len);
			ibmvtpm->tpm_processing_cmd = false;
			wake_up_interruptible(&ibmvtpm->wq);
			return;
		default:
			return;
		}
	}
	return;
}

/**
 * ibmvtpm_interrupt -	Interrupt handler
 *
 * @irq:		irq number to handle
 * @vtpm_instance:	vtpm that received interrupt
 *
 * Return:
 *	IRQ_HANDLED
 */
static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
{
	struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance;
	struct ibmvtpm_crq *crq;

	/*
	 * A loop is needed for the initial setup (get version and get
	 * rtce_size); during normal operation there should be only one
	 * TPM request outstanding at any given time.
	 */
	while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
		ibmvtpm_crq_process(crq, ibmvtpm);
		crq->valid = 0;
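		/*
		 * Order the store that clears the valid flag before any
		 * later stores, so the entry is seen as free before it can
		 * be reused.
		 */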
		smp_wmb();
	}

	return IRQ_HANDLED;
}

/**
 * tpm_ibmvtpm_probe - ibm vtpm initialize entry point
 *
 * @vio_dev:	vio device struct
 * @id:		vio device id struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
				   const struct vio_device_id *id)
{
	struct ibmvtpm_dev *ibmvtpm;
	struct device *dev = &vio_dev->dev;
	struct ibmvtpm_crq_queue *crq_q;
	struct tpm_chip *chip;
	int rc = -ENOMEM, rc1;

	chip = tpmm_chip_alloc(dev, &tpm_ibmvtpm);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
	if (!ibmvtpm) {
		dev_err(dev, "kzalloc for ibmvtpm failed\n");
		goto cleanup;
	}

	ibmvtpm->dev = dev;
	ibmvtpm->vdev = vio_dev;

	crq_q = &ibmvtpm->crq_queue;
	crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
	if (!crq_q->crq_addr) {
		dev_err(dev, "Unable to allocate memory for crq_addr\n");
		goto cleanup;
	}

	crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
	ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
						 CRQ_RES_BUF_SIZE,
						 DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
		dev_err(dev, "dma mapping failed\n");
		goto cleanup;
	}

	rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
				ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
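	/* H_RESOURCE means a CRQ is already registered for this unit; free it and register again. */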
	if (rc == H_RESOURCE)
		rc = ibmvtpm_reset_crq(ibmvtpm);

	if (rc) {
		dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
		goto reg_crq_cleanup;
	}

	rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
			 tpm_ibmvtpm_driver_name, ibmvtpm);
	if (rc) {
		dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
		goto init_irq_cleanup;
	}

	rc = vio_enable_interrupts(vio_dev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto init_irq_cleanup;
	}

	init_waitqueue_head(&ibmvtpm->wq);

	crq_q->index = 0;

	dev_set_drvdata(&chip->dev, ibmvtpm);

	spin_lock_init(&ibmvtpm->rtce_lock);

	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_version(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	return tpm_chip_register(chip);
init_irq_cleanup:
	do {
		rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
	} while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
reg_crq_cleanup:
	dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE,
			 DMA_BIDIRECTIONAL);
cleanup:
	if (ibmvtpm) {
		if (crq_q->crq_addr)
			free_page((unsigned long)crq_q->crq_addr);
		kfree(ibmvtpm);
	}

	return rc;
}

static struct vio_driver ibmvtpm_driver = {
	.id_table	 = tpm_ibmvtpm_device_table,
	.probe		 = tpm_ibmvtpm_probe,
	.remove		 = tpm_ibmvtpm_remove,
	.get_desired_dma = tpm_ibmvtpm_get_desired_dma,
	.name		 = tpm_ibmvtpm_driver_name,
	.pm		 = &tpm_ibmvtpm_pm_ops,
};

/**
 * ibmvtpm_module_init - Initialize ibm vtpm module.
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int __init ibmvtpm_module_init(void)
{
	return vio_register_driver(&ibmvtpm_driver);
}

/**
 * ibmvtpm_module_exit - Tear down ibm vtpm module.
 */
static void __exit ibmvtpm_module_exit(void)
{
	vio_unregister_driver(&ibmvtpm_driver);
}

module_init(ibmvtpm_module_init);
module_exit(ibmvtpm_module_exit);

MODULE_AUTHOR("adlai@us.ibm.com");
MODULE_DESCRIPTION("IBM vTPM Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");