xref: /openbmc/linux/drivers/char/tpm/tpm_ibmvtpm.c (revision a06c488d)
/*
 * Copyright (C) 2012 IBM Corporation
 *
 * Author: Ashley Lai <ashleydlai@gmail.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 */

#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <asm/vio.h>
#include <asm/irq.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <asm/prom.h>

#include "tpm.h"
#include "tpm_ibmvtpm.h"

static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";

static struct vio_device_id tpm_ibmvtpm_device_table[] = {
	{ "IBM,vtpm", "IBM,vtpm"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);

/**
 * ibmvtpm_send_crq - Send a CRQ request
 * @vdev:	vio device struct
 * @w1:		first word
 * @w2:		second word
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_send_crq(struct vio_dev *vdev, u64 w1, u64 w2)
{
	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, w2);
}

/**
 * ibmvtpm_get_data - Retrieve ibm vtpm data
 * @dev:	device struct
 *
 * Return value:
 *	vtpm device struct
 */
static struct ibmvtpm_dev *ibmvtpm_get_data(const struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	if (chip)
		return (struct ibmvtpm_dev *)TPM_VPRIV(chip);
	return NULL;
}

/**
 * tpm_ibmvtpm_recv - Receive data after send
 * @chip:	tpm chip struct
 * @buf:	buffer to read
 * @count:	size of buffer
 *
 * Return value:
 *	Number of bytes read
 */
static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct ibmvtpm_dev *ibmvtpm;
	u16 len;
	int sig;

	ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);

	if (!ibmvtpm->rtce_buf) {
		dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
		return 0;
	}

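	/*
	 * The interrupt handler clears tpm_processing_cmd and fills in
	 * res_len once the response CRQ arrives; sleep until then.
	 */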
	sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
	if (sig)
		return -EINTR;

	len = ibmvtpm->res_len;

	if (count < len) {
		dev_err(ibmvtpm->dev,
			"Invalid size in recv: count=%zd, crq_size=%d\n",
			count, len);
		return -EIO;
	}

	spin_lock(&ibmvtpm->rtce_lock);
	memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len);
	memset(ibmvtpm->rtce_buf, 0, len);
	ibmvtpm->res_len = 0;
	spin_unlock(&ibmvtpm->rtce_lock);
	return len;
}

/**
 * tpm_ibmvtpm_send - Send tpm request
 * @chip:	tpm chip struct
 * @buf:	buffer containing data to send
 * @count:	size of buffer
 *
 * Return value:
 *	Number of bytes sent
 */
static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct ibmvtpm_dev *ibmvtpm;
	struct ibmvtpm_crq crq;
	__be64 *word = (__be64 *)&crq;
	int rc, sig;

	ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);

	if (!ibmvtpm->rtce_buf) {
		dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
		return 0;
	}

	if (count > ibmvtpm->rtce_size) {
		dev_err(ibmvtpm->dev,
			"Invalid size in send: count=%zd, rtce_size=%d\n",
			count, ibmvtpm->rtce_size);
		return -EIO;
	}

	if (ibmvtpm->tpm_processing_cmd) {
		dev_info(ibmvtpm->dev,
		         "Need to wait for TPM to finish\n");
		/* wait for previous command to finish */
		sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
		if (sig)
			return -EINTR;
	}

	spin_lock(&ibmvtpm->rtce_lock);
	ibmvtpm->res_len = 0;
	memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
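	/*
	 * Build a VTPM_TPM_COMMAND CRQ describing the request that now
	 * sits in the TCE-mapped RTCE buffer.
	 */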
	crq.valid = (u8)IBMVTPM_VALID_CMD;
	crq.msg = (u8)VTPM_TPM_COMMAND;
	crq.len = cpu_to_be16(count);
	crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle);

	/*
	 * set the processing flag before the Hcall, since we may get the
	 * result (interrupt) before even being able to check rc.
	 */
	ibmvtpm->tpm_processing_cmd = true;

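	/*
	 * The CRQ struct is laid out big-endian; convert each 8-byte half
	 * back to CPU order since the hcall wrapper takes plain u64s.
	 */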
	rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]),
			      be64_to_cpu(word[1]));
	if (rc != H_SUCCESS) {
		dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
		rc = 0;
		ibmvtpm->tpm_processing_cmd = false;
	} else
		rc = count;

	spin_unlock(&ibmvtpm->rtce_lock);
	return rc;
}

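/*
 * The vTPM CRQ interface gives this driver no cancel operation and no
 * status register to read, so these two callbacks are effectively no-ops.
 */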
static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
{
	return;
}

static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
{
	return 0;
}

/**
 * ibmvtpm_crq_get_rtce_size - Send a CRQ request to get rtce size
 * @ibmvtpm:	vtpm device struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
{
	struct ibmvtpm_crq crq;
	u64 *buf = (u64 *) &crq;
	int rc;

	crq.valid = (u8)IBMVTPM_VALID_CMD;
	crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
			      cpu_to_be64(buf[1]));
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);

	return rc;
}

/**
 * ibmvtpm_crq_get_version - Send a CRQ request to get vtpm version
 *			   - Note that this is vtpm version and not tpm version
 * @ibmvtpm:	vtpm device struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
{
	struct ibmvtpm_crq crq;
	u64 *buf = (u64 *) &crq;
	int rc;

	crq.valid = (u8)IBMVTPM_VALID_CMD;
	crq.msg = (u8)VTPM_GET_VERSION;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
			      cpu_to_be64(buf[1]));
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_get_version failed rc=%d\n", rc);

	return rc;
}

/**
 * ibmvtpm_crq_send_init_complete - Send a CRQ initialize complete message
 * @ibmvtpm:	vtpm device struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
{
	int rc;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_COMP_CMD, 0);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_send_init_complete failed rc=%d\n", rc);

	return rc;
}

/**
 * ibmvtpm_crq_send_init - Send a CRQ initialize message
 * @ibmvtpm:	vtpm device struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
{
	int rc;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_CMD, 0);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_send_init failed rc=%d\n", rc);

	return rc;
}

/**
 * tpm_ibmvtpm_remove - ibm vtpm remove entry point
 * @vdev:	vio device struct
 *
 * Return value:
 *	0
 */
static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
{
	struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev);
	struct tpm_chip *chip = dev_get_drvdata(ibmvtpm->dev);
	int rc = 0;

	tpm_chip_unregister(chip);

	free_irq(vdev->irq, ibmvtpm);

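	/* Keep retrying H_FREE_CRQ while the hypervisor reports busy. */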
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle,
			 CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)ibmvtpm->crq_queue.crq_addr);

	if (ibmvtpm->rtce_buf) {
		dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle,
				 ibmvtpm->rtce_size, DMA_BIDIRECTIONAL);
		kfree(ibmvtpm->rtce_buf);
	}

	kfree(ibmvtpm);

	return 0;
}

/**
 * tpm_ibmvtpm_get_desired_dma - Get DMA size needed by this driver
 * @vdev:	vio device struct
 *
 * Return value:
 *	Number of bytes the driver needs to DMA map
 */
static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
{
	struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev);

	/* ibmvtpm initializes at probe time, so the data we are
	 * asking for may not be set yet. Estimate that 4K is required
	 * for the TCE-mapped buffer in addition to the CRQ.
	 */
	if (!ibmvtpm)
		return CRQ_RES_BUF_SIZE + PAGE_SIZE;

	return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
}

/**
 * tpm_ibmvtpm_suspend - Suspend
 * @dev:	device struct
 *
 * Return value:
 *	0
 */
static int tpm_ibmvtpm_suspend(struct device *dev)
{
	struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev);
	struct ibmvtpm_crq crq;
	u64 *buf = (u64 *) &crq;
	int rc = 0;

	crq.valid = (u8)IBMVTPM_VALID_CMD;
	crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
			      cpu_to_be64(buf[1]));
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"tpm_ibmvtpm_suspend failed rc=%d\n", rc);

	return rc;
}

/**
 * ibmvtpm_reset_crq - Reset CRQ
 * @ibmvtpm:	ibm vtpm struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
{
	int rc = 0;

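	/* Free the old CRQ, clear the queue page, then register it again. */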
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ,
					ibmvtpm->vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	memset(ibmvtpm->crq_queue.crq_addr, 0, CRQ_RES_BUF_SIZE);
	ibmvtpm->crq_queue.index = 0;

	return plpar_hcall_norets(H_REG_CRQ, ibmvtpm->vdev->unit_address,
				  ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
}

/**
 * tpm_ibmvtpm_resume - Resume from suspend
 * @dev:	device struct
 *
 * Return value:
 *	0
 */
static int tpm_ibmvtpm_resume(struct device *dev)
{
	struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev);
	int rc = 0;

	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_ENABLE_CRQ,
					ibmvtpm->vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc) {
		dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
		return rc;
	}

	rc = vio_enable_interrupts(ibmvtpm->vdev);
	if (rc) {
		dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
		return rc;
	}

	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		dev_err(dev, "Error send_init rc=%d\n", rc);

	return rc;
}

static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
{
	return (status == 0);
}

static const struct tpm_class_ops tpm_ibmvtpm = {
	.recv = tpm_ibmvtpm_recv,
	.send = tpm_ibmvtpm_send,
	.cancel = tpm_ibmvtpm_cancel,
	.status = tpm_ibmvtpm_status,
	.req_complete_mask = 0,
	.req_complete_val = 0,
	.req_canceled = tpm_ibmvtpm_req_canceled,
};

static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = {
	.suspend = tpm_ibmvtpm_suspend,
	.resume = tpm_ibmvtpm_resume,
};

/**
 * ibmvtpm_crq_get_next - Get the next response CRQ entry
 * @ibmvtpm:	vtpm device struct
 *
 * Return value:
 *	vtpm crq pointer
 */
static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
{
	struct ibmvtpm_crq_queue *crq_q = &ibmvtpm->crq_queue;
	struct ibmvtpm_crq *crq = &crq_q->crq_addr[crq_q->index];

	if (crq->valid & VTPM_MSG_RES) {
		if (++crq_q->index == crq_q->num_entry)
			crq_q->index = 0;
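		/* Order payload reads after the valid-byte check above. */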
		smp_rmb();
	} else
		crq = NULL;
	return crq;
}

/**
 * ibmvtpm_crq_process - Process a response CRQ
 * @crq:	crq to be processed
 * @ibmvtpm:	vtpm device struct
 *
 * Return value:
 *	Nothing
 */
static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
				struct ibmvtpm_dev *ibmvtpm)
{
	int rc = 0;

	switch (crq->valid) {
	case VALID_INIT_CRQ:
		switch (crq->msg) {
		case INIT_CRQ_RES:
			dev_info(ibmvtpm->dev, "CRQ initialized\n");
			rc = ibmvtpm_crq_send_init_complete(ibmvtpm);
			if (rc)
				dev_err(ibmvtpm->dev, "Unable to send CRQ init complete rc=%d\n", rc);
			return;
		case INIT_CRQ_COMP_RES:
			dev_info(ibmvtpm->dev,
				 "CRQ initialization completed\n");
			return;
		default:
			dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg);
			return;
		}
	case IBMVTPM_VALID_CMD:
		switch (crq->msg) {
		case VTPM_GET_RTCE_BUFFER_SIZE_RES:
			if (be16_to_cpu(crq->len) <= 0) {
				dev_err(ibmvtpm->dev, "Invalid rtce size\n");
				return;
			}
			ibmvtpm->rtce_size = be16_to_cpu(crq->len);
			ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
						    GFP_ATOMIC);
			if (!ibmvtpm->rtce_buf) {
				dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
				return;
			}

			ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev,
				ibmvtpm->rtce_buf, ibmvtpm->rtce_size,
				DMA_BIDIRECTIONAL);

			if (dma_mapping_error(ibmvtpm->dev,
					      ibmvtpm->rtce_dma_handle)) {
				kfree(ibmvtpm->rtce_buf);
				ibmvtpm->rtce_buf = NULL;
				dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n");
			}

			return;
		case VTPM_GET_VERSION_RES:
			ibmvtpm->vtpm_version = be32_to_cpu(crq->data);
			return;
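		/*
		 * A TPM command finished: note the response length and wake
		 * the thread sleeping in tpm_ibmvtpm_recv().
		 */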
		case VTPM_TPM_COMMAND_RES:
			/* len of the data in rtce buffer */
			ibmvtpm->res_len = be16_to_cpu(crq->len);
			ibmvtpm->tpm_processing_cmd = false;
			wake_up_interruptible(&ibmvtpm->wq);
			return;
		default:
			return;
		}
	}
	return;
}

/**
 * ibmvtpm_interrupt -	Interrupt handler
 * @irq:		irq number to handle
 * @vtpm_instance:	vtpm that received interrupt
 *
 * Returns:
 *	IRQ_HANDLED
 */
static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
{
	struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance;
	struct ibmvtpm_crq *crq;

	/* while loop is needed for initial setup (get version and
	 * get rtce_size). There should be only one tpm request at any
	 * given time.
	 */
	while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
		ibmvtpm_crq_process(crq, ibmvtpm);
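		/*
		 * Mark this entry free for reuse only after processing is
		 * done; the write barrier orders the store before we move on.
		 */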
		crq->valid = 0;
		smp_wmb();
	}

	return IRQ_HANDLED;
}

/**
 * tpm_ibmvtpm_probe - ibm vtpm initialize entry point
 * @vio_dev:	vio device struct
 * @id:		vio device id struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
				   const struct vio_device_id *id)
{
	struct ibmvtpm_dev *ibmvtpm;
	struct device *dev = &vio_dev->dev;
	struct ibmvtpm_crq_queue *crq_q;
	struct tpm_chip *chip;
	int rc = -ENOMEM, rc1;

	chip = tpmm_chip_alloc(dev, &tpm_ibmvtpm);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
	if (!ibmvtpm) {
		dev_err(dev, "kzalloc for ibmvtpm failed\n");
		goto cleanup;
	}

	ibmvtpm->dev = dev;
	ibmvtpm->vdev = vio_dev;

	crq_q = &ibmvtpm->crq_queue;
	crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
	if (!crq_q->crq_addr) {
		dev_err(dev, "Unable to allocate memory for crq_addr\n");
		goto cleanup;
	}

	crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
	ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
						 CRQ_RES_BUF_SIZE,
						 DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
		dev_err(dev, "dma mapping failed\n");
		goto cleanup;
	}

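	/*
	 * Register the CRQ with the hypervisor. H_RESOURCE indicates a CRQ
	 * is already registered, so free and re-register it via
	 * ibmvtpm_reset_crq().
	 */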
	rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
				ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
	if (rc == H_RESOURCE)
		rc = ibmvtpm_reset_crq(ibmvtpm);

	if (rc) {
		dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
		goto reg_crq_cleanup;
	}

	rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
			 tpm_ibmvtpm_driver_name, ibmvtpm);
	if (rc) {
		dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
		goto init_irq_cleanup;
	}

	rc = vio_enable_interrupts(vio_dev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto init_irq_cleanup;
	}

	init_waitqueue_head(&ibmvtpm->wq);

	crq_q->index = 0;

	TPM_VPRIV(chip) = (void *)ibmvtpm;

	spin_lock_init(&ibmvtpm->rtce_lock);

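	/*
	 * Start the CRQ handshake; the version and RTCE buffer size
	 * replies are handled asynchronously in ibmvtpm_crq_process().
	 */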
	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_version(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	return tpm_chip_register(chip);
init_irq_cleanup:
	do {
		rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
	} while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
reg_crq_cleanup:
	dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE,
			 DMA_BIDIRECTIONAL);
cleanup:
	if (ibmvtpm) {
		if (crq_q->crq_addr)
			free_page((unsigned long)crq_q->crq_addr);
		kfree(ibmvtpm);
	}

	return rc;
}

static struct vio_driver ibmvtpm_driver = {
	.id_table	 = tpm_ibmvtpm_device_table,
	.probe		 = tpm_ibmvtpm_probe,
	.remove		 = tpm_ibmvtpm_remove,
	.get_desired_dma = tpm_ibmvtpm_get_desired_dma,
	.name		 = tpm_ibmvtpm_driver_name,
	.pm		 = &tpm_ibmvtpm_pm_ops,
};

/**
 * ibmvtpm_module_init - Initialize ibm vtpm module
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int __init ibmvtpm_module_init(void)
{
	return vio_register_driver(&ibmvtpm_driver);
}

/**
 * ibmvtpm_module_exit - Teardown ibm vtpm module
 *
 * Return value:
 *	Nothing
 */
static void __exit ibmvtpm_module_exit(void)
{
	vio_unregister_driver(&ibmvtpm_driver);
}

module_init(ibmvtpm_module_init);
module_exit(ibmvtpm_module_exit);

MODULE_AUTHOR("adlai@us.ibm.com");
MODULE_DESCRIPTION("IBM vTPM Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");