xref: /openbmc/linux/drivers/char/tpm/xen-tpmfront.c (revision f677b30b487ca3763c3de3f1b4d8c976c2961cd1)
/*
 * Implementation of the Xen vTPM device frontend
 *
 * Author:  Daniel De Graaf <dgdegra@tycho.nsa.gov>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2,
 * as published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/io/tpmif.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/page.h>
#include "tpm.h"

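/*
 * Per-device frontend state: the chip registered with the TPM core, the
 * xenbus device node, the page shared with the backend, and the event
 * channel, grant reference and backend domain used to reach it.
 */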
struct tpm_private {
	struct tpm_chip *chip;
	struct xenbus_device *dev;

	struct vtpm_shared_page *shr;

	unsigned int evtchn;
	int ring_ref;
	domid_t backend_id;
};

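/*
 * Synthetic status bits consumed by the generic TPM layer, which polls
 * ->status() via wait_for_tpm_stat().  vtpm_status() below translates
 * the shared-page state machine into these bits.
 */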
enum status_bits {
	VTPM_STATUS_RUNNING  = 0x1,
	VTPM_STATUS_IDLE     = 0x2,
	VTPM_STATUS_RESULT   = 0x4,
	VTPM_STATUS_CANCELED = 0x8,
};

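/*
 * Map the backend's view of the ring (shr->state) onto the status bits
 * expected by the TPM core: IDLE means a cancellation is complete,
 * FINISH means a response is ready, SUBMIT/CANCEL mean the backend is
 * still working on the request.
 */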
static u8 vtpm_status(struct tpm_chip *chip)
{
	struct tpm_private *priv = TPM_VPRIV(chip);
	switch (priv->shr->state) {
	case VTPM_STATE_IDLE:
		return VTPM_STATUS_IDLE | VTPM_STATUS_CANCELED;
	case VTPM_STATE_FINISH:
		return VTPM_STATUS_IDLE | VTPM_STATUS_RESULT;
	case VTPM_STATE_SUBMIT:
	case VTPM_STATE_CANCEL: /* cancel requested, not yet canceled */
		return VTPM_STATUS_RUNNING;
	default:
		return 0;
	}
}

static bool vtpm_req_canceled(struct tpm_chip *chip, u8 status)
{
	return status & VTPM_STATUS_CANCELED;
}

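/*
 * Ask the backend to cancel the current command: publish the CANCEL
 * state and kick the backend through the event channel.  The wmb()
 * keeps the state update visible before the notification is sent.
 */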
static void vtpm_cancel(struct tpm_chip *chip)
{
	struct tpm_private *priv = TPM_VPRIV(chip);
	priv->shr->state = VTPM_STATE_CANCEL;
	wmb();
	notify_remote_via_evtchn(priv->evtchn);
}

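/*
 * Command and response bytes live in the shared page right after the
 * vtpm_shared_page header and the array of nr_extra_pages extra grant
 * references advertised there.
 */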
static unsigned int shr_data_offset(struct vtpm_shared_page *shr)
{
	return sizeof(*shr) + sizeof(u32) * shr->nr_extra_pages;
}

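/*
 * Send a TPM command to the backend: wait until the ring is idle, copy
 * the command into the shared page, flip the state to SUBMIT and notify
 * the backend, then wait for completion for a duration derived from the
 * command ordinal.  barrier()/wmb() order the payload, length and state
 * writes ahead of the notification.  Returns the number of bytes queued
 * or a negative errno.
 */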
static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct tpm_private *priv = TPM_VPRIV(chip);
	struct vtpm_shared_page *shr = priv->shr;
	unsigned int offset = shr_data_offset(shr);

	u32 ordinal;
	unsigned long duration;

	if (offset > PAGE_SIZE)
		return -EINVAL;

	if (offset + count > PAGE_SIZE)
		return -EINVAL;

	/* Wait for completion of any existing command or cancellation */
	if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, chip->vendor.timeout_c,
			&chip->vendor.read_queue, true) < 0) {
		vtpm_cancel(chip);
		return -ETIME;
	}

	memcpy(offset + (u8 *)shr, buf, count);
	shr->length = count;
	barrier();
	shr->state = VTPM_STATE_SUBMIT;
	wmb();
	notify_remote_via_evtchn(priv->evtchn);

	ordinal = be32_to_cpu(((struct tpm_input_header *)buf)->ordinal);
	duration = tpm_calc_ordinal_duration(chip, ordinal);

	if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, duration,
			&chip->vendor.read_queue, true) < 0) {
		/* got a signal or timeout, try to cancel */
		vtpm_cancel(chip);
		return -ETIME;
	}

	return count;
}

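/*
 * Copy a response out of the shared page.  An IDLE ring means the
 * command was cancelled rather than completed; otherwise wait for the
 * RESULT bit, clamp the length to the page and the caller's buffer,
 * and hand back the response bytes.
 */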
static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct tpm_private *priv = TPM_VPRIV(chip);
	struct vtpm_shared_page *shr = priv->shr;
	unsigned int offset = shr_data_offset(shr);
	size_t length = shr->length;

	if (shr->state == VTPM_STATE_IDLE)
		return -ECANCELED;

	/* In theory the wait at the end of _send makes this one unnecessary */
	if (wait_for_tpm_stat(chip, VTPM_STATUS_RESULT, chip->vendor.timeout_c,
			&chip->vendor.read_queue, true) < 0) {
		vtpm_cancel(chip);
		return -ETIME;
	}

	if (offset > PAGE_SIZE)
		return -EIO;

	if (offset + length > PAGE_SIZE)
		length = PAGE_SIZE - offset;

	if (length > count)
		length = count;

	memcpy(buf, offset + (u8 *)shr, length);

	return length;
}

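/*
 * Character device operations and sysfs attributes.  These are the
 * stock helpers from the common TPM module; tpm_register_hardware()
 * wires them up to /dev/tpmN and the chip's sysfs directory.
 */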
static const struct file_operations vtpm_ops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.open = tpm_open,
	.read = tpm_read,
	.write = tpm_write,
	.release = tpm_release,
};

static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
		NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);

static struct attribute *vtpm_attrs[] = {
	&dev_attr_pubek.attr,
	&dev_attr_pcrs.attr,
	&dev_attr_enabled.attr,
	&dev_attr_active.attr,
	&dev_attr_owned.attr,
	&dev_attr_temp_deactivated.attr,
	&dev_attr_caps.attr,
	&dev_attr_cancel.attr,
	&dev_attr_durations.attr,
	&dev_attr_timeouts.attr,
	NULL,
};

static struct attribute_group vtpm_attr_grp = {
	.attrs = vtpm_attrs,
};

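/*
 * Vendor-specific hooks handed to the TPM core.  A request counts as
 * complete once both IDLE and RESULT are set, i.e. the backend reached
 * VTPM_STATE_FINISH and left a response in the shared page.
 */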
static const struct tpm_vendor_specific tpm_vtpm = {
	.status = vtpm_status,
	.recv = vtpm_recv,
	.send = vtpm_send,
	.cancel = vtpm_cancel,
	.req_complete_mask = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
	.req_complete_val  = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
	.req_canceled      = vtpm_req_canceled,
	.attr_group = &vtpm_attr_grp,
	.miscdev = {
		.fops = &vtpm_ops,
	},
};

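/*
 * Event-channel upcall from the backend.  Wake anyone sleeping in
 * wait_for_tpm_stat() once the ring has gone idle or a response has
 * been finished; intermediate states need no action.
 */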
static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
{
	struct tpm_private *priv = dev_id;

	switch (priv->shr->state) {
	case VTPM_STATE_IDLE:
	case VTPM_STATE_FINISH:
		wake_up_interruptible(&priv->chip->vendor.read_queue);
		break;
	case VTPM_STATE_SUBMIT:
	case VTPM_STATE_CANCEL:
	default:
		break;
	}
	return IRQ_HANDLED;
}

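/*
 * Register the chip with the TPM core and cross-link it with our
 * private state; TPM_VPRIV() gives the chip a back-pointer so the
 * send/recv/status hooks can find the frontend again.
 */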
static int setup_chip(struct device *dev, struct tpm_private *priv)
{
	struct tpm_chip *chip;

	chip = tpm_register_hardware(dev, &tpm_vtpm);
	if (!chip)
		return -ENODEV;

	init_waitqueue_head(&chip->vendor.read_queue);

	priv->chip = chip;
	TPM_VPRIV(chip) = priv;

	return 0;
}

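/*
 * Allocate and grant the shared page, bind the event channel, and
 * publish both to the backend through xenstore.  After a successful
 * transaction the frontend area looks roughly like this (the exact
 * path depends on the device index; shown here for illustration only):
 *
 *   device/vtpm/<id>/ring-ref            = "<grant reference>"
 *   device/vtpm/<id>/event-channel       = "<event channel port>"
 *   device/vtpm/<id>/feature-protocol-v2 = "1"
 *
 * The transaction is retried on -EAGAIN, and the device is moved to
 * XenbusStateInitialised once everything is in place.
 */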
/* caller must clean up in case of errors */
static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
{
	struct xenbus_transaction xbt;
	const char *message = NULL;
	int rv;

	priv->shr = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!priv->shr) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}

	rv = xenbus_grant_ring(dev, virt_to_mfn(priv->shr));
	if (rv < 0)
		return rv;

	priv->ring_ref = rv;

	rv = xenbus_alloc_evtchn(dev, &priv->evtchn);
	if (rv)
		return rv;

	rv = bind_evtchn_to_irqhandler(priv->evtchn, tpmif_interrupt, 0,
				       "tpmif", priv);
	if (rv <= 0) {
		xenbus_dev_fatal(dev, rv, "allocating TPM irq");
		return rv;
	}
	priv->chip->vendor.irq = rv;

 again:
	rv = xenbus_transaction_start(&xbt);
	if (rv) {
		xenbus_dev_fatal(dev, rv, "starting transaction");
		return rv;
	}

	rv = xenbus_printf(xbt, dev->nodename,
			"ring-ref", "%u", priv->ring_ref);
	if (rv) {
		message = "writing ring-ref";
		goto abort_transaction;
	}

	rv = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
			priv->evtchn);
	if (rv) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	rv = xenbus_printf(xbt, dev->nodename, "feature-protocol-v2", "1");
	if (rv) {
		message = "writing feature-protocol-v2";
		goto abort_transaction;
	}

	rv = xenbus_transaction_end(xbt, 0);
	if (rv == -EAGAIN)
		goto again;
	if (rv) {
		xenbus_dev_fatal(dev, rv, "completing transaction");
		return rv;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_error(dev, rv, "%s", message);

	return rv;
}

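/*
 * Undo setup_ring(): revoke the grant (which also releases the shared
 * page), or free the page directly if it was never granted, unbind the
 * event-channel handler and release the private structure.
 */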
static void ring_free(struct tpm_private *priv)
{
	if (!priv)
		return;

	if (priv->ring_ref)
		gnttab_end_foreign_access(priv->ring_ref, 0,
				(unsigned long)priv->shr);
	else
		free_page((unsigned long)priv->shr);

	if (priv->chip && priv->chip->vendor.irq)
		unbind_from_irqhandler(priv->chip->vendor.irq, priv);

	kfree(priv);
}

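/*
 * xenbus probe/remove/resume.  Probe allocates the private state,
 * registers the chip, sets up the shared ring, then queries the TPM's
 * timeouts and durations over the new ring.
 */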
static int tpmfront_probe(struct xenbus_device *dev,
		const struct xenbus_device_id *id)
{
	struct tpm_private *priv;
	int rv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating priv structure");
		return -ENOMEM;
	}

	rv = setup_chip(&dev->dev, priv);
	if (rv) {
		kfree(priv);
		return rv;
	}

	rv = setup_ring(dev, priv);
	if (rv) {
		tpm_remove_hardware(&dev->dev);
		ring_free(priv);
		return rv;
	}

	tpm_get_timeouts(priv->chip);

	return rv;
}

static int tpmfront_remove(struct xenbus_device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
	struct tpm_private *priv = TPM_VPRIV(chip);
	tpm_remove_hardware(&dev->dev);
	ring_free(priv);
	TPM_VPRIV(chip) = NULL;
	return 0;
}

static int tpmfront_resume(struct xenbus_device *dev)
{
	/* A suspend/resume/migrate will interrupt a vTPM anyway */
	tpmfront_remove(dev);
	return tpmfront_probe(dev, NULL);
}

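/*
 * React to backend state changes.  Once the backend is Initialised or
 * Connected, insist that it advertises feature-protocol-v2 before
 * declaring the frontend Connected; on Closing/Closed tear the device
 * down.
 */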
static void backend_changed(struct xenbus_device *dev,
		enum xenbus_state backend_state)
{
	int val;

	switch (backend_state) {
	case XenbusStateInitialised:
	case XenbusStateConnected:
		if (dev->state == XenbusStateConnected)
			break;

		if (xenbus_scanf(XBT_NIL, dev->otherend,
				"feature-protocol-v2", "%d", &val) < 0)
			val = 0;
		if (!val) {
			xenbus_dev_fatal(dev, -EINVAL,
					"vTPM protocol 2 required");
			return;
		}
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
	case XenbusStateClosed:
		device_unregister(&dev->dev);
		xenbus_frontend_closed(dev);
		break;
	default:
		break;
	}
}

static const struct xenbus_device_id tpmfront_ids[] = {
	{ "vtpm" },
	{ "" }
};
MODULE_ALIAS("xen:vtpm");

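/*
 * Glue it all together: DEFINE_XENBUS_DRIVER() emits the struct
 * xenbus_driver "tpmfront_driver" bound to the "vtpm" id table above,
 * and the module init/exit hooks register it as a frontend when
 * running in a Xen domain.
 */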
static DEFINE_XENBUS_DRIVER(tpmfront, ,
		.probe = tpmfront_probe,
		.remove = tpmfront_remove,
		.resume = tpmfront_resume,
		.otherend_changed = backend_changed,
	);

static int __init xen_tpmfront_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	return xenbus_register_frontend(&tpmfront_driver);
}
module_init(xen_tpmfront_init);

static void __exit xen_tpmfront_exit(void)
{
	xenbus_unregister_driver(&tpmfront_driver);
}
module_exit(xen_tpmfront_exit);

MODULE_AUTHOR("Daniel De Graaf <dgdegra@tycho.nsa.gov>");
MODULE_DESCRIPTION("Xen vTPM Driver");
MODULE_LICENSE("GPL");