/*
 * Implementation of the Xen vTPM device frontend
 *
 * Author:  Daniel De Graaf <dgdegra@tycho.nsa.gov>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2,
 * as published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <xen/events.h>
#include <xen/interface/io/tpmif.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/page.h>
#include "tpm.h"

struct tpm_private {
	struct tpm_chip *chip;
	struct xenbus_device *dev;

	struct vtpm_shared_page *shr;

	unsigned int evtchn;
	int ring_ref;
	domid_t backend_id;
};

enum status_bits {
	VTPM_STATUS_RUNNING  = 0x1,
	VTPM_STATUS_IDLE     = 0x2,
	VTPM_STATUS_RESULT   = 0x4,
	VTPM_STATUS_CANCELED = 0x8,
};

static u8 vtpm_status(struct tpm_chip *chip)
{
	struct tpm_private *priv = TPM_VPRIV(chip);
	switch (priv->shr->state) {
	case VTPM_STATE_IDLE:
		return VTPM_STATUS_IDLE | VTPM_STATUS_CANCELED;
	case VTPM_STATE_FINISH:
		return VTPM_STATUS_IDLE | VTPM_STATUS_RESULT;
	case VTPM_STATE_SUBMIT:
	case VTPM_STATE_CANCEL: /* cancel requested, not yet canceled */
		return VTPM_STATUS_RUNNING;
	default:
		return 0;
	}
}

static bool vtpm_req_canceled(struct tpm_chip *chip, u8 status)
{
	return status & VTPM_STATUS_CANCELED;
}

static void vtpm_cancel(struct tpm_chip *chip)
{
	struct tpm_private *priv = TPM_VPRIV(chip);
	priv->shr->state = VTPM_STATE_CANCEL;
	wmb();
	notify_remote_via_evtchn(priv->evtchn);
}

static unsigned int shr_data_offset(struct vtpm_shared_page *shr)
{
	return sizeof(*shr) + sizeof(u32) * shr->nr_extra_pages;
}

static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct tpm_private *priv = TPM_VPRIV(chip);
	struct vtpm_shared_page *shr = priv->shr;
	unsigned int offset = shr_data_offset(shr);

	u32 ordinal;
	unsigned long duration;

	if (offset > PAGE_SIZE)
		return -EINVAL;

	if (offset + count > PAGE_SIZE)
		return -EINVAL;

	/* Wait for completion of any existing command or cancellation */
	if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, chip->vendor.timeout_c,
			&chip->vendor.read_queue, true) < 0) {
		vtpm_cancel(chip);
		return -ETIME;
	}

	memcpy(offset + (u8 *)shr, buf, count);
	shr->length = count;
	barrier();
	shr->state = VTPM_STATE_SUBMIT;
	wmb();
	notify_remote_via_evtchn(priv->evtchn);

	ordinal = be32_to_cpu(((struct tpm_input_header *)buf)->ordinal);
	duration = tpm_calc_ordinal_duration(chip, ordinal);

	if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, duration,
			&chip->vendor.read_queue, true) < 0) {
		/* got a signal or timeout, try to cancel */
		vtpm_cancel(chip);
		return -ETIME;
	}

	return count;
}
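
/*
 * Read the response that the backend left in the shared page.  The copy
 * length is clamped to both the shared page and the caller's buffer so a
 * misbehaving backend cannot cause an out-of-bounds read.  Returns
 * -ECANCELED if the command was cancelled before a result was produced.
 */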
static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct tpm_private *priv = TPM_VPRIV(chip);
	struct vtpm_shared_page *shr = priv->shr;
	unsigned int offset = shr_data_offset(shr);
	size_t length = shr->length;

	if (shr->state == VTPM_STATE_IDLE)
		return -ECANCELED;

	/* In theory the wait at the end of _send makes this one unnecessary */
	if (wait_for_tpm_stat(chip, VTPM_STATUS_RESULT, chip->vendor.timeout_c,
			&chip->vendor.read_queue, true) < 0) {
		vtpm_cancel(chip);
		return -ETIME;
	}

	if (offset > PAGE_SIZE)
		return -EIO;

	if (offset + length > PAGE_SIZE)
		length = PAGE_SIZE - offset;

	if (length > count)
		length = count;

	memcpy(buf, offset + (u8 *)shr, length);

	return length;
}

ssize_t tpm_show_locality(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	struct tpm_private *priv = TPM_VPRIV(chip);
	u8 locality = priv->shr->locality;

	return sprintf(buf, "%d\n", locality);
}

ssize_t tpm_store_locality(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	struct tpm_private *priv = TPM_VPRIV(chip);
	u8 val;

	int rv = kstrtou8(buf, 0, &val);
	if (rv)
		return rv;

	priv->shr->locality = val;

	return len;
}

static const struct file_operations vtpm_ops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.open = tpm_open,
	.read = tpm_read,
	.write = tpm_write,
	.release = tpm_release,
};

static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
		   NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
static DEVICE_ATTR(locality, S_IRUGO | S_IWUSR, tpm_show_locality,
		   tpm_store_locality);

static struct attribute *vtpm_attrs[] = {
	&dev_attr_pubek.attr,
	&dev_attr_pcrs.attr,
	&dev_attr_enabled.attr,
	&dev_attr_active.attr,
	&dev_attr_owned.attr,
	&dev_attr_temp_deactivated.attr,
	&dev_attr_caps.attr,
	&dev_attr_cancel.attr,
	&dev_attr_durations.attr,
	&dev_attr_timeouts.attr,
	&dev_attr_locality.attr,
	NULL,
};

static struct attribute_group vtpm_attr_grp = {
	.attrs = vtpm_attrs,
};

#define TPM_LONG_TIMEOUT (10 * 60 * HZ)

static const struct tpm_vendor_specific tpm_vtpm = {
	.status = vtpm_status,
	.recv = vtpm_recv,
	.send = vtpm_send,
	.cancel = vtpm_cancel,
	.req_complete_mask = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
	.req_complete_val  = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
	.req_canceled = vtpm_req_canceled,
	.attr_group = &vtpm_attr_grp,
	.miscdev = {
		.fops = &vtpm_ops,
	},
	.duration = {
		TPM_LONG_TIMEOUT,
		TPM_LONG_TIMEOUT,
		TPM_LONG_TIMEOUT,
	},
};

static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
{
	struct tpm_private *priv = dev_id;

	switch (priv->shr->state) {
	case VTPM_STATE_IDLE:
	case VTPM_STATE_FINISH:
		wake_up_interruptible(&priv->chip->vendor.read_queue);
		break;
	case VTPM_STATE_SUBMIT:
	case VTPM_STATE_CANCEL:
	default:
		break;
	}
	return IRQ_HANDLED;
}

static int setup_chip(struct device *dev, struct tpm_private *priv)
{
	struct tpm_chip *chip;

	chip = tpm_register_hardware(dev, &tpm_vtpm);
	if (!chip)
		return -ENODEV;

	init_waitqueue_head(&chip->vendor.read_queue);

	priv->chip = chip;
	TPM_VPRIV(chip) = priv;

	return 0;
}
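
/*
 * Share a single zeroed page with the backend and advertise it over
 * xenstore: the grant reference goes into "ring-ref", the allocated event
 * channel into "event-channel", and "feature-protocol-v2" signals that this
 * frontend speaks the v2 shared-page protocol.  The keys end up under the
 * device's frontend node (typically something like device/vtpm/0/ring-ref,
 * depending on the toolstack) for the backend to pick up once the device
 * switches to XenbusStateInitialised.
 */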
/* caller must clean up in case of errors */
static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
{
	struct xenbus_transaction xbt;
	const char *message = NULL;
	int rv;

	priv->shr = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!priv->shr) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}

	rv = xenbus_grant_ring(dev, virt_to_mfn(priv->shr));
	if (rv < 0)
		return rv;

	priv->ring_ref = rv;

	rv = xenbus_alloc_evtchn(dev, &priv->evtchn);
	if (rv)
		return rv;

	rv = bind_evtchn_to_irqhandler(priv->evtchn, tpmif_interrupt, 0,
				       "tpmif", priv);
	if (rv <= 0) {
		xenbus_dev_fatal(dev, rv, "allocating TPM irq");
		return rv;
	}
	priv->chip->vendor.irq = rv;

 again:
	rv = xenbus_transaction_start(&xbt);
	if (rv) {
		xenbus_dev_fatal(dev, rv, "starting transaction");
		return rv;
	}

	rv = xenbus_printf(xbt, dev->nodename,
			   "ring-ref", "%u", priv->ring_ref);
	if (rv) {
		message = "writing ring-ref";
		goto abort_transaction;
	}

	rv = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
			   priv->evtchn);
	if (rv) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	rv = xenbus_printf(xbt, dev->nodename, "feature-protocol-v2", "1");
	if (rv) {
		message = "writing feature-protocol-v2";
		goto abort_transaction;
	}

	rv = xenbus_transaction_end(xbt, 0);
	if (rv == -EAGAIN)
		goto again;
	if (rv) {
		xenbus_dev_fatal(dev, rv, "completing transaction");
		return rv;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_error(dev, rv, "%s", message);

	return rv;
}

static void ring_free(struct tpm_private *priv)
{
	if (!priv)
		return;

	if (priv->ring_ref)
		gnttab_end_foreign_access(priv->ring_ref, 0,
					  (unsigned long)priv->shr);
	else
		free_page((unsigned long)priv->shr);

	if (priv->chip && priv->chip->vendor.irq)
		unbind_from_irqhandler(priv->chip->vendor.irq, priv);

	kfree(priv);
}

static int tpmfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	struct tpm_private *priv;
	int rv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating priv structure");
		return -ENOMEM;
	}

	rv = setup_chip(&dev->dev, priv);
	if (rv) {
		kfree(priv);
		return rv;
	}

	rv = setup_ring(dev, priv);
	if (rv) {
		tpm_remove_hardware(&dev->dev);
		ring_free(priv);
		return rv;
	}

	tpm_get_timeouts(priv->chip);

	dev_set_drvdata(&dev->dev, priv->chip);

	return rv;
}

static int tpmfront_remove(struct xenbus_device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
	struct tpm_private *priv = TPM_VPRIV(chip);
	tpm_remove_hardware(&dev->dev);
	ring_free(priv);
	TPM_VPRIV(chip) = NULL;
	return 0;
}

static int tpmfront_resume(struct xenbus_device *dev)
{
	/* A suspend/resume/migrate will interrupt a vTPM anyway */
	tpmfront_remove(dev);
	return tpmfront_probe(dev, NULL);
}
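
/*
 * React to backend state changes.  The frontend refuses to connect unless
 * the backend advertises "feature-protocol-v2", since the shared-page
 * layout used above is the v2 protocol; once that is confirmed the device
 * moves to XenbusStateConnected.  A closing backend tears the device down.
 */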
static void backend_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	int val;

	switch (backend_state) {
	case XenbusStateInitialised:
	case XenbusStateConnected:
		if (dev->state == XenbusStateConnected)
			break;

		if (xenbus_scanf(XBT_NIL, dev->otherend,
				 "feature-protocol-v2", "%d", &val) < 0)
			val = 0;
		if (!val) {
			xenbus_dev_fatal(dev, -EINVAL,
					 "vTPM protocol 2 required");
			return;
		}
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
	case XenbusStateClosed:
		device_unregister(&dev->dev);
		xenbus_frontend_closed(dev);
		break;
	default:
		break;
	}
}

static const struct xenbus_device_id tpmfront_ids[] = {
	{ "vtpm" },
	{ "" }
};
MODULE_ALIAS("xen:vtpm");

static DEFINE_XENBUS_DRIVER(tpmfront, ,
	.probe = tpmfront_probe,
	.remove = tpmfront_remove,
	.resume = tpmfront_resume,
	.otherend_changed = backend_changed,
	);

static int __init xen_tpmfront_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	return xenbus_register_frontend(&tpmfront_driver);
}
module_init(xen_tpmfront_init);

static void __exit xen_tpmfront_exit(void)
{
	xenbus_unregister_driver(&tpmfront_driver);
}
module_exit(xen_tpmfront_exit);

MODULE_AUTHOR("Daniel De Graaf <dgdegra@tycho.nsa.gov>");
MODULE_DESCRIPTION("Xen vTPM Driver");
MODULE_LICENSE("GPL");