// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt bus support
 *
 * Copyright (C) 2017, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/dmar.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <crypto/hash.h>

#include "tb.h"

static DEFINE_IDA(tb_domain_ida);

static bool match_service_id(const struct tb_service_id *id,
			     const struct tb_service *svc)
{
	if (id->match_flags & TBSVC_MATCH_PROTOCOL_KEY) {
		if (strcmp(id->protocol_key, svc->key))
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_ID) {
		if (id->protocol_id != svc->prtcid)
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
		if (id->protocol_version != svc->prtcvers)
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_REVISION) {
		if (id->protocol_revision != svc->prtcrevs)
			return false;
	}

	return true;
}

static const struct tb_service_id *__tb_service_match(struct device *dev,
						      struct device_driver *drv)
{
	struct tb_service_driver *driver;
	const struct tb_service_id *ids;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return NULL;

	driver = container_of(drv, struct tb_service_driver, driver);
	if (!driver->id_table)
		return NULL;

	for (ids = driver->id_table; ids->match_flags != 0; ids++) {
		if (match_service_id(ids, svc))
			return ids;
	}

	return NULL;
}

static int tb_service_match(struct device *dev, struct device_driver *drv)
{
	return !!__tb_service_match(dev, drv);
}

static int tb_service_probe(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tb_service_driver *driver;
	const struct tb_service_id *id;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	id = __tb_service_match(dev, &driver->driver);

	return driver->probe(svc, id);
}

static void tb_service_remove(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tb_service_driver *driver;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	if (driver->remove)
		driver->remove(svc);
}

static void tb_service_shutdown(struct device *dev)
{
	struct tb_service_driver *driver;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc || !dev->driver)
		return;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	if (driver->shutdown)
		driver->shutdown(svc);
}

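/*
 * For illustration only (the names below are made up): a service driver
 * binds to this bus by filling in a struct tb_service_driver together with
 * an id_table that match_service_id() above walks until an entry with zero
 * match_flags terminates it. Roughly along these lines, registered with
 * tb_register_service_driver():
 *
 *	static const struct tb_service_id my_ids[] = {
 *		{
 *			.match_flags = TBSVC_MATCH_PROTOCOL_KEY,
 *			.protocol_key = "network",
 *		},
 *		{ },
 *	};
 *
 *	static struct tb_service_driver my_driver = {
 *		.driver = {
 *			.name = "my-service",
 *		},
 *		.probe = my_probe,
 *		.remove = my_remove,
 *		.id_table = my_ids,
 *	};
 */
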
static const char * const tb_security_names[] = {
	[TB_SECURITY_NONE] = "none",
	[TB_SECURITY_USER] = "user",
	[TB_SECURITY_SECURE] = "secure",
	[TB_SECURITY_DPONLY] = "dponly",
	[TB_SECURITY_USBONLY] = "usbonly",
	[TB_SECURITY_NOPCIE] = "nopcie",
};

static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	uuid_t *uuids;
	ssize_t ret;
	int i;

	uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
	if (!uuids)
		return -ENOMEM;

	pm_runtime_get_sync(&tb->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out;
	}
	ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);
	if (ret) {
		mutex_unlock(&tb->lock);
		goto out;
	}
	mutex_unlock(&tb->lock);

	for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
		if (!uuid_is_null(&uuids[i]))
			ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%pUb",
					 &uuids[i]);

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s",
				 i < tb->nboot_acl - 1 ? "," : "\n");
	}

out:
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);
	kfree(uuids);

	return ret;
}

static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	char *str, *s, *uuid_str;
	ssize_t ret = 0;
	uuid_t *acl;
	int i = 0;

	/*
	 * Make sure the value is not bigger than tb->nboot_acl * UUID
	 * length + commas and optional "\n". Also the smallest allowable
	 * string is tb->nboot_acl - 1 commas (all entries empty).
	 */
	if (count > (UUID_STRING_LEN + 1) * tb->nboot_acl + 1)
		return -EINVAL;
	if (count < tb->nboot_acl - 1)
		return -EINVAL;

	str = kstrdup(buf, GFP_KERNEL);
	if (!str)
		return -ENOMEM;

	acl = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
	if (!acl) {
		ret = -ENOMEM;
		goto err_free_str;
	}

	uuid_str = strim(str);
	while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) {
		size_t len = strlen(s);

		if (len) {
			if (len != UUID_STRING_LEN) {
				ret = -EINVAL;
				goto err_free_acl;
			}
			ret = uuid_parse(s, &acl[i]);
			if (ret)
				goto err_free_acl;
		}

		i++;
	}

	if (s || i < tb->nboot_acl) {
		ret = -EINVAL;
		goto err_free_acl;
	}

	pm_runtime_get_sync(&tb->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto err_rpm_put;
	}
	ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
	if (!ret) {
		/* Notify userspace about the change */
		kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
	}
	mutex_unlock(&tb->lock);

err_rpm_put:
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);
err_free_acl:
	kfree(acl);
err_free_str:
	kfree(str);

	return ret ?: count;
}
static DEVICE_ATTR_RW(boot_acl);

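/*
 * boot_acl format, as implemented by the handlers above: the attribute reads
 * and accepts tb->nboot_acl comma-separated slots, an empty string meaning an
 * unused slot. For example, with a two-slot ACL, writing
 *
 *	"de305d54-75b4-431b-adb2-eb6b9e546014,"
 *
 * programs the first slot and clears the second, while writing "," clears
 * both. Writes must supply exactly tb->nboot_acl fields and every non-empty
 * field must be a full-length UUID string.
 */
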
static ssize_t deauthorization_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	const struct tb *tb = container_of(dev, struct tb, dev);
	bool deauthorization = false;

	/* Only meaningful if authorization is supported */
	if (tb->security_level == TB_SECURITY_USER ||
	    tb->security_level == TB_SECURITY_SECURE)
		deauthorization = !!tb->cm_ops->disapprove_switch;

	return sprintf(buf, "%d\n", deauthorization);
}
static DEVICE_ATTR_RO(deauthorization);

static ssize_t iommu_dma_protection_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	/*
	 * Kernel DMA protection is a feature where Thunderbolt security is
	 * handled natively using IOMMU. It is enabled when IOMMU is
	 * enabled and ACPI DMAR table has DMAR_PLATFORM_OPT_IN set.
	 */
	return sprintf(buf, "%d\n",
		       iommu_present(&pci_bus_type) && dmar_platform_optin());
}
static DEVICE_ATTR_RO(iommu_dma_protection);

static ssize_t security_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	const char *name = "unknown";

	if (tb->security_level < ARRAY_SIZE(tb_security_names))
		name = tb_security_names[tb->security_level];

	return sprintf(buf, "%s\n", name);
}
static DEVICE_ATTR_RO(security);

static struct attribute *domain_attrs[] = {
	&dev_attr_boot_acl.attr,
	&dev_attr_deauthorization.attr,
	&dev_attr_iommu_dma_protection.attr,
	&dev_attr_security.attr,
	NULL,
};

static umode_t domain_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct tb *tb = container_of(dev, struct tb, dev);

	if (attr == &dev_attr_boot_acl.attr) {
		if (tb->nboot_acl &&
		    tb->cm_ops->get_boot_acl &&
		    tb->cm_ops->set_boot_acl)
			return attr->mode;
		return 0;
	}

	return attr->mode;
}

static const struct attribute_group domain_attr_group = {
	.is_visible = domain_attr_is_visible,
	.attrs = domain_attrs,
};

static const struct attribute_group *domain_attr_groups[] = {
	&domain_attr_group,
	NULL,
};

struct bus_type tb_bus_type = {
	.name = "thunderbolt",
	.match = tb_service_match,
	.probe = tb_service_probe,
	.remove = tb_service_remove,
	.shutdown = tb_service_shutdown,
};

static void tb_domain_release(struct device *dev)
{
	struct tb *tb = container_of(dev, struct tb, dev);

	tb_ctl_free(tb->ctl);
	destroy_workqueue(tb->wq);
	ida_simple_remove(&tb_domain_ida, tb->index);
	mutex_destroy(&tb->lock);
	kfree(tb);
}

struct device_type tb_domain_type = {
	.name = "thunderbolt_domain",
	.release = tb_domain_release,
};

static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size)
{
	struct tb *tb = data;

	if (!tb->cm_ops->handle_event) {
		tb_warn(tb, "domain does not have event handler\n");
		return true;
	}

	switch (type) {
	case TB_CFG_PKG_XDOMAIN_REQ:
	case TB_CFG_PKG_XDOMAIN_RESP:
		if (tb_is_xdomain_enabled())
			return tb_xdomain_handle_request(tb, type, buf, size);
		break;

	default:
		tb->cm_ops->handle_event(tb, type, buf, size);
	}

	return true;
}

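/*
 * A rough sketch of the expected connection manager lifecycle (my_cm and
 * my_cm_ops are hypothetical; see the kernel-doc below):
 *
 *	tb = tb_domain_alloc(nhi, timeout_msec, sizeof(struct my_cm));
 *	if (!tb)
 *		return NULL;
 *	tb->cm_ops = &my_cm_ops;
 *	if (tb_domain_add(tb)) {
 *		tb_domain_put(tb);	// never added, drop the reference
 *		return NULL;
 *	}
 *	...
 *	tb_domain_remove(tb);		// on teardown
 */
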
/**
 * tb_domain_alloc() - Allocate a domain
 * @nhi: Pointer to the host controller
 * @timeout_msec: Control channel timeout for non-raw messages
 * @privsize: Size of the connection manager private data
 *
 * Allocates and initializes a new Thunderbolt domain. Connection
 * managers are expected to call this and then fill in @cm_ops
 * accordingly.
 *
 * Call tb_domain_put() to release the domain before it has been added
 * to the system.
 *
 * Return: allocated domain structure or %NULL in case of error
 */
struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize)
{
	struct tb *tb;

	/*
	 * Make sure the structure sizes match what the hardware expects
	 * because bit-fields are being used.
	 */
	BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);

	tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
	if (!tb)
		return NULL;

	tb->nhi = nhi;
	mutex_init(&tb->lock);

	tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
	if (tb->index < 0)
		goto err_free;

	tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
	if (!tb->wq)
		goto err_remove_ida;

	tb->ctl = tb_ctl_alloc(nhi, timeout_msec, tb_domain_event_cb, tb);
	if (!tb->ctl)
		goto err_destroy_wq;

	tb->dev.parent = &nhi->pdev->dev;
	tb->dev.bus = &tb_bus_type;
	tb->dev.type = &tb_domain_type;
	tb->dev.groups = domain_attr_groups;
	dev_set_name(&tb->dev, "domain%d", tb->index);
	device_initialize(&tb->dev);

	return tb;

err_destroy_wq:
	destroy_workqueue(tb->wq);
err_remove_ida:
	ida_simple_remove(&tb_domain_ida, tb->index);
err_free:
	kfree(tb);

	return NULL;
}

/**
 * tb_domain_add() - Add domain to the system
 * @tb: Domain to add
 *
 * Starts the domain and adds it to the system. Hotplugging devices will
 * work after this function has returned successfully. In order to remove
 * and release the domain after this function has been called, call
 * tb_domain_remove().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_domain_add(struct tb *tb)
{
	int ret;

	if (WARN_ON(!tb->cm_ops))
		return -EINVAL;

	mutex_lock(&tb->lock);
	/*
	 * tb_schedule_hotplug_handler may be called as soon as the config
	 * channel is started. That's why we have to hold the lock here.
	 */
	tb_ctl_start(tb->ctl);

	if (tb->cm_ops->driver_ready) {
		ret = tb->cm_ops->driver_ready(tb);
		if (ret)
			goto err_ctl_stop;
	}

	tb_dbg(tb, "security level set to %s\n",
	       tb_security_names[tb->security_level]);

	ret = device_add(&tb->dev);
	if (ret)
		goto err_ctl_stop;

	/* Start the domain */
	if (tb->cm_ops->start) {
		ret = tb->cm_ops->start(tb);
		if (ret)
			goto err_domain_del;
	}

	/* This starts event processing */
	mutex_unlock(&tb->lock);

	device_init_wakeup(&tb->dev, true);

	pm_runtime_no_callbacks(&tb->dev);
	pm_runtime_set_active(&tb->dev);
	pm_runtime_enable(&tb->dev);
	pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_use_autosuspend(&tb->dev);

	return 0;

err_domain_del:
	device_del(&tb->dev);
err_ctl_stop:
	tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	return ret;
}

/**
 * tb_domain_remove() - Removes and releases a domain
 * @tb: Domain to remove
 *
 * Stops the domain, removes it from the system and releases all
 * resources once the last reference has been released.
 */
void tb_domain_remove(struct tb *tb)
{
	mutex_lock(&tb->lock);
	if (tb->cm_ops->stop)
		tb->cm_ops->stop(tb);
	/* Stop the domain control traffic */
	tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	flush_workqueue(tb->wq);
	device_unregister(&tb->dev);
}

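/*
 * The PM helpers below only pair stopping/starting of the control channel
 * with the corresponding connection manager callback. They are expected to
 * be wired into the host controller (NHI) driver's dev_pm_ops; whether a
 * given transition does anything beyond that depends on which cm_ops
 * callbacks the connection manager fills in.
 */
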
int tb_domain_suspend_noirq(struct tb *tb)
{
	int ret = 0;

	/*
	 * The control channel interrupt is left enabled during suspend
	 * and taking the lock here prevents any events happening before
	 * we actually have stopped the domain and the control channel.
	 */
	mutex_lock(&tb->lock);
	if (tb->cm_ops->suspend_noirq)
		ret = tb->cm_ops->suspend_noirq(tb);
	if (!ret)
		tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	return ret;
}

/**
 * tb_domain_resume_noirq() - Resume a domain
 * @tb: Domain to resume
 *
 * Re-starts the control channel, and resumes all devices connected to
 * the domain.
 */
int tb_domain_resume_noirq(struct tb *tb)
{
	int ret = 0;

	mutex_lock(&tb->lock);
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->resume_noirq)
		ret = tb->cm_ops->resume_noirq(tb);
	mutex_unlock(&tb->lock);

	return ret;
}

int tb_domain_suspend(struct tb *tb)
{
	return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
}

int tb_domain_freeze_noirq(struct tb *tb)
{
	int ret = 0;

	mutex_lock(&tb->lock);
	if (tb->cm_ops->freeze_noirq)
		ret = tb->cm_ops->freeze_noirq(tb);
	if (!ret)
		tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	return ret;
}

int tb_domain_thaw_noirq(struct tb *tb)
{
	int ret = 0;

	mutex_lock(&tb->lock);
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->thaw_noirq)
		ret = tb->cm_ops->thaw_noirq(tb);
	mutex_unlock(&tb->lock);

	return ret;
}

void tb_domain_complete(struct tb *tb)
{
	if (tb->cm_ops->complete)
		tb->cm_ops->complete(tb);
}

int tb_domain_runtime_suspend(struct tb *tb)
{
	if (tb->cm_ops->runtime_suspend) {
		int ret = tb->cm_ops->runtime_suspend(tb);
		if (ret)
			return ret;
	}
	tb_ctl_stop(tb->ctl);
	return 0;
}

int tb_domain_runtime_resume(struct tb *tb)
{
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->runtime_resume) {
		int ret = tb->cm_ops->runtime_resume(tb);
		if (ret)
			return ret;
	}
	return 0;
}

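/*
 * The approve/disapprove/challenge helpers below implement device
 * authorization for the "user" and "secure" security levels. They are
 * presumably what gets invoked when userspace flips the per-switch
 * "authorized" attribute; the actual policy lives in the connection
 * manager behind cm_ops.
 */
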
int tb_domain_disapprove_switch(struct tb *tb, struct tb_switch *sw)
{
	if (!tb->cm_ops->disapprove_switch)
		return -EPERM;

	return tb->cm_ops->disapprove_switch(tb, sw);
}

/**
 * tb_domain_approve_switch() - Approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * This will approve the switch by connection manager specific means. In
 * case of success the connection manager will create a PCIe tunnel from
 * the parent to @sw.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent_sw;

	if (!tb->cm_ops->approve_switch)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	return tb->cm_ops->approve_switch(tb, sw);
}

/**
 * tb_domain_approve_switch_key() - Approve switch and add key
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function first adds the
 * key to the switch NVM using connection manager specific means. If
 * adding the key is successful, the switch is approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent_sw;
	int ret;

	if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	ret = tb->cm_ops->add_switch_key(tb, sw);
	if (ret)
		return ret;

	return tb->cm_ops->approve_switch(tb, sw);
}

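/*
 * Secure-connect challenge, as implemented below: the host draws a
 * TB_SWITCH_KEY_SIZE byte random challenge, the connection manager hands it
 * to the switch, and the switch is expected to answer with
 *
 *	response = HMAC-SHA256(sw->key, challenge)
 *
 * The switch is approved only if the response matches the HMAC computed
 * locally over the same challenge.
 */
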
/**
 * tb_domain_challenge_switch_key() - Challenge and approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function generates a
 * random challenge and sends it to the switch. The switch responds to
 * this and if the response matches the expected one, the switch is
 * approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
{
	u8 challenge[TB_SWITCH_KEY_SIZE];
	u8 response[TB_SWITCH_KEY_SIZE];
	u8 hmac[TB_SWITCH_KEY_SIZE];
	struct tb_switch *parent_sw;
	struct crypto_shash *tfm;
	struct shash_desc *shash;
	int ret;

	if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	get_random_bytes(challenge, sizeof(challenge));
	ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
	if (ret)
		return ret;

	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
	if (ret)
		goto err_free_tfm;

	shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
			GFP_KERNEL);
	if (!shash) {
		ret = -ENOMEM;
		goto err_free_tfm;
	}

	shash->tfm = tfm;

	memset(hmac, 0, sizeof(hmac));
	ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
	if (ret)
		goto err_free_shash;

	/* The returned HMAC must match the one we calculated */
	if (memcmp(response, hmac, sizeof(hmac))) {
		ret = -EKEYREJECTED;
		goto err_free_shash;
	}

	crypto_free_shash(tfm);
	kfree(shash);

	return tb->cm_ops->approve_switch(tb, sw);

err_free_shash:
	kfree(shash);
err_free_tfm:
	crypto_free_shash(tfm);

	return ret;
}

/**
 * tb_domain_disconnect_pcie_paths() - Disconnect all PCIe paths
 * @tb: Domain whose PCIe paths to disconnect
 *
 * This needs to be called in preparation for NVM upgrade of the host
 * controller. Makes sure all PCIe paths are disconnected.
 *
 * Return: %0 on success and negative errno in case of error.
 */
int tb_domain_disconnect_pcie_paths(struct tb *tb)
{
	if (!tb->cm_ops->disconnect_pcie_paths)
		return -EPERM;

	return tb->cm_ops->disconnect_pcie_paths(tb);
}

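/*
 * The two helpers below are the domain-level entry points for host-to-host
 * (XDomain) DMA paths: the path arguments are the HopIDs used for the tunnel
 * and the ring arguments the host DMA rings, as described in the kernel-doc.
 * They are typically reached through the tb_xdomain_* helpers (for example
 * tb_xdomain_disable_all_paths() used further down) rather than called
 * directly by service drivers.
 */
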
/**
 * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
 * @tb: Domain enabling the DMA paths
 * @xd: XDomain DMA paths are created to
 * @transmit_path: HopID we are using to send out packets
 * @transmit_ring: DMA ring used to send out packets
 * @receive_path: HopID the other end is using to send packets to us
 * @receive_ring: DMA ring used to receive packets from @receive_path
 *
 * Calls connection manager specific method to enable DMA paths to the
 * XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				    int transmit_path, int transmit_ring,
				    int receive_path, int receive_ring)
{
	if (!tb->cm_ops->approve_xdomain_paths)
		return -ENOTSUPP;

	return tb->cm_ops->approve_xdomain_paths(tb, xd, transmit_path,
			transmit_ring, receive_path, receive_ring);
}

/**
 * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
 * @tb: Domain disabling the DMA paths
 * @xd: XDomain whose DMA paths are disconnected
 * @transmit_path: HopID we are using to send out packets
 * @transmit_ring: DMA ring used to send out packets
 * @receive_path: HopID the other end is using to send packets to us
 * @receive_ring: DMA ring used to receive packets from @receive_path
 *
 * Calls connection manager specific method to disconnect DMA paths to
 * the XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				       int transmit_path, int transmit_ring,
				       int receive_path, int receive_ring)
{
	if (!tb->cm_ops->disconnect_xdomain_paths)
		return -ENOTSUPP;

	return tb->cm_ops->disconnect_xdomain_paths(tb, xd, transmit_path,
			transmit_ring, receive_path, receive_ring);
}

static int disconnect_xdomain(struct device *dev, void *data)
{
	struct tb_xdomain *xd;
	struct tb *tb = data;
	int ret = 0;

	xd = tb_to_xdomain(dev);
	if (xd && xd->tb == tb)
		ret = tb_xdomain_disable_all_paths(xd);

	return ret;
}

/**
 * tb_domain_disconnect_all_paths() - Disconnect all paths for the domain
 * @tb: Domain whose paths are disconnected
 *
 * This function can be used to disconnect all paths (PCIe, XDomain) for
 * example in preparation for host NVM firmware upgrade. After this is
 * called the paths cannot be established without resetting the switch.
 *
 * Return: %0 in case of success and negative errno otherwise.
 */
int tb_domain_disconnect_all_paths(struct tb *tb)
{
	int ret;

	ret = tb_domain_disconnect_pcie_paths(tb);
	if (ret)
		return ret;

	return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
}

int tb_domain_init(void)
{
	int ret;

	tb_test_init();
	tb_debugfs_init();
	tb_acpi_init();

	ret = tb_xdomain_init();
	if (ret)
		goto err_acpi;
	ret = bus_register(&tb_bus_type);
	if (ret)
		goto err_xdomain;

	return 0;

err_xdomain:
	tb_xdomain_exit();
err_acpi:
	tb_acpi_exit();
	tb_debugfs_exit();
	tb_test_exit();

	return ret;
}

void tb_domain_exit(void)
{
	bus_unregister(&tb_bus_type);
	ida_destroy(&tb_domain_ida);
	tb_nvm_exit();
	tb_xdomain_exit();
	tb_acpi_exit();
	tb_debugfs_exit();
	tb_test_exit();
}