// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt Cactus Ridge driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "tb.h"

/* Switch authorization from userspace is serialized by this lock */
static DEFINE_MUTEX(switch_lock);

/* Switch NVM support */

#define NVM_DEVID		0x05
#define NVM_VERSION		0x08
#define NVM_CSS			0x10
#define NVM_FLASH_SIZE		0x45

#define NVM_MIN_SIZE		SZ_32K
#define NVM_MAX_SIZE		SZ_512K

static DEFINE_IDA(nvm_ida);

struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);

static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}

static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	if (!st) {
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}

static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}
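/*
 * Rough sketch of the NVM image layout the validation below relies on;
 * only the fields actually checked are shown and the offsets come from
 * the register defines above, not from a spec:
 *
 *   0x00      FARB pointer, low 24 bits = offset of the digital section
 *   NVM_CSS   CSS headers (written separately on generation < 3)
 *   hdr_size  digital section, 4k aligned:
 *               +0x00       digital section size (u16)
 *               +NVM_DEVID  device ID (u16), must match config space
 */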
static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size, hdr_size;
	const u8 *buf = sw->nvm->buf;
	u16 ds_size;
	int ret;

	if (!buf)
		return -EINVAL;

	image_size = sw->nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	if (!sw->safe_mode) {
		u16 device_id;

		/*
		 * Make sure the device ID in the image matches the one
		 * we read from the switch config space.
		 */
		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
		if (device_id != sw->config.device_id)
			return -EINVAL;

		if (sw->generation < 3) {
			/* Write CSS headers first */
			ret = dma_port_flash_write(sw->dma_port,
				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
				DMA_PORT_CSS_MAX_SIZE);
			if (ret)
				return ret;
		}

		/* Skip headers in the image */
		buf += hdr_size;
		image_size -= hdr_size;
	}

	return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
}

static int nvm_authenticate_host(struct tb_switch *sw)
{
	int ret;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		ret = tb_domain_disconnect_all_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this
		 * if everything goes well so getting a timeout is
		 * expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		return ret == -ETIMEDOUT ? 0 : ret;
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return 0;
}

static int nvm_authenticate_device(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	if (ret && ret != -ETIMEDOUT)
		return ret;

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get a timeout for a while).
	 * Once we get a response the device needs to be power cycled in
	 * order for the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;
		if (ret > 0) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}

static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
			      size_t bytes)
{
	struct tb_switch *sw = priv;
	int ret;

	pm_runtime_get_sync(&sw->dev);
	ret = dma_port_flash_read(sw->dma_port, offset, val, bytes);
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
			       size_t bytes)
{
	struct tb_switch *sw = priv;
	int ret = 0;

	if (mutex_lock_interruptible(&switch_lock))
		return -ERESTARTSYS;

	/*
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	 */
	if (!sw->nvm->buf) {
		sw->nvm->buf = vmalloc(NVM_MAX_SIZE);
		if (!sw->nvm->buf) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	sw->nvm->buf_data_size = offset + bytes;
	memcpy(sw->nvm->buf + offset, val, bytes);

unlock:
	mutex_unlock(&switch_lock);

	return ret;
}
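/*
 * Each switch exposes up to two nvmem devices: a read-only
 * "nvm_active" that mirrors the currently running NVM through
 * tb_switch_nvm_read(), and a root-only writable "nvm_non_active"
 * that buffers a new image via tb_switch_nvm_write() until the user
 * triggers authentication.
 */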
static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
					   size_t size, bool active)
{
	struct nvmem_config config;

	memset(&config, 0, sizeof(config));

	if (active) {
		config.name = "nvm_active";
		config.reg_read = tb_switch_nvm_read;
		config.read_only = true;
	} else {
		config.name = "nvm_non_active";
		config.reg_write = tb_switch_nvm_write;
		config.root_only = true;
	}

	config.id = id;
	config.stride = 4;
	config.word_size = 4;
	config.size = size;
	config.dev = &sw->dev;
	config.owner = THIS_MODULE;
	config.priv = sw;

	return nvmem_register(&config);
}

static int tb_switch_nvm_add(struct tb_switch *sw)
{
	struct nvmem_device *nvm_dev;
	struct tb_switch_nvm *nvm;
	u32 val;
	int ret;

	if (!sw->dma_port)
		return 0;

	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
	if (!nvm)
		return -ENOMEM;

	nvm->id = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (!sw->safe_mode) {
		u32 nvm_size, hdr_size;

		ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val,
					  sizeof(val));
		if (ret)
			goto err_ida;

		/*
		 * The low bits encode the flash size as 2^(val & 7)
		 * Mbit, converted to bytes here. What remains after the
		 * headers is split between the active and non-active
		 * images.
		 */
		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
		nvm_size = (SZ_1M << (val & 7)) / 8;
		nvm_size = (nvm_size - hdr_size) / 2;

		ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val,
					  sizeof(val));
		if (ret)
			goto err_ida;

		nvm->major = val >> 16;
		nvm->minor = val >> 8;

		nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true);
		if (IS_ERR(nvm_dev)) {
			ret = PTR_ERR(nvm_dev);
			goto err_ida;
		}
		nvm->active = nvm_dev;
	}

	nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
	if (IS_ERR(nvm_dev)) {
		ret = PTR_ERR(nvm_dev);
		goto err_nvm_active;
	}
	nvm->non_active = nvm_dev;

	mutex_lock(&switch_lock);
	sw->nvm = nvm;
	mutex_unlock(&switch_lock);

	return 0;

err_nvm_active:
	if (nvm->active)
		nvmem_unregister(nvm->active);
err_ida:
	ida_simple_remove(&nvm_ida, nvm->id);
	kfree(nvm);

	return ret;
}

static void tb_switch_nvm_remove(struct tb_switch *sw)
{
	struct tb_switch_nvm *nvm;

	mutex_lock(&switch_lock);
	nvm = sw->nvm;
	sw->nvm = NULL;
	mutex_unlock(&switch_lock);

	if (!nvm)
		return;

	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);

	nvmem_unregister(nvm->non_active);
	if (nvm->active)
		nvmem_unregister(nvm->active);
	ida_simple_remove(&nvm_ida, nvm->id);
	vfree(nvm->buf);
	kfree(nvm);
}

/* port utility functions */

static const char *tb_port_type(struct tb_regs_port_header *port)
{
	switch (port->type >> 16) {
	case 0:
		switch ((u8) port->type) {
		case 0:
			return "Inactive";
		case 1:
			return "Port";
		case 2:
			return "NHI";
		default:
			return "unknown";
		}
	case 0x2:
		return "Ethernet";
	case 0x8:
		return "SATA";
	case 0xe:
		return "DP/HDMI";
	case 0x10:
		return "PCIe";
	case 0x20:
		return "USB";
	default:
		return "unknown";
	}
}

static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
{
	tb_info(tb,
		" Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
		port->port_number, port->vendor_id, port->device_id,
		port->revision, port->thunderbolt_version, tb_port_type(port),
		port->type);
	tb_info(tb, "  Max hop id (in/out): %d/%d\n",
		port->max_in_hop_id, port->max_out_hop_id);
	tb_info(tb, "  Max counters: %d\n", port->max_counters);
	tb_info(tb, "  NFC Credits: %#x\n", port->nfc_credits);
}

/**
 * tb_port_state() - get connectedness state of a port
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
static int tb_port_state(struct tb_port *port)
{
	struct tb_cap_phy phy;
	int res;

	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}
	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
	if (res)
		return res;
	return phy.state;
}

/**
 * tb_wait_for_port() - wait for a port to become ready
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered
 * after switch resume). Otherwise we only wait if a device is
 * registered but the link has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
	int retries = 10;
	int state;

	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

	while (retries--) {
		state = tb_port_state(port);
		if (state < 0)
			return state;
		if (state == TB_PORT_DISABLED) {
			tb_port_info(port, "is disabled (state: 0)\n");
			return 0;
		}
		if (state == TB_PORT_UNPLUGGED) {
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_info(port,
					     "is unplugged (state: 7), retrying...\n");
				msleep(100);
				continue;
			}
			tb_port_info(port, "is unplugged (state: 7)\n");
			return 0;
		}
		if (state == TB_PORT_UP) {
			tb_port_info(port,
				     "is connected, link is up (state: 2)\n");
			return 1;
		}

		/*
		 * After plug-in the state is TB_PORT_CONNECTING. Give it
		 * some time.
		 */
		tb_port_info(port,
			     "is connected, link is not up (state: %d), retrying...\n",
			     state);
		msleep(100);
	}
	tb_port_warn(port,
		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
}
/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
	if (credits == 0)
		return 0;
	tb_port_info(port,
		     "adding %#x NFC credits (%#x -> %#x)",
		     credits,
		     port->config.nfc_credits,
		     port->config.nfc_credits + credits);
	port->config.nfc_credits += credits;
	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, 4, 1);
}

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
	u32 zero[3] = { 0, 0, 0 };

	tb_port_info(port, "clearing counter %d\n", counter);
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}

/**
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
	int res;
	int cap;

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res)
		return res;

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT && port->port != 0) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");
	}

	tb_dump_port(port->sw->tb, &port->config);

	/* TODO: Read dual link port, DP port and more from EEPROM. */
	return 0;
}

/* switch utility functions */

static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
{
	tb_info(tb,
		" Switch: %x:%x (Revision: %d, TB Version: %d)\n",
		sw->vendor_id, sw->device_id, sw->revision,
		sw->thunderbolt_version);
	tb_info(tb, "  Max Port Number: %d\n", sw->max_port_number);
	tb_info(tb, "  Config:\n");
	tb_info(tb,
		"   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
		sw->upstream_port_number, sw->depth,
		(((u64) sw->route_hi) << 32) | sw->route_lo,
		sw->enabled, sw->plug_events_delay);
	tb_info(tb,
		"   unknown1: %#x unknown4: %#x\n",
		sw->__unknown1, sw->__unknown4);
}

/**
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_switch_reset(struct tb *tb, u64 route)
{
	struct tb_cfg_result res;
	struct tb_regs_switch_header header = {
		header.route_hi = route >> 32,
		header.route_lo = route,
		header.enabled = true,
	};

	tb_info(tb, "resetting switch at %llx\n", route);
	res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route,
			       0, 2, 2, 2);
	if (res.err)
		return res.err;
	res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT);
	if (res.err > 0)
		return -EIO;
	return res.err;
}
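/*
 * Illustrative example: a route of 0x0302 is walked from the low byte
 * up, i.e. "downstream port 2 of this switch, then port 3 of the next
 * switch". Each recursion step below consumes the low byte and shifts
 * the route right by TB_ROUTE_SHIFT bits.
 */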
struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route)
{
	u8 next_port = route; /*
			       * Routes use a stride of 8 bits,
			       * even though a port index has 6 bits at most.
			       */
	if (route == 0)
		return sw;
	if (next_port > sw->config.max_port_number)
		return NULL;
	if (tb_is_upstream_port(&sw->ports[next_port]))
		return NULL;
	if (!sw->ports[next_port].remote)
		return NULL;
	return get_switch_at_route(sw->ports[next_port].remote->sw,
				   route >> TB_ROUTE_SHIFT);
}

/**
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
	u32 data;
	int res;

	if (!sw->config.enabled)
		return 0;

	sw->config.plug_events_delay = 0xff;
	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
	if (res)
		return res;

	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
	if (res)
		return res;

	if (active) {
		data = data & 0xFFFFFF83;
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
			break;
		default:
			data |= 4;
		}
	} else {
		data = data | 0x7c;
	}
	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
			   sw->cap_plug_events + 1, 1);
}

static ssize_t authorized_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->authorized);
}

static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
	int ret = -EINVAL;

	if (mutex_lock_interruptible(&switch_lock))
		return -ERESTARTSYS;

	if (sw->authorized)
		goto unlock;

	/*
	 * Make sure there is no PCIe rescan ongoing when a new PCIe
	 * tunnel is created. Otherwise the PCIe rescan code might find
	 * the new tunnel too early.
	 */
	pci_lock_rescan_remove();
	pm_runtime_get_sync(&sw->dev);

	switch (val) {
	/* Approve switch */
	case 1:
		if (sw->key)
			ret = tb_domain_approve_switch_key(sw->tb, sw);
		else
			ret = tb_domain_approve_switch(sw->tb, sw);
		break;

	/* Challenge switch */
	case 2:
		if (sw->key)
			ret = tb_domain_challenge_switch_key(sw->tb, sw);
		break;

	default:
		break;
	}

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
	pci_unlock_rescan_remove();

	if (!ret) {
		sw->authorized = val;
		/* Notify the status change to userspace */
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
	}

unlock:
	mutex_unlock(&switch_lock);
	return ret;
}

static ssize_t authorized_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int val;
	ssize_t ret;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 2)
		return -EINVAL;

	ret = tb_switch_set_authorized(sw, val);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(authorized);
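/*
 * Example from userspace (hypothetical device name 0-1): writing 1
 * approves the switch, writing 2 challenges it with the stored key:
 *
 *	# echo 1 > /sys/bus/thunderbolt/devices/0-1/authorized
 */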
static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->boot);
}
static DEVICE_ATTR_RO(boot);

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
}
static DEVICE_ATTR_RO(device_name);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	ssize_t ret;

	if (mutex_lock_interruptible(&switch_lock))
		return -ERESTARTSYS;

	if (sw->key)
		ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
	else
		ret = sprintf(buf, "\n");

	mutex_unlock(&switch_lock);
	return ret;
}

static ssize_t key_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u8 key[TB_SWITCH_KEY_SIZE];
	ssize_t ret = count;
	bool clear = false;

	if (!strcmp(buf, "\n"))
		clear = true;
	else if (hex2bin(key, buf, sizeof(key)))
		return -EINVAL;

	if (mutex_lock_interruptible(&switch_lock))
		return -ERESTARTSYS;

	if (sw->authorized) {
		ret = -EBUSY;
	} else {
		kfree(sw->key);
		if (clear) {
			sw->key = NULL;
		} else {
			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
			if (!sw->key)
				ret = -ENOMEM;
		}
	}

	mutex_unlock(&switch_lock);
	return ret;
}
static DEVICE_ATTR(key, 0600, key_show, key_store);

static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u32 status;

	nvm_get_auth_status(sw, &status);
	return sprintf(buf, "%#x\n", status);
}

static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	bool val;
	int ret;

	if (mutex_lock_interruptible(&switch_lock))
		return -ERESTARTSYS;

	/* If NVMem devices are not yet added */
	if (!sw->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtobool(buf, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear the authentication status */
	nvm_clear_auth_status(sw);

	if (val) {
		if (!sw->nvm->buf) {
			ret = -EINVAL;
			goto exit_unlock;
		}

		pm_runtime_get_sync(&sw->dev);
		ret = nvm_validate_and_write(sw);
		if (ret) {
			pm_runtime_mark_last_busy(&sw->dev);
			pm_runtime_put_autosuspend(&sw->dev);
			goto exit_unlock;
		}

		sw->nvm->authenticating = true;

		if (!tb_route(sw))
			ret = nvm_authenticate_host(sw);
		else
			ret = nvm_authenticate_device(sw);
		pm_runtime_mark_last_busy(&sw->dev);
		pm_runtime_put_autosuspend(&sw->dev);
	}

exit_unlock:
	mutex_unlock(&switch_lock);

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);
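/*
 * Typical NVM upgrade sequence from userspace (sketch, exact paths
 * depend on the device):
 *
 *	# cat image.bin > .../nvm_non_active0/nvmem
 *	# echo 1 > .../nvm_authenticate
 *
 * On success the switch power cycles itself and comes back up running
 * the new NVM.
 */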
static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int ret;

	if (mutex_lock_interruptible(&switch_lock))
		return -ERESTARTSYS;

	if (sw->safe_mode)
		ret = -ENODATA;
	else if (!sw->nvm)
		ret = -EAGAIN;
	else
		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);

	mutex_unlock(&switch_lock);

	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%pUb\n", sw->uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *switch_attrs[] = {
	&dev_attr_authorized.attr,
	&dev_attr_boot.attr,
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_key.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	&dev_attr_unique_id.attr,
	NULL,
};

static umode_t switch_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct tb_switch *sw = tb_to_switch(dev);

	if (attr == &dev_attr_key.attr) {
		if (tb_route(sw) &&
		    sw->tb->security_level == TB_SECURITY_SECURE &&
		    sw->security_level == TB_SECURITY_SECURE)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate.attr ||
		   attr == &dev_attr_nvm_version.attr) {
		if (sw->dma_port)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_boot.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	}

	return sw->safe_mode ? 0 : attr->mode;
}

static struct attribute_group switch_group = {
	.is_visible = switch_attr_is_visible,
	.attrs = switch_attrs,
};

static const struct attribute_group *switch_groups[] = {
	&switch_group,
	NULL,
};

static void tb_switch_release(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);

	dma_port_free(sw->dma_port);

	kfree(sw->uuid);
	kfree(sw->device_name);
	kfree(sw->vendor_name);
	kfree(sw->ports);
	kfree(sw->drom);
	kfree(sw->key);
	kfree(sw);
}
/*
 * Currently only need to provide the callbacks. Everything else is handled
 * in the connection manager.
 */
static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
{
	return 0;
}

static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops tb_switch_pm_ops = {
	SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
			   NULL)
};

struct device_type tb_switch_type = {
	.name = "thunderbolt_device",
	.release = tb_switch_release,
	.pm = &tb_switch_pm_ops,
};

static int tb_switch_get_generation(struct tb_switch *sw)
{
	switch (sw->config.device_id) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
	case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
		return 1;

	case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
		return 2;

	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
		return 3;

	default:
		/*
		 * For unknown switches assume generation to be 1 to be
		 * on the safe side.
		 */
		tb_sw_warn(sw, "unsupported switch device id %#x\n",
			   sw->config.device_id);
		return 1;
	}
}
/**
 * tb_switch_alloc() - allocate a switch
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * Allocates and initializes a switch. Will not upload configuration to
 * the switch. For that you need to call tb_switch_configure()
 * separately. The returned switch should be released by calling
 * tb_switch_put().
 *
 * Return: Pointer to the allocated switch or %NULL in case of failure
 */
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
				  u64 route)
{
	int i;
	int cap;
	struct tb_switch *sw;
	int upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);

	if (upstream_port < 0)
		return NULL;

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return NULL;

	sw->tb = tb;
	if (tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5))
		goto err_free_sw_ports;

	tb_info(tb, "current switch config:\n");
	tb_dump_switch(tb, &sw->config);

	/* configure switch */
	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = tb_route_length(route);
	sw->config.route_lo = route;
	sw->config.route_hi = route >> 32;
	sw->config.enabled = 0;

	/* initialize ports */
	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
			    GFP_KERNEL);
	if (!sw->ports)
		goto err_free_sw_ports;

	for (i = 0; i <= sw->config.max_port_number; i++) {
		/* minimum setup for tb_find_cap and tb_drom_read to work */
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;
	}

	sw->generation = tb_switch_get_generation(sw);

	cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
	if (cap < 0) {
		tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
		goto err_free_sw_ports;
	}
	sw->cap_plug_events = cap;

	/* Root switch is always authorized */
	if (!route)
		sw->authorized = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;

err_free_sw_ports:
	kfree(sw->ports);
	kfree(sw);

	return NULL;
}

/**
 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * This creates a switch in safe mode. This means the switch pretty much
 * lacks all capabilities except DMA configuration port before it is
 * flashed with a valid NVM firmware.
 *
 * The returned switch must be released by calling tb_switch_put().
 *
 * Return: Pointer to the allocated switch or %NULL in case of failure
 */
struct tb_switch *
tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
{
	struct tb_switch *sw;

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return NULL;

	sw->tb = tb;
	sw->config.depth = tb_route_length(route);
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->safe_mode = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;
}
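/*
 * Typical connection-manager lifecycle (sketch):
 *
 *	sw = tb_switch_alloc(tb, parent, route);
 *	if (sw && !tb_switch_configure(sw))
 *		tb_switch_add(sw);
 *	...
 *	tb_switch_remove(sw);
 */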
/**
 * tb_switch_configure() - Uploads configuration to the switch
 * @sw: Switch to configure
 *
 * Call this function before the switch is added to the system. It will
 * upload the configuration to the switch and make it available for the
 * connection manager to use.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_configure(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	u64 route;
	int ret;

	route = tb_route(sw);
	tb_info(tb,
		"initializing Switch at %#llx (depth: %d, up port: %d)\n",
		route, tb_route_length(route), sw->config.upstream_port_number);

	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
		tb_sw_warn(sw, "unknown switch vendor id %#x\n",
			   sw->config.vendor_id);

	sw->config.enabled = 1;

	/* upload configuration */
	ret = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3);
	if (ret)
		return ret;

	return tb_plug_events_active(sw, true);
}

static void tb_switch_set_uuid(struct tb_switch *sw)
{
	u32 uuid[4];
	int cap;

	if (sw->uuid)
		return;

	/*
	 * The newer controllers include fused UUID as part of link
	 * controller specific registers.
	 */
	cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
	if (cap > 0) {
		tb_sw_read(sw, uuid, TB_CFG_SWITCH, cap + 3, 4);
	} else {
		/*
		 * ICM generates UUID based on UID and fills the upper
		 * two words with ones. This is not strictly following
		 * UUID format but we want to be compatible with it so
		 * we do the same here.
		 */
		uuid[0] = sw->uid & 0xffffffff;
		uuid[1] = (sw->uid >> 32) & 0xffffffff;
		uuid[2] = 0xffffffff;
		uuid[3] = 0xffffffff;
	}

	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
}

static int tb_switch_add_dma_port(struct tb_switch *sw)
{
	u32 status;
	int ret;

	switch (sw->generation) {
	case 3:
		break;

	case 2:
		/* Only root switch can be upgraded */
		if (tb_route(sw))
			return 0;
		break;

	default:
		/*
		 * DMA port is the only thing available when the switch
		 * is in safe mode.
		 */
		if (!sw->safe_mode)
			return 0;
		break;
	}

	if (sw->no_nvm_upgrade)
		return 0;

	sw->dma_port = dma_port_alloc(sw);
	if (!sw->dma_port)
		return 0;

	/*
	 * Check status of the previous flash authentication. If there
	 * is one we need to power cycle the switch in any case to make
	 * it functional again.
	 */
	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
	if (ret <= 0)
		return ret;

	if (status) {
		tb_sw_info(sw, "switch flash authentication failed\n");
		tb_switch_set_uuid(sw);
		nvm_set_auth_status(sw, status);
	}

	tb_sw_info(sw, "power cycling the switch now\n");
	dma_port_power_cycle(sw->dma_port);

	/*
	 * We return an error here which causes the switch adding to
	 * fail. The switch should appear back after the power cycle is
	 * complete.
	 */
	return -ESHUTDOWN;
}
/**
 * tb_switch_add() - Add a switch to the domain
 * @sw: Switch to add
 *
 * This is the last step in adding a switch to the domain. It will read
 * identification information from DROM and initialize ports so that
 * they can be used to connect other switches. The switch will be
 * exposed to the userspace when this function successfully returns. To
 * remove and release the switch, call tb_switch_remove().
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_add(struct tb_switch *sw)
{
	int i, ret;

	/*
	 * Initialize DMA control port now before we read DROM. Recent
	 * host controllers have more complete DROM on NVM that includes
	 * vendor and model identification strings which we then expose
	 * to the userspace. NVM can be accessed through DMA
	 * configuration based mailbox.
	 */
	ret = tb_switch_add_dma_port(sw);
	if (ret)
		return ret;

	if (!sw->safe_mode) {
		/* read drom */
		ret = tb_drom_read(sw);
		if (ret) {
			tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
			return ret;
		}
		tb_sw_info(sw, "uid: %#llx\n", sw->uid);

		tb_switch_set_uuid(sw);

		for (i = 0; i <= sw->config.max_port_number; i++) {
			if (sw->ports[i].disabled) {
				tb_port_info(&sw->ports[i], "disabled by eeprom\n");
				continue;
			}
			ret = tb_init_port(&sw->ports[i]);
			if (ret)
				return ret;
		}
	}

	ret = device_add(&sw->dev);
	if (ret)
		return ret;

	ret = tb_switch_nvm_add(sw);
	if (ret) {
		device_del(&sw->dev);
		return ret;
	}

	pm_runtime_set_active(&sw->dev);
	if (sw->rpm) {
		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
		pm_runtime_use_autosuspend(&sw->dev);
		pm_runtime_mark_last_busy(&sw->dev);
		pm_runtime_enable(&sw->dev);
		pm_request_autosuspend(&sw->dev);
	}

	return 0;
}

/**
 * tb_switch_remove() - Remove and release a switch
 * @sw: Switch to remove
 *
 * This will remove the switch from the domain and release it when the
 * last reference to it is dropped. If there are switches connected
 * below this switch, they will be removed as well.
 */
void tb_switch_remove(struct tb_switch *sw)
{
	int i;

	if (sw->rpm) {
		pm_runtime_get_sync(&sw->dev);
		pm_runtime_disable(&sw->dev);
	}

	/* port 0 is the switch itself and never has a remote */
	for (i = 1; i <= sw->config.max_port_number; i++) {
		if (tb_is_upstream_port(&sw->ports[i]))
			continue;
		if (sw->ports[i].remote)
			tb_switch_remove(sw->ports[i].remote->sw);
		sw->ports[i].remote = NULL;
		if (sw->ports[i].xdomain)
			tb_xdomain_remove(sw->ports[i].xdomain);
		sw->ports[i].xdomain = NULL;
	}

	if (!sw->is_unplugged)
		tb_plug_events_active(sw, false);

	tb_switch_nvm_remove(sw);
	device_unregister(&sw->dev);
}

/**
 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
 */
void tb_sw_set_unplugged(struct tb_switch *sw)
{
	int i;

	if (sw == sw->tb->root_switch) {
		tb_sw_WARN(sw, "cannot unplug root switch\n");
		return;
	}
	if (sw->is_unplugged) {
		tb_sw_WARN(sw, "is_unplugged already set\n");
		return;
	}
	sw->is_unplugged = true;
	for (i = 0; i <= sw->config.max_port_number; i++) {
		if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
			tb_sw_set_unplugged(sw->ports[i].remote->sw);
	}
}
int tb_switch_resume(struct tb_switch *sw)
{
	int i, err;

	tb_sw_info(sw, "resuming switch\n");

	/*
	 * Check the UID of the connected switches except for the root
	 * switch which we assume cannot be removed.
	 */
	if (tb_route(sw)) {
		u64 uid;

		err = tb_drom_read_uid_only(sw, &uid);
		if (err) {
			tb_sw_warn(sw, "uid read failed\n");
			return err;
		}
		if (sw->uid != uid) {
			tb_sw_info(sw,
				"changed while suspended (uid %#llx -> %#llx)\n",
				sw->uid, uid);
			return -ENODEV;
		}
	}

	/* upload configuration */
	err = tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3);
	if (err)
		return err;

	err = tb_plug_events_active(sw, true);
	if (err)
		return err;

	/* check for surviving downstream switches */
	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];

		if (tb_is_upstream_port(port))
			continue;
		if (!port->remote)
			continue;
		if (tb_wait_for_port(port, true) <= 0
			|| tb_switch_resume(port->remote->sw)) {
			tb_port_warn(port,
				     "lost during suspend, disconnecting\n");
			tb_sw_set_unplugged(port->remote->sw);
		}
	}
	return 0;
}

void tb_switch_suspend(struct tb_switch *sw)
{
	int i, err;

	err = tb_plug_events_active(sw, false);
	if (err)
		return;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
			tb_switch_suspend(sw->ports[i].remote->sw);
	}
	/*
	 * TODO: invoke tb_cfg_prepare_to_sleep here? does not seem to have any
	 * effect?
	 */
}

struct tb_sw_lookup {
	struct tb *tb;
	u8 link;
	u8 depth;
	const uuid_t *uuid;
	u64 route;
};

static int tb_switch_match(struct device *dev, void *data)
{
	struct tb_switch *sw = tb_to_switch(dev);
	struct tb_sw_lookup *lookup = data;

	if (!sw)
		return 0;
	if (sw->tb != lookup->tb)
		return 0;

	if (lookup->uuid)
		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));

	if (lookup->route) {
		return sw->config.route_lo == lower_32_bits(lookup->route) &&
		       sw->config.route_hi == upper_32_bits(lookup->route);
	}

	/* Root switch is matched only by depth */
	if (!lookup->depth)
		return !sw->depth;

	return sw->link == lookup->link && sw->depth == lookup->depth;
}

/**
 * tb_switch_find_by_link_depth() - Find switch by link and depth
 * @tb: Domain the switch belongs to
 * @link: Link number the switch is connected to
 * @depth: Depth of the switch in the link
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.link = link;
	lookup.depth = depth;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}
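/*
 * Usage sketch for the lookup helpers; every successful lookup must be
 * balanced with tb_switch_put():
 *
 *	sw = tb_switch_find_by_route(tb, route);
 *	if (sw) {
 *		...
 *		tb_switch_put(sw);
 *	}
 */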
/**
 * tb_switch_find_by_uuid() - Find switch by UUID
 * @tb: Domain the switch belongs to
 * @uuid: UUID to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.uuid = uuid;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_route() - Find switch by route string
 * @tb: Domain the switch belongs to
 * @route: Route string to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	if (!route)
		return tb_switch_get(tb->root_switch);

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.route = route;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

void tb_switch_exit(void)
{
	ida_destroy(&nvm_ida);
}