1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Thunderbolt driver - switch/port utility functions 4 * 5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> 6 * Copyright (C) 2018, Intel Corporation 7 */ 8 9 #include <linux/delay.h> 10 #include <linux/idr.h> 11 #include <linux/nvmem-provider.h> 12 #include <linux/pm_runtime.h> 13 #include <linux/sched/signal.h> 14 #include <linux/sizes.h> 15 #include <linux/slab.h> 16 17 #include "tb.h" 18 19 /* Switch NVM support */ 20 21 #define NVM_CSS 0x10 22 23 struct nvm_auth_status { 24 struct list_head list; 25 uuid_t uuid; 26 u32 status; 27 }; 28 29 enum nvm_write_ops { 30 WRITE_AND_AUTHENTICATE = 1, 31 WRITE_ONLY = 2, 32 }; 33 34 /* 35 * Hold NVM authentication failure status per switch This information 36 * needs to stay around even when the switch gets power cycled so we 37 * keep it separately. 38 */ 39 static LIST_HEAD(nvm_auth_status_cache); 40 static DEFINE_MUTEX(nvm_auth_status_lock); 41 42 static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw) 43 { 44 struct nvm_auth_status *st; 45 46 list_for_each_entry(st, &nvm_auth_status_cache, list) { 47 if (uuid_equal(&st->uuid, sw->uuid)) 48 return st; 49 } 50 51 return NULL; 52 } 53 54 static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status) 55 { 56 struct nvm_auth_status *st; 57 58 mutex_lock(&nvm_auth_status_lock); 59 st = __nvm_get_auth_status(sw); 60 mutex_unlock(&nvm_auth_status_lock); 61 62 *status = st ? 
st->status : 0; 63 } 64 65 static void nvm_set_auth_status(const struct tb_switch *sw, u32 status) 66 { 67 struct nvm_auth_status *st; 68 69 if (WARN_ON(!sw->uuid)) 70 return; 71 72 mutex_lock(&nvm_auth_status_lock); 73 st = __nvm_get_auth_status(sw); 74 75 if (!st) { 76 st = kzalloc(sizeof(*st), GFP_KERNEL); 77 if (!st) 78 goto unlock; 79 80 memcpy(&st->uuid, sw->uuid, sizeof(st->uuid)); 81 INIT_LIST_HEAD(&st->list); 82 list_add_tail(&st->list, &nvm_auth_status_cache); 83 } 84 85 st->status = status; 86 unlock: 87 mutex_unlock(&nvm_auth_status_lock); 88 } 89 90 static void nvm_clear_auth_status(const struct tb_switch *sw) 91 { 92 struct nvm_auth_status *st; 93 94 mutex_lock(&nvm_auth_status_lock); 95 st = __nvm_get_auth_status(sw); 96 if (st) { 97 list_del(&st->list); 98 kfree(st); 99 } 100 mutex_unlock(&nvm_auth_status_lock); 101 } 102 103 static int nvm_validate_and_write(struct tb_switch *sw) 104 { 105 unsigned int image_size, hdr_size; 106 const u8 *buf = sw->nvm->buf; 107 u16 ds_size; 108 int ret; 109 110 if (!buf) 111 return -EINVAL; 112 113 image_size = sw->nvm->buf_data_size; 114 if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE) 115 return -EINVAL; 116 117 /* 118 * FARB pointer must point inside the image and must at least 119 * contain parts of the digital section we will be reading here. 120 */ 121 hdr_size = (*(u32 *)buf) & 0xffffff; 122 if (hdr_size + NVM_DEVID + 2 >= image_size) 123 return -EINVAL; 124 125 /* Digital section start should be aligned to 4k page */ 126 if (!IS_ALIGNED(hdr_size, SZ_4K)) 127 return -EINVAL; 128 129 /* 130 * Read digital section size and check that it also fits inside 131 * the image. 132 */ 133 ds_size = *(u16 *)(buf + hdr_size); 134 if (ds_size >= image_size) 135 return -EINVAL; 136 137 if (!sw->safe_mode) { 138 u16 device_id; 139 140 /* 141 * Make sure the device ID in the image matches the one 142 * we read from the switch config space. 
143 */ 144 device_id = *(u16 *)(buf + hdr_size + NVM_DEVID); 145 if (device_id != sw->config.device_id) 146 return -EINVAL; 147 148 if (sw->generation < 3) { 149 /* Write CSS headers first */ 150 ret = dma_port_flash_write(sw->dma_port, 151 DMA_PORT_CSS_ADDRESS, buf + NVM_CSS, 152 DMA_PORT_CSS_MAX_SIZE); 153 if (ret) 154 return ret; 155 } 156 157 /* Skip headers in the image */ 158 buf += hdr_size; 159 image_size -= hdr_size; 160 } 161 162 if (tb_switch_is_usb4(sw)) 163 ret = usb4_switch_nvm_write(sw, 0, buf, image_size); 164 else 165 ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size); 166 if (!ret) 167 sw->nvm->flushed = true; 168 return ret; 169 } 170 171 static int nvm_authenticate_host_dma_port(struct tb_switch *sw) 172 { 173 int ret = 0; 174 175 /* 176 * Root switch NVM upgrade requires that we disconnect the 177 * existing paths first (in case it is not in safe mode 178 * already). 179 */ 180 if (!sw->safe_mode) { 181 u32 status; 182 183 ret = tb_domain_disconnect_all_paths(sw->tb); 184 if (ret) 185 return ret; 186 /* 187 * The host controller goes away pretty soon after this if 188 * everything goes well so getting timeout is expected. 189 */ 190 ret = dma_port_flash_update_auth(sw->dma_port); 191 if (!ret || ret == -ETIMEDOUT) 192 return 0; 193 194 /* 195 * Any error from update auth operation requires power 196 * cycling of the host router. 197 */ 198 tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n"); 199 if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0) 200 nvm_set_auth_status(sw, status); 201 } 202 203 /* 204 * From safe mode we can get out by just power cycling the 205 * switch. 
206 */ 207 dma_port_power_cycle(sw->dma_port); 208 return ret; 209 } 210 211 static int nvm_authenticate_device_dma_port(struct tb_switch *sw) 212 { 213 int ret, retries = 10; 214 215 ret = dma_port_flash_update_auth(sw->dma_port); 216 switch (ret) { 217 case 0: 218 case -ETIMEDOUT: 219 case -EACCES: 220 case -EINVAL: 221 /* Power cycle is required */ 222 break; 223 default: 224 return ret; 225 } 226 227 /* 228 * Poll here for the authentication status. It takes some time 229 * for the device to respond (we get timeout for a while). Once 230 * we get response the device needs to be power cycled in order 231 * to the new NVM to be taken into use. 232 */ 233 do { 234 u32 status; 235 236 ret = dma_port_flash_update_auth_status(sw->dma_port, &status); 237 if (ret < 0 && ret != -ETIMEDOUT) 238 return ret; 239 if (ret > 0) { 240 if (status) { 241 tb_sw_warn(sw, "failed to authenticate NVM\n"); 242 nvm_set_auth_status(sw, status); 243 } 244 245 tb_sw_info(sw, "power cycling the switch now\n"); 246 dma_port_power_cycle(sw->dma_port); 247 return 0; 248 } 249 250 msleep(500); 251 } while (--retries); 252 253 return -ETIMEDOUT; 254 } 255 256 static void nvm_authenticate_start_dma_port(struct tb_switch *sw) 257 { 258 struct pci_dev *root_port; 259 260 /* 261 * During host router NVM upgrade we should not allow root port to 262 * go into D3cold because some root ports cannot trigger PME 263 * itself. To be on the safe side keep the root port in D0 during 264 * the whole upgrade process. 
265 */ 266 root_port = pcie_find_root_port(sw->tb->nhi->pdev); 267 if (root_port) 268 pm_runtime_get_noresume(&root_port->dev); 269 } 270 271 static void nvm_authenticate_complete_dma_port(struct tb_switch *sw) 272 { 273 struct pci_dev *root_port; 274 275 root_port = pcie_find_root_port(sw->tb->nhi->pdev); 276 if (root_port) 277 pm_runtime_put(&root_port->dev); 278 } 279 280 static inline bool nvm_readable(struct tb_switch *sw) 281 { 282 if (tb_switch_is_usb4(sw)) { 283 /* 284 * USB4 devices must support NVM operations but it is 285 * optional for hosts. Therefore we query the NVM sector 286 * size here and if it is supported assume NVM 287 * operations are implemented. 288 */ 289 return usb4_switch_nvm_sector_size(sw) > 0; 290 } 291 292 /* Thunderbolt 2 and 3 devices support NVM through DMA port */ 293 return !!sw->dma_port; 294 } 295 296 static inline bool nvm_upgradeable(struct tb_switch *sw) 297 { 298 if (sw->no_nvm_upgrade) 299 return false; 300 return nvm_readable(sw); 301 } 302 303 static inline int nvm_read(struct tb_switch *sw, unsigned int address, 304 void *buf, size_t size) 305 { 306 if (tb_switch_is_usb4(sw)) 307 return usb4_switch_nvm_read(sw, address, buf, size); 308 return dma_port_flash_read(sw->dma_port, address, buf, size); 309 } 310 311 static int nvm_authenticate(struct tb_switch *sw) 312 { 313 int ret; 314 315 if (tb_switch_is_usb4(sw)) 316 return usb4_switch_nvm_authenticate(sw); 317 318 if (!tb_route(sw)) { 319 nvm_authenticate_start_dma_port(sw); 320 ret = nvm_authenticate_host_dma_port(sw); 321 } else { 322 ret = nvm_authenticate_device_dma_port(sw); 323 } 324 325 return ret; 326 } 327 328 static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val, 329 size_t bytes) 330 { 331 struct tb_nvm *nvm = priv; 332 struct tb_switch *sw = tb_to_switch(nvm->dev); 333 int ret; 334 335 pm_runtime_get_sync(&sw->dev); 336 337 if (!mutex_trylock(&sw->tb->lock)) { 338 ret = restart_syscall(); 339 goto out; 340 } 341 342 ret = nvm_read(sw, 
offset, val, bytes); 343 mutex_unlock(&sw->tb->lock); 344 345 out: 346 pm_runtime_mark_last_busy(&sw->dev); 347 pm_runtime_put_autosuspend(&sw->dev); 348 349 return ret; 350 } 351 352 static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val, 353 size_t bytes) 354 { 355 struct tb_nvm *nvm = priv; 356 struct tb_switch *sw = tb_to_switch(nvm->dev); 357 int ret; 358 359 if (!mutex_trylock(&sw->tb->lock)) 360 return restart_syscall(); 361 362 /* 363 * Since writing the NVM image might require some special steps, 364 * for example when CSS headers are written, we cache the image 365 * locally here and handle the special cases when the user asks 366 * us to authenticate the image. 367 */ 368 ret = tb_nvm_write_buf(nvm, offset, val, bytes); 369 mutex_unlock(&sw->tb->lock); 370 371 return ret; 372 } 373 374 static int tb_switch_nvm_add(struct tb_switch *sw) 375 { 376 struct tb_nvm *nvm; 377 u32 val; 378 int ret; 379 380 if (!nvm_readable(sw)) 381 return 0; 382 383 /* 384 * The NVM format of non-Intel hardware is not known so 385 * currently restrict NVM upgrade for Intel hardware. We may 386 * relax this in the future when we learn other NVM formats. 387 */ 388 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL && 389 sw->config.vendor_id != 0x8087) { 390 dev_info(&sw->dev, 391 "NVM format of vendor %#x is not known, disabling NVM upgrade\n", 392 sw->config.vendor_id); 393 return 0; 394 } 395 396 nvm = tb_nvm_alloc(&sw->dev); 397 if (IS_ERR(nvm)) 398 return PTR_ERR(nvm); 399 400 /* 401 * If the switch is in safe-mode the only accessible portion of 402 * the NVM is the non-active one where userspace is expected to 403 * write new functional NVM. 404 */ 405 if (!sw->safe_mode) { 406 u32 nvm_size, hdr_size; 407 408 ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val)); 409 if (ret) 410 goto err_nvm; 411 412 hdr_size = sw->generation < 3 ? 
SZ_8K : SZ_16K; 413 nvm_size = (SZ_1M << (val & 7)) / 8; 414 nvm_size = (nvm_size - hdr_size) / 2; 415 416 ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val)); 417 if (ret) 418 goto err_nvm; 419 420 nvm->major = val >> 16; 421 nvm->minor = val >> 8; 422 423 ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read); 424 if (ret) 425 goto err_nvm; 426 } 427 428 if (!sw->no_nvm_upgrade) { 429 ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE, 430 tb_switch_nvm_write); 431 if (ret) 432 goto err_nvm; 433 } 434 435 sw->nvm = nvm; 436 return 0; 437 438 err_nvm: 439 tb_nvm_free(nvm); 440 return ret; 441 } 442 443 static void tb_switch_nvm_remove(struct tb_switch *sw) 444 { 445 struct tb_nvm *nvm; 446 447 nvm = sw->nvm; 448 sw->nvm = NULL; 449 450 if (!nvm) 451 return; 452 453 /* Remove authentication status in case the switch is unplugged */ 454 if (!nvm->authenticating) 455 nvm_clear_auth_status(sw); 456 457 tb_nvm_free(nvm); 458 } 459 460 /* port utility functions */ 461 462 static const char *tb_port_type(struct tb_regs_port_header *port) 463 { 464 switch (port->type >> 16) { 465 case 0: 466 switch ((u8) port->type) { 467 case 0: 468 return "Inactive"; 469 case 1: 470 return "Port"; 471 case 2: 472 return "NHI"; 473 default: 474 return "unknown"; 475 } 476 case 0x2: 477 return "Ethernet"; 478 case 0x8: 479 return "SATA"; 480 case 0xe: 481 return "DP/HDMI"; 482 case 0x10: 483 return "PCIe"; 484 case 0x20: 485 return "USB"; 486 default: 487 return "unknown"; 488 } 489 } 490 491 static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port) 492 { 493 tb_dbg(tb, 494 " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n", 495 port->port_number, port->vendor_id, port->device_id, 496 port->revision, port->thunderbolt_version, tb_port_type(port), 497 port->type); 498 tb_dbg(tb, " Max hop id (in/out): %d/%d\n", 499 port->max_in_hop_id, port->max_out_hop_id); 500 tb_dbg(tb, " Max counters: %d\n", port->max_counters); 501 tb_dbg(tb, " NFC Credits: %#x\n", 
port->nfc_credits); 502 } 503 504 /** 505 * tb_port_state() - get connectedness state of a port 506 * @port: the port to check 507 * 508 * The port must have a TB_CAP_PHY (i.e. it should be a real port). 509 * 510 * Return: Returns an enum tb_port_state on success or an error code on failure. 511 */ 512 int tb_port_state(struct tb_port *port) 513 { 514 struct tb_cap_phy phy; 515 int res; 516 if (port->cap_phy == 0) { 517 tb_port_WARN(port, "does not have a PHY\n"); 518 return -EINVAL; 519 } 520 res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2); 521 if (res) 522 return res; 523 return phy.state; 524 } 525 526 /** 527 * tb_wait_for_port() - wait for a port to become ready 528 * @port: Port to wait 529 * @wait_if_unplugged: Wait also when port is unplugged 530 * 531 * Wait up to 1 second for a port to reach state TB_PORT_UP. If 532 * wait_if_unplugged is set then we also wait if the port is in state 533 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after 534 * switch resume). Otherwise we only wait if a device is registered but the link 535 * has not yet been established. 536 * 537 * Return: Returns an error code on failure. Returns 0 if the port is not 538 * connected or failed to reach state TB_PORT_UP within one second. Returns 1 539 * if the port is connected and in state TB_PORT_UP. 
540 */ 541 int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged) 542 { 543 int retries = 10; 544 int state; 545 if (!port->cap_phy) { 546 tb_port_WARN(port, "does not have PHY\n"); 547 return -EINVAL; 548 } 549 if (tb_is_upstream_port(port)) { 550 tb_port_WARN(port, "is the upstream port\n"); 551 return -EINVAL; 552 } 553 554 while (retries--) { 555 state = tb_port_state(port); 556 if (state < 0) 557 return state; 558 if (state == TB_PORT_DISABLED) { 559 tb_port_dbg(port, "is disabled (state: 0)\n"); 560 return 0; 561 } 562 if (state == TB_PORT_UNPLUGGED) { 563 if (wait_if_unplugged) { 564 /* used during resume */ 565 tb_port_dbg(port, 566 "is unplugged (state: 7), retrying...\n"); 567 msleep(100); 568 continue; 569 } 570 tb_port_dbg(port, "is unplugged (state: 7)\n"); 571 return 0; 572 } 573 if (state == TB_PORT_UP) { 574 tb_port_dbg(port, "is connected, link is up (state: 2)\n"); 575 return 1; 576 } 577 578 /* 579 * After plug-in the state is TB_PORT_CONNECTING. Give it some 580 * time. 581 */ 582 tb_port_dbg(port, 583 "is connected, link is not up (state: %d), retrying...\n", 584 state); 585 msleep(100); 586 } 587 tb_port_warn(port, 588 "failed to reach state TB_PORT_UP. Ignoring port...\n"); 589 return 0; 590 } 591 592 /** 593 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port 594 * @port: Port to add/remove NFC credits 595 * @credits: Credits to add/remove 596 * 597 * Change the number of NFC credits allocated to @port by @credits. To remove 598 * NFC credits pass a negative amount of credits. 599 * 600 * Return: Returns 0 on success or an error code on failure. 601 */ 602 int tb_port_add_nfc_credits(struct tb_port *port, int credits) 603 { 604 u32 nfc_credits; 605 606 if (credits == 0 || port->sw->is_unplugged) 607 return 0; 608 609 /* 610 * USB4 restricts programming NFC buffers to lane adapters only 611 * so skip other ports. 
612 */ 613 if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port)) 614 return 0; 615 616 nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK; 617 nfc_credits += credits; 618 619 tb_port_dbg(port, "adding %d NFC credits to %lu", credits, 620 port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK); 621 622 port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK; 623 port->config.nfc_credits |= nfc_credits; 624 625 return tb_port_write(port, &port->config.nfc_credits, 626 TB_CFG_PORT, ADP_CS_4, 1); 627 } 628 629 /** 630 * tb_port_set_initial_credits() - Set initial port link credits allocated 631 * @port: Port to set the initial credits 632 * @credits: Number of credits to to allocate 633 * 634 * Set initial credits value to be used for ingress shared buffering. 635 */ 636 int tb_port_set_initial_credits(struct tb_port *port, u32 credits) 637 { 638 u32 data; 639 int ret; 640 641 ret = tb_port_read(port, &data, TB_CFG_PORT, ADP_CS_5, 1); 642 if (ret) 643 return ret; 644 645 data &= ~ADP_CS_5_LCA_MASK; 646 data |= (credits << ADP_CS_5_LCA_SHIFT) & ADP_CS_5_LCA_MASK; 647 648 return tb_port_write(port, &data, TB_CFG_PORT, ADP_CS_5, 1); 649 } 650 651 /** 652 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER 653 * @port: Port whose counters to clear 654 * @counter: Counter index to clear 655 * 656 * Return: Returns 0 on success or an error code on failure. 657 */ 658 int tb_port_clear_counter(struct tb_port *port, int counter) 659 { 660 u32 zero[3] = { 0, 0, 0 }; 661 tb_port_dbg(port, "clearing counter %d\n", counter); 662 return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3); 663 } 664 665 /** 666 * tb_port_unlock() - Unlock downstream port 667 * @port: Port to unlock 668 * 669 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the 670 * downstream router accessible for CM. 
671 */ 672 int tb_port_unlock(struct tb_port *port) 673 { 674 if (tb_switch_is_icm(port->sw)) 675 return 0; 676 if (!tb_port_is_null(port)) 677 return -EINVAL; 678 if (tb_switch_is_usb4(port->sw)) 679 return usb4_port_unlock(port); 680 return 0; 681 } 682 683 static int __tb_port_enable(struct tb_port *port, bool enable) 684 { 685 int ret; 686 u32 phy; 687 688 if (!tb_port_is_null(port)) 689 return -EINVAL; 690 691 ret = tb_port_read(port, &phy, TB_CFG_PORT, 692 port->cap_phy + LANE_ADP_CS_1, 1); 693 if (ret) 694 return ret; 695 696 if (enable) 697 phy &= ~LANE_ADP_CS_1_LD; 698 else 699 phy |= LANE_ADP_CS_1_LD; 700 701 return tb_port_write(port, &phy, TB_CFG_PORT, 702 port->cap_phy + LANE_ADP_CS_1, 1); 703 } 704 705 /** 706 * tb_port_enable() - Enable lane adapter 707 * @port: Port to enable (can be %NULL) 708 * 709 * This is used for lane 0 and 1 adapters to enable it. 710 */ 711 int tb_port_enable(struct tb_port *port) 712 { 713 return __tb_port_enable(port, true); 714 } 715 716 /** 717 * tb_port_disable() - Disable lane adapter 718 * @port: Port to disable (can be %NULL) 719 * 720 * This is used for lane 0 and 1 adapters to disable it. 721 */ 722 int tb_port_disable(struct tb_port *port) 723 { 724 return __tb_port_enable(port, false); 725 } 726 727 /* 728 * tb_init_port() - initialize a port 729 * 730 * This is a helper method for tb_switch_alloc. Does not check or initialize 731 * any downstream switches. 732 * 733 * Return: Returns 0 on success or an error code on failure. 734 */ 735 static int tb_init_port(struct tb_port *port) 736 { 737 int res; 738 int cap; 739 740 res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8); 741 if (res) { 742 if (res == -ENODEV) { 743 tb_dbg(port->sw->tb, " Port %d: not implemented\n", 744 port->port); 745 port->disabled = true; 746 return 0; 747 } 748 return res; 749 } 750 751 /* Port 0 is the switch itself and has no PHY. 
*/ 752 if (port->config.type == TB_TYPE_PORT && port->port != 0) { 753 cap = tb_port_find_cap(port, TB_PORT_CAP_PHY); 754 755 if (cap > 0) 756 port->cap_phy = cap; 757 else 758 tb_port_WARN(port, "non switch port without a PHY\n"); 759 760 cap = tb_port_find_cap(port, TB_PORT_CAP_USB4); 761 if (cap > 0) 762 port->cap_usb4 = cap; 763 } else if (port->port != 0) { 764 cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP); 765 if (cap > 0) 766 port->cap_adap = cap; 767 } 768 769 tb_dump_port(port->sw->tb, &port->config); 770 771 INIT_LIST_HEAD(&port->list); 772 return 0; 773 774 } 775 776 static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid, 777 int max_hopid) 778 { 779 int port_max_hopid; 780 struct ida *ida; 781 782 if (in) { 783 port_max_hopid = port->config.max_in_hop_id; 784 ida = &port->in_hopids; 785 } else { 786 port_max_hopid = port->config.max_out_hop_id; 787 ida = &port->out_hopids; 788 } 789 790 /* 791 * NHI can use HopIDs 1-max for other adapters HopIDs 0-7 are 792 * reserved. 793 */ 794 if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID) 795 min_hopid = TB_PATH_MIN_HOPID; 796 797 if (max_hopid < 0 || max_hopid > port_max_hopid) 798 max_hopid = port_max_hopid; 799 800 return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL); 801 } 802 803 /** 804 * tb_port_alloc_in_hopid() - Allocate input HopID from port 805 * @port: Port to allocate HopID for 806 * @min_hopid: Minimum acceptable input HopID 807 * @max_hopid: Maximum acceptable input HopID 808 * 809 * Return: HopID between @min_hopid and @max_hopid or negative errno in 810 * case of error. 
811 */ 812 int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid) 813 { 814 return tb_port_alloc_hopid(port, true, min_hopid, max_hopid); 815 } 816 817 /** 818 * tb_port_alloc_out_hopid() - Allocate output HopID from port 819 * @port: Port to allocate HopID for 820 * @min_hopid: Minimum acceptable output HopID 821 * @max_hopid: Maximum acceptable output HopID 822 * 823 * Return: HopID between @min_hopid and @max_hopid or negative errno in 824 * case of error. 825 */ 826 int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid) 827 { 828 return tb_port_alloc_hopid(port, false, min_hopid, max_hopid); 829 } 830 831 /** 832 * tb_port_release_in_hopid() - Release allocated input HopID from port 833 * @port: Port whose HopID to release 834 * @hopid: HopID to release 835 */ 836 void tb_port_release_in_hopid(struct tb_port *port, int hopid) 837 { 838 ida_simple_remove(&port->in_hopids, hopid); 839 } 840 841 /** 842 * tb_port_release_out_hopid() - Release allocated output HopID from port 843 * @port: Port whose HopID to release 844 * @hopid: HopID to release 845 */ 846 void tb_port_release_out_hopid(struct tb_port *port, int hopid) 847 { 848 ida_simple_remove(&port->out_hopids, hopid); 849 } 850 851 static inline bool tb_switch_is_reachable(const struct tb_switch *parent, 852 const struct tb_switch *sw) 853 { 854 u64 mask = (1ULL << parent->config.depth * 8) - 1; 855 return (tb_route(parent) & mask) == (tb_route(sw) & mask); 856 } 857 858 /** 859 * tb_next_port_on_path() - Return next port for given port on a path 860 * @start: Start port of the walk 861 * @end: End port of the walk 862 * @prev: Previous port (%NULL if this is the first) 863 * 864 * This function can be used to walk from one port to another if they 865 * are connected through zero or more switches. If the @prev is dual 866 * link port, the function follows that link and returns another end on 867 * that same link. 
868 * 869 * If the @end port has been reached, return %NULL. 870 * 871 * Domain tb->lock must be held when this function is called. 872 */ 873 struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end, 874 struct tb_port *prev) 875 { 876 struct tb_port *next; 877 878 if (!prev) 879 return start; 880 881 if (prev->sw == end->sw) { 882 if (prev == end) 883 return NULL; 884 return end; 885 } 886 887 if (tb_switch_is_reachable(prev->sw, end->sw)) { 888 next = tb_port_at(tb_route(end->sw), prev->sw); 889 /* Walk down the topology if next == prev */ 890 if (prev->remote && 891 (next == prev || next->dual_link_port == prev)) 892 next = prev->remote; 893 } else { 894 if (tb_is_upstream_port(prev)) { 895 next = prev->remote; 896 } else { 897 next = tb_upstream_port(prev->sw); 898 /* 899 * Keep the same link if prev and next are both 900 * dual link ports. 901 */ 902 if (next->dual_link_port && 903 next->link_nr != prev->link_nr) { 904 next = next->dual_link_port; 905 } 906 } 907 } 908 909 return next != prev ? next : NULL; 910 } 911 912 /** 913 * tb_port_get_link_speed() - Get current link speed 914 * @port: Port to check (USB4 or CIO) 915 * 916 * Returns link speed in Gb/s or negative errno in case of failure. 917 */ 918 int tb_port_get_link_speed(struct tb_port *port) 919 { 920 u32 val, speed; 921 int ret; 922 923 if (!port->cap_phy) 924 return -EINVAL; 925 926 ret = tb_port_read(port, &val, TB_CFG_PORT, 927 port->cap_phy + LANE_ADP_CS_1, 1); 928 if (ret) 929 return ret; 930 931 speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >> 932 LANE_ADP_CS_1_CURRENT_SPEED_SHIFT; 933 return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10; 934 } 935 936 /** 937 * tb_port_get_link_width() - Get current link width 938 * @port: Port to check (USB4 or CIO) 939 * 940 * Returns link width. Return values can be 1 (Single-Lane), 2 (Dual-Lane) 941 * or negative errno in case of failure. 
942 */ 943 int tb_port_get_link_width(struct tb_port *port) 944 { 945 u32 val; 946 int ret; 947 948 if (!port->cap_phy) 949 return -EINVAL; 950 951 ret = tb_port_read(port, &val, TB_CFG_PORT, 952 port->cap_phy + LANE_ADP_CS_1, 1); 953 if (ret) 954 return ret; 955 956 return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >> 957 LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT; 958 } 959 960 static bool tb_port_is_width_supported(struct tb_port *port, int width) 961 { 962 u32 phy, widths; 963 int ret; 964 965 if (!port->cap_phy) 966 return false; 967 968 ret = tb_port_read(port, &phy, TB_CFG_PORT, 969 port->cap_phy + LANE_ADP_CS_0, 1); 970 if (ret) 971 return false; 972 973 widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >> 974 LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT; 975 976 return !!(widths & width); 977 } 978 979 static int tb_port_set_link_width(struct tb_port *port, unsigned int width) 980 { 981 u32 val; 982 int ret; 983 984 if (!port->cap_phy) 985 return -EINVAL; 986 987 ret = tb_port_read(port, &val, TB_CFG_PORT, 988 port->cap_phy + LANE_ADP_CS_1, 1); 989 if (ret) 990 return ret; 991 992 val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK; 993 switch (width) { 994 case 1: 995 val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE << 996 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT; 997 break; 998 case 2: 999 val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL << 1000 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT; 1001 break; 1002 default: 1003 return -EINVAL; 1004 } 1005 1006 val |= LANE_ADP_CS_1_LB; 1007 1008 return tb_port_write(port, &val, TB_CFG_PORT, 1009 port->cap_phy + LANE_ADP_CS_1, 1); 1010 } 1011 1012 /** 1013 * tb_port_lane_bonding_enable() - Enable bonding on port 1014 * @port: port to enable 1015 * 1016 * Enable bonding by setting the link width of the port and the 1017 * other port in case of dual link port. 
1018 * 1019 * Return: %0 in case of success and negative errno in case of error 1020 */ 1021 int tb_port_lane_bonding_enable(struct tb_port *port) 1022 { 1023 int ret; 1024 1025 /* 1026 * Enable lane bonding for both links if not already enabled by 1027 * for example the boot firmware. 1028 */ 1029 ret = tb_port_get_link_width(port); 1030 if (ret == 1) { 1031 ret = tb_port_set_link_width(port, 2); 1032 if (ret) 1033 return ret; 1034 } 1035 1036 ret = tb_port_get_link_width(port->dual_link_port); 1037 if (ret == 1) { 1038 ret = tb_port_set_link_width(port->dual_link_port, 2); 1039 if (ret) { 1040 tb_port_set_link_width(port, 1); 1041 return ret; 1042 } 1043 } 1044 1045 port->bonded = true; 1046 port->dual_link_port->bonded = true; 1047 1048 return 0; 1049 } 1050 1051 /** 1052 * tb_port_lane_bonding_disable() - Disable bonding on port 1053 * @port: port to disable 1054 * 1055 * Disable bonding by setting the link width of the port and the 1056 * other port in case of dual link port. 1057 * 1058 */ 1059 void tb_port_lane_bonding_disable(struct tb_port *port) 1060 { 1061 port->dual_link_port->bonded = false; 1062 port->bonded = false; 1063 1064 tb_port_set_link_width(port->dual_link_port, 1); 1065 tb_port_set_link_width(port, 1); 1066 } 1067 1068 static int tb_port_start_lane_initialization(struct tb_port *port) 1069 { 1070 int ret; 1071 1072 if (tb_switch_is_usb4(port->sw)) 1073 return 0; 1074 1075 ret = tb_lc_start_lane_initialization(port); 1076 return ret == -EINVAL ? 
0 : ret; 1077 } 1078 1079 /** 1080 * tb_port_is_enabled() - Is the adapter port enabled 1081 * @port: Port to check 1082 */ 1083 bool tb_port_is_enabled(struct tb_port *port) 1084 { 1085 switch (port->config.type) { 1086 case TB_TYPE_PCIE_UP: 1087 case TB_TYPE_PCIE_DOWN: 1088 return tb_pci_port_is_enabled(port); 1089 1090 case TB_TYPE_DP_HDMI_IN: 1091 case TB_TYPE_DP_HDMI_OUT: 1092 return tb_dp_port_is_enabled(port); 1093 1094 case TB_TYPE_USB3_UP: 1095 case TB_TYPE_USB3_DOWN: 1096 return tb_usb3_port_is_enabled(port); 1097 1098 default: 1099 return false; 1100 } 1101 } 1102 1103 /** 1104 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled 1105 * @port: USB3 adapter port to check 1106 */ 1107 bool tb_usb3_port_is_enabled(struct tb_port *port) 1108 { 1109 u32 data; 1110 1111 if (tb_port_read(port, &data, TB_CFG_PORT, 1112 port->cap_adap + ADP_USB3_CS_0, 1)) 1113 return false; 1114 1115 return !!(data & ADP_USB3_CS_0_PE); 1116 } 1117 1118 /** 1119 * tb_usb3_port_enable() - Enable USB3 adapter port 1120 * @port: USB3 adapter port to enable 1121 * @enable: Enable/disable the USB3 adapter 1122 */ 1123 int tb_usb3_port_enable(struct tb_port *port, bool enable) 1124 { 1125 u32 word = enable ? 
(ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V) 1126 : ADP_USB3_CS_0_V; 1127 1128 if (!port->cap_adap) 1129 return -ENXIO; 1130 return tb_port_write(port, &word, TB_CFG_PORT, 1131 port->cap_adap + ADP_USB3_CS_0, 1); 1132 } 1133 1134 /** 1135 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled 1136 * @port: PCIe port to check 1137 */ 1138 bool tb_pci_port_is_enabled(struct tb_port *port) 1139 { 1140 u32 data; 1141 1142 if (tb_port_read(port, &data, TB_CFG_PORT, 1143 port->cap_adap + ADP_PCIE_CS_0, 1)) 1144 return false; 1145 1146 return !!(data & ADP_PCIE_CS_0_PE); 1147 } 1148 1149 /** 1150 * tb_pci_port_enable() - Enable PCIe adapter port 1151 * @port: PCIe port to enable 1152 * @enable: Enable/disable the PCIe adapter 1153 */ 1154 int tb_pci_port_enable(struct tb_port *port, bool enable) 1155 { 1156 u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0; 1157 if (!port->cap_adap) 1158 return -ENXIO; 1159 return tb_port_write(port, &word, TB_CFG_PORT, 1160 port->cap_adap + ADP_PCIE_CS_0, 1); 1161 } 1162 1163 /** 1164 * tb_dp_port_hpd_is_active() - Is HPD already active 1165 * @port: DP out port to check 1166 * 1167 * Checks if the DP OUT adapter port has HDP bit already set. 1168 */ 1169 int tb_dp_port_hpd_is_active(struct tb_port *port) 1170 { 1171 u32 data; 1172 int ret; 1173 1174 ret = tb_port_read(port, &data, TB_CFG_PORT, 1175 port->cap_adap + ADP_DP_CS_2, 1); 1176 if (ret) 1177 return ret; 1178 1179 return !!(data & ADP_DP_CS_2_HDP); 1180 } 1181 1182 /** 1183 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port 1184 * @port: Port to clear HPD 1185 * 1186 * If the DP IN port has HDP set, this function can be used to clear it. 
1187 */ 1188 int tb_dp_port_hpd_clear(struct tb_port *port) 1189 { 1190 u32 data; 1191 int ret; 1192 1193 ret = tb_port_read(port, &data, TB_CFG_PORT, 1194 port->cap_adap + ADP_DP_CS_3, 1); 1195 if (ret) 1196 return ret; 1197 1198 data |= ADP_DP_CS_3_HDPC; 1199 return tb_port_write(port, &data, TB_CFG_PORT, 1200 port->cap_adap + ADP_DP_CS_3, 1); 1201 } 1202 1203 /** 1204 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port 1205 * @port: DP IN/OUT port to set hops 1206 * @video: Video Hop ID 1207 * @aux_tx: AUX TX Hop ID 1208 * @aux_rx: AUX RX Hop ID 1209 * 1210 * Programs specified Hop IDs for DP IN/OUT port. 1211 */ 1212 int tb_dp_port_set_hops(struct tb_port *port, unsigned int video, 1213 unsigned int aux_tx, unsigned int aux_rx) 1214 { 1215 u32 data[2]; 1216 int ret; 1217 1218 ret = tb_port_read(port, data, TB_CFG_PORT, 1219 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data)); 1220 if (ret) 1221 return ret; 1222 1223 data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK; 1224 data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK; 1225 data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK; 1226 1227 data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) & 1228 ADP_DP_CS_0_VIDEO_HOPID_MASK; 1229 data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK; 1230 data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) & 1231 ADP_DP_CS_1_AUX_RX_HOPID_MASK; 1232 1233 return tb_port_write(port, data, TB_CFG_PORT, 1234 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data)); 1235 } 1236 1237 /** 1238 * tb_dp_port_is_enabled() - Is DP adapter port enabled 1239 * @port: DP adapter port to check 1240 */ 1241 bool tb_dp_port_is_enabled(struct tb_port *port) 1242 { 1243 u32 data[2]; 1244 1245 if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0, 1246 ARRAY_SIZE(data))) 1247 return false; 1248 1249 return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE)); 1250 } 1251 1252 /** 1253 * tb_dp_port_enable() - Enables/disables DP paths of a port 1254 * @port: DP IN/OUT port 1255 * @enable: Enable/disable DP path 1256 * 
1257 * Once Hop IDs are programmed DP paths can be enabled or disabled by 1258 * calling this function. 1259 */ 1260 int tb_dp_port_enable(struct tb_port *port, bool enable) 1261 { 1262 u32 data[2]; 1263 int ret; 1264 1265 ret = tb_port_read(port, data, TB_CFG_PORT, 1266 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data)); 1267 if (ret) 1268 return ret; 1269 1270 if (enable) 1271 data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE; 1272 else 1273 data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE); 1274 1275 return tb_port_write(port, data, TB_CFG_PORT, 1276 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data)); 1277 } 1278 1279 /* switch utility functions */ 1280 1281 static const char *tb_switch_generation_name(const struct tb_switch *sw) 1282 { 1283 switch (sw->generation) { 1284 case 1: 1285 return "Thunderbolt 1"; 1286 case 2: 1287 return "Thunderbolt 2"; 1288 case 3: 1289 return "Thunderbolt 3"; 1290 case 4: 1291 return "USB4"; 1292 default: 1293 return "Unknown"; 1294 } 1295 } 1296 1297 static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw) 1298 { 1299 const struct tb_regs_switch_header *regs = &sw->config; 1300 1301 tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n", 1302 tb_switch_generation_name(sw), regs->vendor_id, regs->device_id, 1303 regs->revision, regs->thunderbolt_version); 1304 tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number); 1305 tb_dbg(tb, " Config:\n"); 1306 tb_dbg(tb, 1307 " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n", 1308 regs->upstream_port_number, regs->depth, 1309 (((u64) regs->route_hi) << 32) | regs->route_lo, 1310 regs->enabled, regs->plug_events_delay); 1311 tb_dbg(tb, " unknown1: %#x unknown4: %#x\n", 1312 regs->__unknown1, regs->__unknown4); 1313 } 1314 1315 /** 1316 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET 1317 * @sw: Switch to reset 1318 * 1319 * Return: Returns 0 on success or an error code on failure. 
 */
int tb_switch_reset(struct tb_switch *sw)
{
	struct tb_cfg_result res;

	/* Only generation 1 switches are reset this way */
	if (sw->generation > 1)
		return 0;

	tb_sw_dbg(sw, "resetting switch\n");

	/* Re-write dwords 2-3 of the config header (route string) */
	res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
			      TB_CFG_SWITCH, 2, 2);
	if (res.err)
		return res.err;
	res = tb_cfg_reset(sw->tb->ctl, tb_route(sw), TB_CFG_DEFAULT_TIMEOUT);
	/* A positive value is an error reported by the switch itself */
	if (res.err > 0)
		return -EIO;
	return res.err;
}

/*
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
	u32 data;
	int res;

	/* ICM firmware and USB4 routers handle plug events themselves */
	if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
		return 0;

	sw->config.plug_events_delay = 0xff;
	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
	if (res)
		return res;

	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
	if (res)
		return res;

	if (active) {
		/* Clear bits 6:2 to unmask plug events */
		data = data & 0xFFFFFF83;
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
			break;
		default:
			/*
			 * NOTE(review): bit 2 is set on everything except
			 * the legacy controllers above; the exact meaning
			 * of this bit is not visible from this file.
			 */
			data |= 4;
		}
	} else {
		/* Mask the plug event bits again */
		data = data | 0x7c;
	}
	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
			   sw->cap_plug_events + 1, 1);
}

/* sysfs: current authorization state of the switch */
static ssize_t authorized_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->authorized);
}

/*
 * De-authorize @dev and everything below it, children first. Used when
 * 0 is written to the authorized attribute.
 */
static int disapprove_switch(struct device *dev, void *not_used)
{
	struct tb_switch *sw;

	sw = tb_to_switch(dev);
	if (sw && sw->authorized) {
		int ret;

		/* First children */
		ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
		if (ret)
			return ret;

		ret = tb_domain_disapprove_switch(sw->tb, sw);
		if (ret)
			return ret;

		sw->authorized = 0;
		/* Tell userspace the authorization state changed */
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
	}

	return 0;
}

/*
 * Change the authorization state of @sw to @val:
 *   0 - disapprove (de-authorize) the switch and its children
 *   1 - approve, using the stored key if one is present
 *   2 - challenge using the stored key
 *
 * Returns 0 on success, -EINVAL for transitions not handled above, or
 * restart_syscall() when the domain lock could not be taken.
 */
static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
	int ret = -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	/* Nothing to do if already in the requested state */
	if (!!sw->authorized == !!val)
		goto unlock;

	switch (val) {
	/* Disapprove switch */
	case 0:
		if (tb_route(sw)) {
			ret = disapprove_switch(&sw->dev, NULL);
			goto unlock;
		}
		break;

	/* Approve switch */
	case 1:
		if (sw->key)
			ret = tb_domain_approve_switch_key(sw->tb, sw);
		else
			ret = tb_domain_approve_switch(sw->tb, sw);
		break;

	/* Challenge switch */
	case 2:
		if (sw->key)
			ret = tb_domain_challenge_switch_key(sw->tb, sw);
		break;

	default:
		break;
	}

	if (!ret) {
		sw->authorized = val;
		/* Notify status change to the userspace */
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
	}

unlock:
	mutex_unlock(&sw->tb->lock);
	return ret;
}

/* sysfs: write 0/1/2 to change the authorization state */
static ssize_t authorized_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int val;
	ssize_t ret;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 2)
		return -EINVAL;

	/* Keep the switch powered while the operation runs */
	pm_runtime_get_sync(&sw->dev);
	ret = tb_switch_set_authorized(sw, val);
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret ?
ret : count; 1482 } 1483 static DEVICE_ATTR_RW(authorized); 1484 1485 static ssize_t boot_show(struct device *dev, struct device_attribute *attr, 1486 char *buf) 1487 { 1488 struct tb_switch *sw = tb_to_switch(dev); 1489 1490 return sprintf(buf, "%u\n", sw->boot); 1491 } 1492 static DEVICE_ATTR_RO(boot); 1493 1494 static ssize_t device_show(struct device *dev, struct device_attribute *attr, 1495 char *buf) 1496 { 1497 struct tb_switch *sw = tb_to_switch(dev); 1498 1499 return sprintf(buf, "%#x\n", sw->device); 1500 } 1501 static DEVICE_ATTR_RO(device); 1502 1503 static ssize_t 1504 device_name_show(struct device *dev, struct device_attribute *attr, char *buf) 1505 { 1506 struct tb_switch *sw = tb_to_switch(dev); 1507 1508 return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : ""); 1509 } 1510 static DEVICE_ATTR_RO(device_name); 1511 1512 static ssize_t 1513 generation_show(struct device *dev, struct device_attribute *attr, char *buf) 1514 { 1515 struct tb_switch *sw = tb_to_switch(dev); 1516 1517 return sprintf(buf, "%u\n", sw->generation); 1518 } 1519 static DEVICE_ATTR_RO(generation); 1520 1521 static ssize_t key_show(struct device *dev, struct device_attribute *attr, 1522 char *buf) 1523 { 1524 struct tb_switch *sw = tb_to_switch(dev); 1525 ssize_t ret; 1526 1527 if (!mutex_trylock(&sw->tb->lock)) 1528 return restart_syscall(); 1529 1530 if (sw->key) 1531 ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key); 1532 else 1533 ret = sprintf(buf, "\n"); 1534 1535 mutex_unlock(&sw->tb->lock); 1536 return ret; 1537 } 1538 1539 static ssize_t key_store(struct device *dev, struct device_attribute *attr, 1540 const char *buf, size_t count) 1541 { 1542 struct tb_switch *sw = tb_to_switch(dev); 1543 u8 key[TB_SWITCH_KEY_SIZE]; 1544 ssize_t ret = count; 1545 bool clear = false; 1546 1547 if (!strcmp(buf, "\n")) 1548 clear = true; 1549 else if (hex2bin(key, buf, sizeof(key))) 1550 return -EINVAL; 1551 1552 if (!mutex_trylock(&sw->tb->lock)) 1553 return 
restart_syscall();

	/* The key can only be changed while the switch is unauthorized */
	if (sw->authorized) {
		ret = -EBUSY;
	} else {
		kfree(sw->key);
		if (clear) {
			sw->key = NULL;
		} else {
			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
			if (!sw->key)
				ret = -ENOMEM;
		}
	}

	mutex_unlock(&sw->tb->lock);
	return ret;
}
static DEVICE_ATTR(key, 0600, key_show, key_store);

/* sysfs: upstream link speed in Gb/s */
static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
}

/*
 * Currently all lanes must run at the same speed but we expose here
 * both directions to allow possible asymmetric links in the future.
 */
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);

/* sysfs: number of lanes in the upstream link */
static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->link_width);
}

/*
 * Currently link has same amount of lanes both directions (1 or 2) but
 * expose them separately to allow possible asymmetric links in the future.
 */
static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);

/* sysfs: status of the last NVM authentication (0 means no failure) */
static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u32 status;

	nvm_get_auth_status(sw, &status);
	return sprintf(buf, "%#x\n", status);
}

/*
 * Common handler for the nvm_authenticate and
 * nvm_authenticate_on_disconnect attributes. @buf carries the requested
 * operation (see enum nvm_write_ops). When @disconnect is set the
 * authentication is requested via tb_lc_force_power() instead of being
 * started immediately.
 */
static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
				      bool disconnect)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int val;
	int ret;

	/* Keep the switch powered for the whole operation */
	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	/* If NVMem devices are not yet added */
	if (!sw->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear the authentication status */
	nvm_clear_auth_status(sw);

	if (val > 0) {
		/* Flush the pending image to the hardware if not done yet */
		if (!sw->nvm->flushed) {
			if (!sw->nvm->buf) {
				ret = -EINVAL;
				goto exit_unlock;
			}

			ret = nvm_validate_and_write(sw);
			if (ret || val == WRITE_ONLY)
				goto exit_unlock;
		}
		if (val == WRITE_AND_AUTHENTICATE) {
			if (disconnect) {
				ret = tb_lc_force_power(sw);
			} else {
				sw->nvm->authenticating = true;
				ret = nvm_authenticate(sw);
			}
		}
	}

exit_unlock:
	mutex_unlock(&sw->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

/* sysfs: trigger NVM write and/or authentication immediately */
static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	int ret = nvm_authenticate_sysfs(dev, buf, false);
	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t
nvm_authenticate_on_disconnect_show(struct device *dev, 1681 struct device_attribute *attr, char *buf) 1682 { 1683 return nvm_authenticate_show(dev, attr, buf); 1684 } 1685 1686 static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev, 1687 struct device_attribute *attr, const char *buf, size_t count) 1688 { 1689 int ret; 1690 1691 ret = nvm_authenticate_sysfs(dev, buf, true); 1692 return ret ? ret : count; 1693 } 1694 static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect); 1695 1696 static ssize_t nvm_version_show(struct device *dev, 1697 struct device_attribute *attr, char *buf) 1698 { 1699 struct tb_switch *sw = tb_to_switch(dev); 1700 int ret; 1701 1702 if (!mutex_trylock(&sw->tb->lock)) 1703 return restart_syscall(); 1704 1705 if (sw->safe_mode) 1706 ret = -ENODATA; 1707 else if (!sw->nvm) 1708 ret = -EAGAIN; 1709 else 1710 ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor); 1711 1712 mutex_unlock(&sw->tb->lock); 1713 1714 return ret; 1715 } 1716 static DEVICE_ATTR_RO(nvm_version); 1717 1718 static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, 1719 char *buf) 1720 { 1721 struct tb_switch *sw = tb_to_switch(dev); 1722 1723 return sprintf(buf, "%#x\n", sw->vendor); 1724 } 1725 static DEVICE_ATTR_RO(vendor); 1726 1727 static ssize_t 1728 vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf) 1729 { 1730 struct tb_switch *sw = tb_to_switch(dev); 1731 1732 return sprintf(buf, "%s\n", sw->vendor_name ? 
sw->vendor_name : "");
}
static DEVICE_ATTR_RO(vendor_name);

/* sysfs: UUID of the switch */
static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%pUb\n", sw->uuid);
}
static DEVICE_ATTR_RO(unique_id);

/* All switch attributes; visibility is decided in switch_attr_is_visible() */
static struct attribute *switch_attrs[] = {
	&dev_attr_authorized.attr,
	&dev_attr_boot.attr,
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_generation.attr,
	&dev_attr_key.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_authenticate_on_disconnect.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_rx_speed.attr,
	&dev_attr_rx_lanes.attr,
	&dev_attr_tx_speed.attr,
	&dev_attr_tx_lanes.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	&dev_attr_unique_id.attr,
	NULL,
};

/*
 * Decide per-attribute visibility. Returning 0 hides the attribute,
 * returning attr->mode exposes it with its declared permissions.
 */
static umode_t switch_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct tb_switch *sw = tb_to_switch(dev);

	if (attr == &dev_attr_authorized.attr) {
		/* No authorization with these domain security levels */
		if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
		    sw->tb->security_level == TB_SECURITY_DPONLY)
			return 0;
	} else if (attr == &dev_attr_device.attr) {
		if (!sw->device)
			return 0;
	} else if (attr == &dev_attr_device_name.attr) {
		if (!sw->device_name)
			return 0;
	} else if (attr == &dev_attr_vendor.attr) {
		if (!sw->vendor)
			return 0;
	} else if (attr == &dev_attr_vendor_name.attr) {
		if (!sw->vendor_name)
			return 0;
	} else if (attr == &dev_attr_key.attr) {
		/* Key is only exposed for devices in a "secure" domain */
		if (tb_route(sw) &&
		    sw->tb->security_level == TB_SECURITY_SECURE &&
		    sw->security_level == TB_SECURITY_SECURE)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_rx_speed.attr ||
		   attr == &dev_attr_rx_lanes.attr ||
		   attr == &dev_attr_tx_speed.attr ||
1796 attr == &dev_attr_tx_lanes.attr) { 1797 if (tb_route(sw)) 1798 return attr->mode; 1799 return 0; 1800 } else if (attr == &dev_attr_nvm_authenticate.attr) { 1801 if (nvm_upgradeable(sw)) 1802 return attr->mode; 1803 return 0; 1804 } else if (attr == &dev_attr_nvm_version.attr) { 1805 if (nvm_readable(sw)) 1806 return attr->mode; 1807 return 0; 1808 } else if (attr == &dev_attr_boot.attr) { 1809 if (tb_route(sw)) 1810 return attr->mode; 1811 return 0; 1812 } else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) { 1813 if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER) 1814 return attr->mode; 1815 return 0; 1816 } 1817 1818 return sw->safe_mode ? 0 : attr->mode; 1819 } 1820 1821 static const struct attribute_group switch_group = { 1822 .is_visible = switch_attr_is_visible, 1823 .attrs = switch_attrs, 1824 }; 1825 1826 static const struct attribute_group *switch_groups[] = { 1827 &switch_group, 1828 NULL, 1829 }; 1830 1831 static void tb_switch_release(struct device *dev) 1832 { 1833 struct tb_switch *sw = tb_to_switch(dev); 1834 struct tb_port *port; 1835 1836 dma_port_free(sw->dma_port); 1837 1838 tb_switch_for_each_port(sw, port) { 1839 ida_destroy(&port->in_hopids); 1840 ida_destroy(&port->out_hopids); 1841 } 1842 1843 kfree(sw->uuid); 1844 kfree(sw->device_name); 1845 kfree(sw->vendor_name); 1846 kfree(sw->ports); 1847 kfree(sw->drom); 1848 kfree(sw->key); 1849 kfree(sw); 1850 } 1851 1852 /* 1853 * Currently only need to provide the callbacks. Everything else is handled 1854 * in the connection manager. 
1855 */ 1856 static int __maybe_unused tb_switch_runtime_suspend(struct device *dev) 1857 { 1858 struct tb_switch *sw = tb_to_switch(dev); 1859 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; 1860 1861 if (cm_ops->runtime_suspend_switch) 1862 return cm_ops->runtime_suspend_switch(sw); 1863 1864 return 0; 1865 } 1866 1867 static int __maybe_unused tb_switch_runtime_resume(struct device *dev) 1868 { 1869 struct tb_switch *sw = tb_to_switch(dev); 1870 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; 1871 1872 if (cm_ops->runtime_resume_switch) 1873 return cm_ops->runtime_resume_switch(sw); 1874 return 0; 1875 } 1876 1877 static const struct dev_pm_ops tb_switch_pm_ops = { 1878 SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume, 1879 NULL) 1880 }; 1881 1882 struct device_type tb_switch_type = { 1883 .name = "thunderbolt_device", 1884 .release = tb_switch_release, 1885 .pm = &tb_switch_pm_ops, 1886 }; 1887 1888 static int tb_switch_get_generation(struct tb_switch *sw) 1889 { 1890 switch (sw->config.device_id) { 1891 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE: 1892 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE: 1893 case PCI_DEVICE_ID_INTEL_LIGHT_PEAK: 1894 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C: 1895 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C: 1896 case PCI_DEVICE_ID_INTEL_PORT_RIDGE: 1897 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE: 1898 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE: 1899 return 1; 1900 1901 case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE: 1902 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE: 1903 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE: 1904 return 2; 1905 1906 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: 1907 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE: 1908 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE: 1909 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: 1910 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: 1911 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: 1912 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: 
1913 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE: 1914 case PCI_DEVICE_ID_INTEL_ICL_NHI0: 1915 case PCI_DEVICE_ID_INTEL_ICL_NHI1: 1916 return 3; 1917 1918 default: 1919 if (tb_switch_is_usb4(sw)) 1920 return 4; 1921 1922 /* 1923 * For unknown switches assume generation to be 1 to be 1924 * on the safe side. 1925 */ 1926 tb_sw_warn(sw, "unsupported switch device id %#x\n", 1927 sw->config.device_id); 1928 return 1; 1929 } 1930 } 1931 1932 static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth) 1933 { 1934 int max_depth; 1935 1936 if (tb_switch_is_usb4(sw) || 1937 (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch))) 1938 max_depth = USB4_SWITCH_MAX_DEPTH; 1939 else 1940 max_depth = TB_SWITCH_MAX_DEPTH; 1941 1942 return depth > max_depth; 1943 } 1944 1945 /** 1946 * tb_switch_alloc() - allocate a switch 1947 * @tb: Pointer to the owning domain 1948 * @parent: Parent device for this switch 1949 * @route: Route string for this switch 1950 * 1951 * Allocates and initializes a switch. Will not upload configuration to 1952 * the switch. For that you need to call tb_switch_configure() 1953 * separately. The returned switch should be released by calling 1954 * tb_switch_put(). 1955 * 1956 * Return: Pointer to the allocated switch or ERR_PTR() in case of 1957 * failure. 
 */
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
				  u64 route)
{
	struct tb_switch *sw;
	int upstream_port;
	int i, ret, depth;

	/* Unlock the downstream port so we can access the switch below */
	if (route) {
		struct tb_switch *parent_sw = tb_to_switch(parent);
		struct tb_port *down;

		down = tb_port_at(route, parent_sw);
		tb_port_unlock(down);
	}

	depth = tb_route_length(route);

	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
	if (upstream_port < 0)
		return ERR_PTR(upstream_port);

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	sw->tb = tb;
	/* Read the first 5 dwords of the switch config header */
	ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
	if (ret)
		goto err_free_sw_ports;

	sw->generation = tb_switch_get_generation(sw);

	tb_dbg(tb, "current switch config:\n");
	tb_dump_switch(tb, sw);

	/* configure switch */
	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = depth;
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->config.enabled = 0;

	/* Make sure we do not exceed maximum topology limit */
	if (tb_switch_exceeds_max_depth(sw, depth)) {
		ret = -EADDRNOTAVAIL;
		goto err_free_sw_ports;
	}

	/* initialize ports; index 0 is the control port */
	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
			    GFP_KERNEL);
	if (!sw->ports) {
		ret = -ENOMEM;
		goto err_free_sw_ports;
	}

	for (i = 0; i <= sw->config.max_port_number; i++) {
		/* minimum setup for tb_find_cap and tb_drom_read to work */
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;

		/* Control port does not need HopID allocation */
		if (i) {
			ida_init(&sw->ports[i].in_hopids);
			ida_init(&sw->ports[i].out_hopids);
		}
	}

	ret =
tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
	/* A positive return value is the capability offset */
	if (ret > 0)
		sw->cap_plug_events = ret;

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
	if (ret > 0)
		sw->cap_lc = ret;

	/* Root switch is always authorized */
	if (!route)
		sw->authorized = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;

err_free_sw_ports:
	kfree(sw->ports);
	kfree(sw);

	return ERR_PTR(ret);
}

/**
 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * This creates a switch in safe mode. This means the switch pretty much
 * lacks all capabilities except DMA configuration port before it is
 * flashed with a valid NVM firmware.
 *
 * The returned switch must be released by calling tb_switch_put().
2067 * 2068 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure 2069 */ 2070 struct tb_switch * 2071 tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route) 2072 { 2073 struct tb_switch *sw; 2074 2075 sw = kzalloc(sizeof(*sw), GFP_KERNEL); 2076 if (!sw) 2077 return ERR_PTR(-ENOMEM); 2078 2079 sw->tb = tb; 2080 sw->config.depth = tb_route_length(route); 2081 sw->config.route_hi = upper_32_bits(route); 2082 sw->config.route_lo = lower_32_bits(route); 2083 sw->safe_mode = true; 2084 2085 device_initialize(&sw->dev); 2086 sw->dev.parent = parent; 2087 sw->dev.bus = &tb_bus_type; 2088 sw->dev.type = &tb_switch_type; 2089 sw->dev.groups = switch_groups; 2090 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw)); 2091 2092 return sw; 2093 } 2094 2095 /** 2096 * tb_switch_configure() - Uploads configuration to the switch 2097 * @sw: Switch to configure 2098 * 2099 * Call this function before the switch is added to the system. It will 2100 * upload configuration to the switch and makes it available for the 2101 * connection manager to use. Can be called to the switch again after 2102 * resume from low power states to re-initialize it. 2103 * 2104 * Return: %0 in case of success and negative errno in case of failure 2105 */ 2106 int tb_switch_configure(struct tb_switch *sw) 2107 { 2108 struct tb *tb = sw->tb; 2109 u64 route; 2110 int ret; 2111 2112 route = tb_route(sw); 2113 2114 tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n", 2115 sw->config.enabled ? "restoring" : "initializing", route, 2116 tb_route_length(route), sw->config.upstream_port_number); 2117 2118 sw->config.enabled = 1; 2119 2120 if (tb_switch_is_usb4(sw)) { 2121 /* 2122 * For USB4 devices, we need to program the CM version 2123 * accordingly so that it knows to expose all the 2124 * additional capabilities. 
		 */
		sw->config.cmuv = USB4_VERSION_1_0;

		/* Enumerate the switch */
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
				  ROUTER_CS_1, 4);
		if (ret)
			return ret;

		ret = usb4_switch_setup(sw);
	} else {
		if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
			tb_sw_warn(sw, "unknown switch vendor id %#x\n",
				   sw->config.vendor_id);

		if (!sw->cap_plug_events) {
			tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
			return -ENODEV;
		}

		/* Enumerate the switch */
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
				  ROUTER_CS_1, 3);
	}
	if (ret)
		return ret;

	return tb_plug_events_active(sw, true);
}

/*
 * Make sure sw->uuid is populated. Uses the fused UUID from the link
 * controller registers when available, otherwise derives a UUID from
 * the 64-bit UID the same way ICM does.
 */
static int tb_switch_set_uuid(struct tb_switch *sw)
{
	bool uid = false;
	u32 uuid[4];
	int ret;

	if (sw->uuid)
		return 0;

	if (tb_switch_is_usb4(sw)) {
		ret = usb4_switch_read_uid(sw, &sw->uid);
		if (ret)
			return ret;
		uid = true;
	} else {
		/*
		 * The newer controllers include fused UUID as part of
		 * link controller specific registers
		 */
		ret = tb_lc_read_uuid(sw, uuid);
		if (ret) {
			/* -EINVAL means no fused UUID; fall back to UID */
			if (ret != -EINVAL)
				return ret;
			uid = true;
		}
	}

	if (uid) {
		/*
		 * ICM generates UUID based on UID and fills the upper
		 * two words with ones. This is not strictly following
		 * UUID format but we want to be compatible with it so
		 * we do the same here.
		 */
		uuid[0] = sw->uid & 0xffffffff;
		uuid[1] = (sw->uid >> 32) & 0xffffffff;
		uuid[2] = 0xffffffff;
		uuid[3] = 0xffffffff;
	}

	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
	if (!sw->uuid)
		return -ENOMEM;
	return 0;
}

/*
 * Set up the DMA port (used for NVM upgrade) when the switch has one
 * and check the outcome of a possible previous NVM authentication.
 *
 * Returns -ESHUTDOWN when a pending authentication result forces a
 * power cycle of the switch; the switch re-appears once the power
 * cycle completes.
 */
static int tb_switch_add_dma_port(struct tb_switch *sw)
{
	u32 status;
	int ret;

	switch (sw->generation) {
	case 2:
		/* Only root switch can be upgraded */
		if (tb_route(sw))
			return 0;

		fallthrough;
	case 3:
	case 4:
		ret = tb_switch_set_uuid(sw);
		if (ret)
			return ret;
		break;

	default:
		/*
		 * DMA port is the only thing available when the switch
		 * is in safe mode.
		 */
		if (!sw->safe_mode)
			return 0;
		break;
	}

	if (sw->no_nvm_upgrade)
		return 0;

	if (tb_switch_is_usb4(sw)) {
		/* USB4 routers have their own authentication status query */
		ret = usb4_switch_nvm_authenticate_status(sw, &status);
		if (ret)
			return ret;

		if (status) {
			tb_sw_info(sw, "switch flash authentication failed\n");
			nvm_set_auth_status(sw, status);
		}

		return 0;
	}

	/* Root switch DMA port requires running firmware */
	if (!tb_route(sw) && !tb_switch_is_icm(sw))
		return 0;

	sw->dma_port = dma_port_alloc(sw);
	if (!sw->dma_port)
		return 0;

	/*
	 * If there is status already set then authentication failed
	 * when the dma_port_flash_update_auth() returned. Power cycling
	 * is not needed (it was done already) so only thing we do here
	 * is to unblock runtime PM of the root port.
	 */
	nvm_get_auth_status(sw, &status);
	if (status) {
		if (!tb_route(sw))
			nvm_authenticate_complete_dma_port(sw);
		return 0;
	}

	/*
	 * Check status of the previous flash authentication. If there
	 * is one we need to power cycle the switch in any case to make
	 * it functional again.
	 */
	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
	if (ret <= 0)
		return ret;

	/* Now we can allow root port to suspend again */
	if (!tb_route(sw))
		nvm_authenticate_complete_dma_port(sw);

	if (status) {
		tb_sw_info(sw, "switch flash authentication failed\n");
		nvm_set_auth_status(sw, status);
	}

	tb_sw_info(sw, "power cycling the switch now\n");
	dma_port_power_cycle(sw->dma_port);

	/*
	 * We return error here which causes the switch adding failure.
	 * It should appear back after power cycle is complete.
	 */
	return -ESHUTDOWN;
}

/*
 * Pair adjacent lane adapters (odd port with the following even port)
 * as dual-link ports unless the DROM already linked them.
 */
static void tb_switch_default_link_ports(struct tb_switch *sw)
{
	int i;

	for (i = 1; i <= sw->config.max_port_number; i += 2) {
		struct tb_port *port = &sw->ports[i];
		struct tb_port *subordinate;

		if (!tb_port_is_null(port))
			continue;

		/* Check for the subordinate port */
		if (i == sw->config.max_port_number ||
		    !tb_port_is_null(&sw->ports[i + 1]))
			continue;

		/* Link them if not already done so (by DROM) */
		subordinate = &sw->ports[i + 1];
		if (!port->dual_link_port && !subordinate->dual_link_port) {
			port->link_nr = 0;
			port->dual_link_port = subordinate;
			subordinate->link_nr = 1;
			subordinate->dual_link_port = port;

			tb_sw_dbg(sw, "linked ports %d <-> %d\n",
				  port->port, subordinate->port);
		}
	}
}

/* Can the upstream link of @sw be bonded to two lanes? */
static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
{
	const struct tb_port *up = tb_upstream_port(sw);

	/* Both lanes of the upstream link must be connected */
	if (!up->dual_link_port || !up->dual_link_port->remote)
		return false;

	if (tb_switch_is_usb4(sw))
		return usb4_switch_lane_bonding_possible(sw);
	return tb_lc_lane_bonding_possible(sw);
}

/*
 * Refresh sw->link_speed and sw->link_width from the upstream port and
 * send a change uevent when either changed.
 */
static int tb_switch_update_link_attributes(struct tb_switch *sw)
{
	struct tb_port *up;
	bool change = false;
	int ret;

	/* Host router and ICM-managed routers are not handled here */
	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	up = tb_upstream_port(sw);

	ret = tb_port_get_link_speed(up);
	if (ret < 0)
		return ret;
	if (sw->link_speed != ret)
		change = true;
	sw->link_speed = ret;

	ret = tb_port_get_link_width(up);
	if (ret < 0)
		return ret;
	if (sw->link_width != ret)
		change = true;
	sw->link_width = ret;

	/* Notify userspace that there is possible link attribute change */
	if (device_is_registered(&sw->dev) && change)
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);

	return 0;
}

/**
 * tb_switch_lane_bonding_enable() - Enable lane bonding
 * @sw: Switch to enable lane bonding
 *
 * Connection manager can call this function to enable lane bonding of a
 * switch. If conditions are correct and both switches support the feature,
 * lanes are bonded. It is safe to call this to any switch.
 */
int tb_switch_lane_bonding_enable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
	struct tb_port *up, *down;
	u64 route = tb_route(sw);
	int ret;

	/* Host router has no upstream link to bond */
	if (!route)
		return 0;

	if (!tb_switch_lane_bonding_possible(sw))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_port_at(route, parent);

	/* Both ends of the link must support x2 width */
	if (!tb_port_is_width_supported(up, 2) ||
	    !tb_port_is_width_supported(down, 2))
		return 0;

	ret = tb_port_lane_bonding_enable(up);
	if (ret) {
		tb_port_warn(up, "failed to enable lane bonding\n");
		return ret;
	}

	ret = tb_port_lane_bonding_enable(down);
	if (ret) {
		tb_port_warn(down, "failed to enable lane bonding\n");
		/* Roll back the upstream end so both stay in sync */
		tb_port_lane_bonding_disable(up);
		return ret;
	}

	tb_switch_update_link_attributes(sw);

	tb_sw_dbg(sw, "lane bonding enabled\n");
	return ret;
}

/**
 * tb_switch_lane_bonding_disable() - Disable lane bonding
 * @sw: Switch whose lane bonding to disable
 *
 * Disables lane bonding between @sw and parent. This can be called even
 * if lanes were not bonded originally.
 */
void tb_switch_lane_bonding_disable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
	struct tb_port *up, *down;

	/* Host router has no upstream link */
	if (!tb_route(sw))
		return;

	up = tb_upstream_port(sw);
	if (!up->bonded)
		return;

	down = tb_port_at(tb_route(sw), parent);

	tb_port_lane_bonding_disable(up);
	tb_port_lane_bonding_disable(down);

	tb_switch_update_link_attributes(sw);
	tb_sw_dbg(sw, "lane bonding disabled\n");
}

/**
 * tb_switch_configure_link() - Set link configured
 * @sw: Switch whose link is configured
 *
 * Sets the link upstream from @sw configured (from both ends) so that
 * it will not be disconnected when the domain exits sleep. Can be
 * called for any switch.
 *
 * It is recommended that this is called after lane bonding is enabled.
 *
 * Returns %0 on success and negative errno in case of error.
 */
int tb_switch_configure_link(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	up = tb_upstream_port(sw);
	if (tb_switch_is_usb4(up->sw))
		ret = usb4_port_configure(up);
	else
		ret = tb_lc_configure_port(up);
	if (ret)
		return ret;

	/* Configure the downstream end of the link as well */
	down = up->remote;
	if (tb_switch_is_usb4(down->sw))
		return usb4_port_configure(down);
	return tb_lc_configure_port(down);
}

/**
 * tb_switch_unconfigure_link() - Unconfigure link
 * @sw: Switch whose link is unconfigured
 *
 * Sets the link unconfigured so the @sw will be disconnected if the
 * domain exits sleep.
 */
void tb_switch_unconfigure_link(struct tb_switch *sw)
{
	struct tb_port *up, *down;

	if (sw->is_unplugged)
		return;
	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return;

	up = tb_upstream_port(sw);
	if (tb_switch_is_usb4(up->sw))
		usb4_port_unconfigure(up);
	else
		tb_lc_unconfigure_port(up);

	/* Unconfigure the downstream end of the link too */
	down = up->remote;
	if (tb_switch_is_usb4(down->sw))
		usb4_port_unconfigure(down);
	else
		tb_lc_unconfigure_port(down);
}

/**
 * tb_switch_add() - Add a switch to the domain
 * @sw: Switch to add
 *
 * This is the last step in adding switch to the domain. It will read
 * identification information from DROM and initializes ports so that
 * they can be used to connect other switches. The switch will be
 * exposed to the userspace when this function successfully returns. To
 * remove and release the switch, call tb_switch_remove().
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_add(struct tb_switch *sw)
{
	int i, ret;

	/*
	 * Initialize DMA control port now before we read DROM. Recent
	 * host controllers have more complete DROM on NVM that includes
	 * vendor and model identification strings which we then expose
	 * to the userspace. NVM can be accessed through DMA
	 * configuration based mailbox.
	 */
	ret = tb_switch_add_dma_port(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add DMA port\n");
		return ret;
	}

	/* In safe mode there is no DROM/UUID/port state to read */
	if (!sw->safe_mode) {
		/* read drom */
		ret = tb_drom_read(sw);
		if (ret) {
			dev_err(&sw->dev, "reading DROM failed\n");
			return ret;
		}
		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);

		ret = tb_switch_set_uuid(sw);
		if (ret) {
			dev_err(&sw->dev, "failed to set UUID\n");
			return ret;
		}

		for (i = 0; i <= sw->config.max_port_number; i++) {
			if (sw->ports[i].disabled) {
				tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
				continue;
			}
			ret = tb_init_port(&sw->ports[i]);
			if (ret) {
				dev_err(&sw->dev, "failed to initialize port %d\n", i);
				return ret;
			}
		}

		tb_switch_default_link_ports(sw);

		ret = tb_switch_update_link_attributes(sw);
		if (ret)
			return ret;

		ret = tb_switch_tmu_init(sw);
		if (ret)
			return ret;
	}

	ret = device_add(&sw->dev);
	if (ret) {
		dev_err(&sw->dev, "failed to add device: %d\n", ret);
		return ret;
	}

	if (tb_route(sw)) {
		dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
			 sw->vendor, sw->device);
		if (sw->vendor_name && sw->device_name)
			dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
				 sw->device_name);
	}

	ret = tb_switch_nvm_add(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add NVM devices\n");
		device_del(&sw->dev);
		return ret;
	}

	/*
	 * Thunderbolt routers do not generate wakeups themselves but
	 * they forward wakeups from tunneled protocols, so enable it
	 * here.
	 */
	device_init_wakeup(&sw->dev, true);

	pm_runtime_set_active(&sw->dev);
	if (sw->rpm) {
		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
		pm_runtime_use_autosuspend(&sw->dev);
		pm_runtime_mark_last_busy(&sw->dev);
		pm_runtime_enable(&sw->dev);
		pm_request_autosuspend(&sw->dev);
	}

	tb_switch_debugfs_init(sw);
	return 0;
}

/**
 * tb_switch_remove() - Remove and release a switch
 * @sw: Switch to remove
 *
 * This will remove the switch from the domain and release it after last
 * reference count drops to zero. If there are switches connected below
 * this switch, they will be removed as well.
 */
void tb_switch_remove(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_debugfs_remove(sw);

	if (sw->rpm) {
		pm_runtime_get_sync(&sw->dev);
		pm_runtime_disable(&sw->dev);
	}

	/* port 0 is the switch itself and never has a remote */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			/* Depth-first removal of everything downstream */
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else if (port->xdomain) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
		}

		/* Remove any downstream retimers */
		tb_retimer_remove_all(port);
	}

	if (!sw->is_unplugged)
		tb_plug_events_active(sw, false);

	tb_switch_nvm_remove(sw);

	if (tb_route(sw))
		dev_info(&sw->dev, "device disconnected\n");
	device_unregister(&sw->dev);
}

/**
 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
 * @sw: Router to mark unplugged
 */
void tb_sw_set_unplugged(struct tb_switch *sw)
{
	struct tb_port *port;

	if (sw == sw->tb->root_switch) {
		tb_sw_WARN(sw, "cannot unplug root switch\n");
		return;
	}
	if (sw->is_unplugged) {
		tb_sw_WARN(sw, "is_unplugged already set\n");
		return;
	}
	sw->is_unplugged = true;
	/* Propagate the flag to everything downstream of @sw */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_sw_set_unplugged(port->remote->sw);
		else if (port->xdomain)
			port->xdomain->is_unplugged = true;
	}
}

/* Program wake-on events for the router; @flags == 0 disables wakes */
static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
{
	if (flags)
		tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
	else
		tb_sw_dbg(sw, "disabling wakeup\n");

	if (tb_switch_is_usb4(sw))
		return usb4_switch_set_wake(sw, flags);
	return tb_lc_set_wake(sw, flags);
}

int tb_switch_resume(struct tb_switch *sw)
{
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "resuming switch\n");

	/*
	 * Check for UID of the connected switches except for root
	 * switch which we assume cannot be removed.
	 */
	if (tb_route(sw)) {
		u64 uid;

		/*
		 * Check first that we can still read the switch config
		 * space. It may be that there is now another domain
		 * connected.
		 */
		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
		if (err < 0) {
			tb_sw_info(sw, "switch not present anymore\n");
			return err;
		}

		if (tb_switch_is_usb4(sw))
			err = usb4_switch_read_uid(sw, &uid);
		else
			err = tb_drom_read_uid_only(sw, &uid);
		if (err) {
			tb_sw_warn(sw, "uid read failed\n");
			return err;
		}
		/* A different UID means a different device was plugged in */
		if (sw->uid != uid) {
			tb_sw_info(sw,
				   "changed while suspended (uid %#llx -> %#llx)\n",
				   sw->uid, uid);
			return -ENODEV;
		}
	}

	err = tb_switch_configure(sw);
	if (err)
		return err;

	/* Disable wakes */
	tb_switch_set_wake(sw, 0);

	err = tb_switch_tmu_init(sw);
	if (err)
		return err;

	/* check for surviving downstream switches */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain) {
			/*
			 * For disconnected downstream lane adapters
			 * start lane initialization now so we detect
			 * future connects.
			 */
			if (!tb_is_upstream_port(port) && tb_port_is_null(port))
				tb_port_start_lane_initialization(port);
			continue;
		} else if (port->xdomain) {
			/*
			 * Start lane initialization for XDomain so the
			 * link gets re-established.
			 */
			tb_port_start_lane_initialization(port);
		}

		if (tb_wait_for_port(port, true) <= 0) {
			tb_port_warn(port,
				     "lost during suspend, disconnecting\n");
			if (tb_port_has_remote(port))
				tb_sw_set_unplugged(port->remote->sw);
			else if (port->xdomain)
				port->xdomain->is_unplugged = true;
		} else if (tb_port_has_remote(port) || port->xdomain) {
			/*
			 * Always unlock the port so the downstream
			 * switch/domain is accessible.
			 */
			if (tb_port_unlock(port))
				tb_port_warn(port, "failed to unlock port\n");
			/* Recursively resume the downstream router */
			if (port->remote && tb_switch_resume(port->remote->sw)) {
				tb_port_warn(port,
					     "lost during suspend, disconnecting\n");
				tb_sw_set_unplugged(port->remote->sw);
			}
		}
	}
	return 0;
}

/**
 * tb_switch_suspend() - Put a switch to sleep
 * @sw: Switch to suspend
 * @runtime: Is this runtime suspend or system sleep
 *
 * Suspends router and all its children. Enables wakes according to
 * value of @runtime and then sets sleep bit for the router. If @sw is
 * host router the domain is ready to go to sleep once this function
 * returns.
 */
void tb_switch_suspend(struct tb_switch *sw, bool runtime)
{
	unsigned int flags = 0;
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "suspending switch\n");

	err = tb_plug_events_active(sw, false);
	if (err)
		return;

	/* Suspend everything downstream first (depth-first) */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_switch_suspend(port->remote->sw, runtime);
	}

	if (runtime) {
		/* Trigger wake when something is plugged in/out */
		flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
	} else if (device_may_wakeup(&sw->dev)) {
		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
	}

	tb_switch_set_wake(sw, flags);

	if (tb_switch_is_usb4(sw))
		usb4_switch_set_sleep(sw);
	else
		tb_lc_set_sleep(sw);
}

/**
 * tb_switch_query_dp_resource() - Query availability of DP resource
 * @sw: Switch whose DP resource is queried
 * @in: DP IN port
 *
 * Queries availability of DP resource for DP tunneling using switch
 * specific means. Returns %true if resource is available.
2840 */ 2841 bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in) 2842 { 2843 if (tb_switch_is_usb4(sw)) 2844 return usb4_switch_query_dp_resource(sw, in); 2845 return tb_lc_dp_sink_query(sw, in); 2846 } 2847 2848 /** 2849 * tb_switch_alloc_dp_resource() - Allocate available DP resource 2850 * @sw: Switch whose DP resource is allocated 2851 * @in: DP IN port 2852 * 2853 * Allocates DP resource for DP tunneling. The resource must be 2854 * available for this to succeed (see tb_switch_query_dp_resource()). 2855 * Returns %0 in success and negative errno otherwise. 2856 */ 2857 int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in) 2858 { 2859 if (tb_switch_is_usb4(sw)) 2860 return usb4_switch_alloc_dp_resource(sw, in); 2861 return tb_lc_dp_sink_alloc(sw, in); 2862 } 2863 2864 /** 2865 * tb_switch_dealloc_dp_resource() - De-allocate DP resource 2866 * @sw: Switch whose DP resource is de-allocated 2867 * @in: DP IN port 2868 * 2869 * De-allocates DP resource that was previously allocated for DP 2870 * tunneling. 
2871 */ 2872 void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in) 2873 { 2874 int ret; 2875 2876 if (tb_switch_is_usb4(sw)) 2877 ret = usb4_switch_dealloc_dp_resource(sw, in); 2878 else 2879 ret = tb_lc_dp_sink_dealloc(sw, in); 2880 2881 if (ret) 2882 tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n", 2883 in->port); 2884 } 2885 2886 struct tb_sw_lookup { 2887 struct tb *tb; 2888 u8 link; 2889 u8 depth; 2890 const uuid_t *uuid; 2891 u64 route; 2892 }; 2893 2894 static int tb_switch_match(struct device *dev, const void *data) 2895 { 2896 struct tb_switch *sw = tb_to_switch(dev); 2897 const struct tb_sw_lookup *lookup = data; 2898 2899 if (!sw) 2900 return 0; 2901 if (sw->tb != lookup->tb) 2902 return 0; 2903 2904 if (lookup->uuid) 2905 return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid)); 2906 2907 if (lookup->route) { 2908 return sw->config.route_lo == lower_32_bits(lookup->route) && 2909 sw->config.route_hi == upper_32_bits(lookup->route); 2910 } 2911 2912 /* Root switch is matched only by depth */ 2913 if (!lookup->depth) 2914 return !sw->depth; 2915 2916 return sw->link == lookup->link && sw->depth == lookup->depth; 2917 } 2918 2919 /** 2920 * tb_switch_find_by_link_depth() - Find switch by link and depth 2921 * @tb: Domain the switch belongs 2922 * @link: Link number the switch is connected 2923 * @depth: Depth of the switch in link 2924 * 2925 * Returned switch has reference count increased so the caller needs to 2926 * call tb_switch_put() when done with the switch. 
2927 */ 2928 struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth) 2929 { 2930 struct tb_sw_lookup lookup; 2931 struct device *dev; 2932 2933 memset(&lookup, 0, sizeof(lookup)); 2934 lookup.tb = tb; 2935 lookup.link = link; 2936 lookup.depth = depth; 2937 2938 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match); 2939 if (dev) 2940 return tb_to_switch(dev); 2941 2942 return NULL; 2943 } 2944 2945 /** 2946 * tb_switch_find_by_uuid() - Find switch by UUID 2947 * @tb: Domain the switch belongs 2948 * @uuid: UUID to look for 2949 * 2950 * Returned switch has reference count increased so the caller needs to 2951 * call tb_switch_put() when done with the switch. 2952 */ 2953 struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid) 2954 { 2955 struct tb_sw_lookup lookup; 2956 struct device *dev; 2957 2958 memset(&lookup, 0, sizeof(lookup)); 2959 lookup.tb = tb; 2960 lookup.uuid = uuid; 2961 2962 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match); 2963 if (dev) 2964 return tb_to_switch(dev); 2965 2966 return NULL; 2967 } 2968 2969 /** 2970 * tb_switch_find_by_route() - Find switch by route string 2971 * @tb: Domain the switch belongs 2972 * @route: Route string to look for 2973 * 2974 * Returned switch has reference count increased so the caller needs to 2975 * call tb_switch_put() when done with the switch. 
2976 */ 2977 struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route) 2978 { 2979 struct tb_sw_lookup lookup; 2980 struct device *dev; 2981 2982 if (!route) 2983 return tb_switch_get(tb->root_switch); 2984 2985 memset(&lookup, 0, sizeof(lookup)); 2986 lookup.tb = tb; 2987 lookup.route = route; 2988 2989 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match); 2990 if (dev) 2991 return tb_to_switch(dev); 2992 2993 return NULL; 2994 } 2995 2996 /** 2997 * tb_switch_find_port() - return the first port of @type on @sw or NULL 2998 * @sw: Switch to find the port from 2999 * @type: Port type to look for 3000 */ 3001 struct tb_port *tb_switch_find_port(struct tb_switch *sw, 3002 enum tb_port_type type) 3003 { 3004 struct tb_port *port; 3005 3006 tb_switch_for_each_port(sw, port) { 3007 if (port->config.type == type) 3008 return port; 3009 } 3010 3011 return NULL; 3012 } 3013