// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include "tb.h"

/* Switch NVM support */

#define NVM_CSS			0x10

struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

enum nvm_write_ops {
	WRITE_AND_AUTHENTICATE = 1,
	WRITE_ONLY = 2,
};

/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);

static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}

static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	if (!st) {
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}

static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}

static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size, hdr_size;
	const u8 *buf = sw->nvm->buf;
	u16 ds_size;
	int ret;

	if (!buf)
		return -EINVAL;

	image_size = sw->nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	if (!sw->safe_mode) {
		u16 device_id;

		/*
		 * Make sure the device ID in the image matches the one
		 * we read from the switch config space.
		 */
		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
		if (device_id != sw->config.device_id)
			return -EINVAL;

		if (sw->generation < 3) {
			/* Write CSS headers first */
			ret = dma_port_flash_write(sw->dma_port,
				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
				DMA_PORT_CSS_MAX_SIZE);
			if (ret)
				return ret;
		}

		/* Skip headers in the image */
		buf += hdr_size;
		image_size -= hdr_size;
	}

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
	else
		ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
	if (!ret)
		sw->nvm->flushed = true;
	return ret;
}

static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
{
	int ret = 0;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		u32 status;

		ret = tb_domain_disconnect_all_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this if
		 * everything goes well so getting timeout is expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		if (!ret || ret == -ETIMEDOUT)
			return 0;

		/*
		 * Any error from update auth operation requires power
		 * cycling of the host router.
		 */
		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
			nvm_set_auth_status(sw, status);
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return ret;
}

static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	switch (ret) {
	case 0:
	case -ETIMEDOUT:
	case -EACCES:
	case -EINVAL:
		/* Power cycle is required */
		break;
	default:
		return ret;
	}

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get timeout for a while). Once
	 * we get a response the device needs to be power cycled in order
	 * for the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;
		if (ret > 0) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}

static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	/*
	 * During host router NVM upgrade we should not allow root port to
	 * go into D3cold because some root ports cannot trigger PME
	 * themselves. To be on the safe side keep the root port in D0 during
	 * the whole upgrade process.
	 */
	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_get_noresume(&root_port->dev);
}

static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_put(&root_port->dev);
}

static inline bool nvm_readable(struct tb_switch *sw)
{
	if (tb_switch_is_usb4(sw)) {
		/*
		 * USB4 devices must support NVM operations but it is
		 * optional for hosts. Therefore we query the NVM sector
		 * size here and if it is supported assume NVM
		 * operations are implemented.
		 */
		return usb4_switch_nvm_sector_size(sw) > 0;
	}

	/* Thunderbolt 2 and 3 devices support NVM through DMA port */
	return !!sw->dma_port;
}

static inline bool nvm_upgradeable(struct tb_switch *sw)
{
	if (sw->no_nvm_upgrade)
		return false;
	return nvm_readable(sw);
}

static inline int nvm_read(struct tb_switch *sw, unsigned int address,
			   void *buf, size_t size)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_nvm_read(sw, address, buf, size);
	return dma_port_flash_read(sw->dma_port, address, buf, size);
}

static int nvm_authenticate(struct tb_switch *sw)
{
	int ret;

	if (tb_switch_is_usb4(sw))
		return usb4_switch_nvm_authenticate(sw);

	if (!tb_route(sw)) {
		nvm_authenticate_start_dma_port(sw);
		ret = nvm_authenticate_host_dma_port(sw);
	} else {
		ret = nvm_authenticate_device_dma_port(sw);
	}

	return ret;
}

static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
			      size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = nvm_read(sw, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
			       size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	/*
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	 */
	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

	return ret;
}

static int tb_switch_nvm_add(struct tb_switch *sw)
{
	struct tb_nvm *nvm;
	u32 val;
	int ret;

	if (!nvm_readable(sw))
		return 0;

	/*
	 * The NVM format of non-Intel hardware is not known so
	 * currently restrict NVM upgrade to Intel hardware. We may
	 * relax this in the future when we learn other NVM formats.
	 */
	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
	    sw->config.vendor_id != 0x8087) {
		dev_info(&sw->dev,
			 "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
			 sw->config.vendor_id);
		return 0;
	}

	nvm = tb_nvm_alloc(&sw->dev);
	if (IS_ERR(nvm))
		return PTR_ERR(nvm);

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (!sw->safe_mode) {
		u32 nvm_size, hdr_size;

		ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
		if (ret)
			goto err_nvm;

		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
		nvm_size = (SZ_1M << (val & 7)) / 8;
		nvm_size = (nvm_size - hdr_size) / 2;

		ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
		if (ret)
			goto err_nvm;

		nvm->major = val >> 16;
		nvm->minor = val >> 8;

		ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read);
		if (ret)
			goto err_nvm;
	}

	if (!sw->no_nvm_upgrade) {
		ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE,
					    tb_switch_nvm_write);
		if (ret)
			goto err_nvm;
	}

	sw->nvm = nvm;
	return 0;

err_nvm:
	tb_nvm_free(nvm);
	return ret;
}

static void tb_switch_nvm_remove(struct tb_switch *sw)
{
	struct tb_nvm *nvm;

	nvm = sw->nvm;
	sw->nvm = NULL;

	if (!nvm)
		return;

	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);

	tb_nvm_free(nvm);
}

/* port utility functions */

static const char *tb_port_type(struct tb_regs_port_header *port)
{
	switch (port->type >> 16) {
	case 0:
		switch ((u8)port->type) {
		case 0:
			return "Inactive";
		case 1:
			return "Port";
		case 2:
			return "NHI";
		default:
			return "unknown";
		}
	case 0x2:
		return "Ethernet";
	case 0x8:
		return "SATA";
	case 0xe:
		return "DP/HDMI";
	case 0x10:
		return "PCIe";
	case 0x20:
		return "USB";
	default:
		return "unknown";
	}
}

static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
{
	tb_dbg(tb,
	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
	       port->port_number, port->vendor_id, port->device_id,
	       port->revision, port->thunderbolt_version, tb_port_type(port),
	       port->type);
	tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
	       port->max_in_hop_id, port->max_out_hop_id);
	tb_dbg(tb, " Max counters: %d\n", port->max_counters);
	tb_dbg(tb, " NFC Credits: %#x\n", port->nfc_credits);
}

/**
 * tb_port_state() - get connectedness state of a port
 * @port: the port to check
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
int tb_port_state(struct tb_port *port)
{
	struct tb_cap_phy phy;
	int res;
	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}
	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
	if (res)
		return res;
	return phy.state;
}

/**
 * tb_wait_for_port() - wait for a port to become ready
 * @port: Port to wait
 * @wait_if_unplugged: Wait also when port is unplugged
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the
 * link has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
	int retries = 10;
	int state;
	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

	while (retries--) {
		state = tb_port_state(port);
		if (state < 0)
			return state;
		if (state == TB_PORT_DISABLED) {
			tb_port_dbg(port, "is disabled (state: 0)\n");
			return 0;
		}
		if (state == TB_PORT_UNPLUGGED) {
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_dbg(port,
					    "is unplugged (state: 7), retrying...\n");
				msleep(100);
				continue;
			}
			tb_port_dbg(port, "is unplugged (state: 7)\n");
			return 0;
		}
		if (state == TB_PORT_UP) {
			tb_port_dbg(port, "is connected, link is up (state: 2)\n");
			return 1;
		}

		/*
		 * After plug-in the state is TB_PORT_CONNECTING. Give it some
		 * time.
		 */
		tb_port_dbg(port,
			    "is connected, link is not up (state: %d), retrying...\n",
			    state);
		msleep(100);
	}
	tb_port_warn(port,
		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
}

/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 * @port: Port to add/remove NFC credits
 * @credits: Credits to add/remove
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
	u32 nfc_credits;

	if (credits == 0 || port->sw->is_unplugged)
		return 0;

	/*
	 * USB4 restricts programming NFC buffers to lane adapters only
	 * so skip other ports.
	 */
	if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
		return 0;

	nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	nfc_credits += credits;

	tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
		    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);

	port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
	port->config.nfc_credits |= nfc_credits;

	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, ADP_CS_4, 1);
}

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 * @port: Port whose counters to clear
 * @counter: Counter index to clear
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
	u32 zero[3] = { 0, 0, 0 };
	tb_port_dbg(port, "clearing counter %d\n", counter);
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}

/**
 * tb_port_unlock() - Unlock downstream port
 * @port: Port to unlock
 *
 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
 * downstream router accessible for CM.
 */
int tb_port_unlock(struct tb_port *port)
{
	if (tb_switch_is_icm(port->sw))
		return 0;
	if (!tb_port_is_null(port))
		return -EINVAL;
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_unlock(port);
	return 0;
}

static int __tb_port_enable(struct tb_port *port, bool enable)
{
	int ret;
	u32 phy;

	if (!tb_port_is_null(port))
		return -EINVAL;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (enable)
		phy &= ~LANE_ADP_CS_1_LD;
	else
		phy |= LANE_ADP_CS_1_LD;

	return tb_port_write(port, &phy, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

/**
 * tb_port_enable() - Enable lane adapter
 * @port: Port to enable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to enable them.
 */
int tb_port_enable(struct tb_port *port)
{
	return __tb_port_enable(port, true);
}

/**
 * tb_port_disable() - Disable lane adapter
 * @port: Port to disable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to disable them.
 */
int tb_port_disable(struct tb_port *port)
{
	return __tb_port_enable(port, false);
}

/*
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
	int res;
	int cap;

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res) {
		if (res == -ENODEV) {
			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
			       port->port);
			port->disabled = true;
			return 0;
		}
		return res;
	}

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT && port->port != 0) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");

		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
		if (cap > 0)
			port->cap_usb4 = cap;
	} else if (port->port != 0) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
		if (cap > 0)
			port->cap_adap = cap;
	}

	tb_dump_port(port->sw->tb, &port->config);

	INIT_LIST_HEAD(&port->list);
	return 0;
}

static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
			       int max_hopid)
{
	int port_max_hopid;
	struct ida *ida;

	if (in) {
		port_max_hopid = port->config.max_in_hop_id;
		ida = &port->in_hopids;
	} else {
		port_max_hopid = port->config.max_out_hop_id;
		ida = &port->out_hopids;
	}

	/*
	 * NHI can use HopIDs 1-max; for other adapters HopIDs 0-7 are
	 * reserved.
	 */
	if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
		min_hopid = TB_PATH_MIN_HOPID;

	if (max_hopid < 0 || max_hopid > port_max_hopid)
		max_hopid = port_max_hopid;

	return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
}

/**
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
}

/**
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
}

/**
 * tb_port_release_in_hopid() - Release allocated input HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->in_hopids, hopid);
}

/**
 * tb_port_release_out_hopid() - Release allocated output HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->out_hopids, hopid);
}
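
/*
 * Illustrative usage (a sketch, not called from this file): a caller
 * setting up a path hop would typically reserve an input HopID and
 * release it again when the path is torn down:
 *
 *	hopid = tb_port_alloc_in_hopid(port, TB_PATH_MIN_HOPID, -1);
 *	if (hopid < 0)
 *		return hopid;
 *	...
 *	tb_port_release_in_hopid(port, hopid);
 *
 * Passing a negative maximum selects the largest HopID the port
 * supports (see tb_port_alloc_hopid() above).
 */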

static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
					  const struct tb_switch *sw)
{
	u64 mask = (1ULL << parent->config.depth * 8) - 1;
	return (tb_route(parent) & mask) == (tb_route(sw) & mask);
}

/**
 * tb_next_port_on_path() - Return next port for given port on a path
 * @start: Start port of the walk
 * @end: End port of the walk
 * @prev: Previous port (%NULL if this is the first)
 *
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If the @prev is dual
 * link port, the function follows that link and returns another end on
 * that same link.
 *
 * If the @end port has been reached, return %NULL.
 *
 * Domain tb->lock must be held when this function is called.
 */
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
				     struct tb_port *prev)
{
	struct tb_port *next;

	if (!prev)
		return start;

	if (prev->sw == end->sw) {
		if (prev == end)
			return NULL;
		return end;
	}

	if (tb_switch_is_reachable(prev->sw, end->sw)) {
		next = tb_port_at(tb_route(end->sw), prev->sw);
		/* Walk down the topology if next == prev */
		if (prev->remote &&
		    (next == prev || next->dual_link_port == prev))
			next = prev->remote;
	} else {
		if (tb_is_upstream_port(prev)) {
			next = prev->remote;
		} else {
			next = tb_upstream_port(prev->sw);
			/*
			 * Keep the same link if prev and next are both
			 * dual link ports.
			 */
			if (next->dual_link_port &&
			    next->link_nr != prev->link_nr) {
				next = next->dual_link_port;
			}
		}
	}

	return next != prev ? next : NULL;
}

/**
 * tb_port_get_link_speed() - Get current link speed
 * @port: Port to check (USB4 or CIO)
 *
 * Returns link speed in Gb/s or negative errno in case of failure.
 */
int tb_port_get_link_speed(struct tb_port *port)
{
	u32 val, speed;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
		LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
	return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
}

/**
 * tb_port_get_link_width() - Get current link width
 * @port: Port to check (USB4 or CIO)
 *
 * Returns link width. Return values can be 1 (Single-Lane), 2 (Dual-Lane)
 * or negative errno in case of failure.
 */
int tb_port_get_link_width(struct tb_port *port)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
		LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
}

static bool tb_port_is_width_supported(struct tb_port *port, int width)
{
	u32 phy, widths;
	int ret;

	if (!port->cap_phy)
		return false;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);
	if (ret)
		return false;

	widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
		LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;

	return !!(widths & width);
}

static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
	switch (width) {
	case 1:
		val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;
	case 2:
		val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;
	default:
		return -EINVAL;
	}

	val |= LANE_ADP_CS_1_LB;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

/**
 * tb_port_lane_bonding_enable() - Enable bonding on port
 * @port: port to enable
 *
 * Enable bonding by setting the link width of the port and the
 * other port in case of dual link port.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_port_lane_bonding_enable(struct tb_port *port)
{
	int ret;

	/*
	 * Enable lane bonding for both links if not already enabled by,
	 * for example, the boot firmware.
	 */
	ret = tb_port_get_link_width(port);
	if (ret == 1) {
		ret = tb_port_set_link_width(port, 2);
		if (ret)
			return ret;
	}

	ret = tb_port_get_link_width(port->dual_link_port);
	if (ret == 1) {
		ret = tb_port_set_link_width(port->dual_link_port, 2);
		if (ret) {
			tb_port_set_link_width(port, 1);
			return ret;
		}
	}

	port->bonded = true;
	port->dual_link_port->bonded = true;

	return 0;
}

/**
 * tb_port_lane_bonding_disable() - Disable bonding on port
 * @port: port to disable
 *
 * Disable bonding by setting the link width of the port and the
 * other port in case of dual link port.
 *
 */
void tb_port_lane_bonding_disable(struct tb_port *port)
{
	port->dual_link_port->bonded = false;
	port->bonded = false;

	tb_port_set_link_width(port->dual_link_port, 1);
	tb_port_set_link_width(port, 1);
}

static int tb_port_start_lane_initialization(struct tb_port *port)
{
	int ret;

	if (tb_switch_is_usb4(port->sw))
		return 0;

	ret = tb_lc_start_lane_initialization(port);
	return ret == -EINVAL ? 0 : ret;
}

/**
 * tb_port_is_enabled() - Is the adapter port enabled
 * @port: Port to check
 */
bool tb_port_is_enabled(struct tb_port *port)
{
	switch (port->config.type) {
	case TB_TYPE_PCIE_UP:
	case TB_TYPE_PCIE_DOWN:
		return tb_pci_port_is_enabled(port);

	case TB_TYPE_DP_HDMI_IN:
	case TB_TYPE_DP_HDMI_OUT:
		return tb_dp_port_is_enabled(port);

	case TB_TYPE_USB3_UP:
	case TB_TYPE_USB3_DOWN:
		return tb_usb3_port_is_enabled(port);

	default:
		return false;
	}
}

/**
 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
 * @port: USB3 adapter port to check
 */
bool tb_usb3_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_USB3_CS_0, 1))
		return false;

	return !!(data & ADP_USB3_CS_0_PE);
}

/**
 * tb_usb3_port_enable() - Enable USB3 adapter port
 * @port: USB3 adapter port to enable
 * @enable: Enable/disable the USB3 adapter
 */
int tb_usb3_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
			  : ADP_USB3_CS_0_V;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_USB3_CS_0, 1);
}

/**
 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
 * @port: PCIe port to check
 */
bool tb_pci_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_PCIE_CS_0, 1))
		return false;

	return !!(data & ADP_PCIE_CS_0_PE);
}

/**
 * tb_pci_port_enable() - Enable PCIe adapter port
 * @port: PCIe port to enable
 * @enable: Enable/disable the PCIe adapter
 */
int tb_pci_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_PCIE_CS_0, 1);
}

/**
 * tb_dp_port_hpd_is_active() - Is HPD already active
 * @port: DP out port to check
 *
 * Checks if the DP OUT adapter port has HDP bit already set.
 */
int tb_dp_port_hpd_is_active(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	return !!(data & ADP_DP_CS_2_HDP);
}

/**
 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
 * @port: Port to clear HPD
 *
 * If the DP IN port has HDP set, this function can be used to clear it.
 */
int tb_dp_port_hpd_clear(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_3, 1);
	if (ret)
		return ret;

	data |= ADP_DP_CS_3_HDPC;
	return tb_port_write(port, &data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_3, 1);
}

/**
 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
 * @port: DP IN/OUT port to set hops
 * @video: Video Hop ID
 * @aux_tx: AUX TX Hop ID
 * @aux_rx: AUX RX Hop ID
 *
 * Programs specified Hop IDs for DP IN/OUT port.
 */
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
			unsigned int aux_tx, unsigned int aux_rx)
{
	u32 data[2];
	int ret;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	/* Clear the video and both AUX Hop ID fields before programming */
	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
		ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
		ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}
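
/*
 * Illustrative ordering (a sketch, not called from this file): per the
 * kernel-doc of tb_dp_port_enable() below, a caller is expected to
 * program the Hop IDs first and only then enable the DP paths, e.g.
 *
 *	ret = tb_dp_port_set_hops(port, video, aux_tx, aux_rx);
 *	if (!ret)
 *		ret = tb_dp_port_enable(port, true);
 */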

/**
 * tb_dp_port_is_enabled() - Is DP adapter port enabled
 * @port: DP adapter port to check
 */
bool tb_dp_port_is_enabled(struct tb_port *port)
{
	u32 data[2];

	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
			 ARRAY_SIZE(data)))
		return false;

	return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
}

/**
 * tb_dp_port_enable() - Enables/disables DP paths of a port
 * @port: DP IN/OUT port
 * @enable: Enable/disable DP path
 *
 * Once Hop IDs are programmed DP paths can be enabled or disabled by
 * calling this function.
 */
int tb_dp_port_enable(struct tb_port *port, bool enable)
{
	u32 data[2];
	int ret;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	if (enable)
		data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
	else
		data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/* switch utility functions */

static const char *tb_switch_generation_name(const struct tb_switch *sw)
{
	switch (sw->generation) {
	case 1:
		return "Thunderbolt 1";
	case 2:
		return "Thunderbolt 2";
	case 3:
		return "Thunderbolt 3";
	case 4:
		return "USB4";
	default:
		return "Unknown";
	}
}

static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
{
	const struct tb_regs_switch_header *regs = &sw->config;

	tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
	       tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version);
	tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number);
	tb_dbg(tb, " Config:\n");
	tb_dbg(tb,
	       " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
	       regs->upstream_port_number, regs->depth,
	       (((u64) regs->route_hi) << 32) | regs->route_lo,
	       regs->enabled, regs->plug_events_delay);
	tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
	       regs->__unknown1, regs->__unknown4);
}

/**
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 * @sw: Switch to reset
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_switch_reset(struct tb_switch *sw)
{
	struct tb_cfg_result res;

	if (sw->generation > 1)
		return 0;

	tb_sw_dbg(sw, "resetting switch\n");

	res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
			      TB_CFG_SWITCH, 2, 2);
	if (res.err)
		return res.err;
	res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
	if (res.err > 0)
		return -EIO;
	return res.err;
}

/*
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
	u32 data;
	int res;

	if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
		return 0;

	sw->config.plug_events_delay = 0xff;
	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
	if (res)
		return res;

	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
	if (res)
		return res;

	if (active) {
		data = data & 0xFFFFFF83;
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
			break;
		default:
			data |= 4;
		}
	} else {
		data = data | 0x7c;
	}
	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
			   sw->cap_plug_events + 1, 1);
}

static ssize_t authorized_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->authorized);
}

static int disapprove_switch(struct device *dev, void *not_used)
{
	struct tb_switch *sw;

	sw = tb_to_switch(dev);
	if (sw && sw->authorized) {
		int ret;

		/* First children */
		ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
		if (ret)
			return ret;

		ret = tb_domain_disapprove_switch(sw->tb, sw);
		if (ret)
			return ret;

		sw->authorized = 0;
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
	}

	return 0;
}

static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
	int ret = -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (!!sw->authorized == !!val)
		goto unlock;

	switch (val) {
	/* Disapprove switch */
	case 0:
		if (tb_route(sw)) {
			ret = disapprove_switch(&sw->dev, NULL);
			goto unlock;
		}
		break;

	/* Approve switch */
	case 1:
		if (sw->key)
			ret = tb_domain_approve_switch_key(sw->tb, sw);
		else
			ret = tb_domain_approve_switch(sw->tb, sw);
		break;

	/* Challenge switch */
	case 2:
		if (sw->key)
			ret = tb_domain_challenge_switch_key(sw->tb, sw);
		break;

	default:
		break;
	}

	if (!ret) {
		sw->authorized = val;
		/* Notify status change to the userspace */
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
	}

unlock:
	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t authorized_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int val;
	ssize_t ret;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 2)
		return -EINVAL;

	pm_runtime_get_sync(&sw->dev);
	ret = tb_switch_set_authorized(sw, val);
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(authorized);

static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->boot);
}
static DEVICE_ATTR_RO(boot);

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
}
static DEVICE_ATTR_RO(device_name);

static ssize_t
generation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->generation);
}
static DEVICE_ATTR_RO(generation);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	ssize_t ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->key)
		ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
	else
		ret = sprintf(buf, "\n");

	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t key_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u8 key[TB_SWITCH_KEY_SIZE];
	ssize_t ret = count;
	bool clear = false;

	if (!strcmp(buf, "\n"))
		clear = true;
	else if (hex2bin(key, buf, sizeof(key)))
		return -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->authorized) {
		ret = -EBUSY;
	} else {
		kfree(sw->key);
		if (clear) {
			sw->key = NULL;
		} else {
			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
			if (!sw->key)
				ret = -ENOMEM;
		}
	}

	mutex_unlock(&sw->tb->lock);
	return ret;
}
static DEVICE_ATTR(key, 0600, key_show, key_store);

static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
}

/*
 * Currently all lanes must run at the same speed but we expose here
 * both directions to allow possible asymmetric links in the future.
 */
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);

static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->link_width);
}

/*
 * Currently the link has the same number of lanes in both directions (1 or 2)
 * but expose them separately to allow possible asymmetric links in the future.
 */
static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);

static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u32 status;

	nvm_get_auth_status(sw, &status);
	return sprintf(buf, "%#x\n", status);
}

static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
				      bool disconnect)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int val;
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	/* If NVMem devices are not yet added */
	if (!sw->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear the authentication status */
	nvm_clear_auth_status(sw);

	if (val > 0) {
		if (!sw->nvm->flushed) {
			if (!sw->nvm->buf) {
				ret = -EINVAL;
				goto exit_unlock;
			}

			ret = nvm_validate_and_write(sw);
			if (ret || val == WRITE_ONLY)
				goto exit_unlock;
		}
		if (val == WRITE_AND_AUTHENTICATE) {
			if (disconnect) {
				ret = tb_lc_force_power(sw);
			} else {
				sw->nvm->authenticating = true;
				ret = nvm_authenticate(sw);
			}
		}
	}

exit_unlock:
	mutex_unlock(&sw->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	int ret = nvm_authenticate_sysfs(dev, buf, false);
	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return nvm_authenticate_show(dev, attr, buf);
}

static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;

	ret = nvm_authenticate_sysfs(dev, buf, true);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);

static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->safe_mode)
		ret = -ENODATA;
	else if (!sw->nvm)
		ret = -EAGAIN;
	else
		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);

	mutex_unlock(&sw->tb->lock);

	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%pUb\n", sw->uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *switch_attrs[] = {
	&dev_attr_authorized.attr,
	&dev_attr_boot.attr,
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_generation.attr,
	&dev_attr_key.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_authenticate_on_disconnect.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_rx_speed.attr,
	&dev_attr_rx_lanes.attr,
	&dev_attr_tx_speed.attr,
	&dev_attr_tx_lanes.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	&dev_attr_unique_id.attr,
	NULL,
};

static bool has_port(const struct tb_switch *sw, enum tb_port_type type)
{
	const struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!port->disabled && port->config.type == type)
			return true;
	}

	return false;
}

static umode_t switch_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct tb_switch *sw = tb_to_switch(dev);

	if (attr == &dev_attr_authorized.attr) {
		if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
		    sw->tb->security_level == TB_SECURITY_DPONLY ||
		    !has_port(sw, TB_TYPE_PCIE_UP))
			return 0;
	} else if (attr == &dev_attr_device.attr) {
		if (!sw->device)
			return 0;
	} else if (attr == &dev_attr_device_name.attr) {
		if (!sw->device_name)
			return 0;
	} else if (attr == &dev_attr_vendor.attr) {
		if (!sw->vendor)
			return 0;
	} else if (attr == &dev_attr_vendor_name.attr) {
		if (!sw->vendor_name)
			return 0;
	} else if (attr == &dev_attr_key.attr) {
		if (tb_route(sw) &&
		    sw->tb->security_level == TB_SECURITY_SECURE &&
		    sw->security_level == TB_SECURITY_SECURE)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_rx_speed.attr ||
		   attr == &dev_attr_rx_lanes.attr ||
		   attr == &dev_attr_tx_speed.attr ||
		   attr == &dev_attr_tx_lanes.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate.attr) {
		if (nvm_upgradeable(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_version.attr) {
		if (nvm_readable(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_boot.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
		if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
			return attr->mode;
		return 0;
	}

	return sw->safe_mode ? 0 : attr->mode;
}

static const struct attribute_group switch_group = {
	.is_visible = switch_attr_is_visible,
	.attrs = switch_attrs,
};

static const struct attribute_group *switch_groups[] = {
	&switch_group,
	NULL,
};

static void tb_switch_release(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	struct tb_port *port;

	dma_port_free(sw->dma_port);

	tb_switch_for_each_port(sw, port) {
		ida_destroy(&port->in_hopids);
		ida_destroy(&port->out_hopids);
	}

	kfree(sw->uuid);
	kfree(sw->device_name);
	kfree(sw->vendor_name);
	kfree(sw->ports);
	kfree(sw->drom);
	kfree(sw->key);
	kfree(sw);
}

static int tb_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const char *type;

	if (sw->config.thunderbolt_version == USB4_VERSION_1_0) {
		if (add_uevent_var(env, "USB4_VERSION=1.0"))
			return -ENOMEM;
	}

	if (!tb_route(sw)) {
		type = "host";
	} else {
		const struct tb_port *port;
		bool hub = false;

		/* Device is hub if it has any downstream ports */
		tb_switch_for_each_port(sw, port) {
			if (!port->disabled && !tb_is_upstream_port(port) &&
			    tb_port_is_null(port)) {
				hub = true;
				break;
			}
		}

		type = hub ? "hub" : "device";
	}

	if (add_uevent_var(env, "USB4_TYPE=%s", type))
		return -ENOMEM;
	return 0;
}

/*
 * Currently only need to provide the callbacks. Everything else is handled
 * in the connection manager.
 */
static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_suspend_switch)
		return cm_ops->runtime_suspend_switch(sw);

	return 0;
}

static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_resume_switch)
		return cm_ops->runtime_resume_switch(sw);
	return 0;
}

static const struct dev_pm_ops tb_switch_pm_ops = {
	SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
			   NULL)
};

struct device_type tb_switch_type = {
	.name = "thunderbolt_device",
	.release = tb_switch_release,
	.uevent = tb_switch_uevent,
	.pm = &tb_switch_pm_ops,
};

static int tb_switch_get_generation(struct tb_switch *sw)
{
	switch (sw->config.device_id) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
	case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
		return 1;

	case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
		return 2;

	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ICL_NHI0:
	case PCI_DEVICE_ID_INTEL_ICL_NHI1:
		return 3;

	default:
		if (tb_switch_is_usb4(sw))
			return 4;

		/*
		 * For unknown switches assume generation to be 1 to be
		 * on the safe side.
		 */
		tb_sw_warn(sw, "unsupported switch device id %#x\n",
			   sw->config.device_id);
		return 1;
	}
}

static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
{
	int max_depth;

	if (tb_switch_is_usb4(sw) ||
	    (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
		max_depth = USB4_SWITCH_MAX_DEPTH;
	else
		max_depth = TB_SWITCH_MAX_DEPTH;

	return depth > max_depth;
}

/**
 * tb_switch_alloc() - allocate a switch
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * Allocates and initializes a switch. Will not upload configuration to
 * the switch. For that you need to call tb_switch_configure()
 * separately. The returned switch should be released by calling
 * tb_switch_put().
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of
 * failure.
 */
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
				  u64 route)
{
	struct tb_switch *sw;
	int upstream_port;
	int i, ret, depth;

	/* Unlock the downstream port so we can access the switch below */
	if (route) {
		struct tb_switch *parent_sw = tb_to_switch(parent);
		struct tb_port *down;

		down = tb_port_at(route, parent_sw);
		tb_port_unlock(down);
	}

	depth = tb_route_length(route);

	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
	if (upstream_port < 0)
		return ERR_PTR(upstream_port);

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	sw->tb = tb;
	ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
	if (ret)
		goto err_free_sw_ports;

	sw->generation = tb_switch_get_generation(sw);

	tb_dbg(tb, "current switch config:\n");
	tb_dump_switch(tb, sw);

	/* configure switch */
	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = depth;
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->config.enabled = 0;

	/* Make sure we do not exceed maximum topology limit */
	if (tb_switch_exceeds_max_depth(sw, depth)) {
		ret = -EADDRNOTAVAIL;
		goto err_free_sw_ports;
	}

	/* initialize ports */
	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
			    GFP_KERNEL);
	if (!sw->ports) {
		ret = -ENOMEM;
		goto err_free_sw_ports;
	}

	for (i = 0; i <= sw->config.max_port_number; i++) {
		/* minimum setup for tb_find_cap and tb_drom_read to work */
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;

		/* Control port does not need HopID allocation */
		if (i) {
			ida_init(&sw->ports[i].in_hopids);
			ida_init(&sw->ports[i].out_hopids);
		}
	}

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
	if (ret > 0)
		sw->cap_plug_events = ret;

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
	if (ret > 0)
		sw->cap_lc = ret;

	/* Root switch is always authorized */
	if (!route)
		sw->authorized = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;

err_free_sw_ports:
	kfree(sw->ports);
	kfree(sw);

	return ERR_PTR(ret);
}

/**
 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * This creates a switch in safe mode. This means the switch pretty much
 * lacks all capabilities except the DMA configuration port before it is
 * flashed with a valid NVM firmware.
 *
 * The returned switch must be released by calling tb_switch_put().
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
 */
struct tb_switch *
tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
{
	struct tb_switch *sw;

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	sw->tb = tb;
	sw->config.depth = tb_route_length(route);
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->safe_mode = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;
}

/**
 * tb_switch_configure() - Uploads configuration to the switch
 * @sw: Switch to configure
 *
 * Call this function before the switch is added to the system. It will
 * upload the configuration to the switch and make it available for the
 * connection manager to use. Can be called again after resume from low
 * power states to re-initialize the switch.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_configure(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	u64 route;
	int ret;

	route = tb_route(sw);

	tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
	       sw->config.enabled ? "restoring" : "initializing", route,
	       tb_route_length(route), sw->config.upstream_port_number);

	sw->config.enabled = 1;

	if (tb_switch_is_usb4(sw)) {
		/*
		 * For USB4 devices, we need to program the CM version
		 * accordingly so that it knows to expose all the
		 * additional capabilities.

/**
 * tb_switch_configure() - Uploads configuration to the switch
 * @sw: Switch to configure
 *
 * Call this function before the switch is added to the system. It will
 * upload the configuration to the switch and make it available for the
 * connection manager to use. Can be called for the switch again after
 * resume from low power states to re-initialize it.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_configure(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	u64 route;
	int ret;

	route = tb_route(sw);

	tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
	       sw->config.enabled ? "restoring" : "initializing", route,
	       tb_route_length(route), sw->config.upstream_port_number);

	sw->config.enabled = 1;

	if (tb_switch_is_usb4(sw)) {
		/*
		 * For USB4 devices, we need to program the CM version
		 * accordingly so that it knows to expose all the
		 * additional capabilities.
		 */
		sw->config.cmuv = USB4_VERSION_1_0;

		/* Enumerate the switch */
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
				  ROUTER_CS_1, 4);
		if (ret)
			return ret;

		ret = usb4_switch_setup(sw);
	} else {
		if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
			tb_sw_warn(sw, "unknown switch vendor id %#x\n",
				   sw->config.vendor_id);

		if (!sw->cap_plug_events) {
			tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
			return -ENODEV;
		}

		/* Enumerate the switch */
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
				  ROUTER_CS_1, 3);
	}
	if (ret)
		return ret;

	return tb_plug_events_active(sw, true);
}

static int tb_switch_set_uuid(struct tb_switch *sw)
{
	bool uid = false;
	u32 uuid[4];
	int ret;

	if (sw->uuid)
		return 0;

	if (tb_switch_is_usb4(sw)) {
		ret = usb4_switch_read_uid(sw, &sw->uid);
		if (ret)
			return ret;
		uid = true;
	} else {
		/*
		 * The newer controllers include fused UUID as part of
		 * link controller specific registers
		 */
		ret = tb_lc_read_uuid(sw, uuid);
		if (ret) {
			if (ret != -EINVAL)
				return ret;
			uid = true;
		}
	}

	if (uid) {
		/*
		 * ICM generates UUID based on UID and fills the upper
		 * two words with ones. This is not strictly following
		 * UUID format but we want to be compatible with it so
		 * we do the same here.
		 */
		uuid[0] = sw->uid & 0xffffffff;
		uuid[1] = (sw->uid >> 32) & 0xffffffff;
		uuid[2] = 0xffffffff;
		uuid[3] = 0xffffffff;
	}

	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
	if (!sw->uuid)
		return -ENOMEM;
	return 0;
}
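
/*
 * Worked example of the ICM-compatible fallback above (illustrative
 * only): a router with UID 0x0123456789abcdef ends up with the UUID
 * words
 *
 *	uuid[0] = 0x89abcdef	(low 32 bits of the UID)
 *	uuid[1] = 0x01234567	(high 32 bits of the UID)
 *	uuid[2] = 0xffffffff
 *	uuid[3] = 0xffffffff
 *
 * so a router keeps the same identity whether it was enumerated by the
 * ICM firmware or by this software connection manager.
 */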

static int tb_switch_add_dma_port(struct tb_switch *sw)
{
	u32 status;
	int ret;

	switch (sw->generation) {
	case 2:
		/* Only root switch can be upgraded */
		if (tb_route(sw))
			return 0;

		fallthrough;
	case 3:
	case 4:
		ret = tb_switch_set_uuid(sw);
		if (ret)
			return ret;
		break;

	default:
		/*
		 * DMA port is the only thing available when the switch
		 * is in safe mode.
		 */
		if (!sw->safe_mode)
			return 0;
		break;
	}

	if (sw->no_nvm_upgrade)
		return 0;

	if (tb_switch_is_usb4(sw)) {
		ret = usb4_switch_nvm_authenticate_status(sw, &status);
		if (ret)
			return ret;

		if (status) {
			tb_sw_info(sw, "switch flash authentication failed\n");
			nvm_set_auth_status(sw, status);
		}

		return 0;
	}

	/* Root switch DMA port requires running firmware */
	if (!tb_route(sw) && !tb_switch_is_icm(sw))
		return 0;

	sw->dma_port = dma_port_alloc(sw);
	if (!sw->dma_port)
		return 0;

	/*
	 * If there is status already set then authentication failed
	 * when the dma_port_flash_update_auth() returned. Power cycling
	 * is not needed (it was done already) so the only thing we do
	 * here is unblock runtime PM of the root port.
	 */
	nvm_get_auth_status(sw, &status);
	if (status) {
		if (!tb_route(sw))
			nvm_authenticate_complete_dma_port(sw);
		return 0;
	}

	/*
	 * Check the status of the previous flash authentication. If
	 * there was one, we need to power cycle the switch in any case
	 * to make it functional again.
	 */
	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
	if (ret <= 0)
		return ret;

	/* Now we can allow root port to suspend again */
	if (!tb_route(sw))
		nvm_authenticate_complete_dma_port(sw);

	if (status) {
		tb_sw_info(sw, "switch flash authentication failed\n");
		nvm_set_auth_status(sw, status);
	}

	tb_sw_info(sw, "power cycling the switch now\n");
	dma_port_power_cycle(sw->dma_port);

	/*
	 * We return an error here, which causes adding the switch to
	 * fail. It should appear back after the power cycle is complete.
	 */
	return -ESHUTDOWN;
}

static void tb_switch_default_link_ports(struct tb_switch *sw)
{
	int i;

	for (i = 1; i <= sw->config.max_port_number; i += 2) {
		struct tb_port *port = &sw->ports[i];
		struct tb_port *subordinate;

		if (!tb_port_is_null(port))
			continue;

		/* Check for the subordinate port */
		if (i == sw->config.max_port_number ||
		    !tb_port_is_null(&sw->ports[i + 1]))
			continue;

		/* Link them if not already done so (by DROM) */
		subordinate = &sw->ports[i + 1];
		if (!port->dual_link_port && !subordinate->dual_link_port) {
			port->link_nr = 0;
			port->dual_link_port = subordinate;
			subordinate->link_nr = 1;
			subordinate->dual_link_port = port;

			tb_sw_dbg(sw, "linked ports %d <-> %d\n",
				  port->port, subordinate->port);
		}
	}
}

static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
{
	const struct tb_port *up = tb_upstream_port(sw);

	if (!up->dual_link_port || !up->dual_link_port->remote)
		return false;

	if (tb_switch_is_usb4(sw))
		return usb4_switch_lane_bonding_possible(sw);
	return tb_lc_lane_bonding_possible(sw);
}

static int tb_switch_update_link_attributes(struct tb_switch *sw)
{
	struct tb_port *up;
	bool change = false;
	int ret;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	up = tb_upstream_port(sw);

	ret = tb_port_get_link_speed(up);
	if (ret < 0)
		return ret;
	if (sw->link_speed != ret)
		change = true;
	sw->link_speed = ret;

	ret = tb_port_get_link_width(up);
	if (ret < 0)
		return ret;
	if (sw->link_width != ret)
		change = true;
	sw->link_width = ret;

	/* Notify userspace that there is possible link attribute change */
	if (device_is_registered(&sw->dev) && change)
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);

	return 0;
}
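
/*
 * Illustrative note, not part of the original sources: lane adapters of
 * a router come in pairs, which is why tb_switch_default_link_ports()
 * walks odd port numbers and pairs each one with the next adapter
 * (1 <-> 2, 3 <-> 4, ...) unless the DROM already described the pairing.
 * Once both lanes of such a pair are bonded, tb_port_get_link_width() on
 * the upstream adapter reports 2 and the KOBJ_CHANGE uevent above tells
 * userspace that the link attributes may have changed.
 */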

/**
 * tb_switch_lane_bonding_enable() - Enable lane bonding
 * @sw: Switch to enable lane bonding
 *
 * Connection manager can call this function to enable lane bonding of a
 * switch. If conditions are correct and both switches support the feature,
 * lanes are bonded. It is safe to call this for any switch.
 */
int tb_switch_lane_bonding_enable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
	struct tb_port *up, *down;
	u64 route = tb_route(sw);
	int ret;

	if (!route)
		return 0;

	if (!tb_switch_lane_bonding_possible(sw))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_port_at(route, parent);

	if (!tb_port_is_width_supported(up, 2) ||
	    !tb_port_is_width_supported(down, 2))
		return 0;

	ret = tb_port_lane_bonding_enable(up);
	if (ret) {
		tb_port_warn(up, "failed to enable lane bonding\n");
		return ret;
	}

	ret = tb_port_lane_bonding_enable(down);
	if (ret) {
		tb_port_warn(down, "failed to enable lane bonding\n");
		tb_port_lane_bonding_disable(up);
		return ret;
	}

	tb_switch_update_link_attributes(sw);

	tb_sw_dbg(sw, "lane bonding enabled\n");
	return ret;
}

/**
 * tb_switch_lane_bonding_disable() - Disable lane bonding
 * @sw: Switch whose lane bonding to disable
 *
 * Disables lane bonding between @sw and parent. This can be called even
 * if lanes were not bonded originally.
 */
void tb_switch_lane_bonding_disable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
	struct tb_port *up, *down;

	if (!tb_route(sw))
		return;

	up = tb_upstream_port(sw);
	if (!up->bonded)
		return;

	down = tb_port_at(tb_route(sw), parent);

	tb_port_lane_bonding_disable(up);
	tb_port_lane_bonding_disable(down);

	tb_switch_update_link_attributes(sw);
	tb_sw_dbg(sw, "lane bonding disabled\n");
}

/**
 * tb_switch_configure_link() - Set link configured
 * @sw: Switch whose link is configured
 *
 * Sets the link upstream from @sw configured (from both ends) so that
 * it will not be disconnected when the domain exits sleep. Can be
 * called for any switch.
 *
 * It is recommended that this is called after lane bonding is enabled.
 *
 * Returns %0 on success and negative errno in case of error.
 */
int tb_switch_configure_link(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	up = tb_upstream_port(sw);
	if (tb_switch_is_usb4(up->sw))
		ret = usb4_port_configure(up);
	else
		ret = tb_lc_configure_port(up);
	if (ret)
		return ret;

	down = up->remote;
	if (tb_switch_is_usb4(down->sw))
		return usb4_port_configure(down);
	return tb_lc_configure_port(down);
}
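
/*
 * Illustrative ordering sketch, not part of the original file: when a
 * connection manager brings up a newly plugged router it typically bonds
 * the lanes first and only then marks the link configured, as the
 * kernel-doc above recommends:
 *
 *	tb_switch_lane_bonding_enable(sw);	best effort
 *	tb_switch_configure_link(sw);
 *
 * The reverse is done on unplug/teardown with
 * tb_switch_lane_bonding_disable() and tb_switch_unconfigure_link().
 */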

/**
 * tb_switch_unconfigure_link() - Unconfigure link
 * @sw: Switch whose link is unconfigured
 *
 * Sets the link unconfigured so the @sw will be disconnected if the
 * domain exits sleep.
 */
void tb_switch_unconfigure_link(struct tb_switch *sw)
{
	struct tb_port *up, *down;

	if (sw->is_unplugged)
		return;
	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return;

	up = tb_upstream_port(sw);
	if (tb_switch_is_usb4(up->sw))
		usb4_port_unconfigure(up);
	else
		tb_lc_unconfigure_port(up);

	down = up->remote;
	if (tb_switch_is_usb4(down->sw))
		usb4_port_unconfigure(down);
	else
		tb_lc_unconfigure_port(down);
}

/**
 * tb_switch_add() - Add a switch to the domain
 * @sw: Switch to add
 *
 * This is the last step in adding a switch to the domain. It will read
 * identification information from the DROM and initialize the ports so
 * that they can be used to connect other switches. The switch will be
 * exposed to userspace when this function successfully returns. To
 * remove and release the switch, call tb_switch_remove().
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_add(struct tb_switch *sw)
{
	int i, ret;

	/*
	 * Initialize the DMA control port now, before we read the DROM.
	 * Recent host controllers have a more complete DROM in NVM that
	 * includes vendor and model identification strings which we then
	 * expose to userspace. NVM can be accessed through the DMA
	 * configuration based mailbox.
	 */
	ret = tb_switch_add_dma_port(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add DMA port\n");
		return ret;
	}

	if (!sw->safe_mode) {
		/* read drom */
		ret = tb_drom_read(sw);
		if (ret) {
			dev_err(&sw->dev, "reading DROM failed\n");
			return ret;
		}
		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);

		tb_check_quirks(sw);

		ret = tb_switch_set_uuid(sw);
		if (ret) {
			dev_err(&sw->dev, "failed to set UUID\n");
			return ret;
		}

		for (i = 0; i <= sw->config.max_port_number; i++) {
			if (sw->ports[i].disabled) {
				tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
				continue;
			}
			ret = tb_init_port(&sw->ports[i]);
			if (ret) {
				dev_err(&sw->dev, "failed to initialize port %d\n", i);
				return ret;
			}
		}

		tb_switch_default_link_ports(sw);

		ret = tb_switch_update_link_attributes(sw);
		if (ret)
			return ret;

		ret = tb_switch_tmu_init(sw);
		if (ret)
			return ret;
	}

	ret = device_add(&sw->dev);
	if (ret) {
		dev_err(&sw->dev, "failed to add device: %d\n", ret);
		return ret;
	}

	if (tb_route(sw)) {
		dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
			 sw->vendor, sw->device);
		if (sw->vendor_name && sw->device_name)
			dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
				 sw->device_name);
	}

	ret = tb_switch_nvm_add(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add NVM devices\n");
		device_del(&sw->dev);
		return ret;
	}

	/*
	 * Thunderbolt routers do not generate wakeups themselves but
	 * they forward wakeups from tunneled protocols, so enable it
	 * here.
	 */
	device_init_wakeup(&sw->dev, true);

	pm_runtime_set_active(&sw->dev);
	if (sw->rpm) {
		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
		pm_runtime_use_autosuspend(&sw->dev);
		pm_runtime_mark_last_busy(&sw->dev);
		pm_runtime_enable(&sw->dev);
		pm_request_autosuspend(&sw->dev);
	}

	tb_switch_debugfs_init(sw);
	return 0;
}
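
/*
 * Illustrative note, not part of the original sources: because the
 * router is registered wakeup capable above, userspace can toggle its
 * wake behaviour through the standard power/wakeup attribute, e.g.
 *
 *	/sys/bus/thunderbolt/devices/0-1/power/wakeup
 *
 * and device_may_wakeup() in tb_switch_suspend() further below honours
 * that setting when deciding which wake events to arm for system sleep.
 */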

/**
 * tb_switch_remove() - Remove and release a switch
 * @sw: Switch to remove
 *
 * This will remove the switch from the domain and release it once the
 * last reference to it is dropped. If there are switches connected below
 * this switch, they will be removed as well.
 */
void tb_switch_remove(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_debugfs_remove(sw);

	if (sw->rpm) {
		pm_runtime_get_sync(&sw->dev);
		pm_runtime_disable(&sw->dev);
	}

	/* port 0 is the switch itself and never has a remote */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else if (port->xdomain) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
		}

		/* Remove any downstream retimers */
		tb_retimer_remove_all(port);
	}

	if (!sw->is_unplugged)
		tb_plug_events_active(sw, false);

	tb_switch_nvm_remove(sw);

	if (tb_route(sw))
		dev_info(&sw->dev, "device disconnected\n");
	device_unregister(&sw->dev);
}

/**
 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
 * @sw: Router to mark unplugged
 */
void tb_sw_set_unplugged(struct tb_switch *sw)
{
	struct tb_port *port;

	if (sw == sw->tb->root_switch) {
		tb_sw_WARN(sw, "cannot unplug root switch\n");
		return;
	}
	if (sw->is_unplugged) {
		tb_sw_WARN(sw, "is_unplugged already set\n");
		return;
	}
	sw->is_unplugged = true;
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_sw_set_unplugged(port->remote->sw);
		else if (port->xdomain)
			port->xdomain->is_unplugged = true;
	}
}
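
/*
 * Illustrative sketch, not part of the original file: on a hot-unplug
 * event the software connection manager first marks the subtree gone and
 * then tears it down, roughly:
 *
 *	tb_sw_set_unplugged(port->remote->sw);
 *	tb_switch_remove(port->remote->sw);
 *	port->remote = NULL;
 *
 * Marking the routers unplugged first keeps tb_switch_remove() from
 * trying to disable plug events on hardware that is no longer there (see
 * the !sw->is_unplugged check above).
 */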

static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
{
	if (flags)
		tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
	else
		tb_sw_dbg(sw, "disabling wakeup\n");

	if (tb_switch_is_usb4(sw))
		return usb4_switch_set_wake(sw, flags);
	return tb_lc_set_wake(sw, flags);
}

int tb_switch_resume(struct tb_switch *sw)
{
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "resuming switch\n");

	/*
	 * Check the UID of the connected switches except for the root
	 * switch, which we assume cannot be removed.
	 */
	if (tb_route(sw)) {
		u64 uid;

		/*
		 * Check first that we can still read the switch config
		 * space. It may be that there is now another domain
		 * connected.
		 */
		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
		if (err < 0) {
			tb_sw_info(sw, "switch not present anymore\n");
			return err;
		}

		if (tb_switch_is_usb4(sw))
			err = usb4_switch_read_uid(sw, &uid);
		else
			err = tb_drom_read_uid_only(sw, &uid);
		if (err) {
			tb_sw_warn(sw, "uid read failed\n");
			return err;
		}
		if (sw->uid != uid) {
			tb_sw_info(sw,
				"changed while suspended (uid %#llx -> %#llx)\n",
				sw->uid, uid);
			return -ENODEV;
		}
	}

	err = tb_switch_configure(sw);
	if (err)
		return err;

	/* Disable wakes */
	tb_switch_set_wake(sw, 0);

	err = tb_switch_tmu_init(sw);
	if (err)
		return err;

	/* check for surviving downstream switches */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain) {
			/*
			 * For disconnected downstream lane adapters
			 * start lane initialization now so we detect
			 * future connects.
			 */
			if (!tb_is_upstream_port(port) && tb_port_is_null(port))
				tb_port_start_lane_initialization(port);
			continue;
		} else if (port->xdomain) {
			/*
			 * Start lane initialization for XDomain so the
			 * link gets re-established.
			 */
			tb_port_start_lane_initialization(port);
		}

		if (tb_wait_for_port(port, true) <= 0) {
			tb_port_warn(port,
				     "lost during suspend, disconnecting\n");
			if (tb_port_has_remote(port))
				tb_sw_set_unplugged(port->remote->sw);
			else if (port->xdomain)
				port->xdomain->is_unplugged = true;
		} else if (tb_port_has_remote(port) || port->xdomain) {
			/*
			 * Always unlock the port so the downstream
			 * switch/domain is accessible.
			 */
			if (tb_port_unlock(port))
				tb_port_warn(port, "failed to unlock port\n");
			if (port->remote && tb_switch_resume(port->remote->sw)) {
				tb_port_warn(port,
					     "lost during suspend, disconnecting\n");
				tb_sw_set_unplugged(port->remote->sw);
			}
		}
	}
	return 0;
}
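
/*
 * Illustrative sketch, not part of the original file: the connection
 * manager drives these helpers from the domain suspend/resume hooks,
 * roughly:
 *
 *	system sleep:	tb_switch_suspend(tb->root_switch, false);
 *	runtime sleep:	tb_switch_suspend(tb->root_switch, true);
 *	wake up:	tb_switch_resume(tb->root_switch);
 *
 * tb_switch_resume() walks the tree and marks routers that did not
 * survive the sleep as unplugged so they get cleaned up afterwards.
 */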

/**
 * tb_switch_suspend() - Put a switch to sleep
 * @sw: Switch to suspend
 * @runtime: Is this runtime suspend or system sleep
 *
 * Suspends the router and all its children. Enables wakes according to
 * the value of @runtime and then sets the sleep bit for the router. If
 * @sw is the host router, the domain is ready to go to sleep once this
 * function returns.
 */
void tb_switch_suspend(struct tb_switch *sw, bool runtime)
{
	unsigned int flags = 0;
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "suspending switch\n");

	err = tb_plug_events_active(sw, false);
	if (err)
		return;

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_switch_suspend(port->remote->sw, runtime);
	}

	if (runtime) {
		/* Trigger wake when something is plugged in/out */
		flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
	} else if (device_may_wakeup(&sw->dev)) {
		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
	}

	tb_switch_set_wake(sw, flags);

	if (tb_switch_is_usb4(sw))
		usb4_switch_set_sleep(sw);
	else
		tb_lc_set_sleep(sw);
}

/**
 * tb_switch_query_dp_resource() - Query availability of DP resource
 * @sw: Switch whose DP resource is queried
 * @in: DP IN port
 *
 * Queries availability of DP resource for DP tunneling using switch
 * specific means. Returns %true if the resource is available.
 */
bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_query_dp_resource(sw, in);
	return tb_lc_dp_sink_query(sw, in);
}

/**
 * tb_switch_alloc_dp_resource() - Allocate available DP resource
 * @sw: Switch whose DP resource is allocated
 * @in: DP IN port
 *
 * Allocates DP resource for DP tunneling. The resource must be
 * available for this to succeed (see tb_switch_query_dp_resource()).
 * Returns %0 in case of success and negative errno otherwise.
 */
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_alloc_dp_resource(sw, in);
	return tb_lc_dp_sink_alloc(sw, in);
}
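
/*
 * Illustrative sketch, not part of the original file: before setting up
 * a DisplayPort tunnel the connection manager checks and claims the DP
 * IN resource, and gives it back when the tunnel is torn down:
 *
 *	if (!tb_switch_query_dp_resource(sw, in))
 *		return;				sink already in use
 *	if (tb_switch_alloc_dp_resource(sw, in))
 *		return;
 *	...set up the DP tunnel...
 *	tb_switch_dealloc_dp_resource(sw, in);
 */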

/**
 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
 * @sw: Switch whose DP resource is de-allocated
 * @in: DP IN port
 *
 * De-allocates DP resource that was previously allocated for DP
 * tunneling.
 */
void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	int ret;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_dealloc_dp_resource(sw, in);
	else
		ret = tb_lc_dp_sink_dealloc(sw, in);

	if (ret)
		tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
			   in->port);
}

struct tb_sw_lookup {
	struct tb *tb;
	u8 link;
	u8 depth;
	const uuid_t *uuid;
	u64 route;
};

static int tb_switch_match(struct device *dev, const void *data)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_sw_lookup *lookup = data;

	if (!sw)
		return 0;
	if (sw->tb != lookup->tb)
		return 0;

	if (lookup->uuid)
		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));

	if (lookup->route) {
		return sw->config.route_lo == lower_32_bits(lookup->route) &&
		       sw->config.route_hi == upper_32_bits(lookup->route);
	}

	/* Root switch is matched only by depth */
	if (!lookup->depth)
		return !sw->depth;

	return sw->link == lookup->link && sw->depth == lookup->depth;
}

/**
 * tb_switch_find_by_link_depth() - Find switch by link and depth
 * @tb: Domain the switch belongs to
 * @link: Link number the switch is connected to
 * @depth: Depth of the switch in the link
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.link = link;
	lookup.depth = depth;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_uuid() - Find switch by UUID
 * @tb: Domain the switch belongs to
 * @uuid: UUID to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.uuid = uuid;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}
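
/*
 * Illustrative sketch, not part of the original file: all of the
 * tb_switch_find_by_*() helpers go through bus_find_device(), which
 * takes a reference on the matched device, so the result must be
 * balanced with tb_switch_put():
 *
 *	sw = tb_switch_find_by_uuid(tb, uuid);
 *	if (sw) {
 *		...use sw...
 *		tb_switch_put(sw);
 *	}
 */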

/**
 * tb_switch_find_by_route() - Find switch by route string
 * @tb: Domain the switch belongs to
 * @route: Route string to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	if (!route)
		return tb_switch_get(tb->root_switch);

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.route = route;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_port() - return the first port of @type on @sw or NULL
 * @sw: Switch to find the port from
 * @type: Port type to look for
 */
struct tb_port *tb_switch_find_port(struct tb_switch *sw,
				    enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (port->config.type == type)
			return port;
	}

	return NULL;
}
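
/*
 * Illustrative sketch, not part of the original file: callers use
 * tb_switch_find_port() to grab the first adapter of a given protocol,
 * for example a DP IN adapter when looking for a place to start a
 * DisplayPort tunnel:
 *
 *	struct tb_port *in;
 *
 *	in = tb_switch_find_port(sw, TB_TYPE_DP_HDMI_IN);
 *	if (!in)
 *		return;				no DP IN adapter here
 */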