1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Thunderbolt driver - switch/port utility functions 4 * 5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> 6 * Copyright (C) 2018, Intel Corporation 7 */ 8 9 #include <linux/delay.h> 10 #include <linux/idr.h> 11 #include <linux/nvmem-provider.h> 12 #include <linux/pm_runtime.h> 13 #include <linux/sched/signal.h> 14 #include <linux/sizes.h> 15 #include <linux/slab.h> 16 #include <linux/module.h> 17 18 #include "tb.h" 19 20 /* Switch NVM support */ 21 22 #define NVM_CSS 0x10 23 24 struct nvm_auth_status { 25 struct list_head list; 26 uuid_t uuid; 27 u32 status; 28 }; 29 30 static bool clx_enabled = true; 31 module_param_named(clx, clx_enabled, bool, 0444); 32 MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)"); 33 34 /* 35 * Hold NVM authentication failure status per switch This information 36 * needs to stay around even when the switch gets power cycled so we 37 * keep it separately. 38 */ 39 static LIST_HEAD(nvm_auth_status_cache); 40 static DEFINE_MUTEX(nvm_auth_status_lock); 41 42 static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw) 43 { 44 struct nvm_auth_status *st; 45 46 list_for_each_entry(st, &nvm_auth_status_cache, list) { 47 if (uuid_equal(&st->uuid, sw->uuid)) 48 return st; 49 } 50 51 return NULL; 52 } 53 54 static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status) 55 { 56 struct nvm_auth_status *st; 57 58 mutex_lock(&nvm_auth_status_lock); 59 st = __nvm_get_auth_status(sw); 60 mutex_unlock(&nvm_auth_status_lock); 61 62 *status = st ? st->status : 0; 63 } 64 65 static void nvm_set_auth_status(const struct tb_switch *sw, u32 status) 66 { 67 struct nvm_auth_status *st; 68 69 if (WARN_ON(!sw->uuid)) 70 return; 71 72 mutex_lock(&nvm_auth_status_lock); 73 st = __nvm_get_auth_status(sw); 74 75 if (!st) { 76 st = kzalloc(sizeof(*st), GFP_KERNEL); 77 if (!st) 78 goto unlock; 79 80 memcpy(&st->uuid, sw->uuid, sizeof(st->uuid)); 81 INIT_LIST_HEAD(&st->list); 82 list_add_tail(&st->list, &nvm_auth_status_cache); 83 } 84 85 st->status = status; 86 unlock: 87 mutex_unlock(&nvm_auth_status_lock); 88 } 89 90 static void nvm_clear_auth_status(const struct tb_switch *sw) 91 { 92 struct nvm_auth_status *st; 93 94 mutex_lock(&nvm_auth_status_lock); 95 st = __nvm_get_auth_status(sw); 96 if (st) { 97 list_del(&st->list); 98 kfree(st); 99 } 100 mutex_unlock(&nvm_auth_status_lock); 101 } 102 103 static int nvm_validate_and_write(struct tb_switch *sw) 104 { 105 unsigned int image_size, hdr_size; 106 const u8 *buf = sw->nvm->buf; 107 u16 ds_size; 108 int ret; 109 110 if (!buf) 111 return -EINVAL; 112 113 image_size = sw->nvm->buf_data_size; 114 if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE) 115 return -EINVAL; 116 117 /* 118 * FARB pointer must point inside the image and must at least 119 * contain parts of the digital section we will be reading here. 120 */ 121 hdr_size = (*(u32 *)buf) & 0xffffff; 122 if (hdr_size + NVM_DEVID + 2 >= image_size) 123 return -EINVAL; 124 125 /* Digital section start should be aligned to 4k page */ 126 if (!IS_ALIGNED(hdr_size, SZ_4K)) 127 return -EINVAL; 128 129 /* 130 * Read digital section size and check that it also fits inside 131 * the image. 132 */ 133 ds_size = *(u16 *)(buf + hdr_size); 134 if (ds_size >= image_size) 135 return -EINVAL; 136 137 if (!sw->safe_mode) { 138 u16 device_id; 139 140 /* 141 * Make sure the device ID in the image matches the one 142 * we read from the switch config space. 
 */
		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
		if (device_id != sw->config.device_id)
			return -EINVAL;

		if (sw->generation < 3) {
			/* Write CSS headers first */
			ret = dma_port_flash_write(sw->dma_port,
				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
				DMA_PORT_CSS_MAX_SIZE);
			if (ret)
				return ret;
		}

		/* Skip headers in the image */
		buf += hdr_size;
		image_size -= hdr_size;
	}

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
	else
		ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
	if (!ret)
		sw->nvm->flushed = true;
	return ret;
}

static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
{
	int ret = 0;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		u32 status;

		ret = tb_domain_disconnect_all_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this if
		 * everything goes well so getting a timeout is expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		if (!ret || ret == -ETIMEDOUT)
			return 0;

		/*
		 * Any error from the update auth operation requires power
		 * cycling of the host router.
		 */
		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
			nvm_set_auth_status(sw, status);
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return ret;
}

static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	switch (ret) {
	case 0:
	case -ETIMEDOUT:
	case -EACCES:
	case -EINVAL:
		/* Power cycle is required */
		break;
	default:
		return ret;
	}

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get timeout for a while). Once
	 * we get a response the device needs to be power cycled in
	 * order for the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;
		if (ret > 0) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}

static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	/*
	 * During host router NVM upgrade we should not allow the root
	 * port to go into D3cold because some root ports cannot trigger
	 * PME themselves. To be on the safe side keep the root port in
	 * D0 during the whole upgrade process.
265 */ 266 root_port = pcie_find_root_port(sw->tb->nhi->pdev); 267 if (root_port) 268 pm_runtime_get_noresume(&root_port->dev); 269 } 270 271 static void nvm_authenticate_complete_dma_port(struct tb_switch *sw) 272 { 273 struct pci_dev *root_port; 274 275 root_port = pcie_find_root_port(sw->tb->nhi->pdev); 276 if (root_port) 277 pm_runtime_put(&root_port->dev); 278 } 279 280 static inline bool nvm_readable(struct tb_switch *sw) 281 { 282 if (tb_switch_is_usb4(sw)) { 283 /* 284 * USB4 devices must support NVM operations but it is 285 * optional for hosts. Therefore we query the NVM sector 286 * size here and if it is supported assume NVM 287 * operations are implemented. 288 */ 289 return usb4_switch_nvm_sector_size(sw) > 0; 290 } 291 292 /* Thunderbolt 2 and 3 devices support NVM through DMA port */ 293 return !!sw->dma_port; 294 } 295 296 static inline bool nvm_upgradeable(struct tb_switch *sw) 297 { 298 if (sw->no_nvm_upgrade) 299 return false; 300 return nvm_readable(sw); 301 } 302 303 static inline int nvm_read(struct tb_switch *sw, unsigned int address, 304 void *buf, size_t size) 305 { 306 if (tb_switch_is_usb4(sw)) 307 return usb4_switch_nvm_read(sw, address, buf, size); 308 return dma_port_flash_read(sw->dma_port, address, buf, size); 309 } 310 311 static int nvm_authenticate(struct tb_switch *sw, bool auth_only) 312 { 313 int ret; 314 315 if (tb_switch_is_usb4(sw)) { 316 if (auth_only) { 317 ret = usb4_switch_nvm_set_offset(sw, 0); 318 if (ret) 319 return ret; 320 } 321 sw->nvm->authenticating = true; 322 return usb4_switch_nvm_authenticate(sw); 323 } else if (auth_only) { 324 return -EOPNOTSUPP; 325 } 326 327 sw->nvm->authenticating = true; 328 if (!tb_route(sw)) { 329 nvm_authenticate_start_dma_port(sw); 330 ret = nvm_authenticate_host_dma_port(sw); 331 } else { 332 ret = nvm_authenticate_device_dma_port(sw); 333 } 334 335 return ret; 336 } 337 338 static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val, 339 size_t bytes) 340 { 341 struct tb_nvm *nvm = priv; 342 struct tb_switch *sw = tb_to_switch(nvm->dev); 343 int ret; 344 345 pm_runtime_get_sync(&sw->dev); 346 347 if (!mutex_trylock(&sw->tb->lock)) { 348 ret = restart_syscall(); 349 goto out; 350 } 351 352 ret = nvm_read(sw, offset, val, bytes); 353 mutex_unlock(&sw->tb->lock); 354 355 out: 356 pm_runtime_mark_last_busy(&sw->dev); 357 pm_runtime_put_autosuspend(&sw->dev); 358 359 return ret; 360 } 361 362 static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val, 363 size_t bytes) 364 { 365 struct tb_nvm *nvm = priv; 366 struct tb_switch *sw = tb_to_switch(nvm->dev); 367 int ret; 368 369 if (!mutex_trylock(&sw->tb->lock)) 370 return restart_syscall(); 371 372 /* 373 * Since writing the NVM image might require some special steps, 374 * for example when CSS headers are written, we cache the image 375 * locally here and handle the special cases when the user asks 376 * us to authenticate the image. 377 */ 378 ret = tb_nvm_write_buf(nvm, offset, val, bytes); 379 mutex_unlock(&sw->tb->lock); 380 381 return ret; 382 } 383 384 static int tb_switch_nvm_add(struct tb_switch *sw) 385 { 386 struct tb_nvm *nvm; 387 u32 val; 388 int ret; 389 390 if (!nvm_readable(sw)) 391 return 0; 392 393 /* 394 * The NVM format of non-Intel hardware is not known so 395 * currently restrict NVM upgrade for Intel hardware. We may 396 * relax this in the future when we learn other NVM formats. 
397 */ 398 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL && 399 sw->config.vendor_id != 0x8087) { 400 dev_info(&sw->dev, 401 "NVM format of vendor %#x is not known, disabling NVM upgrade\n", 402 sw->config.vendor_id); 403 return 0; 404 } 405 406 nvm = tb_nvm_alloc(&sw->dev); 407 if (IS_ERR(nvm)) 408 return PTR_ERR(nvm); 409 410 /* 411 * If the switch is in safe-mode the only accessible portion of 412 * the NVM is the non-active one where userspace is expected to 413 * write new functional NVM. 414 */ 415 if (!sw->safe_mode) { 416 u32 nvm_size, hdr_size; 417 418 ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val)); 419 if (ret) 420 goto err_nvm; 421 422 hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K; 423 nvm_size = (SZ_1M << (val & 7)) / 8; 424 nvm_size = (nvm_size - hdr_size) / 2; 425 426 ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val)); 427 if (ret) 428 goto err_nvm; 429 430 nvm->major = val >> 16; 431 nvm->minor = val >> 8; 432 433 ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read); 434 if (ret) 435 goto err_nvm; 436 } 437 438 if (!sw->no_nvm_upgrade) { 439 ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE, 440 tb_switch_nvm_write); 441 if (ret) 442 goto err_nvm; 443 } 444 445 sw->nvm = nvm; 446 return 0; 447 448 err_nvm: 449 tb_nvm_free(nvm); 450 return ret; 451 } 452 453 static void tb_switch_nvm_remove(struct tb_switch *sw) 454 { 455 struct tb_nvm *nvm; 456 457 nvm = sw->nvm; 458 sw->nvm = NULL; 459 460 if (!nvm) 461 return; 462 463 /* Remove authentication status in case the switch is unplugged */ 464 if (!nvm->authenticating) 465 nvm_clear_auth_status(sw); 466 467 tb_nvm_free(nvm); 468 } 469 470 /* port utility functions */ 471 472 static const char *tb_port_type(const struct tb_regs_port_header *port) 473 { 474 switch (port->type >> 16) { 475 case 0: 476 switch ((u8) port->type) { 477 case 0: 478 return "Inactive"; 479 case 1: 480 return "Port"; 481 case 2: 482 return "NHI"; 483 default: 484 return "unknown"; 485 } 486 case 0x2: 487 return "Ethernet"; 488 case 0x8: 489 return "SATA"; 490 case 0xe: 491 return "DP/HDMI"; 492 case 0x10: 493 return "PCIe"; 494 case 0x20: 495 return "USB"; 496 default: 497 return "unknown"; 498 } 499 } 500 501 static void tb_dump_port(struct tb *tb, const struct tb_port *port) 502 { 503 const struct tb_regs_port_header *regs = &port->config; 504 505 tb_dbg(tb, 506 " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n", 507 regs->port_number, regs->vendor_id, regs->device_id, 508 regs->revision, regs->thunderbolt_version, tb_port_type(regs), 509 regs->type); 510 tb_dbg(tb, " Max hop id (in/out): %d/%d\n", 511 regs->max_in_hop_id, regs->max_out_hop_id); 512 tb_dbg(tb, " Max counters: %d\n", regs->max_counters); 513 tb_dbg(tb, " NFC Credits: %#x\n", regs->nfc_credits); 514 tb_dbg(tb, " Credits (total/control): %u/%u\n", port->total_credits, 515 port->ctl_credits); 516 } 517 518 /** 519 * tb_port_state() - get connectedness state of a port 520 * @port: the port to check 521 * 522 * The port must have a TB_CAP_PHY (i.e. it should be a real port). 523 * 524 * Return: Returns an enum tb_port_state on success or an error code on failure. 
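 *
 * Illustrative usage sketch (not part of the driver; assumes the caller
 * already holds the domain lock and @port is a lane adapter):
 *
 *	int state = tb_port_state(port);
 *
 *	if (state < 0)
 *		return state;
 *	if (state == TB_PORT_UP)
 *		tb_port_dbg(port, "link is up\n");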
525 */ 526 int tb_port_state(struct tb_port *port) 527 { 528 struct tb_cap_phy phy; 529 int res; 530 if (port->cap_phy == 0) { 531 tb_port_WARN(port, "does not have a PHY\n"); 532 return -EINVAL; 533 } 534 res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2); 535 if (res) 536 return res; 537 return phy.state; 538 } 539 540 /** 541 * tb_wait_for_port() - wait for a port to become ready 542 * @port: Port to wait 543 * @wait_if_unplugged: Wait also when port is unplugged 544 * 545 * Wait up to 1 second for a port to reach state TB_PORT_UP. If 546 * wait_if_unplugged is set then we also wait if the port is in state 547 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after 548 * switch resume). Otherwise we only wait if a device is registered but the link 549 * has not yet been established. 550 * 551 * Return: Returns an error code on failure. Returns 0 if the port is not 552 * connected or failed to reach state TB_PORT_UP within one second. Returns 1 553 * if the port is connected and in state TB_PORT_UP. 554 */ 555 int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged) 556 { 557 int retries = 10; 558 int state; 559 if (!port->cap_phy) { 560 tb_port_WARN(port, "does not have PHY\n"); 561 return -EINVAL; 562 } 563 if (tb_is_upstream_port(port)) { 564 tb_port_WARN(port, "is the upstream port\n"); 565 return -EINVAL; 566 } 567 568 while (retries--) { 569 state = tb_port_state(port); 570 if (state < 0) 571 return state; 572 if (state == TB_PORT_DISABLED) { 573 tb_port_dbg(port, "is disabled (state: 0)\n"); 574 return 0; 575 } 576 if (state == TB_PORT_UNPLUGGED) { 577 if (wait_if_unplugged) { 578 /* used during resume */ 579 tb_port_dbg(port, 580 "is unplugged (state: 7), retrying...\n"); 581 msleep(100); 582 continue; 583 } 584 tb_port_dbg(port, "is unplugged (state: 7)\n"); 585 return 0; 586 } 587 if (state == TB_PORT_UP) { 588 tb_port_dbg(port, "is connected, link is up (state: 2)\n"); 589 return 1; 590 } 591 592 /* 593 * After plug-in the state is TB_PORT_CONNECTING. Give it some 594 * time. 595 */ 596 tb_port_dbg(port, 597 "is connected, link is not up (state: %d), retrying...\n", 598 state); 599 msleep(100); 600 } 601 tb_port_warn(port, 602 "failed to reach state TB_PORT_UP. Ignoring port...\n"); 603 return 0; 604 } 605 606 /** 607 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port 608 * @port: Port to add/remove NFC credits 609 * @credits: Credits to add/remove 610 * 611 * Change the number of NFC credits allocated to @port by @credits. To remove 612 * NFC credits pass a negative amount of credits. 613 * 614 * Return: Returns 0 on success or an error code on failure. 615 */ 616 int tb_port_add_nfc_credits(struct tb_port *port, int credits) 617 { 618 u32 nfc_credits; 619 620 if (credits == 0 || port->sw->is_unplugged) 621 return 0; 622 623 /* 624 * USB4 restricts programming NFC buffers to lane adapters only 625 * so skip other ports. 
626 */ 627 if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port)) 628 return 0; 629 630 nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK; 631 if (credits < 0) 632 credits = max_t(int, -nfc_credits, credits); 633 634 nfc_credits += credits; 635 636 tb_port_dbg(port, "adding %d NFC credits to %lu", credits, 637 port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK); 638 639 port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK; 640 port->config.nfc_credits |= nfc_credits; 641 642 return tb_port_write(port, &port->config.nfc_credits, 643 TB_CFG_PORT, ADP_CS_4, 1); 644 } 645 646 /** 647 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER 648 * @port: Port whose counters to clear 649 * @counter: Counter index to clear 650 * 651 * Return: Returns 0 on success or an error code on failure. 652 */ 653 int tb_port_clear_counter(struct tb_port *port, int counter) 654 { 655 u32 zero[3] = { 0, 0, 0 }; 656 tb_port_dbg(port, "clearing counter %d\n", counter); 657 return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3); 658 } 659 660 /** 661 * tb_port_unlock() - Unlock downstream port 662 * @port: Port to unlock 663 * 664 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the 665 * downstream router accessible for CM. 666 */ 667 int tb_port_unlock(struct tb_port *port) 668 { 669 if (tb_switch_is_icm(port->sw)) 670 return 0; 671 if (!tb_port_is_null(port)) 672 return -EINVAL; 673 if (tb_switch_is_usb4(port->sw)) 674 return usb4_port_unlock(port); 675 return 0; 676 } 677 678 static int __tb_port_enable(struct tb_port *port, bool enable) 679 { 680 int ret; 681 u32 phy; 682 683 if (!tb_port_is_null(port)) 684 return -EINVAL; 685 686 ret = tb_port_read(port, &phy, TB_CFG_PORT, 687 port->cap_phy + LANE_ADP_CS_1, 1); 688 if (ret) 689 return ret; 690 691 if (enable) 692 phy &= ~LANE_ADP_CS_1_LD; 693 else 694 phy |= LANE_ADP_CS_1_LD; 695 696 return tb_port_write(port, &phy, TB_CFG_PORT, 697 port->cap_phy + LANE_ADP_CS_1, 1); 698 } 699 700 /** 701 * tb_port_enable() - Enable lane adapter 702 * @port: Port to enable (can be %NULL) 703 * 704 * This is used for lane 0 and 1 adapters to enable it. 705 */ 706 int tb_port_enable(struct tb_port *port) 707 { 708 return __tb_port_enable(port, true); 709 } 710 711 /** 712 * tb_port_disable() - Disable lane adapter 713 * @port: Port to disable (can be %NULL) 714 * 715 * This is used for lane 0 and 1 adapters to disable it. 716 */ 717 int tb_port_disable(struct tb_port *port) 718 { 719 return __tb_port_enable(port, false); 720 } 721 722 /* 723 * tb_init_port() - initialize a port 724 * 725 * This is a helper method for tb_switch_alloc. Does not check or initialize 726 * any downstream switches. 727 * 728 * Return: Returns 0 on success or an error code on failure. 729 */ 730 static int tb_init_port(struct tb_port *port) 731 { 732 int res; 733 int cap; 734 735 INIT_LIST_HEAD(&port->list); 736 737 /* Control adapter does not have configuration space */ 738 if (!port->port) 739 return 0; 740 741 res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8); 742 if (res) { 743 if (res == -ENODEV) { 744 tb_dbg(port->sw->tb, " Port %d: not implemented\n", 745 port->port); 746 port->disabled = true; 747 return 0; 748 } 749 return res; 750 } 751 752 /* Port 0 is the switch itself and has no PHY. 
 */
	if (port->config.type == TB_TYPE_PORT) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");

		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
		if (cap > 0)
			port->cap_usb4 = cap;

		/*
		 * For USB4 ports the buffers allocated for the control
		 * path can be read from the path config space. For
		 * legacy devices we use a hard-coded value.
		 */
		if (tb_switch_is_usb4(port->sw)) {
			struct tb_regs_hop hop;

			if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
				port->ctl_credits = hop.initial_credits;
		}
		if (!port->ctl_credits)
			port->ctl_credits = 2;

	} else {
		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
		if (cap > 0)
			port->cap_adap = cap;
	}

	port->total_credits =
		(port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
		ADP_CS_4_TOTAL_BUFFERS_SHIFT;

	tb_dump_port(port->sw->tb, port);
	return 0;
}

static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
			       int max_hopid)
{
	int port_max_hopid;
	struct ida *ida;

	if (in) {
		port_max_hopid = port->config.max_in_hop_id;
		ida = &port->in_hopids;
	} else {
		port_max_hopid = port->config.max_out_hop_id;
		ida = &port->out_hopids;
	}

	/*
	 * The NHI can use HopIDs 1-max. For all other adapters HopIDs
	 * 0-7 are reserved.
	 */
	if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
		min_hopid = TB_PATH_MIN_HOPID;

	if (max_hopid < 0 || max_hopid > port_max_hopid)
		max_hopid = port_max_hopid;

	return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
}

/**
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
}

/**
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
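 *
 * Illustrative sketch (not part of the driver): callers typically pair
 * the allocation with a matching release, e.g.
 *
 *	hopid = tb_port_alloc_out_hopid(port, TB_PATH_MIN_HOPID, -1);
 *	if (hopid < 0)
 *		return hopid;
 *	...
 *	tb_port_release_out_hopid(port, hopid);
 *
 * where a negative @max_hopid means "use the maximum the adapter
 * supports".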
842 */ 843 int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid) 844 { 845 return tb_port_alloc_hopid(port, false, min_hopid, max_hopid); 846 } 847 848 /** 849 * tb_port_release_in_hopid() - Release allocated input HopID from port 850 * @port: Port whose HopID to release 851 * @hopid: HopID to release 852 */ 853 void tb_port_release_in_hopid(struct tb_port *port, int hopid) 854 { 855 ida_simple_remove(&port->in_hopids, hopid); 856 } 857 858 /** 859 * tb_port_release_out_hopid() - Release allocated output HopID from port 860 * @port: Port whose HopID to release 861 * @hopid: HopID to release 862 */ 863 void tb_port_release_out_hopid(struct tb_port *port, int hopid) 864 { 865 ida_simple_remove(&port->out_hopids, hopid); 866 } 867 868 static inline bool tb_switch_is_reachable(const struct tb_switch *parent, 869 const struct tb_switch *sw) 870 { 871 u64 mask = (1ULL << parent->config.depth * 8) - 1; 872 return (tb_route(parent) & mask) == (tb_route(sw) & mask); 873 } 874 875 /** 876 * tb_next_port_on_path() - Return next port for given port on a path 877 * @start: Start port of the walk 878 * @end: End port of the walk 879 * @prev: Previous port (%NULL if this is the first) 880 * 881 * This function can be used to walk from one port to another if they 882 * are connected through zero or more switches. If the @prev is dual 883 * link port, the function follows that link and returns another end on 884 * that same link. 885 * 886 * If the @end port has been reached, return %NULL. 887 * 888 * Domain tb->lock must be held when this function is called. 889 */ 890 struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end, 891 struct tb_port *prev) 892 { 893 struct tb_port *next; 894 895 if (!prev) 896 return start; 897 898 if (prev->sw == end->sw) { 899 if (prev == end) 900 return NULL; 901 return end; 902 } 903 904 if (tb_switch_is_reachable(prev->sw, end->sw)) { 905 next = tb_port_at(tb_route(end->sw), prev->sw); 906 /* Walk down the topology if next == prev */ 907 if (prev->remote && 908 (next == prev || next->dual_link_port == prev)) 909 next = prev->remote; 910 } else { 911 if (tb_is_upstream_port(prev)) { 912 next = prev->remote; 913 } else { 914 next = tb_upstream_port(prev->sw); 915 /* 916 * Keep the same link if prev and next are both 917 * dual link ports. 918 */ 919 if (next->dual_link_port && 920 next->link_nr != prev->link_nr) { 921 next = next->dual_link_port; 922 } 923 } 924 } 925 926 return next != prev ? next : NULL; 927 } 928 929 /** 930 * tb_port_get_link_speed() - Get current link speed 931 * @port: Port to check (USB4 or CIO) 932 * 933 * Returns link speed in Gb/s or negative errno in case of failure. 934 */ 935 int tb_port_get_link_speed(struct tb_port *port) 936 { 937 u32 val, speed; 938 int ret; 939 940 if (!port->cap_phy) 941 return -EINVAL; 942 943 ret = tb_port_read(port, &val, TB_CFG_PORT, 944 port->cap_phy + LANE_ADP_CS_1, 1); 945 if (ret) 946 return ret; 947 948 speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >> 949 LANE_ADP_CS_1_CURRENT_SPEED_SHIFT; 950 return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10; 951 } 952 953 /** 954 * tb_port_get_link_width() - Get current link width 955 * @port: Port to check (USB4 or CIO) 956 * 957 * Returns link width. Return values can be 1 (Single-Lane), 2 (Dual-Lane) 958 * or negative errno in case of failure. 
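 *
 * Illustrative sketch (not part of the driver): together with
 * tb_port_get_link_speed() this can be used to log the current link,
 * e.g.
 *
 *	int speed = tb_port_get_link_speed(port);
 *	int width = tb_port_get_link_width(port);
 *
 *	if (speed > 0 && width > 0)
 *		tb_port_dbg(port, "link is %d Gb/s x%d\n", speed, width);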
959 */ 960 int tb_port_get_link_width(struct tb_port *port) 961 { 962 u32 val; 963 int ret; 964 965 if (!port->cap_phy) 966 return -EINVAL; 967 968 ret = tb_port_read(port, &val, TB_CFG_PORT, 969 port->cap_phy + LANE_ADP_CS_1, 1); 970 if (ret) 971 return ret; 972 973 return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >> 974 LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT; 975 } 976 977 static bool tb_port_is_width_supported(struct tb_port *port, int width) 978 { 979 u32 phy, widths; 980 int ret; 981 982 if (!port->cap_phy) 983 return false; 984 985 ret = tb_port_read(port, &phy, TB_CFG_PORT, 986 port->cap_phy + LANE_ADP_CS_0, 1); 987 if (ret) 988 return false; 989 990 widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >> 991 LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT; 992 993 return !!(widths & width); 994 } 995 996 static int tb_port_set_link_width(struct tb_port *port, unsigned int width) 997 { 998 u32 val; 999 int ret; 1000 1001 if (!port->cap_phy) 1002 return -EINVAL; 1003 1004 ret = tb_port_read(port, &val, TB_CFG_PORT, 1005 port->cap_phy + LANE_ADP_CS_1, 1); 1006 if (ret) 1007 return ret; 1008 1009 val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK; 1010 switch (width) { 1011 case 1: 1012 val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE << 1013 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT; 1014 break; 1015 case 2: 1016 val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL << 1017 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT; 1018 break; 1019 default: 1020 return -EINVAL; 1021 } 1022 1023 val |= LANE_ADP_CS_1_LB; 1024 1025 return tb_port_write(port, &val, TB_CFG_PORT, 1026 port->cap_phy + LANE_ADP_CS_1, 1); 1027 } 1028 1029 /** 1030 * tb_port_lane_bonding_enable() - Enable bonding on port 1031 * @port: port to enable 1032 * 1033 * Enable bonding by setting the link width of the port and the other 1034 * port in case of dual link port. Does not wait for the link to 1035 * actually reach the bonded state so caller needs to call 1036 * tb_port_wait_for_link_width() before enabling any paths through the 1037 * link to make sure the link is in expected state. 1038 * 1039 * Return: %0 in case of success and negative errno in case of error 1040 */ 1041 int tb_port_lane_bonding_enable(struct tb_port *port) 1042 { 1043 int ret; 1044 1045 /* 1046 * Enable lane bonding for both links if not already enabled by 1047 * for example the boot firmware. 1048 */ 1049 ret = tb_port_get_link_width(port); 1050 if (ret == 1) { 1051 ret = tb_port_set_link_width(port, 2); 1052 if (ret) 1053 return ret; 1054 } 1055 1056 ret = tb_port_get_link_width(port->dual_link_port); 1057 if (ret == 1) { 1058 ret = tb_port_set_link_width(port->dual_link_port, 2); 1059 if (ret) { 1060 tb_port_set_link_width(port, 1); 1061 return ret; 1062 } 1063 } 1064 1065 port->bonded = true; 1066 port->dual_link_port->bonded = true; 1067 1068 return 0; 1069 } 1070 1071 /** 1072 * tb_port_lane_bonding_disable() - Disable bonding on port 1073 * @port: port to disable 1074 * 1075 * Disable bonding by setting the link width of the port and the 1076 * other port in case of dual link port. 
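 *
 * Illustrative sketch (not part of the driver; @up is assumed to be the
 * lane 0 adapter): as with enabling, disabling is normally followed by
 * waiting for the new width and re-reading the credits, e.g.
 *
 *	tb_port_lane_bonding_disable(up);
 *	ret = tb_port_wait_for_link_width(up, 1, 100);
 *	if (!ret)
 *		ret = tb_port_update_credits(up);
 *
 * (the 100 ms timeout here is only an example value).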
 *
 */
void tb_port_lane_bonding_disable(struct tb_port *port)
{
	port->dual_link_port->bonded = false;
	port->bonded = false;

	tb_port_set_link_width(port->dual_link_port, 1);
	tb_port_set_link_width(port, 1);
}

/**
 * tb_port_wait_for_link_width() - Wait until link reaches specific width
 * @port: Port to wait for
 * @width: Expected link width (%1 or %2)
 * @timeout_msec: Timeout in ms how long to wait
 *
 * Should be used after both ends of the link have been bonded (or
 * bonding has been disabled) to wait until the link actually reaches
 * the expected state. Returns %-ETIMEDOUT if the @width was not reached
 * within the given timeout, %0 if it did.
 */
int tb_port_wait_for_link_width(struct tb_port *port, int width,
				int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
	int ret;

	do {
		ret = tb_port_get_link_width(port);
		if (ret < 0)
			return ret;
		else if (ret == width)
			return 0;

		usleep_range(1000, 2000);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

static int tb_port_do_update_credits(struct tb_port *port)
{
	u32 nfc_credits;
	int ret;

	ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
	if (ret)
		return ret;

	if (nfc_credits != port->config.nfc_credits) {
		u32 total;

		total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
			ADP_CS_4_TOTAL_BUFFERS_SHIFT;

		tb_port_dbg(port, "total credits changed %u -> %u\n",
			    port->total_credits, total);

		port->config.nfc_credits = nfc_credits;
		port->total_credits = total;
	}

	return 0;
}

/**
 * tb_port_update_credits() - Re-read port total credits
 * @port: Port to update
 *
 * After the link is bonded (or bonding was disabled) the port total
 * credits may change, so this function needs to be called to re-read
 * the credits. Updates also the second lane adapter.
 */
int tb_port_update_credits(struct tb_port *port)
{
	int ret;

	ret = tb_port_do_update_credits(port);
	if (ret)
		return ret;
	return tb_port_do_update_credits(port->dual_link_port);
}

static int tb_port_start_lane_initialization(struct tb_port *port)
{
	int ret;

	if (tb_switch_is_usb4(port->sw))
		return 0;

	ret = tb_lc_start_lane_initialization(port);
	return ret == -EINVAL ? 0 : ret;
}

/*
 * Returns true if the port had something (router, XDomain) connected
 * before suspend.
 */
static bool tb_port_resume(struct tb_port *port)
{
	bool has_remote = tb_port_has_remote(port);

	if (port->usb4) {
		usb4_port_device_resume(port->usb4);
	} else if (!has_remote) {
		/*
		 * For disconnected downstream lane adapters start lane
		 * initialization now so we detect future connects.
		 *
		 * For XDomain start the lane initialization now so the
		 * link gets re-established.
		 *
		 * This is only needed for non-USB4 ports.
1191 */ 1192 if (!tb_is_upstream_port(port) || port->xdomain) 1193 tb_port_start_lane_initialization(port); 1194 } 1195 1196 return has_remote || port->xdomain; 1197 } 1198 1199 /** 1200 * tb_port_is_enabled() - Is the adapter port enabled 1201 * @port: Port to check 1202 */ 1203 bool tb_port_is_enabled(struct tb_port *port) 1204 { 1205 switch (port->config.type) { 1206 case TB_TYPE_PCIE_UP: 1207 case TB_TYPE_PCIE_DOWN: 1208 return tb_pci_port_is_enabled(port); 1209 1210 case TB_TYPE_DP_HDMI_IN: 1211 case TB_TYPE_DP_HDMI_OUT: 1212 return tb_dp_port_is_enabled(port); 1213 1214 case TB_TYPE_USB3_UP: 1215 case TB_TYPE_USB3_DOWN: 1216 return tb_usb3_port_is_enabled(port); 1217 1218 default: 1219 return false; 1220 } 1221 } 1222 1223 /** 1224 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled 1225 * @port: USB3 adapter port to check 1226 */ 1227 bool tb_usb3_port_is_enabled(struct tb_port *port) 1228 { 1229 u32 data; 1230 1231 if (tb_port_read(port, &data, TB_CFG_PORT, 1232 port->cap_adap + ADP_USB3_CS_0, 1)) 1233 return false; 1234 1235 return !!(data & ADP_USB3_CS_0_PE); 1236 } 1237 1238 /** 1239 * tb_usb3_port_enable() - Enable USB3 adapter port 1240 * @port: USB3 adapter port to enable 1241 * @enable: Enable/disable the USB3 adapter 1242 */ 1243 int tb_usb3_port_enable(struct tb_port *port, bool enable) 1244 { 1245 u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V) 1246 : ADP_USB3_CS_0_V; 1247 1248 if (!port->cap_adap) 1249 return -ENXIO; 1250 return tb_port_write(port, &word, TB_CFG_PORT, 1251 port->cap_adap + ADP_USB3_CS_0, 1); 1252 } 1253 1254 /** 1255 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled 1256 * @port: PCIe port to check 1257 */ 1258 bool tb_pci_port_is_enabled(struct tb_port *port) 1259 { 1260 u32 data; 1261 1262 if (tb_port_read(port, &data, TB_CFG_PORT, 1263 port->cap_adap + ADP_PCIE_CS_0, 1)) 1264 return false; 1265 1266 return !!(data & ADP_PCIE_CS_0_PE); 1267 } 1268 1269 /** 1270 * tb_pci_port_enable() - Enable PCIe adapter port 1271 * @port: PCIe port to enable 1272 * @enable: Enable/disable the PCIe adapter 1273 */ 1274 int tb_pci_port_enable(struct tb_port *port, bool enable) 1275 { 1276 u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0; 1277 if (!port->cap_adap) 1278 return -ENXIO; 1279 return tb_port_write(port, &word, TB_CFG_PORT, 1280 port->cap_adap + ADP_PCIE_CS_0, 1); 1281 } 1282 1283 /** 1284 * tb_dp_port_hpd_is_active() - Is HPD already active 1285 * @port: DP out port to check 1286 * 1287 * Checks if the DP OUT adapter port has HDP bit already set. 1288 */ 1289 int tb_dp_port_hpd_is_active(struct tb_port *port) 1290 { 1291 u32 data; 1292 int ret; 1293 1294 ret = tb_port_read(port, &data, TB_CFG_PORT, 1295 port->cap_adap + ADP_DP_CS_2, 1); 1296 if (ret) 1297 return ret; 1298 1299 return !!(data & ADP_DP_CS_2_HDP); 1300 } 1301 1302 /** 1303 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port 1304 * @port: Port to clear HPD 1305 * 1306 * If the DP IN port has HDP set, this function can be used to clear it. 
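 *
 * Illustrative sketch (not part of the driver; @in is assumed to be a
 * DP IN adapter resolved by the caller):
 *
 *	ret = tb_dp_port_hpd_clear(in);
 *	if (ret)
 *		tb_port_warn(in, "failed to clear HPD: %d\n", ret);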
 */
int tb_dp_port_hpd_clear(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_3, 1);
	if (ret)
		return ret;

	data |= ADP_DP_CS_3_HDPC;
	return tb_port_write(port, &data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_3, 1);
}

/**
 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
 * @port: DP IN/OUT port to set hops
 * @video: Video Hop ID
 * @aux_tx: AUX TX Hop ID
 * @aux_rx: AUX RX Hop ID
 *
 * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
 * router DP adapters too but does not program the values as the fields
 * are read-only.
 */
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
			unsigned int aux_tx, unsigned int aux_rx)
{
	u32 data[2];
	int ret;

	if (tb_switch_is_usb4(port->sw))
		return 0;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
		ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
		ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/**
 * tb_dp_port_is_enabled() - Is DP adapter port enabled
 * @port: DP adapter port to check
 */
bool tb_dp_port_is_enabled(struct tb_port *port)
{
	u32 data[2];

	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
			 ARRAY_SIZE(data)))
		return false;

	return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
}

/**
 * tb_dp_port_enable() - Enables/disables DP paths of a port
 * @port: DP IN/OUT port
 * @enable: Enable/disable DP path
 *
 * Once Hop IDs are programmed DP paths can be enabled or disabled by
 * calling this function.
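 *
 * Illustrative ordering sketch (not part of the driver; the HopID
 * values are placeholders for the example):
 *
 *	ret = tb_dp_port_set_hops(port, video_hopid, aux_tx_hopid,
 *				  aux_rx_hopid);
 *	if (!ret)
 *		ret = tb_dp_port_enable(port, true);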
1384 */ 1385 int tb_dp_port_enable(struct tb_port *port, bool enable) 1386 { 1387 u32 data[2]; 1388 int ret; 1389 1390 ret = tb_port_read(port, data, TB_CFG_PORT, 1391 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data)); 1392 if (ret) 1393 return ret; 1394 1395 if (enable) 1396 data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE; 1397 else 1398 data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE); 1399 1400 return tb_port_write(port, data, TB_CFG_PORT, 1401 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data)); 1402 } 1403 1404 /* switch utility functions */ 1405 1406 static const char *tb_switch_generation_name(const struct tb_switch *sw) 1407 { 1408 switch (sw->generation) { 1409 case 1: 1410 return "Thunderbolt 1"; 1411 case 2: 1412 return "Thunderbolt 2"; 1413 case 3: 1414 return "Thunderbolt 3"; 1415 case 4: 1416 return "USB4"; 1417 default: 1418 return "Unknown"; 1419 } 1420 } 1421 1422 static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw) 1423 { 1424 const struct tb_regs_switch_header *regs = &sw->config; 1425 1426 tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n", 1427 tb_switch_generation_name(sw), regs->vendor_id, regs->device_id, 1428 regs->revision, regs->thunderbolt_version); 1429 tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number); 1430 tb_dbg(tb, " Config:\n"); 1431 tb_dbg(tb, 1432 " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n", 1433 regs->upstream_port_number, regs->depth, 1434 (((u64) regs->route_hi) << 32) | regs->route_lo, 1435 regs->enabled, regs->plug_events_delay); 1436 tb_dbg(tb, " unknown1: %#x unknown4: %#x\n", 1437 regs->__unknown1, regs->__unknown4); 1438 } 1439 1440 /** 1441 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET 1442 * @sw: Switch to reset 1443 * 1444 * Return: Returns 0 on success or an error code on failure. 1445 */ 1446 int tb_switch_reset(struct tb_switch *sw) 1447 { 1448 struct tb_cfg_result res; 1449 1450 if (sw->generation > 1) 1451 return 0; 1452 1453 tb_sw_dbg(sw, "resetting switch\n"); 1454 1455 res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2, 1456 TB_CFG_SWITCH, 2, 2); 1457 if (res.err) 1458 return res.err; 1459 res = tb_cfg_reset(sw->tb->ctl, tb_route(sw)); 1460 if (res.err > 0) 1461 return -EIO; 1462 return res.err; 1463 } 1464 1465 /** 1466 * tb_switch_wait_for_bit() - Wait for specified value of bits in offset 1467 * @sw: Router to read the offset value from 1468 * @offset: Offset in the router config space to read from 1469 * @bit: Bit mask in the offset to wait for 1470 * @value: Value of the bits to wait for 1471 * @timeout_msec: Timeout in ms how long to wait 1472 * 1473 * Wait till the specified bits in specified offset reach specified value. 1474 * Returns %0 in case of success, %-ETIMEDOUT if the @value was not reached 1475 * within the given timeout or a negative errno in case of failure. 1476 */ 1477 int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit, 1478 u32 value, int timeout_msec) 1479 { 1480 ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec); 1481 1482 do { 1483 u32 val; 1484 int ret; 1485 1486 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1); 1487 if (ret) 1488 return ret; 1489 1490 if ((val & bit) == value) 1491 return 0; 1492 1493 usleep_range(50, 100); 1494 } while (ktime_before(ktime_get(), timeout)); 1495 1496 return -ETIMEDOUT; 1497 } 1498 1499 /* 1500 * tb_plug_events_active() - enable/disable plug events on a switch 1501 * 1502 * Also configures a sane plug_events_delay of 255ms. 
1503 * 1504 * Return: Returns 0 on success or an error code on failure. 1505 */ 1506 static int tb_plug_events_active(struct tb_switch *sw, bool active) 1507 { 1508 u32 data; 1509 int res; 1510 1511 if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw)) 1512 return 0; 1513 1514 sw->config.plug_events_delay = 0xff; 1515 res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1); 1516 if (res) 1517 return res; 1518 1519 res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1); 1520 if (res) 1521 return res; 1522 1523 if (active) { 1524 data = data & 0xFFFFFF83; 1525 switch (sw->config.device_id) { 1526 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE: 1527 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE: 1528 case PCI_DEVICE_ID_INTEL_PORT_RIDGE: 1529 break; 1530 default: 1531 /* 1532 * Skip Alpine Ridge, it needs to have vendor 1533 * specific USB hotplug event enabled for the 1534 * internal xHCI to work. 1535 */ 1536 if (!tb_switch_is_alpine_ridge(sw)) 1537 data |= TB_PLUG_EVENTS_USB_DISABLE; 1538 } 1539 } else { 1540 data = data | 0x7c; 1541 } 1542 return tb_sw_write(sw, &data, TB_CFG_SWITCH, 1543 sw->cap_plug_events + 1, 1); 1544 } 1545 1546 static ssize_t authorized_show(struct device *dev, 1547 struct device_attribute *attr, 1548 char *buf) 1549 { 1550 struct tb_switch *sw = tb_to_switch(dev); 1551 1552 return sprintf(buf, "%u\n", sw->authorized); 1553 } 1554 1555 static int disapprove_switch(struct device *dev, void *not_used) 1556 { 1557 char *envp[] = { "AUTHORIZED=0", NULL }; 1558 struct tb_switch *sw; 1559 1560 sw = tb_to_switch(dev); 1561 if (sw && sw->authorized) { 1562 int ret; 1563 1564 /* First children */ 1565 ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch); 1566 if (ret) 1567 return ret; 1568 1569 ret = tb_domain_disapprove_switch(sw->tb, sw); 1570 if (ret) 1571 return ret; 1572 1573 sw->authorized = 0; 1574 kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp); 1575 } 1576 1577 return 0; 1578 } 1579 1580 static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val) 1581 { 1582 char envp_string[13]; 1583 int ret = -EINVAL; 1584 char *envp[] = { envp_string, NULL }; 1585 1586 if (!mutex_trylock(&sw->tb->lock)) 1587 return restart_syscall(); 1588 1589 if (!!sw->authorized == !!val) 1590 goto unlock; 1591 1592 switch (val) { 1593 /* Disapprove switch */ 1594 case 0: 1595 if (tb_route(sw)) { 1596 ret = disapprove_switch(&sw->dev, NULL); 1597 goto unlock; 1598 } 1599 break; 1600 1601 /* Approve switch */ 1602 case 1: 1603 if (sw->key) 1604 ret = tb_domain_approve_switch_key(sw->tb, sw); 1605 else 1606 ret = tb_domain_approve_switch(sw->tb, sw); 1607 break; 1608 1609 /* Challenge switch */ 1610 case 2: 1611 if (sw->key) 1612 ret = tb_domain_challenge_switch_key(sw->tb, sw); 1613 break; 1614 1615 default: 1616 break; 1617 } 1618 1619 if (!ret) { 1620 sw->authorized = val; 1621 /* 1622 * Notify status change to the userspace, informing the new 1623 * value of /sys/bus/thunderbolt/devices/.../authorized. 
1624 */ 1625 sprintf(envp_string, "AUTHORIZED=%u", sw->authorized); 1626 kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp); 1627 } 1628 1629 unlock: 1630 mutex_unlock(&sw->tb->lock); 1631 return ret; 1632 } 1633 1634 static ssize_t authorized_store(struct device *dev, 1635 struct device_attribute *attr, 1636 const char *buf, size_t count) 1637 { 1638 struct tb_switch *sw = tb_to_switch(dev); 1639 unsigned int val; 1640 ssize_t ret; 1641 1642 ret = kstrtouint(buf, 0, &val); 1643 if (ret) 1644 return ret; 1645 if (val > 2) 1646 return -EINVAL; 1647 1648 pm_runtime_get_sync(&sw->dev); 1649 ret = tb_switch_set_authorized(sw, val); 1650 pm_runtime_mark_last_busy(&sw->dev); 1651 pm_runtime_put_autosuspend(&sw->dev); 1652 1653 return ret ? ret : count; 1654 } 1655 static DEVICE_ATTR_RW(authorized); 1656 1657 static ssize_t boot_show(struct device *dev, struct device_attribute *attr, 1658 char *buf) 1659 { 1660 struct tb_switch *sw = tb_to_switch(dev); 1661 1662 return sprintf(buf, "%u\n", sw->boot); 1663 } 1664 static DEVICE_ATTR_RO(boot); 1665 1666 static ssize_t device_show(struct device *dev, struct device_attribute *attr, 1667 char *buf) 1668 { 1669 struct tb_switch *sw = tb_to_switch(dev); 1670 1671 return sprintf(buf, "%#x\n", sw->device); 1672 } 1673 static DEVICE_ATTR_RO(device); 1674 1675 static ssize_t 1676 device_name_show(struct device *dev, struct device_attribute *attr, char *buf) 1677 { 1678 struct tb_switch *sw = tb_to_switch(dev); 1679 1680 return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : ""); 1681 } 1682 static DEVICE_ATTR_RO(device_name); 1683 1684 static ssize_t 1685 generation_show(struct device *dev, struct device_attribute *attr, char *buf) 1686 { 1687 struct tb_switch *sw = tb_to_switch(dev); 1688 1689 return sprintf(buf, "%u\n", sw->generation); 1690 } 1691 static DEVICE_ATTR_RO(generation); 1692 1693 static ssize_t key_show(struct device *dev, struct device_attribute *attr, 1694 char *buf) 1695 { 1696 struct tb_switch *sw = tb_to_switch(dev); 1697 ssize_t ret; 1698 1699 if (!mutex_trylock(&sw->tb->lock)) 1700 return restart_syscall(); 1701 1702 if (sw->key) 1703 ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key); 1704 else 1705 ret = sprintf(buf, "\n"); 1706 1707 mutex_unlock(&sw->tb->lock); 1708 return ret; 1709 } 1710 1711 static ssize_t key_store(struct device *dev, struct device_attribute *attr, 1712 const char *buf, size_t count) 1713 { 1714 struct tb_switch *sw = tb_to_switch(dev); 1715 u8 key[TB_SWITCH_KEY_SIZE]; 1716 ssize_t ret = count; 1717 bool clear = false; 1718 1719 if (!strcmp(buf, "\n")) 1720 clear = true; 1721 else if (hex2bin(key, buf, sizeof(key))) 1722 return -EINVAL; 1723 1724 if (!mutex_trylock(&sw->tb->lock)) 1725 return restart_syscall(); 1726 1727 if (sw->authorized) { 1728 ret = -EBUSY; 1729 } else { 1730 kfree(sw->key); 1731 if (clear) { 1732 sw->key = NULL; 1733 } else { 1734 sw->key = kmemdup(key, sizeof(key), GFP_KERNEL); 1735 if (!sw->key) 1736 ret = -ENOMEM; 1737 } 1738 } 1739 1740 mutex_unlock(&sw->tb->lock); 1741 return ret; 1742 } 1743 static DEVICE_ATTR(key, 0600, key_show, key_store); 1744 1745 static ssize_t speed_show(struct device *dev, struct device_attribute *attr, 1746 char *buf) 1747 { 1748 struct tb_switch *sw = tb_to_switch(dev); 1749 1750 return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed); 1751 } 1752 1753 /* 1754 * Currently all lanes must run at the same speed but we expose here 1755 * both directions to allow possible asymmetric links in the future. 
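 *
 * For example (illustrative device name), userspace sees these as:
 *
 *	/sys/bus/thunderbolt/devices/0-1/rx_speed
 *	/sys/bus/thunderbolt/devices/0-1/tx_speed
 *
 * where the device name follows the "%u-%llx" (domain index, route)
 * format set in tb_switch_alloc().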
1756 */ 1757 static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL); 1758 static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL); 1759 1760 static ssize_t lanes_show(struct device *dev, struct device_attribute *attr, 1761 char *buf) 1762 { 1763 struct tb_switch *sw = tb_to_switch(dev); 1764 1765 return sprintf(buf, "%u\n", sw->link_width); 1766 } 1767 1768 /* 1769 * Currently link has same amount of lanes both directions (1 or 2) but 1770 * expose them separately to allow possible asymmetric links in the future. 1771 */ 1772 static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL); 1773 static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL); 1774 1775 static ssize_t nvm_authenticate_show(struct device *dev, 1776 struct device_attribute *attr, char *buf) 1777 { 1778 struct tb_switch *sw = tb_to_switch(dev); 1779 u32 status; 1780 1781 nvm_get_auth_status(sw, &status); 1782 return sprintf(buf, "%#x\n", status); 1783 } 1784 1785 static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf, 1786 bool disconnect) 1787 { 1788 struct tb_switch *sw = tb_to_switch(dev); 1789 int val, ret; 1790 1791 pm_runtime_get_sync(&sw->dev); 1792 1793 if (!mutex_trylock(&sw->tb->lock)) { 1794 ret = restart_syscall(); 1795 goto exit_rpm; 1796 } 1797 1798 /* If NVMem devices are not yet added */ 1799 if (!sw->nvm) { 1800 ret = -EAGAIN; 1801 goto exit_unlock; 1802 } 1803 1804 ret = kstrtoint(buf, 10, &val); 1805 if (ret) 1806 goto exit_unlock; 1807 1808 /* Always clear the authentication status */ 1809 nvm_clear_auth_status(sw); 1810 1811 if (val > 0) { 1812 if (val == AUTHENTICATE_ONLY) { 1813 if (disconnect) 1814 ret = -EINVAL; 1815 else 1816 ret = nvm_authenticate(sw, true); 1817 } else { 1818 if (!sw->nvm->flushed) { 1819 if (!sw->nvm->buf) { 1820 ret = -EINVAL; 1821 goto exit_unlock; 1822 } 1823 1824 ret = nvm_validate_and_write(sw); 1825 if (ret || val == WRITE_ONLY) 1826 goto exit_unlock; 1827 } 1828 if (val == WRITE_AND_AUTHENTICATE) { 1829 if (disconnect) 1830 ret = tb_lc_force_power(sw); 1831 else 1832 ret = nvm_authenticate(sw, false); 1833 } 1834 } 1835 } 1836 1837 exit_unlock: 1838 mutex_unlock(&sw->tb->lock); 1839 exit_rpm: 1840 pm_runtime_mark_last_busy(&sw->dev); 1841 pm_runtime_put_autosuspend(&sw->dev); 1842 1843 return ret; 1844 } 1845 1846 static ssize_t nvm_authenticate_store(struct device *dev, 1847 struct device_attribute *attr, const char *buf, size_t count) 1848 { 1849 int ret = nvm_authenticate_sysfs(dev, buf, false); 1850 if (ret) 1851 return ret; 1852 return count; 1853 } 1854 static DEVICE_ATTR_RW(nvm_authenticate); 1855 1856 static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev, 1857 struct device_attribute *attr, char *buf) 1858 { 1859 return nvm_authenticate_show(dev, attr, buf); 1860 } 1861 1862 static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev, 1863 struct device_attribute *attr, const char *buf, size_t count) 1864 { 1865 int ret; 1866 1867 ret = nvm_authenticate_sysfs(dev, buf, true); 1868 return ret ? 
ret : count; 1869 } 1870 static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect); 1871 1872 static ssize_t nvm_version_show(struct device *dev, 1873 struct device_attribute *attr, char *buf) 1874 { 1875 struct tb_switch *sw = tb_to_switch(dev); 1876 int ret; 1877 1878 if (!mutex_trylock(&sw->tb->lock)) 1879 return restart_syscall(); 1880 1881 if (sw->safe_mode) 1882 ret = -ENODATA; 1883 else if (!sw->nvm) 1884 ret = -EAGAIN; 1885 else 1886 ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor); 1887 1888 mutex_unlock(&sw->tb->lock); 1889 1890 return ret; 1891 } 1892 static DEVICE_ATTR_RO(nvm_version); 1893 1894 static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, 1895 char *buf) 1896 { 1897 struct tb_switch *sw = tb_to_switch(dev); 1898 1899 return sprintf(buf, "%#x\n", sw->vendor); 1900 } 1901 static DEVICE_ATTR_RO(vendor); 1902 1903 static ssize_t 1904 vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf) 1905 { 1906 struct tb_switch *sw = tb_to_switch(dev); 1907 1908 return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : ""); 1909 } 1910 static DEVICE_ATTR_RO(vendor_name); 1911 1912 static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr, 1913 char *buf) 1914 { 1915 struct tb_switch *sw = tb_to_switch(dev); 1916 1917 return sprintf(buf, "%pUb\n", sw->uuid); 1918 } 1919 static DEVICE_ATTR_RO(unique_id); 1920 1921 static struct attribute *switch_attrs[] = { 1922 &dev_attr_authorized.attr, 1923 &dev_attr_boot.attr, 1924 &dev_attr_device.attr, 1925 &dev_attr_device_name.attr, 1926 &dev_attr_generation.attr, 1927 &dev_attr_key.attr, 1928 &dev_attr_nvm_authenticate.attr, 1929 &dev_attr_nvm_authenticate_on_disconnect.attr, 1930 &dev_attr_nvm_version.attr, 1931 &dev_attr_rx_speed.attr, 1932 &dev_attr_rx_lanes.attr, 1933 &dev_attr_tx_speed.attr, 1934 &dev_attr_tx_lanes.attr, 1935 &dev_attr_vendor.attr, 1936 &dev_attr_vendor_name.attr, 1937 &dev_attr_unique_id.attr, 1938 NULL, 1939 }; 1940 1941 static umode_t switch_attr_is_visible(struct kobject *kobj, 1942 struct attribute *attr, int n) 1943 { 1944 struct device *dev = kobj_to_dev(kobj); 1945 struct tb_switch *sw = tb_to_switch(dev); 1946 1947 if (attr == &dev_attr_authorized.attr) { 1948 if (sw->tb->security_level == TB_SECURITY_NOPCIE || 1949 sw->tb->security_level == TB_SECURITY_DPONLY) 1950 return 0; 1951 } else if (attr == &dev_attr_device.attr) { 1952 if (!sw->device) 1953 return 0; 1954 } else if (attr == &dev_attr_device_name.attr) { 1955 if (!sw->device_name) 1956 return 0; 1957 } else if (attr == &dev_attr_vendor.attr) { 1958 if (!sw->vendor) 1959 return 0; 1960 } else if (attr == &dev_attr_vendor_name.attr) { 1961 if (!sw->vendor_name) 1962 return 0; 1963 } else if (attr == &dev_attr_key.attr) { 1964 if (tb_route(sw) && 1965 sw->tb->security_level == TB_SECURITY_SECURE && 1966 sw->security_level == TB_SECURITY_SECURE) 1967 return attr->mode; 1968 return 0; 1969 } else if (attr == &dev_attr_rx_speed.attr || 1970 attr == &dev_attr_rx_lanes.attr || 1971 attr == &dev_attr_tx_speed.attr || 1972 attr == &dev_attr_tx_lanes.attr) { 1973 if (tb_route(sw)) 1974 return attr->mode; 1975 return 0; 1976 } else if (attr == &dev_attr_nvm_authenticate.attr) { 1977 if (nvm_upgradeable(sw)) 1978 return attr->mode; 1979 return 0; 1980 } else if (attr == &dev_attr_nvm_version.attr) { 1981 if (nvm_readable(sw)) 1982 return attr->mode; 1983 return 0; 1984 } else if (attr == &dev_attr_boot.attr) { 1985 if (tb_route(sw)) 1986 return attr->mode; 1987 return 0; 1988 } else 
if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) { 1989 if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER) 1990 return attr->mode; 1991 return 0; 1992 } 1993 1994 return sw->safe_mode ? 0 : attr->mode; 1995 } 1996 1997 static const struct attribute_group switch_group = { 1998 .is_visible = switch_attr_is_visible, 1999 .attrs = switch_attrs, 2000 }; 2001 2002 static const struct attribute_group *switch_groups[] = { 2003 &switch_group, 2004 NULL, 2005 }; 2006 2007 static void tb_switch_release(struct device *dev) 2008 { 2009 struct tb_switch *sw = tb_to_switch(dev); 2010 struct tb_port *port; 2011 2012 dma_port_free(sw->dma_port); 2013 2014 tb_switch_for_each_port(sw, port) { 2015 ida_destroy(&port->in_hopids); 2016 ida_destroy(&port->out_hopids); 2017 } 2018 2019 kfree(sw->uuid); 2020 kfree(sw->device_name); 2021 kfree(sw->vendor_name); 2022 kfree(sw->ports); 2023 kfree(sw->drom); 2024 kfree(sw->key); 2025 kfree(sw); 2026 } 2027 2028 static int tb_switch_uevent(struct device *dev, struct kobj_uevent_env *env) 2029 { 2030 struct tb_switch *sw = tb_to_switch(dev); 2031 const char *type; 2032 2033 if (sw->config.thunderbolt_version == USB4_VERSION_1_0) { 2034 if (add_uevent_var(env, "USB4_VERSION=1.0")) 2035 return -ENOMEM; 2036 } 2037 2038 if (!tb_route(sw)) { 2039 type = "host"; 2040 } else { 2041 const struct tb_port *port; 2042 bool hub = false; 2043 2044 /* Device is hub if it has any downstream ports */ 2045 tb_switch_for_each_port(sw, port) { 2046 if (!port->disabled && !tb_is_upstream_port(port) && 2047 tb_port_is_null(port)) { 2048 hub = true; 2049 break; 2050 } 2051 } 2052 2053 type = hub ? "hub" : "device"; 2054 } 2055 2056 if (add_uevent_var(env, "USB4_TYPE=%s", type)) 2057 return -ENOMEM; 2058 return 0; 2059 } 2060 2061 /* 2062 * Currently only need to provide the callbacks. Everything else is handled 2063 * in the connection manager. 
2064 */ 2065 static int __maybe_unused tb_switch_runtime_suspend(struct device *dev) 2066 { 2067 struct tb_switch *sw = tb_to_switch(dev); 2068 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; 2069 2070 if (cm_ops->runtime_suspend_switch) 2071 return cm_ops->runtime_suspend_switch(sw); 2072 2073 return 0; 2074 } 2075 2076 static int __maybe_unused tb_switch_runtime_resume(struct device *dev) 2077 { 2078 struct tb_switch *sw = tb_to_switch(dev); 2079 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; 2080 2081 if (cm_ops->runtime_resume_switch) 2082 return cm_ops->runtime_resume_switch(sw); 2083 return 0; 2084 } 2085 2086 static const struct dev_pm_ops tb_switch_pm_ops = { 2087 SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume, 2088 NULL) 2089 }; 2090 2091 struct device_type tb_switch_type = { 2092 .name = "thunderbolt_device", 2093 .release = tb_switch_release, 2094 .uevent = tb_switch_uevent, 2095 .pm = &tb_switch_pm_ops, 2096 }; 2097 2098 static int tb_switch_get_generation(struct tb_switch *sw) 2099 { 2100 switch (sw->config.device_id) { 2101 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE: 2102 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE: 2103 case PCI_DEVICE_ID_INTEL_LIGHT_PEAK: 2104 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C: 2105 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C: 2106 case PCI_DEVICE_ID_INTEL_PORT_RIDGE: 2107 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE: 2108 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE: 2109 return 1; 2110 2111 case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE: 2112 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE: 2113 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE: 2114 return 2; 2115 2116 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: 2117 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE: 2118 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE: 2119 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: 2120 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: 2121 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: 2122 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: 2123 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE: 2124 case PCI_DEVICE_ID_INTEL_ICL_NHI0: 2125 case PCI_DEVICE_ID_INTEL_ICL_NHI1: 2126 return 3; 2127 2128 default: 2129 if (tb_switch_is_usb4(sw)) 2130 return 4; 2131 2132 /* 2133 * For unknown switches assume generation to be 1 to be 2134 * on the safe side. 2135 */ 2136 tb_sw_warn(sw, "unsupported switch device id %#x\n", 2137 sw->config.device_id); 2138 return 1; 2139 } 2140 } 2141 2142 static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth) 2143 { 2144 int max_depth; 2145 2146 if (tb_switch_is_usb4(sw) || 2147 (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch))) 2148 max_depth = USB4_SWITCH_MAX_DEPTH; 2149 else 2150 max_depth = TB_SWITCH_MAX_DEPTH; 2151 2152 return depth > max_depth; 2153 } 2154 2155 /** 2156 * tb_switch_alloc() - allocate a switch 2157 * @tb: Pointer to the owning domain 2158 * @parent: Parent device for this switch 2159 * @route: Route string for this switch 2160 * 2161 * Allocates and initializes a switch. Will not upload configuration to 2162 * the switch. For that you need to call tb_switch_configure() 2163 * separately. The returned switch should be released by calling 2164 * tb_switch_put(). 2165 * 2166 * Return: Pointer to the allocated switch or ERR_PTR() in case of 2167 * failure. 
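 *
 * Illustrative call pattern (not part of the driver; error handling
 * trimmed):
 *
 *	sw = tb_switch_alloc(tb, &parent_sw->dev, route);
 *	if (IS_ERR(sw))
 *		return PTR_ERR(sw);
 *
 *	ret = tb_switch_configure(sw);
 *	if (ret)
 *		tb_switch_put(sw);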
2168 */ 2169 struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent, 2170 u64 route) 2171 { 2172 struct tb_switch *sw; 2173 int upstream_port; 2174 int i, ret, depth; 2175 2176 /* Unlock the downstream port so we can access the switch below */ 2177 if (route) { 2178 struct tb_switch *parent_sw = tb_to_switch(parent); 2179 struct tb_port *down; 2180 2181 down = tb_port_at(route, parent_sw); 2182 tb_port_unlock(down); 2183 } 2184 2185 depth = tb_route_length(route); 2186 2187 upstream_port = tb_cfg_get_upstream_port(tb->ctl, route); 2188 if (upstream_port < 0) 2189 return ERR_PTR(upstream_port); 2190 2191 sw = kzalloc(sizeof(*sw), GFP_KERNEL); 2192 if (!sw) 2193 return ERR_PTR(-ENOMEM); 2194 2195 sw->tb = tb; 2196 ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5); 2197 if (ret) 2198 goto err_free_sw_ports; 2199 2200 sw->generation = tb_switch_get_generation(sw); 2201 2202 tb_dbg(tb, "current switch config:\n"); 2203 tb_dump_switch(tb, sw); 2204 2205 /* configure switch */ 2206 sw->config.upstream_port_number = upstream_port; 2207 sw->config.depth = depth; 2208 sw->config.route_hi = upper_32_bits(route); 2209 sw->config.route_lo = lower_32_bits(route); 2210 sw->config.enabled = 0; 2211 2212 /* Make sure we do not exceed maximum topology limit */ 2213 if (tb_switch_exceeds_max_depth(sw, depth)) { 2214 ret = -EADDRNOTAVAIL; 2215 goto err_free_sw_ports; 2216 } 2217 2218 /* initialize ports */ 2219 sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports), 2220 GFP_KERNEL); 2221 if (!sw->ports) { 2222 ret = -ENOMEM; 2223 goto err_free_sw_ports; 2224 } 2225 2226 for (i = 0; i <= sw->config.max_port_number; i++) { 2227 /* minimum setup for tb_find_cap and tb_drom_read to work */ 2228 sw->ports[i].sw = sw; 2229 sw->ports[i].port = i; 2230 2231 /* Control port does not need HopID allocation */ 2232 if (i) { 2233 ida_init(&sw->ports[i].in_hopids); 2234 ida_init(&sw->ports[i].out_hopids); 2235 } 2236 } 2237 2238 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS); 2239 if (ret > 0) 2240 sw->cap_plug_events = ret; 2241 2242 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2); 2243 if (ret > 0) 2244 sw->cap_vsec_tmu = ret; 2245 2246 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER); 2247 if (ret > 0) 2248 sw->cap_lc = ret; 2249 2250 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP); 2251 if (ret > 0) 2252 sw->cap_lp = ret; 2253 2254 /* Root switch is always authorized */ 2255 if (!route) 2256 sw->authorized = true; 2257 2258 device_initialize(&sw->dev); 2259 sw->dev.parent = parent; 2260 sw->dev.bus = &tb_bus_type; 2261 sw->dev.type = &tb_switch_type; 2262 sw->dev.groups = switch_groups; 2263 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw)); 2264 2265 return sw; 2266 2267 err_free_sw_ports: 2268 kfree(sw->ports); 2269 kfree(sw); 2270 2271 return ERR_PTR(ret); 2272 } 2273 2274 /** 2275 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode 2276 * @tb: Pointer to the owning domain 2277 * @parent: Parent device for this switch 2278 * @route: Route string for this switch 2279 * 2280 * This creates a switch in safe mode. This means the switch pretty much 2281 * lacks all capabilities except DMA configuration port before it is 2282 * flashed with a valid NVM firmware. 2283 * 2284 * The returned switch must be released by calling tb_switch_put(). 
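 * Unlike tb_switch_alloc() no configuration space is read here since in
 * safe mode the router is only reachable through the DMA configuration
 * port.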
2285 * 2286 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure 2287 */ 2288 struct tb_switch * 2289 tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route) 2290 { 2291 struct tb_switch *sw; 2292 2293 sw = kzalloc(sizeof(*sw), GFP_KERNEL); 2294 if (!sw) 2295 return ERR_PTR(-ENOMEM); 2296 2297 sw->tb = tb; 2298 sw->config.depth = tb_route_length(route); 2299 sw->config.route_hi = upper_32_bits(route); 2300 sw->config.route_lo = lower_32_bits(route); 2301 sw->safe_mode = true; 2302 2303 device_initialize(&sw->dev); 2304 sw->dev.parent = parent; 2305 sw->dev.bus = &tb_bus_type; 2306 sw->dev.type = &tb_switch_type; 2307 sw->dev.groups = switch_groups; 2308 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw)); 2309 2310 return sw; 2311 } 2312 2313 /** 2314 * tb_switch_configure() - Uploads configuration to the switch 2315 * @sw: Switch to configure 2316 * 2317 * Call this function before the switch is added to the system. It will 2318 * upload configuration to the switch and makes it available for the 2319 * connection manager to use. Can be called to the switch again after 2320 * resume from low power states to re-initialize it. 2321 * 2322 * Return: %0 in case of success and negative errno in case of failure 2323 */ 2324 int tb_switch_configure(struct tb_switch *sw) 2325 { 2326 struct tb *tb = sw->tb; 2327 u64 route; 2328 int ret; 2329 2330 route = tb_route(sw); 2331 2332 tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n", 2333 sw->config.enabled ? "restoring" : "initializing", route, 2334 tb_route_length(route), sw->config.upstream_port_number); 2335 2336 sw->config.enabled = 1; 2337 2338 if (tb_switch_is_usb4(sw)) { 2339 /* 2340 * For USB4 devices, we need to program the CM version 2341 * accordingly so that it knows to expose all the 2342 * additional capabilities. 2343 */ 2344 sw->config.cmuv = USB4_VERSION_1_0; 2345 2346 /* Enumerate the switch */ 2347 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, 2348 ROUTER_CS_1, 4); 2349 if (ret) 2350 return ret; 2351 2352 ret = usb4_switch_setup(sw); 2353 } else { 2354 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) 2355 tb_sw_warn(sw, "unknown switch vendor id %#x\n", 2356 sw->config.vendor_id); 2357 2358 if (!sw->cap_plug_events) { 2359 tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n"); 2360 return -ENODEV; 2361 } 2362 2363 /* Enumerate the switch */ 2364 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, 2365 ROUTER_CS_1, 3); 2366 } 2367 if (ret) 2368 return ret; 2369 2370 return tb_plug_events_active(sw, true); 2371 } 2372 2373 static int tb_switch_set_uuid(struct tb_switch *sw) 2374 { 2375 bool uid = false; 2376 u32 uuid[4]; 2377 int ret; 2378 2379 if (sw->uuid) 2380 return 0; 2381 2382 if (tb_switch_is_usb4(sw)) { 2383 ret = usb4_switch_read_uid(sw, &sw->uid); 2384 if (ret) 2385 return ret; 2386 uid = true; 2387 } else { 2388 /* 2389 * The newer controllers include fused UUID as part of 2390 * link controller specific registers 2391 */ 2392 ret = tb_lc_read_uuid(sw, uuid); 2393 if (ret) { 2394 if (ret != -EINVAL) 2395 return ret; 2396 uid = true; 2397 } 2398 } 2399 2400 if (uid) { 2401 /* 2402 * ICM generates UUID based on UID and fills the upper 2403 * two words with ones. This is not strictly following 2404 * UUID format but we want to be compatible with it so 2405 * we do the same here. 
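	 * For example, UID 0x1122334455667788 ends up as the words
	 * { 0x55667788, 0x11223344, 0xffffffff, 0xffffffff }.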
2406 */ 2407 uuid[0] = sw->uid & 0xffffffff; 2408 uuid[1] = (sw->uid >> 32) & 0xffffffff; 2409 uuid[2] = 0xffffffff; 2410 uuid[3] = 0xffffffff; 2411 } 2412 2413 sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL); 2414 if (!sw->uuid) 2415 return -ENOMEM; 2416 return 0; 2417 } 2418 2419 static int tb_switch_add_dma_port(struct tb_switch *sw) 2420 { 2421 u32 status; 2422 int ret; 2423 2424 switch (sw->generation) { 2425 case 2: 2426 /* Only root switch can be upgraded */ 2427 if (tb_route(sw)) 2428 return 0; 2429 2430 fallthrough; 2431 case 3: 2432 case 4: 2433 ret = tb_switch_set_uuid(sw); 2434 if (ret) 2435 return ret; 2436 break; 2437 2438 default: 2439 /* 2440 * DMA port is the only thing available when the switch 2441 * is in safe mode. 2442 */ 2443 if (!sw->safe_mode) 2444 return 0; 2445 break; 2446 } 2447 2448 if (sw->no_nvm_upgrade) 2449 return 0; 2450 2451 if (tb_switch_is_usb4(sw)) { 2452 ret = usb4_switch_nvm_authenticate_status(sw, &status); 2453 if (ret) 2454 return ret; 2455 2456 if (status) { 2457 tb_sw_info(sw, "switch flash authentication failed\n"); 2458 nvm_set_auth_status(sw, status); 2459 } 2460 2461 return 0; 2462 } 2463 2464 /* Root switch DMA port requires running firmware */ 2465 if (!tb_route(sw) && !tb_switch_is_icm(sw)) 2466 return 0; 2467 2468 sw->dma_port = dma_port_alloc(sw); 2469 if (!sw->dma_port) 2470 return 0; 2471 2472 /* 2473 * If there is status already set then authentication failed 2474 * when the dma_port_flash_update_auth() returned. Power cycling 2475 * is not needed (it was done already) so only thing we do here 2476 * is to unblock runtime PM of the root port. 2477 */ 2478 nvm_get_auth_status(sw, &status); 2479 if (status) { 2480 if (!tb_route(sw)) 2481 nvm_authenticate_complete_dma_port(sw); 2482 return 0; 2483 } 2484 2485 /* 2486 * Check status of the previous flash authentication. If there 2487 * is one we need to power cycle the switch in any case to make 2488 * it functional again. 2489 */ 2490 ret = dma_port_flash_update_auth_status(sw->dma_port, &status); 2491 if (ret <= 0) 2492 return ret; 2493 2494 /* Now we can allow root port to suspend again */ 2495 if (!tb_route(sw)) 2496 nvm_authenticate_complete_dma_port(sw); 2497 2498 if (status) { 2499 tb_sw_info(sw, "switch flash authentication failed\n"); 2500 nvm_set_auth_status(sw, status); 2501 } 2502 2503 tb_sw_info(sw, "power cycling the switch now\n"); 2504 dma_port_power_cycle(sw->dma_port); 2505 2506 /* 2507 * We return error here which causes the switch adding failure. 2508 * It should appear back after power cycle is complete. 
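	 * The error propagates out through tb_switch_add() which then
	 * logs "failed to add DMA port" for this router.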
2509 */ 2510 return -ESHUTDOWN; 2511 } 2512 2513 static void tb_switch_default_link_ports(struct tb_switch *sw) 2514 { 2515 int i; 2516 2517 for (i = 1; i <= sw->config.max_port_number; i++) { 2518 struct tb_port *port = &sw->ports[i]; 2519 struct tb_port *subordinate; 2520 2521 if (!tb_port_is_null(port)) 2522 continue; 2523 2524 /* Check for the subordinate port */ 2525 if (i == sw->config.max_port_number || 2526 !tb_port_is_null(&sw->ports[i + 1])) 2527 continue; 2528 2529 /* Link them if not already done so (by DROM) */ 2530 subordinate = &sw->ports[i + 1]; 2531 if (!port->dual_link_port && !subordinate->dual_link_port) { 2532 port->link_nr = 0; 2533 port->dual_link_port = subordinate; 2534 subordinate->link_nr = 1; 2535 subordinate->dual_link_port = port; 2536 2537 tb_sw_dbg(sw, "linked ports %d <-> %d\n", 2538 port->port, subordinate->port); 2539 } 2540 } 2541 } 2542 2543 static bool tb_switch_lane_bonding_possible(struct tb_switch *sw) 2544 { 2545 const struct tb_port *up = tb_upstream_port(sw); 2546 2547 if (!up->dual_link_port || !up->dual_link_port->remote) 2548 return false; 2549 2550 if (tb_switch_is_usb4(sw)) 2551 return usb4_switch_lane_bonding_possible(sw); 2552 return tb_lc_lane_bonding_possible(sw); 2553 } 2554 2555 static int tb_switch_update_link_attributes(struct tb_switch *sw) 2556 { 2557 struct tb_port *up; 2558 bool change = false; 2559 int ret; 2560 2561 if (!tb_route(sw) || tb_switch_is_icm(sw)) 2562 return 0; 2563 2564 up = tb_upstream_port(sw); 2565 2566 ret = tb_port_get_link_speed(up); 2567 if (ret < 0) 2568 return ret; 2569 if (sw->link_speed != ret) 2570 change = true; 2571 sw->link_speed = ret; 2572 2573 ret = tb_port_get_link_width(up); 2574 if (ret < 0) 2575 return ret; 2576 if (sw->link_width != ret) 2577 change = true; 2578 sw->link_width = ret; 2579 2580 /* Notify userspace that there is possible link attribute change */ 2581 if (device_is_registered(&sw->dev) && change) 2582 kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE); 2583 2584 return 0; 2585 } 2586 2587 /** 2588 * tb_switch_lane_bonding_enable() - Enable lane bonding 2589 * @sw: Switch to enable lane bonding 2590 * 2591 * Connection manager can call this function to enable lane bonding of a 2592 * switch. If conditions are correct and both switches support the feature, 2593 * lanes are bonded. It is safe to call this to any switch. 
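 *
 * Return: %0 on success (also when bonding was not performed) and
 * negative errno in case of failure.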
2594  */
2595 int tb_switch_lane_bonding_enable(struct tb_switch *sw)
2596 {
2597 	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2598 	struct tb_port *up, *down;
2599 	u64 route = tb_route(sw);
2600 	int ret;
2601 
2602 	if (!route)
2603 		return 0;
2604 
2605 	if (!tb_switch_lane_bonding_possible(sw))
2606 		return 0;
2607 
2608 	up = tb_upstream_port(sw);
2609 	down = tb_port_at(route, parent);
2610 
2611 	if (!tb_port_is_width_supported(up, 2) ||
2612 	    !tb_port_is_width_supported(down, 2))
2613 		return 0;
2614 
2615 	ret = tb_port_lane_bonding_enable(up);
2616 	if (ret) {
2617 		tb_port_warn(up, "failed to enable lane bonding\n");
2618 		return ret;
2619 	}
2620 
2621 	ret = tb_port_lane_bonding_enable(down);
2622 	if (ret) {
2623 		tb_port_warn(down, "failed to enable lane bonding\n");
2624 		tb_port_lane_bonding_disable(up);
2625 		return ret;
2626 	}
2627 
2628 	ret = tb_port_wait_for_link_width(down, 2, 100);
2629 	if (ret) {
2630 		tb_port_warn(down, "timeout enabling lane bonding\n");
2631 		return ret;
2632 	}
2633 
2634 	tb_port_update_credits(down);
2635 	tb_port_update_credits(up);
2636 	tb_switch_update_link_attributes(sw);
2637 
2638 	tb_sw_dbg(sw, "lane bonding enabled\n");
2639 	return ret;
2640 }
2641 
2642 /**
2643  * tb_switch_lane_bonding_disable() - Disable lane bonding
2644  * @sw: Switch whose lane bonding to disable
2645  *
2646  * Disables lane bonding between @sw and parent. This can be called even
2647  * if lanes were not bonded originally.
2648  */
2649 void tb_switch_lane_bonding_disable(struct tb_switch *sw)
2650 {
2651 	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2652 	struct tb_port *up, *down;
2653 
2654 	if (!tb_route(sw))
2655 		return;
2656 
2657 	up = tb_upstream_port(sw);
2658 	if (!up->bonded)
2659 		return;
2660 
2661 	down = tb_port_at(tb_route(sw), parent);
2662 
2663 	tb_port_lane_bonding_disable(up);
2664 	tb_port_lane_bonding_disable(down);
2665 
2666 	/*
2667 	 * It is fine if we get other errors as the router might have
2668 	 * been unplugged.
2669 	 */
2670 	if (tb_port_wait_for_link_width(down, 1, 100) == -ETIMEDOUT)
2671 		tb_sw_warn(sw, "timeout disabling lane bonding\n");
2672 
2673 	tb_port_update_credits(down);
2674 	tb_port_update_credits(up);
2675 	tb_switch_update_link_attributes(sw);
2676 
2677 	tb_sw_dbg(sw, "lane bonding disabled\n");
2678 }
2679 
2680 /**
2681  * tb_switch_configure_link() - Set link configured
2682  * @sw: Switch whose link is configured
2683  *
2684  * Sets the link upstream from @sw configured (from both ends) so that
2685  * it will not be disconnected when the domain exits sleep. Can be
2686  * called for any switch.
2687  *
2688  * It is recommended that this is called after lane bonding is enabled.
2689  *
2690  * Returns %0 on success and negative errno in case of error.
2691  */
2692 int tb_switch_configure_link(struct tb_switch *sw)
2693 {
2694 	struct tb_port *up, *down;
2695 	int ret;
2696 
2697 	if (!tb_route(sw) || tb_switch_is_icm(sw))
2698 		return 0;
2699 
2700 	up = tb_upstream_port(sw);
2701 	if (tb_switch_is_usb4(up->sw))
2702 		ret = usb4_port_configure(up);
2703 	else
2704 		ret = tb_lc_configure_port(up);
2705 	if (ret)
2706 		return ret;
2707 
2708 	down = up->remote;
2709 	if (tb_switch_is_usb4(down->sw))
2710 		return usb4_port_configure(down);
2711 	return tb_lc_configure_port(down);
2712 }
2713 
2714 /**
2715  * tb_switch_unconfigure_link() - Unconfigure link
2716  * @sw: Switch whose link is unconfigured
2717  *
2718  * Sets the link unconfigured so the @sw will be disconnected if the
2719  * domain exits sleep.
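 * This is the counterpart of tb_switch_configure_link().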
2720 */ 2721 void tb_switch_unconfigure_link(struct tb_switch *sw) 2722 { 2723 struct tb_port *up, *down; 2724 2725 if (sw->is_unplugged) 2726 return; 2727 if (!tb_route(sw) || tb_switch_is_icm(sw)) 2728 return; 2729 2730 up = tb_upstream_port(sw); 2731 if (tb_switch_is_usb4(up->sw)) 2732 usb4_port_unconfigure(up); 2733 else 2734 tb_lc_unconfigure_port(up); 2735 2736 down = up->remote; 2737 if (tb_switch_is_usb4(down->sw)) 2738 usb4_port_unconfigure(down); 2739 else 2740 tb_lc_unconfigure_port(down); 2741 } 2742 2743 static void tb_switch_credits_init(struct tb_switch *sw) 2744 { 2745 if (tb_switch_is_icm(sw)) 2746 return; 2747 if (!tb_switch_is_usb4(sw)) 2748 return; 2749 if (usb4_switch_credits_init(sw)) 2750 tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n"); 2751 } 2752 2753 /** 2754 * tb_switch_add() - Add a switch to the domain 2755 * @sw: Switch to add 2756 * 2757 * This is the last step in adding switch to the domain. It will read 2758 * identification information from DROM and initializes ports so that 2759 * they can be used to connect other switches. The switch will be 2760 * exposed to the userspace when this function successfully returns. To 2761 * remove and release the switch, call tb_switch_remove(). 2762 * 2763 * Return: %0 in case of success and negative errno in case of failure 2764 */ 2765 int tb_switch_add(struct tb_switch *sw) 2766 { 2767 int i, ret; 2768 2769 /* 2770 * Initialize DMA control port now before we read DROM. Recent 2771 * host controllers have more complete DROM on NVM that includes 2772 * vendor and model identification strings which we then expose 2773 * to the userspace. NVM can be accessed through DMA 2774 * configuration based mailbox. 2775 */ 2776 ret = tb_switch_add_dma_port(sw); 2777 if (ret) { 2778 dev_err(&sw->dev, "failed to add DMA port\n"); 2779 return ret; 2780 } 2781 2782 if (!sw->safe_mode) { 2783 tb_switch_credits_init(sw); 2784 2785 /* read drom */ 2786 ret = tb_drom_read(sw); 2787 if (ret) 2788 dev_warn(&sw->dev, "reading DROM failed: %d\n", ret); 2789 tb_sw_dbg(sw, "uid: %#llx\n", sw->uid); 2790 2791 tb_check_quirks(sw); 2792 2793 ret = tb_switch_set_uuid(sw); 2794 if (ret) { 2795 dev_err(&sw->dev, "failed to set UUID\n"); 2796 return ret; 2797 } 2798 2799 for (i = 0; i <= sw->config.max_port_number; i++) { 2800 if (sw->ports[i].disabled) { 2801 tb_port_dbg(&sw->ports[i], "disabled by eeprom\n"); 2802 continue; 2803 } 2804 ret = tb_init_port(&sw->ports[i]); 2805 if (ret) { 2806 dev_err(&sw->dev, "failed to initialize port %d\n", i); 2807 return ret; 2808 } 2809 } 2810 2811 tb_switch_default_link_ports(sw); 2812 2813 ret = tb_switch_update_link_attributes(sw); 2814 if (ret) 2815 return ret; 2816 2817 ret = tb_switch_tmu_init(sw); 2818 if (ret) 2819 return ret; 2820 } 2821 2822 ret = device_add(&sw->dev); 2823 if (ret) { 2824 dev_err(&sw->dev, "failed to add device: %d\n", ret); 2825 return ret; 2826 } 2827 2828 if (tb_route(sw)) { 2829 dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n", 2830 sw->vendor, sw->device); 2831 if (sw->vendor_name && sw->device_name) 2832 dev_info(&sw->dev, "%s %s\n", sw->vendor_name, 2833 sw->device_name); 2834 } 2835 2836 ret = usb4_switch_add_ports(sw); 2837 if (ret) { 2838 dev_err(&sw->dev, "failed to add USB4 ports\n"); 2839 goto err_del; 2840 } 2841 2842 ret = tb_switch_nvm_add(sw); 2843 if (ret) { 2844 dev_err(&sw->dev, "failed to add NVM devices\n"); 2845 goto err_ports; 2846 } 2847 2848 /* 2849 * Thunderbolt routers do not generate wakeups themselves but 
2850 * they forward wakeups from tunneled protocols, so enable it 2851 * here. 2852 */ 2853 device_init_wakeup(&sw->dev, true); 2854 2855 pm_runtime_set_active(&sw->dev); 2856 if (sw->rpm) { 2857 pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY); 2858 pm_runtime_use_autosuspend(&sw->dev); 2859 pm_runtime_mark_last_busy(&sw->dev); 2860 pm_runtime_enable(&sw->dev); 2861 pm_request_autosuspend(&sw->dev); 2862 } 2863 2864 tb_switch_debugfs_init(sw); 2865 return 0; 2866 2867 err_ports: 2868 usb4_switch_remove_ports(sw); 2869 err_del: 2870 device_del(&sw->dev); 2871 2872 return ret; 2873 } 2874 2875 /** 2876 * tb_switch_remove() - Remove and release a switch 2877 * @sw: Switch to remove 2878 * 2879 * This will remove the switch from the domain and release it after last 2880 * reference count drops to zero. If there are switches connected below 2881 * this switch, they will be removed as well. 2882 */ 2883 void tb_switch_remove(struct tb_switch *sw) 2884 { 2885 struct tb_port *port; 2886 2887 tb_switch_debugfs_remove(sw); 2888 2889 if (sw->rpm) { 2890 pm_runtime_get_sync(&sw->dev); 2891 pm_runtime_disable(&sw->dev); 2892 } 2893 2894 /* port 0 is the switch itself and never has a remote */ 2895 tb_switch_for_each_port(sw, port) { 2896 if (tb_port_has_remote(port)) { 2897 tb_switch_remove(port->remote->sw); 2898 port->remote = NULL; 2899 } else if (port->xdomain) { 2900 tb_xdomain_remove(port->xdomain); 2901 port->xdomain = NULL; 2902 } 2903 2904 /* Remove any downstream retimers */ 2905 tb_retimer_remove_all(port); 2906 } 2907 2908 if (!sw->is_unplugged) 2909 tb_plug_events_active(sw, false); 2910 2911 tb_switch_nvm_remove(sw); 2912 usb4_switch_remove_ports(sw); 2913 2914 if (tb_route(sw)) 2915 dev_info(&sw->dev, "device disconnected\n"); 2916 device_unregister(&sw->dev); 2917 } 2918 2919 /** 2920 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches 2921 * @sw: Router to mark unplugged 2922 */ 2923 void tb_sw_set_unplugged(struct tb_switch *sw) 2924 { 2925 struct tb_port *port; 2926 2927 if (sw == sw->tb->root_switch) { 2928 tb_sw_WARN(sw, "cannot unplug root switch\n"); 2929 return; 2930 } 2931 if (sw->is_unplugged) { 2932 tb_sw_WARN(sw, "is_unplugged already set\n"); 2933 return; 2934 } 2935 sw->is_unplugged = true; 2936 tb_switch_for_each_port(sw, port) { 2937 if (tb_port_has_remote(port)) 2938 tb_sw_set_unplugged(port->remote->sw); 2939 else if (port->xdomain) 2940 port->xdomain->is_unplugged = true; 2941 } 2942 } 2943 2944 static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags) 2945 { 2946 if (flags) 2947 tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags); 2948 else 2949 tb_sw_dbg(sw, "disabling wakeup\n"); 2950 2951 if (tb_switch_is_usb4(sw)) 2952 return usb4_switch_set_wake(sw, flags); 2953 return tb_lc_set_wake(sw, flags); 2954 } 2955 2956 int tb_switch_resume(struct tb_switch *sw) 2957 { 2958 struct tb_port *port; 2959 int err; 2960 2961 tb_sw_dbg(sw, "resuming switch\n"); 2962 2963 /* 2964 * Check for UID of the connected switches except for root 2965 * switch which we assume cannot be removed. 2966 */ 2967 if (tb_route(sw)) { 2968 u64 uid; 2969 2970 /* 2971 * Check first that we can still read the switch config 2972 * space. It may be that there is now another domain 2973 * connected. 
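		 * Reading the upstream port below doubles as a simple
		 * presence check for the router behind this route.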
2974 */ 2975 err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw)); 2976 if (err < 0) { 2977 tb_sw_info(sw, "switch not present anymore\n"); 2978 return err; 2979 } 2980 2981 /* We don't have any way to confirm this was the same device */ 2982 if (!sw->uid) 2983 return -ENODEV; 2984 2985 if (tb_switch_is_usb4(sw)) 2986 err = usb4_switch_read_uid(sw, &uid); 2987 else 2988 err = tb_drom_read_uid_only(sw, &uid); 2989 if (err) { 2990 tb_sw_warn(sw, "uid read failed\n"); 2991 return err; 2992 } 2993 if (sw->uid != uid) { 2994 tb_sw_info(sw, 2995 "changed while suspended (uid %#llx -> %#llx)\n", 2996 sw->uid, uid); 2997 return -ENODEV; 2998 } 2999 } 3000 3001 err = tb_switch_configure(sw); 3002 if (err) 3003 return err; 3004 3005 /* Disable wakes */ 3006 tb_switch_set_wake(sw, 0); 3007 3008 err = tb_switch_tmu_init(sw); 3009 if (err) 3010 return err; 3011 3012 /* check for surviving downstream switches */ 3013 tb_switch_for_each_port(sw, port) { 3014 if (!tb_port_is_null(port)) 3015 continue; 3016 3017 if (!tb_port_resume(port)) 3018 continue; 3019 3020 if (tb_wait_for_port(port, true) <= 0) { 3021 tb_port_warn(port, 3022 "lost during suspend, disconnecting\n"); 3023 if (tb_port_has_remote(port)) 3024 tb_sw_set_unplugged(port->remote->sw); 3025 else if (port->xdomain) 3026 port->xdomain->is_unplugged = true; 3027 } else { 3028 /* 3029 * Always unlock the port so the downstream 3030 * switch/domain is accessible. 3031 */ 3032 if (tb_port_unlock(port)) 3033 tb_port_warn(port, "failed to unlock port\n"); 3034 if (port->remote && tb_switch_resume(port->remote->sw)) { 3035 tb_port_warn(port, 3036 "lost during suspend, disconnecting\n"); 3037 tb_sw_set_unplugged(port->remote->sw); 3038 } 3039 } 3040 } 3041 return 0; 3042 } 3043 3044 /** 3045 * tb_switch_suspend() - Put a switch to sleep 3046 * @sw: Switch to suspend 3047 * @runtime: Is this runtime suspend or system sleep 3048 * 3049 * Suspends router and all its children. Enables wakes according to 3050 * value of @runtime and then sets sleep bit for the router. If @sw is 3051 * host router the domain is ready to go to sleep once this function 3052 * returns. 3053 */ 3054 void tb_switch_suspend(struct tb_switch *sw, bool runtime) 3055 { 3056 unsigned int flags = 0; 3057 struct tb_port *port; 3058 int err; 3059 3060 tb_sw_dbg(sw, "suspending switch\n"); 3061 3062 /* 3063 * Actually only needed for Titan Ridge but for simplicity can be 3064 * done for USB4 device too as CLx is re-enabled at resume. 
3065 	 */
3066 	if (tb_switch_disable_clx(sw, TB_CL0S))
3067 		tb_sw_warn(sw, "failed to disable CLx on upstream port\n");
3068 
3069 	err = tb_plug_events_active(sw, false);
3070 	if (err)
3071 		return;
3072 
3073 	tb_switch_for_each_port(sw, port) {
3074 		if (tb_port_has_remote(port))
3075 			tb_switch_suspend(port->remote->sw, runtime);
3076 	}
3077 
3078 	if (runtime) {
3079 		/* Trigger wake when something is plugged in/out */
3080 		flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
3081 		flags |= TB_WAKE_ON_USB4;
3082 		flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP;
3083 	} else if (device_may_wakeup(&sw->dev)) {
3084 		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
3085 	}
3086 
3087 	tb_switch_set_wake(sw, flags);
3088 
3089 	if (tb_switch_is_usb4(sw))
3090 		usb4_switch_set_sleep(sw);
3091 	else
3092 		tb_lc_set_sleep(sw);
3093 }
3094 
3095 /**
3096  * tb_switch_query_dp_resource() - Query availability of DP resource
3097  * @sw: Switch whose DP resource is queried
3098  * @in: DP IN port
3099  *
3100  * Queries availability of DP resource for DP tunneling using switch
3101  * specific means. Returns %true if resource is available.
3102  */
3103 bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
3104 {
3105 	if (tb_switch_is_usb4(sw))
3106 		return usb4_switch_query_dp_resource(sw, in);
3107 	return tb_lc_dp_sink_query(sw, in);
3108 }
3109 
3110 /**
3111  * tb_switch_alloc_dp_resource() - Allocate available DP resource
3112  * @sw: Switch whose DP resource is allocated
3113  * @in: DP IN port
3114  *
3115  * Allocates DP resource for DP tunneling. The resource must be
3116  * available for this to succeed (see tb_switch_query_dp_resource()).
3117  * Returns %0 on success and negative errno otherwise.
3118  */
3119 int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
3120 {
3121 	int ret;
3122 
3123 	if (tb_switch_is_usb4(sw))
3124 		ret = usb4_switch_alloc_dp_resource(sw, in);
3125 	else
3126 		ret = tb_lc_dp_sink_alloc(sw, in);
3127 
3128 	if (ret)
3129 		tb_sw_warn(sw, "failed to allocate DP resource for port %d\n",
3130 			   in->port);
3131 	else
3132 		tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port);
3133 
3134 	return ret;
3135 }
3136 
3137 /**
3138  * tb_switch_dealloc_dp_resource() - De-allocate DP resource
3139  * @sw: Switch whose DP resource is de-allocated
3140  * @in: DP IN port
3141  *
3142  * De-allocates DP resource that was previously allocated for DP
3143  * tunneling.
3144 */ 3145 void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in) 3146 { 3147 int ret; 3148 3149 if (tb_switch_is_usb4(sw)) 3150 ret = usb4_switch_dealloc_dp_resource(sw, in); 3151 else 3152 ret = tb_lc_dp_sink_dealloc(sw, in); 3153 3154 if (ret) 3155 tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n", 3156 in->port); 3157 else 3158 tb_sw_dbg(sw, "released DP resource for port %d\n", in->port); 3159 } 3160 3161 struct tb_sw_lookup { 3162 struct tb *tb; 3163 u8 link; 3164 u8 depth; 3165 const uuid_t *uuid; 3166 u64 route; 3167 }; 3168 3169 static int tb_switch_match(struct device *dev, const void *data) 3170 { 3171 struct tb_switch *sw = tb_to_switch(dev); 3172 const struct tb_sw_lookup *lookup = data; 3173 3174 if (!sw) 3175 return 0; 3176 if (sw->tb != lookup->tb) 3177 return 0; 3178 3179 if (lookup->uuid) 3180 return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid)); 3181 3182 if (lookup->route) { 3183 return sw->config.route_lo == lower_32_bits(lookup->route) && 3184 sw->config.route_hi == upper_32_bits(lookup->route); 3185 } 3186 3187 /* Root switch is matched only by depth */ 3188 if (!lookup->depth) 3189 return !sw->depth; 3190 3191 return sw->link == lookup->link && sw->depth == lookup->depth; 3192 } 3193 3194 /** 3195 * tb_switch_find_by_link_depth() - Find switch by link and depth 3196 * @tb: Domain the switch belongs 3197 * @link: Link number the switch is connected 3198 * @depth: Depth of the switch in link 3199 * 3200 * Returned switch has reference count increased so the caller needs to 3201 * call tb_switch_put() when done with the switch. 3202 */ 3203 struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth) 3204 { 3205 struct tb_sw_lookup lookup; 3206 struct device *dev; 3207 3208 memset(&lookup, 0, sizeof(lookup)); 3209 lookup.tb = tb; 3210 lookup.link = link; 3211 lookup.depth = depth; 3212 3213 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match); 3214 if (dev) 3215 return tb_to_switch(dev); 3216 3217 return NULL; 3218 } 3219 3220 /** 3221 * tb_switch_find_by_uuid() - Find switch by UUID 3222 * @tb: Domain the switch belongs 3223 * @uuid: UUID to look for 3224 * 3225 * Returned switch has reference count increased so the caller needs to 3226 * call tb_switch_put() when done with the switch. 3227 */ 3228 struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid) 3229 { 3230 struct tb_sw_lookup lookup; 3231 struct device *dev; 3232 3233 memset(&lookup, 0, sizeof(lookup)); 3234 lookup.tb = tb; 3235 lookup.uuid = uuid; 3236 3237 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match); 3238 if (dev) 3239 return tb_to_switch(dev); 3240 3241 return NULL; 3242 } 3243 3244 /** 3245 * tb_switch_find_by_route() - Find switch by route string 3246 * @tb: Domain the switch belongs 3247 * @route: Route string to look for 3248 * 3249 * Returned switch has reference count increased so the caller needs to 3250 * call tb_switch_put() when done with the switch. 
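 *
 * Illustrative use (caller names are examples only):
 *
 *	sw = tb_switch_find_by_route(tb, route);
 *	if (sw) {
 *		...
 *		tb_switch_put(sw);
 *	}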
3251 */ 3252 struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route) 3253 { 3254 struct tb_sw_lookup lookup; 3255 struct device *dev; 3256 3257 if (!route) 3258 return tb_switch_get(tb->root_switch); 3259 3260 memset(&lookup, 0, sizeof(lookup)); 3261 lookup.tb = tb; 3262 lookup.route = route; 3263 3264 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match); 3265 if (dev) 3266 return tb_to_switch(dev); 3267 3268 return NULL; 3269 } 3270 3271 /** 3272 * tb_switch_find_port() - return the first port of @type on @sw or NULL 3273 * @sw: Switch to find the port from 3274 * @type: Port type to look for 3275 */ 3276 struct tb_port *tb_switch_find_port(struct tb_switch *sw, 3277 enum tb_port_type type) 3278 { 3279 struct tb_port *port; 3280 3281 tb_switch_for_each_port(sw, port) { 3282 if (port->config.type == type) 3283 return port; 3284 } 3285 3286 return NULL; 3287 } 3288 3289 static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary) 3290 { 3291 u32 phy; 3292 int ret; 3293 3294 ret = tb_port_read(port, &phy, TB_CFG_PORT, 3295 port->cap_phy + LANE_ADP_CS_1, 1); 3296 if (ret) 3297 return ret; 3298 3299 if (secondary) 3300 phy |= LANE_ADP_CS_1_PMS; 3301 else 3302 phy &= ~LANE_ADP_CS_1_PMS; 3303 3304 return tb_port_write(port, &phy, TB_CFG_PORT, 3305 port->cap_phy + LANE_ADP_CS_1, 1); 3306 } 3307 3308 static int tb_port_pm_secondary_enable(struct tb_port *port) 3309 { 3310 return __tb_port_pm_secondary_set(port, true); 3311 } 3312 3313 static int tb_port_pm_secondary_disable(struct tb_port *port) 3314 { 3315 return __tb_port_pm_secondary_set(port, false); 3316 } 3317 3318 static int tb_switch_pm_secondary_resolve(struct tb_switch *sw) 3319 { 3320 struct tb_switch *parent = tb_switch_parent(sw); 3321 struct tb_port *up, *down; 3322 int ret; 3323 3324 if (!tb_route(sw)) 3325 return 0; 3326 3327 up = tb_upstream_port(sw); 3328 down = tb_port_at(tb_route(sw), parent); 3329 ret = tb_port_pm_secondary_enable(up); 3330 if (ret) 3331 return ret; 3332 3333 return tb_port_pm_secondary_disable(down); 3334 } 3335 3336 /* Called for USB4 or Titan Ridge routers only */ 3337 static bool tb_port_clx_supported(struct tb_port *port, enum tb_clx clx) 3338 { 3339 u32 mask, val; 3340 bool ret; 3341 3342 /* Don't enable CLx in case of two single-lane links */ 3343 if (!port->bonded && port->dual_link_port) 3344 return false; 3345 3346 /* Don't enable CLx in case of inter-domain link */ 3347 if (port->xdomain) 3348 return false; 3349 3350 if (tb_switch_is_usb4(port->sw)) { 3351 if (!usb4_port_clx_supported(port)) 3352 return false; 3353 } else if (!tb_lc_is_clx_supported(port)) { 3354 return false; 3355 } 3356 3357 switch (clx) { 3358 case TB_CL0S: 3359 /* CL0s support requires also CL1 support */ 3360 mask = LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT; 3361 break; 3362 3363 /* For now we support only CL0s. 
Not CL1, CL2 */
3364 	case TB_CL1:
3365 	case TB_CL2:
3366 	default:
3367 		return false;
3368 	}
3369 
3370 	ret = tb_port_read(port, &val, TB_CFG_PORT,
3371 			   port->cap_phy + LANE_ADP_CS_0, 1);
3372 	if (ret)
3373 		return false;
3374 
3375 	return !!(val & mask);
3376 }
3377 
3378 static inline bool tb_port_cl0s_supported(struct tb_port *port)
3379 {
3380 	return tb_port_clx_supported(port, TB_CL0S);
3381 }
3382 
3383 static int __tb_port_cl0s_set(struct tb_port *port, bool enable)
3384 {
3385 	u32 phy, mask;
3386 	int ret;
3387 
3388 	/* To enable CL0s it is also required to enable CL1 */
3389 	mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
3390 	ret = tb_port_read(port, &phy, TB_CFG_PORT,
3391 			   port->cap_phy + LANE_ADP_CS_1, 1);
3392 	if (ret)
3393 		return ret;
3394 
3395 	if (enable)
3396 		phy |= mask;
3397 	else
3398 		phy &= ~mask;
3399 
3400 	return tb_port_write(port, &phy, TB_CFG_PORT,
3401 			     port->cap_phy + LANE_ADP_CS_1, 1);
3402 }
3403 
3404 static int tb_port_cl0s_disable(struct tb_port *port)
3405 {
3406 	return __tb_port_cl0s_set(port, false);
3407 }
3408 
3409 static int tb_port_cl0s_enable(struct tb_port *port)
3410 {
3411 	return __tb_port_cl0s_set(port, true);
3412 }
3413 
3414 static int tb_switch_enable_cl0s(struct tb_switch *sw)
3415 {
3416 	struct tb_switch *parent = tb_switch_parent(sw);
3417 	bool up_cl0s_support, down_cl0s_support;
3418 	struct tb_port *up, *down;
3419 	int ret;
3420 
3421 	if (!tb_switch_is_clx_supported(sw))
3422 		return 0;
3423 
3424 	/*
3425 	 * Enable CLx for host router's downstream port as part of the
3426 	 * downstream router enabling procedure.
3427 	 */
3428 	if (!tb_route(sw))
3429 		return 0;
3430 
3431 	/* Enable CLx only for first hop router (depth = 1) */
3432 	if (tb_route(parent))
3433 		return 0;
3434 
3435 	ret = tb_switch_pm_secondary_resolve(sw);
3436 	if (ret)
3437 		return ret;
3438 
3439 	up = tb_upstream_port(sw);
3440 	down = tb_port_at(tb_route(sw), parent);
3441 
3442 	up_cl0s_support = tb_port_cl0s_supported(up);
3443 	down_cl0s_support = tb_port_cl0s_supported(down);
3444 
3445 	tb_port_dbg(up, "CL0s %ssupported\n",
3446 		    up_cl0s_support ? "" : "not ");
3447 	tb_port_dbg(down, "CL0s %ssupported\n",
3448 		    down_cl0s_support ? "" : "not ");
3449 
3450 	if (!up_cl0s_support || !down_cl0s_support)
3451 		return -EOPNOTSUPP;
3452 
3453 	ret = tb_port_cl0s_enable(up);
3454 	if (ret)
3455 		return ret;
3456 
3457 	ret = tb_port_cl0s_enable(down);
3458 	if (ret) {
3459 		tb_port_cl0s_disable(up);
3460 		return ret;
3461 	}
3462 
3463 	ret = tb_switch_mask_clx_objections(sw);
3464 	if (ret) {
3465 		tb_port_cl0s_disable(up);
3466 		tb_port_cl0s_disable(down);
3467 		return ret;
3468 	}
3469 
3470 	sw->clx = TB_CL0S;
3471 
3472 	tb_port_dbg(up, "CL0s enabled\n");
3473 	return 0;
3474 }
3475 
3476 /**
3477  * tb_switch_enable_clx() - Enable CLx on upstream port of specified router
3478  * @sw: Router to enable CLx for
3479  * @clx: The CLx state to enable
3480  *
3481  * Enable CLx state only for first hop router. That is the most common
3482  * use case, intended for better thermal management, and helps to
3483  * improve performance. CLx is enabled only if both sides of the link
3484  * support CLx, and if both sides of the link are not configured as two
3485  * single lane links and only if the link is not an inter-domain link.
3486  * The complete set of conditions is described in CM Guide 1.0 section 8.1.
3487  *
3488  * Return: Returns 0 on success or an error code on failure.
3489 */ 3490 int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx) 3491 { 3492 struct tb_switch *root_sw = sw->tb->root_switch; 3493 3494 if (!clx_enabled) 3495 return 0; 3496 3497 /* 3498 * CLx is not enabled and validated on Intel USB4 platforms before 3499 * Alder Lake. 3500 */ 3501 if (root_sw->generation < 4 || tb_switch_is_tiger_lake(root_sw)) 3502 return 0; 3503 3504 switch (clx) { 3505 case TB_CL0S: 3506 return tb_switch_enable_cl0s(sw); 3507 3508 default: 3509 return -EOPNOTSUPP; 3510 } 3511 } 3512 3513 static int tb_switch_disable_cl0s(struct tb_switch *sw) 3514 { 3515 struct tb_switch *parent = tb_switch_parent(sw); 3516 struct tb_port *up, *down; 3517 int ret; 3518 3519 if (!tb_switch_is_clx_supported(sw)) 3520 return 0; 3521 3522 /* 3523 * Disable CLx for host router's downstream port as part of the 3524 * downstream router enabling procedure. 3525 */ 3526 if (!tb_route(sw)) 3527 return 0; 3528 3529 /* Disable CLx only for first hop router (depth = 1) */ 3530 if (tb_route(parent)) 3531 return 0; 3532 3533 up = tb_upstream_port(sw); 3534 down = tb_port_at(tb_route(sw), parent); 3535 ret = tb_port_cl0s_disable(up); 3536 if (ret) 3537 return ret; 3538 3539 ret = tb_port_cl0s_disable(down); 3540 if (ret) 3541 return ret; 3542 3543 sw->clx = TB_CLX_DISABLE; 3544 3545 tb_port_dbg(up, "CL0s disabled\n"); 3546 return 0; 3547 } 3548 3549 /** 3550 * tb_switch_disable_clx() - Disable CLx on upstream port of specified router 3551 * @sw: Router to disable CLx for 3552 * @clx: The CLx state to disable 3553 * 3554 * Return: Returns 0 on success or an error code on failure. 3555 */ 3556 int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx) 3557 { 3558 if (!clx_enabled) 3559 return 0; 3560 3561 switch (clx) { 3562 case TB_CL0S: 3563 return tb_switch_disable_cl0s(sw); 3564 3565 default: 3566 return -EOPNOTSUPP; 3567 } 3568 } 3569 3570 /** 3571 * tb_switch_mask_clx_objections() - Mask CLx objections for a router 3572 * @sw: Router to mask objections for 3573 * 3574 * Mask the objections coming from the second depth routers in order to 3575 * stop these objections from interfering with the CLx states of the first 3576 * depth link. 3577 */ 3578 int tb_switch_mask_clx_objections(struct tb_switch *sw) 3579 { 3580 int up_port = sw->config.upstream_port_number; 3581 u32 offset, val[2], mask_obj, unmask_obj; 3582 int ret, i; 3583 3584 /* Only Titan Ridge of pre-USB4 devices support CLx states */ 3585 if (!tb_switch_is_titan_ridge(sw)) 3586 return 0; 3587 3588 if (!tb_route(sw)) 3589 return 0; 3590 3591 /* 3592 * In Titan Ridge there are only 2 dual-lane Thunderbolt ports: 3593 * Port A consists of lane adapters 1,2 and 3594 * Port B consists of lane adapters 3,4 3595 * If upstream port is A, (lanes are 1,2), we mask objections from 3596 * port B (lanes 3,4) and unmask objections from Port A and vice-versa. 
3597 */ 3598 if (up_port == 1) { 3599 mask_obj = TB_LOW_PWR_C0_PORT_B_MASK; 3600 unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK; 3601 offset = TB_LOW_PWR_C1_CL1; 3602 } else { 3603 mask_obj = TB_LOW_PWR_C1_PORT_A_MASK; 3604 unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK; 3605 offset = TB_LOW_PWR_C3_CL1; 3606 } 3607 3608 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, 3609 sw->cap_lp + offset, ARRAY_SIZE(val)); 3610 if (ret) 3611 return ret; 3612 3613 for (i = 0; i < ARRAY_SIZE(val); i++) { 3614 val[i] |= mask_obj; 3615 val[i] &= ~unmask_obj; 3616 } 3617 3618 return tb_sw_write(sw, &val, TB_CFG_SWITCH, 3619 sw->cap_lp + offset, ARRAY_SIZE(val)); 3620 } 3621 3622 /* 3623 * Can be used for read/write a specified PCIe bridge for any Thunderbolt 3 3624 * device. For now used only for Titan Ridge. 3625 */ 3626 static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge, 3627 unsigned int pcie_offset, u32 value) 3628 { 3629 u32 offset, command, val; 3630 int ret; 3631 3632 if (sw->generation != 3) 3633 return -EOPNOTSUPP; 3634 3635 offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA; 3636 ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1); 3637 if (ret) 3638 return ret; 3639 3640 command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK; 3641 command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT); 3642 command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK; 3643 command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL 3644 << TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT; 3645 command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK; 3646 3647 offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD; 3648 3649 ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1); 3650 if (ret) 3651 return ret; 3652 3653 ret = tb_switch_wait_for_bit(sw, offset, 3654 TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100); 3655 if (ret) 3656 return ret; 3657 3658 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1); 3659 if (ret) 3660 return ret; 3661 3662 if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK) 3663 return -ETIMEDOUT; 3664 3665 return 0; 3666 } 3667 3668 /** 3669 * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state 3670 * @sw: Router to enable PCIe L1 3671 * 3672 * For Titan Ridge switch to enter CLx state, its PCIe bridges shall enable 3673 * entry to PCIe L1 state. Shall be called after the upstream PCIe tunnel 3674 * was configured. Due to Intel platforms limitation, shall be called only 3675 * for first hop switch. 3676 */ 3677 int tb_switch_pcie_l1_enable(struct tb_switch *sw) 3678 { 3679 struct tb_switch *parent = tb_switch_parent(sw); 3680 int ret; 3681 3682 if (!tb_route(sw)) 3683 return 0; 3684 3685 if (!tb_switch_is_titan_ridge(sw)) 3686 return 0; 3687 3688 /* Enable PCIe L1 enable only for first hop router (depth = 1) */ 3689 if (tb_route(parent)) 3690 return 0; 3691 3692 /* Write to downstream PCIe bridge #5 aka Dn4 */ 3693 ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1); 3694 if (ret) 3695 return ret; 3696 3697 /* Write to Upstream PCIe bridge #0 aka Up0 */ 3698 return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1); 3699 } 3700 3701 /** 3702 * tb_switch_xhci_connect() - Connect internal xHCI 3703 * @sw: Router whose xHCI to connect 3704 * 3705 * Can be called to any router. For Alpine Ridge and Titan Ridge 3706 * performs special flows that bring the xHCI functional for any device 3707 * connected to the type-C port. Call only after PCIe tunnel has been 3708 * established. The function only does the connect if not done already 3709 * so can be called several times for the same router. 
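 *
 * Return: %0 in case of success and negative errno in case of failure.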
3710 */ 3711 int tb_switch_xhci_connect(struct tb_switch *sw) 3712 { 3713 bool usb_port1, usb_port3, xhci_port1, xhci_port3; 3714 struct tb_port *port1, *port3; 3715 int ret; 3716 3717 port1 = &sw->ports[1]; 3718 port3 = &sw->ports[3]; 3719 3720 if (tb_switch_is_alpine_ridge(sw)) { 3721 usb_port1 = tb_lc_is_usb_plugged(port1); 3722 usb_port3 = tb_lc_is_usb_plugged(port3); 3723 xhci_port1 = tb_lc_is_xhci_connected(port1); 3724 xhci_port3 = tb_lc_is_xhci_connected(port3); 3725 3726 /* Figure out correct USB port to connect */ 3727 if (usb_port1 && !xhci_port1) { 3728 ret = tb_lc_xhci_connect(port1); 3729 if (ret) 3730 return ret; 3731 } 3732 if (usb_port3 && !xhci_port3) 3733 return tb_lc_xhci_connect(port3); 3734 } else if (tb_switch_is_titan_ridge(sw)) { 3735 ret = tb_lc_xhci_connect(port1); 3736 if (ret) 3737 return ret; 3738 return tb_lc_xhci_connect(port3); 3739 } 3740 3741 return 0; 3742 } 3743 3744 /** 3745 * tb_switch_xhci_disconnect() - Disconnect internal xHCI 3746 * @sw: Router whose xHCI to disconnect 3747 * 3748 * The opposite of tb_switch_xhci_connect(). Disconnects xHCI on both 3749 * ports. 3750 */ 3751 void tb_switch_xhci_disconnect(struct tb_switch *sw) 3752 { 3753 if (sw->generation == 3) { 3754 struct tb_port *port1 = &sw->ports[1]; 3755 struct tb_port *port3 = &sw->ports[3]; 3756 3757 tb_lc_xhci_disconnect(port1); 3758 tb_port_dbg(port1, "disconnected xHCI\n"); 3759 tb_lc_xhci_disconnect(port3); 3760 tb_port_dbg(port3, "disconnected xHCI\n"); 3761 } 3762 } 3763