// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>

#include "tb.h"

/* Switch NVM support */

struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

static bool clx_enabled = true;
module_param_named(clx, clx_enabled, bool, 0444);
MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)");

/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);

static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}

static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	if (!st) {
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}

static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}

static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size;
	const u8 *buf;
	int ret;

	ret = tb_nvm_validate(sw->nvm);
	if (ret)
		return ret;

	ret = tb_nvm_write_headers(sw->nvm);
	if (ret)
		return ret;

	buf = sw->nvm->buf_data_start;
	image_size = sw->nvm->buf_data_size;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
	else
		ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
	if (ret)
		return ret;

	sw->nvm->flushed = true;
	return 0;
}

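/*
 * Added commentary (not part of the original flow description): the
 * typical userspace sequence that ends up here is roughly writing the
 * image to the non-active NVMem device exposed for the router and then
 * triggering authentication through sysfs, e.g.:
 *
 *	# dd if=image.nvm of=/sys/bus/thunderbolt/devices/<dev>/nvm_non_active0/nvmem
 *	# echo 1 > /sys/bus/thunderbolt/devices/<dev>/nvm_authenticate
 *
 * The file and device names above are only examples. The write lands in
 * nvm_write() below, and the authenticate step ends up calling
 * nvm_validate_and_write() and nvm_authenticate().
 */
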
static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
{
	int ret = 0;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		u32 status;

		ret = tb_domain_disconnect_all_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this if
		 * everything goes well so getting a timeout is expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		if (!ret || ret == -ETIMEDOUT)
			return 0;

		/*
		 * Any error from update auth operation requires power
		 * cycling of the host router.
		 */
		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
			nvm_set_auth_status(sw, status);
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return ret;
}

static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	switch (ret) {
	case 0:
	case -ETIMEDOUT:
	case -EACCES:
	case -EINVAL:
		/* Power cycle is required */
		break;
	default:
		return ret;
	}

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get timeout for a while). Once
	 * we get a response the device needs to be power cycled in
	 * order for the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;
		if (ret > 0) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}

static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	/*
	 * During host router NVM upgrade we should not allow root port to
	 * go into D3cold because some root ports cannot trigger PME
	 * themselves. To be on the safe side keep the root port in D0
	 * during the whole upgrade process.
	 */
	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_get_noresume(&root_port->dev);
}

static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_put(&root_port->dev);
}

static inline bool nvm_readable(struct tb_switch *sw)
{
	if (tb_switch_is_usb4(sw)) {
		/*
		 * USB4 devices must support NVM operations but it is
		 * optional for hosts. Therefore we query the NVM sector
		 * size here and if it is supported assume NVM
		 * operations are implemented.
		 */
		return usb4_switch_nvm_sector_size(sw) > 0;
	}

	/* Thunderbolt 2 and 3 devices support NVM through DMA port */
	return !!sw->dma_port;
}

static inline bool nvm_upgradeable(struct tb_switch *sw)
{
	if (sw->no_nvm_upgrade)
		return false;
	return nvm_readable(sw);
}

static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
{
	int ret;

	if (tb_switch_is_usb4(sw)) {
		if (auth_only) {
			ret = usb4_switch_nvm_set_offset(sw, 0);
			if (ret)
				return ret;
		}
		sw->nvm->authenticating = true;
		return usb4_switch_nvm_authenticate(sw);
	} else if (auth_only) {
		return -EOPNOTSUPP;
	}

	sw->nvm->authenticating = true;
	if (!tb_route(sw)) {
		nvm_authenticate_start_dma_port(sw);
		ret = nvm_authenticate_host_dma_port(sw);
	} else {
		ret = nvm_authenticate_device_dma_port(sw);
	}

	return ret;
}

/**
 * tb_switch_nvm_read() - Read router NVM
 * @sw: Router whose NVM to read
 * @address: Start address on the NVM
 * @buf: Buffer where the read data is copied
 * @size: Size of the buffer in bytes
 *
 * Reads from router NVM and returns the requested data in @buf. Locking
 * is up to the caller. Returns %0 on success and negative errno in case
 * of failure.
 */
int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
		       size_t size)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_nvm_read(sw, address, buf, size);
	return dma_port_flash_read(sw->dma_port, address, buf, size);
}

static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = tb_switch_nvm_read(sw, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	/*
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	 */
	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

	return ret;
}

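/*
 * Added note on locking: both nvm_read() and nvm_write() above use
 * mutex_trylock() + restart_syscall() instead of blocking on the domain
 * lock. The assumption is that these run in process context from the
 * NVMem interface, so bouncing back to userspace and letting the
 * syscall be restarted avoids stalling behind (or deadlocking against)
 * longer operations that already hold tb->lock.
 */
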
static int tb_switch_nvm_add(struct tb_switch *sw)
{
	struct tb_nvm *nvm;
	int ret;

	if (!nvm_readable(sw))
		return 0;

	nvm = tb_nvm_alloc(&sw->dev);
	if (IS_ERR(nvm)) {
		ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
		goto err_nvm;
	}

	ret = tb_nvm_read_version(nvm);
	if (ret)
		goto err_nvm;

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (!sw->safe_mode) {
		ret = tb_nvm_add_active(nvm, nvm_read);
		if (ret)
			goto err_nvm;
	}

	if (!sw->no_nvm_upgrade) {
		ret = tb_nvm_add_non_active(nvm, nvm_write);
		if (ret)
			goto err_nvm;
	}

	sw->nvm = nvm;
	return 0;

err_nvm:
	tb_sw_dbg(sw, "NVM upgrade disabled\n");
	sw->no_nvm_upgrade = true;
	if (!IS_ERR(nvm))
		tb_nvm_free(nvm);

	return ret;
}

static void tb_switch_nvm_remove(struct tb_switch *sw)
{
	struct tb_nvm *nvm;

	nvm = sw->nvm;
	sw->nvm = NULL;

	if (!nvm)
		return;

	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);

	tb_nvm_free(nvm);
}

/* port utility functions */

static const char *tb_port_type(const struct tb_regs_port_header *port)
{
	switch (port->type >> 16) {
	case 0:
		switch ((u8) port->type) {
		case 0:
			return "Inactive";
		case 1:
			return "Port";
		case 2:
			return "NHI";
		default:
			return "unknown";
		}
	case 0x2:
		return "Ethernet";
	case 0x8:
		return "SATA";
	case 0xe:
		return "DP/HDMI";
	case 0x10:
		return "PCIe";
	case 0x20:
		return "USB";
	default:
		return "unknown";
	}
}

static void tb_dump_port(struct tb *tb, const struct tb_port *port)
{
	const struct tb_regs_port_header *regs = &port->config;

	tb_dbg(tb,
	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
	       regs->port_number, regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version, tb_port_type(regs),
	       regs->type);
	tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
	       regs->max_in_hop_id, regs->max_out_hop_id);
	tb_dbg(tb, " Max counters: %d\n", regs->max_counters);
	tb_dbg(tb, " NFC Credits: %#x\n", regs->nfc_credits);
	tb_dbg(tb, " Credits (total/control): %u/%u\n", port->total_credits,
	       port->ctl_credits);
}

/**
 * tb_port_state() - get connectedness state of a port
 * @port: the port to check
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
int tb_port_state(struct tb_port *port)
{
	struct tb_cap_phy phy;
	int res;

	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}
	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
	if (res)
		return res;
	return phy.state;
}

/**
 * tb_wait_for_port() - wait for a port to become ready
 * @port: Port to wait
 * @wait_if_unplugged: Wait also when port is unplugged
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the
 * link has not yet been established.
 *
 * Return: Returns 1 if the port is connected and in state TB_PORT_UP,
 * 0 if the port is not connected or failed to reach state TB_PORT_UP
 * within one second, or an error code on failure.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
	int retries = 10;
	int state;

	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

	while (retries--) {
		state = tb_port_state(port);
		switch (state) {
		case TB_PORT_DISABLED:
			tb_port_dbg(port, "is disabled (state: 0)\n");
			return 0;

		case TB_PORT_UNPLUGGED:
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_dbg(port,
					    "is unplugged (state: 7), retrying...\n");
				msleep(100);
				break;
			}
			tb_port_dbg(port, "is unplugged (state: 7)\n");
			return 0;

		case TB_PORT_UP:
		case TB_PORT_TX_CL0S:
		case TB_PORT_RX_CL0S:
		case TB_PORT_CL1:
		case TB_PORT_CL2:
			tb_port_dbg(port, "is connected, link is up (state: %d)\n", state);
			return 1;

		default:
			if (state < 0)
				return state;

			/*
			 * After plug-in the state is TB_PORT_CONNECTING. Give it some
			 * time.
			 */
			tb_port_dbg(port,
				    "is connected, link is not up (state: %d), retrying...\n",
				    state);
			msleep(100);
		}
	}
	tb_port_warn(port,
		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
}

/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 * @port: Port to add/remove NFC credits
 * @credits: Credits to add/remove
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
	u32 nfc_credits;

	if (credits == 0 || port->sw->is_unplugged)
		return 0;

	/*
	 * USB4 restricts programming NFC buffers to lane adapters only
	 * so skip other ports.
	 */
	if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
		return 0;

	nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	if (credits < 0)
		credits = max_t(int, -nfc_credits, credits);

	nfc_credits += credits;

	tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
		    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);

	port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
	port->config.nfc_credits |= nfc_credits;

	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, ADP_CS_4, 1);
}

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 * @port: Port whose counters to clear
 * @counter: Counter index to clear
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
	u32 zero[3] = { 0, 0, 0 };

	tb_port_dbg(port, "clearing counter %d\n", counter);
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}

/**
 * tb_port_unlock() - Unlock downstream port
 * @port: Port to unlock
 *
 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
 * downstream router accessible for CM.
 */
int tb_port_unlock(struct tb_port *port)
{
	if (tb_switch_is_icm(port->sw))
		return 0;
	if (!tb_port_is_null(port))
		return -EINVAL;
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_unlock(port);
	return 0;
}

static int __tb_port_enable(struct tb_port *port, bool enable)
{
	int ret;
	u32 phy;

	if (!tb_port_is_null(port))
		return -EINVAL;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (enable)
		phy &= ~LANE_ADP_CS_1_LD;
	else
		phy |= LANE_ADP_CS_1_LD;

	ret = tb_port_write(port, &phy, TB_CFG_PORT,
			    port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	tb_port_dbg(port, "lane %s\n", str_enabled_disabled(enable));
	return 0;
}

/**
 * tb_port_enable() - Enable lane adapter
 * @port: Port to enable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to enable the lane.
 */
int tb_port_enable(struct tb_port *port)
{
	return __tb_port_enable(port, true);
}

/**
 * tb_port_disable() - Disable lane adapter
 * @port: Port to disable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to disable the lane.
 */
int tb_port_disable(struct tb_port *port)
{
	return __tb_port_enable(port, false);
}

/*
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
	int res;
	int cap;

	INIT_LIST_HEAD(&port->list);

	/* Control adapter does not have configuration space */
	if (!port->port)
		return 0;

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res) {
		if (res == -ENODEV) {
			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
			       port->port);
			port->disabled = true;
			return 0;
		}
		return res;
	}

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");

		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
		if (cap > 0)
			port->cap_usb4 = cap;

		/*
		 * For USB4 ports the buffers allocated for the control
		 * path can be read from the path config space. For
		 * legacy devices we use a hard-coded value.
		 */
		if (tb_switch_is_usb4(port->sw)) {
			struct tb_regs_hop hop;

			if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
				port->ctl_credits = hop.initial_credits;
		}
		if (!port->ctl_credits)
			port->ctl_credits = 2;

	} else {
		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
		if (cap > 0)
			port->cap_adap = cap;
	}

	port->total_credits =
		(port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
		ADP_CS_4_TOTAL_BUFFERS_SHIFT;

	tb_dump_port(port->sw->tb, port);
	return 0;
}

static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
			       int max_hopid)
{
	int port_max_hopid;
	struct ida *ida;

	if (in) {
		port_max_hopid = port->config.max_in_hop_id;
		ida = &port->in_hopids;
	} else {
		port_max_hopid = port->config.max_out_hop_id;
		ida = &port->out_hopids;
	}

	/*
	 * NHI can use HopIDs 1-max. For other adapters HopIDs 0-7 are
	 * reserved.
	 */
	if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
		min_hopid = TB_PATH_MIN_HOPID;

	if (max_hopid < 0 || max_hopid > port_max_hopid)
		max_hopid = port_max_hopid;

	return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
}

/**
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
}

/**
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
}

/**
 * tb_port_release_in_hopid() - Release allocated input HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->in_hopids, hopid);
}

/**
 * tb_port_release_out_hopid() - Release allocated output HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->out_hopids, hopid);
}

static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
					  const struct tb_switch *sw)
{
	u64 mask = (1ULL << parent->config.depth * 8) - 1;

	return (tb_route(parent) & mask) == (tb_route(sw) & mask);
}

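/*
 * Added commentary: tb_switch_is_reachable() compares route string
 * prefixes. The route string encodes one downstream port number per
 * hop, 8 bits per hop, with the hop closest to the host router in the
 * least significant byte, so a parent at depth N "owns" the low N
 * bytes. Illustrative numbers only: a parent at depth 1 with route
 * 0x03 gives mask 0xff, and a router with route 0x0503 (port 3 of the
 * host, then port 5) matches 0x03 under that mask, so it is reachable
 * through the parent.
 */
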
/**
 * tb_next_port_on_path() - Return next port for given port on a path
 * @start: Start port of the walk
 * @end: End port of the walk
 * @prev: Previous port (%NULL if this is the first)
 *
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If the @prev is dual
 * link port, the function follows that link and returns another end on
 * that same link.
 *
 * If the @end port has been reached, return %NULL.
 *
 * Domain tb->lock must be held when this function is called.
 */
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
				     struct tb_port *prev)
{
	struct tb_port *next;

	if (!prev)
		return start;

	if (prev->sw == end->sw) {
		if (prev == end)
			return NULL;
		return end;
	}

	if (tb_switch_is_reachable(prev->sw, end->sw)) {
		next = tb_port_at(tb_route(end->sw), prev->sw);
		/* Walk down the topology if next == prev */
		if (prev->remote &&
		    (next == prev || next->dual_link_port == prev))
			next = prev->remote;
	} else {
		if (tb_is_upstream_port(prev)) {
			next = prev->remote;
		} else {
			next = tb_upstream_port(prev->sw);
			/*
			 * Keep the same link if prev and next are both
			 * dual link ports.
			 */
			if (next->dual_link_port &&
			    next->link_nr != prev->link_nr) {
				next = next->dual_link_port;
			}
		}
	}

	return next != prev ? next : NULL;
}

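/*
 * Added usage sketch (the tb_for_each_port_on_path() helper in tb.h is
 * assumed to wrap this function; if it does not, the open-coded loop
 * below is the intended pattern):
 *
 *	struct tb_port *p = NULL;
 *
 *	while ((p = tb_next_port_on_path(src_port, dst_port, p)))
 *		tb_port_dbg(p, "on path\n");
 *
 * Each adapter along the path between @src_port and @dst_port is
 * visited once, including both ends.
 */
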
/**
 * tb_port_get_link_speed() - Get current link speed
 * @port: Port to check (USB4 or CIO)
 *
 * Returns link speed in Gb/s or negative errno in case of failure.
 */
int tb_port_get_link_speed(struct tb_port *port)
{
	u32 val, speed;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
		LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
	return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
}

/**
 * tb_port_get_link_width() - Get current link width
 * @port: Port to check (USB4 or CIO)
 *
 * Returns link width. Return values can be 1 (Single-Lane), 2 (Dual-Lane)
 * or negative errno in case of failure.
 */
int tb_port_get_link_width(struct tb_port *port)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
		LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
}

static bool tb_port_is_width_supported(struct tb_port *port, int width)
{
	u32 phy, widths;
	int ret;

	if (!port->cap_phy)
		return false;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);
	if (ret)
		return false;

	widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
		LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;

	return !!(widths & width);
}

/**
 * tb_port_set_link_width() - Set target link width of the lane adapter
 * @port: Lane adapter
 * @width: Target link width (%1 or %2)
 *
 * Sets the target link width of the lane adapter to @width. Does not
 * enable/disable lane bonding. For that call tb_port_set_lane_bonding().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_port_set_link_width(struct tb_port *port, unsigned int width)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
	switch (width) {
	case 1:
		val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;
	case 2:
		val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;
	default:
		return -EINVAL;
	}

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

/**
 * tb_port_set_lane_bonding() - Enable/disable lane bonding
 * @port: Lane adapter
 * @bonding: enable/disable bonding
 *
 * Enables or disables lane bonding. This should be called after target
 * link width has been set (tb_port_set_link_width()). Note in most
 * cases one should use tb_port_lane_bonding_enable() instead to enable
 * lane bonding.
 *
 * As a side effect sets @port->bonded accordingly (and does the same
 * for lane 1 too).
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (bonding)
		val |= LANE_ADP_CS_1_LB;
	else
		val &= ~LANE_ADP_CS_1_LB;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	/*
	 * When lane 0 bonding is set it will affect lane 1 too so
	 * update both.
	 */
	port->bonded = bonding;
	port->dual_link_port->bonded = bonding;

	return 0;
}

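/*
 * Added usage sketch: the connection manager typically bonds a link by
 * driving the lane 0 adapters on both ends of it roughly as follows
 * (error handling omitted; the "up"/"down" names and the 100 ms timeout
 * are illustrative only):
 *
 *	tb_port_lane_bonding_enable(up);
 *	tb_port_lane_bonding_enable(down);
 *	tb_port_wait_for_link_width(down, 2, 100);
 *	tb_port_update_credits(down);
 *
 * i.e. set the target width and LB bit on both ends, wait until the
 * width change takes effect and then re-read the buffer allocation.
 */
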
/**
 * tb_port_lane_bonding_enable() - Enable bonding on port
 * @port: port to enable
 *
 * Enable bonding by setting the link width of the port and the other
 * port in case of dual link port. Does not wait for the link to
 * actually reach the bonded state so caller needs to call
 * tb_port_wait_for_link_width() before enabling any paths through the
 * link to make sure the link is in expected state.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_port_lane_bonding_enable(struct tb_port *port)
{
	int ret;

	/*
	 * Enable lane bonding for both links if not already enabled by
	 * for example the boot firmware.
	 */
	ret = tb_port_get_link_width(port);
	if (ret == 1) {
		ret = tb_port_set_link_width(port, 2);
		if (ret)
			goto err_lane0;
	}

	ret = tb_port_get_link_width(port->dual_link_port);
	if (ret == 1) {
		ret = tb_port_set_link_width(port->dual_link_port, 2);
		if (ret)
			goto err_lane0;
	}

	ret = tb_port_set_lane_bonding(port, true);
	if (ret)
		goto err_lane1;

	return 0;

err_lane1:
	tb_port_set_link_width(port->dual_link_port, 1);
err_lane0:
	tb_port_set_link_width(port, 1);
	return ret;
}

/**
 * tb_port_lane_bonding_disable() - Disable bonding on port
 * @port: port to disable
 *
 * Disable bonding by setting the link width of the port and the
 * other port in case of dual link port.
 */
void tb_port_lane_bonding_disable(struct tb_port *port)
{
	tb_port_set_lane_bonding(port, false);
	tb_port_set_link_width(port->dual_link_port, 1);
	tb_port_set_link_width(port, 1);
}

/**
 * tb_port_wait_for_link_width() - Wait until link reaches specific width
 * @port: Port to wait for
 * @width: Expected link width (%1 or %2)
 * @timeout_msec: Timeout in ms how long to wait
 *
 * Should be used after both ends of the link have been bonded (or
 * bonding has been disabled) to wait until the link actually reaches
 * the expected state. Returns %-ETIMEDOUT if the @width was not reached
 * within the given timeout, %0 if it did.
 */
int tb_port_wait_for_link_width(struct tb_port *port, int width,
				int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
	int ret;

	do {
		ret = tb_port_get_link_width(port);
		if (ret < 0) {
			/*
			 * Sometimes we get port locked error when
			 * polling the lanes so we can ignore it and
			 * retry.
			 */
			if (ret != -EACCES)
				return ret;
		} else if (ret == width) {
			return 0;
		}

		usleep_range(1000, 2000);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

static int tb_port_do_update_credits(struct tb_port *port)
{
	u32 nfc_credits;
	int ret;

	ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
	if (ret)
		return ret;

	if (nfc_credits != port->config.nfc_credits) {
		u32 total;

		total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
			ADP_CS_4_TOTAL_BUFFERS_SHIFT;

		tb_port_dbg(port, "total credits changed %u -> %u\n",
			    port->total_credits, total);

		port->config.nfc_credits = nfc_credits;
		port->total_credits = total;
	}

	return 0;
}

/**
 * tb_port_update_credits() - Re-read port total credits
 * @port: Port to update
 *
 * After the link is bonded (or bonding was disabled) the port total
 * credits may change, so this function needs to be called to re-read
 * the credits. Updates also the second lane adapter.
 */
int tb_port_update_credits(struct tb_port *port)
{
	int ret;

	ret = tb_port_do_update_credits(port);
	if (ret)
		return ret;
	return tb_port_do_update_credits(port->dual_link_port);
}

static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
{
	u32 phy;
	int ret;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (secondary)
		phy |= LANE_ADP_CS_1_PMS;
	else
		phy &= ~LANE_ADP_CS_1_PMS;

	return tb_port_write(port, &phy, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

static int tb_port_pm_secondary_enable(struct tb_port *port)
{
	return __tb_port_pm_secondary_set(port, true);
}

static int tb_port_pm_secondary_disable(struct tb_port *port)
{
	return __tb_port_pm_secondary_set(port, false);
}

/* Called for USB4 or Titan Ridge routers only */
static bool tb_port_clx_supported(struct tb_port *port, unsigned int clx_mask)
{
	u32 val, mask = 0;
	int ret;

	/* Don't enable CLx in case of two single-lane links */
	if (!port->bonded && port->dual_link_port)
		return false;

	/* Don't enable CLx in case of inter-domain link */
	if (port->xdomain)
		return false;

	if (tb_switch_is_usb4(port->sw)) {
		if (!usb4_port_clx_supported(port))
			return false;
	} else if (!tb_lc_is_clx_supported(port)) {
		return false;
	}

	if (clx_mask & TB_CL1) {
		/* CL0s and CL1 are enabled and supported together */
		mask |= LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
	}
	if (clx_mask & TB_CL2)
		mask |= LANE_ADP_CS_0_CL2_SUPPORT;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);
	if (ret)
		return false;

	return !!(val & mask);
}

static int __tb_port_clx_set(struct tb_port *port, enum tb_clx clx, bool enable)
{
	u32 phy, mask;
	int ret;

	/* CL0s and CL1 are enabled and supported together */
	if (clx == TB_CL1)
		mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
	else
		/* For now we support only CL0s and CL1. Not CL2 */
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (enable)
		phy |= mask;
	else
		phy &= ~mask;

	return tb_port_write(port, &phy, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

static int tb_port_clx_disable(struct tb_port *port, enum tb_clx clx)
{
	return __tb_port_clx_set(port, clx, false);
}

static int tb_port_clx_enable(struct tb_port *port, enum tb_clx clx)
{
	return __tb_port_clx_set(port, clx, true);
}

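/*
 * Added commentary: the CLx helpers above operate on a single lane
 * adapter. The expected calling pattern (a sketch, not a guarantee of
 * the exact CM flow) is to check tb_port_clx_supported() on both ends
 * of the link and then call tb_port_clx_enable() with TB_CL1, which
 * turns on CL0s and CL1 together. CL2 is reported by
 * tb_port_clx_supported() but __tb_port_clx_set() does not implement
 * it yet. The "clx" module parameter declared near the top of this
 * file allows the low power states to be turned off entirely.
 */
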
/**
 * tb_port_is_clx_enabled() - Is given CL state enabled
 * @port: USB4 port to check
 * @clx_mask: Mask of CL states to check
 *
 * Returns true if any of the given CL states is enabled for @port.
 */
bool tb_port_is_clx_enabled(struct tb_port *port, unsigned int clx_mask)
{
	u32 val, mask = 0;
	int ret;

	if (!tb_port_clx_supported(port, clx_mask))
		return false;

	if (clx_mask & TB_CL1)
		mask |= LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
	if (clx_mask & TB_CL2)
		mask |= LANE_ADP_CS_1_CL2_ENABLE;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return false;

	return !!(val & mask);
}

static int tb_port_start_lane_initialization(struct tb_port *port)
{
	int ret;

	if (tb_switch_is_usb4(port->sw))
		return 0;

	ret = tb_lc_start_lane_initialization(port);
	return ret == -EINVAL ? 0 : ret;
}

/*
 * Returns true if the port had something (router, XDomain) connected
 * before suspend.
 */
static bool tb_port_resume(struct tb_port *port)
{
	bool has_remote = tb_port_has_remote(port);

	if (port->usb4) {
		usb4_port_device_resume(port->usb4);
	} else if (!has_remote) {
		/*
		 * For disconnected downstream lane adapters start lane
		 * initialization now so we detect future connects.
		 *
		 * For XDomain start the lane initialization now so the
		 * link gets re-established.
		 *
		 * This is only needed for non-USB4 ports.
		 */
		if (!tb_is_upstream_port(port) || port->xdomain)
			tb_port_start_lane_initialization(port);
	}

	return has_remote || port->xdomain;
}

/**
 * tb_port_is_enabled() - Is the adapter port enabled
 * @port: Port to check
 */
bool tb_port_is_enabled(struct tb_port *port)
{
	switch (port->config.type) {
	case TB_TYPE_PCIE_UP:
	case TB_TYPE_PCIE_DOWN:
		return tb_pci_port_is_enabled(port);

	case TB_TYPE_DP_HDMI_IN:
	case TB_TYPE_DP_HDMI_OUT:
		return tb_dp_port_is_enabled(port);

	case TB_TYPE_USB3_UP:
	case TB_TYPE_USB3_DOWN:
		return tb_usb3_port_is_enabled(port);

	default:
		return false;
	}
}

/**
 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
 * @port: USB3 adapter port to check
 */
bool tb_usb3_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_USB3_CS_0, 1))
		return false;

	return !!(data & ADP_USB3_CS_0_PE);
}

/**
 * tb_usb3_port_enable() - Enable USB3 adapter port
 * @port: USB3 adapter port to enable
 * @enable: Enable/disable the USB3 adapter
 */
int tb_usb3_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
			  : ADP_USB3_CS_0_V;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_USB3_CS_0, 1);
}

/**
 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
 * @port: PCIe port to check
 */
bool tb_pci_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_PCIE_CS_0, 1))
		return false;

	return !!(data & ADP_PCIE_CS_0_PE);
}

/**
 * tb_pci_port_enable() - Enable PCIe adapter port
 * @port: PCIe port to enable
 * @enable: Enable/disable the PCIe adapter
 */
int tb_pci_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_PCIE_CS_0, 1);
}

/**
 * tb_dp_port_hpd_is_active() - Is HPD already active
 * @port: DP out port to check
 *
 * Checks if the DP OUT adapter port has the HDP bit already set.
 */
int tb_dp_port_hpd_is_active(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	return !!(data & ADP_DP_CS_2_HDP);
}

/**
 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
 * @port: Port to clear HPD
 *
 * If the DP IN port has the HDP bit set, this function can be used to
 * clear it.
 */
int tb_dp_port_hpd_clear(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_3, 1);
	if (ret)
		return ret;

	data |= ADP_DP_CS_3_HDPC;
	return tb_port_write(port, &data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_3, 1);
}

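/*
 * Added usage sketch: for a non-USB4 DP tunnel the adapter is typically
 * programmed with tb_dp_port_set_hops() first and only then activated,
 * roughly (the HopID names are illustrative):
 *
 *	tb_dp_port_set_hops(in, video_hopid, aux_tx_hopid, aux_rx_hopid);
 *	tb_dp_port_enable(in, true);
 *
 * On USB4 routers tb_dp_port_set_hops() is a no-op because the HopID
 * fields are read-only there, as noted in its kernel-doc below.
 */
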
/**
 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
 * @port: DP IN/OUT port to set hops
 * @video: Video Hop ID
 * @aux_tx: AUX TX Hop ID
 * @aux_rx: AUX RX Hop ID
 *
 * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
 * router DP adapters too but does not program the values as the fields
 * are read-only.
 */
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
			unsigned int aux_tx, unsigned int aux_rx)
{
	u32 data[2];
	int ret;

	if (tb_switch_is_usb4(port->sw))
		return 0;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
		ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
		ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/**
 * tb_dp_port_is_enabled() - Is DP adapter port enabled
 * @port: DP adapter port to check
 */
bool tb_dp_port_is_enabled(struct tb_port *port)
{
	u32 data[2];

	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
			 ARRAY_SIZE(data)))
		return false;

	return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
}

/**
 * tb_dp_port_enable() - Enables/disables DP paths of a port
 * @port: DP IN/OUT port
 * @enable: Enable/disable DP path
 *
 * Once Hop IDs are programmed DP paths can be enabled or disabled by
 * calling this function.
 */
int tb_dp_port_enable(struct tb_port *port, bool enable)
{
	u32 data[2];
	int ret;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	if (enable)
		data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
	else
		data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/* switch utility functions */

static const char *tb_switch_generation_name(const struct tb_switch *sw)
{
	switch (sw->generation) {
	case 1:
		return "Thunderbolt 1";
	case 2:
		return "Thunderbolt 2";
	case 3:
		return "Thunderbolt 3";
	case 4:
		return "USB4";
	default:
		return "Unknown";
	}
}

static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
{
	const struct tb_regs_switch_header *regs = &sw->config;

	tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
	       tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version);
	tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number);
	tb_dbg(tb, " Config:\n");
	tb_dbg(tb,
	       " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
	       regs->upstream_port_number, regs->depth,
	       (((u64) regs->route_hi) << 32) | regs->route_lo,
	       regs->enabled, regs->plug_events_delay);
	tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
	       regs->__unknown1, regs->__unknown4);
}

/**
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 * @sw: Switch to reset
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_switch_reset(struct tb_switch *sw)
{
	struct tb_cfg_result res;

	if (sw->generation > 1)
		return 0;

	tb_sw_dbg(sw, "resetting switch\n");

	res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
			      TB_CFG_SWITCH, 2, 2);
	if (res.err)
		return res.err;
	res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
	if (res.err > 0)
		return -EIO;
	return res.err;
}

/**
 * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
 * @sw: Router to read the offset value from
 * @offset: Offset in the router config space to read from
 * @bit: Bit mask in the offset to wait for
 * @value: Value of the bits to wait for
 * @timeout_msec: Timeout in ms how long to wait
 *
 * Wait until the specified bits in the specified offset reach the
 * specified value. Returns %0 in case of success, %-ETIMEDOUT if the
 * @value was not reached within the given timeout or a negative errno
 * in case of failure.
 */
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
			   u32 value, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
		if (ret)
			return ret;

		if ((val & bit) == value)
			return 0;

		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

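/*
 * Added usage sketch for tb_switch_wait_for_bit(): it is a generic
 * "poll a router config space register until the bits match" helper.
 * For example, waiting up to 100 ms for a hypothetical STATUS_READY bit
 * in a hypothetical ROUTER_CS_STATUS offset would look like:
 *
 *	ret = tb_switch_wait_for_bit(sw, ROUTER_CS_STATUS,
 *				     STATUS_READY, STATUS_READY, 100);
 *
 * Passing @value as 0 instead waits for the bit(s) to clear.
 */
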
/*
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
	u32 data;
	int res;

	if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
		return 0;

	sw->config.plug_events_delay = 0xff;
	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
	if (res)
		return res;

	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
	if (res)
		return res;

	if (active) {
		data = data & 0xFFFFFF83;
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
			break;
		default:
			/*
			 * Skip Alpine Ridge, it needs to have vendor
			 * specific USB hotplug event enabled for the
			 * internal xHCI to work.
			 */
			if (!tb_switch_is_alpine_ridge(sw))
				data |= TB_PLUG_EVENTS_USB_DISABLE;
		}
	} else {
		data = data | 0x7c;
	}
	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
			   sw->cap_plug_events + 1, 1);
}

static ssize_t authorized_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u\n", sw->authorized);
}

static int disapprove_switch(struct device *dev, void *not_used)
{
	char *envp[] = { "AUTHORIZED=0", NULL };
	struct tb_switch *sw;

	sw = tb_to_switch(dev);
	if (sw && sw->authorized) {
		int ret;

		/* First children */
		ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
		if (ret)
			return ret;

		ret = tb_domain_disapprove_switch(sw->tb, sw);
		if (ret)
			return ret;

		sw->authorized = 0;
		kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
	}

	return 0;
}

static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
	char envp_string[13];
	int ret = -EINVAL;
	char *envp[] = { envp_string, NULL };

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (!!sw->authorized == !!val)
		goto unlock;

	switch (val) {
	/* Disapprove switch */
	case 0:
		if (tb_route(sw)) {
			ret = disapprove_switch(&sw->dev, NULL);
			goto unlock;
		}
		break;

	/* Approve switch */
	case 1:
		if (sw->key)
			ret = tb_domain_approve_switch_key(sw->tb, sw);
		else
			ret = tb_domain_approve_switch(sw->tb, sw);
		break;

	/* Challenge switch */
	case 2:
		if (sw->key)
			ret = tb_domain_challenge_switch_key(sw->tb, sw);
		break;

	default:
		break;
	}

	if (!ret) {
		sw->authorized = val;
		/*
		 * Notify status change to the userspace, informing the new
		 * value of /sys/bus/thunderbolt/devices/.../authorized.
		 */
		sprintf(envp_string, "AUTHORIZED=%u", sw->authorized);
		kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
	}

unlock:
	mutex_unlock(&sw->tb->lock);
	return ret;
}

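/*
 * Added commentary on the values accepted by the "authorized" sysfs
 * attribute handled below: 0 deauthorizes the router (only supported
 * for devices, not the host router), 1 approves it (using the stored
 * key when one is set) and 2 approves it by challenging the stored
 * key. Any other value is rejected with -EINVAL.
 */
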
static ssize_t authorized_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int val;
	ssize_t ret;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 2)
		return -EINVAL;

	pm_runtime_get_sync(&sw->dev);
	ret = tb_switch_set_authorized(sw, val);
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(authorized);

static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u\n", sw->boot);
}
static DEVICE_ATTR_RO(boot);

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%#x\n", sw->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%s\n", sw->device_name ?: "");
}
static DEVICE_ATTR_RO(device_name);

static ssize_t
generation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u\n", sw->generation);
}
static DEVICE_ATTR_RO(generation);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	ssize_t ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->key)
		ret = sysfs_emit(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
	else
		ret = sysfs_emit(buf, "\n");

	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t key_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u8 key[TB_SWITCH_KEY_SIZE];
	ssize_t ret = count;
	bool clear = false;

	if (!strcmp(buf, "\n"))
		clear = true;
	else if (hex2bin(key, buf, sizeof(key)))
		return -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->authorized) {
		ret = -EBUSY;
	} else {
		kfree(sw->key);
		if (clear) {
			sw->key = NULL;
		} else {
			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
			if (!sw->key)
				ret = -ENOMEM;
		}
	}

	mutex_unlock(&sw->tb->lock);
	return ret;
}
static DEVICE_ATTR(key, 0600, key_show, key_store);

static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u.0 Gb/s\n", sw->link_speed);
}

/*
 * Currently all lanes must run at the same speed but we expose here
 * both directions to allow possible asymmetric links in the future.
 */
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);

static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u\n", sw->link_width);
}

/*
 * Currently link has same amount of lanes both directions (1 or 2) but
 * expose them separately to allow possible asymmetric links in the future.
 */
static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);

static ssize_t nvm_authenticate_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u32 status;

	nvm_get_auth_status(sw, &status);
	return sysfs_emit(buf, "%#x\n", status);
}

static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
				      bool disconnect)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int val, ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	if (sw->no_nvm_upgrade) {
		ret = -EOPNOTSUPP;
		goto exit_unlock;
	}

	/* If NVMem devices are not yet added */
	if (!sw->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear the authentication status */
	nvm_clear_auth_status(sw);

	if (val > 0) {
		if (val == AUTHENTICATE_ONLY) {
			if (disconnect)
				ret = -EINVAL;
			else
				ret = nvm_authenticate(sw, true);
		} else {
			if (!sw->nvm->flushed) {
				if (!sw->nvm->buf) {
					ret = -EINVAL;
					goto exit_unlock;
				}

				ret = nvm_validate_and_write(sw);
				if (ret || val == WRITE_ONLY)
					goto exit_unlock;
			}
			if (val == WRITE_AND_AUTHENTICATE) {
				if (disconnect)
					ret = tb_lc_force_power(sw);
				else
					ret = nvm_authenticate(sw, false);
			}
		}
	}

exit_unlock:
	mutex_unlock(&sw->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	int ret = nvm_authenticate_sysfs(dev, buf, false);

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

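/*
 * Added commentary: nvm_authenticate_sysfs() above interprets the
 * written value using the WRITE_ONLY, WRITE_AND_AUTHENTICATE and
 * AUTHENTICATE_ONLY constants (defined elsewhere, presumably in tb.h).
 * WRITE_ONLY only flushes the cached image to the NVM,
 * AUTHENTICATE_ONLY skips the flush and just triggers authentication
 * (USB4 routers only, and not supported together with the disconnect
 * variant), and WRITE_AND_AUTHENTICATE does both. Writing 0 merely
 * clears the cached authentication status.
 */
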
static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return nvm_authenticate_show(dev, attr, buf);
}

static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;

	ret = nvm_authenticate_sysfs(dev, buf, true);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);

static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->safe_mode)
		ret = -ENODATA;
	else if (!sw->nvm)
		ret = -EAGAIN;
	else
		ret = sysfs_emit(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);

	mutex_unlock(&sw->tb->lock);

	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%#x\n", sw->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%s\n", sw->vendor_name ?: "");
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%pUb\n", sw->uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *switch_attrs[] = {
	&dev_attr_authorized.attr,
	&dev_attr_boot.attr,
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_generation.attr,
	&dev_attr_key.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_authenticate_on_disconnect.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_rx_speed.attr,
	&dev_attr_rx_lanes.attr,
	&dev_attr_tx_speed.attr,
	&dev_attr_tx_lanes.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	&dev_attr_unique_id.attr,
	NULL,
};

static umode_t switch_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct tb_switch *sw = tb_to_switch(dev);

	if (attr == &dev_attr_authorized.attr) {
		if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
		    sw->tb->security_level == TB_SECURITY_DPONLY)
			return 0;
	} else if (attr == &dev_attr_device.attr) {
		if (!sw->device)
			return 0;
	} else if (attr == &dev_attr_device_name.attr) {
		if (!sw->device_name)
			return 0;
	} else if (attr == &dev_attr_vendor.attr) {
		if (!sw->vendor)
			return 0;
	} else if (attr == &dev_attr_vendor_name.attr) {
		if (!sw->vendor_name)
			return 0;
	} else if (attr == &dev_attr_key.attr) {
		if (tb_route(sw) &&
		    sw->tb->security_level == TB_SECURITY_SECURE &&
		    sw->security_level == TB_SECURITY_SECURE)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_rx_speed.attr ||
		   attr == &dev_attr_rx_lanes.attr ||
		   attr == &dev_attr_tx_speed.attr ||
		   attr == &dev_attr_tx_lanes.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate.attr) {
		if (nvm_upgradeable(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_version.attr) {
		if (nvm_readable(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_boot.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
(attr == &dev_attr_nvm_authenticate_on_disconnect.attr) { 2148 if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER) 2149 return attr->mode; 2150 return 0; 2151 } 2152 2153 return sw->safe_mode ? 0 : attr->mode; 2154 } 2155 2156 static const struct attribute_group switch_group = { 2157 .is_visible = switch_attr_is_visible, 2158 .attrs = switch_attrs, 2159 }; 2160 2161 static const struct attribute_group *switch_groups[] = { 2162 &switch_group, 2163 NULL, 2164 }; 2165 2166 static void tb_switch_release(struct device *dev) 2167 { 2168 struct tb_switch *sw = tb_to_switch(dev); 2169 struct tb_port *port; 2170 2171 dma_port_free(sw->dma_port); 2172 2173 tb_switch_for_each_port(sw, port) { 2174 ida_destroy(&port->in_hopids); 2175 ida_destroy(&port->out_hopids); 2176 } 2177 2178 kfree(sw->uuid); 2179 kfree(sw->device_name); 2180 kfree(sw->vendor_name); 2181 kfree(sw->ports); 2182 kfree(sw->drom); 2183 kfree(sw->key); 2184 kfree(sw); 2185 } 2186 2187 static int tb_switch_uevent(const struct device *dev, struct kobj_uevent_env *env) 2188 { 2189 const struct tb_switch *sw = tb_to_switch(dev); 2190 const char *type; 2191 2192 if (sw->config.thunderbolt_version == USB4_VERSION_1_0) { 2193 if (add_uevent_var(env, "USB4_VERSION=1.0")) 2194 return -ENOMEM; 2195 } 2196 2197 if (!tb_route(sw)) { 2198 type = "host"; 2199 } else { 2200 const struct tb_port *port; 2201 bool hub = false; 2202 2203 /* Device is hub if it has any downstream ports */ 2204 tb_switch_for_each_port(sw, port) { 2205 if (!port->disabled && !tb_is_upstream_port(port) && 2206 tb_port_is_null(port)) { 2207 hub = true; 2208 break; 2209 } 2210 } 2211 2212 type = hub ? "hub" : "device"; 2213 } 2214 2215 if (add_uevent_var(env, "USB4_TYPE=%s", type)) 2216 return -ENOMEM; 2217 return 0; 2218 } 2219 2220 /* 2221 * Currently only need to provide the callbacks. Everything else is handled 2222 * in the connection manager. 
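 * The callbacks below simply delegate to the connection manager's
 * optional ->runtime_suspend_switch() and ->runtime_resume_switch()
 * hooks and return 0 when those are not implemented.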
2223 */ 2224 static int __maybe_unused tb_switch_runtime_suspend(struct device *dev) 2225 { 2226 struct tb_switch *sw = tb_to_switch(dev); 2227 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; 2228 2229 if (cm_ops->runtime_suspend_switch) 2230 return cm_ops->runtime_suspend_switch(sw); 2231 2232 return 0; 2233 } 2234 2235 static int __maybe_unused tb_switch_runtime_resume(struct device *dev) 2236 { 2237 struct tb_switch *sw = tb_to_switch(dev); 2238 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; 2239 2240 if (cm_ops->runtime_resume_switch) 2241 return cm_ops->runtime_resume_switch(sw); 2242 return 0; 2243 } 2244 2245 static const struct dev_pm_ops tb_switch_pm_ops = { 2246 SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume, 2247 NULL) 2248 }; 2249 2250 struct device_type tb_switch_type = { 2251 .name = "thunderbolt_device", 2252 .release = tb_switch_release, 2253 .uevent = tb_switch_uevent, 2254 .pm = &tb_switch_pm_ops, 2255 }; 2256 2257 static int tb_switch_get_generation(struct tb_switch *sw) 2258 { 2259 switch (sw->config.device_id) { 2260 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE: 2261 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE: 2262 case PCI_DEVICE_ID_INTEL_LIGHT_PEAK: 2263 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C: 2264 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C: 2265 case PCI_DEVICE_ID_INTEL_PORT_RIDGE: 2266 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE: 2267 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE: 2268 return 1; 2269 2270 case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE: 2271 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE: 2272 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE: 2273 return 2; 2274 2275 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: 2276 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE: 2277 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE: 2278 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: 2279 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: 2280 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: 2281 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: 2282 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE: 2283 case PCI_DEVICE_ID_INTEL_ICL_NHI0: 2284 case PCI_DEVICE_ID_INTEL_ICL_NHI1: 2285 return 3; 2286 2287 default: 2288 if (tb_switch_is_usb4(sw)) 2289 return 4; 2290 2291 /* 2292 * For unknown switches assume generation to be 1 to be 2293 * on the safe side. 2294 */ 2295 tb_sw_warn(sw, "unsupported switch device id %#x\n", 2296 sw->config.device_id); 2297 return 1; 2298 } 2299 } 2300 2301 static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth) 2302 { 2303 int max_depth; 2304 2305 if (tb_switch_is_usb4(sw) || 2306 (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch))) 2307 max_depth = USB4_SWITCH_MAX_DEPTH; 2308 else 2309 max_depth = TB_SWITCH_MAX_DEPTH; 2310 2311 return depth > max_depth; 2312 } 2313 2314 /** 2315 * tb_switch_alloc() - allocate a switch 2316 * @tb: Pointer to the owning domain 2317 * @parent: Parent device for this switch 2318 * @route: Route string for this switch 2319 * 2320 * Allocates and initializes a switch. Will not upload configuration to 2321 * the switch. For that you need to call tb_switch_configure() 2322 * separately. The returned switch should be released by calling 2323 * tb_switch_put(). 2324 * 2325 * Return: Pointer to the allocated switch or ERR_PTR() in case of 2326 * failure. 
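 *
 * A minimal usage sketch following the configure-then-add flow described
 * above and in tb_switch_configure() (parent_sw and ret are placeholders
 * for the caller's own variables):
 *
 *	sw = tb_switch_alloc(tb, &parent_sw->dev, route);
 *	if (IS_ERR(sw))
 *		return PTR_ERR(sw);
 *	ret = tb_switch_configure(sw);
 *	if (!ret)
 *		ret = tb_switch_add(sw);
 *	if (ret)
 *		tb_switch_put(sw);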
2327 */ 2328 struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent, 2329 u64 route) 2330 { 2331 struct tb_switch *sw; 2332 int upstream_port; 2333 int i, ret, depth; 2334 2335 /* Unlock the downstream port so we can access the switch below */ 2336 if (route) { 2337 struct tb_switch *parent_sw = tb_to_switch(parent); 2338 struct tb_port *down; 2339 2340 down = tb_port_at(route, parent_sw); 2341 tb_port_unlock(down); 2342 } 2343 2344 depth = tb_route_length(route); 2345 2346 upstream_port = tb_cfg_get_upstream_port(tb->ctl, route); 2347 if (upstream_port < 0) 2348 return ERR_PTR(upstream_port); 2349 2350 sw = kzalloc(sizeof(*sw), GFP_KERNEL); 2351 if (!sw) 2352 return ERR_PTR(-ENOMEM); 2353 2354 sw->tb = tb; 2355 ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5); 2356 if (ret) 2357 goto err_free_sw_ports; 2358 2359 sw->generation = tb_switch_get_generation(sw); 2360 2361 tb_dbg(tb, "current switch config:\n"); 2362 tb_dump_switch(tb, sw); 2363 2364 /* configure switch */ 2365 sw->config.upstream_port_number = upstream_port; 2366 sw->config.depth = depth; 2367 sw->config.route_hi = upper_32_bits(route); 2368 sw->config.route_lo = lower_32_bits(route); 2369 sw->config.enabled = 0; 2370 2371 /* Make sure we do not exceed maximum topology limit */ 2372 if (tb_switch_exceeds_max_depth(sw, depth)) { 2373 ret = -EADDRNOTAVAIL; 2374 goto err_free_sw_ports; 2375 } 2376 2377 /* initialize ports */ 2378 sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports), 2379 GFP_KERNEL); 2380 if (!sw->ports) { 2381 ret = -ENOMEM; 2382 goto err_free_sw_ports; 2383 } 2384 2385 for (i = 0; i <= sw->config.max_port_number; i++) { 2386 /* minimum setup for tb_find_cap and tb_drom_read to work */ 2387 sw->ports[i].sw = sw; 2388 sw->ports[i].port = i; 2389 2390 /* Control port does not need HopID allocation */ 2391 if (i) { 2392 ida_init(&sw->ports[i].in_hopids); 2393 ida_init(&sw->ports[i].out_hopids); 2394 } 2395 } 2396 2397 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS); 2398 if (ret > 0) 2399 sw->cap_plug_events = ret; 2400 2401 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2); 2402 if (ret > 0) 2403 sw->cap_vsec_tmu = ret; 2404 2405 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER); 2406 if (ret > 0) 2407 sw->cap_lc = ret; 2408 2409 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP); 2410 if (ret > 0) 2411 sw->cap_lp = ret; 2412 2413 /* Root switch is always authorized */ 2414 if (!route) 2415 sw->authorized = true; 2416 2417 device_initialize(&sw->dev); 2418 sw->dev.parent = parent; 2419 sw->dev.bus = &tb_bus_type; 2420 sw->dev.type = &tb_switch_type; 2421 sw->dev.groups = switch_groups; 2422 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw)); 2423 2424 return sw; 2425 2426 err_free_sw_ports: 2427 kfree(sw->ports); 2428 kfree(sw); 2429 2430 return ERR_PTR(ret); 2431 } 2432 2433 /** 2434 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode 2435 * @tb: Pointer to the owning domain 2436 * @parent: Parent device for this switch 2437 * @route: Route string for this switch 2438 * 2439 * This creates a switch in safe mode. This means the switch pretty much 2440 * lacks all capabilities except DMA configuration port before it is 2441 * flashed with a valid NVM firmware. 2442 * 2443 * The returned switch must be released by calling tb_switch_put(). 
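 * Upgrading the NVM through the DMA configuration port is the only
 * operation such a router is expected to support (see
 * tb_switch_add_dma_port()).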
2444 * 2445 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure 2446 */ 2447 struct tb_switch * 2448 tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route) 2449 { 2450 struct tb_switch *sw; 2451 2452 sw = kzalloc(sizeof(*sw), GFP_KERNEL); 2453 if (!sw) 2454 return ERR_PTR(-ENOMEM); 2455 2456 sw->tb = tb; 2457 sw->config.depth = tb_route_length(route); 2458 sw->config.route_hi = upper_32_bits(route); 2459 sw->config.route_lo = lower_32_bits(route); 2460 sw->safe_mode = true; 2461 2462 device_initialize(&sw->dev); 2463 sw->dev.parent = parent; 2464 sw->dev.bus = &tb_bus_type; 2465 sw->dev.type = &tb_switch_type; 2466 sw->dev.groups = switch_groups; 2467 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw)); 2468 2469 return sw; 2470 } 2471 2472 /** 2473 * tb_switch_configure() - Uploads configuration to the switch 2474 * @sw: Switch to configure 2475 * 2476 * Call this function before the switch is added to the system. It will 2477 * upload configuration to the switch and makes it available for the 2478 * connection manager to use. Can be called to the switch again after 2479 * resume from low power states to re-initialize it. 2480 * 2481 * Return: %0 in case of success and negative errno in case of failure 2482 */ 2483 int tb_switch_configure(struct tb_switch *sw) 2484 { 2485 struct tb *tb = sw->tb; 2486 u64 route; 2487 int ret; 2488 2489 route = tb_route(sw); 2490 2491 tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n", 2492 sw->config.enabled ? "restoring" : "initializing", route, 2493 tb_route_length(route), sw->config.upstream_port_number); 2494 2495 sw->config.enabled = 1; 2496 2497 if (tb_switch_is_usb4(sw)) { 2498 /* 2499 * For USB4 devices, we need to program the CM version 2500 * accordingly so that it knows to expose all the 2501 * additional capabilities. 2502 */ 2503 sw->config.cmuv = USB4_VERSION_1_0; 2504 sw->config.plug_events_delay = 0xa; 2505 2506 /* Enumerate the switch */ 2507 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, 2508 ROUTER_CS_1, 4); 2509 if (ret) 2510 return ret; 2511 2512 ret = usb4_switch_setup(sw); 2513 } else { 2514 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) 2515 tb_sw_warn(sw, "unknown switch vendor id %#x\n", 2516 sw->config.vendor_id); 2517 2518 if (!sw->cap_plug_events) { 2519 tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n"); 2520 return -ENODEV; 2521 } 2522 2523 /* Enumerate the switch */ 2524 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, 2525 ROUTER_CS_1, 3); 2526 } 2527 if (ret) 2528 return ret; 2529 2530 return tb_plug_events_active(sw, true); 2531 } 2532 2533 static int tb_switch_set_uuid(struct tb_switch *sw) 2534 { 2535 bool uid = false; 2536 u32 uuid[4]; 2537 int ret; 2538 2539 if (sw->uuid) 2540 return 0; 2541 2542 if (tb_switch_is_usb4(sw)) { 2543 ret = usb4_switch_read_uid(sw, &sw->uid); 2544 if (ret) 2545 return ret; 2546 uid = true; 2547 } else { 2548 /* 2549 * The newer controllers include fused UUID as part of 2550 * link controller specific registers 2551 */ 2552 ret = tb_lc_read_uuid(sw, uuid); 2553 if (ret) { 2554 if (ret != -EINVAL) 2555 return ret; 2556 uid = true; 2557 } 2558 } 2559 2560 if (uid) { 2561 /* 2562 * ICM generates UUID based on UID and fills the upper 2563 * two words with ones. This is not strictly following 2564 * UUID format but we want to be compatible with it so 2565 * we do the same here. 
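 * In other words the resulting UUID is the 64-bit UID in the first two
 * 32-bit words followed by all ones in the remaining two.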
2566 */ 2567 uuid[0] = sw->uid & 0xffffffff; 2568 uuid[1] = (sw->uid >> 32) & 0xffffffff; 2569 uuid[2] = 0xffffffff; 2570 uuid[3] = 0xffffffff; 2571 } 2572 2573 sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL); 2574 if (!sw->uuid) 2575 return -ENOMEM; 2576 return 0; 2577 } 2578 2579 static int tb_switch_add_dma_port(struct tb_switch *sw) 2580 { 2581 u32 status; 2582 int ret; 2583 2584 switch (sw->generation) { 2585 case 2: 2586 /* Only root switch can be upgraded */ 2587 if (tb_route(sw)) 2588 return 0; 2589 2590 fallthrough; 2591 case 3: 2592 case 4: 2593 ret = tb_switch_set_uuid(sw); 2594 if (ret) 2595 return ret; 2596 break; 2597 2598 default: 2599 /* 2600 * DMA port is the only thing available when the switch 2601 * is in safe mode. 2602 */ 2603 if (!sw->safe_mode) 2604 return 0; 2605 break; 2606 } 2607 2608 if (sw->no_nvm_upgrade) 2609 return 0; 2610 2611 if (tb_switch_is_usb4(sw)) { 2612 ret = usb4_switch_nvm_authenticate_status(sw, &status); 2613 if (ret) 2614 return ret; 2615 2616 if (status) { 2617 tb_sw_info(sw, "switch flash authentication failed\n"); 2618 nvm_set_auth_status(sw, status); 2619 } 2620 2621 return 0; 2622 } 2623 2624 /* Root switch DMA port requires running firmware */ 2625 if (!tb_route(sw) && !tb_switch_is_icm(sw)) 2626 return 0; 2627 2628 sw->dma_port = dma_port_alloc(sw); 2629 if (!sw->dma_port) 2630 return 0; 2631 2632 /* 2633 * If there is status already set then authentication failed 2634 * when the dma_port_flash_update_auth() returned. Power cycling 2635 * is not needed (it was done already) so only thing we do here 2636 * is to unblock runtime PM of the root port. 2637 */ 2638 nvm_get_auth_status(sw, &status); 2639 if (status) { 2640 if (!tb_route(sw)) 2641 nvm_authenticate_complete_dma_port(sw); 2642 return 0; 2643 } 2644 2645 /* 2646 * Check status of the previous flash authentication. If there 2647 * is one we need to power cycle the switch in any case to make 2648 * it functional again. 2649 */ 2650 ret = dma_port_flash_update_auth_status(sw->dma_port, &status); 2651 if (ret <= 0) 2652 return ret; 2653 2654 /* Now we can allow root port to suspend again */ 2655 if (!tb_route(sw)) 2656 nvm_authenticate_complete_dma_port(sw); 2657 2658 if (status) { 2659 tb_sw_info(sw, "switch flash authentication failed\n"); 2660 nvm_set_auth_status(sw, status); 2661 } 2662 2663 tb_sw_info(sw, "power cycling the switch now\n"); 2664 dma_port_power_cycle(sw->dma_port); 2665 2666 /* 2667 * We return error here which causes the switch adding failure. 2668 * It should appear back after power cycle is complete. 
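 * (The error propagates through tb_switch_add() which logs it and fails
 * this enumeration attempt.)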
2669 */ 2670 return -ESHUTDOWN; 2671 } 2672 2673 static void tb_switch_default_link_ports(struct tb_switch *sw) 2674 { 2675 int i; 2676 2677 for (i = 1; i <= sw->config.max_port_number; i++) { 2678 struct tb_port *port = &sw->ports[i]; 2679 struct tb_port *subordinate; 2680 2681 if (!tb_port_is_null(port)) 2682 continue; 2683 2684 /* Check for the subordinate port */ 2685 if (i == sw->config.max_port_number || 2686 !tb_port_is_null(&sw->ports[i + 1])) 2687 continue; 2688 2689 /* Link them if not already done so (by DROM) */ 2690 subordinate = &sw->ports[i + 1]; 2691 if (!port->dual_link_port && !subordinate->dual_link_port) { 2692 port->link_nr = 0; 2693 port->dual_link_port = subordinate; 2694 subordinate->link_nr = 1; 2695 subordinate->dual_link_port = port; 2696 2697 tb_sw_dbg(sw, "linked ports %d <-> %d\n", 2698 port->port, subordinate->port); 2699 } 2700 } 2701 } 2702 2703 static bool tb_switch_lane_bonding_possible(struct tb_switch *sw) 2704 { 2705 const struct tb_port *up = tb_upstream_port(sw); 2706 2707 if (!up->dual_link_port || !up->dual_link_port->remote) 2708 return false; 2709 2710 if (tb_switch_is_usb4(sw)) 2711 return usb4_switch_lane_bonding_possible(sw); 2712 return tb_lc_lane_bonding_possible(sw); 2713 } 2714 2715 static int tb_switch_update_link_attributes(struct tb_switch *sw) 2716 { 2717 struct tb_port *up; 2718 bool change = false; 2719 int ret; 2720 2721 if (!tb_route(sw) || tb_switch_is_icm(sw)) 2722 return 0; 2723 2724 up = tb_upstream_port(sw); 2725 2726 ret = tb_port_get_link_speed(up); 2727 if (ret < 0) 2728 return ret; 2729 if (sw->link_speed != ret) 2730 change = true; 2731 sw->link_speed = ret; 2732 2733 ret = tb_port_get_link_width(up); 2734 if (ret < 0) 2735 return ret; 2736 if (sw->link_width != ret) 2737 change = true; 2738 sw->link_width = ret; 2739 2740 /* Notify userspace that there is possible link attribute change */ 2741 if (device_is_registered(&sw->dev) && change) 2742 kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE); 2743 2744 return 0; 2745 } 2746 2747 /** 2748 * tb_switch_lane_bonding_enable() - Enable lane bonding 2749 * @sw: Switch to enable lane bonding 2750 * 2751 * Connection manager can call this function to enable lane bonding of a 2752 * switch. If conditions are correct and both switches support the feature, 2753 * lanes are bonded. It is safe to call this to any switch. 
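 * Returns %0 also when bonding is not possible (host router, feature not
 * supported or dual-lane width not available on either end); a negative
 * errno is returned only if enabling was attempted and failed.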
2754 */ 2755 int tb_switch_lane_bonding_enable(struct tb_switch *sw) 2756 { 2757 struct tb_switch *parent = tb_to_switch(sw->dev.parent); 2758 struct tb_port *up, *down; 2759 u64 route = tb_route(sw); 2760 int ret; 2761 2762 if (!route) 2763 return 0; 2764 2765 if (!tb_switch_lane_bonding_possible(sw)) 2766 return 0; 2767 2768 up = tb_upstream_port(sw); 2769 down = tb_port_at(route, parent); 2770 2771 if (!tb_port_is_width_supported(up, 2) || 2772 !tb_port_is_width_supported(down, 2)) 2773 return 0; 2774 2775 ret = tb_port_lane_bonding_enable(up); 2776 if (ret) { 2777 tb_port_warn(up, "failed to enable lane bonding\n"); 2778 return ret; 2779 } 2780 2781 ret = tb_port_lane_bonding_enable(down); 2782 if (ret) { 2783 tb_port_warn(down, "failed to enable lane bonding\n"); 2784 tb_port_lane_bonding_disable(up); 2785 return ret; 2786 } 2787 2788 ret = tb_port_wait_for_link_width(down, 2, 100); 2789 if (ret) { 2790 tb_port_warn(down, "timeout enabling lane bonding\n"); 2791 return ret; 2792 } 2793 2794 tb_port_update_credits(down); 2795 tb_port_update_credits(up); 2796 tb_switch_update_link_attributes(sw); 2797 2798 tb_sw_dbg(sw, "lane bonding enabled\n"); 2799 return ret; 2800 } 2801 2802 /** 2803 * tb_switch_lane_bonding_disable() - Disable lane bonding 2804 * @sw: Switch whose lane bonding to disable 2805 * 2806 * Disables lane bonding between @sw and parent. This can be called even 2807 * if lanes were not bonded originally. 2808 */ 2809 void tb_switch_lane_bonding_disable(struct tb_switch *sw) 2810 { 2811 struct tb_switch *parent = tb_to_switch(sw->dev.parent); 2812 struct tb_port *up, *down; 2813 2814 if (!tb_route(sw)) 2815 return; 2816 2817 up = tb_upstream_port(sw); 2818 if (!up->bonded) 2819 return; 2820 2821 down = tb_port_at(tb_route(sw), parent); 2822 2823 tb_port_lane_bonding_disable(up); 2824 tb_port_lane_bonding_disable(down); 2825 2826 /* 2827 * It is fine if we get other errors as the router might have 2828 * been unplugged. 2829 */ 2830 if (tb_port_wait_for_link_width(down, 1, 100) == -ETIMEDOUT) 2831 tb_sw_warn(sw, "timeout disabling lane bonding\n"); 2832 2833 tb_port_update_credits(down); 2834 tb_port_update_credits(up); 2835 tb_switch_update_link_attributes(sw); 2836 2837 tb_sw_dbg(sw, "lane bonding disabled\n"); 2838 } 2839 2840 /** 2841 * tb_switch_configure_link() - Set link configured 2842 * @sw: Switch whose link is configured 2843 * 2844 * Sets the link upstream from @sw configured (from both ends) so that 2845 * it will not be disconnected when the domain exits sleep. Can be 2846 * called for any switch. 2847 * 2848 * It is recommended that this is called after lane bonding is enabled. 2849 * 2850 * Returns %0 on success and negative errno in case of error. 2851 */ 2852 int tb_switch_configure_link(struct tb_switch *sw) 2853 { 2854 struct tb_port *up, *down; 2855 int ret; 2856 2857 if (!tb_route(sw) || tb_switch_is_icm(sw)) 2858 return 0; 2859 2860 up = tb_upstream_port(sw); 2861 if (tb_switch_is_usb4(up->sw)) 2862 ret = usb4_port_configure(up); 2863 else 2864 ret = tb_lc_configure_port(up); 2865 if (ret) 2866 return ret; 2867 2868 down = up->remote; 2869 if (tb_switch_is_usb4(down->sw)) 2870 return usb4_port_configure(down); 2871 return tb_lc_configure_port(down); 2872 } 2873 2874 /** 2875 * tb_switch_unconfigure_link() - Unconfigure link 2876 * @sw: Switch whose link is unconfigured 2877 * 2878 * Sets the link unconfigured so the @sw will be disconnected if the 2879 * domain exists sleep. 
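 * This is the counterpart of tb_switch_configure_link().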
2880 */ 2881 void tb_switch_unconfigure_link(struct tb_switch *sw) 2882 { 2883 struct tb_port *up, *down; 2884 2885 if (sw->is_unplugged) 2886 return; 2887 if (!tb_route(sw) || tb_switch_is_icm(sw)) 2888 return; 2889 2890 up = tb_upstream_port(sw); 2891 if (tb_switch_is_usb4(up->sw)) 2892 usb4_port_unconfigure(up); 2893 else 2894 tb_lc_unconfigure_port(up); 2895 2896 down = up->remote; 2897 if (tb_switch_is_usb4(down->sw)) 2898 usb4_port_unconfigure(down); 2899 else 2900 tb_lc_unconfigure_port(down); 2901 } 2902 2903 static void tb_switch_credits_init(struct tb_switch *sw) 2904 { 2905 if (tb_switch_is_icm(sw)) 2906 return; 2907 if (!tb_switch_is_usb4(sw)) 2908 return; 2909 if (usb4_switch_credits_init(sw)) 2910 tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n"); 2911 } 2912 2913 static int tb_switch_port_hotplug_enable(struct tb_switch *sw) 2914 { 2915 struct tb_port *port; 2916 2917 if (tb_switch_is_icm(sw)) 2918 return 0; 2919 2920 tb_switch_for_each_port(sw, port) { 2921 int res; 2922 2923 if (!port->cap_usb4) 2924 continue; 2925 2926 res = usb4_port_hotplug_enable(port); 2927 if (res) 2928 return res; 2929 } 2930 return 0; 2931 } 2932 2933 /** 2934 * tb_switch_add() - Add a switch to the domain 2935 * @sw: Switch to add 2936 * 2937 * This is the last step in adding switch to the domain. It will read 2938 * identification information from DROM and initializes ports so that 2939 * they can be used to connect other switches. The switch will be 2940 * exposed to the userspace when this function successfully returns. To 2941 * remove and release the switch, call tb_switch_remove(). 2942 * 2943 * Return: %0 in case of success and negative errno in case of failure 2944 */ 2945 int tb_switch_add(struct tb_switch *sw) 2946 { 2947 int i, ret; 2948 2949 /* 2950 * Initialize DMA control port now before we read DROM. Recent 2951 * host controllers have more complete DROM on NVM that includes 2952 * vendor and model identification strings which we then expose 2953 * to the userspace. NVM can be accessed through DMA 2954 * configuration based mailbox. 
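 * (USB4 routers expose their NVM through router operations instead, so
 * for them this step only checks the previous authentication status.)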
2955 */ 2956 ret = tb_switch_add_dma_port(sw); 2957 if (ret) { 2958 dev_err(&sw->dev, "failed to add DMA port\n"); 2959 return ret; 2960 } 2961 2962 if (!sw->safe_mode) { 2963 tb_switch_credits_init(sw); 2964 2965 /* read drom */ 2966 ret = tb_drom_read(sw); 2967 if (ret) 2968 dev_warn(&sw->dev, "reading DROM failed: %d\n", ret); 2969 tb_sw_dbg(sw, "uid: %#llx\n", sw->uid); 2970 2971 ret = tb_switch_set_uuid(sw); 2972 if (ret) { 2973 dev_err(&sw->dev, "failed to set UUID\n"); 2974 return ret; 2975 } 2976 2977 for (i = 0; i <= sw->config.max_port_number; i++) { 2978 if (sw->ports[i].disabled) { 2979 tb_port_dbg(&sw->ports[i], "disabled by eeprom\n"); 2980 continue; 2981 } 2982 ret = tb_init_port(&sw->ports[i]); 2983 if (ret) { 2984 dev_err(&sw->dev, "failed to initialize port %d\n", i); 2985 return ret; 2986 } 2987 } 2988 2989 tb_check_quirks(sw); 2990 2991 tb_switch_default_link_ports(sw); 2992 2993 ret = tb_switch_update_link_attributes(sw); 2994 if (ret) 2995 return ret; 2996 2997 ret = tb_switch_tmu_init(sw); 2998 if (ret) 2999 return ret; 3000 } 3001 3002 ret = tb_switch_port_hotplug_enable(sw); 3003 if (ret) 3004 return ret; 3005 3006 ret = device_add(&sw->dev); 3007 if (ret) { 3008 dev_err(&sw->dev, "failed to add device: %d\n", ret); 3009 return ret; 3010 } 3011 3012 if (tb_route(sw)) { 3013 dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n", 3014 sw->vendor, sw->device); 3015 if (sw->vendor_name && sw->device_name) 3016 dev_info(&sw->dev, "%s %s\n", sw->vendor_name, 3017 sw->device_name); 3018 } 3019 3020 ret = usb4_switch_add_ports(sw); 3021 if (ret) { 3022 dev_err(&sw->dev, "failed to add USB4 ports\n"); 3023 goto err_del; 3024 } 3025 3026 ret = tb_switch_nvm_add(sw); 3027 if (ret) { 3028 dev_err(&sw->dev, "failed to add NVM devices\n"); 3029 goto err_ports; 3030 } 3031 3032 /* 3033 * Thunderbolt routers do not generate wakeups themselves but 3034 * they forward wakeups from tunneled protocols, so enable it 3035 * here. 3036 */ 3037 device_init_wakeup(&sw->dev, true); 3038 3039 pm_runtime_set_active(&sw->dev); 3040 if (sw->rpm) { 3041 pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY); 3042 pm_runtime_use_autosuspend(&sw->dev); 3043 pm_runtime_mark_last_busy(&sw->dev); 3044 pm_runtime_enable(&sw->dev); 3045 pm_request_autosuspend(&sw->dev); 3046 } 3047 3048 tb_switch_debugfs_init(sw); 3049 return 0; 3050 3051 err_ports: 3052 usb4_switch_remove_ports(sw); 3053 err_del: 3054 device_del(&sw->dev); 3055 3056 return ret; 3057 } 3058 3059 /** 3060 * tb_switch_remove() - Remove and release a switch 3061 * @sw: Switch to remove 3062 * 3063 * This will remove the switch from the domain and release it after last 3064 * reference count drops to zero. If there are switches connected below 3065 * this switch, they will be removed as well. 
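 * The removal is recursive: routers and XDomain connections found behind
 * the downstream ports are removed before this router itself.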
3066 */ 3067 void tb_switch_remove(struct tb_switch *sw) 3068 { 3069 struct tb_port *port; 3070 3071 tb_switch_debugfs_remove(sw); 3072 3073 if (sw->rpm) { 3074 pm_runtime_get_sync(&sw->dev); 3075 pm_runtime_disable(&sw->dev); 3076 } 3077 3078 /* port 0 is the switch itself and never has a remote */ 3079 tb_switch_for_each_port(sw, port) { 3080 if (tb_port_has_remote(port)) { 3081 tb_switch_remove(port->remote->sw); 3082 port->remote = NULL; 3083 } else if (port->xdomain) { 3084 tb_xdomain_remove(port->xdomain); 3085 port->xdomain = NULL; 3086 } 3087 3088 /* Remove any downstream retimers */ 3089 tb_retimer_remove_all(port); 3090 } 3091 3092 if (!sw->is_unplugged) 3093 tb_plug_events_active(sw, false); 3094 3095 tb_switch_nvm_remove(sw); 3096 usb4_switch_remove_ports(sw); 3097 3098 if (tb_route(sw)) 3099 dev_info(&sw->dev, "device disconnected\n"); 3100 device_unregister(&sw->dev); 3101 } 3102 3103 /** 3104 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches 3105 * @sw: Router to mark unplugged 3106 */ 3107 void tb_sw_set_unplugged(struct tb_switch *sw) 3108 { 3109 struct tb_port *port; 3110 3111 if (sw == sw->tb->root_switch) { 3112 tb_sw_WARN(sw, "cannot unplug root switch\n"); 3113 return; 3114 } 3115 if (sw->is_unplugged) { 3116 tb_sw_WARN(sw, "is_unplugged already set\n"); 3117 return; 3118 } 3119 sw->is_unplugged = true; 3120 tb_switch_for_each_port(sw, port) { 3121 if (tb_port_has_remote(port)) 3122 tb_sw_set_unplugged(port->remote->sw); 3123 else if (port->xdomain) 3124 port->xdomain->is_unplugged = true; 3125 } 3126 } 3127 3128 static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags) 3129 { 3130 if (flags) 3131 tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags); 3132 else 3133 tb_sw_dbg(sw, "disabling wakeup\n"); 3134 3135 if (tb_switch_is_usb4(sw)) 3136 return usb4_switch_set_wake(sw, flags); 3137 return tb_lc_set_wake(sw, flags); 3138 } 3139 3140 int tb_switch_resume(struct tb_switch *sw) 3141 { 3142 struct tb_port *port; 3143 int err; 3144 3145 tb_sw_dbg(sw, "resuming switch\n"); 3146 3147 /* 3148 * Check for UID of the connected switches except for root 3149 * switch which we assume cannot be removed. 3150 */ 3151 if (tb_route(sw)) { 3152 u64 uid; 3153 3154 /* 3155 * Check first that we can still read the switch config 3156 * space. It may be that there is now another domain 3157 * connected. 
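 * Reading back the upstream port number is enough for that check; if it
 * fails the router is treated as not present anymore.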
3158 */ 3159 err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw)); 3160 if (err < 0) { 3161 tb_sw_info(sw, "switch not present anymore\n"); 3162 return err; 3163 } 3164 3165 /* We don't have any way to confirm this was the same device */ 3166 if (!sw->uid) 3167 return -ENODEV; 3168 3169 if (tb_switch_is_usb4(sw)) 3170 err = usb4_switch_read_uid(sw, &uid); 3171 else 3172 err = tb_drom_read_uid_only(sw, &uid); 3173 if (err) { 3174 tb_sw_warn(sw, "uid read failed\n"); 3175 return err; 3176 } 3177 if (sw->uid != uid) { 3178 tb_sw_info(sw, 3179 "changed while suspended (uid %#llx -> %#llx)\n", 3180 sw->uid, uid); 3181 return -ENODEV; 3182 } 3183 } 3184 3185 err = tb_switch_configure(sw); 3186 if (err) 3187 return err; 3188 3189 /* Disable wakes */ 3190 tb_switch_set_wake(sw, 0); 3191 3192 err = tb_switch_tmu_init(sw); 3193 if (err) 3194 return err; 3195 3196 /* check for surviving downstream switches */ 3197 tb_switch_for_each_port(sw, port) { 3198 if (!tb_port_is_null(port)) 3199 continue; 3200 3201 if (!tb_port_resume(port)) 3202 continue; 3203 3204 if (tb_wait_for_port(port, true) <= 0) { 3205 tb_port_warn(port, 3206 "lost during suspend, disconnecting\n"); 3207 if (tb_port_has_remote(port)) 3208 tb_sw_set_unplugged(port->remote->sw); 3209 else if (port->xdomain) 3210 port->xdomain->is_unplugged = true; 3211 } else { 3212 /* 3213 * Always unlock the port so the downstream 3214 * switch/domain is accessible. 3215 */ 3216 if (tb_port_unlock(port)) 3217 tb_port_warn(port, "failed to unlock port\n"); 3218 if (port->remote && tb_switch_resume(port->remote->sw)) { 3219 tb_port_warn(port, 3220 "lost during suspend, disconnecting\n"); 3221 tb_sw_set_unplugged(port->remote->sw); 3222 } 3223 } 3224 } 3225 return 0; 3226 } 3227 3228 /** 3229 * tb_switch_suspend() - Put a switch to sleep 3230 * @sw: Switch to suspend 3231 * @runtime: Is this runtime suspend or system sleep 3232 * 3233 * Suspends router and all its children. Enables wakes according to 3234 * value of @runtime and then sets sleep bit for the router. If @sw is 3235 * host router the domain is ready to go to sleep once this function 3236 * returns. 3237 */ 3238 void tb_switch_suspend(struct tb_switch *sw, bool runtime) 3239 { 3240 unsigned int flags = 0; 3241 struct tb_port *port; 3242 int err; 3243 3244 tb_sw_dbg(sw, "suspending switch\n"); 3245 3246 /* 3247 * Actually only needed for Titan Ridge but for simplicity can be 3248 * done for USB4 device too as CLx is re-enabled at resume. 3249 * CL0s and CL1 are enabled and supported together. 
3250 */ 3251 if (tb_switch_is_clx_enabled(sw, TB_CL1)) { 3252 if (tb_switch_disable_clx(sw, TB_CL1)) 3253 tb_sw_warn(sw, "failed to disable %s on upstream port\n", 3254 tb_switch_clx_name(TB_CL1)); 3255 } 3256 3257 err = tb_plug_events_active(sw, false); 3258 if (err) 3259 return; 3260 3261 tb_switch_for_each_port(sw, port) { 3262 if (tb_port_has_remote(port)) 3263 tb_switch_suspend(port->remote->sw, runtime); 3264 } 3265 3266 if (runtime) { 3267 /* Trigger wake when something is plugged in/out */ 3268 flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT; 3269 flags |= TB_WAKE_ON_USB4; 3270 flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP; 3271 } else if (device_may_wakeup(&sw->dev)) { 3272 flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE; 3273 } 3274 3275 tb_switch_set_wake(sw, flags); 3276 3277 if (tb_switch_is_usb4(sw)) 3278 usb4_switch_set_sleep(sw); 3279 else 3280 tb_lc_set_sleep(sw); 3281 } 3282 3283 /** 3284 * tb_switch_query_dp_resource() - Query availability of DP resource 3285 * @sw: Switch whose DP resource is queried 3286 * @in: DP IN port 3287 * 3288 * Queries availability of DP resource for DP tunneling using switch 3289 * specific means. Returns %true if resource is available. 3290 */ 3291 bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in) 3292 { 3293 if (tb_switch_is_usb4(sw)) 3294 return usb4_switch_query_dp_resource(sw, in); 3295 return tb_lc_dp_sink_query(sw, in); 3296 } 3297 3298 /** 3299 * tb_switch_alloc_dp_resource() - Allocate available DP resource 3300 * @sw: Switch whose DP resource is allocated 3301 * @in: DP IN port 3302 * 3303 * Allocates DP resource for DP tunneling. The resource must be 3304 * available for this to succeed (see tb_switch_query_dp_resource()). 3305 * Returns %0 in success and negative errno otherwise. 3306 */ 3307 int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in) 3308 { 3309 int ret; 3310 3311 if (tb_switch_is_usb4(sw)) 3312 ret = usb4_switch_alloc_dp_resource(sw, in); 3313 else 3314 ret = tb_lc_dp_sink_alloc(sw, in); 3315 3316 if (ret) 3317 tb_sw_warn(sw, "failed to allocate DP resource for port %d\n", 3318 in->port); 3319 else 3320 tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port); 3321 3322 return ret; 3323 } 3324 3325 /** 3326 * tb_switch_dealloc_dp_resource() - De-allocate DP resource 3327 * @sw: Switch whose DP resource is de-allocated 3328 * @in: DP IN port 3329 * 3330 * De-allocates DP resource that was previously allocated for DP 3331 * tunneling. 
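 * Failures are only logged; the function does not return an error.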
3332 */ 3333 void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in) 3334 { 3335 int ret; 3336 3337 if (tb_switch_is_usb4(sw)) 3338 ret = usb4_switch_dealloc_dp_resource(sw, in); 3339 else 3340 ret = tb_lc_dp_sink_dealloc(sw, in); 3341 3342 if (ret) 3343 tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n", 3344 in->port); 3345 else 3346 tb_sw_dbg(sw, "released DP resource for port %d\n", in->port); 3347 } 3348 3349 struct tb_sw_lookup { 3350 struct tb *tb; 3351 u8 link; 3352 u8 depth; 3353 const uuid_t *uuid; 3354 u64 route; 3355 }; 3356 3357 static int tb_switch_match(struct device *dev, const void *data) 3358 { 3359 struct tb_switch *sw = tb_to_switch(dev); 3360 const struct tb_sw_lookup *lookup = data; 3361 3362 if (!sw) 3363 return 0; 3364 if (sw->tb != lookup->tb) 3365 return 0; 3366 3367 if (lookup->uuid) 3368 return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid)); 3369 3370 if (lookup->route) { 3371 return sw->config.route_lo == lower_32_bits(lookup->route) && 3372 sw->config.route_hi == upper_32_bits(lookup->route); 3373 } 3374 3375 /* Root switch is matched only by depth */ 3376 if (!lookup->depth) 3377 return !sw->depth; 3378 3379 return sw->link == lookup->link && sw->depth == lookup->depth; 3380 } 3381 3382 /** 3383 * tb_switch_find_by_link_depth() - Find switch by link and depth 3384 * @tb: Domain the switch belongs 3385 * @link: Link number the switch is connected 3386 * @depth: Depth of the switch in link 3387 * 3388 * Returned switch has reference count increased so the caller needs to 3389 * call tb_switch_put() when done with the switch. 3390 */ 3391 struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth) 3392 { 3393 struct tb_sw_lookup lookup; 3394 struct device *dev; 3395 3396 memset(&lookup, 0, sizeof(lookup)); 3397 lookup.tb = tb; 3398 lookup.link = link; 3399 lookup.depth = depth; 3400 3401 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match); 3402 if (dev) 3403 return tb_to_switch(dev); 3404 3405 return NULL; 3406 } 3407 3408 /** 3409 * tb_switch_find_by_uuid() - Find switch by UUID 3410 * @tb: Domain the switch belongs 3411 * @uuid: UUID to look for 3412 * 3413 * Returned switch has reference count increased so the caller needs to 3414 * call tb_switch_put() when done with the switch. 3415 */ 3416 struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid) 3417 { 3418 struct tb_sw_lookup lookup; 3419 struct device *dev; 3420 3421 memset(&lookup, 0, sizeof(lookup)); 3422 lookup.tb = tb; 3423 lookup.uuid = uuid; 3424 3425 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match); 3426 if (dev) 3427 return tb_to_switch(dev); 3428 3429 return NULL; 3430 } 3431 3432 /** 3433 * tb_switch_find_by_route() - Find switch by route string 3434 * @tb: Domain the switch belongs 3435 * @route: Route string to look for 3436 * 3437 * Returned switch has reference count increased so the caller needs to 3438 * call tb_switch_put() when done with the switch. 
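 * If @route is %0 a reference to the root switch is returned.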
3439 */ 3440 struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route) 3441 { 3442 struct tb_sw_lookup lookup; 3443 struct device *dev; 3444 3445 if (!route) 3446 return tb_switch_get(tb->root_switch); 3447 3448 memset(&lookup, 0, sizeof(lookup)); 3449 lookup.tb = tb; 3450 lookup.route = route; 3451 3452 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match); 3453 if (dev) 3454 return tb_to_switch(dev); 3455 3456 return NULL; 3457 } 3458 3459 /** 3460 * tb_switch_find_port() - return the first port of @type on @sw or NULL 3461 * @sw: Switch to find the port from 3462 * @type: Port type to look for 3463 */ 3464 struct tb_port *tb_switch_find_port(struct tb_switch *sw, 3465 enum tb_port_type type) 3466 { 3467 struct tb_port *port; 3468 3469 tb_switch_for_each_port(sw, port) { 3470 if (port->config.type == type) 3471 return port; 3472 } 3473 3474 return NULL; 3475 } 3476 3477 static int tb_switch_pm_secondary_resolve(struct tb_switch *sw) 3478 { 3479 struct tb_switch *parent = tb_switch_parent(sw); 3480 struct tb_port *up, *down; 3481 int ret; 3482 3483 if (!tb_route(sw)) 3484 return 0; 3485 3486 up = tb_upstream_port(sw); 3487 down = tb_port_at(tb_route(sw), parent); 3488 ret = tb_port_pm_secondary_enable(up); 3489 if (ret) 3490 return ret; 3491 3492 return tb_port_pm_secondary_disable(down); 3493 } 3494 3495 static int __tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx) 3496 { 3497 struct tb_switch *parent = tb_switch_parent(sw); 3498 bool up_clx_support, down_clx_support; 3499 struct tb_port *up, *down; 3500 int ret; 3501 3502 if (!tb_switch_is_clx_supported(sw)) 3503 return 0; 3504 3505 /* 3506 * Enable CLx for host router's downstream port as part of the 3507 * downstream router enabling procedure. 3508 */ 3509 if (!tb_route(sw)) 3510 return 0; 3511 3512 /* Enable CLx only for first hop router (depth = 1) */ 3513 if (tb_route(parent)) 3514 return 0; 3515 3516 ret = tb_switch_pm_secondary_resolve(sw); 3517 if (ret) 3518 return ret; 3519 3520 up = tb_upstream_port(sw); 3521 down = tb_port_at(tb_route(sw), parent); 3522 3523 up_clx_support = tb_port_clx_supported(up, clx); 3524 down_clx_support = tb_port_clx_supported(down, clx); 3525 3526 tb_port_dbg(up, "%s %ssupported\n", tb_switch_clx_name(clx), 3527 up_clx_support ? "" : "not "); 3528 tb_port_dbg(down, "%s %ssupported\n", tb_switch_clx_name(clx), 3529 down_clx_support ? "" : "not "); 3530 3531 if (!up_clx_support || !down_clx_support) 3532 return -EOPNOTSUPP; 3533 3534 ret = tb_port_clx_enable(up, clx); 3535 if (ret) 3536 return ret; 3537 3538 ret = tb_port_clx_enable(down, clx); 3539 if (ret) { 3540 tb_port_clx_disable(up, clx); 3541 return ret; 3542 } 3543 3544 ret = tb_switch_mask_clx_objections(sw); 3545 if (ret) { 3546 tb_port_clx_disable(up, clx); 3547 tb_port_clx_disable(down, clx); 3548 return ret; 3549 } 3550 3551 sw->clx = clx; 3552 3553 tb_port_dbg(up, "%s enabled\n", tb_switch_clx_name(clx)); 3554 return 0; 3555 } 3556 3557 /** 3558 * tb_switch_enable_clx() - Enable CLx on upstream port of specified router 3559 * @sw: Router to enable CLx for 3560 * @clx: The CLx state to enable 3561 * 3562 * Enable CLx state only for first hop router. That is the most common 3563 * use-case, that is intended for better thermal management, and so helps 3564 * to improve performance. CLx is enabled only if both sides of the link 3565 * support CLx, and if both sides of the link are not configured as two 3566 * single lane links and only if the link is not inter-domain link. 
The 3567 * complete set of conditions is described in CM Guide 1.0 section 8.1. 3568 * 3569 * Return: Returns 0 on success or an error code on failure. 3570 */ 3571 int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx) 3572 { 3573 struct tb_switch *root_sw = sw->tb->root_switch; 3574 3575 if (!clx_enabled) 3576 return 0; 3577 3578 /* 3579 * CLx is not enabled and validated on Intel USB4 platforms before 3580 * Alder Lake. 3581 */ 3582 if (root_sw->generation < 4 || tb_switch_is_tiger_lake(root_sw)) 3583 return 0; 3584 3585 switch (clx) { 3586 case TB_CL1: 3587 /* CL0s and CL1 are enabled and supported together */ 3588 return __tb_switch_enable_clx(sw, clx); 3589 3590 default: 3591 return -EOPNOTSUPP; 3592 } 3593 } 3594 3595 static int __tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx) 3596 { 3597 struct tb_switch *parent = tb_switch_parent(sw); 3598 struct tb_port *up, *down; 3599 int ret; 3600 3601 if (!tb_switch_is_clx_supported(sw)) 3602 return 0; 3603 3604 /* 3605 * Disable CLx for host router's downstream port as part of the 3606 * downstream router enabling procedure. 3607 */ 3608 if (!tb_route(sw)) 3609 return 0; 3610 3611 /* Disable CLx only for first hop router (depth = 1) */ 3612 if (tb_route(parent)) 3613 return 0; 3614 3615 up = tb_upstream_port(sw); 3616 down = tb_port_at(tb_route(sw), parent); 3617 ret = tb_port_clx_disable(up, clx); 3618 if (ret) 3619 return ret; 3620 3621 ret = tb_port_clx_disable(down, clx); 3622 if (ret) 3623 return ret; 3624 3625 sw->clx = TB_CLX_DISABLE; 3626 3627 tb_port_dbg(up, "%s disabled\n", tb_switch_clx_name(clx)); 3628 return 0; 3629 } 3630 3631 /** 3632 * tb_switch_disable_clx() - Disable CLx on upstream port of specified router 3633 * @sw: Router to disable CLx for 3634 * @clx: The CLx state to disable 3635 * 3636 * Return: Returns 0 on success or an error code on failure. 3637 */ 3638 int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx) 3639 { 3640 if (!clx_enabled) 3641 return 0; 3642 3643 switch (clx) { 3644 case TB_CL1: 3645 /* CL0s and CL1 are enabled and supported together */ 3646 return __tb_switch_disable_clx(sw, clx); 3647 3648 default: 3649 return -EOPNOTSUPP; 3650 } 3651 } 3652 3653 /** 3654 * tb_switch_mask_clx_objections() - Mask CLx objections for a router 3655 * @sw: Router to mask objections for 3656 * 3657 * Mask the objections coming from the second depth routers in order to 3658 * stop these objections from interfering with the CLx states of the first 3659 * depth link. 3660 */ 3661 int tb_switch_mask_clx_objections(struct tb_switch *sw) 3662 { 3663 int up_port = sw->config.upstream_port_number; 3664 u32 offset, val[2], mask_obj, unmask_obj; 3665 int ret, i; 3666 3667 /* Only Titan Ridge of pre-USB4 devices support CLx states */ 3668 if (!tb_switch_is_titan_ridge(sw)) 3669 return 0; 3670 3671 if (!tb_route(sw)) 3672 return 0; 3673 3674 /* 3675 * In Titan Ridge there are only 2 dual-lane Thunderbolt ports: 3676 * Port A consists of lane adapters 1,2 and 3677 * Port B consists of lane adapters 3,4 3678 * If upstream port is A, (lanes are 1,2), we mask objections from 3679 * port B (lanes 3,4) and unmask objections from Port A and vice-versa. 
3680 */ 3681 if (up_port == 1) { 3682 mask_obj = TB_LOW_PWR_C0_PORT_B_MASK; 3683 unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK; 3684 offset = TB_LOW_PWR_C1_CL1; 3685 } else { 3686 mask_obj = TB_LOW_PWR_C1_PORT_A_MASK; 3687 unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK; 3688 offset = TB_LOW_PWR_C3_CL1; 3689 } 3690 3691 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, 3692 sw->cap_lp + offset, ARRAY_SIZE(val)); 3693 if (ret) 3694 return ret; 3695 3696 for (i = 0; i < ARRAY_SIZE(val); i++) { 3697 val[i] |= mask_obj; 3698 val[i] &= ~unmask_obj; 3699 } 3700 3701 return tb_sw_write(sw, &val, TB_CFG_SWITCH, 3702 sw->cap_lp + offset, ARRAY_SIZE(val)); 3703 } 3704 3705 /* 3706 * Can be used for read/write a specified PCIe bridge for any Thunderbolt 3 3707 * device. For now used only for Titan Ridge. 3708 */ 3709 static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge, 3710 unsigned int pcie_offset, u32 value) 3711 { 3712 u32 offset, command, val; 3713 int ret; 3714 3715 if (sw->generation != 3) 3716 return -EOPNOTSUPP; 3717 3718 offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA; 3719 ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1); 3720 if (ret) 3721 return ret; 3722 3723 command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK; 3724 command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT); 3725 command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK; 3726 command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL 3727 << TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT; 3728 command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK; 3729 3730 offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD; 3731 3732 ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1); 3733 if (ret) 3734 return ret; 3735 3736 ret = tb_switch_wait_for_bit(sw, offset, 3737 TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100); 3738 if (ret) 3739 return ret; 3740 3741 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1); 3742 if (ret) 3743 return ret; 3744 3745 if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK) 3746 return -ETIMEDOUT; 3747 3748 return 0; 3749 } 3750 3751 /** 3752 * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state 3753 * @sw: Router to enable PCIe L1 3754 * 3755 * For Titan Ridge switch to enter CLx state, its PCIe bridges shall enable 3756 * entry to PCIe L1 state. Shall be called after the upstream PCIe tunnel 3757 * was configured. Due to Intel platforms limitation, shall be called only 3758 * for first hop switch. 3759 */ 3760 int tb_switch_pcie_l1_enable(struct tb_switch *sw) 3761 { 3762 struct tb_switch *parent = tb_switch_parent(sw); 3763 int ret; 3764 3765 if (!tb_route(sw)) 3766 return 0; 3767 3768 if (!tb_switch_is_titan_ridge(sw)) 3769 return 0; 3770 3771 /* Enable PCIe L1 enable only for first hop router (depth = 1) */ 3772 if (tb_route(parent)) 3773 return 0; 3774 3775 /* Write to downstream PCIe bridge #5 aka Dn4 */ 3776 ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1); 3777 if (ret) 3778 return ret; 3779 3780 /* Write to Upstream PCIe bridge #0 aka Up0 */ 3781 return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1); 3782 } 3783 3784 /** 3785 * tb_switch_xhci_connect() - Connect internal xHCI 3786 * @sw: Router whose xHCI to connect 3787 * 3788 * Can be called to any router. For Alpine Ridge and Titan Ridge 3789 * performs special flows that bring the xHCI functional for any device 3790 * connected to the type-C port. Call only after PCIe tunnel has been 3791 * established. The function only does the connect if not done already 3792 * so can be called several times for the same router. 
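 * Returns %0 also for routers that need no special flow (anything other
 * than generation 3 hardware).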
3793 */ 3794 int tb_switch_xhci_connect(struct tb_switch *sw) 3795 { 3796 struct tb_port *port1, *port3; 3797 int ret; 3798 3799 if (sw->generation != 3) 3800 return 0; 3801 3802 port1 = &sw->ports[1]; 3803 port3 = &sw->ports[3]; 3804 3805 if (tb_switch_is_alpine_ridge(sw)) { 3806 bool usb_port1, usb_port3, xhci_port1, xhci_port3; 3807 3808 usb_port1 = tb_lc_is_usb_plugged(port1); 3809 usb_port3 = tb_lc_is_usb_plugged(port3); 3810 xhci_port1 = tb_lc_is_xhci_connected(port1); 3811 xhci_port3 = tb_lc_is_xhci_connected(port3); 3812 3813 /* Figure out correct USB port to connect */ 3814 if (usb_port1 && !xhci_port1) { 3815 ret = tb_lc_xhci_connect(port1); 3816 if (ret) 3817 return ret; 3818 } 3819 if (usb_port3 && !xhci_port3) 3820 return tb_lc_xhci_connect(port3); 3821 } else if (tb_switch_is_titan_ridge(sw)) { 3822 ret = tb_lc_xhci_connect(port1); 3823 if (ret) 3824 return ret; 3825 return tb_lc_xhci_connect(port3); 3826 } 3827 3828 return 0; 3829 } 3830 3831 /** 3832 * tb_switch_xhci_disconnect() - Disconnect internal xHCI 3833 * @sw: Router whose xHCI to disconnect 3834 * 3835 * The opposite of tb_switch_xhci_connect(). Disconnects xHCI on both 3836 * ports. 3837 */ 3838 void tb_switch_xhci_disconnect(struct tb_switch *sw) 3839 { 3840 if (sw->generation == 3) { 3841 struct tb_port *port1 = &sw->ports[1]; 3842 struct tb_port *port3 = &sw->ports[3]; 3843 3844 tb_lc_xhci_disconnect(port1); 3845 tb_port_dbg(port1, "disconnected xHCI\n"); 3846 tb_lc_xhci_disconnect(port3); 3847 tb_port_dbg(port3, "disconnected xHCI\n"); 3848 } 3849 } 3850