// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>

#include "tb.h"

/* Switch NVM support */

struct nvm_auth_status {
    struct list_head list;
    uuid_t uuid;
    u32 status;
};

/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);

static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
    struct nvm_auth_status *st;

    list_for_each_entry(st, &nvm_auth_status_cache, list) {
        if (uuid_equal(&st->uuid, sw->uuid))
            return st;
    }

    return NULL;
}

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
    struct nvm_auth_status *st;

    mutex_lock(&nvm_auth_status_lock);
    st = __nvm_get_auth_status(sw);
    mutex_unlock(&nvm_auth_status_lock);

    *status = st ? st->status : 0;
}

static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
    struct nvm_auth_status *st;

    if (WARN_ON(!sw->uuid))
        return;

    mutex_lock(&nvm_auth_status_lock);
    st = __nvm_get_auth_status(sw);

    if (!st) {
        st = kzalloc(sizeof(*st), GFP_KERNEL);
        if (!st)
            goto unlock;

        memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
        INIT_LIST_HEAD(&st->list);
        list_add_tail(&st->list, &nvm_auth_status_cache);
    }

    st->status = status;
unlock:
    mutex_unlock(&nvm_auth_status_lock);
}

static void nvm_clear_auth_status(const struct tb_switch *sw)
{
    struct nvm_auth_status *st;

    mutex_lock(&nvm_auth_status_lock);
    st = __nvm_get_auth_status(sw);
    if (st) {
        list_del(&st->list);
        kfree(st);
    }
    mutex_unlock(&nvm_auth_status_lock);
}

static int nvm_validate_and_write(struct tb_switch *sw)
{
    unsigned int image_size;
    const u8 *buf;
    int ret;

    ret = tb_nvm_validate(sw->nvm);
    if (ret)
        return ret;

    ret = tb_nvm_write_headers(sw->nvm);
    if (ret)
        return ret;

    buf = sw->nvm->buf_data_start;
    image_size = sw->nvm->buf_data_size;

    if (tb_switch_is_usb4(sw))
        ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
    else
        ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
    if (ret)
        return ret;

    sw->nvm->flushed = true;
    return 0;
}
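/*
 * Illustrative sketch (not part of the driver): the cache above is a
 * plain mutex-protected list keyed by router UUID. A hypothetical
 * debugging helper that counts the cached entries would follow the
 * same locking pattern as the accessors above.
 */
static unsigned int __maybe_unused example_count_auth_failures(void)
{
    struct nvm_auth_status *st;
    unsigned int count = 0;

    mutex_lock(&nvm_auth_status_lock);
    list_for_each_entry(st, &nvm_auth_status_cache, list)
        count++;
    mutex_unlock(&nvm_auth_status_lock);

    return count;
}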
static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
{
    int ret = 0;

    /*
     * Root switch NVM upgrade requires that we disconnect the
     * existing paths first (in case it is not in safe mode
     * already).
     */
    if (!sw->safe_mode) {
        u32 status;

        ret = tb_domain_disconnect_all_paths(sw->tb);
        if (ret)
            return ret;
        /*
         * The host controller goes away pretty soon after this if
         * everything goes well, so getting a timeout is expected.
         */
        ret = dma_port_flash_update_auth(sw->dma_port);
        if (!ret || ret == -ETIMEDOUT)
            return 0;

        /*
         * Any error from the update auth operation requires power
         * cycling of the host router.
         */
        tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
        if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
            nvm_set_auth_status(sw, status);
    }

    /*
     * From safe mode we can get out by just power cycling the
     * switch.
     */
    dma_port_power_cycle(sw->dma_port);
    return ret;
}

static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
{
    int ret, retries = 10;

    ret = dma_port_flash_update_auth(sw->dma_port);
    switch (ret) {
    case 0:
    case -ETIMEDOUT:
    case -EACCES:
    case -EINVAL:
        /* Power cycle is required */
        break;
    default:
        return ret;
    }

    /*
     * Poll here for the authentication status. It takes some time
     * for the device to respond (we get timeout for a while). Once
     * we get a response the device needs to be power cycled in order
     * for the new NVM to be taken into use.
     */
    do {
        u32 status;

        ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
        if (ret < 0 && ret != -ETIMEDOUT)
            return ret;
        if (ret > 0) {
            if (status) {
                tb_sw_warn(sw, "failed to authenticate NVM\n");
                nvm_set_auth_status(sw, status);
            }

            tb_sw_info(sw, "power cycling the switch now\n");
            dma_port_power_cycle(sw->dma_port);
            return 0;
        }

        msleep(500);
    } while (--retries);

    return -ETIMEDOUT;
}

static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
{
    struct pci_dev *root_port;

    /*
     * During host router NVM upgrade we should not allow the root
     * port to go into D3cold because some root ports cannot trigger
     * PME themselves. To be on the safe side keep the root port in
     * D0 during the whole upgrade process.
     */
    root_port = pcie_find_root_port(sw->tb->nhi->pdev);
    if (root_port)
        pm_runtime_get_noresume(&root_port->dev);
}

static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
{
    struct pci_dev *root_port;

    root_port = pcie_find_root_port(sw->tb->nhi->pdev);
    if (root_port)
        pm_runtime_put(&root_port->dev);
}

static inline bool nvm_readable(struct tb_switch *sw)
{
    if (tb_switch_is_usb4(sw)) {
        /*
         * USB4 devices must support NVM operations but it is
         * optional for hosts. Therefore we query the NVM sector
         * size here and if it is supported assume NVM
         * operations are implemented.
         */
        return usb4_switch_nvm_sector_size(sw) > 0;
    }

    /* Thunderbolt 2 and 3 devices support NVM through DMA port */
    return !!sw->dma_port;
}

static inline bool nvm_upgradeable(struct tb_switch *sw)
{
    if (sw->no_nvm_upgrade)
        return false;
    return nvm_readable(sw);
}

static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
{
    int ret;

    if (tb_switch_is_usb4(sw)) {
        if (auth_only) {
            ret = usb4_switch_nvm_set_offset(sw, 0);
            if (ret)
                return ret;
        }
        sw->nvm->authenticating = true;
        return usb4_switch_nvm_authenticate(sw);
    }
    if (auth_only)
        return -EOPNOTSUPP;

    sw->nvm->authenticating = true;
    if (!tb_route(sw)) {
        nvm_authenticate_start_dma_port(sw);
        ret = nvm_authenticate_host_dma_port(sw);
    } else {
        ret = nvm_authenticate_device_dma_port(sw);
    }

    return ret;
}

/**
 * tb_switch_nvm_read() - Read router NVM
 * @sw: Router whose NVM to read
 * @address: Start address on the NVM
 * @buf: Buffer where the read data is copied
 * @size: Size of the buffer in bytes
 *
 * Reads from router NVM and returns the requested data in @buf. Locking
 * is up to the caller. Returns %0 in case of success and negative errno
 * in case of failure.
 */
int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
                       size_t size)
{
    if (tb_switch_is_usb4(sw))
        return usb4_switch_nvm_read(sw, address, buf, size);
    return dma_port_flash_read(sw->dma_port, address, buf, size);
}

static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
    struct tb_nvm *nvm = priv;
    struct tb_switch *sw = tb_to_switch(nvm->dev);
    int ret;

    pm_runtime_get_sync(&sw->dev);

    if (!mutex_trylock(&sw->tb->lock)) {
        ret = restart_syscall();
        goto out;
    }

    ret = tb_switch_nvm_read(sw, offset, val, bytes);
    mutex_unlock(&sw->tb->lock);

out:
    pm_runtime_mark_last_busy(&sw->dev);
    pm_runtime_put_autosuspend(&sw->dev);

    return ret;
}

static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
    struct tb_nvm *nvm = priv;
    struct tb_switch *sw = tb_to_switch(nvm->dev);
    int ret;

    if (!mutex_trylock(&sw->tb->lock))
        return restart_syscall();

    /*
     * Since writing the NVM image might require some special steps,
     * for example when CSS headers are written, we cache the image
     * locally here and handle the special cases when the user asks
     * us to authenticate the image.
     */
    ret = tb_nvm_write_buf(nvm, offset, val, bytes);
    mutex_unlock(&sw->tb->lock);

    return ret;
}
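/*
 * Illustrative sketch (not part of the driver): reading the first
 * bytes of the NVM with tb_switch_nvm_read(). The caller is expected
 * to hold the domain lock; the buffer size here is arbitrary.
 */
static int __maybe_unused example_read_nvm_header(struct tb_switch *sw)
{
    u8 header[16];
    int ret;

    /* Assumes sw->tb->lock is already held by the caller */
    ret = tb_switch_nvm_read(sw, 0, header, sizeof(header));
    if (ret)
        return ret;

    tb_sw_dbg(sw, "NVM starts with %*phN\n", (int)sizeof(header), header);
    return 0;
}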
static int tb_switch_nvm_add(struct tb_switch *sw)
{
    struct tb_nvm *nvm;
    int ret;

    if (!nvm_readable(sw))
        return 0;

    nvm = tb_nvm_alloc(&sw->dev);
    if (IS_ERR(nvm)) {
        ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
        goto err_nvm;
    }

    ret = tb_nvm_read_version(nvm);
    if (ret)
        goto err_nvm;

    /*
     * If the switch is in safe-mode the only accessible portion of
     * the NVM is the non-active one where userspace is expected to
     * write new functional NVM.
     */
    if (!sw->safe_mode) {
        ret = tb_nvm_add_active(nvm, nvm_read);
        if (ret)
            goto err_nvm;
    }

    if (!sw->no_nvm_upgrade) {
        ret = tb_nvm_add_non_active(nvm, nvm_write);
        if (ret)
            goto err_nvm;
    }

    sw->nvm = nvm;
    return 0;

err_nvm:
    tb_sw_dbg(sw, "NVM upgrade disabled\n");
    sw->no_nvm_upgrade = true;
    if (!IS_ERR(nvm))
        tb_nvm_free(nvm);

    return ret;
}

static void tb_switch_nvm_remove(struct tb_switch *sw)
{
    struct tb_nvm *nvm;

    nvm = sw->nvm;
    sw->nvm = NULL;

    if (!nvm)
        return;

    /* Remove authentication status in case the switch is unplugged */
    if (!nvm->authenticating)
        nvm_clear_auth_status(sw);

    tb_nvm_free(nvm);
}

/* port utility functions */

static const char *tb_port_type(const struct tb_regs_port_header *port)
{
    switch (port->type >> 16) {
    case 0:
        switch ((u8)port->type) {
        case 0:
            return "Inactive";
        case 1:
            return "Port";
        case 2:
            return "NHI";
        default:
            return "unknown";
        }
    case 0x2:
        return "Ethernet";
    case 0x8:
        return "SATA";
    case 0xe:
        return "DP/HDMI";
    case 0x10:
        return "PCIe";
    case 0x20:
        return "USB";
    default:
        return "unknown";
    }
}

static void tb_dump_port(struct tb *tb, const struct tb_port *port)
{
    const struct tb_regs_port_header *regs = &port->config;

    tb_dbg(tb,
           " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
           regs->port_number, regs->vendor_id, regs->device_id,
           regs->revision, regs->thunderbolt_version, tb_port_type(regs),
           regs->type);
    tb_dbg(tb, "  Max hop id (in/out): %d/%d\n",
           regs->max_in_hop_id, regs->max_out_hop_id);
    tb_dbg(tb, "  Max counters: %d\n", regs->max_counters);
    tb_dbg(tb, "  NFC Credits: %#x\n", regs->nfc_credits);
    tb_dbg(tb, "  Credits (total/control): %u/%u\n", port->total_credits,
           port->ctl_credits);
}

/**
 * tb_port_state() - get connectedness state of a port
 * @port: the port to check
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
int tb_port_state(struct tb_port *port)
{
    struct tb_cap_phy phy;
    int res;

    if (port->cap_phy == 0) {
        tb_port_WARN(port, "does not have a PHY\n");
        return -EINVAL;
    }
    res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
    if (res)
        return res;
    return phy.state;
}
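/*
 * Illustrative sketch (not part of the driver): a hypothetical helper
 * that uses tb_port_state() to check whether a lane adapter currently
 * has an established link. Any state that is not one of the "link up"
 * states is treated as down here.
 */
static bool __maybe_unused example_port_link_is_up(struct tb_port *port)
{
    int state = tb_port_state(port);

    if (state < 0)
        return false;

    return state == TB_PORT_UP || state == TB_PORT_TX_CL0S ||
           state == TB_PORT_RX_CL0S || state == TB_PORT_CL1 ||
           state == TB_PORT_CL2;
}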
/**
 * tb_wait_for_port() - wait for a port to become ready
 * @port: Port to wait for
 * @wait_if_unplugged: Wait also when port is unplugged
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the link
 * has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
    int retries = 10;
    int state;

    if (!port->cap_phy) {
        tb_port_WARN(port, "does not have PHY\n");
        return -EINVAL;
    }
    if (tb_is_upstream_port(port)) {
        tb_port_WARN(port, "is the upstream port\n");
        return -EINVAL;
    }

    while (retries--) {
        state = tb_port_state(port);
        switch (state) {
        case TB_PORT_DISABLED:
            tb_port_dbg(port, "is disabled (state: 0)\n");
            return 0;

        case TB_PORT_UNPLUGGED:
            if (wait_if_unplugged) {
                /* used during resume */
                tb_port_dbg(port,
                            "is unplugged (state: 7), retrying...\n");
                msleep(100);
                break;
            }
            tb_port_dbg(port, "is unplugged (state: 7)\n");
            return 0;

        case TB_PORT_UP:
        case TB_PORT_TX_CL0S:
        case TB_PORT_RX_CL0S:
        case TB_PORT_CL1:
        case TB_PORT_CL2:
            tb_port_dbg(port, "is connected, link is up (state: %d)\n", state);
            return 1;

        default:
            if (state < 0)
                return state;

            /*
             * After plug-in the state is TB_PORT_CONNECTING. Give it some
             * time.
             */
            tb_port_dbg(port,
                        "is connected, link is not up (state: %d), retrying...\n",
                        state);
            msleep(100);
        }
    }
    tb_port_warn(port,
                 "failed to reach state TB_PORT_UP. Ignoring port...\n");
    return 0;
}

/**
 * tb_port_add_nfc_credits() - add/remove non-flow-controlled credits to port
 * @port: Port to add/remove NFC credits
 * @credits: Credits to add/remove
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
    u32 nfc_credits;

    if (credits == 0 || port->sw->is_unplugged)
        return 0;

    /*
     * USB4 restricts programming NFC buffers to lane adapters only
     * so skip other ports.
     */
    if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
        return 0;

    nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
    if (credits < 0)
        credits = max_t(int, -nfc_credits, credits);

    nfc_credits += credits;

    tb_port_dbg(port, "adding %d NFC credits to %u\n", credits,
                port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);

    port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
    port->config.nfc_credits |= nfc_credits;

    return tb_port_write(port, &port->config.nfc_credits,
                         TB_CFG_PORT, ADP_CS_4, 1);
}

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 * @port: Port whose counters to clear
 * @counter: Counter index to clear
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
    u32 zero[3] = { 0, 0, 0 };

    tb_port_dbg(port, "clearing counter %d\n", counter);
    return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}
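/*
 * Illustrative sketch (not part of the driver): NFC credits added with
 * tb_port_add_nfc_credits() are released by passing the same amount
 * negated, as in this hypothetical pairing.
 */
static int __maybe_unused example_borrow_nfc_credits(struct tb_port *port)
{
    int ret;

    ret = tb_port_add_nfc_credits(port, 2);
    if (ret)
        return ret;

    /* ... use the extra buffers for a non-flow-controlled path ... */

    return tb_port_add_nfc_credits(port, -2);
}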
/**
 * tb_port_unlock() - Unlock downstream port
 * @port: Port to unlock
 *
 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
 * downstream router accessible to the CM.
 */
int tb_port_unlock(struct tb_port *port)
{
    if (tb_switch_is_icm(port->sw))
        return 0;
    if (!tb_port_is_null(port))
        return -EINVAL;
    if (tb_switch_is_usb4(port->sw))
        return usb4_port_unlock(port);
    return 0;
}

static int __tb_port_enable(struct tb_port *port, bool enable)
{
    int ret;
    u32 phy;

    if (!tb_port_is_null(port))
        return -EINVAL;

    ret = tb_port_read(port, &phy, TB_CFG_PORT,
                       port->cap_phy + LANE_ADP_CS_1, 1);
    if (ret)
        return ret;

    if (enable)
        phy &= ~LANE_ADP_CS_1_LD;
    else
        phy |= LANE_ADP_CS_1_LD;

    ret = tb_port_write(port, &phy, TB_CFG_PORT,
                        port->cap_phy + LANE_ADP_CS_1, 1);
    if (ret)
        return ret;

    tb_port_dbg(port, "lane %s\n", str_enabled_disabled(enable));
    return 0;
}

/**
 * tb_port_enable() - Enable lane adapter
 * @port: Port to enable (can be %NULL)
 *
 * This is used to enable lane 0 and 1 adapters.
 */
int tb_port_enable(struct tb_port *port)
{
    return __tb_port_enable(port, true);
}

/**
 * tb_port_disable() - Disable lane adapter
 * @port: Port to disable (can be %NULL)
 *
 * This is used to disable lane 0 and 1 adapters.
 */
int tb_port_disable(struct tb_port *port)
{
    return __tb_port_enable(port, false);
}

static int tb_port_reset(struct tb_port *port)
{
    if (tb_switch_is_usb4(port->sw))
        return port->cap_usb4 ? usb4_port_reset(port) : 0;
    return tb_lc_reset_port(port);
}
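/*
 * Illustrative sketch (not part of the driver): a hypothetical way to
 * bounce a lane adapter using the helpers above. A real caller would
 * typically wait for the link state to settle in between.
 */
static int __maybe_unused example_bounce_lane(struct tb_port *port)
{
    int ret;

    ret = tb_port_disable(port);
    if (ret)
        return ret;

    msleep(100);    /* arbitrary settle delay for the sketch */

    return tb_port_enable(port);
}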
/*
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
    int res;
    int cap;

    INIT_LIST_HEAD(&port->list);

    /* Control adapter does not have configuration space */
    if (!port->port)
        return 0;

    res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
    if (res) {
        if (res == -ENODEV) {
            tb_dbg(port->sw->tb, " Port %d: not implemented\n",
                   port->port);
            port->disabled = true;
            return 0;
        }
        return res;
    }

    /* Port 0 is the switch itself and has no PHY. */
    if (port->config.type == TB_TYPE_PORT) {
        cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

        if (cap > 0)
            port->cap_phy = cap;
        else
            tb_port_WARN(port, "non switch port without a PHY\n");

        cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
        if (cap > 0)
            port->cap_usb4 = cap;

        /*
         * For USB4 ports the buffers allocated for the control path
         * can be read from the path config space. For legacy
         * devices we use a hard-coded value.
         */
        if (port->cap_usb4) {
            struct tb_regs_hop hop;

            if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
                port->ctl_credits = hop.initial_credits;
        }
        if (!port->ctl_credits)
            port->ctl_credits = 2;
    } else {
        cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
        if (cap > 0)
            port->cap_adap = cap;
    }

    port->total_credits =
        (port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
        ADP_CS_4_TOTAL_BUFFERS_SHIFT;

    tb_dump_port(port->sw->tb, port);
    return 0;
}

static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
                               int max_hopid)
{
    int port_max_hopid;
    struct ida *ida;

    if (in) {
        port_max_hopid = port->config.max_in_hop_id;
        ida = &port->in_hopids;
    } else {
        port_max_hopid = port->config.max_out_hop_id;
        ida = &port->out_hopids;
    }

    /*
     * The NHI can use HopIDs 1-max; for other adapters HopIDs 0-7
     * are reserved.
     */
    if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
        min_hopid = TB_PATH_MIN_HOPID;

    if (max_hopid < 0 || max_hopid > port_max_hopid)
        max_hopid = port_max_hopid;

    return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
}

/**
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
    return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
}

/**
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
    return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
}

/**
 * tb_port_release_in_hopid() - Release allocated input HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
    ida_simple_remove(&port->in_hopids, hopid);
}

/**
 * tb_port_release_out_hopid() - Release allocated output HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
    ida_simple_remove(&port->out_hopids, hopid);
}

static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
                                          const struct tb_switch *sw)
{
    u64 mask = (1ULL << parent->config.depth * 8) - 1;

    return (tb_route(parent) & mask) == (tb_route(sw) & mask);
}
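/*
 * Illustrative sketch (not part of the driver): allocating an input
 * HopID for a path hop and releasing it again. Passing a negative max
 * lets the port maximum apply.
 */
static int __maybe_unused example_reserve_in_hopid(struct tb_port *port)
{
    int hopid;

    hopid = tb_port_alloc_in_hopid(port, TB_PATH_MIN_HOPID, -1);
    if (hopid < 0)
        return hopid;

    /* ... program a path using @hopid here ... */

    tb_port_release_in_hopid(port, hopid);
    return 0;
}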
/**
 * tb_next_port_on_path() - Return next port for given port on a path
 * @start: Start port of the walk
 * @end: End port of the walk
 * @prev: Previous port (%NULL if this is the first)
 *
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If @prev is a dual
 * link port, the function follows that link and returns the other end
 * of that same link.
 *
 * If the @end port has been reached, return %NULL.
 *
 * Domain tb->lock must be held when this function is called.
 */
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
                                     struct tb_port *prev)
{
    struct tb_port *next;

    if (!prev)
        return start;

    if (prev->sw == end->sw) {
        if (prev == end)
            return NULL;
        return end;
    }

    if (tb_switch_is_reachable(prev->sw, end->sw)) {
        next = tb_port_at(tb_route(end->sw), prev->sw);
        /* Walk down the topology if next == prev */
        if (prev->remote &&
            (next == prev || next->dual_link_port == prev))
            next = prev->remote;
    } else {
        if (tb_is_upstream_port(prev)) {
            next = prev->remote;
        } else {
            next = tb_upstream_port(prev->sw);
            /*
             * Keep the same link if prev and next are both
             * dual link ports.
             */
            if (next->dual_link_port &&
                next->link_nr != prev->link_nr) {
                next = next->dual_link_port;
            }
        }
    }

    return next != prev ? next : NULL;
}

/**
 * tb_port_get_link_speed() - Get current link speed
 * @port: Port to check (USB4 or CIO)
 *
 * Returns link speed in Gb/s or negative errno in case of failure.
 */
int tb_port_get_link_speed(struct tb_port *port)
{
    u32 val, speed;
    int ret;

    if (!port->cap_phy)
        return -EINVAL;

    ret = tb_port_read(port, &val, TB_CFG_PORT,
                       port->cap_phy + LANE_ADP_CS_1, 1);
    if (ret)
        return ret;

    speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
        LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;

    switch (speed) {
    case LANE_ADP_CS_1_CURRENT_SPEED_GEN4:
        return 40;
    case LANE_ADP_CS_1_CURRENT_SPEED_GEN3:
        return 20;
    default:
        return 10;
    }
}

/**
 * tb_port_get_link_generation() - Returns link generation
 * @port: Lane adapter
 *
 * Returns link generation as number or negative errno in case of
 * failure. Does not distinguish between Thunderbolt 1 and Thunderbolt 2
 * links so for those always returns 2.
 */
int tb_port_get_link_generation(struct tb_port *port)
{
    int ret;

    ret = tb_port_get_link_speed(port);
    if (ret < 0)
        return ret;

    switch (ret) {
    case 40:
        return 4;
    case 20:
        return 3;
    default:
        return 2;
    }
}

static const char *width_name(enum tb_link_width width)
{
    switch (width) {
    case TB_LINK_WIDTH_SINGLE:
        return "symmetric, single lane";
    case TB_LINK_WIDTH_DUAL:
        return "symmetric, dual lanes";
    case TB_LINK_WIDTH_ASYM_TX:
        return "asymmetric, 3 transmitters, 1 receiver";
    case TB_LINK_WIDTH_ASYM_RX:
        return "asymmetric, 3 receivers, 1 transmitter";
    default:
        return "unknown";
    }
}
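/*
 * Illustrative sketch (not part of the driver): the usual way to walk
 * every port between two ends of a path with tb_next_port_on_path().
 * The domain lock must be held, as noted in the kernel-doc above.
 */
static void __maybe_unused example_walk_path(struct tb_port *src,
                                             struct tb_port *dst)
{
    struct tb_port *port = NULL;

    while ((port = tb_next_port_on_path(src, dst, port)))
        tb_port_dbg(port, "on path\n");
}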
/**
 * tb_port_get_link_width() - Get current link width
 * @port: Port to check (USB4 or CIO)
 *
 * Returns the link width as encoded in &enum tb_link_width or negative
 * errno in case of failure.
 */
int tb_port_get_link_width(struct tb_port *port)
{
    u32 val;
    int ret;

    if (!port->cap_phy)
        return -EINVAL;

    ret = tb_port_read(port, &val, TB_CFG_PORT,
                       port->cap_phy + LANE_ADP_CS_1, 1);
    if (ret)
        return ret;

    /* Matches the values in enum tb_link_width */
    return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
        LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
}

/**
 * tb_port_width_supported() - Is the given link width supported
 * @port: Port to check
 * @width: Widths to check (bitmask)
 *
 * Can be called for any lane adapter. Checks if given @width is
 * supported by the hardware and returns %true if it is.
 */
bool tb_port_width_supported(struct tb_port *port, unsigned int width)
{
    u32 phy, widths;
    int ret;

    if (!port->cap_phy)
        return false;

    if (width & (TB_LINK_WIDTH_ASYM_TX | TB_LINK_WIDTH_ASYM_RX)) {
        if (tb_port_get_link_generation(port) < 4 ||
            !usb4_port_asym_supported(port))
            return false;
    }

    ret = tb_port_read(port, &phy, TB_CFG_PORT,
                       port->cap_phy + LANE_ADP_CS_0, 1);
    if (ret)
        return false;

    /*
     * The field encoding is the same as &enum tb_link_width (which is
     * passed to @width).
     */
    widths = FIELD_GET(LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK, phy);
    return widths & width;
}

/**
 * tb_port_set_link_width() - Set target link width of the lane adapter
 * @port: Lane adapter
 * @width: Target link width
 *
 * Sets the target link width of the lane adapter to @width. Does not
 * enable/disable lane bonding. For that call tb_port_set_lane_bonding().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width)
{
    u32 val;
    int ret;

    if (!port->cap_phy)
        return -EINVAL;

    ret = tb_port_read(port, &val, TB_CFG_PORT,
                       port->cap_phy + LANE_ADP_CS_1, 1);
    if (ret)
        return ret;

    val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
    switch (width) {
    case TB_LINK_WIDTH_SINGLE:
        /* Gen 4 link cannot be single */
        if (tb_port_get_link_generation(port) >= 4)
            return -EOPNOTSUPP;
        val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
            LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
        break;

    case TB_LINK_WIDTH_DUAL:
        if (tb_port_get_link_generation(port) >= 4)
            return usb4_port_asym_set_link_width(port, width);
        val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
            LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
        break;

    case TB_LINK_WIDTH_ASYM_TX:
    case TB_LINK_WIDTH_ASYM_RX:
        return usb4_port_asym_set_link_width(port, width);

    default:
        return -EINVAL;
    }

    return tb_port_write(port, &val, TB_CFG_PORT,
                         port->cap_phy + LANE_ADP_CS_1, 1);
}
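/*
 * Illustrative sketch (not part of the driver): checking hardware
 * support with tb_port_width_supported() before requesting a new
 * target width, here asymmetric TX as an example.
 */
static int __maybe_unused example_request_asym_tx(struct tb_port *port)
{
    if (!tb_port_width_supported(port, TB_LINK_WIDTH_ASYM_TX))
        return -EOPNOTSUPP;

    return tb_port_set_link_width(port, TB_LINK_WIDTH_ASYM_TX);
}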
/**
 * tb_port_set_lane_bonding() - Enable/disable lane bonding
 * @port: Lane adapter
 * @bonding: enable/disable bonding
 *
 * Enables or disables lane bonding. This should be called after target
 * link width has been set (tb_port_set_link_width()). Note in most
 * cases one should use tb_port_lane_bonding_enable() instead to enable
 * lane bonding.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
static int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
{
    u32 val;
    int ret;

    if (!port->cap_phy)
        return -EINVAL;

    ret = tb_port_read(port, &val, TB_CFG_PORT,
                       port->cap_phy + LANE_ADP_CS_1, 1);
    if (ret)
        return ret;

    if (bonding)
        val |= LANE_ADP_CS_1_LB;
    else
        val &= ~LANE_ADP_CS_1_LB;

    return tb_port_write(port, &val, TB_CFG_PORT,
                         port->cap_phy + LANE_ADP_CS_1, 1);
}

/**
 * tb_port_lane_bonding_enable() - Enable bonding on port
 * @port: port to enable
 *
 * Enable bonding by setting the link width of the port and the other
 * port in case of dual link port. Does not wait for the link to
 * actually reach the bonded state so caller needs to call
 * tb_port_wait_for_link_width() before enabling any paths through the
 * link to make sure the link is in expected state.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_port_lane_bonding_enable(struct tb_port *port)
{
    enum tb_link_width width;
    int ret;

    /*
     * Enable lane bonding for both links if not already enabled by,
     * for example, the boot firmware.
     */
    width = tb_port_get_link_width(port);
    if (width == TB_LINK_WIDTH_SINGLE) {
        ret = tb_port_set_link_width(port, TB_LINK_WIDTH_DUAL);
        if (ret)
            goto err_lane0;
    }

    width = tb_port_get_link_width(port->dual_link_port);
    if (width == TB_LINK_WIDTH_SINGLE) {
        ret = tb_port_set_link_width(port->dual_link_port,
                                     TB_LINK_WIDTH_DUAL);
        if (ret)
            goto err_lane0;
    }

    /*
     * Only set bonding if the link was not already bonded. This
     * avoids the lane adapter re-entering the bonding state.
     */
    if (width == TB_LINK_WIDTH_SINGLE && !tb_is_upstream_port(port)) {
        ret = tb_port_set_lane_bonding(port, true);
        if (ret)
            goto err_lane1;
    }

    /*
     * When lane 0 bonding is set it will affect lane 1 too so
     * update both.
     */
    port->bonded = true;
    port->dual_link_port->bonded = true;

    return 0;

err_lane1:
    tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE);
err_lane0:
    tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE);

    return ret;
}

/**
 * tb_port_lane_bonding_disable() - Disable bonding on port
 * @port: port to disable
 *
 * Disable bonding by setting the link width of the port and the
 * other port in case of dual link port.
 */
void tb_port_lane_bonding_disable(struct tb_port *port)
{
    tb_port_set_lane_bonding(port, false);
    tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE);
    tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE);
    port->dual_link_port->bonded = false;
    port->bonded = false;
}
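/*
 * Illustrative sketch (not part of the driver): the bonding sequence
 * hinted at in the kernel-doc above. Bonding is requested first, then
 * the caller waits for the width change and re-reads the credits. The
 * 100 ms timeout here is an arbitrary value for the sketch.
 */
static int __maybe_unused example_bond_link(struct tb_port *port)
{
    int ret;

    ret = tb_port_lane_bonding_enable(port);
    if (ret)
        return ret;

    ret = tb_port_wait_for_link_width(port, TB_LINK_WIDTH_DUAL, 100);
    if (ret) {
        tb_port_lane_bonding_disable(port);
        return ret;
    }

    return tb_port_update_credits(port);
}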
/**
 * tb_port_wait_for_link_width() - Wait until link reaches specific width
 * @port: Port to wait for
 * @width: Expected link width (bitmask)
 * @timeout_msec: Timeout in ms how long to wait
 *
 * Should be used after both ends of the link have been bonded (or
 * bonding has been disabled) to wait until the link actually reaches
 * the expected state. Returns %-ETIMEDOUT if the width was not reached
 * within the given timeout, %0 if it was. Can be passed a mask of
 * expected widths and succeeds if any of the widths is reached.
 */
int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width,
                                int timeout_msec)
{
    ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
    int ret;

    /* Gen 4 link does not support single lane */
    if ((width & TB_LINK_WIDTH_SINGLE) &&
        tb_port_get_link_generation(port) >= 4)
        return -EOPNOTSUPP;

    do {
        ret = tb_port_get_link_width(port);
        if (ret < 0) {
            /*
             * Sometimes we get port locked error when
             * polling the lanes so we can ignore it and
             * retry.
             */
            if (ret != -EACCES)
                return ret;
        } else if (ret & width) {
            return 0;
        }

        usleep_range(1000, 2000);
    } while (ktime_before(ktime_get(), timeout));

    return -ETIMEDOUT;
}

static int tb_port_do_update_credits(struct tb_port *port)
{
    u32 nfc_credits;
    int ret;

    ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
    if (ret)
        return ret;

    if (nfc_credits != port->config.nfc_credits) {
        u32 total;

        total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
            ADP_CS_4_TOTAL_BUFFERS_SHIFT;

        tb_port_dbg(port, "total credits changed %u -> %u\n",
                    port->total_credits, total);

        port->config.nfc_credits = nfc_credits;
        port->total_credits = total;
    }

    return 0;
}

/**
 * tb_port_update_credits() - Re-read port total credits
 * @port: Port to update
 *
 * After the link is bonded (or bonding was disabled) the port total
 * credits may change, so this function needs to be called to re-read
 * the credits. Updates also the second lane adapter.
 */
int tb_port_update_credits(struct tb_port *port)
{
    int ret;

    ret = tb_port_do_update_credits(port);
    if (ret)
        return ret;

    if (!port->dual_link_port)
        return 0;
    return tb_port_do_update_credits(port->dual_link_port);
}

static int tb_port_start_lane_initialization(struct tb_port *port)
{
    int ret;

    if (tb_switch_is_usb4(port->sw))
        return 0;

    ret = tb_lc_start_lane_initialization(port);
    return ret == -EINVAL ? 0 : ret;
}

/*
 * Returns true if the port had something (router, XDomain) connected
 * before suspend.
 */
static bool tb_port_resume(struct tb_port *port)
{
    bool has_remote = tb_port_has_remote(port);

    if (port->usb4) {
        usb4_port_device_resume(port->usb4);
    } else if (!has_remote) {
        /*
         * For disconnected downstream lane adapters start lane
         * initialization now so we detect future connects.
         *
         * For XDomain start the lane initialization now so the
         * link gets re-established.
         *
         * This is only needed for non-USB4 ports.
         */
        if (!tb_is_upstream_port(port) || port->xdomain)
            tb_port_start_lane_initialization(port);
    }

    return has_remote || port->xdomain;
}

/**
 * tb_port_is_enabled() - Is the adapter port enabled
 * @port: Port to check
 */
bool tb_port_is_enabled(struct tb_port *port)
{
    switch (port->config.type) {
    case TB_TYPE_PCIE_UP:
    case TB_TYPE_PCIE_DOWN:
        return tb_pci_port_is_enabled(port);

    case TB_TYPE_DP_HDMI_IN:
    case TB_TYPE_DP_HDMI_OUT:
        return tb_dp_port_is_enabled(port);

    case TB_TYPE_USB3_UP:
    case TB_TYPE_USB3_DOWN:
        return tb_usb3_port_is_enabled(port);

    default:
        return false;
    }
}

/**
 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
 * @port: USB3 adapter port to check
 */
bool tb_usb3_port_is_enabled(struct tb_port *port)
{
    u32 data;

    if (tb_port_read(port, &data, TB_CFG_PORT,
                     port->cap_adap + ADP_USB3_CS_0, 1))
        return false;

    return !!(data & ADP_USB3_CS_0_PE);
}

/**
 * tb_usb3_port_enable() - Enable USB3 adapter port
 * @port: USB3 adapter port to enable
 * @enable: Enable/disable the USB3 adapter
 */
int tb_usb3_port_enable(struct tb_port *port, bool enable)
{
    u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
                      : ADP_USB3_CS_0_V;

    if (!port->cap_adap)
        return -ENXIO;
    return tb_port_write(port, &word, TB_CFG_PORT,
                         port->cap_adap + ADP_USB3_CS_0, 1);
}

/**
 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
 * @port: PCIe port to check
 */
bool tb_pci_port_is_enabled(struct tb_port *port)
{
    u32 data;

    if (tb_port_read(port, &data, TB_CFG_PORT,
                     port->cap_adap + ADP_PCIE_CS_0, 1))
        return false;

    return !!(data & ADP_PCIE_CS_0_PE);
}

/**
 * tb_pci_port_enable() - Enable PCIe adapter port
 * @port: PCIe port to enable
 * @enable: Enable/disable the PCIe adapter
 */
int tb_pci_port_enable(struct tb_port *port, bool enable)
{
    u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;

    if (!port->cap_adap)
        return -ENXIO;
    return tb_port_write(port, &word, TB_CFG_PORT,
                         port->cap_adap + ADP_PCIE_CS_0, 1);
}

/**
 * tb_dp_port_hpd_is_active() - Is HPD already active
 * @port: DP out port to check
 *
 * Checks if the DP OUT adapter port has the HDP bit already set.
 */
int tb_dp_port_hpd_is_active(struct tb_port *port)
{
    u32 data;
    int ret;

    ret = tb_port_read(port, &data, TB_CFG_PORT,
                       port->cap_adap + ADP_DP_CS_2, 1);
    if (ret)
        return ret;

    return !!(data & ADP_DP_CS_2_HDP);
}
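/*
 * Illustrative sketch (not part of the driver): walking all adapters
 * of a router and disabling any enabled PCIe adapters, combining
 * tb_port_is_enabled() with tb_pci_port_enable().
 */
static void __maybe_unused example_disable_pcie_adapters(struct tb_switch *sw)
{
    struct tb_port *port;

    tb_switch_for_each_port(sw, port) {
        if (!tb_port_is_pcie_down(port) && !tb_port_is_pcie_up(port))
            continue;
        if (tb_port_is_enabled(port))
            tb_pci_port_enable(port, false);
    }
}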
/**
 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
 * @port: Port to clear HPD
 *
 * If the DP IN port has HDP set, this function can be used to clear it.
 */
int tb_dp_port_hpd_clear(struct tb_port *port)
{
    u32 data;
    int ret;

    ret = tb_port_read(port, &data, TB_CFG_PORT,
                       port->cap_adap + ADP_DP_CS_3, 1);
    if (ret)
        return ret;

    data |= ADP_DP_CS_3_HDPC;
    return tb_port_write(port, &data, TB_CFG_PORT,
                         port->cap_adap + ADP_DP_CS_3, 1);
}

/**
 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
 * @port: DP IN/OUT port to set hops
 * @video: Video Hop ID
 * @aux_tx: AUX TX Hop ID
 * @aux_rx: AUX RX Hop ID
 *
 * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
 * router DP adapters too but does not program the values as the fields
 * are read-only.
 */
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
                        unsigned int aux_tx, unsigned int aux_rx)
{
    u32 data[2];
    int ret;

    if (tb_switch_is_usb4(port->sw))
        return 0;

    ret = tb_port_read(port, data, TB_CFG_PORT,
                       port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
    if (ret)
        return ret;

    data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
    data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
    data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;

    data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
        ADP_DP_CS_0_VIDEO_HOPID_MASK;
    data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
    data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
        ADP_DP_CS_1_AUX_RX_HOPID_MASK;

    return tb_port_write(port, data, TB_CFG_PORT,
                         port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/**
 * tb_dp_port_is_enabled() - Is DP adapter port enabled
 * @port: DP adapter port to check
 */
bool tb_dp_port_is_enabled(struct tb_port *port)
{
    u32 data[2];

    if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
                     ARRAY_SIZE(data)))
        return false;

    return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
}
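/*
 * Illustrative sketch (not part of the driver): the order implied by
 * the helpers above when setting up a non-USB4 DP adapter. Hop IDs are
 * programmed first and the paths are enabled afterwards; the Hop ID
 * values used here are arbitrary placeholders.
 */
static int __maybe_unused example_dp_adapter_setup(struct tb_port *port)
{
    int ret;

    ret = tb_dp_port_set_hops(port, 8, 9, 9);
    if (ret)
        return ret;

    return tb_dp_port_enable(port, true);
}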
/**
 * tb_dp_port_enable() - Enables/disables DP paths of a port
 * @port: DP IN/OUT port
 * @enable: Enable/disable DP path
 *
 * Once Hop IDs are programmed DP paths can be enabled or disabled by
 * calling this function.
 */
int tb_dp_port_enable(struct tb_port *port, bool enable)
{
    u32 data[2];
    int ret;

    ret = tb_port_read(port, data, TB_CFG_PORT,
                       port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
    if (ret)
        return ret;

    if (enable)
        data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
    else
        data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);

    return tb_port_write(port, data, TB_CFG_PORT,
                         port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/* switch utility functions */

static const char *tb_switch_generation_name(const struct tb_switch *sw)
{
    switch (sw->generation) {
    case 1:
        return "Thunderbolt 1";
    case 2:
        return "Thunderbolt 2";
    case 3:
        return "Thunderbolt 3";
    case 4:
        return "USB4";
    default:
        return "Unknown";
    }
}

static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
{
    const struct tb_regs_switch_header *regs = &sw->config;

    tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
           tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
           regs->revision, regs->thunderbolt_version);
    tb_dbg(tb, "  Max Port Number: %d\n", regs->max_port_number);
    tb_dbg(tb, "  Config:\n");
    tb_dbg(tb,
           "   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
           regs->upstream_port_number, regs->depth,
           (((u64)regs->route_hi) << 32) | regs->route_lo,
           regs->enabled, regs->plug_events_delay);
    tb_dbg(tb, "   unknown1: %#x unknown4: %#x\n",
           regs->__unknown1, regs->__unknown4);
}

static int tb_switch_reset_host(struct tb_switch *sw)
{
    if (sw->generation > 1) {
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
            int i, ret;

            /*
             * For lane adapters we issue downstream port
             * reset and clear up path config spaces.
             *
             * For protocol adapters we disable the path and
             * clear path config space one by one (from 8 to
             * Max Input HopID of the adapter).
             */
            if (tb_port_is_null(port) && !tb_is_upstream_port(port)) {
                ret = tb_port_reset(port);
                if (ret)
                    return ret;
            } else if (tb_port_is_usb3_down(port) ||
                       tb_port_is_usb3_up(port)) {
                tb_usb3_port_enable(port, false);
            } else if (tb_port_is_dpin(port) ||
                       tb_port_is_dpout(port)) {
                tb_dp_port_enable(port, false);
            } else if (tb_port_is_pcie_down(port) ||
                       tb_port_is_pcie_up(port)) {
                tb_pci_port_enable(port, false);
            } else {
                continue;
            }

            /* Cleanup path config space of protocol adapter */
            for (i = TB_PATH_MIN_HOPID;
                 i <= port->config.max_in_hop_id; i++) {
                ret = tb_path_deactivate_hop(port, i);
                if (ret)
                    return ret;
            }
        }
    } else {
        struct tb_cfg_result res;

        /* Thunderbolt 1 uses the "reset" config space packet */
        res.err = tb_sw_write(sw, ((u32 *)&sw->config) + 2,
                              TB_CFG_SWITCH, 2, 2);
        if (res.err)
            return res.err;
        res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
        if (res.err > 0)
            return -EIO;
        else if (res.err < 0)
            return res.err;
    }

    return 0;
}

static int tb_switch_reset_device(struct tb_switch *sw)
{
    return tb_port_reset(tb_switch_downstream_port(sw));
}

static bool tb_switch_enumerated(struct tb_switch *sw)
{
    u32 val;
    int ret;

    /*
     * Read directly from the hardware because we use this also
     * during system sleep where sw->config.enabled is already set
     * by us.
     */
    ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_3, 1);
    if (ret)
        return false;

    return !!(val & ROUTER_CS_3_V);
}

/**
 * tb_switch_reset() - Perform reset to the router
 * @sw: Router to reset
 *
 * Issues reset to the router @sw. Can be used for any router. For host
 * routers, resets all the downstream ports and cleans up path config
 * spaces accordingly. For device routers issues a downstream port reset
 * through the parent router, so as a side effect there will be an
 * unplug soon after this is finished.
 *
 * If the router is not enumerated does nothing.
 *
 * Returns %0 on success or negative errno in case of failure.
 */
int tb_switch_reset(struct tb_switch *sw)
{
    int ret;

    /*
     * We cannot access the port config spaces unless the router is
     * already enumerated. If the router is not enumerated it is
     * equal to being reset so we can skip that here.
     */
    if (!tb_switch_enumerated(sw))
        return 0;

    tb_sw_dbg(sw, "resetting\n");

    if (tb_route(sw))
        ret = tb_switch_reset_device(sw);
    else
        ret = tb_switch_reset_host(sw);

    if (ret)
        tb_sw_warn(sw, "failed to reset\n");

    return ret;
}

/**
 * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
 * @sw: Router to read the offset value from
 * @offset: Offset in the router config space to read from
 * @bit: Bit mask in the offset to wait for
 * @value: Value of the bits to wait for
 * @timeout_msec: Timeout in ms how long to wait
 *
 * Wait until the specified bits in the specified offset reach the given
 * value. Returns %0 in case of success, %-ETIMEDOUT if the @value was not
 * reached within the given timeout or a negative errno in case of failure.
 */
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
                           u32 value, int timeout_msec)
{
    ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

    do {
        u32 val;
        int ret;

        ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
        if (ret)
            return ret;

        if ((val & bit) == value)
            return 0;

        usleep_range(50, 100);
    } while (ktime_before(ktime_get(), timeout));

    return -ETIMEDOUT;
}

/*
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
    u32 data;
    int res;

    if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
        return 0;

    sw->config.plug_events_delay = 0xff;
    res = tb_sw_write(sw, ((u32 *)&sw->config) + 4, TB_CFG_SWITCH, 4, 1);
    if (res)
        return res;

    res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
    if (res)
        return res;

    if (active) {
        data = data & 0xFFFFFF83;
        switch (sw->config.device_id) {
        case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
        case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
        case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
            break;
        default:
            /*
             * Skip Alpine Ridge, it needs to have vendor
             * specific USB hotplug event enabled for the
             * internal xHCI to work.
             */
            if (!tb_switch_is_alpine_ridge(sw))
                data |= TB_PLUG_EVENTS_USB_DISABLE;
        }
    } else {
        data = data | 0x7c;
    }
    return tb_sw_write(sw, &data, TB_CFG_SWITCH,
                       sw->cap_plug_events + 1, 1);
}

static ssize_t authorized_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
    struct tb_switch *sw = tb_to_switch(dev);

    return sysfs_emit(buf, "%u\n", sw->authorized);
}

static int disapprove_switch(struct device *dev, void *not_used)
{
    char *envp[] = { "AUTHORIZED=0", NULL };
    struct tb_switch *sw;

    sw = tb_to_switch(dev);
    if (sw && sw->authorized) {
        int ret;

        /* First children */
        ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
        if (ret)
            return ret;

        ret = tb_domain_disapprove_switch(sw->tb, sw);
        if (ret)
            return ret;

        sw->authorized = 0;
        kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
    }

    return 0;
}
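/*
 * Illustrative sketch (not part of the driver): polling a router
 * config space bit with tb_switch_wait_for_bit(), here the valid bit
 * that tb_switch_enumerated() checks. The 500 ms timeout is an
 * arbitrary value for the sketch.
 */
static int __maybe_unused example_wait_router_valid(struct tb_switch *sw)
{
    return tb_switch_wait_for_bit(sw, ROUTER_CS_3, ROUTER_CS_3_V,
                                  ROUTER_CS_3_V, 500);
}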
static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
    char envp_string[13];
    int ret = -EINVAL;
    char *envp[] = { envp_string, NULL };

    if (!mutex_trylock(&sw->tb->lock))
        return restart_syscall();

    if (!!sw->authorized == !!val)
        goto unlock;

    switch (val) {
    /* Disapprove switch */
    case 0:
        if (tb_route(sw)) {
            ret = disapprove_switch(&sw->dev, NULL);
            goto unlock;
        }
        break;

    /* Approve switch */
    case 1:
        if (sw->key)
            ret = tb_domain_approve_switch_key(sw->tb, sw);
        else
            ret = tb_domain_approve_switch(sw->tb, sw);
        break;

    /* Challenge switch */
    case 2:
        if (sw->key)
            ret = tb_domain_challenge_switch_key(sw->tb, sw);
        break;

    default:
        break;
    }

    if (!ret) {
        sw->authorized = val;
        /*
         * Notify the status change to userspace, informing it of the
         * new value of /sys/bus/thunderbolt/devices/.../authorized.
         */
        sprintf(envp_string, "AUTHORIZED=%u", sw->authorized);
        kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
    }

unlock:
    mutex_unlock(&sw->tb->lock);
    return ret;
}

static ssize_t authorized_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
    struct tb_switch *sw = tb_to_switch(dev);
    unsigned int val;
    ssize_t ret;

    ret = kstrtouint(buf, 0, &val);
    if (ret)
        return ret;
    if (val > 2)
        return -EINVAL;

    pm_runtime_get_sync(&sw->dev);
    ret = tb_switch_set_authorized(sw, val);
    pm_runtime_mark_last_busy(&sw->dev);
    pm_runtime_put_autosuspend(&sw->dev);

    return ret ? ret : count;
}
static DEVICE_ATTR_RW(authorized);

static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
    struct tb_switch *sw = tb_to_switch(dev);

    return sysfs_emit(buf, "%u\n", sw->boot);
}
static DEVICE_ATTR_RO(boot);

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
    struct tb_switch *sw = tb_to_switch(dev);

    return sysfs_emit(buf, "%#x\n", sw->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
    struct tb_switch *sw = tb_to_switch(dev);

    return sysfs_emit(buf, "%s\n", sw->device_name ?: "");
}
static DEVICE_ATTR_RO(device_name);

static ssize_t
generation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
    struct tb_switch *sw = tb_to_switch(dev);

    return sysfs_emit(buf, "%u\n", sw->generation);
}
static DEVICE_ATTR_RO(generation);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
    struct tb_switch *sw = tb_to_switch(dev);
    ssize_t ret;

    if (!mutex_trylock(&sw->tb->lock))
        return restart_syscall();

    if (sw->key)
        ret = sysfs_emit(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
    else
        ret = sysfs_emit(buf, "\n");

    mutex_unlock(&sw->tb->lock);
    return ret;
}

static ssize_t key_store(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t count)
{
    struct tb_switch *sw = tb_to_switch(dev);
    u8 key[TB_SWITCH_KEY_SIZE];
    ssize_t ret = count;
    bool clear = false;

    if (!strcmp(buf, "\n"))
        clear = true;
    else if (hex2bin(key, buf, sizeof(key)))
        return -EINVAL;

    if (!mutex_trylock(&sw->tb->lock))
        return restart_syscall();

    if (sw->authorized) {
        ret = -EBUSY;
    } else {
        kfree(sw->key);
        if (clear) {
            sw->key = NULL;
        } else {
            sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
            if (!sw->key)
                ret = -ENOMEM;
        }
    }

    mutex_unlock(&sw->tb->lock);
    return ret;
}
static DEVICE_ATTR(key, 0600, key_show, key_store);

static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
    struct tb_switch *sw = tb_to_switch(dev);

    return sysfs_emit(buf, "%u.0 Gb/s\n", sw->link_speed);
}

/*
 * Currently all lanes must run at the same speed but we expose here
 * both directions to allow possible asymmetric links in the future.
 */
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);

static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
    struct tb_switch *sw = tb_to_switch(dev);
    unsigned int width;

    switch (sw->link_width) {
    case TB_LINK_WIDTH_SINGLE:
    case TB_LINK_WIDTH_ASYM_TX:
        width = 1;
        break;
    case TB_LINK_WIDTH_DUAL:
        width = 2;
        break;
    case TB_LINK_WIDTH_ASYM_RX:
        width = 3;
        break;
    default:
        WARN_ON_ONCE(1);
        return -EINVAL;
    }

    return sysfs_emit(buf, "%u\n", width);
}
static DEVICE_ATTR(rx_lanes, 0444, rx_lanes_show, NULL);

static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
    struct tb_switch *sw = tb_to_switch(dev);
    unsigned int width;

    switch (sw->link_width) {
    case TB_LINK_WIDTH_SINGLE:
    case TB_LINK_WIDTH_ASYM_RX:
        width = 1;
        break;
    case TB_LINK_WIDTH_DUAL:
        width = 2;
        break;
    case TB_LINK_WIDTH_ASYM_TX:
        width = 3;
        break;
    default:
        WARN_ON_ONCE(1);
        return -EINVAL;
    }

    return sysfs_emit(buf, "%u\n", width);
}
static DEVICE_ATTR(tx_lanes, 0444, tx_lanes_show, NULL);

static ssize_t nvm_authenticate_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
    struct tb_switch *sw = tb_to_switch(dev);
    u32 status;

    nvm_get_auth_status(sw, &status);
    return sysfs_emit(buf, "%#x\n", status);
}

static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
                                      bool disconnect)
{
    struct tb_switch *sw = tb_to_switch(dev);
    int val, ret;

    pm_runtime_get_sync(&sw->dev);

    if (!mutex_trylock(&sw->tb->lock)) {
        ret = restart_syscall();
        goto exit_rpm;
    }

    if (sw->no_nvm_upgrade) {
        ret = -EOPNOTSUPP;
        goto exit_unlock;
    }

    /* If NVMem devices are not yet added */
    if (!sw->nvm) {
        ret = -EAGAIN;
        goto exit_unlock;
    }

    ret = kstrtoint(buf, 10, &val);
    if (ret)
        goto exit_unlock;

    /* Always clear the authentication status */
    nvm_clear_auth_status(sw);

    if (val > 0) {
        if (val == AUTHENTICATE_ONLY) {
            if (disconnect)
                ret = -EINVAL;
            else
                ret = nvm_authenticate(sw, true);
        } else {
            if (!sw->nvm->flushed) {
                if (!sw->nvm->buf) {
                    ret = -EINVAL;
                    goto exit_unlock;
                }

                ret = nvm_validate_and_write(sw);
                if (ret || val == WRITE_ONLY)
                    goto exit_unlock;
            }
            if (val == WRITE_AND_AUTHENTICATE) {
                if (disconnect)
                    ret = tb_lc_force_power(sw);
                else
                    ret = nvm_authenticate(sw, false);
            }
        }
    }

exit_unlock:
    mutex_unlock(&sw->tb->lock);
exit_rpm:
    pm_runtime_mark_last_busy(&sw->dev);
    pm_runtime_put_autosuspend(&sw->dev);

    return ret;
}

static ssize_t nvm_authenticate_store(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf, size_t count)
{
    int ret = nvm_authenticate_sysfs(dev, buf, false);

    if (ret)
        return ret;
    return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
                                                   struct device_attribute *attr,
                                                   char *buf)
{
    return nvm_authenticate_show(dev, attr, buf);
}

static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
                                                    struct device_attribute *attr,
                                                    const char *buf, size_t count)
{
    int ret;

    ret = nvm_authenticate_sysfs(dev, buf, true);
    return ret ? ret : count;
}
static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);

static ssize_t nvm_version_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
    struct tb_switch *sw = tb_to_switch(dev);
    int ret;

    if (!mutex_trylock(&sw->tb->lock))
        return restart_syscall();

    if (sw->safe_mode)
        ret = -ENODATA;
    else if (!sw->nvm)
        ret = -EAGAIN;
    else
        ret = sysfs_emit(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);

    mutex_unlock(&sw->tb->lock);

    return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
    struct tb_switch *sw = tb_to_switch(dev);

    return sysfs_emit(buf, "%#x\n", sw->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
    struct tb_switch *sw = tb_to_switch(dev);

    return sysfs_emit(buf, "%s\n", sw->vendor_name ?: "");
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
    struct tb_switch *sw = tb_to_switch(dev);

    return sysfs_emit(buf, "%pUb\n", sw->uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *switch_attrs[] = {
    &dev_attr_authorized.attr,
    &dev_attr_boot.attr,
    &dev_attr_device.attr,
    &dev_attr_device_name.attr,
    &dev_attr_generation.attr,
    &dev_attr_key.attr,
    &dev_attr_nvm_authenticate.attr,
    &dev_attr_nvm_authenticate_on_disconnect.attr,
    &dev_attr_nvm_version.attr,
    &dev_attr_rx_speed.attr,
    &dev_attr_rx_lanes.attr,
    &dev_attr_tx_speed.attr,
    &dev_attr_tx_lanes.attr,
    &dev_attr_vendor.attr,
    &dev_attr_vendor_name.attr,
    &dev_attr_unique_id.attr,
    NULL,
};

static umode_t switch_attr_is_visible(struct kobject *kobj,
                                      struct attribute *attr, int n)
{
    struct device *dev = kobj_to_dev(kobj);
    struct tb_switch *sw = tb_to_switch(dev);

    if (attr == &dev_attr_authorized.attr) {
        if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
            sw->tb->security_level == TB_SECURITY_DPONLY)
            return 0;
    } else if (attr == &dev_attr_device.attr) {
        if (!sw->device)
            return 0;
    } else if (attr == &dev_attr_device_name.attr) {
        if (!sw->device_name)
            return 0;
    } else if (attr == &dev_attr_vendor.attr) {
        if (!sw->vendor)
            return 0;
    } else if (attr == &dev_attr_vendor_name.attr) {
        if (!sw->vendor_name)
            return 0;
    } else if (attr == &dev_attr_key.attr) {
        if (tb_route(sw) &&
            sw->tb->security_level == TB_SECURITY_SECURE &&
            sw->security_level == TB_SECURITY_SECURE)
            return attr->mode;
        return 0;
    } else if (attr == &dev_attr_rx_speed.attr ||
               attr == &dev_attr_rx_lanes.attr ||
               attr == &dev_attr_tx_speed.attr ||
               attr == &dev_attr_tx_lanes.attr) {
        if (tb_route(sw))
            return attr->mode;
        return 0;
    } else if (attr == &dev_attr_nvm_authenticate.attr) {
		if (nvm_upgradeable(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_version.attr) {
		if (nvm_readable(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_boot.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
		if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
			return attr->mode;
		return 0;
	}

	return sw->safe_mode ? 0 : attr->mode;
}

static const struct attribute_group switch_group = {
	.is_visible = switch_attr_is_visible,
	.attrs = switch_attrs,
};

static const struct attribute_group *switch_groups[] = {
	&switch_group,
	NULL,
};

static void tb_switch_release(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	struct tb_port *port;

	dma_port_free(sw->dma_port);

	tb_switch_for_each_port(sw, port) {
		ida_destroy(&port->in_hopids);
		ida_destroy(&port->out_hopids);
	}

	kfree(sw->uuid);
	kfree(sw->device_name);
	kfree(sw->vendor_name);
	kfree(sw->ports);
	kfree(sw->drom);
	kfree(sw->key);
	kfree(sw);
}

static int tb_switch_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct tb_switch *sw = tb_to_switch(dev);
	const char *type;

	if (tb_switch_is_usb4(sw)) {
		if (add_uevent_var(env, "USB4_VERSION=%u.0",
				   usb4_switch_version(sw)))
			return -ENOMEM;
	}

	if (!tb_route(sw)) {
		type = "host";
	} else {
		const struct tb_port *port;
		bool hub = false;

		/* Device is a hub if it has any downstream ports */
		tb_switch_for_each_port(sw, port) {
			if (!port->disabled && !tb_is_upstream_port(port) &&
			    tb_port_is_null(port)) {
				hub = true;
				break;
			}
		}

		type = hub ? "hub" : "device";
	}

	if (add_uevent_var(env, "USB4_TYPE=%s", type))
		return -ENOMEM;
	return 0;
}

/*
 * Currently only need to provide the callbacks. Everything else is handled
 * in the connection manager.
 */
static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_suspend_switch)
		return cm_ops->runtime_suspend_switch(sw);

	return 0;
}

static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_resume_switch)
		return cm_ops->runtime_resume_switch(sw);
	return 0;
}

static const struct dev_pm_ops tb_switch_pm_ops = {
	SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
			   NULL)
};

struct device_type tb_switch_type = {
	.name = "thunderbolt_device",
	.release = tb_switch_release,
	.uevent = tb_switch_uevent,
	.pm = &tb_switch_pm_ops,
};

static int tb_switch_get_generation(struct tb_switch *sw)
{
	if (tb_switch_is_usb4(sw))
		return 4;

	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
		case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
		case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
		case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
			return 1;

		case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
			return 2;

		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ICL_NHI0:
		case PCI_DEVICE_ID_INTEL_ICL_NHI1:
			return 3;
		}
	}

	/*
	 * For unknown switches assume generation to be 1 to be on the
	 * safe side.
	 */
	tb_sw_warn(sw, "unsupported switch device id %#x\n",
		   sw->config.device_id);
	return 1;
}

static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
{
	int max_depth;

	if (tb_switch_is_usb4(sw) ||
	    (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
		max_depth = USB4_SWITCH_MAX_DEPTH;
	else
		max_depth = TB_SWITCH_MAX_DEPTH;

	return depth > max_depth;
}

/**
 * tb_switch_alloc() - allocate a switch
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * Allocates and initializes a switch. Will not upload configuration to
 * the switch. For that you need to call tb_switch_configure()
 * separately. The returned switch should be released by calling
 * tb_switch_put().
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of
 * failure.
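 *
 * A minimal usage sketch (hypothetical caller, error handling mostly
 * elided; in the driver this sequence is carried out by the connection
 * manager)::
 *
 *	sw = tb_switch_alloc(tb, parent, route);
 *	if (IS_ERR(sw))
 *		return PTR_ERR(sw);
 *	if (tb_switch_configure(sw) || tb_switch_add(sw))
 *		tb_switch_put(sw);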
 */
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
				  u64 route)
{
	struct tb_switch *sw;
	int upstream_port;
	int i, ret, depth;

	/* Unlock the downstream port so we can access the switch below */
	if (route) {
		struct tb_switch *parent_sw = tb_to_switch(parent);
		struct tb_port *down;

		down = tb_port_at(route, parent_sw);
		tb_port_unlock(down);
	}

	depth = tb_route_length(route);

	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
	if (upstream_port < 0)
		return ERR_PTR(upstream_port);

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	sw->tb = tb;
	ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
	if (ret)
		goto err_free_sw_ports;

	sw->generation = tb_switch_get_generation(sw);

	tb_dbg(tb, "current switch config:\n");
	tb_dump_switch(tb, sw);

	/* configure switch */
	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = depth;
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->config.enabled = 0;

	/* Make sure we do not exceed maximum topology limit */
	if (tb_switch_exceeds_max_depth(sw, depth)) {
		ret = -EADDRNOTAVAIL;
		goto err_free_sw_ports;
	}

	/* initialize ports */
	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
			    GFP_KERNEL);
	if (!sw->ports) {
		ret = -ENOMEM;
		goto err_free_sw_ports;
	}

	for (i = 0; i <= sw->config.max_port_number; i++) {
		/* minimum setup for tb_find_cap and tb_drom_read to work */
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;

		/* Control port does not need HopID allocation */
		if (i) {
			ida_init(&sw->ports[i].in_hopids);
			ida_init(&sw->ports[i].out_hopids);
		}
	}

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
	if (ret > 0)
		sw->cap_plug_events = ret;

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2);
	if (ret > 0)
		sw->cap_vsec_tmu = ret;

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
	if (ret > 0)
		sw->cap_lc = ret;

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP);
	if (ret > 0)
		sw->cap_lp = ret;

	/* Root switch is always authorized */
	if (!route)
		sw->authorized = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;

err_free_sw_ports:
	kfree(sw->ports);
	kfree(sw);

	return ERR_PTR(ret);
}

/**
 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * This creates a switch in safe mode. Such a switch lacks pretty much
 * all capabilities except the DMA configuration port, until it has
 * been flashed with a valid NVM firmware.
 *
 * The returned switch must be released by calling tb_switch_put().
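 *
 * A hedged sketch of the intended flow (hypothetical caller, error
 * handling elided)::
 *
 *	sw = tb_switch_alloc_safe_mode(tb, parent, route);
 *	if (!IS_ERR(sw) && tb_switch_add(sw))
 *		tb_switch_put(sw);
 *
 * After this the expectation is that a valid NVM image is flashed, for
 * example through the nvm_authenticate sysfs attribute.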
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
 */
struct tb_switch *
tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
{
	struct tb_switch *sw;

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	sw->tb = tb;
	sw->config.depth = tb_route_length(route);
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->safe_mode = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;
}

/**
 * tb_switch_configure() - Uploads configuration to the switch
 * @sw: Switch to configure
 *
 * Call this function before the switch is added to the system. It
 * uploads the configuration to the switch and makes it available for
 * the connection manager to use. Can be called again after resume from
 * low power states to re-initialize the switch.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_configure(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	u64 route;
	int ret;

	route = tb_route(sw);

	tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
	       sw->config.enabled ? "restoring" : "initializing", route,
	       tb_route_length(route), sw->config.upstream_port_number);

	sw->config.enabled = 1;

	if (tb_switch_is_usb4(sw)) {
		/*
		 * For USB4 devices, we need to program the CM version
		 * accordingly so that it knows to expose all the
		 * additional capabilities. Program it according to the
		 * USB4 version to avoid changing the behaviour of
		 * existing (v1) routers.
		 */
		if (usb4_switch_version(sw) < 2)
			sw->config.cmuv = ROUTER_CS_4_CMUV_V1;
		else
			sw->config.cmuv = ROUTER_CS_4_CMUV_V2;
		sw->config.plug_events_delay = 0xa;

		/* Enumerate the switch */
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
				  ROUTER_CS_1, 4);
		if (ret)
			return ret;

		ret = usb4_switch_setup(sw);
	} else {
		if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
			tb_sw_warn(sw, "unknown switch vendor id %#x\n",
				   sw->config.vendor_id);

		if (!sw->cap_plug_events) {
			tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS, aborting\n");
			return -ENODEV;
		}

		/* Enumerate the switch */
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
				  ROUTER_CS_1, 3);
	}
	if (ret)
		return ret;

	return tb_plug_events_active(sw, true);
}

/**
 * tb_switch_configuration_valid() - Set the tunneling configuration to be valid
 * @sw: Router to configure
 *
 * Needs to be called before any tunnels can be set up through the
 * router. Can be called for any router.
 *
 * Returns %0 on success and negative errno otherwise.
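 *
 * A hypothetical enumeration sketch (ordering only, error handling
 * elided); tunnels can be set up once this has succeeded::
 *
 *	tb_switch_configure(sw);
 *	tb_switch_add(sw);
 *	tb_switch_configuration_valid(sw);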
 */
int tb_switch_configuration_valid(struct tb_switch *sw)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_configuration_valid(sw);
	return 0;
}

static int tb_switch_set_uuid(struct tb_switch *sw)
{
	bool uid = false;
	u32 uuid[4];
	int ret;

	if (sw->uuid)
		return 0;

	if (tb_switch_is_usb4(sw)) {
		ret = usb4_switch_read_uid(sw, &sw->uid);
		if (ret)
			return ret;
		uid = true;
	} else {
		/*
		 * Newer controllers include a fused UUID as part of the
		 * link controller specific registers
		 */
		ret = tb_lc_read_uuid(sw, uuid);
		if (ret) {
			if (ret != -EINVAL)
				return ret;
			uid = true;
		}
	}

	if (uid) {
		/*
		 * ICM generates UUID based on UID and fills the upper
		 * two words with ones. This is not strictly following
		 * the UUID format but we want to be compatible with it
		 * so we do the same here.
		 */
		uuid[0] = sw->uid & 0xffffffff;
		uuid[1] = (sw->uid >> 32) & 0xffffffff;
		uuid[2] = 0xffffffff;
		uuid[3] = 0xffffffff;
	}

	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
	if (!sw->uuid)
		return -ENOMEM;
	return 0;
}

static int tb_switch_add_dma_port(struct tb_switch *sw)
{
	u32 status;
	int ret;

	switch (sw->generation) {
	case 2:
		/* Only root switch can be upgraded */
		if (tb_route(sw))
			return 0;

		fallthrough;
	case 3:
	case 4:
		ret = tb_switch_set_uuid(sw);
		if (ret)
			return ret;
		break;

	default:
		/*
		 * DMA port is the only thing available when the switch
		 * is in safe mode.
		 */
		if (!sw->safe_mode)
			return 0;
		break;
	}

	if (sw->no_nvm_upgrade)
		return 0;

	if (tb_switch_is_usb4(sw)) {
		ret = usb4_switch_nvm_authenticate_status(sw, &status);
		if (ret)
			return ret;

		if (status) {
			tb_sw_info(sw, "switch flash authentication failed\n");
			nvm_set_auth_status(sw, status);
		}

		return 0;
	}

	/* Root switch DMA port requires running firmware */
	if (!tb_route(sw) && !tb_switch_is_icm(sw))
		return 0;

	sw->dma_port = dma_port_alloc(sw);
	if (!sw->dma_port)
		return 0;

	/*
	 * If there is a status already set, authentication failed when
	 * dma_port_flash_update_auth() returned. Power cycling is not
	 * needed (it was done already), so the only thing we do here is
	 * unblock runtime PM of the root port.
	 */
	nvm_get_auth_status(sw, &status);
	if (status) {
		if (!tb_route(sw))
			nvm_authenticate_complete_dma_port(sw);
		return 0;
	}

	/*
	 * Check status of the previous flash authentication. If there
	 * is one we need to power cycle the switch in any case to make
	 * it functional again.
	 */
	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
	if (ret <= 0)
		return ret;

	/* Now we can allow root port to suspend again */
	if (!tb_route(sw))
		nvm_authenticate_complete_dma_port(sw);

	if (status) {
		tb_sw_info(sw, "switch flash authentication failed\n");
		nvm_set_auth_status(sw, status);
	}

	tb_sw_info(sw, "power cycling the switch now\n");
	dma_port_power_cycle(sw->dma_port);

	/*
	 * We return an error here which causes adding the switch to
	 * fail. It should appear back once the power cycle is complete.
	 */
	return -ESHUTDOWN;
}

static void tb_switch_default_link_ports(struct tb_switch *sw)
{
	int i;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];
		struct tb_port *subordinate;

		if (!tb_port_is_null(port))
			continue;

		/* Check for the subordinate port */
		if (i == sw->config.max_port_number ||
		    !tb_port_is_null(&sw->ports[i + 1]))
			continue;

		/* Link them if not already linked (by the DROM) */
		subordinate = &sw->ports[i + 1];
		if (!port->dual_link_port && !subordinate->dual_link_port) {
			port->link_nr = 0;
			port->dual_link_port = subordinate;
			subordinate->link_nr = 1;
			subordinate->dual_link_port = port;

			tb_sw_dbg(sw, "linked ports %d <-> %d\n",
				  port->port, subordinate->port);
		}
	}
}

static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
{
	const struct tb_port *up = tb_upstream_port(sw);

	if (!up->dual_link_port || !up->dual_link_port->remote)
		return false;

	if (tb_switch_is_usb4(sw))
		return usb4_switch_lane_bonding_possible(sw);
	return tb_lc_lane_bonding_possible(sw);
}

static int tb_switch_update_link_attributes(struct tb_switch *sw)
{
	struct tb_port *up;
	bool change = false;
	int ret;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	up = tb_upstream_port(sw);

	ret = tb_port_get_link_speed(up);
	if (ret < 0)
		return ret;
	if (sw->link_speed != ret)
		change = true;
	sw->link_speed = ret;

	ret = tb_port_get_link_width(up);
	if (ret < 0)
		return ret;
	if (sw->link_width != ret)
		change = true;
	sw->link_width = ret;

	/* Notify userspace that there is a possible link attribute change */
	if (device_is_registered(&sw->dev) && change)
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);

	return 0;
}

/* Must be called after tb_switch_update_link_attributes() */
static void tb_switch_link_init(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	bool bonded;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return;

	tb_sw_dbg(sw, "current link speed %u.0 Gb/s\n", sw->link_speed);
	tb_sw_dbg(sw, "current link width %s\n", width_name(sw->link_width));

	bonded = sw->link_width >= TB_LINK_WIDTH_DUAL;

	/*
	 * Gen 4 links come up as bonded so update the port structures
	 * accordingly.
	 */
	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	up->bonded = bonded;
	if (up->dual_link_port)
		up->dual_link_port->bonded = bonded;
	tb_port_update_credits(up);

	down->bonded = bonded;
	if (down->dual_link_port)
		down->dual_link_port->bonded = bonded;
	tb_port_update_credits(down);
}

/**
 * tb_switch_lane_bonding_enable() - Enable lane bonding
 * @sw: Switch to enable lane bonding
 *
 * Connection manager can call this function to enable lane bonding of a
 * switch. If conditions are correct and both switches support the feature,
 * lanes are bonded. It is safe to call this for any switch.
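 *
 * In this file the function is reached through tb_switch_set_link_width();
 * a hedged sketch of a hypothetical caller::
 *
 *	ret = tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL);
 *	if (ret)
 *		tb_sw_warn(sw, "failed to bond lanes\n");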
 */
static int tb_switch_lane_bonding_enable(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	unsigned int width;
	int ret;

	if (!tb_switch_lane_bonding_possible(sw))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	if (!tb_port_width_supported(up, TB_LINK_WIDTH_DUAL) ||
	    !tb_port_width_supported(down, TB_LINK_WIDTH_DUAL))
		return 0;

	/*
	 * Both lanes need to be in CL0. Here we assume lane 0 is
	 * already in CL0 and check just for lane 1.
	 */
	if (tb_wait_for_port(down->dual_link_port, false) <= 0)
		return -ENOTCONN;

	ret = tb_port_lane_bonding_enable(up);
	if (ret) {
		tb_port_warn(up, "failed to enable lane bonding\n");
		return ret;
	}

	ret = tb_port_lane_bonding_enable(down);
	if (ret) {
		tb_port_warn(down, "failed to enable lane bonding\n");
		tb_port_lane_bonding_disable(up);
		return ret;
	}

	/* Any of these widths means the lanes are bonded */
	width = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
		TB_LINK_WIDTH_ASYM_RX;

	return tb_port_wait_for_link_width(down, width, 100);
}

/**
 * tb_switch_lane_bonding_disable() - Disable lane bonding
 * @sw: Switch whose lane bonding to disable
 *
 * Disables lane bonding between @sw and its parent. This can be called
 * even if lanes were not bonded originally.
 */
static int tb_switch_lane_bonding_disable(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	if (!up->bonded)
		return 0;

	/*
	 * If the link is Gen 4 there is no way to switch the link to
	 * two single lane links so avoid that here. Also don't bother
	 * if the link is not up anymore (sw is unplugged).
	 */
	ret = tb_port_get_link_generation(up);
	if (ret < 0)
		return ret;
	if (ret >= 4)
		return -EOPNOTSUPP;

	down = tb_switch_downstream_port(sw);
	tb_port_lane_bonding_disable(up);
	tb_port_lane_bonding_disable(down);

	/*
	 * It is fine if we get other errors as the router might have
	 * been unplugged.
	 */
	return tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100);
}

/* Note that updating sw->link_width is done in tb_switch_update_link_attributes() */
static int tb_switch_asym_enable(struct tb_switch *sw, enum tb_link_width width)
{
	struct tb_port *up, *down, *port;
	enum tb_link_width down_width;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	if (width == TB_LINK_WIDTH_ASYM_TX) {
		down_width = TB_LINK_WIDTH_ASYM_RX;
		port = down;
	} else {
		down_width = TB_LINK_WIDTH_ASYM_TX;
		port = up;
	}

	ret = tb_port_set_link_width(up, width);
	if (ret)
		return ret;

	ret = tb_port_set_link_width(down, down_width);
	if (ret)
		return ret;

	/*
	 * Initiate the change in the router that one of its TX lanes is
	 * changing to RX but do so only if there is an actual change.
	 */
	if (sw->link_width != width) {
		ret = usb4_port_asym_start(port);
		if (ret)
			return ret;

		ret = tb_port_wait_for_link_width(up, width, 100);
		if (ret)
			return ret;
	}

	return 0;
}

/* Note that updating sw->link_width is done in tb_switch_update_link_attributes() */
static int tb_switch_asym_disable(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	ret = tb_port_set_link_width(up, TB_LINK_WIDTH_DUAL);
	if (ret)
		return ret;

	ret = tb_port_set_link_width(down, TB_LINK_WIDTH_DUAL);
	if (ret)
		return ret;

	/*
	 * Initiate the change in the router that has three TX lanes and
	 * is changing one of its TX lanes to RX but only if there is a
	 * change in the link width.
	 */
	if (sw->link_width > TB_LINK_WIDTH_DUAL) {
		if (sw->link_width == TB_LINK_WIDTH_ASYM_TX)
			ret = usb4_port_asym_start(up);
		else
			ret = usb4_port_asym_start(down);
		if (ret)
			return ret;

		ret = tb_port_wait_for_link_width(up, TB_LINK_WIDTH_DUAL, 100);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_switch_set_link_width() - Configure router link width
 * @sw: Router to configure
 * @width: The new link width
 *
 * Sets the device router link width to @width from the router upstream
 * port perspective. Also supports asymmetric links if the routers on
 * both sides of the link support it.
 *
 * Does nothing for the host router.
 *
 * Returns %0 in case of success, negative errno otherwise.
 */
int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width)
{
	struct tb_port *up, *down;
	int ret = 0;

	if (!tb_route(sw))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	switch (width) {
	case TB_LINK_WIDTH_SINGLE:
		ret = tb_switch_lane_bonding_disable(sw);
		break;

	case TB_LINK_WIDTH_DUAL:
		if (sw->link_width == TB_LINK_WIDTH_ASYM_TX ||
		    sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
			ret = tb_switch_asym_disable(sw);
			if (ret)
				break;
		}
		ret = tb_switch_lane_bonding_enable(sw);
		break;

	case TB_LINK_WIDTH_ASYM_TX:
	case TB_LINK_WIDTH_ASYM_RX:
		ret = tb_switch_asym_enable(sw, width);
		break;
	}

	switch (ret) {
	case 0:
		break;

	case -ETIMEDOUT:
		tb_sw_warn(sw, "timeout changing link width\n");
		return ret;

	case -ENOTCONN:
	case -EOPNOTSUPP:
	case -ENODEV:
		return ret;

	default:
		tb_sw_dbg(sw, "failed to change link width: %d\n", ret);
		return ret;
	}

	tb_port_update_credits(down);
	tb_port_update_credits(up);

	tb_switch_update_link_attributes(sw);

	tb_sw_dbg(sw, "link width set to %s\n", width_name(width));
	return ret;
}

/**
 * tb_switch_configure_link() - Set link configured
 * @sw: Switch whose link is configured
 *
 * Sets the link upstream from @sw configured (from both ends) so that
 * it will not be disconnected when the domain exits sleep. Can be
 * called for any switch.
 *
 * It is recommended that this is called after lane bonding is enabled.
 *
 * Returns %0 on success and negative errno in case of error.
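 *
 * Following the recommendation above, a hedged ordering sketch
 * (hypothetical connection manager code, error handling elided)::
 *
 *	tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL);
 *	tb_switch_configure_link(sw);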
 */
int tb_switch_configure_link(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	up = tb_upstream_port(sw);
	if (tb_switch_is_usb4(up->sw))
		ret = usb4_port_configure(up);
	else
		ret = tb_lc_configure_port(up);
	if (ret)
		return ret;

	down = up->remote;
	if (tb_switch_is_usb4(down->sw))
		return usb4_port_configure(down);
	return tb_lc_configure_port(down);
}

/**
 * tb_switch_unconfigure_link() - Unconfigure link
 * @sw: Switch whose link is unconfigured
 *
 * Sets the link unconfigured so that @sw will be disconnected if the
 * domain exits sleep.
 */
void tb_switch_unconfigure_link(struct tb_switch *sw)
{
	struct tb_port *up, *down;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return;

	/*
	 * Unconfigure downstream port so that wake-on-connect can be
	 * configured after router unplug. No need to unconfigure upstream port
	 * since its router is unplugged.
	 */
	up = tb_upstream_port(sw);
	down = up->remote;
	if (tb_switch_is_usb4(down->sw))
		usb4_port_unconfigure(down);
	else
		tb_lc_unconfigure_port(down);

	if (sw->is_unplugged)
		return;

	up = tb_upstream_port(sw);
	if (tb_switch_is_usb4(up->sw))
		usb4_port_unconfigure(up);
	else
		tb_lc_unconfigure_port(up);
}

static void tb_switch_credits_init(struct tb_switch *sw)
{
	if (tb_switch_is_icm(sw))
		return;
	if (!tb_switch_is_usb4(sw))
		return;
	if (usb4_switch_credits_init(sw))
		tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n");
}

static int tb_switch_port_hotplug_enable(struct tb_switch *sw)
{
	struct tb_port *port;

	if (tb_switch_is_icm(sw))
		return 0;

	tb_switch_for_each_port(sw, port) {
		int res;

		if (!port->cap_usb4)
			continue;

		res = usb4_port_hotplug_enable(port);
		if (res)
			return res;
	}
	return 0;
}

/**
 * tb_switch_add() - Add a switch to the domain
 * @sw: Switch to add
 *
 * This is the last step in adding a switch to the domain. It reads
 * identification information from the DROM and initializes ports so
 * that they can be used to connect other switches. The switch will be
 * exposed to the userspace when this function successfully returns. To
 * remove and release the switch, call tb_switch_remove().
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_add(struct tb_switch *sw)
{
	int i, ret;

	/*
	 * Initialize DMA control port now before we read DROM. Recent
	 * host controllers have a more complete DROM in NVM that
	 * includes vendor and model identification strings which we
	 * then expose to the userspace. NVM can be accessed through a
	 * DMA configuration based mailbox.
	 */
	ret = tb_switch_add_dma_port(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add DMA port\n");
		return ret;
	}

	if (!sw->safe_mode) {
		tb_switch_credits_init(sw);

		/* read drom */
		ret = tb_drom_read(sw);
		if (ret)
			dev_warn(&sw->dev, "reading DROM failed: %d\n", ret);
		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);

		ret = tb_switch_set_uuid(sw);
		if (ret) {
			dev_err(&sw->dev, "failed to set UUID\n");
			return ret;
		}

		for (i = 0; i <= sw->config.max_port_number; i++) {
			if (sw->ports[i].disabled) {
				tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
				continue;
			}
			ret = tb_init_port(&sw->ports[i]);
			if (ret) {
				dev_err(&sw->dev, "failed to initialize port %d\n", i);
				return ret;
			}
		}

		tb_check_quirks(sw);

		tb_switch_default_link_ports(sw);

		ret = tb_switch_update_link_attributes(sw);
		if (ret)
			return ret;

		tb_switch_link_init(sw);

		ret = tb_switch_clx_init(sw);
		if (ret)
			return ret;

		ret = tb_switch_tmu_init(sw);
		if (ret)
			return ret;
	}

	ret = tb_switch_port_hotplug_enable(sw);
	if (ret)
		return ret;

	ret = device_add(&sw->dev);
	if (ret) {
		dev_err(&sw->dev, "failed to add device: %d\n", ret);
		return ret;
	}

	if (tb_route(sw)) {
		dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
			 sw->vendor, sw->device);
		if (sw->vendor_name && sw->device_name)
			dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
				 sw->device_name);
	}

	ret = usb4_switch_add_ports(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add USB4 ports\n");
		goto err_del;
	}

	ret = tb_switch_nvm_add(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add NVM devices\n");
		goto err_ports;
	}

	/*
	 * Thunderbolt routers do not generate wakeups themselves but
	 * they forward wakeups from tunneled protocols, so enable
	 * wakeup here.
	 */
	device_init_wakeup(&sw->dev, true);

	pm_runtime_set_active(&sw->dev);
	if (sw->rpm) {
		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
		pm_runtime_use_autosuspend(&sw->dev);
		pm_runtime_mark_last_busy(&sw->dev);
		pm_runtime_enable(&sw->dev);
		pm_request_autosuspend(&sw->dev);
	}

	tb_switch_debugfs_init(sw);
	return 0;

err_ports:
	usb4_switch_remove_ports(sw);
err_del:
	device_del(&sw->dev);

	return ret;
}

/**
 * tb_switch_remove() - Remove and release a switch
 * @sw: Switch to remove
 *
 * This will remove the switch from the domain and release it after the
 * last reference count drops to zero. If there are switches connected
 * below this switch, they will be removed as well.
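 *
 * A hedged lifecycle sketch (hypothetical caller, error handling
 * elided)::
 *
 *	sw = tb_switch_alloc(tb, parent, route);
 *	tb_switch_configure(sw);
 *	tb_switch_add(sw);
 *	...
 *	tb_switch_remove(sw);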
 */
void tb_switch_remove(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_debugfs_remove(sw);

	if (sw->rpm) {
		pm_runtime_get_sync(&sw->dev);
		pm_runtime_disable(&sw->dev);
	}

	/* port 0 is the switch itself and never has a remote */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else if (port->xdomain) {
			port->xdomain->is_unplugged = true;
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
		}

		/* Remove any downstream retimers */
		tb_retimer_remove_all(port);
	}

	if (!sw->is_unplugged)
		tb_plug_events_active(sw, false);

	tb_switch_nvm_remove(sw);
	usb4_switch_remove_ports(sw);

	if (tb_route(sw))
		dev_info(&sw->dev, "device disconnected\n");
	device_unregister(&sw->dev);
}

/**
 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
 * @sw: Router to mark unplugged
 */
void tb_sw_set_unplugged(struct tb_switch *sw)
{
	struct tb_port *port;

	if (sw == sw->tb->root_switch) {
		tb_sw_WARN(sw, "cannot unplug root switch\n");
		return;
	}
	if (sw->is_unplugged) {
		tb_sw_WARN(sw, "is_unplugged already set\n");
		return;
	}
	sw->is_unplugged = true;
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_sw_set_unplugged(port->remote->sw);
		else if (port->xdomain)
			port->xdomain->is_unplugged = true;
	}
}

static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
{
	if (flags)
		tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
	else
		tb_sw_dbg(sw, "disabling wakeup\n");

	if (tb_switch_is_usb4(sw))
		return usb4_switch_set_wake(sw, flags);
	return tb_lc_set_wake(sw, flags);
}

static void tb_switch_check_wakes(struct tb_switch *sw)
{
	if (device_may_wakeup(&sw->dev)) {
		if (tb_switch_is_usb4(sw))
			usb4_switch_check_wakes(sw);
	}
}

/**
 * tb_switch_resume() - Resume a switch after sleep
 * @sw: Switch to resume
 * @runtime: Is this resume from runtime suspend or system sleep
 *
 * Resumes and re-enumerates the router (and all its children), if it is
 * still plugged after suspend. Does not enumerate a device router whose
 * UID changed during suspend. If this is a resume from system sleep,
 * notifies the PM core about the wakes that occurred during suspend.
 * Disables all wakes, except the USB4 wake on the upstream port of USB4
 * routers, which always stays enabled.
 */
int tb_switch_resume(struct tb_switch *sw, bool runtime)
{
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "resuming switch\n");

	/*
	 * Check for UID of the connected switches except for root
	 * switch which we assume cannot be removed.
	 */
	if (tb_route(sw)) {
		u64 uid;

		/*
		 * Check first that we can still read the switch config
		 * space. It may be that there is now another domain
		 * connected.
		 */
		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
		if (err < 0) {
			tb_sw_info(sw, "switch not present anymore\n");
			return err;
		}

		/* We don't have any way to confirm this was the same device */
		if (!sw->uid)
			return -ENODEV;

		if (tb_switch_is_usb4(sw))
			err = usb4_switch_read_uid(sw, &uid);
		else
			err = tb_drom_read_uid_only(sw, &uid);
		if (err) {
			tb_sw_warn(sw, "uid read failed\n");
			return err;
		}
		if (sw->uid != uid) {
			tb_sw_info(sw,
				"changed while suspended (uid %#llx -> %#llx)\n",
				sw->uid, uid);
			return -ENODEV;
		}
	}

	err = tb_switch_configure(sw);
	if (err)
		return err;

	if (!runtime)
		tb_switch_check_wakes(sw);

	/* Disable wakes */
	tb_switch_set_wake(sw, 0);

	err = tb_switch_tmu_init(sw);
	if (err)
		return err;

	/* check for surviving downstream switches */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_null(port))
			continue;

		if (!tb_port_resume(port))
			continue;

		if (tb_wait_for_port(port, true) <= 0) {
			tb_port_warn(port,
				     "lost during suspend, disconnecting\n");
			if (tb_port_has_remote(port))
				tb_sw_set_unplugged(port->remote->sw);
			else if (port->xdomain)
				port->xdomain->is_unplugged = true;
		} else {
			/*
			 * Always unlock the port so the downstream
			 * switch/domain is accessible.
			 */
			if (tb_port_unlock(port))
				tb_port_warn(port, "failed to unlock port\n");
			if (port->remote &&
			    tb_switch_resume(port->remote->sw, runtime)) {
				tb_port_warn(port,
					     "lost during suspend, disconnecting\n");
				tb_sw_set_unplugged(port->remote->sw);
			}
		}
	}
	return 0;
}

/**
 * tb_switch_suspend() - Put a switch to sleep
 * @sw: Switch to suspend
 * @runtime: Is this runtime suspend or system sleep
 *
 * Suspends the router and all its children. Enables wakes according to
 * the value of @runtime and then sets the sleep bit for the router. If
 * @sw is the host router, the domain is ready to go to sleep once this
 * function returns.
 */
void tb_switch_suspend(struct tb_switch *sw, bool runtime)
{
	unsigned int flags = 0;
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "suspending switch\n");

	/*
	 * Actually only needed for Titan Ridge but for simplicity can be
	 * done for USB4 devices too as CLx is re-enabled at resume.
	 */
	tb_switch_clx_disable(sw);

	err = tb_plug_events_active(sw, false);
	if (err)
		return;

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_switch_suspend(port->remote->sw, runtime);
	}

	if (runtime) {
		/* Trigger wake when something is plugged in/out */
		flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
		flags |= TB_WAKE_ON_USB4;
		flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP;
	} else if (device_may_wakeup(&sw->dev)) {
		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
	}

	tb_switch_set_wake(sw, flags);

	if (tb_switch_is_usb4(sw))
		usb4_switch_set_sleep(sw);
	else
		tb_lc_set_sleep(sw);
}

/**
 * tb_switch_query_dp_resource() - Query availability of DP resource
 * @sw: Switch whose DP resource is queried
 * @in: DP IN port
 *
 * Queries availability of DP resource for DP tunneling using switch
 * specific means. Returns %true if the resource is available.
 */
bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_query_dp_resource(sw, in);
	return tb_lc_dp_sink_query(sw, in);
}

/**
 * tb_switch_alloc_dp_resource() - Allocate available DP resource
 * @sw: Switch whose DP resource is allocated
 * @in: DP IN port
 *
 * Allocates DP resource for DP tunneling. The resource must be
 * available for this to succeed (see tb_switch_query_dp_resource()).
 * Returns %0 on success and negative errno otherwise.
 */
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	int ret;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_alloc_dp_resource(sw, in);
	else
		ret = tb_lc_dp_sink_alloc(sw, in);

	if (ret)
		tb_sw_warn(sw, "failed to allocate DP resource for port %d\n",
			   in->port);
	else
		tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port);

	return ret;
}

/**
 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
 * @sw: Switch whose DP resource is de-allocated
 * @in: DP IN port
 *
 * De-allocates DP resource that was previously allocated for DP
 * tunneling.
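 *
 * Together with the two functions above this forms the DP resource
 * lifecycle; a hedged sketch (hypothetical caller)::
 *
 *	if (tb_switch_query_dp_resource(sw, in) &&
 *	    !tb_switch_alloc_dp_resource(sw, in)) {
 *		...
 *		tb_switch_dealloc_dp_resource(sw, in);
 *	}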
 */
void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	int ret;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_dealloc_dp_resource(sw, in);
	else
		ret = tb_lc_dp_sink_dealloc(sw, in);

	if (ret)
		tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
			   in->port);
	else
		tb_sw_dbg(sw, "released DP resource for port %d\n", in->port);
}

struct tb_sw_lookup {
	struct tb *tb;
	u8 link;
	u8 depth;
	const uuid_t *uuid;
	u64 route;
};

static int tb_switch_match(struct device *dev, const void *data)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_sw_lookup *lookup = data;

	if (!sw)
		return 0;
	if (sw->tb != lookup->tb)
		return 0;

	if (lookup->uuid)
		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));

	if (lookup->route) {
		return sw->config.route_lo == lower_32_bits(lookup->route) &&
		       sw->config.route_hi == upper_32_bits(lookup->route);
	}

	/* Root switch is matched only by depth */
	if (!lookup->depth)
		return !sw->depth;

	return sw->link == lookup->link && sw->depth == lookup->depth;
}

/**
 * tb_switch_find_by_link_depth() - Find switch by link and depth
 * @tb: Domain the switch belongs to
 * @link: Link number the switch is connected to
 * @depth: Depth of the switch in link
 *
 * The returned switch has its reference count increased so the caller
 * needs to call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.link = link;
	lookup.depth = depth;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_uuid() - Find switch by UUID
 * @tb: Domain the switch belongs to
 * @uuid: UUID to look for
 *
 * The returned switch has its reference count increased so the caller
 * needs to call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.uuid = uuid;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_route() - Find switch by route string
 * @tb: Domain the switch belongs to
 * @route: Route string to look for
 *
 * The returned switch has its reference count increased so the caller
 * needs to call tb_switch_put() when done with the switch.
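 *
 * A minimal usage sketch (hypothetical caller)::
 *
 *	sw = tb_switch_find_by_route(tb, route);
 *	if (sw) {
 *		...
 *		tb_switch_put(sw);
 *	}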
 */
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	if (!route)
		return tb_switch_get(tb->root_switch);

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.route = route;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_port() - return the first port of @type on @sw or NULL
 * @sw: Switch to find the port from
 * @type: Port type to look for
 */
struct tb_port *tb_switch_find_port(struct tb_switch *sw,
				    enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (port->config.type == type)
			return port;
	}

	return NULL;
}

/*
 * Can be used to read/write a specified PCIe bridge of any Thunderbolt 3
 * device. For now used only for Titan Ridge.
 */
static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge,
				       unsigned int pcie_offset, u32 value)
{
	u32 offset, command, val;
	int ret;

	if (sw->generation != 3)
		return -EOPNOTSUPP;

	offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA;
	ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK;
	command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT);
	command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK;
	command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL
		   << TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT;
	command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK;

	offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD;

	ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	ret = tb_switch_wait_for_bit(sw, offset,
				     TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK)
		return -ETIMEDOUT;

	return 0;
}

/**
 * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state
 * @sw: Router to enable PCIe L1
 *
 * For a Titan Ridge switch to enter the CLx state, its PCIe bridges
 * must enable entry to the PCIe L1 state. Shall be called after the
 * upstream PCIe tunnel has been configured. Due to an Intel platform
 * limitation, shall be called only for the first hop switch.
 */
int tb_switch_pcie_l1_enable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret;

	if (!tb_route(sw))
		return 0;

	if (!tb_switch_is_titan_ridge(sw))
		return 0;

	/* Enable PCIe L1 only for the first hop router (depth = 1) */
	if (tb_route(parent))
		return 0;

	/* Write to downstream PCIe bridge #5 aka Dn4 */
	ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1);
	if (ret)
		return ret;

	/* Write to Upstream PCIe bridge #0 aka Up0 */
	return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1);
}

/**
 * tb_switch_xhci_connect() - Connect internal xHCI
 * @sw: Router whose xHCI to connect
 *
 * Can be called for any router.
 * For Alpine Ridge and Titan Ridge this performs special flows that
 * bring the xHCI up so it is functional for any device connected to
 * the Type-C port. Call this only after the PCIe tunnel has been
 * established. The function only does the connect if it has not been
 * done already, so it can be called several times for the same router.
 */
int tb_switch_xhci_connect(struct tb_switch *sw)
{
	struct tb_port *port1, *port3;
	int ret;

	if (sw->generation != 3)
		return 0;

	port1 = &sw->ports[1];
	port3 = &sw->ports[3];

	if (tb_switch_is_alpine_ridge(sw)) {
		bool usb_port1, usb_port3, xhci_port1, xhci_port3;

		usb_port1 = tb_lc_is_usb_plugged(port1);
		usb_port3 = tb_lc_is_usb_plugged(port3);
		xhci_port1 = tb_lc_is_xhci_connected(port1);
		xhci_port3 = tb_lc_is_xhci_connected(port3);

		/* Figure out the correct USB port to connect */
		if (usb_port1 && !xhci_port1) {
			ret = tb_lc_xhci_connect(port1);
			if (ret)
				return ret;
		}
		if (usb_port3 && !xhci_port3)
			return tb_lc_xhci_connect(port3);
	} else if (tb_switch_is_titan_ridge(sw)) {
		ret = tb_lc_xhci_connect(port1);
		if (ret)
			return ret;
		return tb_lc_xhci_connect(port3);
	}

	return 0;
}

/**
 * tb_switch_xhci_disconnect() - Disconnect internal xHCI
 * @sw: Router whose xHCI to disconnect
 *
 * The opposite of tb_switch_xhci_connect(). Disconnects the xHCI on
 * both ports.
 */
void tb_switch_xhci_disconnect(struct tb_switch *sw)
{
	if (sw->generation == 3) {
		struct tb_port *port1 = &sw->ports[1];
		struct tb_port *port3 = &sw->ports[3];

		tb_lc_xhci_disconnect(port1);
		tb_port_dbg(port1, "disconnected xHCI\n");
		tb_lc_xhci_disconnect(port3);
		tb_port_dbg(port3, "disconnected xHCI\n");
	}
}