// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/module.h>

#include "tb.h"

/* Switch NVM support */

struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

static bool clx_enabled = true;
module_param_named(clx, clx_enabled, bool, 0444);
MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)");

/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);

static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}

static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	if (!st) {
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}

static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}

static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size;
	const u8 *buf;
	int ret;

	ret = tb_nvm_validate(sw->nvm);
	if (ret)
		return ret;

	ret = tb_nvm_write_headers(sw->nvm);
	if (ret)
		return ret;

	buf = sw->nvm->buf_data_start;
	image_size = sw->nvm->buf_data_size;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
	else
		ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
	if (ret)
		return ret;

	sw->nvm->flushed = true;
	return 0;
}

static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
{
	int ret = 0;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		u32 status;

		ret = tb_domain_disconnect_all_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this if
		 * everything goes well so getting timeout is expected.
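		 * Hence -ETIMEDOUT from the update auth call below is
		 * treated as success.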
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		if (!ret || ret == -ETIMEDOUT)
			return 0;

		/*
		 * Any error from update auth operation requires power
		 * cycling of the host router.
		 */
		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
			nvm_set_auth_status(sw, status);
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return ret;
}

static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	switch (ret) {
	case 0:
	case -ETIMEDOUT:
	case -EACCES:
	case -EINVAL:
		/* Power cycle is required */
		break;
	default:
		return ret;
	}

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get timeout for a while). Once
	 * we get a response the device needs to be power cycled in
	 * order for the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;
		if (ret > 0) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}

static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	/*
	 * During host router NVM upgrade we should not allow root port to
	 * go into D3cold because some root ports cannot trigger PME
	 * themselves. To be on the safe side keep the root port in D0 during
	 * the whole upgrade process.
	 */
	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_get_noresume(&root_port->dev);
}

static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_put(&root_port->dev);
}

static inline bool nvm_readable(struct tb_switch *sw)
{
	if (tb_switch_is_usb4(sw)) {
		/*
		 * USB4 devices must support NVM operations but it is
		 * optional for hosts. Therefore we query the NVM sector
		 * size here and if it is supported assume NVM
		 * operations are implemented.
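		 * A non-positive sector size means they are not
		 * implemented.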
		 */
		return usb4_switch_nvm_sector_size(sw) > 0;
	}

	/* Thunderbolt 2 and 3 devices support NVM through DMA port */
	return !!sw->dma_port;
}

static inline bool nvm_upgradeable(struct tb_switch *sw)
{
	if (sw->no_nvm_upgrade)
		return false;
	return nvm_readable(sw);
}

static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
{
	int ret;

	if (tb_switch_is_usb4(sw)) {
		if (auth_only) {
			ret = usb4_switch_nvm_set_offset(sw, 0);
			if (ret)
				return ret;
		}
		sw->nvm->authenticating = true;
		return usb4_switch_nvm_authenticate(sw);
	} else if (auth_only) {
		return -EOPNOTSUPP;
	}

	sw->nvm->authenticating = true;
	if (!tb_route(sw)) {
		nvm_authenticate_start_dma_port(sw);
		ret = nvm_authenticate_host_dma_port(sw);
	} else {
		ret = nvm_authenticate_device_dma_port(sw);
	}

	return ret;
}

/**
 * tb_switch_nvm_read() - Read router NVM
 * @sw: Router whose NVM to read
 * @address: Start address on the NVM
 * @buf: Buffer where the read data is copied
 * @size: Size of the buffer in bytes
 *
 * Reads from router NVM and returns the requested data in @buf. Locking
 * is up to the caller. Returns %0 on success and negative errno in case
 * of failure.
 */
int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
		       size_t size)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_nvm_read(sw, address, buf, size);
	return dma_port_flash_read(sw->dma_port, address, buf, size);
}

static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = tb_switch_nvm_read(sw, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	/*
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	 */
	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

	return ret;
}

static int tb_switch_nvm_add(struct tb_switch *sw)
{
	struct tb_nvm *nvm;
	int ret;

	if (!nvm_readable(sw))
		return 0;

	nvm = tb_nvm_alloc(&sw->dev);
	if (IS_ERR(nvm)) {
		ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
		goto err_nvm;
	}

	ret = tb_nvm_read_version(nvm);
	if (ret)
		goto err_nvm;

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
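	 * In that case only the non-active NVMem device gets registered
	 * below.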
	 */
	if (!sw->safe_mode) {
		ret = tb_nvm_add_active(nvm, nvm_read);
		if (ret)
			goto err_nvm;
	}

	if (!sw->no_nvm_upgrade) {
		ret = tb_nvm_add_non_active(nvm, nvm_write);
		if (ret)
			goto err_nvm;
	}

	sw->nvm = nvm;
	return 0;

err_nvm:
	tb_sw_dbg(sw, "NVM upgrade disabled\n");
	sw->no_nvm_upgrade = true;
	if (!IS_ERR(nvm))
		tb_nvm_free(nvm);

	return ret;
}

static void tb_switch_nvm_remove(struct tb_switch *sw)
{
	struct tb_nvm *nvm;

	nvm = sw->nvm;
	sw->nvm = NULL;

	if (!nvm)
		return;

	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);

	tb_nvm_free(nvm);
}

/* port utility functions */

static const char *tb_port_type(const struct tb_regs_port_header *port)
{
	switch (port->type >> 16) {
	case 0:
		switch ((u8) port->type) {
		case 0:
			return "Inactive";
		case 1:
			return "Port";
		case 2:
			return "NHI";
		default:
			return "unknown";
		}
	case 0x2:
		return "Ethernet";
	case 0x8:
		return "SATA";
	case 0xe:
		return "DP/HDMI";
	case 0x10:
		return "PCIe";
	case 0x20:
		return "USB";
	default:
		return "unknown";
	}
}

static void tb_dump_port(struct tb *tb, const struct tb_port *port)
{
	const struct tb_regs_port_header *regs = &port->config;

	tb_dbg(tb,
	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
	       regs->port_number, regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version, tb_port_type(regs),
	       regs->type);
	tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
	       regs->max_in_hop_id, regs->max_out_hop_id);
	tb_dbg(tb, " Max counters: %d\n", regs->max_counters);
	tb_dbg(tb, " NFC Credits: %#x\n", regs->nfc_credits);
	tb_dbg(tb, " Credits (total/control): %u/%u\n", port->total_credits,
	       port->ctl_credits);
}

/**
 * tb_port_state() - get connectedness state of a port
 * @port: the port to check
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
int tb_port_state(struct tb_port *port)
{
	struct tb_cap_phy phy;
	int res;

	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}
	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
	if (res)
		return res;
	return phy.state;
}

/**
 * tb_wait_for_port() - wait for a port to become ready
 * @port: Port to wait
 * @wait_if_unplugged: Wait also when port is unplugged
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the link
 * has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
	int retries = 10;
	int state;

	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

	while (retries--) {
		state = tb_port_state(port);
		if (state < 0)
			return state;
		if (state == TB_PORT_DISABLED) {
			tb_port_dbg(port, "is disabled (state: 0)\n");
			return 0;
		}
		if (state == TB_PORT_UNPLUGGED) {
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_dbg(port,
					    "is unplugged (state: 7), retrying...\n");
				msleep(100);
				continue;
			}
			tb_port_dbg(port, "is unplugged (state: 7)\n");
			return 0;
		}
		if (state == TB_PORT_UP) {
			tb_port_dbg(port, "is connected, link is up (state: 2)\n");
			return 1;
		}

		/*
		 * After plug-in the state is TB_PORT_CONNECTING. Give it some
		 * time.
		 */
		tb_port_dbg(port,
			    "is connected, link is not up (state: %d), retrying...\n",
			    state);
		msleep(100);
	}
	tb_port_warn(port,
		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
}

/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 * @port: Port to add/remove NFC credits
 * @credits: Credits to add/remove
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
	u32 nfc_credits;

	if (credits == 0 || port->sw->is_unplugged)
		return 0;

	/*
	 * USB4 restricts programming NFC buffers to lane adapters only
	 * so skip other ports.
	 */
	if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
		return 0;

	nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	if (credits < 0)
		credits = max_t(int, -nfc_credits, credits);

	nfc_credits += credits;

	tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
		    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);

	port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
	port->config.nfc_credits |= nfc_credits;

	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, ADP_CS_4, 1);
}

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 * @port: Port whose counters to clear
 * @counter: Counter index to clear
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
	u32 zero[3] = { 0, 0, 0 };

	tb_port_dbg(port, "clearing counter %d\n", counter);
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}

/**
 * tb_port_unlock() - Unlock downstream port
 * @port: Port to unlock
 *
 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
 * downstream router accessible for CM.
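 * For ICM-managed domains and non-USB4 routers this is a no-op.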
 */
int tb_port_unlock(struct tb_port *port)
{
	if (tb_switch_is_icm(port->sw))
		return 0;
	if (!tb_port_is_null(port))
		return -EINVAL;
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_unlock(port);
	return 0;
}

static int __tb_port_enable(struct tb_port *port, bool enable)
{
	int ret;
	u32 phy;

	if (!tb_port_is_null(port))
		return -EINVAL;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (enable)
		phy &= ~LANE_ADP_CS_1_LD;
	else
		phy |= LANE_ADP_CS_1_LD;

	ret = tb_port_write(port, &phy, TB_CFG_PORT,
			    port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	tb_port_dbg(port, "lane %sabled\n", enable ? "en" : "dis");
	return 0;
}

/**
 * tb_port_enable() - Enable lane adapter
 * @port: Port to enable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to enable the lane.
 */
int tb_port_enable(struct tb_port *port)
{
	return __tb_port_enable(port, true);
}

/**
 * tb_port_disable() - Disable lane adapter
 * @port: Port to disable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to disable the lane.
 */
int tb_port_disable(struct tb_port *port)
{
	return __tb_port_enable(port, false);
}

/*
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
	int res;
	int cap;

	INIT_LIST_HEAD(&port->list);

	/* Control adapter does not have configuration space */
	if (!port->port)
		return 0;

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res) {
		if (res == -ENODEV) {
			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
			       port->port);
			port->disabled = true;
			return 0;
		}
		return res;
	}

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");

		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
		if (cap > 0)
			port->cap_usb4 = cap;

		/*
		 * For USB4 ports the buffers allocated for the control
		 * path can be read from the path config space. For
		 * legacy devices we use a hard-coded value.
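		 * (2 credits, set below when nothing else is available).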
		 */
		if (tb_switch_is_usb4(port->sw)) {
			struct tb_regs_hop hop;

			if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
				port->ctl_credits = hop.initial_credits;
		}
		if (!port->ctl_credits)
			port->ctl_credits = 2;

	} else {
		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
		if (cap > 0)
			port->cap_adap = cap;
	}

	port->total_credits =
		(port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
		ADP_CS_4_TOTAL_BUFFERS_SHIFT;

	tb_dump_port(port->sw->tb, port);
	return 0;
}

static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
			       int max_hopid)
{
	int port_max_hopid;
	struct ida *ida;

	if (in) {
		port_max_hopid = port->config.max_in_hop_id;
		ida = &port->in_hopids;
	} else {
		port_max_hopid = port->config.max_out_hop_id;
		ida = &port->out_hopids;
	}

	/*
	 * NHI can use HopIDs 1-max. For other adapters HopIDs 0-7 are
	 * reserved.
	 */
	if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
		min_hopid = TB_PATH_MIN_HOPID;

	if (max_hopid < 0 || max_hopid > port_max_hopid)
		max_hopid = port_max_hopid;

	return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
}

/**
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
}

/**
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
}

/**
 * tb_port_release_in_hopid() - Release allocated input HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->in_hopids, hopid);
}

/**
 * tb_port_release_out_hopid() - Release allocated output HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->out_hopids, hopid);
}

static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
					  const struct tb_switch *sw)
{
	u64 mask = (1ULL << parent->config.depth * 8) - 1;

	return (tb_route(parent) & mask) == (tb_route(sw) & mask);
}

/**
 * tb_next_port_on_path() - Return next port for given port on a path
 * @start: Start port of the walk
 * @end: End port of the walk
 * @prev: Previous port (%NULL if this is the first)
 *
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If @prev is a dual
 * link port, the function follows that link and returns another end on
 * that same link.
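 * Callers typically loop, feeding the returned port back in as @prev.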
 *
 * If the @end port has been reached, return %NULL.
 *
 * Domain tb->lock must be held when this function is called.
 */
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
				     struct tb_port *prev)
{
	struct tb_port *next;

	if (!prev)
		return start;

	if (prev->sw == end->sw) {
		if (prev == end)
			return NULL;
		return end;
	}

	if (tb_switch_is_reachable(prev->sw, end->sw)) {
		next = tb_port_at(tb_route(end->sw), prev->sw);
		/* Walk down the topology if next == prev */
		if (prev->remote &&
		    (next == prev || next->dual_link_port == prev))
			next = prev->remote;
	} else {
		if (tb_is_upstream_port(prev)) {
			next = prev->remote;
		} else {
			next = tb_upstream_port(prev->sw);
			/*
			 * Keep the same link if prev and next are both
			 * dual link ports.
			 */
			if (next->dual_link_port &&
			    next->link_nr != prev->link_nr) {
				next = next->dual_link_port;
			}
		}
	}

	return next != prev ? next : NULL;
}

/**
 * tb_port_get_link_speed() - Get current link speed
 * @port: Port to check (USB4 or CIO)
 *
 * Returns link speed in Gb/s or negative errno in case of failure.
 */
int tb_port_get_link_speed(struct tb_port *port)
{
	u32 val, speed;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
		LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
	return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
}

/**
 * tb_port_get_link_width() - Get current link width
 * @port: Port to check (USB4 or CIO)
 *
 * Returns link width. Return values can be 1 (Single-Lane), 2 (Dual-Lane)
 * or negative errno in case of failure.
 */
int tb_port_get_link_width(struct tb_port *port)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
		LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
}

static bool tb_port_is_width_supported(struct tb_port *port, int width)
{
	u32 phy, widths;
	int ret;

	if (!port->cap_phy)
		return false;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);
	if (ret)
		return false;

	widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
		LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;

	return !!(widths & width);
}

/**
 * tb_port_set_link_width() - Set target link width of the lane adapter
 * @port: Lane adapter
 * @width: Target link width (%1 or %2)
 *
 * Sets the target link width of the lane adapter to @width. Does not
 * enable/disable lane bonding. For that call tb_port_set_lane_bonding().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_port_set_link_width(struct tb_port *port, unsigned int width)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
	switch (width) {
	case 1:
		val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;
	case 2:
		val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;
	default:
		return -EINVAL;
	}

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

/**
 * tb_port_set_lane_bonding() - Enable/disable lane bonding
 * @port: Lane adapter
 * @bonding: enable/disable bonding
 *
 * Enables or disables lane bonding. This should be called after target
 * link width has been set (tb_port_set_link_width()). Note in most
 * cases one should use tb_port_lane_bonding_enable() instead to enable
 * lane bonding.
 *
 * As a side effect sets @port->bonded accordingly (and does the same
 * for lane 1 too).
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (bonding)
		val |= LANE_ADP_CS_1_LB;
	else
		val &= ~LANE_ADP_CS_1_LB;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	/*
	 * When lane 0 bonding is set it will affect lane 1 too so
	 * update both.
	 */
	port->bonded = bonding;
	port->dual_link_port->bonded = bonding;

	return 0;
}

/**
 * tb_port_lane_bonding_enable() - Enable bonding on port
 * @port: port to enable
 *
 * Enable bonding by setting the link width of the port and the other
 * port in case of dual link port. Does not wait for the link to
 * actually reach the bonded state so caller needs to call
 * tb_port_wait_for_link_width() before enabling any paths through the
 * link to make sure the link is in expected state.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_port_lane_bonding_enable(struct tb_port *port)
{
	int ret;

	/*
	 * Enable lane bonding for both links if not already enabled by
	 * for example the boot firmware.
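	 * The current width is checked first so already bonded links
	 * are left alone.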
	 */
	ret = tb_port_get_link_width(port);
	if (ret == 1) {
		ret = tb_port_set_link_width(port, 2);
		if (ret)
			goto err_lane0;
	}

	ret = tb_port_get_link_width(port->dual_link_port);
	if (ret == 1) {
		ret = tb_port_set_link_width(port->dual_link_port, 2);
		if (ret)
			goto err_lane0;
	}

	ret = tb_port_set_lane_bonding(port, true);
	if (ret)
		goto err_lane1;

	return 0;

err_lane1:
	tb_port_set_link_width(port->dual_link_port, 1);
err_lane0:
	tb_port_set_link_width(port, 1);
	return ret;
}

/**
 * tb_port_lane_bonding_disable() - Disable bonding on port
 * @port: port to disable
 *
 * Disable bonding by setting the link width of the port and the
 * other port in case of dual link port.
 */
void tb_port_lane_bonding_disable(struct tb_port *port)
{
	tb_port_set_lane_bonding(port, false);
	tb_port_set_link_width(port->dual_link_port, 1);
	tb_port_set_link_width(port, 1);
}

/**
 * tb_port_wait_for_link_width() - Wait until link reaches specific width
 * @port: Port to wait for
 * @width: Expected link width (%1 or %2)
 * @timeout_msec: Timeout in ms how long to wait
 *
 * Should be used after both ends of the link have been bonded (or
 * bonding has been disabled) to wait until the link actually reaches
 * the expected state. Returns %-ETIMEDOUT if the @width was not reached
 * within the given timeout, %0 if it did.
 */
int tb_port_wait_for_link_width(struct tb_port *port, int width,
				int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
	int ret;

	do {
		ret = tb_port_get_link_width(port);
		if (ret < 0) {
			/*
			 * Sometimes we get port locked error when
			 * polling the lanes so we can ignore it and
			 * retry.
			 */
			if (ret != -EACCES)
				return ret;
		} else if (ret == width) {
			return 0;
		}

		usleep_range(1000, 2000);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

static int tb_port_do_update_credits(struct tb_port *port)
{
	u32 nfc_credits;
	int ret;

	ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
	if (ret)
		return ret;

	if (nfc_credits != port->config.nfc_credits) {
		u32 total;

		total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
			ADP_CS_4_TOTAL_BUFFERS_SHIFT;

		tb_port_dbg(port, "total credits changed %u -> %u\n",
			    port->total_credits, total);

		port->config.nfc_credits = nfc_credits;
		port->total_credits = total;
	}

	return 0;
}

/**
 * tb_port_update_credits() - Re-read port total credits
 * @port: Port to update
 *
 * After the link is bonded (or bonding was disabled) the port total
 * credits may change, so this function needs to be called to re-read
 * the credits. Updates also the second lane adapter.
 */
int tb_port_update_credits(struct tb_port *port)
{
	int ret;

	ret = tb_port_do_update_credits(port);
	if (ret)
		return ret;
	return tb_port_do_update_credits(port->dual_link_port);
}

static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
{
	u32 phy;
	int ret;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (secondary)
		phy |= LANE_ADP_CS_1_PMS;
	else
		phy &= ~LANE_ADP_CS_1_PMS;

	return tb_port_write(port, &phy, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

static int tb_port_pm_secondary_enable(struct tb_port *port)
{
	return __tb_port_pm_secondary_set(port, true);
}

static int tb_port_pm_secondary_disable(struct tb_port *port)
{
	return __tb_port_pm_secondary_set(port, false);
}

/* Called for USB4 or Titan Ridge routers only */
static bool tb_port_clx_supported(struct tb_port *port, unsigned int clx_mask)
{
	u32 val, mask = 0;
	bool ret;

	/* Don't enable CLx in case of two single-lane links */
	if (!port->bonded && port->dual_link_port)
		return false;

	/* Don't enable CLx in case of inter-domain link */
	if (port->xdomain)
		return false;

	if (tb_switch_is_usb4(port->sw)) {
		if (!usb4_port_clx_supported(port))
			return false;
	} else if (!tb_lc_is_clx_supported(port)) {
		return false;
	}

	if (clx_mask & TB_CL1) {
		/* CL0s and CL1 are enabled and supported together */
		mask |= LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
	}
	if (clx_mask & TB_CL2)
		mask |= LANE_ADP_CS_0_CL2_SUPPORT;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);
	if (ret)
		return false;

	return !!(val & mask);
}

static int __tb_port_clx_set(struct tb_port *port, enum tb_clx clx, bool enable)
{
	u32 phy, mask;
	int ret;

	/* CL0s and CL1 are enabled and supported together */
	if (clx == TB_CL1)
		mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
	else
		/* For now we support only CL0s and CL1. Not CL2 */
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (enable)
		phy |= mask;
	else
		phy &= ~mask;

	return tb_port_write(port, &phy, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

static int tb_port_clx_disable(struct tb_port *port, enum tb_clx clx)
{
	return __tb_port_clx_set(port, clx, false);
}

static int tb_port_clx_enable(struct tb_port *port, enum tb_clx clx)
{
	return __tb_port_clx_set(port, clx, true);
}

/**
 * tb_port_is_clx_enabled() - Is given CL state enabled
 * @port: USB4 port to check
 * @clx_mask: Mask of CL states to check
 *
 * Returns true if any of the given CL states is enabled for @port.
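 * Returns false also when CLx is not supported for the port at all.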
 */
bool tb_port_is_clx_enabled(struct tb_port *port, unsigned int clx_mask)
{
	u32 val, mask = 0;
	int ret;

	if (!tb_port_clx_supported(port, clx_mask))
		return false;

	if (clx_mask & TB_CL1)
		mask |= LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
	if (clx_mask & TB_CL2)
		mask |= LANE_ADP_CS_1_CL2_ENABLE;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return false;

	return !!(val & mask);
}

static int tb_port_start_lane_initialization(struct tb_port *port)
{
	int ret;

	if (tb_switch_is_usb4(port->sw))
		return 0;

	ret = tb_lc_start_lane_initialization(port);
	return ret == -EINVAL ? 0 : ret;
}

/*
 * Returns true if the port had something (router, XDomain) connected
 * before suspend.
 */
static bool tb_port_resume(struct tb_port *port)
{
	bool has_remote = tb_port_has_remote(port);

	if (port->usb4) {
		usb4_port_device_resume(port->usb4);
	} else if (!has_remote) {
		/*
		 * For disconnected downstream lane adapters start lane
		 * initialization now so we detect future connects.
		 *
		 * For XDomain start the lane initialization now so the
		 * link gets re-established.
		 *
		 * This is only needed for non-USB4 ports.
		 */
		if (!tb_is_upstream_port(port) || port->xdomain)
			tb_port_start_lane_initialization(port);
	}

	return has_remote || port->xdomain;
}

/**
 * tb_port_is_enabled() - Is the adapter port enabled
 * @port: Port to check
 */
bool tb_port_is_enabled(struct tb_port *port)
{
	switch (port->config.type) {
	case TB_TYPE_PCIE_UP:
	case TB_TYPE_PCIE_DOWN:
		return tb_pci_port_is_enabled(port);

	case TB_TYPE_DP_HDMI_IN:
	case TB_TYPE_DP_HDMI_OUT:
		return tb_dp_port_is_enabled(port);

	case TB_TYPE_USB3_UP:
	case TB_TYPE_USB3_DOWN:
		return tb_usb3_port_is_enabled(port);

	default:
		return false;
	}
}

/**
 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
 * @port: USB3 adapter port to check
 */
bool tb_usb3_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_USB3_CS_0, 1))
		return false;

	return !!(data & ADP_USB3_CS_0_PE);
}

/**
 * tb_usb3_port_enable() - Enable USB3 adapter port
 * @port: USB3 adapter port to enable
 * @enable: Enable/disable the USB3 adapter
 */
int tb_usb3_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
			  : ADP_USB3_CS_0_V;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_USB3_CS_0, 1);
}

/**
 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
 * @port: PCIe port to check
 */
bool tb_pci_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_PCIE_CS_0, 1))
		return false;

	return !!(data & ADP_PCIE_CS_0_PE);
}

/**
 * tb_pci_port_enable() - Enable PCIe adapter port
 * @port: PCIe port to enable
 * @enable: Enable/disable the PCIe adapter
 */
int tb_pci_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_PCIE_CS_0, 1);
}

/**
 * tb_dp_port_hpd_is_active() - Is HPD already active
 * @port: DP out port to check
 *
 * Checks if the DP OUT adapter port has HDP bit already set.
 */
int tb_dp_port_hpd_is_active(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	return !!(data & ADP_DP_CS_2_HDP);
}

/**
 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
 * @port: Port to clear HPD
 *
 * If the DP IN port has HDP set, this function can be used to clear it.
 */
int tb_dp_port_hpd_clear(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_3, 1);
	if (ret)
		return ret;

	data |= ADP_DP_CS_3_HDPC;
	return tb_port_write(port, &data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_3, 1);
}

/**
 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
 * @port: DP IN/OUT port to set hops
 * @video: Video Hop ID
 * @aux_tx: AUX TX Hop ID
 * @aux_rx: AUX RX Hop ID
 *
 * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
 * router DP adapters too but does not program the values as the fields
 * are read-only.
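 * (for USB4 routers the function simply returns %0).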
 */
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
			unsigned int aux_tx, unsigned int aux_rx)
{
	u32 data[2];
	int ret;

	if (tb_switch_is_usb4(port->sw))
		return 0;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
		ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
		ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/**
 * tb_dp_port_is_enabled() - Is DP adapter port enabled
 * @port: DP adapter port to check
 */
bool tb_dp_port_is_enabled(struct tb_port *port)
{
	u32 data[2];

	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
			 ARRAY_SIZE(data)))
		return false;

	return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
}

/**
 * tb_dp_port_enable() - Enables/disables DP paths of a port
 * @port: DP IN/OUT port
 * @enable: Enable/disable DP path
 *
 * Once Hop IDs are programmed DP paths can be enabled or disabled by
 * calling this function.
 */
int tb_dp_port_enable(struct tb_port *port, bool enable)
{
	u32 data[2];
	int ret;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	if (enable)
		data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
	else
		data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/* switch utility functions */

static const char *tb_switch_generation_name(const struct tb_switch *sw)
{
	switch (sw->generation) {
	case 1:
		return "Thunderbolt 1";
	case 2:
		return "Thunderbolt 2";
	case 3:
		return "Thunderbolt 3";
	case 4:
		return "USB4";
	default:
		return "Unknown";
	}
}

static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
{
	const struct tb_regs_switch_header *regs = &sw->config;

	tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
	       tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version);
	tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number);
	tb_dbg(tb, " Config:\n");
	tb_dbg(tb,
	       " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
	       regs->upstream_port_number, regs->depth,
	       (((u64) regs->route_hi) << 32) | regs->route_lo,
	       regs->enabled, regs->plug_events_delay);
	tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
	       regs->__unknown1, regs->__unknown4);
}

/**
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 * @sw: Switch to reset
 *
 * Return: Returns 0 on success or an error code on failure.
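 * Note only generation 1 switches are actually reset; newer ones return 0.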
 */
int tb_switch_reset(struct tb_switch *sw)
{
	struct tb_cfg_result res;

	if (sw->generation > 1)
		return 0;

	tb_sw_dbg(sw, "resetting switch\n");

	res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
			      TB_CFG_SWITCH, 2, 2);
	if (res.err)
		return res.err;
	res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
	if (res.err > 0)
		return -EIO;
	return res.err;
}

/**
 * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
 * @sw: Router to read the offset value from
 * @offset: Offset in the router config space to read from
 * @bit: Bit mask in the offset to wait for
 * @value: Value of the bits to wait for
 * @timeout_msec: Timeout in ms how long to wait
 *
 * Wait until the specified bits in the specified offset reach the given value.
 * Returns %0 in case of success, %-ETIMEDOUT if the @value was not reached
 * within the given timeout or a negative errno in case of failure.
 */
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
			   u32 value, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
		if (ret)
			return ret;

		if ((val & bit) == value)
			return 0;

		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

/*
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
	u32 data;
	int res;

	if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
		return 0;

	sw->config.plug_events_delay = 0xff;
	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
	if (res)
		return res;

	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
	if (res)
		return res;

	if (active) {
		data = data & 0xFFFFFF83;
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
			break;
		default:
			/*
			 * Skip Alpine Ridge, it needs to have vendor
			 * specific USB hotplug event enabled for the
			 * internal xHCI to work.
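			 * All other device IDs get USB hotplug events
			 * disabled below.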
			 */
			if (!tb_switch_is_alpine_ridge(sw))
				data |= TB_PLUG_EVENTS_USB_DISABLE;
		}
	} else {
		data = data | 0x7c;
	}
	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
			   sw->cap_plug_events + 1, 1);
}

static ssize_t authorized_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u\n", sw->authorized);
}

static int disapprove_switch(struct device *dev, void *not_used)
{
	char *envp[] = { "AUTHORIZED=0", NULL };
	struct tb_switch *sw;

	sw = tb_to_switch(dev);
	if (sw && sw->authorized) {
		int ret;

		/* First children */
		ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
		if (ret)
			return ret;

		ret = tb_domain_disapprove_switch(sw->tb, sw);
		if (ret)
			return ret;

		sw->authorized = 0;
		kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
	}

	return 0;
}

static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
	char envp_string[13];
	int ret = -EINVAL;
	char *envp[] = { envp_string, NULL };

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (!!sw->authorized == !!val)
		goto unlock;

	switch (val) {
	/* Disapprove switch */
	case 0:
		if (tb_route(sw)) {
			ret = disapprove_switch(&sw->dev, NULL);
			goto unlock;
		}
		break;

	/* Approve switch */
	case 1:
		if (sw->key)
			ret = tb_domain_approve_switch_key(sw->tb, sw);
		else
			ret = tb_domain_approve_switch(sw->tb, sw);
		break;

	/* Challenge switch */
	case 2:
		if (sw->key)
			ret = tb_domain_challenge_switch_key(sw->tb, sw);
		break;

	default:
		break;
	}

	if (!ret) {
		sw->authorized = val;
		/*
		 * Notify status change to the userspace, informing the new
		 * value of /sys/bus/thunderbolt/devices/.../authorized.
		 */
		sprintf(envp_string, "AUTHORIZED=%u", sw->authorized);
		kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
	}

unlock:
	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t authorized_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int val;
	ssize_t ret;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 2)
		return -EINVAL;

	pm_runtime_get_sync(&sw->dev);
	ret = tb_switch_set_authorized(sw, val);
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(authorized);

static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u\n", sw->boot);
}
static DEVICE_ATTR_RO(boot);

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%#x\n", sw->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%s\n", sw->device_name ?: "");
}
static DEVICE_ATTR_RO(device_name);

static ssize_t
generation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u\n", sw->generation);
}
static DEVICE_ATTR_RO(generation);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	ssize_t ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->key)
		ret = sysfs_emit(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
	else
		ret = sysfs_emit(buf, "\n");

	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t key_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u8 key[TB_SWITCH_KEY_SIZE];
	ssize_t ret = count;
	bool clear = false;

	if (!strcmp(buf, "\n"))
		clear = true;
	else if (hex2bin(key, buf, sizeof(key)))
		return -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->authorized) {
		ret = -EBUSY;
	} else {
		kfree(sw->key);
		if (clear) {
			sw->key = NULL;
		} else {
			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
			if (!sw->key)
				ret = -ENOMEM;
		}
	}

	mutex_unlock(&sw->tb->lock);
	return ret;
}
static DEVICE_ATTR(key, 0600, key_show, key_store);

static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u.0 Gb/s\n", sw->link_speed);
}

/*
 * Currently all lanes must run at the same speed but we expose here
 * both directions to allow possible asymmetric links in the future.
 */
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);

static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u\n", sw->link_width);
}

/*
 * Currently the link has the same number of lanes in both directions (1 or 2)
 * but expose them separately to allow possible asymmetric links in the future.
 */
static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);

static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u32 status;

	nvm_get_auth_status(sw, &status);
	return sysfs_emit(buf, "%#x\n", status);
}

static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
				      bool disconnect)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int val, ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	if (sw->no_nvm_upgrade) {
		ret = -EOPNOTSUPP;
		goto exit_unlock;
	}

	/* If NVMem devices are not yet added */
	if (!sw->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear the authentication status */
	nvm_clear_auth_status(sw);

	if (val > 0) {
		if (val == AUTHENTICATE_ONLY) {
			if (disconnect)
				ret = -EINVAL;
			else
				ret = nvm_authenticate(sw, true);
		} else {
			if (!sw->nvm->flushed) {
				if (!sw->nvm->buf) {
					ret = -EINVAL;
					goto exit_unlock;
				}

				ret = nvm_validate_and_write(sw);
				if (ret || val == WRITE_ONLY)
					goto exit_unlock;
			}
			if (val == WRITE_AND_AUTHENTICATE) {
				if (disconnect)
					ret = tb_lc_force_power(sw);
				else
					ret = nvm_authenticate(sw, false);
			}
		}
	}

exit_unlock:
	mutex_unlock(&sw->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	int ret = nvm_authenticate_sysfs(dev, buf, false);

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return nvm_authenticate_show(dev, attr, buf);
}

static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;

	ret = nvm_authenticate_sysfs(dev, buf, true);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);

static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->safe_mode)
		ret = -ENODATA;
	else if (!sw->nvm)
		ret = -EAGAIN;
	else
		ret = sysfs_emit(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);

	mutex_unlock(&sw->tb->lock);

	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%#x\n", sw->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%s\n", sw->vendor_name ?: "");
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%pUb\n", sw->uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *switch_attrs[] = {
	&dev_attr_authorized.attr,
	&dev_attr_boot.attr,
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_generation.attr,
	&dev_attr_key.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_authenticate_on_disconnect.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_rx_speed.attr,
	&dev_attr_rx_lanes.attr,
	&dev_attr_tx_speed.attr,
	&dev_attr_tx_lanes.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	&dev_attr_unique_id.attr,
	NULL,
};

static umode_t switch_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct tb_switch *sw = tb_to_switch(dev);

	if (attr == &dev_attr_authorized.attr) {
		if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
		    sw->tb->security_level == TB_SECURITY_DPONLY)
			return 0;
	} else if (attr == &dev_attr_device.attr) {
		if (!sw->device)
			return 0;
	} else if (attr == &dev_attr_device_name.attr) {
		if (!sw->device_name)
			return 0;
	} else if (attr == &dev_attr_vendor.attr) {
		if (!sw->vendor)
			return 0;
	} else if (attr == &dev_attr_vendor_name.attr) {
		if (!sw->vendor_name)
			return 0;
	} else if (attr == &dev_attr_key.attr) {
		if (tb_route(sw) &&
		    sw->tb->security_level == TB_SECURITY_SECURE &&
		    sw->security_level == TB_SECURITY_SECURE)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_rx_speed.attr ||
		   attr == &dev_attr_rx_lanes.attr ||
		   attr == &dev_attr_tx_speed.attr ||
		   attr == &dev_attr_tx_lanes.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate.attr) {
		if (nvm_upgradeable(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_version.attr) {
		if (nvm_readable(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_boot.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
(attr == &dev_attr_nvm_authenticate_on_disconnect.attr) { 2139 if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER) 2140 return attr->mode; 2141 return 0; 2142 } 2143 2144 return sw->safe_mode ? 0 : attr->mode; 2145 } 2146 2147 static const struct attribute_group switch_group = { 2148 .is_visible = switch_attr_is_visible, 2149 .attrs = switch_attrs, 2150 }; 2151 2152 static const struct attribute_group *switch_groups[] = { 2153 &switch_group, 2154 NULL, 2155 }; 2156 2157 static void tb_switch_release(struct device *dev) 2158 { 2159 struct tb_switch *sw = tb_to_switch(dev); 2160 struct tb_port *port; 2161 2162 dma_port_free(sw->dma_port); 2163 2164 tb_switch_for_each_port(sw, port) { 2165 ida_destroy(&port->in_hopids); 2166 ida_destroy(&port->out_hopids); 2167 } 2168 2169 kfree(sw->uuid); 2170 kfree(sw->device_name); 2171 kfree(sw->vendor_name); 2172 kfree(sw->ports); 2173 kfree(sw->drom); 2174 kfree(sw->key); 2175 kfree(sw); 2176 } 2177 2178 static int tb_switch_uevent(struct device *dev, struct kobj_uevent_env *env) 2179 { 2180 struct tb_switch *sw = tb_to_switch(dev); 2181 const char *type; 2182 2183 if (sw->config.thunderbolt_version == USB4_VERSION_1_0) { 2184 if (add_uevent_var(env, "USB4_VERSION=1.0")) 2185 return -ENOMEM; 2186 } 2187 2188 if (!tb_route(sw)) { 2189 type = "host"; 2190 } else { 2191 const struct tb_port *port; 2192 bool hub = false; 2193 2194 /* Device is hub if it has any downstream ports */ 2195 tb_switch_for_each_port(sw, port) { 2196 if (!port->disabled && !tb_is_upstream_port(port) && 2197 tb_port_is_null(port)) { 2198 hub = true; 2199 break; 2200 } 2201 } 2202 2203 type = hub ? "hub" : "device"; 2204 } 2205 2206 if (add_uevent_var(env, "USB4_TYPE=%s", type)) 2207 return -ENOMEM; 2208 return 0; 2209 } 2210 2211 /* 2212 * Currently only need to provide the callbacks. Everything else is handled 2213 * in the connection manager. 
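* The callbacks below simply forward to the connection manager's
* runtime_suspend_switch() and runtime_resume_switch() hooks when the
* active connection manager implements them; otherwise they are no-ops.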
2214 */ 2215 static int __maybe_unused tb_switch_runtime_suspend(struct device *dev) 2216 { 2217 struct tb_switch *sw = tb_to_switch(dev); 2218 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; 2219 2220 if (cm_ops->runtime_suspend_switch) 2221 return cm_ops->runtime_suspend_switch(sw); 2222 2223 return 0; 2224 } 2225 2226 static int __maybe_unused tb_switch_runtime_resume(struct device *dev) 2227 { 2228 struct tb_switch *sw = tb_to_switch(dev); 2229 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; 2230 2231 if (cm_ops->runtime_resume_switch) 2232 return cm_ops->runtime_resume_switch(sw); 2233 return 0; 2234 } 2235 2236 static const struct dev_pm_ops tb_switch_pm_ops = { 2237 SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume, 2238 NULL) 2239 }; 2240 2241 struct device_type tb_switch_type = { 2242 .name = "thunderbolt_device", 2243 .release = tb_switch_release, 2244 .uevent = tb_switch_uevent, 2245 .pm = &tb_switch_pm_ops, 2246 }; 2247 2248 static int tb_switch_get_generation(struct tb_switch *sw) 2249 { 2250 switch (sw->config.device_id) { 2251 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE: 2252 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE: 2253 case PCI_DEVICE_ID_INTEL_LIGHT_PEAK: 2254 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C: 2255 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C: 2256 case PCI_DEVICE_ID_INTEL_PORT_RIDGE: 2257 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE: 2258 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE: 2259 return 1; 2260 2261 case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE: 2262 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE: 2263 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE: 2264 return 2; 2265 2266 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: 2267 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE: 2268 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE: 2269 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: 2270 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: 2271 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: 2272 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: 2273 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE: 2274 case PCI_DEVICE_ID_INTEL_ICL_NHI0: 2275 case PCI_DEVICE_ID_INTEL_ICL_NHI1: 2276 return 3; 2277 2278 default: 2279 if (tb_switch_is_usb4(sw)) 2280 return 4; 2281 2282 /* 2283 * For unknown switches assume generation to be 1 to be 2284 * on the safe side. 2285 */ 2286 tb_sw_warn(sw, "unsupported switch device id %#x\n", 2287 sw->config.device_id); 2288 return 1; 2289 } 2290 } 2291 2292 static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth) 2293 { 2294 int max_depth; 2295 2296 if (tb_switch_is_usb4(sw) || 2297 (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch))) 2298 max_depth = USB4_SWITCH_MAX_DEPTH; 2299 else 2300 max_depth = TB_SWITCH_MAX_DEPTH; 2301 2302 return depth > max_depth; 2303 } 2304 2305 /** 2306 * tb_switch_alloc() - allocate a switch 2307 * @tb: Pointer to the owning domain 2308 * @parent: Parent device for this switch 2309 * @route: Route string for this switch 2310 * 2311 * Allocates and initializes a switch. Will not upload configuration to 2312 * the switch. For that you need to call tb_switch_configure() 2313 * separately. The returned switch should be released by calling 2314 * tb_switch_put(). 2315 * 2316 * Return: Pointer to the allocated switch or ERR_PTR() in case of 2317 * failure. 
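*
* A simplified call sequence, for illustration only (error handling
* trimmed; see the connection managers for the real flows):
*
*     sw = tb_switch_alloc(tb, parent, route);
*     if (!IS_ERR(sw)) {
*             if (tb_switch_configure(sw) || tb_switch_add(sw))
*                     tb_switch_put(sw);
*     }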
2318 */ 2319 struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent, 2320 u64 route) 2321 { 2322 struct tb_switch *sw; 2323 int upstream_port; 2324 int i, ret, depth; 2325 2326 /* Unlock the downstream port so we can access the switch below */ 2327 if (route) { 2328 struct tb_switch *parent_sw = tb_to_switch(parent); 2329 struct tb_port *down; 2330 2331 down = tb_port_at(route, parent_sw); 2332 tb_port_unlock(down); 2333 } 2334 2335 depth = tb_route_length(route); 2336 2337 upstream_port = tb_cfg_get_upstream_port(tb->ctl, route); 2338 if (upstream_port < 0) 2339 return ERR_PTR(upstream_port); 2340 2341 sw = kzalloc(sizeof(*sw), GFP_KERNEL); 2342 if (!sw) 2343 return ERR_PTR(-ENOMEM); 2344 2345 sw->tb = tb; 2346 ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5); 2347 if (ret) 2348 goto err_free_sw_ports; 2349 2350 sw->generation = tb_switch_get_generation(sw); 2351 2352 tb_dbg(tb, "current switch config:\n"); 2353 tb_dump_switch(tb, sw); 2354 2355 /* configure switch */ 2356 sw->config.upstream_port_number = upstream_port; 2357 sw->config.depth = depth; 2358 sw->config.route_hi = upper_32_bits(route); 2359 sw->config.route_lo = lower_32_bits(route); 2360 sw->config.enabled = 0; 2361 2362 /* Make sure we do not exceed maximum topology limit */ 2363 if (tb_switch_exceeds_max_depth(sw, depth)) { 2364 ret = -EADDRNOTAVAIL; 2365 goto err_free_sw_ports; 2366 } 2367 2368 /* initialize ports */ 2369 sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports), 2370 GFP_KERNEL); 2371 if (!sw->ports) { 2372 ret = -ENOMEM; 2373 goto err_free_sw_ports; 2374 } 2375 2376 for (i = 0; i <= sw->config.max_port_number; i++) { 2377 /* minimum setup for tb_find_cap and tb_drom_read to work */ 2378 sw->ports[i].sw = sw; 2379 sw->ports[i].port = i; 2380 2381 /* Control port does not need HopID allocation */ 2382 if (i) { 2383 ida_init(&sw->ports[i].in_hopids); 2384 ida_init(&sw->ports[i].out_hopids); 2385 } 2386 } 2387 2388 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS); 2389 if (ret > 0) 2390 sw->cap_plug_events = ret; 2391 2392 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2); 2393 if (ret > 0) 2394 sw->cap_vsec_tmu = ret; 2395 2396 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER); 2397 if (ret > 0) 2398 sw->cap_lc = ret; 2399 2400 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP); 2401 if (ret > 0) 2402 sw->cap_lp = ret; 2403 2404 /* Root switch is always authorized */ 2405 if (!route) 2406 sw->authorized = true; 2407 2408 device_initialize(&sw->dev); 2409 sw->dev.parent = parent; 2410 sw->dev.bus = &tb_bus_type; 2411 sw->dev.type = &tb_switch_type; 2412 sw->dev.groups = switch_groups; 2413 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw)); 2414 2415 return sw; 2416 2417 err_free_sw_ports: 2418 kfree(sw->ports); 2419 kfree(sw); 2420 2421 return ERR_PTR(ret); 2422 } 2423 2424 /** 2425 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode 2426 * @tb: Pointer to the owning domain 2427 * @parent: Parent device for this switch 2428 * @route: Route string for this switch 2429 * 2430 * This creates a switch in safe mode. This means the switch pretty much 2431 * lacks all capabilities except DMA configuration port before it is 2432 * flashed with a valid NVM firmware. 2433 * 2434 * The returned switch must be released by calling tb_switch_put(). 
2435 * 2436 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure 2437 */ 2438 struct tb_switch * 2439 tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route) 2440 { 2441 struct tb_switch *sw; 2442 2443 sw = kzalloc(sizeof(*sw), GFP_KERNEL); 2444 if (!sw) 2445 return ERR_PTR(-ENOMEM); 2446 2447 sw->tb = tb; 2448 sw->config.depth = tb_route_length(route); 2449 sw->config.route_hi = upper_32_bits(route); 2450 sw->config.route_lo = lower_32_bits(route); 2451 sw->safe_mode = true; 2452 2453 device_initialize(&sw->dev); 2454 sw->dev.parent = parent; 2455 sw->dev.bus = &tb_bus_type; 2456 sw->dev.type = &tb_switch_type; 2457 sw->dev.groups = switch_groups; 2458 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw)); 2459 2460 return sw; 2461 } 2462 2463 /** 2464 * tb_switch_configure() - Uploads configuration to the switch 2465 * @sw: Switch to configure 2466 * 2467 * Call this function before the switch is added to the system. It will 2468 * upload configuration to the switch and makes it available for the 2469 * connection manager to use. Can be called to the switch again after 2470 * resume from low power states to re-initialize it. 2471 * 2472 * Return: %0 in case of success and negative errno in case of failure 2473 */ 2474 int tb_switch_configure(struct tb_switch *sw) 2475 { 2476 struct tb *tb = sw->tb; 2477 u64 route; 2478 int ret; 2479 2480 route = tb_route(sw); 2481 2482 tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n", 2483 sw->config.enabled ? "restoring" : "initializing", route, 2484 tb_route_length(route), sw->config.upstream_port_number); 2485 2486 sw->config.enabled = 1; 2487 2488 if (tb_switch_is_usb4(sw)) { 2489 /* 2490 * For USB4 devices, we need to program the CM version 2491 * accordingly so that it knows to expose all the 2492 * additional capabilities. 2493 */ 2494 sw->config.cmuv = USB4_VERSION_1_0; 2495 sw->config.plug_events_delay = 0xa; 2496 2497 /* Enumerate the switch */ 2498 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, 2499 ROUTER_CS_1, 4); 2500 if (ret) 2501 return ret; 2502 2503 ret = usb4_switch_setup(sw); 2504 } else { 2505 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) 2506 tb_sw_warn(sw, "unknown switch vendor id %#x\n", 2507 sw->config.vendor_id); 2508 2509 if (!sw->cap_plug_events) { 2510 tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n"); 2511 return -ENODEV; 2512 } 2513 2514 /* Enumerate the switch */ 2515 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, 2516 ROUTER_CS_1, 3); 2517 } 2518 if (ret) 2519 return ret; 2520 2521 return tb_plug_events_active(sw, true); 2522 } 2523 2524 static int tb_switch_set_uuid(struct tb_switch *sw) 2525 { 2526 bool uid = false; 2527 u32 uuid[4]; 2528 int ret; 2529 2530 if (sw->uuid) 2531 return 0; 2532 2533 if (tb_switch_is_usb4(sw)) { 2534 ret = usb4_switch_read_uid(sw, &sw->uid); 2535 if (ret) 2536 return ret; 2537 uid = true; 2538 } else { 2539 /* 2540 * The newer controllers include fused UUID as part of 2541 * link controller specific registers 2542 */ 2543 ret = tb_lc_read_uuid(sw, uuid); 2544 if (ret) { 2545 if (ret != -EINVAL) 2546 return ret; 2547 uid = true; 2548 } 2549 } 2550 2551 if (uid) { 2552 /* 2553 * ICM generates UUID based on UID and fills the upper 2554 * two words with ones. This is not strictly following 2555 * UUID format but we want to be compatible with it so 2556 * we do the same here. 
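* For example, a UID of 0x0123456789abcdef ends up as the words
* { 0x89abcdef, 0x01234567, 0xffffffff, 0xffffffff } below.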
2557 */ 2558 uuid[0] = sw->uid & 0xffffffff; 2559 uuid[1] = (sw->uid >> 32) & 0xffffffff; 2560 uuid[2] = 0xffffffff; 2561 uuid[3] = 0xffffffff; 2562 } 2563 2564 sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL); 2565 if (!sw->uuid) 2566 return -ENOMEM; 2567 return 0; 2568 } 2569 2570 static int tb_switch_add_dma_port(struct tb_switch *sw) 2571 { 2572 u32 status; 2573 int ret; 2574 2575 switch (sw->generation) { 2576 case 2: 2577 /* Only root switch can be upgraded */ 2578 if (tb_route(sw)) 2579 return 0; 2580 2581 fallthrough; 2582 case 3: 2583 case 4: 2584 ret = tb_switch_set_uuid(sw); 2585 if (ret) 2586 return ret; 2587 break; 2588 2589 default: 2590 /* 2591 * DMA port is the only thing available when the switch 2592 * is in safe mode. 2593 */ 2594 if (!sw->safe_mode) 2595 return 0; 2596 break; 2597 } 2598 2599 if (sw->no_nvm_upgrade) 2600 return 0; 2601 2602 if (tb_switch_is_usb4(sw)) { 2603 ret = usb4_switch_nvm_authenticate_status(sw, &status); 2604 if (ret) 2605 return ret; 2606 2607 if (status) { 2608 tb_sw_info(sw, "switch flash authentication failed\n"); 2609 nvm_set_auth_status(sw, status); 2610 } 2611 2612 return 0; 2613 } 2614 2615 /* Root switch DMA port requires running firmware */ 2616 if (!tb_route(sw) && !tb_switch_is_icm(sw)) 2617 return 0; 2618 2619 sw->dma_port = dma_port_alloc(sw); 2620 if (!sw->dma_port) 2621 return 0; 2622 2623 /* 2624 * If there is status already set then authentication failed 2625 * when the dma_port_flash_update_auth() returned. Power cycling 2626 * is not needed (it was done already) so only thing we do here 2627 * is to unblock runtime PM of the root port. 2628 */ 2629 nvm_get_auth_status(sw, &status); 2630 if (status) { 2631 if (!tb_route(sw)) 2632 nvm_authenticate_complete_dma_port(sw); 2633 return 0; 2634 } 2635 2636 /* 2637 * Check status of the previous flash authentication. If there 2638 * is one we need to power cycle the switch in any case to make 2639 * it functional again. 2640 */ 2641 ret = dma_port_flash_update_auth_status(sw->dma_port, &status); 2642 if (ret <= 0) 2643 return ret; 2644 2645 /* Now we can allow root port to suspend again */ 2646 if (!tb_route(sw)) 2647 nvm_authenticate_complete_dma_port(sw); 2648 2649 if (status) { 2650 tb_sw_info(sw, "switch flash authentication failed\n"); 2651 nvm_set_auth_status(sw, status); 2652 } 2653 2654 tb_sw_info(sw, "power cycling the switch now\n"); 2655 dma_port_power_cycle(sw->dma_port); 2656 2657 /* 2658 * We return error here which causes the switch adding failure. 2659 * It should appear back after power cycle is complete. 
2660 */
2661 return -ESHUTDOWN;
2662 }
2663 
2664 static void tb_switch_default_link_ports(struct tb_switch *sw)
2665 {
2666 int i;
2667 
2668 for (i = 1; i <= sw->config.max_port_number; i++) {
2669 struct tb_port *port = &sw->ports[i];
2670 struct tb_port *subordinate;
2671 
2672 if (!tb_port_is_null(port))
2673 continue;
2674 
2675 /* Check for the subordinate port */
2676 if (i == sw->config.max_port_number ||
2677 !tb_port_is_null(&sw->ports[i + 1]))
2678 continue;
2679 
2680 /* Link them if not already done so (by DROM) */
2681 subordinate = &sw->ports[i + 1];
2682 if (!port->dual_link_port && !subordinate->dual_link_port) {
2683 port->link_nr = 0;
2684 port->dual_link_port = subordinate;
2685 subordinate->link_nr = 1;
2686 subordinate->dual_link_port = port;
2687 
2688 tb_sw_dbg(sw, "linked ports %d <-> %d\n",
2689 port->port, subordinate->port);
2690 }
2691 }
2692 }
2693 
2694 static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
2695 {
2696 const struct tb_port *up = tb_upstream_port(sw);
2697 
2698 if (!up->dual_link_port || !up->dual_link_port->remote)
2699 return false;
2700 
2701 if (tb_switch_is_usb4(sw))
2702 return usb4_switch_lane_bonding_possible(sw);
2703 return tb_lc_lane_bonding_possible(sw);
2704 }
2705 
2706 static int tb_switch_update_link_attributes(struct tb_switch *sw)
2707 {
2708 struct tb_port *up;
2709 bool change = false;
2710 int ret;
2711 
2712 if (!tb_route(sw) || tb_switch_is_icm(sw))
2713 return 0;
2714 
2715 up = tb_upstream_port(sw);
2716 
2717 ret = tb_port_get_link_speed(up);
2718 if (ret < 0)
2719 return ret;
2720 if (sw->link_speed != ret)
2721 change = true;
2722 sw->link_speed = ret;
2723 
2724 ret = tb_port_get_link_width(up);
2725 if (ret < 0)
2726 return ret;
2727 if (sw->link_width != ret)
2728 change = true;
2729 sw->link_width = ret;
2730 
2731 /* Notify userspace that there is a possible link attribute change */
2732 if (device_is_registered(&sw->dev) && change)
2733 kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
2734 
2735 return 0;
2736 }
2737 
2738 /**
2739 * tb_switch_lane_bonding_enable() - Enable lane bonding
2740 * @sw: Switch to enable lane bonding for
2741 *
2742 * The connection manager can call this function to enable lane bonding of a
2743 * switch. If conditions are correct and both switches support the feature,
2744 * lanes are bonded. It is safe to call this for any switch.
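*
* Return: %0 in case of success (including when lane bonding is not
* performed) and negative errno in case of failure.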
2745 */
2746 int tb_switch_lane_bonding_enable(struct tb_switch *sw)
2747 {
2748 struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2749 struct tb_port *up, *down;
2750 u64 route = tb_route(sw);
2751 int ret;
2752 
2753 if (!route)
2754 return 0;
2755 
2756 if (!tb_switch_lane_bonding_possible(sw))
2757 return 0;
2758 
2759 up = tb_upstream_port(sw);
2760 down = tb_port_at(route, parent);
2761 
2762 if (!tb_port_is_width_supported(up, 2) ||
2763 !tb_port_is_width_supported(down, 2))
2764 return 0;
2765 
2766 ret = tb_port_lane_bonding_enable(up);
2767 if (ret) {
2768 tb_port_warn(up, "failed to enable lane bonding\n");
2769 return ret;
2770 }
2771 
2772 ret = tb_port_lane_bonding_enable(down);
2773 if (ret) {
2774 tb_port_warn(down, "failed to enable lane bonding\n");
2775 tb_port_lane_bonding_disable(up);
2776 return ret;
2777 }
2778 
2779 ret = tb_port_wait_for_link_width(down, 2, 100);
2780 if (ret) {
2781 tb_port_warn(down, "timeout enabling lane bonding\n");
2782 return ret;
2783 }
2784 
2785 tb_port_update_credits(down);
2786 tb_port_update_credits(up);
2787 tb_switch_update_link_attributes(sw);
2788 
2789 tb_sw_dbg(sw, "lane bonding enabled\n");
2790 return ret;
2791 }
2792 
2793 /**
2794 * tb_switch_lane_bonding_disable() - Disable lane bonding
2795 * @sw: Switch whose lane bonding to disable
2796 *
2797 * Disables lane bonding between @sw and its parent. This can be called even
2798 * if lanes were not bonded originally.
2799 */
2800 void tb_switch_lane_bonding_disable(struct tb_switch *sw)
2801 {
2802 struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2803 struct tb_port *up, *down;
2804 
2805 if (!tb_route(sw))
2806 return;
2807 
2808 up = tb_upstream_port(sw);
2809 if (!up->bonded)
2810 return;
2811 
2812 down = tb_port_at(tb_route(sw), parent);
2813 
2814 tb_port_lane_bonding_disable(up);
2815 tb_port_lane_bonding_disable(down);
2816 
2817 /*
2818 * It is fine if we get other errors as the router might have
2819 * been unplugged.
2820 */
2821 if (tb_port_wait_for_link_width(down, 1, 100) == -ETIMEDOUT)
2822 tb_sw_warn(sw, "timeout disabling lane bonding\n");
2823 
2824 tb_port_update_credits(down);
2825 tb_port_update_credits(up);
2826 tb_switch_update_link_attributes(sw);
2827 
2828 tb_sw_dbg(sw, "lane bonding disabled\n");
2829 }
2830 
2831 /**
2832 * tb_switch_configure_link() - Set link configured
2833 * @sw: Switch whose link is configured
2834 *
2835 * Sets the link upstream from @sw configured (from both ends) so that
2836 * it will not be disconnected when the domain exits sleep. Can be
2837 * called for any switch.
2838 *
2839 * It is recommended that this is called after lane bonding is enabled.
2840 *
2841 * Return: %0 on success and negative errno in case of error.
2842 */
2843 int tb_switch_configure_link(struct tb_switch *sw)
2844 {
2845 struct tb_port *up, *down;
2846 int ret;
2847 
2848 if (!tb_route(sw) || tb_switch_is_icm(sw))
2849 return 0;
2850 
2851 up = tb_upstream_port(sw);
2852 if (tb_switch_is_usb4(up->sw))
2853 ret = usb4_port_configure(up);
2854 else
2855 ret = tb_lc_configure_port(up);
2856 if (ret)
2857 return ret;
2858 
2859 down = up->remote;
2860 if (tb_switch_is_usb4(down->sw))
2861 return usb4_port_configure(down);
2862 return tb_lc_configure_port(down);
2863 }
2864 
2865 /**
2866 * tb_switch_unconfigure_link() - Unconfigure link
2867 * @sw: Switch whose link is unconfigured
2868 *
2869 * Sets the link unconfigured so the @sw will be disconnected if the
2870 * domain exits sleep.
2871 */
2872 void tb_switch_unconfigure_link(struct tb_switch *sw)
2873 {
2874 struct tb_port *up, *down;
2875 
2876 if (sw->is_unplugged)
2877 return;
2878 if (!tb_route(sw) || tb_switch_is_icm(sw))
2879 return;
2880 
2881 up = tb_upstream_port(sw);
2882 if (tb_switch_is_usb4(up->sw))
2883 usb4_port_unconfigure(up);
2884 else
2885 tb_lc_unconfigure_port(up);
2886 
2887 down = up->remote;
2888 if (tb_switch_is_usb4(down->sw))
2889 usb4_port_unconfigure(down);
2890 else
2891 tb_lc_unconfigure_port(down);
2892 }
2893 
2894 static void tb_switch_credits_init(struct tb_switch *sw)
2895 {
2896 if (tb_switch_is_icm(sw))
2897 return;
2898 if (!tb_switch_is_usb4(sw))
2899 return;
2900 if (usb4_switch_credits_init(sw))
2901 tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n");
2902 }
2903 
2904 static int tb_switch_port_hotplug_enable(struct tb_switch *sw)
2905 {
2906 struct tb_port *port;
2907 
2908 if (tb_switch_is_icm(sw))
2909 return 0;
2910 
2911 tb_switch_for_each_port(sw, port) {
2912 int res;
2913 
2914 if (!port->cap_usb4)
2915 continue;
2916 
2917 res = usb4_port_hotplug_enable(port);
2918 if (res)
2919 return res;
2920 }
2921 return 0;
2922 }
2923 
2924 /**
2925 * tb_switch_add() - Add a switch to the domain
2926 * @sw: Switch to add
2927 *
2928 * This is the last step in adding a switch to the domain. It will read
2929 * identification information from the DROM and initialize ports so that
2930 * they can be used to connect other switches. The switch will be
2931 * exposed to userspace when this function successfully returns. To
2932 * remove and release the switch, call tb_switch_remove().
2933 *
2934 * Return: %0 in case of success and negative errno in case of failure
2935 */
2936 int tb_switch_add(struct tb_switch *sw)
2937 {
2938 int i, ret;
2939 
2940 /*
2941 * Initialize the DMA control port now before we read the DROM. Recent
2942 * host controllers have a more complete DROM in NVM that includes
2943 * vendor and model identification strings which we then expose
2944 * to userspace. NVM can be accessed through the DMA
2945 * configuration based mailbox.
2946 */ 2947 ret = tb_switch_add_dma_port(sw); 2948 if (ret) { 2949 dev_err(&sw->dev, "failed to add DMA port\n"); 2950 return ret; 2951 } 2952 2953 if (!sw->safe_mode) { 2954 tb_switch_credits_init(sw); 2955 2956 /* read drom */ 2957 ret = tb_drom_read(sw); 2958 if (ret) 2959 dev_warn(&sw->dev, "reading DROM failed: %d\n", ret); 2960 tb_sw_dbg(sw, "uid: %#llx\n", sw->uid); 2961 2962 tb_check_quirks(sw); 2963 2964 ret = tb_switch_set_uuid(sw); 2965 if (ret) { 2966 dev_err(&sw->dev, "failed to set UUID\n"); 2967 return ret; 2968 } 2969 2970 for (i = 0; i <= sw->config.max_port_number; i++) { 2971 if (sw->ports[i].disabled) { 2972 tb_port_dbg(&sw->ports[i], "disabled by eeprom\n"); 2973 continue; 2974 } 2975 ret = tb_init_port(&sw->ports[i]); 2976 if (ret) { 2977 dev_err(&sw->dev, "failed to initialize port %d\n", i); 2978 return ret; 2979 } 2980 } 2981 2982 tb_switch_default_link_ports(sw); 2983 2984 ret = tb_switch_update_link_attributes(sw); 2985 if (ret) 2986 return ret; 2987 2988 ret = tb_switch_tmu_init(sw); 2989 if (ret) 2990 return ret; 2991 } 2992 2993 ret = tb_switch_port_hotplug_enable(sw); 2994 if (ret) 2995 return ret; 2996 2997 ret = device_add(&sw->dev); 2998 if (ret) { 2999 dev_err(&sw->dev, "failed to add device: %d\n", ret); 3000 return ret; 3001 } 3002 3003 if (tb_route(sw)) { 3004 dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n", 3005 sw->vendor, sw->device); 3006 if (sw->vendor_name && sw->device_name) 3007 dev_info(&sw->dev, "%s %s\n", sw->vendor_name, 3008 sw->device_name); 3009 } 3010 3011 ret = usb4_switch_add_ports(sw); 3012 if (ret) { 3013 dev_err(&sw->dev, "failed to add USB4 ports\n"); 3014 goto err_del; 3015 } 3016 3017 ret = tb_switch_nvm_add(sw); 3018 if (ret) { 3019 dev_err(&sw->dev, "failed to add NVM devices\n"); 3020 goto err_ports; 3021 } 3022 3023 /* 3024 * Thunderbolt routers do not generate wakeups themselves but 3025 * they forward wakeups from tunneled protocols, so enable it 3026 * here. 3027 */ 3028 device_init_wakeup(&sw->dev, true); 3029 3030 pm_runtime_set_active(&sw->dev); 3031 if (sw->rpm) { 3032 pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY); 3033 pm_runtime_use_autosuspend(&sw->dev); 3034 pm_runtime_mark_last_busy(&sw->dev); 3035 pm_runtime_enable(&sw->dev); 3036 pm_request_autosuspend(&sw->dev); 3037 } 3038 3039 tb_switch_debugfs_init(sw); 3040 return 0; 3041 3042 err_ports: 3043 usb4_switch_remove_ports(sw); 3044 err_del: 3045 device_del(&sw->dev); 3046 3047 return ret; 3048 } 3049 3050 /** 3051 * tb_switch_remove() - Remove and release a switch 3052 * @sw: Switch to remove 3053 * 3054 * This will remove the switch from the domain and release it after last 3055 * reference count drops to zero. If there are switches connected below 3056 * this switch, they will be removed as well. 
3057 */ 3058 void tb_switch_remove(struct tb_switch *sw) 3059 { 3060 struct tb_port *port; 3061 3062 tb_switch_debugfs_remove(sw); 3063 3064 if (sw->rpm) { 3065 pm_runtime_get_sync(&sw->dev); 3066 pm_runtime_disable(&sw->dev); 3067 } 3068 3069 /* port 0 is the switch itself and never has a remote */ 3070 tb_switch_for_each_port(sw, port) { 3071 if (tb_port_has_remote(port)) { 3072 tb_switch_remove(port->remote->sw); 3073 port->remote = NULL; 3074 } else if (port->xdomain) { 3075 tb_xdomain_remove(port->xdomain); 3076 port->xdomain = NULL; 3077 } 3078 3079 /* Remove any downstream retimers */ 3080 tb_retimer_remove_all(port); 3081 } 3082 3083 if (!sw->is_unplugged) 3084 tb_plug_events_active(sw, false); 3085 3086 tb_switch_nvm_remove(sw); 3087 usb4_switch_remove_ports(sw); 3088 3089 if (tb_route(sw)) 3090 dev_info(&sw->dev, "device disconnected\n"); 3091 device_unregister(&sw->dev); 3092 } 3093 3094 /** 3095 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches 3096 * @sw: Router to mark unplugged 3097 */ 3098 void tb_sw_set_unplugged(struct tb_switch *sw) 3099 { 3100 struct tb_port *port; 3101 3102 if (sw == sw->tb->root_switch) { 3103 tb_sw_WARN(sw, "cannot unplug root switch\n"); 3104 return; 3105 } 3106 if (sw->is_unplugged) { 3107 tb_sw_WARN(sw, "is_unplugged already set\n"); 3108 return; 3109 } 3110 sw->is_unplugged = true; 3111 tb_switch_for_each_port(sw, port) { 3112 if (tb_port_has_remote(port)) 3113 tb_sw_set_unplugged(port->remote->sw); 3114 else if (port->xdomain) 3115 port->xdomain->is_unplugged = true; 3116 } 3117 } 3118 3119 static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags) 3120 { 3121 if (flags) 3122 tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags); 3123 else 3124 tb_sw_dbg(sw, "disabling wakeup\n"); 3125 3126 if (tb_switch_is_usb4(sw)) 3127 return usb4_switch_set_wake(sw, flags); 3128 return tb_lc_set_wake(sw, flags); 3129 } 3130 3131 int tb_switch_resume(struct tb_switch *sw) 3132 { 3133 struct tb_port *port; 3134 int err; 3135 3136 tb_sw_dbg(sw, "resuming switch\n"); 3137 3138 /* 3139 * Check for UID of the connected switches except for root 3140 * switch which we assume cannot be removed. 3141 */ 3142 if (tb_route(sw)) { 3143 u64 uid; 3144 3145 /* 3146 * Check first that we can still read the switch config 3147 * space. It may be that there is now another domain 3148 * connected. 
3149 */ 3150 err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw)); 3151 if (err < 0) { 3152 tb_sw_info(sw, "switch not present anymore\n"); 3153 return err; 3154 } 3155 3156 /* We don't have any way to confirm this was the same device */ 3157 if (!sw->uid) 3158 return -ENODEV; 3159 3160 if (tb_switch_is_usb4(sw)) 3161 err = usb4_switch_read_uid(sw, &uid); 3162 else 3163 err = tb_drom_read_uid_only(sw, &uid); 3164 if (err) { 3165 tb_sw_warn(sw, "uid read failed\n"); 3166 return err; 3167 } 3168 if (sw->uid != uid) { 3169 tb_sw_info(sw, 3170 "changed while suspended (uid %#llx -> %#llx)\n", 3171 sw->uid, uid); 3172 return -ENODEV; 3173 } 3174 } 3175 3176 err = tb_switch_configure(sw); 3177 if (err) 3178 return err; 3179 3180 /* Disable wakes */ 3181 tb_switch_set_wake(sw, 0); 3182 3183 err = tb_switch_tmu_init(sw); 3184 if (err) 3185 return err; 3186 3187 /* check for surviving downstream switches */ 3188 tb_switch_for_each_port(sw, port) { 3189 if (!tb_port_is_null(port)) 3190 continue; 3191 3192 if (!tb_port_resume(port)) 3193 continue; 3194 3195 if (tb_wait_for_port(port, true) <= 0) { 3196 tb_port_warn(port, 3197 "lost during suspend, disconnecting\n"); 3198 if (tb_port_has_remote(port)) 3199 tb_sw_set_unplugged(port->remote->sw); 3200 else if (port->xdomain) 3201 port->xdomain->is_unplugged = true; 3202 } else { 3203 /* 3204 * Always unlock the port so the downstream 3205 * switch/domain is accessible. 3206 */ 3207 if (tb_port_unlock(port)) 3208 tb_port_warn(port, "failed to unlock port\n"); 3209 if (port->remote && tb_switch_resume(port->remote->sw)) { 3210 tb_port_warn(port, 3211 "lost during suspend, disconnecting\n"); 3212 tb_sw_set_unplugged(port->remote->sw); 3213 } 3214 } 3215 } 3216 return 0; 3217 } 3218 3219 /** 3220 * tb_switch_suspend() - Put a switch to sleep 3221 * @sw: Switch to suspend 3222 * @runtime: Is this runtime suspend or system sleep 3223 * 3224 * Suspends router and all its children. Enables wakes according to 3225 * value of @runtime and then sets sleep bit for the router. If @sw is 3226 * host router the domain is ready to go to sleep once this function 3227 * returns. 3228 */ 3229 void tb_switch_suspend(struct tb_switch *sw, bool runtime) 3230 { 3231 unsigned int flags = 0; 3232 struct tb_port *port; 3233 int err; 3234 3235 tb_sw_dbg(sw, "suspending switch\n"); 3236 3237 /* 3238 * Actually only needed for Titan Ridge but for simplicity can be 3239 * done for USB4 device too as CLx is re-enabled at resume. 3240 * CL0s and CL1 are enabled and supported together. 
3241 */ 3242 if (tb_switch_is_clx_enabled(sw, TB_CL1)) { 3243 if (tb_switch_disable_clx(sw, TB_CL1)) 3244 tb_sw_warn(sw, "failed to disable %s on upstream port\n", 3245 tb_switch_clx_name(TB_CL1)); 3246 } 3247 3248 err = tb_plug_events_active(sw, false); 3249 if (err) 3250 return; 3251 3252 tb_switch_for_each_port(sw, port) { 3253 if (tb_port_has_remote(port)) 3254 tb_switch_suspend(port->remote->sw, runtime); 3255 } 3256 3257 if (runtime) { 3258 /* Trigger wake when something is plugged in/out */ 3259 flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT; 3260 flags |= TB_WAKE_ON_USB4; 3261 flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP; 3262 } else if (device_may_wakeup(&sw->dev)) { 3263 flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE; 3264 } 3265 3266 tb_switch_set_wake(sw, flags); 3267 3268 if (tb_switch_is_usb4(sw)) 3269 usb4_switch_set_sleep(sw); 3270 else 3271 tb_lc_set_sleep(sw); 3272 } 3273 3274 /** 3275 * tb_switch_query_dp_resource() - Query availability of DP resource 3276 * @sw: Switch whose DP resource is queried 3277 * @in: DP IN port 3278 * 3279 * Queries availability of DP resource for DP tunneling using switch 3280 * specific means. Returns %true if resource is available. 3281 */ 3282 bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in) 3283 { 3284 if (tb_switch_is_usb4(sw)) 3285 return usb4_switch_query_dp_resource(sw, in); 3286 return tb_lc_dp_sink_query(sw, in); 3287 } 3288 3289 /** 3290 * tb_switch_alloc_dp_resource() - Allocate available DP resource 3291 * @sw: Switch whose DP resource is allocated 3292 * @in: DP IN port 3293 * 3294 * Allocates DP resource for DP tunneling. The resource must be 3295 * available for this to succeed (see tb_switch_query_dp_resource()). 3296 * Returns %0 in success and negative errno otherwise. 3297 */ 3298 int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in) 3299 { 3300 int ret; 3301 3302 if (tb_switch_is_usb4(sw)) 3303 ret = usb4_switch_alloc_dp_resource(sw, in); 3304 else 3305 ret = tb_lc_dp_sink_alloc(sw, in); 3306 3307 if (ret) 3308 tb_sw_warn(sw, "failed to allocate DP resource for port %d\n", 3309 in->port); 3310 else 3311 tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port); 3312 3313 return ret; 3314 } 3315 3316 /** 3317 * tb_switch_dealloc_dp_resource() - De-allocate DP resource 3318 * @sw: Switch whose DP resource is de-allocated 3319 * @in: DP IN port 3320 * 3321 * De-allocates DP resource that was previously allocated for DP 3322 * tunneling. 
3323 */ 3324 void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in) 3325 { 3326 int ret; 3327 3328 if (tb_switch_is_usb4(sw)) 3329 ret = usb4_switch_dealloc_dp_resource(sw, in); 3330 else 3331 ret = tb_lc_dp_sink_dealloc(sw, in); 3332 3333 if (ret) 3334 tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n", 3335 in->port); 3336 else 3337 tb_sw_dbg(sw, "released DP resource for port %d\n", in->port); 3338 } 3339 3340 struct tb_sw_lookup { 3341 struct tb *tb; 3342 u8 link; 3343 u8 depth; 3344 const uuid_t *uuid; 3345 u64 route; 3346 }; 3347 3348 static int tb_switch_match(struct device *dev, const void *data) 3349 { 3350 struct tb_switch *sw = tb_to_switch(dev); 3351 const struct tb_sw_lookup *lookup = data; 3352 3353 if (!sw) 3354 return 0; 3355 if (sw->tb != lookup->tb) 3356 return 0; 3357 3358 if (lookup->uuid) 3359 return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid)); 3360 3361 if (lookup->route) { 3362 return sw->config.route_lo == lower_32_bits(lookup->route) && 3363 sw->config.route_hi == upper_32_bits(lookup->route); 3364 } 3365 3366 /* Root switch is matched only by depth */ 3367 if (!lookup->depth) 3368 return !sw->depth; 3369 3370 return sw->link == lookup->link && sw->depth == lookup->depth; 3371 } 3372 3373 /** 3374 * tb_switch_find_by_link_depth() - Find switch by link and depth 3375 * @tb: Domain the switch belongs 3376 * @link: Link number the switch is connected 3377 * @depth: Depth of the switch in link 3378 * 3379 * Returned switch has reference count increased so the caller needs to 3380 * call tb_switch_put() when done with the switch. 3381 */ 3382 struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth) 3383 { 3384 struct tb_sw_lookup lookup; 3385 struct device *dev; 3386 3387 memset(&lookup, 0, sizeof(lookup)); 3388 lookup.tb = tb; 3389 lookup.link = link; 3390 lookup.depth = depth; 3391 3392 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match); 3393 if (dev) 3394 return tb_to_switch(dev); 3395 3396 return NULL; 3397 } 3398 3399 /** 3400 * tb_switch_find_by_uuid() - Find switch by UUID 3401 * @tb: Domain the switch belongs 3402 * @uuid: UUID to look for 3403 * 3404 * Returned switch has reference count increased so the caller needs to 3405 * call tb_switch_put() when done with the switch. 3406 */ 3407 struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid) 3408 { 3409 struct tb_sw_lookup lookup; 3410 struct device *dev; 3411 3412 memset(&lookup, 0, sizeof(lookup)); 3413 lookup.tb = tb; 3414 lookup.uuid = uuid; 3415 3416 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match); 3417 if (dev) 3418 return tb_to_switch(dev); 3419 3420 return NULL; 3421 } 3422 3423 /** 3424 * tb_switch_find_by_route() - Find switch by route string 3425 * @tb: Domain the switch belongs 3426 * @route: Route string to look for 3427 * 3428 * Returned switch has reference count increased so the caller needs to 3429 * call tb_switch_put() when done with the switch. 
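*
* A minimal usage sketch, for illustration only:
*
*     sw = tb_switch_find_by_route(tb, route);
*     if (sw) {
*             ...
*             tb_switch_put(sw);
*     }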
3430 */ 3431 struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route) 3432 { 3433 struct tb_sw_lookup lookup; 3434 struct device *dev; 3435 3436 if (!route) 3437 return tb_switch_get(tb->root_switch); 3438 3439 memset(&lookup, 0, sizeof(lookup)); 3440 lookup.tb = tb; 3441 lookup.route = route; 3442 3443 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match); 3444 if (dev) 3445 return tb_to_switch(dev); 3446 3447 return NULL; 3448 } 3449 3450 /** 3451 * tb_switch_find_port() - return the first port of @type on @sw or NULL 3452 * @sw: Switch to find the port from 3453 * @type: Port type to look for 3454 */ 3455 struct tb_port *tb_switch_find_port(struct tb_switch *sw, 3456 enum tb_port_type type) 3457 { 3458 struct tb_port *port; 3459 3460 tb_switch_for_each_port(sw, port) { 3461 if (port->config.type == type) 3462 return port; 3463 } 3464 3465 return NULL; 3466 } 3467 3468 static int tb_switch_pm_secondary_resolve(struct tb_switch *sw) 3469 { 3470 struct tb_switch *parent = tb_switch_parent(sw); 3471 struct tb_port *up, *down; 3472 int ret; 3473 3474 if (!tb_route(sw)) 3475 return 0; 3476 3477 up = tb_upstream_port(sw); 3478 down = tb_port_at(tb_route(sw), parent); 3479 ret = tb_port_pm_secondary_enable(up); 3480 if (ret) 3481 return ret; 3482 3483 return tb_port_pm_secondary_disable(down); 3484 } 3485 3486 static int __tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx) 3487 { 3488 struct tb_switch *parent = tb_switch_parent(sw); 3489 bool up_clx_support, down_clx_support; 3490 struct tb_port *up, *down; 3491 int ret; 3492 3493 if (!tb_switch_is_clx_supported(sw)) 3494 return 0; 3495 3496 /* 3497 * Enable CLx for host router's downstream port as part of the 3498 * downstream router enabling procedure. 3499 */ 3500 if (!tb_route(sw)) 3501 return 0; 3502 3503 /* Enable CLx only for first hop router (depth = 1) */ 3504 if (tb_route(parent)) 3505 return 0; 3506 3507 ret = tb_switch_pm_secondary_resolve(sw); 3508 if (ret) 3509 return ret; 3510 3511 up = tb_upstream_port(sw); 3512 down = tb_port_at(tb_route(sw), parent); 3513 3514 up_clx_support = tb_port_clx_supported(up, clx); 3515 down_clx_support = tb_port_clx_supported(down, clx); 3516 3517 tb_port_dbg(up, "%s %ssupported\n", tb_switch_clx_name(clx), 3518 up_clx_support ? "" : "not "); 3519 tb_port_dbg(down, "%s %ssupported\n", tb_switch_clx_name(clx), 3520 down_clx_support ? "" : "not "); 3521 3522 if (!up_clx_support || !down_clx_support) 3523 return -EOPNOTSUPP; 3524 3525 ret = tb_port_clx_enable(up, clx); 3526 if (ret) 3527 return ret; 3528 3529 ret = tb_port_clx_enable(down, clx); 3530 if (ret) { 3531 tb_port_clx_disable(up, clx); 3532 return ret; 3533 } 3534 3535 ret = tb_switch_mask_clx_objections(sw); 3536 if (ret) { 3537 tb_port_clx_disable(up, clx); 3538 tb_port_clx_disable(down, clx); 3539 return ret; 3540 } 3541 3542 sw->clx = clx; 3543 3544 tb_port_dbg(up, "%s enabled\n", tb_switch_clx_name(clx)); 3545 return 0; 3546 } 3547 3548 /** 3549 * tb_switch_enable_clx() - Enable CLx on upstream port of specified router 3550 * @sw: Router to enable CLx for 3551 * @clx: The CLx state to enable 3552 * 3553 * Enable CLx state only for first hop router. That is the most common 3554 * use-case, that is intended for better thermal management, and so helps 3555 * to improve performance. CLx is enabled only if both sides of the link 3556 * support CLx, and if both sides of the link are not configured as two 3557 * single lane links and only if the link is not inter-domain link. 
The 3558 * complete set of conditions is described in CM Guide 1.0 section 8.1. 3559 * 3560 * Return: Returns 0 on success or an error code on failure. 3561 */ 3562 int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx) 3563 { 3564 struct tb_switch *root_sw = sw->tb->root_switch; 3565 3566 if (!clx_enabled) 3567 return 0; 3568 3569 /* 3570 * CLx is not enabled and validated on Intel USB4 platforms before 3571 * Alder Lake. 3572 */ 3573 if (root_sw->generation < 4 || tb_switch_is_tiger_lake(root_sw)) 3574 return 0; 3575 3576 switch (clx) { 3577 case TB_CL1: 3578 /* CL0s and CL1 are enabled and supported together */ 3579 return __tb_switch_enable_clx(sw, clx); 3580 3581 default: 3582 return -EOPNOTSUPP; 3583 } 3584 } 3585 3586 static int __tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx) 3587 { 3588 struct tb_switch *parent = tb_switch_parent(sw); 3589 struct tb_port *up, *down; 3590 int ret; 3591 3592 if (!tb_switch_is_clx_supported(sw)) 3593 return 0; 3594 3595 /* 3596 * Disable CLx for host router's downstream port as part of the 3597 * downstream router enabling procedure. 3598 */ 3599 if (!tb_route(sw)) 3600 return 0; 3601 3602 /* Disable CLx only for first hop router (depth = 1) */ 3603 if (tb_route(parent)) 3604 return 0; 3605 3606 up = tb_upstream_port(sw); 3607 down = tb_port_at(tb_route(sw), parent); 3608 ret = tb_port_clx_disable(up, clx); 3609 if (ret) 3610 return ret; 3611 3612 ret = tb_port_clx_disable(down, clx); 3613 if (ret) 3614 return ret; 3615 3616 sw->clx = TB_CLX_DISABLE; 3617 3618 tb_port_dbg(up, "%s disabled\n", tb_switch_clx_name(clx)); 3619 return 0; 3620 } 3621 3622 /** 3623 * tb_switch_disable_clx() - Disable CLx on upstream port of specified router 3624 * @sw: Router to disable CLx for 3625 * @clx: The CLx state to disable 3626 * 3627 * Return: Returns 0 on success or an error code on failure. 3628 */ 3629 int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx) 3630 { 3631 if (!clx_enabled) 3632 return 0; 3633 3634 switch (clx) { 3635 case TB_CL1: 3636 /* CL0s and CL1 are enabled and supported together */ 3637 return __tb_switch_disable_clx(sw, clx); 3638 3639 default: 3640 return -EOPNOTSUPP; 3641 } 3642 } 3643 3644 /** 3645 * tb_switch_mask_clx_objections() - Mask CLx objections for a router 3646 * @sw: Router to mask objections for 3647 * 3648 * Mask the objections coming from the second depth routers in order to 3649 * stop these objections from interfering with the CLx states of the first 3650 * depth link. 3651 */ 3652 int tb_switch_mask_clx_objections(struct tb_switch *sw) 3653 { 3654 int up_port = sw->config.upstream_port_number; 3655 u32 offset, val[2], mask_obj, unmask_obj; 3656 int ret, i; 3657 3658 /* Only Titan Ridge of pre-USB4 devices support CLx states */ 3659 if (!tb_switch_is_titan_ridge(sw)) 3660 return 0; 3661 3662 if (!tb_route(sw)) 3663 return 0; 3664 3665 /* 3666 * In Titan Ridge there are only 2 dual-lane Thunderbolt ports: 3667 * Port A consists of lane adapters 1,2 and 3668 * Port B consists of lane adapters 3,4 3669 * If upstream port is A, (lanes are 1,2), we mask objections from 3670 * port B (lanes 3,4) and unmask objections from Port A and vice-versa. 
3671 */ 3672 if (up_port == 1) { 3673 mask_obj = TB_LOW_PWR_C0_PORT_B_MASK; 3674 unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK; 3675 offset = TB_LOW_PWR_C1_CL1; 3676 } else { 3677 mask_obj = TB_LOW_PWR_C1_PORT_A_MASK; 3678 unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK; 3679 offset = TB_LOW_PWR_C3_CL1; 3680 } 3681 3682 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, 3683 sw->cap_lp + offset, ARRAY_SIZE(val)); 3684 if (ret) 3685 return ret; 3686 3687 for (i = 0; i < ARRAY_SIZE(val); i++) { 3688 val[i] |= mask_obj; 3689 val[i] &= ~unmask_obj; 3690 } 3691 3692 return tb_sw_write(sw, &val, TB_CFG_SWITCH, 3693 sw->cap_lp + offset, ARRAY_SIZE(val)); 3694 } 3695 3696 /* 3697 * Can be used for read/write a specified PCIe bridge for any Thunderbolt 3 3698 * device. For now used only for Titan Ridge. 3699 */ 3700 static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge, 3701 unsigned int pcie_offset, u32 value) 3702 { 3703 u32 offset, command, val; 3704 int ret; 3705 3706 if (sw->generation != 3) 3707 return -EOPNOTSUPP; 3708 3709 offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA; 3710 ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1); 3711 if (ret) 3712 return ret; 3713 3714 command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK; 3715 command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT); 3716 command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK; 3717 command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL 3718 << TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT; 3719 command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK; 3720 3721 offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD; 3722 3723 ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1); 3724 if (ret) 3725 return ret; 3726 3727 ret = tb_switch_wait_for_bit(sw, offset, 3728 TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100); 3729 if (ret) 3730 return ret; 3731 3732 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1); 3733 if (ret) 3734 return ret; 3735 3736 if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK) 3737 return -ETIMEDOUT; 3738 3739 return 0; 3740 } 3741 3742 /** 3743 * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state 3744 * @sw: Router to enable PCIe L1 3745 * 3746 * For Titan Ridge switch to enter CLx state, its PCIe bridges shall enable 3747 * entry to PCIe L1 state. Shall be called after the upstream PCIe tunnel 3748 * was configured. Due to Intel platforms limitation, shall be called only 3749 * for first hop switch. 3750 */ 3751 int tb_switch_pcie_l1_enable(struct tb_switch *sw) 3752 { 3753 struct tb_switch *parent = tb_switch_parent(sw); 3754 int ret; 3755 3756 if (!tb_route(sw)) 3757 return 0; 3758 3759 if (!tb_switch_is_titan_ridge(sw)) 3760 return 0; 3761 3762 /* Enable PCIe L1 enable only for first hop router (depth = 1) */ 3763 if (tb_route(parent)) 3764 return 0; 3765 3766 /* Write to downstream PCIe bridge #5 aka Dn4 */ 3767 ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1); 3768 if (ret) 3769 return ret; 3770 3771 /* Write to Upstream PCIe bridge #0 aka Up0 */ 3772 return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1); 3773 } 3774 3775 /** 3776 * tb_switch_xhci_connect() - Connect internal xHCI 3777 * @sw: Router whose xHCI to connect 3778 * 3779 * Can be called to any router. For Alpine Ridge and Titan Ridge 3780 * performs special flows that bring the xHCI functional for any device 3781 * connected to the type-C port. Call only after PCIe tunnel has been 3782 * established. The function only does the connect if not done already 3783 * so can be called several times for the same router. 
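*
* Return: %0 in case of success and negative errno in case of failure.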
3784 */ 3785 int tb_switch_xhci_connect(struct tb_switch *sw) 3786 { 3787 struct tb_port *port1, *port3; 3788 int ret; 3789 3790 if (sw->generation != 3) 3791 return 0; 3792 3793 port1 = &sw->ports[1]; 3794 port3 = &sw->ports[3]; 3795 3796 if (tb_switch_is_alpine_ridge(sw)) { 3797 bool usb_port1, usb_port3, xhci_port1, xhci_port3; 3798 3799 usb_port1 = tb_lc_is_usb_plugged(port1); 3800 usb_port3 = tb_lc_is_usb_plugged(port3); 3801 xhci_port1 = tb_lc_is_xhci_connected(port1); 3802 xhci_port3 = tb_lc_is_xhci_connected(port3); 3803 3804 /* Figure out correct USB port to connect */ 3805 if (usb_port1 && !xhci_port1) { 3806 ret = tb_lc_xhci_connect(port1); 3807 if (ret) 3808 return ret; 3809 } 3810 if (usb_port3 && !xhci_port3) 3811 return tb_lc_xhci_connect(port3); 3812 } else if (tb_switch_is_titan_ridge(sw)) { 3813 ret = tb_lc_xhci_connect(port1); 3814 if (ret) 3815 return ret; 3816 return tb_lc_xhci_connect(port3); 3817 } 3818 3819 return 0; 3820 } 3821 3822 /** 3823 * tb_switch_xhci_disconnect() - Disconnect internal xHCI 3824 * @sw: Router whose xHCI to disconnect 3825 * 3826 * The opposite of tb_switch_xhci_connect(). Disconnects xHCI on both 3827 * ports. 3828 */ 3829 void tb_switch_xhci_disconnect(struct tb_switch *sw) 3830 { 3831 if (sw->generation == 3) { 3832 struct tb_port *port1 = &sw->ports[1]; 3833 struct tb_port *port3 = &sw->ports[3]; 3834 3835 tb_lc_xhci_disconnect(port1); 3836 tb_port_dbg(port1, "disconnected xHCI\n"); 3837 tb_lc_xhci_disconnect(port3); 3838 tb_port_dbg(port3, "disconnected xHCI\n"); 3839 } 3840 } 3841