// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "tb.h"

/* Switch NVM support */

#define NVM_DEVID		0x05
#define NVM_VERSION		0x08
#define NVM_CSS			0x10
#define NVM_FLASH_SIZE		0x45

#define NVM_MIN_SIZE		SZ_32K
#define NVM_MAX_SIZE		SZ_512K

static DEFINE_IDA(nvm_ida);

struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);

static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}

static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	if (!st) {
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}

static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}

static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size, hdr_size;
	const u8 *buf = sw->nvm->buf;
	u16 ds_size;
	int ret;

	if (!buf)
		return -EINVAL;

	image_size = sw->nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	if (!sw->safe_mode) {
		u16 device_id;

		/*
		 * Make sure the device ID in the image matches the one
		 * we read from the switch config space.
		 */
		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
		if (device_id != sw->config.device_id)
			return -EINVAL;

		if (sw->generation < 3) {
			/* Write CSS headers first */
			ret = dma_port_flash_write(sw->dma_port,
				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
				DMA_PORT_CSS_MAX_SIZE);
			if (ret)
				return ret;
		}

		/* Skip headers in the image */
		buf += hdr_size;
		image_size -= hdr_size;
	}

	if (tb_switch_is_usb4(sw))
		return usb4_switch_nvm_write(sw, 0, buf, image_size);
	return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
}
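/*
 * Illustrative example (not part of the driver logic above): assuming
 * an image whose first dword reads 0x00004000, the low 24 bits give
 * hdr_size = 0x4000 (16 KiB), which satisfies the 4k alignment check.
 * The digital section then starts at buf + 0x4000 and its first 16-bit
 * word is the ds_size that is validated against image_size above.
 */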
static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
{
	int ret = 0;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		u32 status;

		ret = tb_domain_disconnect_all_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this
		 * if everything goes well so getting timeout is
		 * expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		if (!ret || ret == -ETIMEDOUT)
			return 0;

		/*
		 * Any error from update auth operation requires power
		 * cycling of the host router.
		 */
		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
			nvm_set_auth_status(sw, status);
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return ret;
}

static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	switch (ret) {
	case 0:
	case -ETIMEDOUT:
	case -EACCES:
	case -EINVAL:
		/* Power cycle is required */
		break;
	default:
		return ret;
	}

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get timeout for a while). Once
	 * we get response the device needs to be power cycled in order
	 * for the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;
		if (ret > 0) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}
static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	/*
	 * During host router NVM upgrade we should not allow root port to
	 * go into D3cold because some root ports cannot trigger PME
	 * by themselves. To be on the safe side keep the root port in D0
	 * during the whole upgrade process.
	 */
	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_get_noresume(&root_port->dev);
}

static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_put(&root_port->dev);
}

static inline bool nvm_readable(struct tb_switch *sw)
{
	if (tb_switch_is_usb4(sw)) {
		/*
		 * USB4 devices must support NVM operations but it is
		 * optional for hosts. Therefore we query the NVM sector
		 * size here and if it is supported assume NVM
		 * operations are implemented.
		 */
		return usb4_switch_nvm_sector_size(sw) > 0;
	}

	/* Thunderbolt 2 and 3 devices support NVM through DMA port */
	return !!sw->dma_port;
}

static inline bool nvm_upgradeable(struct tb_switch *sw)
{
	if (sw->no_nvm_upgrade)
		return false;
	return nvm_readable(sw);
}

static inline int nvm_read(struct tb_switch *sw, unsigned int address,
			   void *buf, size_t size)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_nvm_read(sw, address, buf, size);
	return dma_port_flash_read(sw->dma_port, address, buf, size);
}

static int nvm_authenticate(struct tb_switch *sw)
{
	int ret;

	if (tb_switch_is_usb4(sw))
		return usb4_switch_nvm_authenticate(sw);

	if (!tb_route(sw)) {
		nvm_authenticate_start_dma_port(sw);
		ret = nvm_authenticate_host_dma_port(sw);
	} else {
		ret = nvm_authenticate_device_dma_port(sw);
	}

	return ret;
}
static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
			      size_t bytes)
{
	struct tb_switch *sw = priv;
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = nvm_read(sw, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
			       size_t bytes)
{
	struct tb_switch *sw = priv;
	int ret = 0;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	/*
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	 */
	if (!sw->nvm->buf) {
		sw->nvm->buf = vmalloc(NVM_MAX_SIZE);
		if (!sw->nvm->buf) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	sw->nvm->buf_data_size = offset + bytes;
	memcpy(sw->nvm->buf + offset, val, bytes);

unlock:
	mutex_unlock(&sw->tb->lock);

	return ret;
}

static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
					   size_t size, bool active)
{
	struct nvmem_config config;

	memset(&config, 0, sizeof(config));

	if (active) {
		config.name = "nvm_active";
		config.reg_read = tb_switch_nvm_read;
		config.read_only = true;
	} else {
		config.name = "nvm_non_active";
		config.reg_write = tb_switch_nvm_write;
		config.root_only = true;
	}

	config.id = id;
	config.stride = 4;
	config.word_size = 4;
	config.size = size;
	config.dev = &sw->dev;
	config.owner = THIS_MODULE;
	config.priv = sw;

	return nvmem_register(&config);
}

static int tb_switch_nvm_add(struct tb_switch *sw)
{
	struct nvmem_device *nvm_dev;
	struct tb_switch_nvm *nvm;
	u32 val;
	int ret;

	if (!nvm_readable(sw))
		return 0;

	/*
	 * The NVM format of non-Intel hardware is not known so
	 * currently restrict NVM upgrade for Intel hardware. We may
	 * relax this in the future when we learn other NVM formats.
	 */
	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) {
		dev_info(&sw->dev,
			 "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
			 sw->config.vendor_id);
		return 0;
	}

	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
	if (!nvm)
		return -ENOMEM;

	nvm->id = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (!sw->safe_mode) {
		u32 nvm_size, hdr_size;

		ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
		if (ret)
			goto err_ida;

		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
		nvm_size = (SZ_1M << (val & 7)) / 8;
		nvm_size = (nvm_size - hdr_size) / 2;

		ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
		if (ret)
			goto err_ida;

		nvm->major = val >> 16;
		nvm->minor = val >> 8;

		nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true);
		if (IS_ERR(nvm_dev)) {
			ret = PTR_ERR(nvm_dev);
			goto err_ida;
		}
		nvm->active = nvm_dev;
	}

	if (!sw->no_nvm_upgrade) {
		nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
		if (IS_ERR(nvm_dev)) {
			ret = PTR_ERR(nvm_dev);
			goto err_nvm_active;
		}
		nvm->non_active = nvm_dev;
	}

	sw->nvm = nvm;
	return 0;

err_nvm_active:
	if (nvm->active)
		nvmem_unregister(nvm->active);
err_ida:
	ida_simple_remove(&nvm_ida, nvm->id);
	kfree(nvm);

	return ret;
}
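/*
 * Worked example of the size calculation above (illustrative): if the
 * NVM_FLASH_SIZE register reads back with (val & 7) == 1, then
 * nvm_size = (SZ_1M << 1) / 8 = 256 KiB of raw flash. For a
 * generation 3 switch hdr_size is SZ_16K, so each of the two NVM
 * regions ends up being (SZ_256K - SZ_16K) / 2 = 120 KiB.
 */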
static void tb_switch_nvm_remove(struct tb_switch *sw)
{
	struct tb_switch_nvm *nvm;

	nvm = sw->nvm;
	sw->nvm = NULL;

	if (!nvm)
		return;

	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);

	if (nvm->non_active)
		nvmem_unregister(nvm->non_active);
	if (nvm->active)
		nvmem_unregister(nvm->active);
	ida_simple_remove(&nvm_ida, nvm->id);
	vfree(nvm->buf);
	kfree(nvm);
}

/* port utility functions */

static const char *tb_port_type(struct tb_regs_port_header *port)
{
	switch (port->type >> 16) {
	case 0:
		switch ((u8)port->type) {
		case 0:
			return "Inactive";
		case 1:
			return "Port";
		case 2:
			return "NHI";
		default:
			return "unknown";
		}
	case 0x2:
		return "Ethernet";
	case 0x8:
		return "SATA";
	case 0xe:
		return "DP/HDMI";
	case 0x10:
		return "PCIe";
	case 0x20:
		return "USB";
	default:
		return "unknown";
	}
}

static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
{
	tb_dbg(tb,
	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
	       port->port_number, port->vendor_id, port->device_id,
	       port->revision, port->thunderbolt_version, tb_port_type(port),
	       port->type);
	tb_dbg(tb, "  Max hop id (in/out): %d/%d\n",
	       port->max_in_hop_id, port->max_out_hop_id);
	tb_dbg(tb, "  Max counters: %d\n", port->max_counters);
	tb_dbg(tb, "  NFC Credits: %#x\n", port->nfc_credits);
}

/**
 * tb_port_state() - get connectedness state of a port
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
static int tb_port_state(struct tb_port *port)
{
	struct tb_cap_phy phy;
	int res;

	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}
	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
	if (res)
		return res;
	return phy.state;
}
/**
 * tb_wait_for_port() - wait for a port to become ready
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the
 * link has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
	int retries = 10;
	int state;

	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

	while (retries--) {
		state = tb_port_state(port);
		if (state < 0)
			return state;
		if (state == TB_PORT_DISABLED) {
			tb_port_dbg(port, "is disabled (state: 0)\n");
			return 0;
		}
		if (state == TB_PORT_UNPLUGGED) {
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_dbg(port,
					    "is unplugged (state: 7), retrying...\n");
				msleep(100);
				continue;
			}
			tb_port_dbg(port, "is unplugged (state: 7)\n");
			return 0;
		}
		if (state == TB_PORT_UP) {
			tb_port_dbg(port, "is connected, link is up (state: 2)\n");
			return 1;
		}

		/*
		 * After plug-in the state is TB_PORT_CONNECTING. Give it some
		 * time.
		 */
		tb_port_dbg(port,
			    "is connected, link is not up (state: %d), retrying...\n",
			    state);
		msleep(100);
	}
	tb_port_warn(port,
		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
}

/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
	u32 nfc_credits;

	if (credits == 0 || port->sw->is_unplugged)
		return 0;

	nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	nfc_credits += credits;

	tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
		    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);

	port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
	port->config.nfc_credits |= nfc_credits;

	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, ADP_CS_4, 1);
}
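/*
 * Example (illustrative): if a port currently has 10 NFC credits,
 * tb_port_add_nfc_credits(port, -2) rewrites the low
 * ADP_CS_4_NFC_BUFFERS_MASK bits of ADP_CS_4 so that 8 credits remain,
 * while the bits outside the mask are preserved as-is.
 */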
/**
 * tb_port_set_initial_credits() - Set initial port link credits allocated
 * @port: Port to set the initial credits
 * @credits: Number of credits to allocate
 *
 * Set initial credits value to be used for ingress shared buffering.
 */
int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
	if (ret)
		return ret;

	data &= ~ADP_CS_5_LCA_MASK;
	data |= (credits << ADP_CS_5_LCA_SHIFT) & ADP_CS_5_LCA_MASK;

	return tb_port_write(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
}

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
	u32 zero[3] = { 0, 0, 0 };

	tb_port_dbg(port, "clearing counter %d\n", counter);
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}

/**
 * tb_port_unlock() - Unlock downstream port
 * @port: Port to unlock
 *
 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
 * downstream router accessible for CM.
 */
int tb_port_unlock(struct tb_port *port)
{
	if (tb_switch_is_icm(port->sw))
		return 0;
	if (!tb_port_is_null(port))
		return -EINVAL;
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_unlock(port);
	return 0;
}

/**
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
	int res;
	int cap;

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res) {
		if (res == -ENODEV) {
			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
			       port->port);
			return 0;
		}
		return res;
	}

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT && port->port != 0) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");

		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
		if (cap > 0)
			port->cap_usb4 = cap;
	} else if (port->port != 0) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
		if (cap > 0)
			port->cap_adap = cap;
	}

	tb_dump_port(port->sw->tb, &port->config);

	/* Control port does not need HopID allocation */
	if (port->port) {
		ida_init(&port->in_hopids);
		ida_init(&port->out_hopids);
	}

	INIT_LIST_HEAD(&port->list);
	return 0;
}

static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
			       int max_hopid)
{
	int port_max_hopid;
	struct ida *ida;

	if (in) {
		port_max_hopid = port->config.max_in_hop_id;
		ida = &port->in_hopids;
	} else {
		port_max_hopid = port->config.max_out_hop_id;
		ida = &port->out_hopids;
	}

	/* HopIDs 0-7 are reserved */
	if (min_hopid < TB_PATH_MIN_HOPID)
		min_hopid = TB_PATH_MIN_HOPID;

	if (max_hopid < 0 || max_hopid > port_max_hopid)
		max_hopid = port_max_hopid;

	return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
}
/**
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
}

/**
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
}

/**
 * tb_port_release_in_hopid() - Release allocated input HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->in_hopids, hopid);
}

/**
 * tb_port_release_out_hopid() - Release allocated output HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->out_hopids, hopid);
}

/**
 * tb_next_port_on_path() - Return next port for given port on a path
 * @start: Start port of the walk
 * @end: End port of the walk
 * @prev: Previous port (%NULL if this is the first)
 *
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If the @prev is dual
 * link port, the function follows that link and returns another end on
 * that same link.
 *
 * If the @end port has been reached, return %NULL.
 *
 * Domain tb->lock must be held when this function is called.
 */
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
				     struct tb_port *prev)
{
	struct tb_port *next;

	if (!prev)
		return start;

	if (prev->sw == end->sw) {
		if (prev == end)
			return NULL;
		return end;
	}

	if (start->sw->config.depth < end->sw->config.depth) {
		if (prev->remote &&
		    prev->remote->sw->config.depth > prev->sw->config.depth)
			next = prev->remote;
		else
			next = tb_port_at(tb_route(end->sw), prev->sw);
	} else {
		if (tb_is_upstream_port(prev)) {
			next = prev->remote;
		} else {
			next = tb_upstream_port(prev->sw);
			/*
			 * Keep the same link if prev and next are both
			 * dual link ports.
			 */
			if (next->dual_link_port &&
			    next->link_nr != prev->link_nr) {
				next = next->dual_link_port;
			}
		}
	}

	return next;
}
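/*
 * Typical (illustrative) walk over all ports between two endpoints,
 * assuming src and dst are valid ports of the same domain and the
 * domain tb->lock is held:
 *
 *	struct tb_port *p = NULL;
 *
 *	while ((p = tb_next_port_on_path(src, dst, p)))
 *		tb_port_dbg(p, "on path\n");
 */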
static int tb_port_get_link_speed(struct tb_port *port)
{
	u32 val, speed;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
		LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
	/* Gen 3 links run at 20 Gb/s per lane, everything else at 10 Gb/s */
	return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
}

static int tb_port_get_link_width(struct tb_port *port)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
		LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
}

static bool tb_port_is_width_supported(struct tb_port *port, int width)
{
	u32 phy, widths;
	int ret;

	if (!port->cap_phy)
		return false;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);
	if (ret)
		return false;

	widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
		LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;

	return !!(widths & width);
}

static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
	switch (width) {
	case 1:
		val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;
	case 2:
		val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;
	default:
		return -EINVAL;
	}

	val |= LANE_ADP_CS_1_LB;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

static int tb_port_lane_bonding_enable(struct tb_port *port)
{
	int ret;

	/*
	 * Enable lane bonding for both links if not already enabled by
	 * for example the boot firmware.
	 */
	ret = tb_port_get_link_width(port);
	if (ret == 1) {
		ret = tb_port_set_link_width(port, 2);
		if (ret)
			return ret;
	}

	ret = tb_port_get_link_width(port->dual_link_port);
	if (ret == 1) {
		ret = tb_port_set_link_width(port->dual_link_port, 2);
		if (ret) {
			tb_port_set_link_width(port, 1);
			return ret;
		}
	}

	port->bonded = true;
	port->dual_link_port->bonded = true;

	return 0;
}

static void tb_port_lane_bonding_disable(struct tb_port *port)
{
	port->dual_link_port->bonded = false;
	port->bonded = false;

	tb_port_set_link_width(port->dual_link_port, 1);
	tb_port_set_link_width(port, 1);
}
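/*
 * Note on the sequence above (informative): bonding is always set up
 * on both lanes of a dual link, and a failure on the second lane rolls
 * the first one back to single-lane width. So after
 * tb_port_lane_bonding_enable() returns 0, both @port and its
 * dual_link_port report width 2 through tb_port_get_link_width().
 */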
/**
 * tb_port_is_enabled() - Is the adapter port enabled
 * @port: Port to check
 */
bool tb_port_is_enabled(struct tb_port *port)
{
	switch (port->config.type) {
	case TB_TYPE_PCIE_UP:
	case TB_TYPE_PCIE_DOWN:
		return tb_pci_port_is_enabled(port);

	case TB_TYPE_DP_HDMI_IN:
	case TB_TYPE_DP_HDMI_OUT:
		return tb_dp_port_is_enabled(port);

	default:
		return false;
	}
}

/**
 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
 * @port: PCIe port to check
 */
bool tb_pci_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_PCIE_CS_0, 1))
		return false;

	return !!(data & ADP_PCIE_CS_0_PE);
}

/**
 * tb_pci_port_enable() - Enable PCIe adapter port
 * @port: PCIe port to enable
 * @enable: Enable/disable the PCIe adapter
 */
int tb_pci_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_PCIE_CS_0, 1);
}

/**
 * tb_dp_port_hpd_is_active() - Is HPD already active
 * @port: DP out port to check
 *
 * Checks if the DP OUT adapter port has HDP bit already set.
 */
int tb_dp_port_hpd_is_active(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	return !!(data & ADP_DP_CS_2_HDP);
}

/**
 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
 * @port: Port to clear HPD
 *
 * If the DP IN port has HDP set, this function can be used to clear it.
 */
int tb_dp_port_hpd_clear(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_3, 1);
	if (ret)
		return ret;

	data |= ADP_DP_CS_3_HDPC;
	return tb_port_write(port, &data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_3, 1);
}

/**
 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
 * @port: DP IN/OUT port to set hops
 * @video: Video Hop ID
 * @aux_tx: AUX TX Hop ID
 * @aux_rx: AUX RX Hop ID
 *
 * Programs specified Hop IDs for DP IN/OUT port.
 */
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
			unsigned int aux_tx, unsigned int aux_rx)
{
	u32 data[2];
	int ret;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
		ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
		ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}
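/*
 * Example (illustrative): tb_dp_port_set_hops(port, 9, 10, 11) programs
 * video HopID 9 into the ADP_DP_CS_0 field and AUX TX/RX HopIDs 10 and
 * 11 into the corresponding ADP_DP_CS_1 fields, leaving the video/aux
 * enable bits untouched until tb_dp_port_enable() is called.
 */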
/**
 * tb_dp_port_is_enabled() - Is DP adapter port enabled
 * @port: DP adapter port to check
 */
bool tb_dp_port_is_enabled(struct tb_port *port)
{
	u32 data[2];

	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
			 ARRAY_SIZE(data)))
		return false;

	return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
}

/**
 * tb_dp_port_enable() - Enables/disables DP paths of a port
 * @port: DP IN/OUT port
 * @enable: Enable/disable DP path
 *
 * Once Hop IDs are programmed DP paths can be enabled or disabled by
 * calling this function.
 */
int tb_dp_port_enable(struct tb_port *port, bool enable)
{
	u32 data[2];
	int ret;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	if (enable)
		data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
	else
		data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/* switch utility functions */

static const char *tb_switch_generation_name(const struct tb_switch *sw)
{
	switch (sw->generation) {
	case 1:
		return "Thunderbolt 1";
	case 2:
		return "Thunderbolt 2";
	case 3:
		return "Thunderbolt 3";
	case 4:
		return "USB4";
	default:
		return "Unknown";
	}
}

static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
{
	const struct tb_regs_switch_header *regs = &sw->config;

	tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
	       tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version);
	tb_dbg(tb, "  Max Port Number: %d\n", regs->max_port_number);
	tb_dbg(tb, "  Config:\n");
	tb_dbg(tb,
	       "   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
	       regs->upstream_port_number, regs->depth,
	       (((u64)regs->route_hi) << 32) | regs->route_lo,
	       regs->enabled, regs->plug_events_delay);
	tb_dbg(tb, "   unknown1: %#x unknown4: %#x\n",
	       regs->__unknown1, regs->__unknown4);
}

/**
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_switch_reset(struct tb *tb, u64 route)
{
	struct tb_cfg_result res;
	struct tb_regs_switch_header header = {
		header.route_hi = route >> 32,
		header.route_lo = route,
		header.enabled = true,
	};

	tb_dbg(tb, "resetting switch at %llx\n", route);
	res.err = tb_cfg_write(tb->ctl, ((u32 *)&header) + 2, route,
			       0, 2, 2, 2);
	if (res.err)
		return res.err;
	res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT);
	if (res.err > 0)
		return -EIO;
	return res.err;
}
/**
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
	u32 data;
	int res;

	if (tb_switch_is_icm(sw))
		return 0;

	sw->config.plug_events_delay = 0xff;
	res = tb_sw_write(sw, ((u32 *)&sw->config) + 4, TB_CFG_SWITCH, 4, 1);
	if (res)
		return res;

	/* Plug events are always enabled in USB4 */
	if (tb_switch_is_usb4(sw))
		return 0;

	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
	if (res)
		return res;

	if (active) {
		/* Clear the plug event mask bits (bits 2-6) */
		data = data & 0xFFFFFF83;
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
			break;
		default:
			data |= 4;
		}
	} else {
		/* Set all the plug event mask bits (bits 2-6) */
		data = data | 0x7c;
	}
	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
			   sw->cap_plug_events + 1, 1);
}

static ssize_t authorized_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->authorized);
}

static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
	int ret = -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->authorized)
		goto unlock;

	switch (val) {
	/* Approve switch */
	case 1:
		if (sw->key)
			ret = tb_domain_approve_switch_key(sw->tb, sw);
		else
			ret = tb_domain_approve_switch(sw->tb, sw);
		break;

	/* Challenge switch */
	case 2:
		if (sw->key)
			ret = tb_domain_challenge_switch_key(sw->tb, sw);
		break;

	default:
		break;
	}

	if (!ret) {
		sw->authorized = val;
		/* Notify status change to the userspace */
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
	}

unlock:
	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t authorized_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int val;
	ssize_t ret;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 2)
		return -EINVAL;

	pm_runtime_get_sync(&sw->dev);
	ret = tb_switch_set_authorized(sw, val);
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(authorized);
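/*
 * Userspace flow (informative): reading the authorized attribute
 * returns 0 for an unauthorized switch; writing 1 approves the switch
 * (with the stored key if one is set) and writing 2 triggers the
 * challenge based authorization. A KOBJ_CHANGE uevent is emitted on
 * success so userspace can pick up the new state.
 */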
static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->boot);
}
static DEVICE_ATTR_RO(boot);

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
}
static DEVICE_ATTR_RO(device_name);

static ssize_t
generation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->generation);
}
static DEVICE_ATTR_RO(generation);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	ssize_t ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->key)
		ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
	else
		ret = sprintf(buf, "\n");

	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t key_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u8 key[TB_SWITCH_KEY_SIZE];
	ssize_t ret = count;
	bool clear = false;

	if (!strcmp(buf, "\n"))
		clear = true;
	else if (hex2bin(key, buf, sizeof(key)))
		return -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->authorized) {
		ret = -EBUSY;
	} else {
		kfree(sw->key);
		if (clear) {
			sw->key = NULL;
		} else {
			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
			if (!sw->key)
				ret = -ENOMEM;
		}
	}

	mutex_unlock(&sw->tb->lock);
	return ret;
}
static DEVICE_ATTR(key, 0600, key_show, key_store);
static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
}

/*
 * Currently all lanes must run at the same speed but we expose here
 * both directions to allow possible asymmetric links in the future.
 */
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);

static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->link_width);
}

/*
 * Currently link has same amount of lanes both directions (1 or 2) but
 * expose them separately to allow possible asymmetric links in the future.
 */
static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);

static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u32 status;

	nvm_get_auth_status(sw, &status);
	return sprintf(buf, "%#x\n", status);
}

static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	bool val;
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	/* If NVMem devices are not yet added */
	if (!sw->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtobool(buf, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear the authentication status */
	nvm_clear_auth_status(sw);

	if (val) {
		if (!sw->nvm->buf) {
			ret = -EINVAL;
			goto exit_unlock;
		}

		ret = nvm_validate_and_write(sw);
		if (ret)
			goto exit_unlock;

		sw->nvm->authenticating = true;
		ret = nvm_authenticate(sw);
	}

exit_unlock:
	mutex_unlock(&sw->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);
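/*
 * Informative summary of the upgrade flow implemented above: userspace
 * writes the new image to the nvm_non_active NVMem device (cached by
 * tb_switch_nvm_write()), then writes 1 to the nvm_authenticate
 * attribute which validates the image, writes it to the flash and
 * starts the authentication. Any failure status can be read back from
 * the same attribute afterwards.
 */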
static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->safe_mode)
		ret = -ENODATA;
	else if (!sw->nvm)
		ret = -EAGAIN;
	else
		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);

	mutex_unlock(&sw->tb->lock);

	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%pUb\n", sw->uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *switch_attrs[] = {
	&dev_attr_authorized.attr,
	&dev_attr_boot.attr,
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_generation.attr,
	&dev_attr_key.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_rx_speed.attr,
	&dev_attr_rx_lanes.attr,
	&dev_attr_tx_speed.attr,
	&dev_attr_tx_lanes.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	&dev_attr_unique_id.attr,
	NULL,
};

static umode_t switch_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct tb_switch *sw = tb_to_switch(dev);

	if (attr == &dev_attr_device.attr) {
		if (!sw->device)
			return 0;
	} else if (attr == &dev_attr_device_name.attr) {
		if (!sw->device_name)
			return 0;
	} else if (attr == &dev_attr_vendor.attr) {
		if (!sw->vendor)
			return 0;
	} else if (attr == &dev_attr_vendor_name.attr) {
		if (!sw->vendor_name)
			return 0;
	} else if (attr == &dev_attr_key.attr) {
		if (tb_route(sw) &&
		    sw->tb->security_level == TB_SECURITY_SECURE &&
		    sw->security_level == TB_SECURITY_SECURE)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_rx_speed.attr ||
		   attr == &dev_attr_rx_lanes.attr ||
		   attr == &dev_attr_tx_speed.attr ||
		   attr == &dev_attr_tx_lanes.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate.attr) {
		if (nvm_upgradeable(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_version.attr) {
		if (nvm_readable(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_boot.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	}

	return sw->safe_mode ? 0 : attr->mode;
}

static struct attribute_group switch_group = {
	.is_visible = switch_attr_is_visible,
	.attrs = switch_attrs,
};

static const struct attribute_group *switch_groups[] = {
	&switch_group,
	NULL,
};

static void tb_switch_release(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	struct tb_port *port;

	dma_port_free(sw->dma_port);

	tb_switch_for_each_port(sw, port) {
		if (!port->disabled) {
			ida_destroy(&port->in_hopids);
			ida_destroy(&port->out_hopids);
		}
	}

	kfree(sw->uuid);
	kfree(sw->device_name);
	kfree(sw->vendor_name);
	kfree(sw->ports);
	kfree(sw->drom);
	kfree(sw->key);
	kfree(sw);
}
/*
 * Currently only need to provide the callbacks. Everything else is handled
 * in the connection manager.
 */
static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_suspend_switch)
		return cm_ops->runtime_suspend_switch(sw);

	return 0;
}

static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_resume_switch)
		return cm_ops->runtime_resume_switch(sw);
	return 0;
}

static const struct dev_pm_ops tb_switch_pm_ops = {
	SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
			   NULL)
};

struct device_type tb_switch_type = {
	.name = "thunderbolt_device",
	.release = tb_switch_release,
	.pm = &tb_switch_pm_ops,
};

static int tb_switch_get_generation(struct tb_switch *sw)
{
	switch (sw->config.device_id) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
	case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
		return 1;

	case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
		return 2;

	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ICL_NHI0:
	case PCI_DEVICE_ID_INTEL_ICL_NHI1:
		return 3;

	default:
		if (tb_switch_is_usb4(sw))
			return 4;

		/*
		 * For unknown switches assume generation to be 1 to be
		 * on the safe side.
		 */
		tb_sw_warn(sw, "unsupported switch device id %#x\n",
			   sw->config.device_id);
		return 1;
	}
}

static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
{
	int max_depth;

	if (tb_switch_is_usb4(sw) ||
	    (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
		max_depth = USB4_SWITCH_MAX_DEPTH;
	else
		max_depth = TB_SWITCH_MAX_DEPTH;

	return depth > max_depth;
}
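/*
 * Example (illustrative): with the limits defined in tb.h a chain of
 * daisy chained Thunderbolt devices may go TB_SWITCH_MAX_DEPTH levels
 * deep, while USB4 topologies are capped at USB4_SWITCH_MAX_DEPTH. A
 * device whose route string is longer than the applicable limit is
 * rejected by tb_switch_alloc() below with -EADDRNOTAVAIL.
 */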
/**
 * tb_switch_alloc() - allocate a switch
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * Allocates and initializes a switch. Will not upload configuration to
 * the switch. For that you need to call tb_switch_configure()
 * separately. The returned switch should be released by calling
 * tb_switch_put().
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of
 * failure.
 */
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
				  u64 route)
{
	struct tb_switch *sw;
	int upstream_port;
	int i, ret, depth;

	/* Unlock the downstream port so we can access the switch below */
	if (route) {
		struct tb_switch *parent_sw = tb_to_switch(parent);
		struct tb_port *down;

		down = tb_port_at(route, parent_sw);
		tb_port_unlock(down);
	}

	depth = tb_route_length(route);

	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
	if (upstream_port < 0)
		return ERR_PTR(upstream_port);

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	sw->tb = tb;
	ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
	if (ret)
		goto err_free_sw_ports;

	sw->generation = tb_switch_get_generation(sw);

	tb_dbg(tb, "current switch config:\n");
	tb_dump_switch(tb, sw);

	/* configure switch */
	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = depth;
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->config.enabled = 0;

	/* Make sure we do not exceed maximum topology limit */
	if (tb_switch_exceeds_max_depth(sw, depth)) {
		ret = -EADDRNOTAVAIL;
		goto err_free_sw_ports;
	}

	/* initialize ports */
	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
			    GFP_KERNEL);
	if (!sw->ports) {
		ret = -ENOMEM;
		goto err_free_sw_ports;
	}

	for (i = 0; i <= sw->config.max_port_number; i++) {
		/* minimum setup for tb_find_cap and tb_drom_read to work */
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;
	}

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
	if (ret > 0)
		sw->cap_plug_events = ret;

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
	if (ret > 0)
		sw->cap_lc = ret;

	/* Root switch is always authorized */
	if (!route)
		sw->authorized = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;

err_free_sw_ports:
	kfree(sw->ports);
	kfree(sw);

	return ERR_PTR(ret);
}
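/*
 * Typical (illustrative) enumeration sequence using the function above,
 * as done by a connection manager:
 *
 *	sw = tb_switch_alloc(tb, parent, route);
 *	if (!IS_ERR(sw)) {
 *		if (tb_switch_configure(sw) || tb_switch_add(sw))
 *			tb_switch_put(sw);
 *	}
 */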
/**
 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * This creates a switch in safe mode. This means the switch pretty much
 * lacks all capabilities except DMA configuration port before it is
 * flashed with a valid NVM firmware.
 *
 * The returned switch must be released by calling tb_switch_put().
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
 */
struct tb_switch *
tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
{
	struct tb_switch *sw;

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	sw->tb = tb;
	sw->config.depth = tb_route_length(route);
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->safe_mode = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;
}

/**
 * tb_switch_configure() - Uploads configuration to the switch
 * @sw: Switch to configure
 *
 * Call this function before the switch is added to the system. It will
 * upload configuration to the switch and makes it available for the
 * connection manager to use. Can be called to the switch again after
 * resume from low power states to re-initialize it.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_configure(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	u64 route;
	int ret;

	route = tb_route(sw);

	tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
	       sw->config.enabled ? "restoring" : "initializing", route,
	       tb_route_length(route), sw->config.upstream_port_number);

	sw->config.enabled = 1;

	if (tb_switch_is_usb4(sw)) {
		/*
		 * For USB4 devices, we need to program the CM version
		 * accordingly so that it knows to expose all the
		 * additional capabilities.
		 */
		sw->config.cmuv = USB4_VERSION_1_0;

		/* Enumerate the switch */
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
				  ROUTER_CS_1, 4);
		if (ret)
			return ret;

		ret = usb4_switch_setup(sw);
		if (ret)
			return ret;

		ret = usb4_switch_configure_link(sw);
	} else {
		if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
			tb_sw_warn(sw, "unknown switch vendor id %#x\n",
				   sw->config.vendor_id);

		if (!sw->cap_plug_events) {
			tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
			return -ENODEV;
		}

		/* Enumerate the switch */
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
				  ROUTER_CS_1, 3);
		if (ret)
			return ret;

		ret = tb_lc_configure_link(sw);
	}
	if (ret)
		return ret;

	return tb_plug_events_active(sw, true);
}
static int tb_switch_set_uuid(struct tb_switch *sw)
{
	bool uid = false;
	u32 uuid[4];
	int ret;

	if (sw->uuid)
		return 0;

	if (tb_switch_is_usb4(sw)) {
		ret = usb4_switch_read_uid(sw, &sw->uid);
		if (ret)
			return ret;
		uid = true;
	} else {
		/*
		 * The newer controllers include fused UUID as part of
		 * link controller specific registers
		 */
		ret = tb_lc_read_uuid(sw, uuid);
		if (ret) {
			if (ret != -EINVAL)
				return ret;
			uid = true;
		}
	}

	if (uid) {
		/*
		 * ICM generates UUID based on UID and fills the upper
		 * two words with ones. This is not strictly following
		 * UUID format but we want to be compatible with it so
		 * we do the same here.
		 */
		uuid[0] = sw->uid & 0xffffffff;
		uuid[1] = (sw->uid >> 32) & 0xffffffff;
		uuid[2] = 0xffffffff;
		uuid[3] = 0xffffffff;
	}

	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
	if (!sw->uuid)
		return -ENOMEM;
	return 0;
}
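/*
 * Worked example (illustrative): a UID of 0x0123456789abcdef yields
 * uuid[0] = 0x89abcdef, uuid[1] = 0x01234567 and the upper two words
 * all ones, matching what the ICM firmware would generate for the same
 * switch.
 */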
static void tb_switch_default_link_ports(struct tb_switch *sw)
{
	int i;

	for (i = 1; i <= sw->config.max_port_number; i += 2) {
		struct tb_port *port = &sw->ports[i];
		struct tb_port *subordinate;

		if (!tb_port_is_null(port))
			continue;

		/* Check for the subordinate port */
		if (i == sw->config.max_port_number ||
		    !tb_port_is_null(&sw->ports[i + 1]))
			continue;

		/* Link them if not already done so (by DROM) */
		subordinate = &sw->ports[i + 1];
		if (!port->dual_link_port && !subordinate->dual_link_port) {
			port->link_nr = 0;
			port->dual_link_port = subordinate;
			subordinate->link_nr = 1;
			subordinate->dual_link_port = port;

			tb_sw_dbg(sw, "linked ports %d <-> %d\n",
				  port->port, subordinate->port);
		}
	}
}

static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
{
	const struct tb_port *up = tb_upstream_port(sw);

	if (!up->dual_link_port || !up->dual_link_port->remote)
		return false;

	if (tb_switch_is_usb4(sw))
		return usb4_switch_lane_bonding_possible(sw);
	return tb_lc_lane_bonding_possible(sw);
}

static int tb_switch_update_link_attributes(struct tb_switch *sw)
{
	struct tb_port *up;
	bool change = false;
	int ret;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	up = tb_upstream_port(sw);

	ret = tb_port_get_link_speed(up);
	if (ret < 0)
		return ret;
	if (sw->link_speed != ret)
		change = true;
	sw->link_speed = ret;

	ret = tb_port_get_link_width(up);
	if (ret < 0)
		return ret;
	if (sw->link_width != ret)
		change = true;
	sw->link_width = ret;

	/* Notify userspace that there is a possible link attribute change */
	if (device_is_registered(&sw->dev) && change)
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);

	return 0;
}

/**
 * tb_switch_lane_bonding_enable() - Enable lane bonding
 * @sw: Switch to enable lane bonding
 *
 * The connection manager can call this function to enable lane bonding
 * of a switch. If the conditions are correct and both switches support
 * the feature, the lanes are bonded. It is safe to call this for any
 * switch.
 */
int tb_switch_lane_bonding_enable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
	struct tb_port *up, *down;
	u64 route = tb_route(sw);
	int ret;

	if (!route)
		return 0;

	if (!tb_switch_lane_bonding_possible(sw))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_port_at(route, parent);

	if (!tb_port_is_width_supported(up, 2) ||
	    !tb_port_is_width_supported(down, 2))
		return 0;

	ret = tb_port_lane_bonding_enable(up);
	if (ret) {
		tb_port_warn(up, "failed to enable lane bonding\n");
		return ret;
	}

	ret = tb_port_lane_bonding_enable(down);
	if (ret) {
		tb_port_warn(down, "failed to enable lane bonding\n");
		tb_port_lane_bonding_disable(up);
		return ret;
	}

	tb_switch_update_link_attributes(sw);

	tb_sw_dbg(sw, "lane bonding enabled\n");
	return ret;
}
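/*
 * A minimal sketch of how a connection manager might use the lane
 * bonding helpers when a new router is enumerated (illustrative only;
 * the enumeration context and error handling are assumed):
 *
 *	// After the upstream link is up and the switch has been added
 *	if (tb_switch_lane_bonding_enable(sw))
 *		tb_sw_warn(sw, "falling back to single lane\n");
 *
 * and the mirror image on unplug/teardown:
 *
 *	tb_switch_lane_bonding_disable(sw);
 *
 * Both calls are safe no-ops when bonding is not possible or was never
 * enabled.
 */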
/**
 * tb_switch_lane_bonding_disable() - Disable lane bonding
 * @sw: Switch whose lane bonding to disable
 *
 * Disables lane bonding between @sw and its parent. This can be called
 * even if the lanes were not bonded originally.
 */
void tb_switch_lane_bonding_disable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
	struct tb_port *up, *down;

	if (!tb_route(sw))
		return;

	up = tb_upstream_port(sw);
	if (!up->bonded)
		return;

	down = tb_port_at(tb_route(sw), parent);

	tb_port_lane_bonding_disable(up);
	tb_port_lane_bonding_disable(down);

	tb_switch_update_link_attributes(sw);
	tb_sw_dbg(sw, "lane bonding disabled\n");
}

/**
 * tb_switch_add() - Add a switch to the domain
 * @sw: Switch to add
 *
 * This is the last step in adding a switch to the domain. It will read
 * identification information from the DROM and initialize the ports so
 * that they can be used to connect other switches. The switch will be
 * exposed to userspace when this function successfully returns. To
 * remove and release the switch, call tb_switch_remove().
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_add(struct tb_switch *sw)
{
	int i, ret;

	/*
	 * Initialize the DMA control port now before we read the DROM.
	 * Recent host controllers have a more complete DROM on NVM that
	 * includes vendor and model identification strings which we
	 * then expose to userspace. The NVM can be accessed through a
	 * DMA configuration based mailbox.
	 */
	ret = tb_switch_add_dma_port(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add DMA port\n");
		return ret;
	}

	if (!sw->safe_mode) {
		/* read drom */
		ret = tb_drom_read(sw);
		if (ret) {
			dev_err(&sw->dev, "reading DROM failed\n");
			return ret;
		}
		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);

		ret = tb_switch_set_uuid(sw);
		if (ret) {
			dev_err(&sw->dev, "failed to set UUID\n");
			return ret;
		}

		for (i = 0; i <= sw->config.max_port_number; i++) {
			if (sw->ports[i].disabled) {
				tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
				continue;
			}
			ret = tb_init_port(&sw->ports[i]);
			if (ret) {
				dev_err(&sw->dev, "failed to initialize port %d\n", i);
				return ret;
			}
		}

		tb_switch_default_link_ports(sw);

		ret = tb_switch_update_link_attributes(sw);
		if (ret)
			return ret;
	}

	ret = device_add(&sw->dev);
	if (ret) {
		dev_err(&sw->dev, "failed to add device: %d\n", ret);
		return ret;
	}

	if (tb_route(sw)) {
		dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
			 sw->vendor, sw->device);
		if (sw->vendor_name && sw->device_name)
			dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
				 sw->device_name);
	}

	ret = tb_switch_nvm_add(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add NVM devices\n");
		device_del(&sw->dev);
		return ret;
	}

	pm_runtime_set_active(&sw->dev);
	if (sw->rpm) {
		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
		pm_runtime_use_autosuspend(&sw->dev);
		pm_runtime_mark_last_busy(&sw->dev);
		pm_runtime_enable(&sw->dev);
		pm_request_autosuspend(&sw->dev);
	}

	return 0;
}
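/*
 * A minimal sketch of the expected alloc/configure/add sequence as a
 * connection manager might implement it (illustrative only; assumes
 * the tb_switch_alloc() allocator declared elsewhere in this driver
 * and the usual tb_switch_put() cleanup on failure):
 *
 *	sw = tb_switch_alloc(tb, &tb->dev, route);
 *	if (IS_ERR(sw))
 *		return PTR_ERR(sw);
 *
 *	ret = tb_switch_configure(sw);
 *	if (ret)
 *		goto err_put;
 *
 *	ret = tb_switch_add(sw);
 *	if (ret)
 *		goto err_put;
 */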
/**
 * tb_switch_remove() - Remove and release a switch
 * @sw: Switch to remove
 *
 * This will remove the switch from the domain and release it after the
 * last reference count drops to zero. If there are switches connected
 * below this switch, they will be removed as well.
 */
void tb_switch_remove(struct tb_switch *sw)
{
	struct tb_port *port;

	if (sw->rpm) {
		pm_runtime_get_sync(&sw->dev);
		pm_runtime_disable(&sw->dev);
	}

	/* port 0 is the switch itself and never has a remote */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else if (port->xdomain) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
		}
	}

	if (!sw->is_unplugged)
		tb_plug_events_active(sw, false);

	if (tb_switch_is_usb4(sw))
		usb4_switch_unconfigure_link(sw);
	else
		tb_lc_unconfigure_link(sw);

	tb_switch_nvm_remove(sw);

	if (tb_route(sw))
		dev_info(&sw->dev, "device disconnected\n");
	device_unregister(&sw->dev);
}

/**
 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
 * @sw: Switch to mark as unplugged
 */
void tb_sw_set_unplugged(struct tb_switch *sw)
{
	struct tb_port *port;

	if (sw == sw->tb->root_switch) {
		tb_sw_WARN(sw, "cannot unplug root switch\n");
		return;
	}
	if (sw->is_unplugged) {
		tb_sw_WARN(sw, "is_unplugged already set\n");
		return;
	}
	sw->is_unplugged = true;
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_sw_set_unplugged(port->remote->sw);
		else if (port->xdomain)
			port->xdomain->is_unplugged = true;
	}
}
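/*
 * A minimal sketch of an unplug path using the helper above
 * (illustrative only; locating the switch by route and the surrounding
 * hot-plug event context are assumed):
 *
 *	sw = tb_switch_find_by_route(tb, route);
 *	if (sw) {
 *		tb_sw_set_unplugged(sw);
 *		tb_switch_remove(sw);
 *		tb_switch_put(sw);
 *	}
 *
 * Marking the subtree unplugged first makes tb_switch_remove() skip
 * the plug events deactivation for hardware that is already gone.
 */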
int tb_switch_resume(struct tb_switch *sw)
{
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "resuming switch\n");

	/*
	 * Check for the UID of the connected switches except for the
	 * root switch which we assume cannot be removed.
	 */
	if (tb_route(sw)) {
		u64 uid;

		/*
		 * Check first that we can still read the switch config
		 * space. It may be that there is now another domain
		 * connected.
		 */
		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
		if (err < 0) {
			tb_sw_info(sw, "switch not present anymore\n");
			return err;
		}

		if (tb_switch_is_usb4(sw))
			err = usb4_switch_read_uid(sw, &uid);
		else
			err = tb_drom_read_uid_only(sw, &uid);
		if (err) {
			tb_sw_warn(sw, "uid read failed\n");
			return err;
		}
		if (sw->uid != uid) {
			tb_sw_info(sw,
				"changed while suspended (uid %#llx -> %#llx)\n",
				sw->uid, uid);
			return -ENODEV;
		}
	}

	err = tb_switch_configure(sw);
	if (err)
		return err;

	/* check for surviving downstream switches */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (tb_wait_for_port(port, true) <= 0) {
			tb_port_warn(port,
				     "lost during suspend, disconnecting\n");
			if (tb_port_has_remote(port))
				tb_sw_set_unplugged(port->remote->sw);
			else if (port->xdomain)
				port->xdomain->is_unplugged = true;
		} else if (tb_port_has_remote(port) || port->xdomain) {
			/*
			 * Always unlock the port so the downstream
			 * switch/domain is accessible.
			 */
			if (tb_port_unlock(port))
				tb_port_warn(port, "failed to unlock port\n");
			if (port->remote && tb_switch_resume(port->remote->sw)) {
				tb_port_warn(port,
					     "lost during suspend, disconnecting\n");
				tb_sw_set_unplugged(port->remote->sw);
			}
		}
	}
	return 0;
}

void tb_switch_suspend(struct tb_switch *sw)
{
	struct tb_port *port;
	int err;

	err = tb_plug_events_active(sw, false);
	if (err)
		return;

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_switch_suspend(port->remote->sw);
	}

	if (tb_switch_is_usb4(sw))
		usb4_switch_set_sleep(sw);
	else
		tb_lc_set_sleep(sw);
}

/**
 * tb_switch_query_dp_resource() - Query availability of DP resource
 * @sw: Switch whose DP resource is queried
 * @in: DP IN port
 *
 * Queries availability of DP resource for DP tunneling using switch
 * specific means. Returns %true if the resource is available.
 */
bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_query_dp_resource(sw, in);
	return tb_lc_dp_sink_query(sw, in);
}

/**
 * tb_switch_alloc_dp_resource() - Allocate available DP resource
 * @sw: Switch whose DP resource is allocated
 * @in: DP IN port
 *
 * Allocates DP resource for DP tunneling. The resource must be
 * available for this to succeed (see tb_switch_query_dp_resource()).
 * Returns %0 in case of success and negative errno otherwise.
 */
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_alloc_dp_resource(sw, in);
	return tb_lc_dp_sink_alloc(sw, in);
}
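/*
 * A minimal sketch of the intended query/alloc/dealloc flow around DP
 * tunneling (illustrative only; the tunnel setup itself is out of
 * scope here and only hinted at):
 *
 *	if (!tb_switch_query_dp_resource(sw, in))
 *		return -EBUSY;
 *
 *	ret = tb_switch_alloc_dp_resource(sw, in);
 *	if (ret)
 *		return ret;
 *
 *	// ... establish the DP tunnel using @in ...
 *
 *	// and once the tunnel is torn down:
 *	tb_switch_dealloc_dp_resource(sw, in);
 */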
/**
 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
 * @sw: Switch whose DP resource is de-allocated
 * @in: DP IN port
 *
 * De-allocates DP resource that was previously allocated for DP
 * tunneling.
 */
void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	int ret;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_dealloc_dp_resource(sw, in);
	else
		ret = tb_lc_dp_sink_dealloc(sw, in);

	if (ret)
		tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
			   in->port);
}

struct tb_sw_lookup {
	struct tb *tb;
	u8 link;
	u8 depth;
	const uuid_t *uuid;
	u64 route;
};

static int tb_switch_match(struct device *dev, const void *data)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_sw_lookup *lookup = data;

	if (!sw)
		return 0;
	if (sw->tb != lookup->tb)
		return 0;

	if (lookup->uuid)
		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));

	if (lookup->route) {
		return sw->config.route_lo == lower_32_bits(lookup->route) &&
		       sw->config.route_hi == upper_32_bits(lookup->route);
	}

	/* Root switch is matched only by depth */
	if (!lookup->depth)
		return !sw->depth;

	return sw->link == lookup->link && sw->depth == lookup->depth;
}

/**
 * tb_switch_find_by_link_depth() - Find switch by link and depth
 * @tb: Domain the switch belongs to
 * @link: Link number the switch is connected to
 * @depth: Depth of the switch in the link
 *
 * The returned switch has its reference count increased so the caller
 * needs to call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.link = link;
	lookup.depth = depth;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_uuid() - Find switch by UUID
 * @tb: Domain the switch belongs to
 * @uuid: UUID to look for
 *
 * The returned switch has its reference count increased so the caller
 * needs to call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.uuid = uuid;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}
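/*
 * A minimal sketch of the reference counting contract of the lookup
 * helpers above and below (illustrative only):
 *
 *	sw = tb_switch_find_by_uuid(tb, uuid);
 *	if (sw) {
 *		// ... use sw ...
 *		tb_switch_put(sw);	// drop the lookup reference
 *	}
 *
 * bus_find_device() takes the reference via get_device(), so every
 * successful lookup must be balanced with tb_switch_put().
 */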
/**
 * tb_switch_find_by_route() - Find switch by route string
 * @tb: Domain the switch belongs to
 * @route: Route string to look for
 *
 * The returned switch has its reference count increased so the caller
 * needs to call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	if (!route)
		return tb_switch_get(tb->root_switch);

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.route = route;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_port() - Return the first port of @type on @sw or NULL
 * @sw: Switch to find the port from
 * @type: Port type to look for
 */
struct tb_port *tb_switch_find_port(struct tb_switch *sw,
				    enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (port->config.type == type)
			return port;
	}

	return NULL;
}

void tb_switch_exit(void)
{
	ida_destroy(&nvm_ida);
}