1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Thunderbolt driver - switch/port utility functions 4 * 5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> 6 * Copyright (C) 2018, Intel Corporation 7 */ 8 9 #include <linux/delay.h> 10 #include <linux/idr.h> 11 #include <linux/nvmem-provider.h> 12 #include <linux/pm_runtime.h> 13 #include <linux/sched/signal.h> 14 #include <linux/sizes.h> 15 #include <linux/slab.h> 16 #include <linux/vmalloc.h> 17 18 #include "tb.h" 19 20 /* Switch NVM support */ 21 22 #define NVM_DEVID 0x05 23 #define NVM_VERSION 0x08 24 #define NVM_CSS 0x10 25 #define NVM_FLASH_SIZE 0x45 26 27 #define NVM_MIN_SIZE SZ_32K 28 #define NVM_MAX_SIZE SZ_512K 29 30 static DEFINE_IDA(nvm_ida); 31 32 struct nvm_auth_status { 33 struct list_head list; 34 uuid_t uuid; 35 u32 status; 36 }; 37 38 /* 39 * Hold NVM authentication failure status per switch This information 40 * needs to stay around even when the switch gets power cycled so we 41 * keep it separately. 42 */ 43 static LIST_HEAD(nvm_auth_status_cache); 44 static DEFINE_MUTEX(nvm_auth_status_lock); 45 46 static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw) 47 { 48 struct nvm_auth_status *st; 49 50 list_for_each_entry(st, &nvm_auth_status_cache, list) { 51 if (uuid_equal(&st->uuid, sw->uuid)) 52 return st; 53 } 54 55 return NULL; 56 } 57 58 static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status) 59 { 60 struct nvm_auth_status *st; 61 62 mutex_lock(&nvm_auth_status_lock); 63 st = __nvm_get_auth_status(sw); 64 mutex_unlock(&nvm_auth_status_lock); 65 66 *status = st ? st->status : 0; 67 } 68 69 static void nvm_set_auth_status(const struct tb_switch *sw, u32 status) 70 { 71 struct nvm_auth_status *st; 72 73 if (WARN_ON(!sw->uuid)) 74 return; 75 76 mutex_lock(&nvm_auth_status_lock); 77 st = __nvm_get_auth_status(sw); 78 79 if (!st) { 80 st = kzalloc(sizeof(*st), GFP_KERNEL); 81 if (!st) 82 goto unlock; 83 84 memcpy(&st->uuid, sw->uuid, sizeof(st->uuid)); 85 INIT_LIST_HEAD(&st->list); 86 list_add_tail(&st->list, &nvm_auth_status_cache); 87 } 88 89 st->status = status; 90 unlock: 91 mutex_unlock(&nvm_auth_status_lock); 92 } 93 94 static void nvm_clear_auth_status(const struct tb_switch *sw) 95 { 96 struct nvm_auth_status *st; 97 98 mutex_lock(&nvm_auth_status_lock); 99 st = __nvm_get_auth_status(sw); 100 if (st) { 101 list_del(&st->list); 102 kfree(st); 103 } 104 mutex_unlock(&nvm_auth_status_lock); 105 } 106 107 static int nvm_validate_and_write(struct tb_switch *sw) 108 { 109 unsigned int image_size, hdr_size; 110 const u8 *buf = sw->nvm->buf; 111 u16 ds_size; 112 int ret; 113 114 if (!buf) 115 return -EINVAL; 116 117 image_size = sw->nvm->buf_data_size; 118 if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE) 119 return -EINVAL; 120 121 /* 122 * FARB pointer must point inside the image and must at least 123 * contain parts of the digital section we will be reading here. 124 */ 125 hdr_size = (*(u32 *)buf) & 0xffffff; 126 if (hdr_size + NVM_DEVID + 2 >= image_size) 127 return -EINVAL; 128 129 /* Digital section start should be aligned to 4k page */ 130 if (!IS_ALIGNED(hdr_size, SZ_4K)) 131 return -EINVAL; 132 133 /* 134 * Read digital section size and check that it also fits inside 135 * the image. 136 */ 137 ds_size = *(u16 *)(buf + hdr_size); 138 if (ds_size >= image_size) 139 return -EINVAL; 140 141 if (!sw->safe_mode) { 142 u16 device_id; 143 144 /* 145 * Make sure the device ID in the image matches the one 146 * we read from the switch config space. 147 */ 148 device_id = *(u16 *)(buf + hdr_size + NVM_DEVID); 149 if (device_id != sw->config.device_id) 150 return -EINVAL; 151 152 if (sw->generation < 3) { 153 /* Write CSS headers first */ 154 ret = dma_port_flash_write(sw->dma_port, 155 DMA_PORT_CSS_ADDRESS, buf + NVM_CSS, 156 DMA_PORT_CSS_MAX_SIZE); 157 if (ret) 158 return ret; 159 } 160 161 /* Skip headers in the image */ 162 buf += hdr_size; 163 image_size -= hdr_size; 164 } 165 166 if (tb_switch_is_usb4(sw)) 167 return usb4_switch_nvm_write(sw, 0, buf, image_size); 168 return dma_port_flash_write(sw->dma_port, 0, buf, image_size); 169 } 170 171 static int nvm_authenticate_host_dma_port(struct tb_switch *sw) 172 { 173 int ret = 0; 174 175 /* 176 * Root switch NVM upgrade requires that we disconnect the 177 * existing paths first (in case it is not in safe mode 178 * already). 179 */ 180 if (!sw->safe_mode) { 181 u32 status; 182 183 ret = tb_domain_disconnect_all_paths(sw->tb); 184 if (ret) 185 return ret; 186 /* 187 * The host controller goes away pretty soon after this if 188 * everything goes well so getting timeout is expected. 189 */ 190 ret = dma_port_flash_update_auth(sw->dma_port); 191 if (!ret || ret == -ETIMEDOUT) 192 return 0; 193 194 /* 195 * Any error from update auth operation requires power 196 * cycling of the host router. 197 */ 198 tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n"); 199 if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0) 200 nvm_set_auth_status(sw, status); 201 } 202 203 /* 204 * From safe mode we can get out by just power cycling the 205 * switch. 206 */ 207 dma_port_power_cycle(sw->dma_port); 208 return ret; 209 } 210 211 static int nvm_authenticate_device_dma_port(struct tb_switch *sw) 212 { 213 int ret, retries = 10; 214 215 ret = dma_port_flash_update_auth(sw->dma_port); 216 switch (ret) { 217 case 0: 218 case -ETIMEDOUT: 219 case -EACCES: 220 case -EINVAL: 221 /* Power cycle is required */ 222 break; 223 default: 224 return ret; 225 } 226 227 /* 228 * Poll here for the authentication status. It takes some time 229 * for the device to respond (we get timeout for a while). Once 230 * we get response the device needs to be power cycled in order 231 * to the new NVM to be taken into use. 232 */ 233 do { 234 u32 status; 235 236 ret = dma_port_flash_update_auth_status(sw->dma_port, &status); 237 if (ret < 0 && ret != -ETIMEDOUT) 238 return ret; 239 if (ret > 0) { 240 if (status) { 241 tb_sw_warn(sw, "failed to authenticate NVM\n"); 242 nvm_set_auth_status(sw, status); 243 } 244 245 tb_sw_info(sw, "power cycling the switch now\n"); 246 dma_port_power_cycle(sw->dma_port); 247 return 0; 248 } 249 250 msleep(500); 251 } while (--retries); 252 253 return -ETIMEDOUT; 254 } 255 256 static void nvm_authenticate_start_dma_port(struct tb_switch *sw) 257 { 258 struct pci_dev *root_port; 259 260 /* 261 * During host router NVM upgrade we should not allow root port to 262 * go into D3cold because some root ports cannot trigger PME 263 * itself. To be on the safe side keep the root port in D0 during 264 * the whole upgrade process. 265 */ 266 root_port = pcie_find_root_port(sw->tb->nhi->pdev); 267 if (root_port) 268 pm_runtime_get_noresume(&root_port->dev); 269 } 270 271 static void nvm_authenticate_complete_dma_port(struct tb_switch *sw) 272 { 273 struct pci_dev *root_port; 274 275 root_port = pcie_find_root_port(sw->tb->nhi->pdev); 276 if (root_port) 277 pm_runtime_put(&root_port->dev); 278 } 279 280 static inline bool nvm_readable(struct tb_switch *sw) 281 { 282 if (tb_switch_is_usb4(sw)) { 283 /* 284 * USB4 devices must support NVM operations but it is 285 * optional for hosts. Therefore we query the NVM sector 286 * size here and if it is supported assume NVM 287 * operations are implemented. 288 */ 289 return usb4_switch_nvm_sector_size(sw) > 0; 290 } 291 292 /* Thunderbolt 2 and 3 devices support NVM through DMA port */ 293 return !!sw->dma_port; 294 } 295 296 static inline bool nvm_upgradeable(struct tb_switch *sw) 297 { 298 if (sw->no_nvm_upgrade) 299 return false; 300 return nvm_readable(sw); 301 } 302 303 static inline int nvm_read(struct tb_switch *sw, unsigned int address, 304 void *buf, size_t size) 305 { 306 if (tb_switch_is_usb4(sw)) 307 return usb4_switch_nvm_read(sw, address, buf, size); 308 return dma_port_flash_read(sw->dma_port, address, buf, size); 309 } 310 311 static int nvm_authenticate(struct tb_switch *sw) 312 { 313 int ret; 314 315 if (tb_switch_is_usb4(sw)) 316 return usb4_switch_nvm_authenticate(sw); 317 318 if (!tb_route(sw)) { 319 nvm_authenticate_start_dma_port(sw); 320 ret = nvm_authenticate_host_dma_port(sw); 321 } else { 322 ret = nvm_authenticate_device_dma_port(sw); 323 } 324 325 return ret; 326 } 327 328 static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val, 329 size_t bytes) 330 { 331 struct tb_switch *sw = priv; 332 int ret; 333 334 pm_runtime_get_sync(&sw->dev); 335 336 if (!mutex_trylock(&sw->tb->lock)) { 337 ret = restart_syscall(); 338 goto out; 339 } 340 341 ret = nvm_read(sw, offset, val, bytes); 342 mutex_unlock(&sw->tb->lock); 343 344 out: 345 pm_runtime_mark_last_busy(&sw->dev); 346 pm_runtime_put_autosuspend(&sw->dev); 347 348 return ret; 349 } 350 351 static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val, 352 size_t bytes) 353 { 354 struct tb_switch *sw = priv; 355 int ret = 0; 356 357 if (!mutex_trylock(&sw->tb->lock)) 358 return restart_syscall(); 359 360 /* 361 * Since writing the NVM image might require some special steps, 362 * for example when CSS headers are written, we cache the image 363 * locally here and handle the special cases when the user asks 364 * us to authenticate the image. 365 */ 366 if (!sw->nvm->buf) { 367 sw->nvm->buf = vmalloc(NVM_MAX_SIZE); 368 if (!sw->nvm->buf) { 369 ret = -ENOMEM; 370 goto unlock; 371 } 372 } 373 374 sw->nvm->buf_data_size = offset + bytes; 375 memcpy(sw->nvm->buf + offset, val, bytes); 376 377 unlock: 378 mutex_unlock(&sw->tb->lock); 379 380 return ret; 381 } 382 383 static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id, 384 size_t size, bool active) 385 { 386 struct nvmem_config config; 387 388 memset(&config, 0, sizeof(config)); 389 390 if (active) { 391 config.name = "nvm_active"; 392 config.reg_read = tb_switch_nvm_read; 393 config.read_only = true; 394 } else { 395 config.name = "nvm_non_active"; 396 config.reg_write = tb_switch_nvm_write; 397 config.root_only = true; 398 } 399 400 config.id = id; 401 config.stride = 4; 402 config.word_size = 4; 403 config.size = size; 404 config.dev = &sw->dev; 405 config.owner = THIS_MODULE; 406 config.priv = sw; 407 408 return nvmem_register(&config); 409 } 410 411 static int tb_switch_nvm_add(struct tb_switch *sw) 412 { 413 struct nvmem_device *nvm_dev; 414 struct tb_switch_nvm *nvm; 415 u32 val; 416 int ret; 417 418 if (!nvm_readable(sw)) 419 return 0; 420 421 /* 422 * The NVM format of non-Intel hardware is not known so 423 * currently restrict NVM upgrade for Intel hardware. We may 424 * relax this in the future when we learn other NVM formats. 425 */ 426 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) { 427 dev_info(&sw->dev, 428 "NVM format of vendor %#x is not known, disabling NVM upgrade\n", 429 sw->config.vendor_id); 430 return 0; 431 } 432 433 nvm = kzalloc(sizeof(*nvm), GFP_KERNEL); 434 if (!nvm) 435 return -ENOMEM; 436 437 nvm->id = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL); 438 439 /* 440 * If the switch is in safe-mode the only accessible portion of 441 * the NVM is the non-active one where userspace is expected to 442 * write new functional NVM. 443 */ 444 if (!sw->safe_mode) { 445 u32 nvm_size, hdr_size; 446 447 ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val)); 448 if (ret) 449 goto err_ida; 450 451 hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K; 452 nvm_size = (SZ_1M << (val & 7)) / 8; 453 nvm_size = (nvm_size - hdr_size) / 2; 454 455 ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val)); 456 if (ret) 457 goto err_ida; 458 459 nvm->major = val >> 16; 460 nvm->minor = val >> 8; 461 462 nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true); 463 if (IS_ERR(nvm_dev)) { 464 ret = PTR_ERR(nvm_dev); 465 goto err_ida; 466 } 467 nvm->active = nvm_dev; 468 } 469 470 if (!sw->no_nvm_upgrade) { 471 nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false); 472 if (IS_ERR(nvm_dev)) { 473 ret = PTR_ERR(nvm_dev); 474 goto err_nvm_active; 475 } 476 nvm->non_active = nvm_dev; 477 } 478 479 sw->nvm = nvm; 480 return 0; 481 482 err_nvm_active: 483 if (nvm->active) 484 nvmem_unregister(nvm->active); 485 err_ida: 486 ida_simple_remove(&nvm_ida, nvm->id); 487 kfree(nvm); 488 489 return ret; 490 } 491 492 static void tb_switch_nvm_remove(struct tb_switch *sw) 493 { 494 struct tb_switch_nvm *nvm; 495 496 nvm = sw->nvm; 497 sw->nvm = NULL; 498 499 if (!nvm) 500 return; 501 502 /* Remove authentication status in case the switch is unplugged */ 503 if (!nvm->authenticating) 504 nvm_clear_auth_status(sw); 505 506 if (nvm->non_active) 507 nvmem_unregister(nvm->non_active); 508 if (nvm->active) 509 nvmem_unregister(nvm->active); 510 ida_simple_remove(&nvm_ida, nvm->id); 511 vfree(nvm->buf); 512 kfree(nvm); 513 } 514 515 /* port utility functions */ 516 517 static const char *tb_port_type(struct tb_regs_port_header *port) 518 { 519 switch (port->type >> 16) { 520 case 0: 521 switch ((u8) port->type) { 522 case 0: 523 return "Inactive"; 524 case 1: 525 return "Port"; 526 case 2: 527 return "NHI"; 528 default: 529 return "unknown"; 530 } 531 case 0x2: 532 return "Ethernet"; 533 case 0x8: 534 return "SATA"; 535 case 0xe: 536 return "DP/HDMI"; 537 case 0x10: 538 return "PCIe"; 539 case 0x20: 540 return "USB"; 541 default: 542 return "unknown"; 543 } 544 } 545 546 static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port) 547 { 548 tb_dbg(tb, 549 " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n", 550 port->port_number, port->vendor_id, port->device_id, 551 port->revision, port->thunderbolt_version, tb_port_type(port), 552 port->type); 553 tb_dbg(tb, " Max hop id (in/out): %d/%d\n", 554 port->max_in_hop_id, port->max_out_hop_id); 555 tb_dbg(tb, " Max counters: %d\n", port->max_counters); 556 tb_dbg(tb, " NFC Credits: %#x\n", port->nfc_credits); 557 } 558 559 /** 560 * tb_port_state() - get connectedness state of a port 561 * 562 * The port must have a TB_CAP_PHY (i.e. it should be a real port). 563 * 564 * Return: Returns an enum tb_port_state on success or an error code on failure. 565 */ 566 static int tb_port_state(struct tb_port *port) 567 { 568 struct tb_cap_phy phy; 569 int res; 570 if (port->cap_phy == 0) { 571 tb_port_WARN(port, "does not have a PHY\n"); 572 return -EINVAL; 573 } 574 res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2); 575 if (res) 576 return res; 577 return phy.state; 578 } 579 580 /** 581 * tb_wait_for_port() - wait for a port to become ready 582 * 583 * Wait up to 1 second for a port to reach state TB_PORT_UP. If 584 * wait_if_unplugged is set then we also wait if the port is in state 585 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after 586 * switch resume). Otherwise we only wait if a device is registered but the link 587 * has not yet been established. 588 * 589 * Return: Returns an error code on failure. Returns 0 if the port is not 590 * connected or failed to reach state TB_PORT_UP within one second. Returns 1 591 * if the port is connected and in state TB_PORT_UP. 592 */ 593 int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged) 594 { 595 int retries = 10; 596 int state; 597 if (!port->cap_phy) { 598 tb_port_WARN(port, "does not have PHY\n"); 599 return -EINVAL; 600 } 601 if (tb_is_upstream_port(port)) { 602 tb_port_WARN(port, "is the upstream port\n"); 603 return -EINVAL; 604 } 605 606 while (retries--) { 607 state = tb_port_state(port); 608 if (state < 0) 609 return state; 610 if (state == TB_PORT_DISABLED) { 611 tb_port_dbg(port, "is disabled (state: 0)\n"); 612 return 0; 613 } 614 if (state == TB_PORT_UNPLUGGED) { 615 if (wait_if_unplugged) { 616 /* used during resume */ 617 tb_port_dbg(port, 618 "is unplugged (state: 7), retrying...\n"); 619 msleep(100); 620 continue; 621 } 622 tb_port_dbg(port, "is unplugged (state: 7)\n"); 623 return 0; 624 } 625 if (state == TB_PORT_UP) { 626 tb_port_dbg(port, "is connected, link is up (state: 2)\n"); 627 return 1; 628 } 629 630 /* 631 * After plug-in the state is TB_PORT_CONNECTING. Give it some 632 * time. 633 */ 634 tb_port_dbg(port, 635 "is connected, link is not up (state: %d), retrying...\n", 636 state); 637 msleep(100); 638 } 639 tb_port_warn(port, 640 "failed to reach state TB_PORT_UP. Ignoring port...\n"); 641 return 0; 642 } 643 644 /** 645 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port 646 * 647 * Change the number of NFC credits allocated to @port by @credits. To remove 648 * NFC credits pass a negative amount of credits. 649 * 650 * Return: Returns 0 on success or an error code on failure. 651 */ 652 int tb_port_add_nfc_credits(struct tb_port *port, int credits) 653 { 654 u32 nfc_credits; 655 656 if (credits == 0 || port->sw->is_unplugged) 657 return 0; 658 659 nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK; 660 nfc_credits += credits; 661 662 tb_port_dbg(port, "adding %d NFC credits to %lu", credits, 663 port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK); 664 665 port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK; 666 port->config.nfc_credits |= nfc_credits; 667 668 return tb_port_write(port, &port->config.nfc_credits, 669 TB_CFG_PORT, ADP_CS_4, 1); 670 } 671 672 /** 673 * tb_port_set_initial_credits() - Set initial port link credits allocated 674 * @port: Port to set the initial credits 675 * @credits: Number of credits to to allocate 676 * 677 * Set initial credits value to be used for ingress shared buffering. 678 */ 679 int tb_port_set_initial_credits(struct tb_port *port, u32 credits) 680 { 681 u32 data; 682 int ret; 683 684 ret = tb_port_read(port, &data, TB_CFG_PORT, ADP_CS_5, 1); 685 if (ret) 686 return ret; 687 688 data &= ~ADP_CS_5_LCA_MASK; 689 data |= (credits << ADP_CS_5_LCA_SHIFT) & ADP_CS_5_LCA_MASK; 690 691 return tb_port_write(port, &data, TB_CFG_PORT, ADP_CS_5, 1); 692 } 693 694 /** 695 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER 696 * 697 * Return: Returns 0 on success or an error code on failure. 698 */ 699 int tb_port_clear_counter(struct tb_port *port, int counter) 700 { 701 u32 zero[3] = { 0, 0, 0 }; 702 tb_port_dbg(port, "clearing counter %d\n", counter); 703 return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3); 704 } 705 706 /** 707 * tb_port_unlock() - Unlock downstream port 708 * @port: Port to unlock 709 * 710 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the 711 * downstream router accessible for CM. 712 */ 713 int tb_port_unlock(struct tb_port *port) 714 { 715 if (tb_switch_is_icm(port->sw)) 716 return 0; 717 if (!tb_port_is_null(port)) 718 return -EINVAL; 719 if (tb_switch_is_usb4(port->sw)) 720 return usb4_port_unlock(port); 721 return 0; 722 } 723 724 /** 725 * tb_init_port() - initialize a port 726 * 727 * This is a helper method for tb_switch_alloc. Does not check or initialize 728 * any downstream switches. 729 * 730 * Return: Returns 0 on success or an error code on failure. 731 */ 732 static int tb_init_port(struct tb_port *port) 733 { 734 int res; 735 int cap; 736 737 res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8); 738 if (res) { 739 if (res == -ENODEV) { 740 tb_dbg(port->sw->tb, " Port %d: not implemented\n", 741 port->port); 742 return 0; 743 } 744 return res; 745 } 746 747 /* Port 0 is the switch itself and has no PHY. */ 748 if (port->config.type == TB_TYPE_PORT && port->port != 0) { 749 cap = tb_port_find_cap(port, TB_PORT_CAP_PHY); 750 751 if (cap > 0) 752 port->cap_phy = cap; 753 else 754 tb_port_WARN(port, "non switch port without a PHY\n"); 755 756 cap = tb_port_find_cap(port, TB_PORT_CAP_USB4); 757 if (cap > 0) 758 port->cap_usb4 = cap; 759 } else if (port->port != 0) { 760 cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP); 761 if (cap > 0) 762 port->cap_adap = cap; 763 } 764 765 tb_dump_port(port->sw->tb, &port->config); 766 767 /* Control port does not need HopID allocation */ 768 if (port->port) { 769 ida_init(&port->in_hopids); 770 ida_init(&port->out_hopids); 771 } 772 773 INIT_LIST_HEAD(&port->list); 774 return 0; 775 776 } 777 778 static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid, 779 int max_hopid) 780 { 781 int port_max_hopid; 782 struct ida *ida; 783 784 if (in) { 785 port_max_hopid = port->config.max_in_hop_id; 786 ida = &port->in_hopids; 787 } else { 788 port_max_hopid = port->config.max_out_hop_id; 789 ida = &port->out_hopids; 790 } 791 792 /* 793 * NHI can use HopIDs 1-max for other adapters HopIDs 0-7 are 794 * reserved. 795 */ 796 if (port->config.type != TB_TYPE_NHI && min_hopid < TB_PATH_MIN_HOPID) 797 min_hopid = TB_PATH_MIN_HOPID; 798 799 if (max_hopid < 0 || max_hopid > port_max_hopid) 800 max_hopid = port_max_hopid; 801 802 return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL); 803 } 804 805 /** 806 * tb_port_alloc_in_hopid() - Allocate input HopID from port 807 * @port: Port to allocate HopID for 808 * @min_hopid: Minimum acceptable input HopID 809 * @max_hopid: Maximum acceptable input HopID 810 * 811 * Return: HopID between @min_hopid and @max_hopid or negative errno in 812 * case of error. 813 */ 814 int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid) 815 { 816 return tb_port_alloc_hopid(port, true, min_hopid, max_hopid); 817 } 818 819 /** 820 * tb_port_alloc_out_hopid() - Allocate output HopID from port 821 * @port: Port to allocate HopID for 822 * @min_hopid: Minimum acceptable output HopID 823 * @max_hopid: Maximum acceptable output HopID 824 * 825 * Return: HopID between @min_hopid and @max_hopid or negative errno in 826 * case of error. 827 */ 828 int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid) 829 { 830 return tb_port_alloc_hopid(port, false, min_hopid, max_hopid); 831 } 832 833 /** 834 * tb_port_release_in_hopid() - Release allocated input HopID from port 835 * @port: Port whose HopID to release 836 * @hopid: HopID to release 837 */ 838 void tb_port_release_in_hopid(struct tb_port *port, int hopid) 839 { 840 ida_simple_remove(&port->in_hopids, hopid); 841 } 842 843 /** 844 * tb_port_release_out_hopid() - Release allocated output HopID from port 845 * @port: Port whose HopID to release 846 * @hopid: HopID to release 847 */ 848 void tb_port_release_out_hopid(struct tb_port *port, int hopid) 849 { 850 ida_simple_remove(&port->out_hopids, hopid); 851 } 852 853 /** 854 * tb_next_port_on_path() - Return next port for given port on a path 855 * @start: Start port of the walk 856 * @end: End port of the walk 857 * @prev: Previous port (%NULL if this is the first) 858 * 859 * This function can be used to walk from one port to another if they 860 * are connected through zero or more switches. If the @prev is dual 861 * link port, the function follows that link and returns another end on 862 * that same link. 863 * 864 * If the @end port has been reached, return %NULL. 865 * 866 * Domain tb->lock must be held when this function is called. 867 */ 868 struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end, 869 struct tb_port *prev) 870 { 871 struct tb_port *next; 872 873 if (!prev) 874 return start; 875 876 if (prev->sw == end->sw) { 877 if (prev == end) 878 return NULL; 879 return end; 880 } 881 882 if (start->sw->config.depth < end->sw->config.depth) { 883 if (prev->remote && 884 prev->remote->sw->config.depth > prev->sw->config.depth) 885 next = prev->remote; 886 else 887 next = tb_port_at(tb_route(end->sw), prev->sw); 888 } else { 889 if (tb_is_upstream_port(prev)) { 890 next = prev->remote; 891 } else { 892 next = tb_upstream_port(prev->sw); 893 /* 894 * Keep the same link if prev and next are both 895 * dual link ports. 896 */ 897 if (next->dual_link_port && 898 next->link_nr != prev->link_nr) { 899 next = next->dual_link_port; 900 } 901 } 902 } 903 904 return next; 905 } 906 907 static int tb_port_get_link_speed(struct tb_port *port) 908 { 909 u32 val, speed; 910 int ret; 911 912 if (!port->cap_phy) 913 return -EINVAL; 914 915 ret = tb_port_read(port, &val, TB_CFG_PORT, 916 port->cap_phy + LANE_ADP_CS_1, 1); 917 if (ret) 918 return ret; 919 920 speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >> 921 LANE_ADP_CS_1_CURRENT_SPEED_SHIFT; 922 return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10; 923 } 924 925 static int tb_port_get_link_width(struct tb_port *port) 926 { 927 u32 val; 928 int ret; 929 930 if (!port->cap_phy) 931 return -EINVAL; 932 933 ret = tb_port_read(port, &val, TB_CFG_PORT, 934 port->cap_phy + LANE_ADP_CS_1, 1); 935 if (ret) 936 return ret; 937 938 return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >> 939 LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT; 940 } 941 942 static bool tb_port_is_width_supported(struct tb_port *port, int width) 943 { 944 u32 phy, widths; 945 int ret; 946 947 if (!port->cap_phy) 948 return false; 949 950 ret = tb_port_read(port, &phy, TB_CFG_PORT, 951 port->cap_phy + LANE_ADP_CS_0, 1); 952 if (ret) 953 return false; 954 955 widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >> 956 LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT; 957 958 return !!(widths & width); 959 } 960 961 static int tb_port_set_link_width(struct tb_port *port, unsigned int width) 962 { 963 u32 val; 964 int ret; 965 966 if (!port->cap_phy) 967 return -EINVAL; 968 969 ret = tb_port_read(port, &val, TB_CFG_PORT, 970 port->cap_phy + LANE_ADP_CS_1, 1); 971 if (ret) 972 return ret; 973 974 val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK; 975 switch (width) { 976 case 1: 977 val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE << 978 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT; 979 break; 980 case 2: 981 val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL << 982 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT; 983 break; 984 default: 985 return -EINVAL; 986 } 987 988 val |= LANE_ADP_CS_1_LB; 989 990 return tb_port_write(port, &val, TB_CFG_PORT, 991 port->cap_phy + LANE_ADP_CS_1, 1); 992 } 993 994 static int tb_port_lane_bonding_enable(struct tb_port *port) 995 { 996 int ret; 997 998 /* 999 * Enable lane bonding for both links if not already enabled by 1000 * for example the boot firmware. 1001 */ 1002 ret = tb_port_get_link_width(port); 1003 if (ret == 1) { 1004 ret = tb_port_set_link_width(port, 2); 1005 if (ret) 1006 return ret; 1007 } 1008 1009 ret = tb_port_get_link_width(port->dual_link_port); 1010 if (ret == 1) { 1011 ret = tb_port_set_link_width(port->dual_link_port, 2); 1012 if (ret) { 1013 tb_port_set_link_width(port, 1); 1014 return ret; 1015 } 1016 } 1017 1018 port->bonded = true; 1019 port->dual_link_port->bonded = true; 1020 1021 return 0; 1022 } 1023 1024 static void tb_port_lane_bonding_disable(struct tb_port *port) 1025 { 1026 port->dual_link_port->bonded = false; 1027 port->bonded = false; 1028 1029 tb_port_set_link_width(port->dual_link_port, 1); 1030 tb_port_set_link_width(port, 1); 1031 } 1032 1033 /** 1034 * tb_port_is_enabled() - Is the adapter port enabled 1035 * @port: Port to check 1036 */ 1037 bool tb_port_is_enabled(struct tb_port *port) 1038 { 1039 switch (port->config.type) { 1040 case TB_TYPE_PCIE_UP: 1041 case TB_TYPE_PCIE_DOWN: 1042 return tb_pci_port_is_enabled(port); 1043 1044 case TB_TYPE_DP_HDMI_IN: 1045 case TB_TYPE_DP_HDMI_OUT: 1046 return tb_dp_port_is_enabled(port); 1047 1048 case TB_TYPE_USB3_UP: 1049 case TB_TYPE_USB3_DOWN: 1050 return tb_usb3_port_is_enabled(port); 1051 1052 default: 1053 return false; 1054 } 1055 } 1056 1057 /** 1058 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled 1059 * @port: USB3 adapter port to check 1060 */ 1061 bool tb_usb3_port_is_enabled(struct tb_port *port) 1062 { 1063 u32 data; 1064 1065 if (tb_port_read(port, &data, TB_CFG_PORT, 1066 port->cap_adap + ADP_USB3_CS_0, 1)) 1067 return false; 1068 1069 return !!(data & ADP_USB3_CS_0_PE); 1070 } 1071 1072 /** 1073 * tb_usb3_port_enable() - Enable USB3 adapter port 1074 * @port: USB3 adapter port to enable 1075 * @enable: Enable/disable the USB3 adapter 1076 */ 1077 int tb_usb3_port_enable(struct tb_port *port, bool enable) 1078 { 1079 u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V) 1080 : ADP_USB3_CS_0_V; 1081 1082 if (!port->cap_adap) 1083 return -ENXIO; 1084 return tb_port_write(port, &word, TB_CFG_PORT, 1085 port->cap_adap + ADP_USB3_CS_0, 1); 1086 } 1087 1088 /** 1089 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled 1090 * @port: PCIe port to check 1091 */ 1092 bool tb_pci_port_is_enabled(struct tb_port *port) 1093 { 1094 u32 data; 1095 1096 if (tb_port_read(port, &data, TB_CFG_PORT, 1097 port->cap_adap + ADP_PCIE_CS_0, 1)) 1098 return false; 1099 1100 return !!(data & ADP_PCIE_CS_0_PE); 1101 } 1102 1103 /** 1104 * tb_pci_port_enable() - Enable PCIe adapter port 1105 * @port: PCIe port to enable 1106 * @enable: Enable/disable the PCIe adapter 1107 */ 1108 int tb_pci_port_enable(struct tb_port *port, bool enable) 1109 { 1110 u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0; 1111 if (!port->cap_adap) 1112 return -ENXIO; 1113 return tb_port_write(port, &word, TB_CFG_PORT, 1114 port->cap_adap + ADP_PCIE_CS_0, 1); 1115 } 1116 1117 /** 1118 * tb_dp_port_hpd_is_active() - Is HPD already active 1119 * @port: DP out port to check 1120 * 1121 * Checks if the DP OUT adapter port has HDP bit already set. 1122 */ 1123 int tb_dp_port_hpd_is_active(struct tb_port *port) 1124 { 1125 u32 data; 1126 int ret; 1127 1128 ret = tb_port_read(port, &data, TB_CFG_PORT, 1129 port->cap_adap + ADP_DP_CS_2, 1); 1130 if (ret) 1131 return ret; 1132 1133 return !!(data & ADP_DP_CS_2_HDP); 1134 } 1135 1136 /** 1137 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port 1138 * @port: Port to clear HPD 1139 * 1140 * If the DP IN port has HDP set, this function can be used to clear it. 1141 */ 1142 int tb_dp_port_hpd_clear(struct tb_port *port) 1143 { 1144 u32 data; 1145 int ret; 1146 1147 ret = tb_port_read(port, &data, TB_CFG_PORT, 1148 port->cap_adap + ADP_DP_CS_3, 1); 1149 if (ret) 1150 return ret; 1151 1152 data |= ADP_DP_CS_3_HDPC; 1153 return tb_port_write(port, &data, TB_CFG_PORT, 1154 port->cap_adap + ADP_DP_CS_3, 1); 1155 } 1156 1157 /** 1158 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port 1159 * @port: DP IN/OUT port to set hops 1160 * @video: Video Hop ID 1161 * @aux_tx: AUX TX Hop ID 1162 * @aux_rx: AUX RX Hop ID 1163 * 1164 * Programs specified Hop IDs for DP IN/OUT port. 1165 */ 1166 int tb_dp_port_set_hops(struct tb_port *port, unsigned int video, 1167 unsigned int aux_tx, unsigned int aux_rx) 1168 { 1169 u32 data[2]; 1170 int ret; 1171 1172 ret = tb_port_read(port, data, TB_CFG_PORT, 1173 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data)); 1174 if (ret) 1175 return ret; 1176 1177 data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK; 1178 data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK; 1179 data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK; 1180 1181 data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) & 1182 ADP_DP_CS_0_VIDEO_HOPID_MASK; 1183 data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK; 1184 data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) & 1185 ADP_DP_CS_1_AUX_RX_HOPID_MASK; 1186 1187 return tb_port_write(port, data, TB_CFG_PORT, 1188 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data)); 1189 } 1190 1191 /** 1192 * tb_dp_port_is_enabled() - Is DP adapter port enabled 1193 * @port: DP adapter port to check 1194 */ 1195 bool tb_dp_port_is_enabled(struct tb_port *port) 1196 { 1197 u32 data[2]; 1198 1199 if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0, 1200 ARRAY_SIZE(data))) 1201 return false; 1202 1203 return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE)); 1204 } 1205 1206 /** 1207 * tb_dp_port_enable() - Enables/disables DP paths of a port 1208 * @port: DP IN/OUT port 1209 * @enable: Enable/disable DP path 1210 * 1211 * Once Hop IDs are programmed DP paths can be enabled or disabled by 1212 * calling this function. 1213 */ 1214 int tb_dp_port_enable(struct tb_port *port, bool enable) 1215 { 1216 u32 data[2]; 1217 int ret; 1218 1219 ret = tb_port_read(port, data, TB_CFG_PORT, 1220 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data)); 1221 if (ret) 1222 return ret; 1223 1224 if (enable) 1225 data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE; 1226 else 1227 data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE); 1228 1229 return tb_port_write(port, data, TB_CFG_PORT, 1230 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data)); 1231 } 1232 1233 /* switch utility functions */ 1234 1235 static const char *tb_switch_generation_name(const struct tb_switch *sw) 1236 { 1237 switch (sw->generation) { 1238 case 1: 1239 return "Thunderbolt 1"; 1240 case 2: 1241 return "Thunderbolt 2"; 1242 case 3: 1243 return "Thunderbolt 3"; 1244 case 4: 1245 return "USB4"; 1246 default: 1247 return "Unknown"; 1248 } 1249 } 1250 1251 static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw) 1252 { 1253 const struct tb_regs_switch_header *regs = &sw->config; 1254 1255 tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n", 1256 tb_switch_generation_name(sw), regs->vendor_id, regs->device_id, 1257 regs->revision, regs->thunderbolt_version); 1258 tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number); 1259 tb_dbg(tb, " Config:\n"); 1260 tb_dbg(tb, 1261 " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n", 1262 regs->upstream_port_number, regs->depth, 1263 (((u64) regs->route_hi) << 32) | regs->route_lo, 1264 regs->enabled, regs->plug_events_delay); 1265 tb_dbg(tb, " unknown1: %#x unknown4: %#x\n", 1266 regs->__unknown1, regs->__unknown4); 1267 } 1268 1269 /** 1270 * reset_switch() - reconfigure route, enable and send TB_CFG_PKG_RESET 1271 * 1272 * Return: Returns 0 on success or an error code on failure. 1273 */ 1274 int tb_switch_reset(struct tb *tb, u64 route) 1275 { 1276 struct tb_cfg_result res; 1277 struct tb_regs_switch_header header = { 1278 header.route_hi = route >> 32, 1279 header.route_lo = route, 1280 header.enabled = true, 1281 }; 1282 tb_dbg(tb, "resetting switch at %llx\n", route); 1283 res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route, 1284 0, 2, 2, 2); 1285 if (res.err) 1286 return res.err; 1287 res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT); 1288 if (res.err > 0) 1289 return -EIO; 1290 return res.err; 1291 } 1292 1293 /** 1294 * tb_plug_events_active() - enable/disable plug events on a switch 1295 * 1296 * Also configures a sane plug_events_delay of 255ms. 1297 * 1298 * Return: Returns 0 on success or an error code on failure. 1299 */ 1300 static int tb_plug_events_active(struct tb_switch *sw, bool active) 1301 { 1302 u32 data; 1303 int res; 1304 1305 if (tb_switch_is_icm(sw)) 1306 return 0; 1307 1308 sw->config.plug_events_delay = 0xff; 1309 res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1); 1310 if (res) 1311 return res; 1312 1313 /* Plug events are always enabled in USB4 */ 1314 if (tb_switch_is_usb4(sw)) 1315 return 0; 1316 1317 res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1); 1318 if (res) 1319 return res; 1320 1321 if (active) { 1322 data = data & 0xFFFFFF83; 1323 switch (sw->config.device_id) { 1324 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE: 1325 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE: 1326 case PCI_DEVICE_ID_INTEL_PORT_RIDGE: 1327 break; 1328 default: 1329 data |= 4; 1330 } 1331 } else { 1332 data = data | 0x7c; 1333 } 1334 return tb_sw_write(sw, &data, TB_CFG_SWITCH, 1335 sw->cap_plug_events + 1, 1); 1336 } 1337 1338 static ssize_t authorized_show(struct device *dev, 1339 struct device_attribute *attr, 1340 char *buf) 1341 { 1342 struct tb_switch *sw = tb_to_switch(dev); 1343 1344 return sprintf(buf, "%u\n", sw->authorized); 1345 } 1346 1347 static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val) 1348 { 1349 int ret = -EINVAL; 1350 1351 if (!mutex_trylock(&sw->tb->lock)) 1352 return restart_syscall(); 1353 1354 if (sw->authorized) 1355 goto unlock; 1356 1357 switch (val) { 1358 /* Approve switch */ 1359 case 1: 1360 if (sw->key) 1361 ret = tb_domain_approve_switch_key(sw->tb, sw); 1362 else 1363 ret = tb_domain_approve_switch(sw->tb, sw); 1364 break; 1365 1366 /* Challenge switch */ 1367 case 2: 1368 if (sw->key) 1369 ret = tb_domain_challenge_switch_key(sw->tb, sw); 1370 break; 1371 1372 default: 1373 break; 1374 } 1375 1376 if (!ret) { 1377 sw->authorized = val; 1378 /* Notify status change to the userspace */ 1379 kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE); 1380 } 1381 1382 unlock: 1383 mutex_unlock(&sw->tb->lock); 1384 return ret; 1385 } 1386 1387 static ssize_t authorized_store(struct device *dev, 1388 struct device_attribute *attr, 1389 const char *buf, size_t count) 1390 { 1391 struct tb_switch *sw = tb_to_switch(dev); 1392 unsigned int val; 1393 ssize_t ret; 1394 1395 ret = kstrtouint(buf, 0, &val); 1396 if (ret) 1397 return ret; 1398 if (val > 2) 1399 return -EINVAL; 1400 1401 pm_runtime_get_sync(&sw->dev); 1402 ret = tb_switch_set_authorized(sw, val); 1403 pm_runtime_mark_last_busy(&sw->dev); 1404 pm_runtime_put_autosuspend(&sw->dev); 1405 1406 return ret ? ret : count; 1407 } 1408 static DEVICE_ATTR_RW(authorized); 1409 1410 static ssize_t boot_show(struct device *dev, struct device_attribute *attr, 1411 char *buf) 1412 { 1413 struct tb_switch *sw = tb_to_switch(dev); 1414 1415 return sprintf(buf, "%u\n", sw->boot); 1416 } 1417 static DEVICE_ATTR_RO(boot); 1418 1419 static ssize_t device_show(struct device *dev, struct device_attribute *attr, 1420 char *buf) 1421 { 1422 struct tb_switch *sw = tb_to_switch(dev); 1423 1424 return sprintf(buf, "%#x\n", sw->device); 1425 } 1426 static DEVICE_ATTR_RO(device); 1427 1428 static ssize_t 1429 device_name_show(struct device *dev, struct device_attribute *attr, char *buf) 1430 { 1431 struct tb_switch *sw = tb_to_switch(dev); 1432 1433 return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : ""); 1434 } 1435 static DEVICE_ATTR_RO(device_name); 1436 1437 static ssize_t 1438 generation_show(struct device *dev, struct device_attribute *attr, char *buf) 1439 { 1440 struct tb_switch *sw = tb_to_switch(dev); 1441 1442 return sprintf(buf, "%u\n", sw->generation); 1443 } 1444 static DEVICE_ATTR_RO(generation); 1445 1446 static ssize_t key_show(struct device *dev, struct device_attribute *attr, 1447 char *buf) 1448 { 1449 struct tb_switch *sw = tb_to_switch(dev); 1450 ssize_t ret; 1451 1452 if (!mutex_trylock(&sw->tb->lock)) 1453 return restart_syscall(); 1454 1455 if (sw->key) 1456 ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key); 1457 else 1458 ret = sprintf(buf, "\n"); 1459 1460 mutex_unlock(&sw->tb->lock); 1461 return ret; 1462 } 1463 1464 static ssize_t key_store(struct device *dev, struct device_attribute *attr, 1465 const char *buf, size_t count) 1466 { 1467 struct tb_switch *sw = tb_to_switch(dev); 1468 u8 key[TB_SWITCH_KEY_SIZE]; 1469 ssize_t ret = count; 1470 bool clear = false; 1471 1472 if (!strcmp(buf, "\n")) 1473 clear = true; 1474 else if (hex2bin(key, buf, sizeof(key))) 1475 return -EINVAL; 1476 1477 if (!mutex_trylock(&sw->tb->lock)) 1478 return restart_syscall(); 1479 1480 if (sw->authorized) { 1481 ret = -EBUSY; 1482 } else { 1483 kfree(sw->key); 1484 if (clear) { 1485 sw->key = NULL; 1486 } else { 1487 sw->key = kmemdup(key, sizeof(key), GFP_KERNEL); 1488 if (!sw->key) 1489 ret = -ENOMEM; 1490 } 1491 } 1492 1493 mutex_unlock(&sw->tb->lock); 1494 return ret; 1495 } 1496 static DEVICE_ATTR(key, 0600, key_show, key_store); 1497 1498 static ssize_t speed_show(struct device *dev, struct device_attribute *attr, 1499 char *buf) 1500 { 1501 struct tb_switch *sw = tb_to_switch(dev); 1502 1503 return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed); 1504 } 1505 1506 /* 1507 * Currently all lanes must run at the same speed but we expose here 1508 * both directions to allow possible asymmetric links in the future. 1509 */ 1510 static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL); 1511 static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL); 1512 1513 static ssize_t lanes_show(struct device *dev, struct device_attribute *attr, 1514 char *buf) 1515 { 1516 struct tb_switch *sw = tb_to_switch(dev); 1517 1518 return sprintf(buf, "%u\n", sw->link_width); 1519 } 1520 1521 /* 1522 * Currently link has same amount of lanes both directions (1 or 2) but 1523 * expose them separately to allow possible asymmetric links in the future. 1524 */ 1525 static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL); 1526 static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL); 1527 1528 static ssize_t nvm_authenticate_show(struct device *dev, 1529 struct device_attribute *attr, char *buf) 1530 { 1531 struct tb_switch *sw = tb_to_switch(dev); 1532 u32 status; 1533 1534 nvm_get_auth_status(sw, &status); 1535 return sprintf(buf, "%#x\n", status); 1536 } 1537 1538 static ssize_t nvm_authenticate_store(struct device *dev, 1539 struct device_attribute *attr, const char *buf, size_t count) 1540 { 1541 struct tb_switch *sw = tb_to_switch(dev); 1542 bool val; 1543 int ret; 1544 1545 pm_runtime_get_sync(&sw->dev); 1546 1547 if (!mutex_trylock(&sw->tb->lock)) { 1548 ret = restart_syscall(); 1549 goto exit_rpm; 1550 } 1551 1552 /* If NVMem devices are not yet added */ 1553 if (!sw->nvm) { 1554 ret = -EAGAIN; 1555 goto exit_unlock; 1556 } 1557 1558 ret = kstrtobool(buf, &val); 1559 if (ret) 1560 goto exit_unlock; 1561 1562 /* Always clear the authentication status */ 1563 nvm_clear_auth_status(sw); 1564 1565 if (val) { 1566 if (!sw->nvm->buf) { 1567 ret = -EINVAL; 1568 goto exit_unlock; 1569 } 1570 1571 ret = nvm_validate_and_write(sw); 1572 if (ret) 1573 goto exit_unlock; 1574 1575 sw->nvm->authenticating = true; 1576 ret = nvm_authenticate(sw); 1577 } 1578 1579 exit_unlock: 1580 mutex_unlock(&sw->tb->lock); 1581 exit_rpm: 1582 pm_runtime_mark_last_busy(&sw->dev); 1583 pm_runtime_put_autosuspend(&sw->dev); 1584 1585 if (ret) 1586 return ret; 1587 return count; 1588 } 1589 static DEVICE_ATTR_RW(nvm_authenticate); 1590 1591 static ssize_t nvm_version_show(struct device *dev, 1592 struct device_attribute *attr, char *buf) 1593 { 1594 struct tb_switch *sw = tb_to_switch(dev); 1595 int ret; 1596 1597 if (!mutex_trylock(&sw->tb->lock)) 1598 return restart_syscall(); 1599 1600 if (sw->safe_mode) 1601 ret = -ENODATA; 1602 else if (!sw->nvm) 1603 ret = -EAGAIN; 1604 else 1605 ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor); 1606 1607 mutex_unlock(&sw->tb->lock); 1608 1609 return ret; 1610 } 1611 static DEVICE_ATTR_RO(nvm_version); 1612 1613 static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, 1614 char *buf) 1615 { 1616 struct tb_switch *sw = tb_to_switch(dev); 1617 1618 return sprintf(buf, "%#x\n", sw->vendor); 1619 } 1620 static DEVICE_ATTR_RO(vendor); 1621 1622 static ssize_t 1623 vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf) 1624 { 1625 struct tb_switch *sw = tb_to_switch(dev); 1626 1627 return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : ""); 1628 } 1629 static DEVICE_ATTR_RO(vendor_name); 1630 1631 static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr, 1632 char *buf) 1633 { 1634 struct tb_switch *sw = tb_to_switch(dev); 1635 1636 return sprintf(buf, "%pUb\n", sw->uuid); 1637 } 1638 static DEVICE_ATTR_RO(unique_id); 1639 1640 static struct attribute *switch_attrs[] = { 1641 &dev_attr_authorized.attr, 1642 &dev_attr_boot.attr, 1643 &dev_attr_device.attr, 1644 &dev_attr_device_name.attr, 1645 &dev_attr_generation.attr, 1646 &dev_attr_key.attr, 1647 &dev_attr_nvm_authenticate.attr, 1648 &dev_attr_nvm_version.attr, 1649 &dev_attr_rx_speed.attr, 1650 &dev_attr_rx_lanes.attr, 1651 &dev_attr_tx_speed.attr, 1652 &dev_attr_tx_lanes.attr, 1653 &dev_attr_vendor.attr, 1654 &dev_attr_vendor_name.attr, 1655 &dev_attr_unique_id.attr, 1656 NULL, 1657 }; 1658 1659 static umode_t switch_attr_is_visible(struct kobject *kobj, 1660 struct attribute *attr, int n) 1661 { 1662 struct device *dev = container_of(kobj, struct device, kobj); 1663 struct tb_switch *sw = tb_to_switch(dev); 1664 1665 if (attr == &dev_attr_device.attr) { 1666 if (!sw->device) 1667 return 0; 1668 } else if (attr == &dev_attr_device_name.attr) { 1669 if (!sw->device_name) 1670 return 0; 1671 } else if (attr == &dev_attr_vendor.attr) { 1672 if (!sw->vendor) 1673 return 0; 1674 } else if (attr == &dev_attr_vendor_name.attr) { 1675 if (!sw->vendor_name) 1676 return 0; 1677 } else if (attr == &dev_attr_key.attr) { 1678 if (tb_route(sw) && 1679 sw->tb->security_level == TB_SECURITY_SECURE && 1680 sw->security_level == TB_SECURITY_SECURE) 1681 return attr->mode; 1682 return 0; 1683 } else if (attr == &dev_attr_rx_speed.attr || 1684 attr == &dev_attr_rx_lanes.attr || 1685 attr == &dev_attr_tx_speed.attr || 1686 attr == &dev_attr_tx_lanes.attr) { 1687 if (tb_route(sw)) 1688 return attr->mode; 1689 return 0; 1690 } else if (attr == &dev_attr_nvm_authenticate.attr) { 1691 if (nvm_upgradeable(sw)) 1692 return attr->mode; 1693 return 0; 1694 } else if (attr == &dev_attr_nvm_version.attr) { 1695 if (nvm_readable(sw)) 1696 return attr->mode; 1697 return 0; 1698 } else if (attr == &dev_attr_boot.attr) { 1699 if (tb_route(sw)) 1700 return attr->mode; 1701 return 0; 1702 } 1703 1704 return sw->safe_mode ? 0 : attr->mode; 1705 } 1706 1707 static struct attribute_group switch_group = { 1708 .is_visible = switch_attr_is_visible, 1709 .attrs = switch_attrs, 1710 }; 1711 1712 static const struct attribute_group *switch_groups[] = { 1713 &switch_group, 1714 NULL, 1715 }; 1716 1717 static void tb_switch_release(struct device *dev) 1718 { 1719 struct tb_switch *sw = tb_to_switch(dev); 1720 struct tb_port *port; 1721 1722 dma_port_free(sw->dma_port); 1723 1724 tb_switch_for_each_port(sw, port) { 1725 if (!port->disabled) { 1726 ida_destroy(&port->in_hopids); 1727 ida_destroy(&port->out_hopids); 1728 } 1729 } 1730 1731 kfree(sw->uuid); 1732 kfree(sw->device_name); 1733 kfree(sw->vendor_name); 1734 kfree(sw->ports); 1735 kfree(sw->drom); 1736 kfree(sw->key); 1737 kfree(sw); 1738 } 1739 1740 /* 1741 * Currently only need to provide the callbacks. Everything else is handled 1742 * in the connection manager. 1743 */ 1744 static int __maybe_unused tb_switch_runtime_suspend(struct device *dev) 1745 { 1746 struct tb_switch *sw = tb_to_switch(dev); 1747 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; 1748 1749 if (cm_ops->runtime_suspend_switch) 1750 return cm_ops->runtime_suspend_switch(sw); 1751 1752 return 0; 1753 } 1754 1755 static int __maybe_unused tb_switch_runtime_resume(struct device *dev) 1756 { 1757 struct tb_switch *sw = tb_to_switch(dev); 1758 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; 1759 1760 if (cm_ops->runtime_resume_switch) 1761 return cm_ops->runtime_resume_switch(sw); 1762 return 0; 1763 } 1764 1765 static const struct dev_pm_ops tb_switch_pm_ops = { 1766 SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume, 1767 NULL) 1768 }; 1769 1770 struct device_type tb_switch_type = { 1771 .name = "thunderbolt_device", 1772 .release = tb_switch_release, 1773 .pm = &tb_switch_pm_ops, 1774 }; 1775 1776 static int tb_switch_get_generation(struct tb_switch *sw) 1777 { 1778 switch (sw->config.device_id) { 1779 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE: 1780 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE: 1781 case PCI_DEVICE_ID_INTEL_LIGHT_PEAK: 1782 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C: 1783 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C: 1784 case PCI_DEVICE_ID_INTEL_PORT_RIDGE: 1785 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE: 1786 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE: 1787 return 1; 1788 1789 case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE: 1790 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE: 1791 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE: 1792 return 2; 1793 1794 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: 1795 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE: 1796 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE: 1797 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: 1798 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: 1799 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: 1800 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: 1801 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE: 1802 case PCI_DEVICE_ID_INTEL_ICL_NHI0: 1803 case PCI_DEVICE_ID_INTEL_ICL_NHI1: 1804 return 3; 1805 1806 default: 1807 if (tb_switch_is_usb4(sw)) 1808 return 4; 1809 1810 /* 1811 * For unknown switches assume generation to be 1 to be 1812 * on the safe side. 1813 */ 1814 tb_sw_warn(sw, "unsupported switch device id %#x\n", 1815 sw->config.device_id); 1816 return 1; 1817 } 1818 } 1819 1820 static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth) 1821 { 1822 int max_depth; 1823 1824 if (tb_switch_is_usb4(sw) || 1825 (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch))) 1826 max_depth = USB4_SWITCH_MAX_DEPTH; 1827 else 1828 max_depth = TB_SWITCH_MAX_DEPTH; 1829 1830 return depth > max_depth; 1831 } 1832 1833 /** 1834 * tb_switch_alloc() - allocate a switch 1835 * @tb: Pointer to the owning domain 1836 * @parent: Parent device for this switch 1837 * @route: Route string for this switch 1838 * 1839 * Allocates and initializes a switch. Will not upload configuration to 1840 * the switch. For that you need to call tb_switch_configure() 1841 * separately. The returned switch should be released by calling 1842 * tb_switch_put(). 1843 * 1844 * Return: Pointer to the allocated switch or ERR_PTR() in case of 1845 * failure. 1846 */ 1847 struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent, 1848 u64 route) 1849 { 1850 struct tb_switch *sw; 1851 int upstream_port; 1852 int i, ret, depth; 1853 1854 /* Unlock the downstream port so we can access the switch below */ 1855 if (route) { 1856 struct tb_switch *parent_sw = tb_to_switch(parent); 1857 struct tb_port *down; 1858 1859 down = tb_port_at(route, parent_sw); 1860 tb_port_unlock(down); 1861 } 1862 1863 depth = tb_route_length(route); 1864 1865 upstream_port = tb_cfg_get_upstream_port(tb->ctl, route); 1866 if (upstream_port < 0) 1867 return ERR_PTR(upstream_port); 1868 1869 sw = kzalloc(sizeof(*sw), GFP_KERNEL); 1870 if (!sw) 1871 return ERR_PTR(-ENOMEM); 1872 1873 sw->tb = tb; 1874 ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5); 1875 if (ret) 1876 goto err_free_sw_ports; 1877 1878 sw->generation = tb_switch_get_generation(sw); 1879 1880 tb_dbg(tb, "current switch config:\n"); 1881 tb_dump_switch(tb, sw); 1882 1883 /* configure switch */ 1884 sw->config.upstream_port_number = upstream_port; 1885 sw->config.depth = depth; 1886 sw->config.route_hi = upper_32_bits(route); 1887 sw->config.route_lo = lower_32_bits(route); 1888 sw->config.enabled = 0; 1889 1890 /* Make sure we do not exceed maximum topology limit */ 1891 if (tb_switch_exceeds_max_depth(sw, depth)) { 1892 ret = -EADDRNOTAVAIL; 1893 goto err_free_sw_ports; 1894 } 1895 1896 /* initialize ports */ 1897 sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports), 1898 GFP_KERNEL); 1899 if (!sw->ports) { 1900 ret = -ENOMEM; 1901 goto err_free_sw_ports; 1902 } 1903 1904 for (i = 0; i <= sw->config.max_port_number; i++) { 1905 /* minimum setup for tb_find_cap and tb_drom_read to work */ 1906 sw->ports[i].sw = sw; 1907 sw->ports[i].port = i; 1908 } 1909 1910 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS); 1911 if (ret > 0) 1912 sw->cap_plug_events = ret; 1913 1914 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER); 1915 if (ret > 0) 1916 sw->cap_lc = ret; 1917 1918 /* Root switch is always authorized */ 1919 if (!route) 1920 sw->authorized = true; 1921 1922 device_initialize(&sw->dev); 1923 sw->dev.parent = parent; 1924 sw->dev.bus = &tb_bus_type; 1925 sw->dev.type = &tb_switch_type; 1926 sw->dev.groups = switch_groups; 1927 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw)); 1928 1929 return sw; 1930 1931 err_free_sw_ports: 1932 kfree(sw->ports); 1933 kfree(sw); 1934 1935 return ERR_PTR(ret); 1936 } 1937 1938 /** 1939 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode 1940 * @tb: Pointer to the owning domain 1941 * @parent: Parent device for this switch 1942 * @route: Route string for this switch 1943 * 1944 * This creates a switch in safe mode. This means the switch pretty much 1945 * lacks all capabilities except DMA configuration port before it is 1946 * flashed with a valid NVM firmware. 1947 * 1948 * The returned switch must be released by calling tb_switch_put(). 1949 * 1950 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure 1951 */ 1952 struct tb_switch * 1953 tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route) 1954 { 1955 struct tb_switch *sw; 1956 1957 sw = kzalloc(sizeof(*sw), GFP_KERNEL); 1958 if (!sw) 1959 return ERR_PTR(-ENOMEM); 1960 1961 sw->tb = tb; 1962 sw->config.depth = tb_route_length(route); 1963 sw->config.route_hi = upper_32_bits(route); 1964 sw->config.route_lo = lower_32_bits(route); 1965 sw->safe_mode = true; 1966 1967 device_initialize(&sw->dev); 1968 sw->dev.parent = parent; 1969 sw->dev.bus = &tb_bus_type; 1970 sw->dev.type = &tb_switch_type; 1971 sw->dev.groups = switch_groups; 1972 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw)); 1973 1974 return sw; 1975 } 1976 1977 /** 1978 * tb_switch_configure() - Uploads configuration to the switch 1979 * @sw: Switch to configure 1980 * 1981 * Call this function before the switch is added to the system. It will 1982 * upload configuration to the switch and makes it available for the 1983 * connection manager to use. Can be called to the switch again after 1984 * resume from low power states to re-initialize it. 1985 * 1986 * Return: %0 in case of success and negative errno in case of failure 1987 */ 1988 int tb_switch_configure(struct tb_switch *sw) 1989 { 1990 struct tb *tb = sw->tb; 1991 u64 route; 1992 int ret; 1993 1994 route = tb_route(sw); 1995 1996 tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n", 1997 sw->config.enabled ? "restoring " : "initializing", route, 1998 tb_route_length(route), sw->config.upstream_port_number); 1999 2000 sw->config.enabled = 1; 2001 2002 if (tb_switch_is_usb4(sw)) { 2003 /* 2004 * For USB4 devices, we need to program the CM version 2005 * accordingly so that it knows to expose all the 2006 * additional capabilities. 2007 */ 2008 sw->config.cmuv = USB4_VERSION_1_0; 2009 2010 /* Enumerate the switch */ 2011 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, 2012 ROUTER_CS_1, 4); 2013 if (ret) 2014 return ret; 2015 2016 ret = usb4_switch_setup(sw); 2017 if (ret) 2018 return ret; 2019 2020 ret = usb4_switch_configure_link(sw); 2021 } else { 2022 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) 2023 tb_sw_warn(sw, "unknown switch vendor id %#x\n", 2024 sw->config.vendor_id); 2025 2026 if (!sw->cap_plug_events) { 2027 tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n"); 2028 return -ENODEV; 2029 } 2030 2031 /* Enumerate the switch */ 2032 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, 2033 ROUTER_CS_1, 3); 2034 if (ret) 2035 return ret; 2036 2037 ret = tb_lc_configure_link(sw); 2038 } 2039 if (ret) 2040 return ret; 2041 2042 return tb_plug_events_active(sw, true); 2043 } 2044 2045 static int tb_switch_set_uuid(struct tb_switch *sw) 2046 { 2047 bool uid = false; 2048 u32 uuid[4]; 2049 int ret; 2050 2051 if (sw->uuid) 2052 return 0; 2053 2054 if (tb_switch_is_usb4(sw)) { 2055 ret = usb4_switch_read_uid(sw, &sw->uid); 2056 if (ret) 2057 return ret; 2058 uid = true; 2059 } else { 2060 /* 2061 * The newer controllers include fused UUID as part of 2062 * link controller specific registers 2063 */ 2064 ret = tb_lc_read_uuid(sw, uuid); 2065 if (ret) { 2066 if (ret != -EINVAL) 2067 return ret; 2068 uid = true; 2069 } 2070 } 2071 2072 if (uid) { 2073 /* 2074 * ICM generates UUID based on UID and fills the upper 2075 * two words with ones. This is not strictly following 2076 * UUID format but we want to be compatible with it so 2077 * we do the same here. 2078 */ 2079 uuid[0] = sw->uid & 0xffffffff; 2080 uuid[1] = (sw->uid >> 32) & 0xffffffff; 2081 uuid[2] = 0xffffffff; 2082 uuid[3] = 0xffffffff; 2083 } 2084 2085 sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL); 2086 if (!sw->uuid) 2087 return -ENOMEM; 2088 return 0; 2089 } 2090 2091 static int tb_switch_add_dma_port(struct tb_switch *sw) 2092 { 2093 u32 status; 2094 int ret; 2095 2096 switch (sw->generation) { 2097 case 2: 2098 /* Only root switch can be upgraded */ 2099 if (tb_route(sw)) 2100 return 0; 2101 2102 /* fallthrough */ 2103 case 3: 2104 ret = tb_switch_set_uuid(sw); 2105 if (ret) 2106 return ret; 2107 break; 2108 2109 default: 2110 /* 2111 * DMA port is the only thing available when the switch 2112 * is in safe mode. 2113 */ 2114 if (!sw->safe_mode) 2115 return 0; 2116 break; 2117 } 2118 2119 /* Root switch DMA port requires running firmware */ 2120 if (!tb_route(sw) && !tb_switch_is_icm(sw)) 2121 return 0; 2122 2123 sw->dma_port = dma_port_alloc(sw); 2124 if (!sw->dma_port) 2125 return 0; 2126 2127 if (sw->no_nvm_upgrade) 2128 return 0; 2129 2130 /* 2131 * If there is status already set then authentication failed 2132 * when the dma_port_flash_update_auth() returned. Power cycling 2133 * is not needed (it was done already) so only thing we do here 2134 * is to unblock runtime PM of the root port. 2135 */ 2136 nvm_get_auth_status(sw, &status); 2137 if (status) { 2138 if (!tb_route(sw)) 2139 nvm_authenticate_complete_dma_port(sw); 2140 return 0; 2141 } 2142 2143 /* 2144 * Check status of the previous flash authentication. If there 2145 * is one we need to power cycle the switch in any case to make 2146 * it functional again. 2147 */ 2148 ret = dma_port_flash_update_auth_status(sw->dma_port, &status); 2149 if (ret <= 0) 2150 return ret; 2151 2152 /* Now we can allow root port to suspend again */ 2153 if (!tb_route(sw)) 2154 nvm_authenticate_complete_dma_port(sw); 2155 2156 if (status) { 2157 tb_sw_info(sw, "switch flash authentication failed\n"); 2158 nvm_set_auth_status(sw, status); 2159 } 2160 2161 tb_sw_info(sw, "power cycling the switch now\n"); 2162 dma_port_power_cycle(sw->dma_port); 2163 2164 /* 2165 * We return error here which causes the switch adding failure. 2166 * It should appear back after power cycle is complete. 2167 */ 2168 return -ESHUTDOWN; 2169 } 2170 2171 static void tb_switch_default_link_ports(struct tb_switch *sw) 2172 { 2173 int i; 2174 2175 for (i = 1; i <= sw->config.max_port_number; i += 2) { 2176 struct tb_port *port = &sw->ports[i]; 2177 struct tb_port *subordinate; 2178 2179 if (!tb_port_is_null(port)) 2180 continue; 2181 2182 /* Check for the subordinate port */ 2183 if (i == sw->config.max_port_number || 2184 !tb_port_is_null(&sw->ports[i + 1])) 2185 continue; 2186 2187 /* Link them if not already done so (by DROM) */ 2188 subordinate = &sw->ports[i + 1]; 2189 if (!port->dual_link_port && !subordinate->dual_link_port) { 2190 port->link_nr = 0; 2191 port->dual_link_port = subordinate; 2192 subordinate->link_nr = 1; 2193 subordinate->dual_link_port = port; 2194 2195 tb_sw_dbg(sw, "linked ports %d <-> %d\n", 2196 port->port, subordinate->port); 2197 } 2198 } 2199 } 2200 2201 static bool tb_switch_lane_bonding_possible(struct tb_switch *sw) 2202 { 2203 const struct tb_port *up = tb_upstream_port(sw); 2204 2205 if (!up->dual_link_port || !up->dual_link_port->remote) 2206 return false; 2207 2208 if (tb_switch_is_usb4(sw)) 2209 return usb4_switch_lane_bonding_possible(sw); 2210 return tb_lc_lane_bonding_possible(sw); 2211 } 2212 2213 static int tb_switch_update_link_attributes(struct tb_switch *sw) 2214 { 2215 struct tb_port *up; 2216 bool change = false; 2217 int ret; 2218 2219 if (!tb_route(sw) || tb_switch_is_icm(sw)) 2220 return 0; 2221 2222 up = tb_upstream_port(sw); 2223 2224 ret = tb_port_get_link_speed(up); 2225 if (ret < 0) 2226 return ret; 2227 if (sw->link_speed != ret) 2228 change = true; 2229 sw->link_speed = ret; 2230 2231 ret = tb_port_get_link_width(up); 2232 if (ret < 0) 2233 return ret; 2234 if (sw->link_width != ret) 2235 change = true; 2236 sw->link_width = ret; 2237 2238 /* Notify userspace that there is possible link attribute change */ 2239 if (device_is_registered(&sw->dev) && change) 2240 kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE); 2241 2242 return 0; 2243 } 2244 2245 /** 2246 * tb_switch_lane_bonding_enable() - Enable lane bonding 2247 * @sw: Switch to enable lane bonding 2248 * 2249 * Connection manager can call this function to enable lane bonding of a 2250 * switch. If conditions are correct and both switches support the feature, 2251 * lanes are bonded. It is safe to call this to any switch. 2252 */ 2253 int tb_switch_lane_bonding_enable(struct tb_switch *sw) 2254 { 2255 struct tb_switch *parent = tb_to_switch(sw->dev.parent); 2256 struct tb_port *up, *down; 2257 u64 route = tb_route(sw); 2258 int ret; 2259 2260 if (!route) 2261 return 0; 2262 2263 if (!tb_switch_lane_bonding_possible(sw)) 2264 return 0; 2265 2266 up = tb_upstream_port(sw); 2267 down = tb_port_at(route, parent); 2268 2269 if (!tb_port_is_width_supported(up, 2) || 2270 !tb_port_is_width_supported(down, 2)) 2271 return 0; 2272 2273 ret = tb_port_lane_bonding_enable(up); 2274 if (ret) { 2275 tb_port_warn(up, "failed to enable lane bonding\n"); 2276 return ret; 2277 } 2278 2279 ret = tb_port_lane_bonding_enable(down); 2280 if (ret) { 2281 tb_port_warn(down, "failed to enable lane bonding\n"); 2282 tb_port_lane_bonding_disable(up); 2283 return ret; 2284 } 2285 2286 tb_switch_update_link_attributes(sw); 2287 2288 tb_sw_dbg(sw, "lane bonding enabled\n"); 2289 return ret; 2290 } 2291 2292 /** 2293 * tb_switch_lane_bonding_disable() - Disable lane bonding 2294 * @sw: Switch whose lane bonding to disable 2295 * 2296 * Disables lane bonding between @sw and parent. This can be called even 2297 * if lanes were not bonded originally. 2298 */ 2299 void tb_switch_lane_bonding_disable(struct tb_switch *sw) 2300 { 2301 struct tb_switch *parent = tb_to_switch(sw->dev.parent); 2302 struct tb_port *up, *down; 2303 2304 if (!tb_route(sw)) 2305 return; 2306 2307 up = tb_upstream_port(sw); 2308 if (!up->bonded) 2309 return; 2310 2311 down = tb_port_at(tb_route(sw), parent); 2312 2313 tb_port_lane_bonding_disable(up); 2314 tb_port_lane_bonding_disable(down); 2315 2316 tb_switch_update_link_attributes(sw); 2317 tb_sw_dbg(sw, "lane bonding disabled\n"); 2318 } 2319 2320 /** 2321 * tb_switch_add() - Add a switch to the domain 2322 * @sw: Switch to add 2323 * 2324 * This is the last step in adding switch to the domain. It will read 2325 * identification information from DROM and initializes ports so that 2326 * they can be used to connect other switches. The switch will be 2327 * exposed to the userspace when this function successfully returns. To 2328 * remove and release the switch, call tb_switch_remove(). 2329 * 2330 * Return: %0 in case of success and negative errno in case of failure 2331 */ 2332 int tb_switch_add(struct tb_switch *sw) 2333 { 2334 int i, ret; 2335 2336 /* 2337 * Initialize DMA control port now before we read DROM. Recent 2338 * host controllers have more complete DROM on NVM that includes 2339 * vendor and model identification strings which we then expose 2340 * to the userspace. NVM can be accessed through DMA 2341 * configuration based mailbox. 2342 */ 2343 ret = tb_switch_add_dma_port(sw); 2344 if (ret) { 2345 dev_err(&sw->dev, "failed to add DMA port\n"); 2346 return ret; 2347 } 2348 2349 if (!sw->safe_mode) { 2350 /* read drom */ 2351 ret = tb_drom_read(sw); 2352 if (ret) { 2353 dev_err(&sw->dev, "reading DROM failed\n"); 2354 return ret; 2355 } 2356 tb_sw_dbg(sw, "uid: %#llx\n", sw->uid); 2357 2358 ret = tb_switch_set_uuid(sw); 2359 if (ret) { 2360 dev_err(&sw->dev, "failed to set UUID\n"); 2361 return ret; 2362 } 2363 2364 for (i = 0; i <= sw->config.max_port_number; i++) { 2365 if (sw->ports[i].disabled) { 2366 tb_port_dbg(&sw->ports[i], "disabled by eeprom\n"); 2367 continue; 2368 } 2369 ret = tb_init_port(&sw->ports[i]); 2370 if (ret) { 2371 dev_err(&sw->dev, "failed to initialize port %d\n", i); 2372 return ret; 2373 } 2374 } 2375 2376 tb_switch_default_link_ports(sw); 2377 2378 ret = tb_switch_update_link_attributes(sw); 2379 if (ret) 2380 return ret; 2381 2382 ret = tb_switch_tmu_init(sw); 2383 if (ret) 2384 return ret; 2385 } 2386 2387 ret = device_add(&sw->dev); 2388 if (ret) { 2389 dev_err(&sw->dev, "failed to add device: %d\n", ret); 2390 return ret; 2391 } 2392 2393 if (tb_route(sw)) { 2394 dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n", 2395 sw->vendor, sw->device); 2396 if (sw->vendor_name && sw->device_name) 2397 dev_info(&sw->dev, "%s %s\n", sw->vendor_name, 2398 sw->device_name); 2399 } 2400 2401 ret = tb_switch_nvm_add(sw); 2402 if (ret) { 2403 dev_err(&sw->dev, "failed to add NVM devices\n"); 2404 device_del(&sw->dev); 2405 return ret; 2406 } 2407 2408 pm_runtime_set_active(&sw->dev); 2409 if (sw->rpm) { 2410 pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY); 2411 pm_runtime_use_autosuspend(&sw->dev); 2412 pm_runtime_mark_last_busy(&sw->dev); 2413 pm_runtime_enable(&sw->dev); 2414 pm_request_autosuspend(&sw->dev); 2415 } 2416 2417 return 0; 2418 } 2419 2420 /** 2421 * tb_switch_remove() - Remove and release a switch 2422 * @sw: Switch to remove 2423 * 2424 * This will remove the switch from the domain and release it after last 2425 * reference count drops to zero. If there are switches connected below 2426 * this switch, they will be removed as well. 2427 */ 2428 void tb_switch_remove(struct tb_switch *sw) 2429 { 2430 struct tb_port *port; 2431 2432 if (sw->rpm) { 2433 pm_runtime_get_sync(&sw->dev); 2434 pm_runtime_disable(&sw->dev); 2435 } 2436 2437 /* port 0 is the switch itself and never has a remote */ 2438 tb_switch_for_each_port(sw, port) { 2439 if (tb_port_has_remote(port)) { 2440 tb_switch_remove(port->remote->sw); 2441 port->remote = NULL; 2442 } else if (port->xdomain) { 2443 tb_xdomain_remove(port->xdomain); 2444 port->xdomain = NULL; 2445 } 2446 } 2447 2448 if (!sw->is_unplugged) 2449 tb_plug_events_active(sw, false); 2450 2451 if (tb_switch_is_usb4(sw)) 2452 usb4_switch_unconfigure_link(sw); 2453 else 2454 tb_lc_unconfigure_link(sw); 2455 2456 tb_switch_nvm_remove(sw); 2457 2458 if (tb_route(sw)) 2459 dev_info(&sw->dev, "device disconnected\n"); 2460 device_unregister(&sw->dev); 2461 } 2462 2463 /** 2464 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches 2465 */ 2466 void tb_sw_set_unplugged(struct tb_switch *sw) 2467 { 2468 struct tb_port *port; 2469 2470 if (sw == sw->tb->root_switch) { 2471 tb_sw_WARN(sw, "cannot unplug root switch\n"); 2472 return; 2473 } 2474 if (sw->is_unplugged) { 2475 tb_sw_WARN(sw, "is_unplugged already set\n"); 2476 return; 2477 } 2478 sw->is_unplugged = true; 2479 tb_switch_for_each_port(sw, port) { 2480 if (tb_port_has_remote(port)) 2481 tb_sw_set_unplugged(port->remote->sw); 2482 else if (port->xdomain) 2483 port->xdomain->is_unplugged = true; 2484 } 2485 } 2486 2487 int tb_switch_resume(struct tb_switch *sw) 2488 { 2489 struct tb_port *port; 2490 int err; 2491 2492 tb_sw_dbg(sw, "resuming switch\n"); 2493 2494 /* 2495 * Check for UID of the connected switches except for root 2496 * switch which we assume cannot be removed. 2497 */ 2498 if (tb_route(sw)) { 2499 u64 uid; 2500 2501 /* 2502 * Check first that we can still read the switch config 2503 * space. It may be that there is now another domain 2504 * connected. 2505 */ 2506 err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw)); 2507 if (err < 0) { 2508 tb_sw_info(sw, "switch not present anymore\n"); 2509 return err; 2510 } 2511 2512 if (tb_switch_is_usb4(sw)) 2513 err = usb4_switch_read_uid(sw, &uid); 2514 else 2515 err = tb_drom_read_uid_only(sw, &uid); 2516 if (err) { 2517 tb_sw_warn(sw, "uid read failed\n"); 2518 return err; 2519 } 2520 if (sw->uid != uid) { 2521 tb_sw_info(sw, 2522 "changed while suspended (uid %#llx -> %#llx)\n", 2523 sw->uid, uid); 2524 return -ENODEV; 2525 } 2526 } 2527 2528 err = tb_switch_configure(sw); 2529 if (err) 2530 return err; 2531 2532 /* check for surviving downstream switches */ 2533 tb_switch_for_each_port(sw, port) { 2534 if (!tb_port_has_remote(port) && !port->xdomain) 2535 continue; 2536 2537 if (tb_wait_for_port(port, true) <= 0) { 2538 tb_port_warn(port, 2539 "lost during suspend, disconnecting\n"); 2540 if (tb_port_has_remote(port)) 2541 tb_sw_set_unplugged(port->remote->sw); 2542 else if (port->xdomain) 2543 port->xdomain->is_unplugged = true; 2544 } else if (tb_port_has_remote(port) || port->xdomain) { 2545 /* 2546 * Always unlock the port so the downstream 2547 * switch/domain is accessible. 2548 */ 2549 if (tb_port_unlock(port)) 2550 tb_port_warn(port, "failed to unlock port\n"); 2551 if (port->remote && tb_switch_resume(port->remote->sw)) { 2552 tb_port_warn(port, 2553 "lost during suspend, disconnecting\n"); 2554 tb_sw_set_unplugged(port->remote->sw); 2555 } 2556 } 2557 } 2558 return 0; 2559 } 2560 2561 void tb_switch_suspend(struct tb_switch *sw) 2562 { 2563 struct tb_port *port; 2564 int err; 2565 2566 err = tb_plug_events_active(sw, false); 2567 if (err) 2568 return; 2569 2570 tb_switch_for_each_port(sw, port) { 2571 if (tb_port_has_remote(port)) 2572 tb_switch_suspend(port->remote->sw); 2573 } 2574 2575 if (tb_switch_is_usb4(sw)) 2576 usb4_switch_set_sleep(sw); 2577 else 2578 tb_lc_set_sleep(sw); 2579 } 2580 2581 /** 2582 * tb_switch_query_dp_resource() - Query availability of DP resource 2583 * @sw: Switch whose DP resource is queried 2584 * @in: DP IN port 2585 * 2586 * Queries availability of DP resource for DP tunneling using switch 2587 * specific means. Returns %true if resource is available. 2588 */ 2589 bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in) 2590 { 2591 if (tb_switch_is_usb4(sw)) 2592 return usb4_switch_query_dp_resource(sw, in); 2593 return tb_lc_dp_sink_query(sw, in); 2594 } 2595 2596 /** 2597 * tb_switch_alloc_dp_resource() - Allocate available DP resource 2598 * @sw: Switch whose DP resource is allocated 2599 * @in: DP IN port 2600 * 2601 * Allocates DP resource for DP tunneling. The resource must be 2602 * available for this to succeed (see tb_switch_query_dp_resource()). 2603 * Returns %0 in success and negative errno otherwise. 2604 */ 2605 int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in) 2606 { 2607 if (tb_switch_is_usb4(sw)) 2608 return usb4_switch_alloc_dp_resource(sw, in); 2609 return tb_lc_dp_sink_alloc(sw, in); 2610 } 2611 2612 /** 2613 * tb_switch_dealloc_dp_resource() - De-allocate DP resource 2614 * @sw: Switch whose DP resource is de-allocated 2615 * @in: DP IN port 2616 * 2617 * De-allocates DP resource that was previously allocated for DP 2618 * tunneling. 2619 */ 2620 void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in) 2621 { 2622 int ret; 2623 2624 if (tb_switch_is_usb4(sw)) 2625 ret = usb4_switch_dealloc_dp_resource(sw, in); 2626 else 2627 ret = tb_lc_dp_sink_dealloc(sw, in); 2628 2629 if (ret) 2630 tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n", 2631 in->port); 2632 } 2633 2634 struct tb_sw_lookup { 2635 struct tb *tb; 2636 u8 link; 2637 u8 depth; 2638 const uuid_t *uuid; 2639 u64 route; 2640 }; 2641 2642 static int tb_switch_match(struct device *dev, const void *data) 2643 { 2644 struct tb_switch *sw = tb_to_switch(dev); 2645 const struct tb_sw_lookup *lookup = data; 2646 2647 if (!sw) 2648 return 0; 2649 if (sw->tb != lookup->tb) 2650 return 0; 2651 2652 if (lookup->uuid) 2653 return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid)); 2654 2655 if (lookup->route) { 2656 return sw->config.route_lo == lower_32_bits(lookup->route) && 2657 sw->config.route_hi == upper_32_bits(lookup->route); 2658 } 2659 2660 /* Root switch is matched only by depth */ 2661 if (!lookup->depth) 2662 return !sw->depth; 2663 2664 return sw->link == lookup->link && sw->depth == lookup->depth; 2665 } 2666 2667 /** 2668 * tb_switch_find_by_link_depth() - Find switch by link and depth 2669 * @tb: Domain the switch belongs 2670 * @link: Link number the switch is connected 2671 * @depth: Depth of the switch in link 2672 * 2673 * Returned switch has reference count increased so the caller needs to 2674 * call tb_switch_put() when done with the switch. 2675 */ 2676 struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth) 2677 { 2678 struct tb_sw_lookup lookup; 2679 struct device *dev; 2680 2681 memset(&lookup, 0, sizeof(lookup)); 2682 lookup.tb = tb; 2683 lookup.link = link; 2684 lookup.depth = depth; 2685 2686 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match); 2687 if (dev) 2688 return tb_to_switch(dev); 2689 2690 return NULL; 2691 } 2692 2693 /** 2694 * tb_switch_find_by_uuid() - Find switch by UUID 2695 * @tb: Domain the switch belongs 2696 * @uuid: UUID to look for 2697 * 2698 * Returned switch has reference count increased so the caller needs to 2699 * call tb_switch_put() when done with the switch. 2700 */ 2701 struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid) 2702 { 2703 struct tb_sw_lookup lookup; 2704 struct device *dev; 2705 2706 memset(&lookup, 0, sizeof(lookup)); 2707 lookup.tb = tb; 2708 lookup.uuid = uuid; 2709 2710 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match); 2711 if (dev) 2712 return tb_to_switch(dev); 2713 2714 return NULL; 2715 } 2716 2717 /** 2718 * tb_switch_find_by_route() - Find switch by route string 2719 * @tb: Domain the switch belongs 2720 * @route: Route string to look for 2721 * 2722 * Returned switch has reference count increased so the caller needs to 2723 * call tb_switch_put() when done with the switch. 2724 */ 2725 struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route) 2726 { 2727 struct tb_sw_lookup lookup; 2728 struct device *dev; 2729 2730 if (!route) 2731 return tb_switch_get(tb->root_switch); 2732 2733 memset(&lookup, 0, sizeof(lookup)); 2734 lookup.tb = tb; 2735 lookup.route = route; 2736 2737 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match); 2738 if (dev) 2739 return tb_to_switch(dev); 2740 2741 return NULL; 2742 } 2743 2744 /** 2745 * tb_switch_find_port() - return the first port of @type on @sw or NULL 2746 * @sw: Switch to find the port from 2747 * @type: Port type to look for 2748 */ 2749 struct tb_port *tb_switch_find_port(struct tb_switch *sw, 2750 enum tb_port_type type) 2751 { 2752 struct tb_port *port; 2753 2754 tb_switch_for_each_port(sw, port) { 2755 if (port->config.type == type) 2756 return port; 2757 } 2758 2759 return NULL; 2760 } 2761 2762 void tb_switch_exit(void) 2763 { 2764 ida_destroy(&nvm_ida); 2765 } 2766