// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "tb.h"

/* Switch NVM support */

#define NVM_DEVID		0x05
#define NVM_VERSION		0x08
#define NVM_CSS			0x10
#define NVM_FLASH_SIZE		0x45

#define NVM_MIN_SIZE		SZ_32K
#define NVM_MAX_SIZE		SZ_512K

static DEFINE_IDA(nvm_ida);

struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);

static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}

static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	if (!st) {
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}

static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}

static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size, hdr_size;
	const u8 *buf = sw->nvm->buf;
	u16 ds_size;
	int ret;

	if (!buf)
		return -EINVAL;

	image_size = sw->nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	if (!sw->safe_mode) {
		u16 device_id;

		/*
		 * Make sure the device ID in the image matches the one
		 * we read from the switch config space.
		 */
		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
		if (device_id != sw->config.device_id)
			return -EINVAL;

		if (sw->generation < 3) {
			/* Write CSS headers first */
			ret = dma_port_flash_write(sw->dma_port,
				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
				DMA_PORT_CSS_MAX_SIZE);
			if (ret)
				return ret;
		}

		/* Skip headers in the image */
		buf += hdr_size;
		image_size -= hdr_size;
	}

	return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
}
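
/*
 * Illustrative sketch of the NVM image layout expected by
 * nvm_validate_and_write() above (inferred from its checks, not a
 * normative description of the hardware format):
 *
 *   offset 0x0            FARB header; the low 24 bits of the first
 *                         dword give hdr_size, the start of the
 *                         digital section (must be 4k aligned)
 *   offset NVM_CSS        CSS headers, written separately to
 *                         DMA_PORT_CSS_ADDRESS on generation < 3
 *   offset hdr_size       u16 digital section size (must fit in the
 *                         image)
 *   hdr_size + NVM_DEVID  u16 device ID (must match the switch
 *                         config space unless in safe mode)
 */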
static int nvm_authenticate_host(struct tb_switch *sw)
{
	int ret = 0;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		u32 status;

		ret = tb_domain_disconnect_all_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this if
		 * everything goes well so getting a timeout is expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		if (!ret || ret == -ETIMEDOUT)
			return 0;

		/*
		 * Any error from the update auth operation requires power
		 * cycling of the host router.
		 */
		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
			nvm_set_auth_status(sw, status);
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return ret;
}
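
/*
 * Device router authentication differs from the host router case
 * above: the host controller typically disappears during a successful
 * update (so a timeout from the DMA port is treated as success),
 * whereas a device router stays reachable, which lets us poll its
 * authentication status below before power cycling it.
 */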
static int nvm_authenticate_device(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	switch (ret) {
	case 0:
	case -ETIMEDOUT:
	case -EACCES:
	case -EINVAL:
		/* Power cycle is required */
		break;
	default:
		return ret;
	}

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get a timeout for a while). Once
	 * we get a response the device needs to be power cycled in order
	 * for the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;
		if (ret > 0) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}

static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
			      size_t bytes)
{
	struct tb_switch *sw = priv;
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = dma_port_flash_read(sw->dma_port, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
			       size_t bytes)
{
	struct tb_switch *sw = priv;
	int ret = 0;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	/*
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	 */
	if (!sw->nvm->buf) {
		sw->nvm->buf = vmalloc(NVM_MAX_SIZE);
		if (!sw->nvm->buf) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	sw->nvm->buf_data_size = offset + bytes;
	memcpy(sw->nvm->buf + offset, val, bytes);

unlock:
	mutex_unlock(&sw->tb->lock);

	return ret;
}
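
/*
 * The two callbacks above are exposed to userspace through a pair of
 * NVMem devices registered below: "nvm_active" is a read-only view of
 * the currently active NVM, while "nvm_non_active" is a root-only,
 * write-only buffer that caches the new image until the user triggers
 * authentication through the nvm_authenticate sysfs attribute.
 */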
static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
					   size_t size, bool active)
{
	struct nvmem_config config;

	memset(&config, 0, sizeof(config));

	if (active) {
		config.name = "nvm_active";
		config.reg_read = tb_switch_nvm_read;
		config.read_only = true;
	} else {
		config.name = "nvm_non_active";
		config.reg_write = tb_switch_nvm_write;
		config.root_only = true;
	}

	config.id = id;
	config.stride = 4;
	config.word_size = 4;
	config.size = size;
	config.dev = &sw->dev;
	config.owner = THIS_MODULE;
	config.priv = sw;

	return nvmem_register(&config);
}

static int tb_switch_nvm_add(struct tb_switch *sw)
{
	struct nvmem_device *nvm_dev;
	struct tb_switch_nvm *nvm;
	u32 val;
	int ret;

	if (!sw->dma_port)
		return 0;

	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
	if (!nvm)
		return -ENOMEM;

	nvm->id = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);

	/*
	 * If the switch is in safe mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (!sw->safe_mode) {
		u32 nvm_size, hdr_size;

		ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val,
					  sizeof(val));
		if (ret)
			goto err_ida;

		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
		nvm_size = (SZ_1M << (val & 7)) / 8;
		nvm_size = (nvm_size - hdr_size) / 2;

		ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val,
					  sizeof(val));
		if (ret)
			goto err_ida;

		nvm->major = val >> 16;
		nvm->minor = val >> 8;

		nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true);
		if (IS_ERR(nvm_dev)) {
			ret = PTR_ERR(nvm_dev);
			goto err_ida;
		}
		nvm->active = nvm_dev;
	}

	if (!sw->no_nvm_upgrade) {
		nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
		if (IS_ERR(nvm_dev)) {
			ret = PTR_ERR(nvm_dev);
			goto err_nvm_active;
		}
		nvm->non_active = nvm_dev;
	}

	sw->nvm = nvm;
	return 0;

err_nvm_active:
	if (nvm->active)
		nvmem_unregister(nvm->active);
err_ida:
	ida_simple_remove(&nvm_ida, nvm->id);
	kfree(nvm);

	return ret;
}

static void tb_switch_nvm_remove(struct tb_switch *sw)
{
	struct tb_switch_nvm *nvm;

	nvm = sw->nvm;
	sw->nvm = NULL;

	if (!nvm)
		return;

	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);

	if (nvm->non_active)
		nvmem_unregister(nvm->non_active);
	if (nvm->active)
		nvmem_unregister(nvm->active);
	ida_simple_remove(&nvm_ida, nvm->id);
	vfree(nvm->buf);
	kfree(nvm);
}

/* port utility functions */

static const char *tb_port_type(struct tb_regs_port_header *port)
{
	switch (port->type >> 16) {
	case 0:
		switch ((u8)port->type) {
		case 0:
			return "Inactive";
		case 1:
			return "Port";
		case 2:
			return "NHI";
		default:
			return "unknown";
		}
	case 0x2:
		return "Ethernet";
	case 0x8:
		return "SATA";
	case 0xe:
		return "DP/HDMI";
	case 0x10:
		return "PCIe";
	case 0x20:
		return "USB";
	default:
		return "unknown";
	}
}

static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
{
	tb_dbg(tb,
	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
	       port->port_number, port->vendor_id, port->device_id,
	       port->revision, port->thunderbolt_version, tb_port_type(port),
	       port->type);
	tb_dbg(tb, "  Max hop id (in/out): %d/%d\n",
	       port->max_in_hop_id, port->max_out_hop_id);
	tb_dbg(tb, "  Max counters: %d\n", port->max_counters);
	tb_dbg(tb, "  NFC Credits: %#x\n", port->nfc_credits);
}

/**
 * tb_port_state() - get connectedness state of a port
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
static int tb_port_state(struct tb_port *port)
{
	struct tb_cap_phy phy;
	int res;

	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}
	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
	if (res)
		return res;
	return phy.state;
}
/**
 * tb_wait_for_port() - wait for a port to become ready
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the
 * link has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
	int retries = 10;
	int state;

	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

	while (retries--) {
		state = tb_port_state(port);
		if (state < 0)
			return state;
		if (state == TB_PORT_DISABLED) {
			tb_port_dbg(port, "is disabled (state: 0)\n");
			return 0;
		}
		if (state == TB_PORT_UNPLUGGED) {
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_dbg(port,
					    "is unplugged (state: 7), retrying...\n");
				msleep(100);
				continue;
			}
			tb_port_dbg(port, "is unplugged (state: 7)\n");
			return 0;
		}
		if (state == TB_PORT_UP) {
			tb_port_dbg(port, "is connected, link is up (state: 2)\n");
			return 1;
		}

		/*
		 * After plug-in the state is TB_PORT_CONNECTING. Give it some
		 * time.
		 */
		tb_port_dbg(port,
			    "is connected, link is not up (state: %d), retrying...\n",
			    state);
		msleep(100);
	}
	tb_port_warn(port,
		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
}

/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
	u32 nfc_credits;

	if (credits == 0 || port->sw->is_unplugged)
		return 0;

	nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	nfc_credits += credits;

	tb_port_dbg(port, "adding %d NFC credits to %u\n", credits,
		    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);

	port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
	port->config.nfc_credits |= nfc_credits;

	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, ADP_CS_4, 1);
}

/**
 * tb_port_set_initial_credits() - Set initial port link credits allocated
 * @port: Port to set the initial credits
 * @credits: Number of credits to allocate
 *
 * Set initial credits value to be used for ingress shared buffering.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
	if (ret)
		return ret;

	data &= ~ADP_CS_5_LCA_MASK;
	data |= (credits << ADP_CS_5_LCA_SHIFT) & ADP_CS_5_LCA_MASK;

	return tb_port_write(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
}

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
	u32 zero[3] = { 0, 0, 0 };

	tb_port_dbg(port, "clearing counter %d\n", counter);
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}
/**
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
	int res;
	int cap;

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res) {
		if (res == -ENODEV) {
			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
			       port->port);
			return 0;
		}
		return res;
	}

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT && port->port != 0) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");
	} else if (port->port != 0) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
		if (cap > 0)
			port->cap_adap = cap;
	}

	tb_dump_port(port->sw->tb, &port->config);

	/* Control port does not need HopID allocation */
	if (port->port) {
		ida_init(&port->in_hopids);
		ida_init(&port->out_hopids);
	}

	INIT_LIST_HEAD(&port->list);
	return 0;
}

static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
			       int max_hopid)
{
	int port_max_hopid;
	struct ida *ida;

	if (in) {
		port_max_hopid = port->config.max_in_hop_id;
		ida = &port->in_hopids;
	} else {
		port_max_hopid = port->config.max_out_hop_id;
		ida = &port->out_hopids;
	}

	/* HopIDs 0-7 are reserved */
	if (min_hopid < TB_PATH_MIN_HOPID)
		min_hopid = TB_PATH_MIN_HOPID;

	if (max_hopid < 0 || max_hopid > port_max_hopid)
		max_hopid = port_max_hopid;

	return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
}

/**
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
}

/**
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
}

/**
 * tb_port_release_in_hopid() - Release allocated input HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->in_hopids, hopid);
}

/**
 * tb_port_release_out_hopid() - Release allocated output HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->out_hopids, hopid);
}
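
/*
 * Usage sketch for the HopID helpers above (hypothetical caller, for
 * illustration only): path setup typically allocates a HopID on the
 * ingress side of a hop and releases it again on teardown. Passing a
 * negative max_hopid uses the port's own maximum:
 *
 *	hopid = tb_port_alloc_in_hopid(in_port, TB_PATH_MIN_HOPID, -1);
 *	if (hopid < 0)
 *		return hopid;
 *	...
 *	tb_port_release_in_hopid(in_port, hopid);
 */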
/**
 * tb_next_port_on_path() - Return next port for given port on a path
 * @start: Start port of the walk
 * @end: End port of the walk
 * @prev: Previous port (%NULL if this is the first)
 *
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If @prev is a dual
 * link port, the function follows that link and returns another end on
 * that same link.
 *
 * If the @end port has been reached, return %NULL.
 *
 * Domain tb->lock must be held when this function is called.
 */
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
				     struct tb_port *prev)
{
	struct tb_port *next;

	if (!prev)
		return start;

	if (prev->sw == end->sw) {
		if (prev == end)
			return NULL;
		return end;
	}

	if (start->sw->config.depth < end->sw->config.depth) {
		if (prev->remote &&
		    prev->remote->sw->config.depth > prev->sw->config.depth)
			next = prev->remote;
		else
			next = tb_port_at(tb_route(end->sw), prev->sw);
	} else {
		if (tb_is_upstream_port(prev)) {
			next = prev->remote;
		} else {
			next = tb_upstream_port(prev->sw);
			/*
			 * Keep the same link if prev and next are both
			 * dual link ports.
			 */
			if (next->dual_link_port &&
			    next->link_nr != prev->link_nr) {
				next = next->dual_link_port;
			}
		}
	}

	return next;
}
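
/*
 * A minimal walk sketch using tb_next_port_on_path() (illustration
 * only, handle_port() is a hypothetical helper): starting with
 * @prev == NULL returns @start, and the walk terminates with NULL once
 * @end has been returned. The domain lock must be held for the whole
 * walk:
 *
 *	struct tb_port *p = NULL;
 *
 *	while ((p = tb_next_port_on_path(start, end, p)))
 *		handle_port(p);
 */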
static int tb_port_get_link_speed(struct tb_port *port)
{
	u32 val, speed;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
		LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
	/* Current link speed in Gb/s */
	return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
}

static int tb_port_get_link_width(struct tb_port *port)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
	       LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
}

static bool tb_port_is_width_supported(struct tb_port *port, int width)
{
	u32 phy, widths;
	int ret;

	if (!port->cap_phy)
		return false;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);
	if (ret)
		return false;

	widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
		 LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;

	return !!(widths & width);
}

static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
	switch (width) {
	case 1:
		val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;
	case 2:
		val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;
	default:
		return -EINVAL;
	}

	val |= LANE_ADP_CS_1_LB;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

static int tb_port_lane_bonding_enable(struct tb_port *port)
{
	int ret;

	/*
	 * Enable lane bonding for both links if not already enabled by,
	 * for example, the boot firmware.
	 */
	ret = tb_port_get_link_width(port);
	if (ret == 1) {
		ret = tb_port_set_link_width(port, 2);
		if (ret)
			return ret;
	}

	ret = tb_port_get_link_width(port->dual_link_port);
	if (ret == 1) {
		ret = tb_port_set_link_width(port->dual_link_port, 2);
		if (ret) {
			/* Roll back the primary lane on failure */
			tb_port_set_link_width(port, 1);
			return ret;
		}
	}

	port->bonded = true;
	port->dual_link_port->bonded = true;

	return 0;
}

static void tb_port_lane_bonding_disable(struct tb_port *port)
{
	port->dual_link_port->bonded = false;
	port->bonded = false;

	tb_port_set_link_width(port->dual_link_port, 1);
	tb_port_set_link_width(port, 1);
}
/**
 * tb_port_is_enabled() - Is the adapter port enabled
 * @port: Port to check
 */
bool tb_port_is_enabled(struct tb_port *port)
{
	switch (port->config.type) {
	case TB_TYPE_PCIE_UP:
	case TB_TYPE_PCIE_DOWN:
		return tb_pci_port_is_enabled(port);

	case TB_TYPE_DP_HDMI_IN:
	case TB_TYPE_DP_HDMI_OUT:
		return tb_dp_port_is_enabled(port);

	default:
		return false;
	}
}

/**
 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
 * @port: PCIe port to check
 */
bool tb_pci_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_PCIE_CS_0, 1))
		return false;

	return !!(data & ADP_PCIE_CS_0_PE);
}

/**
 * tb_pci_port_enable() - Enable PCIe adapter port
 * @port: PCIe port to enable
 * @enable: Enable/disable the PCIe adapter
 */
int tb_pci_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_PCIE_CS_0, 1);
}

/**
 * tb_dp_port_hpd_is_active() - Is HPD already active
 * @port: DP out port to check
 *
 * Checks if the DP OUT adapter port has the HDP bit already set.
 */
int tb_dp_port_hpd_is_active(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	return !!(data & ADP_DP_CS_2_HDP);
}

/**
 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
 * @port: Port to clear HPD
 *
 * If the DP IN port has the HDP bit set, this function can be used to
 * clear it.
 */
int tb_dp_port_hpd_clear(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_3, 1);
	if (ret)
		return ret;

	data |= ADP_DP_CS_3_HDPC;
	return tb_port_write(port, &data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_3, 1);
}

/**
 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
 * @port: DP IN/OUT port to set hops
 * @video: Video Hop ID
 * @aux_tx: AUX TX Hop ID
 * @aux_rx: AUX RX Hop ID
 *
 * Programs specified Hop IDs for DP IN/OUT port.
 */
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
			unsigned int aux_tx, unsigned int aux_rx)
{
	u32 data[2];
	int ret;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
		ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
		ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}
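
/*
 * Suggested programming order for a DP adapter, as implied by the
 * helpers in this file (a sketch, not a normative sequence): clear a
 * stale hot plug status with tb_dp_port_hpd_clear(), program the
 * video/AUX HopIDs with tb_dp_port_set_hops(), and only then enable
 * the paths with tb_dp_port_enable() below.
 */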
/**
 * tb_dp_port_is_enabled() - Is DP adapter port enabled
 * @port: DP adapter port to check
 */
bool tb_dp_port_is_enabled(struct tb_port *port)
{
	u32 data[2];

	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
			 ARRAY_SIZE(data)))
		return false;

	return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
}

/**
 * tb_dp_port_enable() - Enables/disables DP paths of a port
 * @port: DP IN/OUT port
 * @enable: Enable/disable DP path
 *
 * Once Hop IDs are programmed DP paths can be enabled or disabled by
 * calling this function.
 */
int tb_dp_port_enable(struct tb_port *port, bool enable)
{
	u32 data[2];
	int ret;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	if (enable)
		data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
	else
		data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/* switch utility functions */

static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
{
	tb_dbg(tb, " Switch: %x:%x (Revision: %d, TB Version: %d)\n",
	       sw->vendor_id, sw->device_id, sw->revision,
	       sw->thunderbolt_version);
	tb_dbg(tb, "  Max Port Number: %d\n", sw->max_port_number);
	tb_dbg(tb, "  Config:\n");
	tb_dbg(tb,
	       "   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
	       sw->upstream_port_number, sw->depth,
	       (((u64)sw->route_hi) << 32) | sw->route_lo,
	       sw->enabled, sw->plug_events_delay);
	tb_dbg(tb, "   unknown1: %#x unknown4: %#x\n",
	       sw->__unknown1, sw->__unknown4);
}

/**
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_switch_reset(struct tb *tb, u64 route)
{
	struct tb_cfg_result res;
	struct tb_regs_switch_header header = {
		.route_hi = route >> 32,
		.route_lo = route,
		.enabled = true,
	};

	tb_dbg(tb, "resetting switch at %llx\n", route);
	res.err = tb_cfg_write(tb->ctl, ((u32 *)&header) + 2, route,
			       0, 2, 2, 2);
	if (res.err)
		return res.err;
	res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT);
	if (res.err > 0)
		return -EIO;
	return res.err;
}
/**
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
	u32 data;
	int res;

	if (tb_switch_is_icm(sw))
		return 0;

	sw->config.plug_events_delay = 0xff;
	res = tb_sw_write(sw, ((u32 *)&sw->config) + 4, TB_CFG_SWITCH, 4, 1);
	if (res)
		return res;

	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
	if (res)
		return res;

	if (active) {
		/* Clear bits 2..6 to enable plug events */
		data = data & 0xFFFFFF83;
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
			break;
		default:
			data |= 4;
		}
	} else {
		/* Set bits 2..6 to mask all plug events */
		data = data | 0x7c;
	}
	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
			   sw->cap_plug_events + 1, 1);
}

static ssize_t authorized_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->authorized);
}

static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
	int ret = -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->authorized)
		goto unlock;

	switch (val) {
	/* Approve switch */
	case 1:
		if (sw->key)
			ret = tb_domain_approve_switch_key(sw->tb, sw);
		else
			ret = tb_domain_approve_switch(sw->tb, sw);
		break;

	/* Challenge switch */
	case 2:
		if (sw->key)
			ret = tb_domain_challenge_switch_key(sw->tb, sw);
		break;

	default:
		break;
	}

	if (!ret) {
		sw->authorized = val;
		/* Notify status change to the userspace */
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
	}

unlock:
	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t authorized_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int val;
	ssize_t ret;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 2)
		return -EINVAL;

	pm_runtime_get_sync(&sw->dev);
	ret = tb_switch_set_authorized(sw, val);
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(authorized);
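
/*
 * Values accepted by the authorized attribute (see
 * tb_switch_set_authorized() above): 0 is reported for an unauthorized
 * switch, writing 1 approves the switch (using the stored key if one
 * is set) and writing 2 challenges the switch with the stored key.
 * Once a switch is authorized the value cannot be changed back.
 */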
static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->boot);
}
static DEVICE_ATTR_RO(boot);

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
}
static DEVICE_ATTR_RO(device_name);

static ssize_t
generation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->generation);
}
static DEVICE_ATTR_RO(generation);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	ssize_t ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->key)
		ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
	else
		ret = sprintf(buf, "\n");

	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t key_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u8 key[TB_SWITCH_KEY_SIZE];
	ssize_t ret = count;
	bool clear = false;

	if (!strcmp(buf, "\n"))
		clear = true;
	else if (hex2bin(key, buf, sizeof(key)))
		return -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->authorized) {
		ret = -EBUSY;
	} else {
		kfree(sw->key);
		if (clear) {
			sw->key = NULL;
		} else {
			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
			if (!sw->key)
				ret = -ENOMEM;
		}
	}

	mutex_unlock(&sw->tb->lock);
	return ret;
}
static DEVICE_ATTR(key, 0600, key_show, key_store);

static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
}

/*
 * Currently all lanes must run at the same speed but we expose here
 * both directions to allow possible asymmetric links in the future.
 */
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);

static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->link_width);
}

/*
 * Currently the link has the same number of lanes in both directions (1
 * or 2) but expose them separately to allow possible asymmetric links
 * in the future.
 */
static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
static void nvm_authenticate_start(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	/*
	 * During host router NVM upgrade we should not allow the root
	 * port to go into D3cold because some root ports cannot trigger
	 * PME themselves. To be on the safe side keep the root port in
	 * D0 during the whole upgrade process.
	 */
	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_get_noresume(&root_port->dev);
}

static void nvm_authenticate_complete(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_put(&root_port->dev);
}

static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u32 status;

	nvm_get_auth_status(sw, &status);
	return sprintf(buf, "%#x\n", status);
}

static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	bool val;
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	/* If NVMem devices are not yet added */
	if (!sw->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtobool(buf, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear the authentication status */
	nvm_clear_auth_status(sw);

	if (val) {
		if (!sw->nvm->buf) {
			ret = -EINVAL;
			goto exit_unlock;
		}

		ret = nvm_validate_and_write(sw);
		if (ret)
			goto exit_unlock;

		sw->nvm->authenticating = true;

		if (!tb_route(sw)) {
			/*
			 * Keep root port from suspending as long as the
			 * NVM upgrade process is running.
			 */
			nvm_authenticate_start(sw);
			ret = nvm_authenticate_host(sw);
		} else {
			ret = nvm_authenticate_device(sw);
		}
	}

exit_unlock:
	mutex_unlock(&sw->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);
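
/*
 * A typical userspace upgrade flow against the attributes above (a
 * sketch of the expected usage; the exact device path and nvmem id
 * depend on the actual device):
 *
 *	# dd if=new-image.nvm \
 *	     of=/sys/bus/thunderbolt/devices/0-1/nvm_non_active0/nvmem
 *	# echo 1 > /sys/bus/thunderbolt/devices/0-1/nvm_authenticate
 *
 * The first write caches the image (tb_switch_nvm_write()) and the
 * authenticate write validates and flashes it, after which the router
 * is power cycled so the new NVM is taken into use.
 */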
static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->safe_mode)
		ret = -ENODATA;
	else if (!sw->nvm)
		ret = -EAGAIN;
	else
		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);

	mutex_unlock(&sw->tb->lock);

	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%pUb\n", sw->uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *switch_attrs[] = {
	&dev_attr_authorized.attr,
	&dev_attr_boot.attr,
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_generation.attr,
	&dev_attr_key.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_rx_speed.attr,
	&dev_attr_rx_lanes.attr,
	&dev_attr_tx_speed.attr,
	&dev_attr_tx_lanes.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	&dev_attr_unique_id.attr,
	NULL,
};

static umode_t switch_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct tb_switch *sw = tb_to_switch(dev);

	if (attr == &dev_attr_device.attr) {
		if (!sw->device)
			return 0;
	} else if (attr == &dev_attr_device_name.attr) {
		if (!sw->device_name)
			return 0;
	} else if (attr == &dev_attr_vendor.attr) {
		if (!sw->vendor)
			return 0;
	} else if (attr == &dev_attr_vendor_name.attr) {
		if (!sw->vendor_name)
			return 0;
	} else if (attr == &dev_attr_key.attr) {
		if (tb_route(sw) &&
		    sw->tb->security_level == TB_SECURITY_SECURE &&
		    sw->security_level == TB_SECURITY_SECURE)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_rx_speed.attr ||
		   attr == &dev_attr_rx_lanes.attr ||
		   attr == &dev_attr_tx_speed.attr ||
		   attr == &dev_attr_tx_lanes.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate.attr) {
		if (sw->dma_port && !sw->no_nvm_upgrade)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_version.attr) {
		if (sw->dma_port)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_boot.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	}

	return sw->safe_mode ? 0 : attr->mode;
}

static struct attribute_group switch_group = {
	.is_visible = switch_attr_is_visible,
	.attrs = switch_attrs,
};

static const struct attribute_group *switch_groups[] = {
	&switch_group,
	NULL,
};

static void tb_switch_release(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	struct tb_port *port;

	dma_port_free(sw->dma_port);

	tb_switch_for_each_port(sw, port) {
		if (!port->disabled) {
			ida_destroy(&port->in_hopids);
			ida_destroy(&port->out_hopids);
		}
	}

	kfree(sw->uuid);
	kfree(sw->device_name);
	kfree(sw->vendor_name);
	kfree(sw->ports);
	kfree(sw->drom);
	kfree(sw->key);
	kfree(sw);
}
/*
 * Currently only need to provide the callbacks. Everything else is handled
 * in the connection manager.
 */
static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_suspend_switch)
		return cm_ops->runtime_suspend_switch(sw);

	return 0;
}

static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_resume_switch)
		return cm_ops->runtime_resume_switch(sw);
	return 0;
}

static const struct dev_pm_ops tb_switch_pm_ops = {
	SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
			   NULL)
};

struct device_type tb_switch_type = {
	.name = "thunderbolt_device",
	.release = tb_switch_release,
	.pm = &tb_switch_pm_ops,
};

static int tb_switch_get_generation(struct tb_switch *sw)
{
	switch (sw->config.device_id) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
	case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
		return 1;

	case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
		return 2;

	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ICL_NHI0:
	case PCI_DEVICE_ID_INTEL_ICL_NHI1:
		return 3;

	default:
		/*
		 * For unknown switches assume generation to be 1 to be
		 * on the safe side.
		 */
		tb_sw_warn(sw, "unsupported switch device id %#x\n",
			   sw->config.device_id);
		return 1;
	}
}
/**
 * tb_switch_alloc() - allocate a switch
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * Allocates and initializes a switch. Will not upload configuration to
 * the switch. For that you need to call tb_switch_configure()
 * separately. The returned switch should be released by calling
 * tb_switch_put().
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of
 * failure.
 */
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
				  u64 route)
{
	struct tb_switch *sw;
	int upstream_port;
	int i, ret, depth;

	/* Make sure we do not exceed maximum topology limit */
	depth = tb_route_length(route);
	if (depth > TB_SWITCH_MAX_DEPTH)
		return ERR_PTR(-EADDRNOTAVAIL);

	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
	if (upstream_port < 0)
		return ERR_PTR(upstream_port);

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	sw->tb = tb;
	ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
	if (ret)
		goto err_free_sw_ports;

	tb_dbg(tb, "current switch config:\n");
	tb_dump_switch(tb, &sw->config);

	/* configure switch */
	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = depth;
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->config.enabled = 0;

	/* initialize ports */
	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
			    GFP_KERNEL);
	if (!sw->ports) {
		ret = -ENOMEM;
		goto err_free_sw_ports;
	}

	for (i = 0; i <= sw->config.max_port_number; i++) {
		/* minimum setup for tb_find_cap and tb_drom_read to work */
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;
	}

	sw->generation = tb_switch_get_generation(sw);

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
	if (ret < 0) {
		tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS, aborting\n");
		goto err_free_sw_ports;
	}
	sw->cap_plug_events = ret;

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
	if (ret > 0)
		sw->cap_lc = ret;

	/* Root switch is always authorized */
	if (!route)
		sw->authorized = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;

err_free_sw_ports:
	kfree(sw->ports);
	kfree(sw);

	return ERR_PTR(ret);
}
/**
 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * This creates a switch in safe mode. This means the switch pretty much
 * lacks all capabilities except DMA configuration port before it is
 * flashed with a valid NVM firmware.
 *
 * The returned switch must be released by calling tb_switch_put().
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
 */
struct tb_switch *
tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
{
	struct tb_switch *sw;

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	sw->tb = tb;
	sw->config.depth = tb_route_length(route);
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->safe_mode = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;
}

/**
 * tb_switch_configure() - Uploads configuration to the switch
 * @sw: Switch to configure
 *
 * Call this function before the switch is added to the system. It will
 * upload the configuration to the switch and make it available for the
 * connection manager to use.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_configure(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	u64 route;
	int ret;

	route = tb_route(sw);
	tb_dbg(tb, "initializing Switch at %#llx (depth: %d, up port: %d)\n",
	       route, tb_route_length(route), sw->config.upstream_port_number);

	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
		tb_sw_warn(sw, "unknown switch vendor id %#x\n",
			   sw->config.vendor_id);

	sw->config.enabled = 1;

	/* upload configuration */
	ret = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3);
	if (ret)
		return ret;

	ret = tb_lc_configure_link(sw);
	if (ret)
		return ret;

	return tb_plug_events_active(sw, true);
}
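
/*
 * Lifecycle sketch for a switch, assembled from the kernel-doc of the
 * helpers in this file (illustration of the expected call order by a
 * connection manager, error handling omitted):
 *
 *	sw = tb_switch_alloc(tb, parent, route);
 *	if (!IS_ERR(sw)) {
 *		tb_switch_configure(sw);
 *		tb_switch_add(sw);
 *	}
 *	...
 *	tb_switch_remove(sw);
 */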
static int tb_switch_set_uuid(struct tb_switch *sw)
{
	u32 uuid[4];
	int ret;

	if (sw->uuid)
		return 0;

	/*
	 * The newer controllers include fused UUID as part of link
	 * controller specific registers
	 */
	ret = tb_lc_read_uuid(sw, uuid);
	if (ret) {
		/*
		 * ICM generates UUID based on UID and fills the upper
		 * two words with ones. This is not strictly following
		 * UUID format but we want to be compatible with it so
		 * we do the same here.
		 */
		uuid[0] = sw->uid & 0xffffffff;
		uuid[1] = (sw->uid >> 32) & 0xffffffff;
		uuid[2] = 0xffffffff;
		uuid[3] = 0xffffffff;
	}

	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
	if (!sw->uuid)
		return -ENOMEM;
	return 0;
}

static int tb_switch_add_dma_port(struct tb_switch *sw)
{
	u32 status;
	int ret;

	switch (sw->generation) {
	case 2:
		/* Only root switch can be upgraded */
		if (tb_route(sw))
			return 0;

		/* fallthrough */
	case 3:
		ret = tb_switch_set_uuid(sw);
		if (ret)
			return ret;
		break;

	default:
		/*
		 * DMA port is the only thing available when the switch
		 * is in safe mode.
		 */
		if (!sw->safe_mode)
			return 0;
		break;
	}

	/* Root switch DMA port requires running firmware */
	if (!tb_route(sw) && !tb_switch_is_icm(sw))
		return 0;

	sw->dma_port = dma_port_alloc(sw);
	if (!sw->dma_port)
		return 0;

	if (sw->no_nvm_upgrade)
		return 0;

	/*
	 * If there is status already set then authentication failed
	 * when the dma_port_flash_update_auth() returned. Power cycling
	 * is not needed (it was done already) so the only thing we do
	 * here is to unblock runtime PM of the root port.
	 */
	nvm_get_auth_status(sw, &status);
	if (status) {
		if (!tb_route(sw))
			nvm_authenticate_complete(sw);
		return 0;
	}

	/*
	 * Check status of the previous flash authentication. If there
	 * is one we need to power cycle the switch in any case to make
	 * it functional again.
	 */
	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
	if (ret <= 0)
		return ret;

	/* Now we can allow root port to suspend again */
	if (!tb_route(sw))
		nvm_authenticate_complete(sw);

	if (status) {
		tb_sw_info(sw, "switch flash authentication failed\n");
		nvm_set_auth_status(sw, status);
	}

	tb_sw_info(sw, "power cycling the switch now\n");
	dma_port_power_cycle(sw->dma_port);

	/*
	 * We return error here which causes the switch adding failure.
	 * It should appear back after power cycle is complete.
	 */
	return -ESHUTDOWN;
}

static void tb_switch_default_link_ports(struct tb_switch *sw)
{
	int i;

	for (i = 1; i <= sw->config.max_port_number; i += 2) {
		struct tb_port *port = &sw->ports[i];
		struct tb_port *subordinate;

		if (!tb_port_is_null(port))
			continue;

		/* Check for the subordinate port */
		if (i == sw->config.max_port_number ||
		    !tb_port_is_null(&sw->ports[i + 1]))
			continue;

		/* Link them if not already done so (by DROM) */
		subordinate = &sw->ports[i + 1];
		if (!port->dual_link_port && !subordinate->dual_link_port) {
			port->link_nr = 0;
			port->dual_link_port = subordinate;
			subordinate->link_nr = 1;
			subordinate->dual_link_port = port;

			tb_sw_dbg(sw, "linked ports %d <-> %d\n",
				  port->port, subordinate->port);
		}
	}
}

static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
{
	const struct tb_port *up = tb_upstream_port(sw);

	if (!up->dual_link_port || !up->dual_link_port->remote)
		return false;

	return tb_lc_lane_bonding_possible(sw);
}

static int tb_switch_update_link_attributes(struct tb_switch *sw)
{
	struct tb_port *up;
	bool change = false;
	int ret;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	up = tb_upstream_port(sw);

	ret = tb_port_get_link_speed(up);
	if (ret < 0)
		return ret;
	if (sw->link_speed != ret)
		change = true;
	sw->link_speed = ret;

	ret = tb_port_get_link_width(up);
	if (ret < 0)
		return ret;
	if (sw->link_width != ret)
		change = true;
	sw->link_width = ret;

	/* Notify userspace that there is a possible link attribute change */
	if (device_is_registered(&sw->dev) && change)
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);

	return 0;
}
/**
 * tb_switch_lane_bonding_enable() - Enable lane bonding
 * @sw: Switch to enable lane bonding
 *
 * Connection manager can call this function to enable lane bonding of a
 * switch. If conditions are correct and both switches support the feature,
 * lanes are bonded. It is safe to call this for any switch.
 */
int tb_switch_lane_bonding_enable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
	struct tb_port *up, *down;
	u64 route = tb_route(sw);
	int ret;

	if (!route)
		return 0;

	if (!tb_switch_lane_bonding_possible(sw))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_port_at(route, parent);

	if (!tb_port_is_width_supported(up, 2) ||
	    !tb_port_is_width_supported(down, 2))
		return 0;

	ret = tb_port_lane_bonding_enable(up);
	if (ret) {
		tb_port_warn(up, "failed to enable lane bonding\n");
		return ret;
	}

	ret = tb_port_lane_bonding_enable(down);
	if (ret) {
		tb_port_warn(down, "failed to enable lane bonding\n");
		tb_port_lane_bonding_disable(up);
		return ret;
	}

	tb_switch_update_link_attributes(sw);

	tb_sw_dbg(sw, "lane bonding enabled\n");
	return ret;
}

/**
 * tb_switch_lane_bonding_disable() - Disable lane bonding
 * @sw: Switch whose lane bonding to disable
 *
 * Disables lane bonding between @sw and parent. This can be called even
 * if lanes were not bonded originally.
 */
void tb_switch_lane_bonding_disable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
	struct tb_port *up, *down;

	if (!tb_route(sw))
		return;

	up = tb_upstream_port(sw);
	if (!up->bonded)
		return;

	down = tb_port_at(tb_route(sw), parent);

	tb_port_lane_bonding_disable(up);
	tb_port_lane_bonding_disable(down);

	tb_switch_update_link_attributes(sw);
	tb_sw_dbg(sw, "lane bonding disabled\n");
}
/**
 * tb_switch_add() - Add a switch to the domain
 * @sw: Switch to add
 *
 * This is the last step in adding a switch to the domain. It reads
 * identification information from the DROM and initializes the ports
 * so that they can be used to connect other switches. The switch is
 * exposed to userspace when this function returns successfully. To
 * remove and release the switch, call tb_switch_remove().
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_add(struct tb_switch *sw)
{
	int i, ret;

	/*
	 * Initialize the DMA control port now, before we read the DROM.
	 * Recent host controllers have a more complete DROM on NVM that
	 * includes vendor and model identification strings, which we
	 * then expose to userspace. NVM can be accessed through the DMA
	 * configuration based mailbox.
	 */
	ret = tb_switch_add_dma_port(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add DMA port\n");
		return ret;
	}

	if (!sw->safe_mode) {
		/* read drom */
		ret = tb_drom_read(sw);
		if (ret) {
			dev_err(&sw->dev, "reading DROM failed\n");
			return ret;
		}
		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);

		ret = tb_switch_set_uuid(sw);
		if (ret) {
			dev_err(&sw->dev, "failed to set UUID\n");
			return ret;
		}

		for (i = 0; i <= sw->config.max_port_number; i++) {
			if (sw->ports[i].disabled) {
				tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
				continue;
			}
			ret = tb_init_port(&sw->ports[i]);
			if (ret) {
				dev_err(&sw->dev, "failed to initialize port %d\n", i);
				return ret;
			}
		}

		tb_switch_default_link_ports(sw);

		ret = tb_switch_update_link_attributes(sw);
		if (ret)
			return ret;
	}

	ret = device_add(&sw->dev);
	if (ret) {
		dev_err(&sw->dev, "failed to add device: %d\n", ret);
		return ret;
	}

	if (tb_route(sw)) {
		dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
			 sw->vendor, sw->device);
		if (sw->vendor_name && sw->device_name)
			dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
				 sw->device_name);
	}

	ret = tb_switch_nvm_add(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add NVM devices\n");
		device_del(&sw->dev);
		return ret;
	}

	pm_runtime_set_active(&sw->dev);
	if (sw->rpm) {
		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
		pm_runtime_use_autosuspend(&sw->dev);
		pm_runtime_mark_last_busy(&sw->dev);
		pm_runtime_enable(&sw->dev);
		pm_request_autosuspend(&sw->dev);
	}

	return 0;
}
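/*
 * Illustrative sketch, not part of the driver: the usual switch
 * lifecycle as seen from a connection manager. tb_switch_alloc(),
 * tb_switch_configure() and tb_switch_put() are declared in tb.h; the
 * flow below is a hypothetical example, loosely following how the
 * in-tree connection managers drive it.
 *
 *	sw = tb_switch_alloc(tb, &parent->dev, route);
 *	if (IS_ERR(sw))
 *		return PTR_ERR(sw);
 *
 *	if (tb_switch_configure(sw) || tb_switch_add(sw)) {
 *		tb_switch_put(sw);
 *		return -ENODEV;
 *	}
 */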
/**
 * tb_switch_remove() - Remove and release a switch
 * @sw: Switch to remove
 *
 * This removes the switch from the domain and releases it after the
 * last reference count drops to zero. If there are switches connected
 * below this switch, they are removed as well.
 */
void tb_switch_remove(struct tb_switch *sw)
{
	struct tb_port *port;

	if (sw->rpm) {
		pm_runtime_get_sync(&sw->dev);
		pm_runtime_disable(&sw->dev);
	}

	/* port 0 is the switch itself and never has a remote */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else if (port->xdomain) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
		}
	}

	if (!sw->is_unplugged)
		tb_plug_events_active(sw, false);
	tb_lc_unconfigure_link(sw);

	tb_switch_nvm_remove(sw);

	if (tb_route(sw))
		dev_info(&sw->dev, "device disconnected\n");
	device_unregister(&sw->dev);
}

/**
 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
 * @sw: Switch to mark as unplugged
 */
void tb_sw_set_unplugged(struct tb_switch *sw)
{
	struct tb_port *port;

	if (sw == sw->tb->root_switch) {
		tb_sw_WARN(sw, "cannot unplug root switch\n");
		return;
	}
	if (sw->is_unplugged) {
		tb_sw_WARN(sw, "is_unplugged already set\n");
		return;
	}
	sw->is_unplugged = true;
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_sw_set_unplugged(port->remote->sw);
		else if (port->xdomain)
			port->xdomain->is_unplugged = true;
	}
}
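/*
 * Illustrative sketch, not part of the driver: on a hot-unplug event
 * the connection manager first marks the whole subtree unplugged and
 * only then removes it, so that tb_switch_remove() skips the config
 * space accesses that would fail against hardware that is already
 * gone. The port/event context here is a hypothetical example.
 *
 *	if (tb_port_has_remote(port)) {
 *		tb_sw_set_unplugged(port->remote->sw);
 *		tb_switch_remove(port->remote->sw);
 *		port->remote = NULL;
 *	}
 */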
int tb_switch_resume(struct tb_switch *sw)
{
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "resuming switch\n");

	/*
	 * Check the UID of the connected switches, except for the root
	 * switch, which we assume cannot be removed.
	 */
	if (tb_route(sw)) {
		u64 uid;

		/*
		 * Check first that we can still read the switch config
		 * space. It may be that there is now another domain
		 * connected.
		 */
		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
		if (err < 0) {
			tb_sw_info(sw, "switch not present anymore\n");
			return err;
		}

		err = tb_drom_read_uid_only(sw, &uid);
		if (err) {
			tb_sw_warn(sw, "uid read failed\n");
			return err;
		}
		if (sw->uid != uid) {
			tb_sw_info(sw,
				"changed while suspended (uid %#llx -> %#llx)\n",
				sw->uid, uid);
			return -ENODEV;
		}
	}

	/* upload configuration */
	err = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3);
	if (err)
		return err;

	err = tb_lc_configure_link(sw);
	if (err)
		return err;

	err = tb_plug_events_active(sw, true);
	if (err)
		return err;

	/* check for surviving downstream switches */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (tb_wait_for_port(port, true) <= 0) {
			tb_port_warn(port,
				     "lost during suspend, disconnecting\n");
			if (tb_port_has_remote(port))
				tb_sw_set_unplugged(port->remote->sw);
			else if (port->xdomain)
				port->xdomain->is_unplugged = true;
		} else if (tb_port_has_remote(port)) {
			if (tb_switch_resume(port->remote->sw)) {
				tb_port_warn(port,
					     "lost during suspend, disconnecting\n");
				tb_sw_set_unplugged(port->remote->sw);
			}
		}
	}
	return 0;
}

void tb_switch_suspend(struct tb_switch *sw)
{
	struct tb_port *port;
	int err;

	err = tb_plug_events_active(sw, false);
	if (err)
		return;

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_switch_suspend(port->remote->sw);
	}

	tb_lc_set_sleep(sw);
}

/**
 * tb_switch_query_dp_resource() - Query availability of DP resource
 * @sw: Switch whose DP resource is queried
 * @in: DP IN port
 *
 * Queries availability of DP resource for DP tunneling using switch
 * specific means. Returns %true if the resource is available.
 */
bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	return tb_lc_dp_sink_query(sw, in);
}

/**
 * tb_switch_alloc_dp_resource() - Allocate available DP resource
 * @sw: Switch whose DP resource is allocated
 * @in: DP IN port
 *
 * Allocates DP resource for DP tunneling. The resource must be
 * available for this to succeed (see tb_switch_query_dp_resource()).
 * Returns %0 on success and negative errno otherwise.
 */
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	return tb_lc_dp_sink_alloc(sw, in);
}
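/*
 * Illustrative sketch, not part of the driver: a connection manager
 * pairs the query and the allocation like this before setting up a DP
 * tunnel; @in is the DP IN adapter of @sw. The surrounding flow is a
 * hypothetical example.
 *
 *	if (!tb_switch_query_dp_resource(sw, in))
 *		return -EBUSY;
 *
 *	ret = tb_switch_alloc_dp_resource(sw, in);
 *	if (ret)
 *		return ret;
 *
 *	...set up the DP tunnel; on teardown call:
 *	tb_switch_dealloc_dp_resource(sw, in);
 */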
/**
 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
 * @sw: Switch whose DP resource is de-allocated
 * @in: DP IN port
 *
 * De-allocates the DP resource that was previously allocated for DP
 * tunneling.
 */
void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	if (tb_lc_dp_sink_dealloc(sw, in)) {
		tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
			   in->port);
	}
}

struct tb_sw_lookup {
	struct tb *tb;
	u8 link;
	u8 depth;
	const uuid_t *uuid;
	u64 route;
};

static int tb_switch_match(struct device *dev, const void *data)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_sw_lookup *lookup = data;

	if (!sw)
		return 0;
	if (sw->tb != lookup->tb)
		return 0;

	if (lookup->uuid)
		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));

	if (lookup->route) {
		return sw->config.route_lo == lower_32_bits(lookup->route) &&
		       sw->config.route_hi == upper_32_bits(lookup->route);
	}

	/* Root switch is matched only by depth */
	if (!lookup->depth)
		return !sw->depth;

	return sw->link == lookup->link && sw->depth == lookup->depth;
}

/**
 * tb_switch_find_by_link_depth() - Find switch by link and depth
 * @tb: Domain the switch belongs to
 * @link: Link number the switch is connected to
 * @depth: Depth of the switch in the link
 *
 * The returned switch has its reference count increased, so the caller
 * needs to call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.link = link;
	lookup.depth = depth;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_uuid() - Find switch by UUID
 * @tb: Domain the switch belongs to
 * @uuid: UUID to look for
 *
 * The returned switch has its reference count increased, so the caller
 * needs to call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.uuid = uuid;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_route() - Find switch by route string
 * @tb: Domain the switch belongs to
 * @route: Route string to look for
 *
 * The returned switch has its reference count increased, so the caller
 * needs to call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	if (!route)
		return tb_switch_get(tb->root_switch);

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.route = route;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

void tb_switch_exit(void)
{
	ida_destroy(&nvm_ida);
}
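/*
 * Illustrative sketch, not part of the driver: the tb_switch_find_*()
 * helpers above return a reference-counted device, so every successful
 * lookup must be balanced with tb_switch_put(). The route value and the
 * surrounding context are a hypothetical example.
 *
 *	struct tb_switch *sw;
 *
 *	sw = tb_switch_find_by_route(tb, route);
 *	if (sw) {
 *		...use the switch...
 *		tb_switch_put(sw);
 *	}
 */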