1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Thunderbolt driver - bus logic (NHI independent) 4 * 5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> 6 * Copyright (C) 2019, Intel Corporation 7 */ 8 9 #include <linux/slab.h> 10 #include <linux/errno.h> 11 #include <linux/delay.h> 12 #include <linux/pm_runtime.h> 13 #include <linux/platform_data/x86/apple.h> 14 15 #include "tb.h" 16 #include "tb_regs.h" 17 #include "tunnel.h" 18 19 #define TB_TIMEOUT 100 /* ms */ 20 #define MAX_GROUPS 7 /* max Group_ID is 7 */ 21 22 /** 23 * struct tb_cm - Simple Thunderbolt connection manager 24 * @tunnel_list: List of active tunnels 25 * @dp_resources: List of available DP resources for DP tunneling 26 * @hotplug_active: tb_handle_hotplug will stop progressing plug 27 * events and exit if this is not set (it needs to 28 * acquire the lock one more time). Used to drain wq 29 * after cfg has been paused. 30 * @remove_work: Work used to remove any unplugged routers after 31 * runtime resume 32 * @groups: Bandwidth groups used in this domain. 33 */ 34 struct tb_cm { 35 struct list_head tunnel_list; 36 struct list_head dp_resources; 37 bool hotplug_active; 38 struct delayed_work remove_work; 39 struct tb_bandwidth_group groups[MAX_GROUPS]; 40 }; 41 42 static inline struct tb *tcm_to_tb(struct tb_cm *tcm) 43 { 44 return ((void *)tcm - sizeof(struct tb)); 45 } 46 47 struct tb_hotplug_event { 48 struct work_struct work; 49 struct tb *tb; 50 u64 route; 51 u8 port; 52 bool unplug; 53 }; 54 55 static void tb_init_bandwidth_groups(struct tb_cm *tcm) 56 { 57 int i; 58 59 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) { 60 struct tb_bandwidth_group *group = &tcm->groups[i]; 61 62 group->tb = tcm_to_tb(tcm); 63 group->index = i + 1; 64 INIT_LIST_HEAD(&group->ports); 65 } 66 } 67 68 static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group, 69 struct tb_port *in) 70 { 71 if (!group || WARN_ON(in->group)) 72 return; 73 74 in->group = group; 75 list_add_tail(&in->group_list, &group->ports); 76 77 tb_port_dbg(in, "attached to bandwidth group %d\n", group->index); 78 } 79 80 static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm) 81 { 82 int i; 83 84 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) { 85 struct tb_bandwidth_group *group = &tcm->groups[i]; 86 87 if (list_empty(&group->ports)) 88 return group; 89 } 90 91 return NULL; 92 } 93 94 static struct tb_bandwidth_group * 95 tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in, 96 struct tb_port *out) 97 { 98 struct tb_bandwidth_group *group; 99 struct tb_tunnel *tunnel; 100 101 /* 102 * Find all DP tunnels that go through all the same USB4 links 103 * as this one. Because we always setup tunnels the same way we 104 * can just check for the routers at both ends of the tunnels 105 * and if they are the same we have a match. 
106 */ 107 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { 108 if (!tb_tunnel_is_dp(tunnel)) 109 continue; 110 111 if (tunnel->src_port->sw == in->sw && 112 tunnel->dst_port->sw == out->sw) { 113 group = tunnel->src_port->group; 114 if (group) { 115 tb_bandwidth_group_attach_port(group, in); 116 return group; 117 } 118 } 119 } 120 121 /* Pick up next available group then */ 122 group = tb_find_free_bandwidth_group(tcm); 123 if (group) 124 tb_bandwidth_group_attach_port(group, in); 125 else 126 tb_port_warn(in, "no available bandwidth groups\n"); 127 128 return group; 129 } 130 131 static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in, 132 struct tb_port *out) 133 { 134 if (usb4_dp_port_bw_mode_enabled(in)) { 135 int index, i; 136 137 index = usb4_dp_port_group_id(in); 138 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) { 139 if (tcm->groups[i].index == index) { 140 tb_bandwidth_group_attach_port(&tcm->groups[i], in); 141 return; 142 } 143 } 144 } 145 146 tb_attach_bandwidth_group(tcm, in, out); 147 } 148 149 static void tb_detach_bandwidth_group(struct tb_port *in) 150 { 151 struct tb_bandwidth_group *group = in->group; 152 153 if (group) { 154 in->group = NULL; 155 list_del_init(&in->group_list); 156 157 tb_port_dbg(in, "detached from bandwidth group %d\n", group->index); 158 } 159 } 160 161 static void tb_handle_hotplug(struct work_struct *work); 162 163 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug) 164 { 165 struct tb_hotplug_event *ev; 166 167 ev = kmalloc(sizeof(*ev), GFP_KERNEL); 168 if (!ev) 169 return; 170 171 ev->tb = tb; 172 ev->route = route; 173 ev->port = port; 174 ev->unplug = unplug; 175 INIT_WORK(&ev->work, tb_handle_hotplug); 176 queue_work(tb->wq, &ev->work); 177 } 178 179 /* enumeration & hot plug handling */ 180 181 static void tb_add_dp_resources(struct tb_switch *sw) 182 { 183 struct tb_cm *tcm = tb_priv(sw->tb); 184 struct tb_port *port; 185 186 tb_switch_for_each_port(sw, port) { 187 if (!tb_port_is_dpin(port)) 188 continue; 189 190 if (!tb_switch_query_dp_resource(sw, port)) 191 continue; 192 193 list_add_tail(&port->list, &tcm->dp_resources); 194 tb_port_dbg(port, "DP IN resource available\n"); 195 } 196 } 197 198 static void tb_remove_dp_resources(struct tb_switch *sw) 199 { 200 struct tb_cm *tcm = tb_priv(sw->tb); 201 struct tb_port *port, *tmp; 202 203 /* Clear children resources first */ 204 tb_switch_for_each_port(sw, port) { 205 if (tb_port_has_remote(port)) 206 tb_remove_dp_resources(port->remote->sw); 207 } 208 209 list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) { 210 if (port->sw == sw) { 211 tb_port_dbg(port, "DP OUT resource unavailable\n"); 212 list_del_init(&port->list); 213 } 214 } 215 } 216 217 static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port) 218 { 219 struct tb_cm *tcm = tb_priv(tb); 220 struct tb_port *p; 221 222 list_for_each_entry(p, &tcm->dp_resources, list) { 223 if (p == port) 224 return; 225 } 226 227 tb_port_dbg(port, "DP %s resource available discovered\n", 228 tb_port_is_dpin(port) ? 
"IN" : "OUT"); 229 list_add_tail(&port->list, &tcm->dp_resources); 230 } 231 232 static void tb_discover_dp_resources(struct tb *tb) 233 { 234 struct tb_cm *tcm = tb_priv(tb); 235 struct tb_tunnel *tunnel; 236 237 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { 238 if (tb_tunnel_is_dp(tunnel)) 239 tb_discover_dp_resource(tb, tunnel->dst_port); 240 } 241 } 242 243 /* Enables CL states up to host router */ 244 static int tb_enable_clx(struct tb_switch *sw) 245 { 246 struct tb_cm *tcm = tb_priv(sw->tb); 247 const struct tb_tunnel *tunnel; 248 int ret; 249 250 /* 251 * Currently only enable CLx for the first link. This is enough 252 * to allow the CPU to save energy at least on Intel hardware 253 * and makes it slightly simpler to implement. We may change 254 * this in the future to cover the whole topology if it turns 255 * out to be beneficial. 256 */ 257 while (sw && sw->config.depth > 1) 258 sw = tb_switch_parent(sw); 259 260 if (!sw) 261 return 0; 262 263 if (sw->config.depth != 1) 264 return 0; 265 266 /* 267 * If we are re-enabling then check if there is an active DMA 268 * tunnel and in that case bail out. 269 */ 270 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { 271 if (tb_tunnel_is_dma(tunnel)) { 272 if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw))) 273 return 0; 274 } 275 } 276 277 /* 278 * CL0s and CL1 are enabled and supported together. 279 * Silently ignore CLx enabling in case CLx is not supported. 280 */ 281 ret = tb_switch_clx_enable(sw, TB_CL0S | TB_CL1); 282 return ret == -EOPNOTSUPP ? 0 : ret; 283 } 284 285 /* Disables CL states up to the host router */ 286 static void tb_disable_clx(struct tb_switch *sw) 287 { 288 do { 289 if (tb_switch_clx_disable(sw) < 0) 290 tb_sw_warn(sw, "failed to disable CL states\n"); 291 sw = tb_switch_parent(sw); 292 } while (sw); 293 } 294 295 static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data) 296 { 297 struct tb_switch *sw; 298 299 sw = tb_to_switch(dev); 300 if (sw) { 301 tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, 302 tb_switch_clx_is_enabled(sw, TB_CL1)); 303 if (tb_switch_tmu_enable(sw)) 304 tb_sw_warn(sw, "failed to increase TMU rate\n"); 305 } 306 307 return 0; 308 } 309 310 static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel) 311 { 312 struct tb_switch *sw; 313 314 if (!tunnel) 315 return; 316 317 /* 318 * Once first DP tunnel is established we change the TMU 319 * accuracy of first depth child routers (and the host router) 320 * to the highest. This is needed for the DP tunneling to work 321 * but also allows CL0s. 322 */ 323 sw = tunnel->tb->root_switch; 324 device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy); 325 } 326 327 static int tb_enable_tmu(struct tb_switch *sw) 328 { 329 int ret; 330 331 /* 332 * If CL1 is enabled then we need to configure the TMU accuracy 333 * level to normal. Otherwise we keep the TMU running at the 334 * highest accuracy. 
335 */ 336 if (tb_switch_clx_is_enabled(sw, TB_CL1)) 337 ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true); 338 else 339 ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false); 340 if (ret) 341 return ret; 342 343 /* If it is already enabled in correct mode, don't touch it */ 344 if (tb_switch_tmu_is_enabled(sw)) 345 return 0; 346 347 ret = tb_switch_tmu_disable(sw); 348 if (ret) 349 return ret; 350 351 ret = tb_switch_tmu_post_time(sw); 352 if (ret) 353 return ret; 354 355 return tb_switch_tmu_enable(sw); 356 } 357 358 static void tb_switch_discover_tunnels(struct tb_switch *sw, 359 struct list_head *list, 360 bool alloc_hopids) 361 { 362 struct tb *tb = sw->tb; 363 struct tb_port *port; 364 365 tb_switch_for_each_port(sw, port) { 366 struct tb_tunnel *tunnel = NULL; 367 368 switch (port->config.type) { 369 case TB_TYPE_DP_HDMI_IN: 370 tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids); 371 tb_increase_tmu_accuracy(tunnel); 372 break; 373 374 case TB_TYPE_PCIE_DOWN: 375 tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids); 376 break; 377 378 case TB_TYPE_USB3_DOWN: 379 tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids); 380 break; 381 382 default: 383 break; 384 } 385 386 if (tunnel) 387 list_add_tail(&tunnel->list, list); 388 } 389 390 tb_switch_for_each_port(sw, port) { 391 if (tb_port_has_remote(port)) { 392 tb_switch_discover_tunnels(port->remote->sw, list, 393 alloc_hopids); 394 } 395 } 396 } 397 398 static void tb_discover_tunnels(struct tb *tb) 399 { 400 struct tb_cm *tcm = tb_priv(tb); 401 struct tb_tunnel *tunnel; 402 403 tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true); 404 405 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { 406 if (tb_tunnel_is_pci(tunnel)) { 407 struct tb_switch *parent = tunnel->dst_port->sw; 408 409 while (parent != tunnel->src_port->sw) { 410 parent->boot = true; 411 parent = tb_switch_parent(parent); 412 } 413 } else if (tb_tunnel_is_dp(tunnel)) { 414 struct tb_port *in = tunnel->src_port; 415 struct tb_port *out = tunnel->dst_port; 416 417 /* Keep the domain from powering down */ 418 pm_runtime_get_sync(&in->sw->dev); 419 pm_runtime_get_sync(&out->sw->dev); 420 421 tb_discover_bandwidth_group(tcm, in, out); 422 } 423 } 424 } 425 426 static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd) 427 { 428 if (tb_switch_is_usb4(port->sw)) 429 return usb4_port_configure_xdomain(port, xd); 430 return tb_lc_configure_xdomain(port); 431 } 432 433 static void tb_port_unconfigure_xdomain(struct tb_port *port) 434 { 435 if (tb_switch_is_usb4(port->sw)) 436 usb4_port_unconfigure_xdomain(port); 437 else 438 tb_lc_unconfigure_xdomain(port); 439 440 tb_port_enable(port->dual_link_port); 441 } 442 443 static void tb_scan_xdomain(struct tb_port *port) 444 { 445 struct tb_switch *sw = port->sw; 446 struct tb *tb = sw->tb; 447 struct tb_xdomain *xd; 448 u64 route; 449 450 if (!tb_is_xdomain_enabled()) 451 return; 452 453 route = tb_downstream_route(port); 454 xd = tb_xdomain_find_by_route(tb, route); 455 if (xd) { 456 tb_xdomain_put(xd); 457 return; 458 } 459 460 xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid, 461 NULL); 462 if (xd) { 463 tb_port_at(route, sw)->xdomain = xd; 464 tb_port_configure_xdomain(port, xd); 465 tb_xdomain_add(xd); 466 } 467 } 468 469 /** 470 * tb_find_unused_port() - return the first inactive port on @sw 471 * @sw: Switch to find the port on 472 * @type: Port type to look for 473 */ 474 static struct tb_port *tb_find_unused_port(struct 
tb_switch *sw, 475 enum tb_port_type type) 476 { 477 struct tb_port *port; 478 479 tb_switch_for_each_port(sw, port) { 480 if (tb_is_upstream_port(port)) 481 continue; 482 if (port->config.type != type) 483 continue; 484 if (!port->cap_adap) 485 continue; 486 if (tb_port_is_enabled(port)) 487 continue; 488 return port; 489 } 490 return NULL; 491 } 492 493 static struct tb_port *tb_find_usb3_down(struct tb_switch *sw, 494 const struct tb_port *port) 495 { 496 struct tb_port *down; 497 498 down = usb4_switch_map_usb3_down(sw, port); 499 if (down && !tb_usb3_port_is_enabled(down)) 500 return down; 501 return NULL; 502 } 503 504 static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type, 505 struct tb_port *src_port, 506 struct tb_port *dst_port) 507 { 508 struct tb_cm *tcm = tb_priv(tb); 509 struct tb_tunnel *tunnel; 510 511 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { 512 if (tunnel->type == type && 513 ((src_port && src_port == tunnel->src_port) || 514 (dst_port && dst_port == tunnel->dst_port))) { 515 return tunnel; 516 } 517 } 518 519 return NULL; 520 } 521 522 static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb, 523 struct tb_port *src_port, 524 struct tb_port *dst_port) 525 { 526 struct tb_port *port, *usb3_down; 527 struct tb_switch *sw; 528 529 /* Pick the router that is deepest in the topology */ 530 if (dst_port->sw->config.depth > src_port->sw->config.depth) 531 sw = dst_port->sw; 532 else 533 sw = src_port->sw; 534 535 /* Can't be the host router */ 536 if (sw == tb->root_switch) 537 return NULL; 538 539 /* Find the downstream USB4 port that leads to this router */ 540 port = tb_port_at(tb_route(sw), tb->root_switch); 541 /* Find the corresponding host router USB3 downstream port */ 542 usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port); 543 if (!usb3_down) 544 return NULL; 545 546 return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL); 547 } 548 549 static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port, 550 struct tb_port *dst_port, int *available_up, int *available_down) 551 { 552 int usb3_consumed_up, usb3_consumed_down, ret; 553 struct tb_cm *tcm = tb_priv(tb); 554 struct tb_tunnel *tunnel; 555 struct tb_port *port; 556 557 tb_dbg(tb, "calculating available bandwidth between %llx:%u <-> %llx:%u\n", 558 tb_route(src_port->sw), src_port->port, tb_route(dst_port->sw), 559 dst_port->port); 560 561 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); 562 if (tunnel && tunnel->src_port != src_port && 563 tunnel->dst_port != dst_port) { 564 ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up, 565 &usb3_consumed_down); 566 if (ret) 567 return ret; 568 } else { 569 usb3_consumed_up = 0; 570 usb3_consumed_down = 0; 571 } 572 573 /* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */ 574 *available_up = *available_down = 120000; 575 576 /* Find the minimum available bandwidth over all links */ 577 tb_for_each_port_on_path(src_port, dst_port, port) { 578 int link_speed, link_width, up_bw, down_bw; 579 580 if (!tb_port_is_null(port)) 581 continue; 582 583 if (tb_is_upstream_port(port)) { 584 link_speed = port->sw->link_speed; 585 /* 586 * sw->link_width is from upstream perspective 587 * so we use the opposite for downstream of the 588 * host router. 
589 */ 590 if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) { 591 up_bw = link_speed * 3 * 1000; 592 down_bw = link_speed * 1 * 1000; 593 } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) { 594 up_bw = link_speed * 1 * 1000; 595 down_bw = link_speed * 3 * 1000; 596 } else { 597 up_bw = link_speed * port->sw->link_width * 1000; 598 down_bw = up_bw; 599 } 600 } else { 601 link_speed = tb_port_get_link_speed(port); 602 if (link_speed < 0) 603 return link_speed; 604 605 link_width = tb_port_get_link_width(port); 606 if (link_width < 0) 607 return link_width; 608 609 if (link_width == TB_LINK_WIDTH_ASYM_TX) { 610 up_bw = link_speed * 1 * 1000; 611 down_bw = link_speed * 3 * 1000; 612 } else if (link_width == TB_LINK_WIDTH_ASYM_RX) { 613 up_bw = link_speed * 3 * 1000; 614 down_bw = link_speed * 1 * 1000; 615 } else { 616 up_bw = link_speed * link_width * 1000; 617 down_bw = up_bw; 618 } 619 } 620 621 /* Leave 10% guard band */ 622 up_bw -= up_bw / 10; 623 down_bw -= down_bw / 10; 624 625 tb_port_dbg(port, "link total bandwidth %d/%d Mb/s\n", up_bw, 626 down_bw); 627 628 /* 629 * Find all DP tunnels that cross the port and reduce 630 * their consumed bandwidth from the available. 631 */ 632 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { 633 int dp_consumed_up, dp_consumed_down; 634 635 if (tb_tunnel_is_invalid(tunnel)) 636 continue; 637 638 if (!tb_tunnel_is_dp(tunnel)) 639 continue; 640 641 if (!tb_tunnel_port_on_path(tunnel, port)) 642 continue; 643 644 /* 645 * Ignore the DP tunnel between src_port and 646 * dst_port because it is the same tunnel and we 647 * may be re-calculating estimated bandwidth. 648 */ 649 if (tunnel->src_port == src_port && 650 tunnel->dst_port == dst_port) 651 continue; 652 653 ret = tb_tunnel_consumed_bandwidth(tunnel, 654 &dp_consumed_up, 655 &dp_consumed_down); 656 if (ret) 657 return ret; 658 659 up_bw -= dp_consumed_up; 660 down_bw -= dp_consumed_down; 661 } 662 663 /* 664 * If USB3 is tunneled from the host router down to the 665 * branch leading to port we need to take USB3 consumed 666 * bandwidth into account regardless whether it actually 667 * crosses the port. 668 */ 669 up_bw -= usb3_consumed_up; 670 down_bw -= usb3_consumed_down; 671 672 if (up_bw < *available_up) 673 *available_up = up_bw; 674 if (down_bw < *available_down) 675 *available_down = down_bw; 676 } 677 678 if (*available_up < 0) 679 *available_up = 0; 680 if (*available_down < 0) 681 *available_down = 0; 682 683 return 0; 684 } 685 686 static int tb_release_unused_usb3_bandwidth(struct tb *tb, 687 struct tb_port *src_port, 688 struct tb_port *dst_port) 689 { 690 struct tb_tunnel *tunnel; 691 692 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); 693 return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0; 694 } 695 696 static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port, 697 struct tb_port *dst_port) 698 { 699 int ret, available_up, available_down; 700 struct tb_tunnel *tunnel; 701 702 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); 703 if (!tunnel) 704 return; 705 706 tb_dbg(tb, "reclaiming unused bandwidth for USB3\n"); 707 708 /* 709 * Calculate available bandwidth for the first hop USB3 tunnel. 710 * That determines the whole USB3 bandwidth for this branch. 
711 */ 712 ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port, 713 &available_up, &available_down); 714 if (ret) { 715 tb_warn(tb, "failed to calculate available bandwidth\n"); 716 return; 717 } 718 719 tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n", 720 available_up, available_down); 721 722 tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down); 723 } 724 725 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw) 726 { 727 struct tb_switch *parent = tb_switch_parent(sw); 728 int ret, available_up, available_down; 729 struct tb_port *up, *down, *port; 730 struct tb_cm *tcm = tb_priv(tb); 731 struct tb_tunnel *tunnel; 732 733 if (!tb_acpi_may_tunnel_usb3()) { 734 tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n"); 735 return 0; 736 } 737 738 up = tb_switch_find_port(sw, TB_TYPE_USB3_UP); 739 if (!up) 740 return 0; 741 742 if (!sw->link_usb4) 743 return 0; 744 745 /* 746 * Look up available down port. Since we are chaining it should 747 * be found right above this switch. 748 */ 749 port = tb_switch_downstream_port(sw); 750 down = tb_find_usb3_down(parent, port); 751 if (!down) 752 return 0; 753 754 if (tb_route(parent)) { 755 struct tb_port *parent_up; 756 /* 757 * Check first that the parent switch has its upstream USB3 758 * port enabled. Otherwise the chain is not complete and 759 * there is no point setting up a new tunnel. 760 */ 761 parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP); 762 if (!parent_up || !tb_port_is_enabled(parent_up)) 763 return 0; 764 765 /* Make all unused bandwidth available for the new tunnel */ 766 ret = tb_release_unused_usb3_bandwidth(tb, down, up); 767 if (ret) 768 return ret; 769 } 770 771 ret = tb_available_bandwidth(tb, down, up, &available_up, 772 &available_down); 773 if (ret) 774 goto err_reclaim; 775 776 tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n", 777 available_up, available_down); 778 779 tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up, 780 available_down); 781 if (!tunnel) { 782 ret = -ENOMEM; 783 goto err_reclaim; 784 } 785 786 if (tb_tunnel_activate(tunnel)) { 787 tb_port_info(up, 788 "USB3 tunnel activation failed, aborting\n"); 789 ret = -EIO; 790 goto err_free; 791 } 792 793 list_add_tail(&tunnel->list, &tcm->tunnel_list); 794 if (tb_route(parent)) 795 tb_reclaim_usb3_bandwidth(tb, down, up); 796 797 return 0; 798 799 err_free: 800 tb_tunnel_free(tunnel); 801 err_reclaim: 802 if (tb_route(parent)) 803 tb_reclaim_usb3_bandwidth(tb, down, up); 804 805 return ret; 806 } 807 808 static int tb_create_usb3_tunnels(struct tb_switch *sw) 809 { 810 struct tb_port *port; 811 int ret; 812 813 if (!tb_acpi_may_tunnel_usb3()) 814 return 0; 815 816 if (tb_route(sw)) { 817 ret = tb_tunnel_usb3(sw->tb, sw); 818 if (ret) 819 return ret; 820 } 821 822 tb_switch_for_each_port(sw, port) { 823 if (!tb_port_has_remote(port)) 824 continue; 825 ret = tb_create_usb3_tunnels(port->remote->sw); 826 if (ret) 827 return ret; 828 } 829 830 return 0; 831 } 832 833 static void tb_scan_port(struct tb_port *port); 834 835 /* 836 * tb_scan_switch() - scan for and initialize downstream switches 837 */ 838 static void tb_scan_switch(struct tb_switch *sw) 839 { 840 struct tb_port *port; 841 842 pm_runtime_get_sync(&sw->dev); 843 844 tb_switch_for_each_port(sw, port) 845 tb_scan_port(port); 846 847 pm_runtime_mark_last_busy(&sw->dev); 848 pm_runtime_put_autosuspend(&sw->dev); 849 } 850 851 /* 852 * tb_scan_port() - check for and initialize switches below port 853 */ 854 
static void tb_scan_port(struct tb_port *port) 855 { 856 struct tb_cm *tcm = tb_priv(port->sw->tb); 857 struct tb_port *upstream_port; 858 bool discovery = false; 859 struct tb_switch *sw; 860 861 if (tb_is_upstream_port(port)) 862 return; 863 864 if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 && 865 !tb_dp_port_is_enabled(port)) { 866 tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n"); 867 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port, 868 false); 869 return; 870 } 871 872 if (port->config.type != TB_TYPE_PORT) 873 return; 874 if (port->dual_link_port && port->link_nr) 875 return; /* 876 * Downstream switch is reachable through two ports. 877 * Only scan on the primary port (link_nr == 0). 878 */ 879 880 if (port->usb4) 881 pm_runtime_get_sync(&port->usb4->dev); 882 883 if (tb_wait_for_port(port, false) <= 0) 884 goto out_rpm_put; 885 if (port->remote) { 886 tb_port_dbg(port, "port already has a remote\n"); 887 goto out_rpm_put; 888 } 889 890 tb_retimer_scan(port, true); 891 892 sw = tb_switch_alloc(port->sw->tb, &port->sw->dev, 893 tb_downstream_route(port)); 894 if (IS_ERR(sw)) { 895 /* 896 * If there is an error accessing the connected switch 897 * it may be connected to another domain. Also we allow 898 * the other domain to be connected to a max depth switch. 899 */ 900 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL) 901 tb_scan_xdomain(port); 902 goto out_rpm_put; 903 } 904 905 if (tb_switch_configure(sw)) { 906 tb_switch_put(sw); 907 goto out_rpm_put; 908 } 909 910 /* 911 * If there was previously another domain connected remove it 912 * first. 913 */ 914 if (port->xdomain) { 915 tb_xdomain_remove(port->xdomain); 916 tb_port_unconfigure_xdomain(port); 917 port->xdomain = NULL; 918 } 919 920 /* 921 * Do not send uevents until we have discovered all existing 922 * tunnels and know which switches were authorized already by 923 * the boot firmware. 924 */ 925 if (!tcm->hotplug_active) { 926 dev_set_uevent_suppress(&sw->dev, true); 927 discovery = true; 928 } 929 930 /* 931 * At the moment Thunderbolt 2 and beyond (devices with LC) we 932 * can support runtime PM. 933 */ 934 sw->rpm = sw->generation > 1; 935 936 if (tb_switch_add(sw)) { 937 tb_switch_put(sw); 938 goto out_rpm_put; 939 } 940 941 /* Link the switches using both links if available */ 942 upstream_port = tb_upstream_port(sw); 943 port->remote = upstream_port; 944 upstream_port->remote = port; 945 if (port->dual_link_port && upstream_port->dual_link_port) { 946 port->dual_link_port->remote = upstream_port->dual_link_port; 947 upstream_port->dual_link_port->remote = port->dual_link_port; 948 } 949 950 /* Enable lane bonding if supported */ 951 tb_switch_lane_bonding_enable(sw); 952 /* Set the link configured */ 953 tb_switch_configure_link(sw); 954 /* 955 * CL0s and CL1 are enabled and supported together. 956 * Silently ignore CLx enabling in case CLx is not supported. 957 */ 958 if (discovery) 959 tb_sw_dbg(sw, "discovery, not touching CL states\n"); 960 else if (tb_enable_clx(sw)) 961 tb_sw_warn(sw, "failed to enable CL states\n"); 962 963 if (tb_enable_tmu(sw)) 964 tb_sw_warn(sw, "failed to enable TMU\n"); 965 966 /* Scan upstream retimers */ 967 tb_retimer_scan(upstream_port, true); 968 969 /* 970 * Create USB 3.x tunnels only when the switch is plugged to the 971 * domain. This is because we scan the domain also during discovery 972 * and want to discover existing USB 3.x tunnels before we create 973 * any new. 
974 */ 975 if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw)) 976 tb_sw_warn(sw, "USB3 tunnel creation failed\n"); 977 978 tb_add_dp_resources(sw); 979 tb_scan_switch(sw); 980 981 out_rpm_put: 982 if (port->usb4) { 983 pm_runtime_mark_last_busy(&port->usb4->dev); 984 pm_runtime_put_autosuspend(&port->usb4->dev); 985 } 986 } 987 988 static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel) 989 { 990 struct tb_port *src_port, *dst_port; 991 struct tb *tb; 992 993 if (!tunnel) 994 return; 995 996 tb_tunnel_deactivate(tunnel); 997 list_del(&tunnel->list); 998 999 tb = tunnel->tb; 1000 src_port = tunnel->src_port; 1001 dst_port = tunnel->dst_port; 1002 1003 switch (tunnel->type) { 1004 case TB_TUNNEL_DP: 1005 tb_detach_bandwidth_group(src_port); 1006 /* 1007 * In case of DP tunnel make sure the DP IN resource is 1008 * deallocated properly. 1009 */ 1010 tb_switch_dealloc_dp_resource(src_port->sw, src_port); 1011 /* Now we can allow the domain to runtime suspend again */ 1012 pm_runtime_mark_last_busy(&dst_port->sw->dev); 1013 pm_runtime_put_autosuspend(&dst_port->sw->dev); 1014 pm_runtime_mark_last_busy(&src_port->sw->dev); 1015 pm_runtime_put_autosuspend(&src_port->sw->dev); 1016 fallthrough; 1017 1018 case TB_TUNNEL_USB3: 1019 tb_reclaim_usb3_bandwidth(tb, src_port, dst_port); 1020 break; 1021 1022 default: 1023 /* 1024 * PCIe and DMA tunnels do not consume guaranteed 1025 * bandwidth. 1026 */ 1027 break; 1028 } 1029 1030 tb_tunnel_free(tunnel); 1031 } 1032 1033 /* 1034 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away 1035 */ 1036 static void tb_free_invalid_tunnels(struct tb *tb) 1037 { 1038 struct tb_cm *tcm = tb_priv(tb); 1039 struct tb_tunnel *tunnel; 1040 struct tb_tunnel *n; 1041 1042 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { 1043 if (tb_tunnel_is_invalid(tunnel)) 1044 tb_deactivate_and_free_tunnel(tunnel); 1045 } 1046 } 1047 1048 /* 1049 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches 1050 */ 1051 static void tb_free_unplugged_children(struct tb_switch *sw) 1052 { 1053 struct tb_port *port; 1054 1055 tb_switch_for_each_port(sw, port) { 1056 if (!tb_port_has_remote(port)) 1057 continue; 1058 1059 if (port->remote->sw->is_unplugged) { 1060 tb_retimer_remove_all(port); 1061 tb_remove_dp_resources(port->remote->sw); 1062 tb_switch_unconfigure_link(port->remote->sw); 1063 tb_switch_lane_bonding_disable(port->remote->sw); 1064 tb_switch_remove(port->remote->sw); 1065 port->remote = NULL; 1066 if (port->dual_link_port) 1067 port->dual_link_port->remote = NULL; 1068 } else { 1069 tb_free_unplugged_children(port->remote->sw); 1070 } 1071 } 1072 } 1073 1074 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw, 1075 const struct tb_port *port) 1076 { 1077 struct tb_port *down = NULL; 1078 1079 /* 1080 * To keep plugging devices consistently in the same PCIe 1081 * hierarchy, do mapping here for switch downstream PCIe ports. 1082 */ 1083 if (tb_switch_is_usb4(sw)) { 1084 down = usb4_switch_map_pcie_down(sw, port); 1085 } else if (!tb_route(sw)) { 1086 int phy_port = tb_phy_port_from_link(port->port); 1087 int index; 1088 1089 /* 1090 * Hard-coded Thunderbolt port to PCIe down port mapping 1091 * per controller. 1092 */ 1093 if (tb_switch_is_cactus_ridge(sw) || 1094 tb_switch_is_alpine_ridge(sw)) 1095 index = !phy_port ? 6 : 7; 1096 else if (tb_switch_is_falcon_ridge(sw)) 1097 index = !phy_port ? 6 : 8; 1098 else if (tb_switch_is_titan_ridge(sw)) 1099 index = !phy_port ? 
8 : 9; 1100 else 1101 goto out; 1102 1103 /* Validate the hard-coding */ 1104 if (WARN_ON(index > sw->config.max_port_number)) 1105 goto out; 1106 1107 down = &sw->ports[index]; 1108 } 1109 1110 if (down) { 1111 if (WARN_ON(!tb_port_is_pcie_down(down))) 1112 goto out; 1113 if (tb_pci_port_is_enabled(down)) 1114 goto out; 1115 1116 return down; 1117 } 1118 1119 out: 1120 return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN); 1121 } 1122 1123 static void 1124 tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group) 1125 { 1126 struct tb_tunnel *first_tunnel; 1127 struct tb *tb = group->tb; 1128 struct tb_port *in; 1129 int ret; 1130 1131 tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n", 1132 group->index); 1133 1134 first_tunnel = NULL; 1135 list_for_each_entry(in, &group->ports, group_list) { 1136 int estimated_bw, estimated_up, estimated_down; 1137 struct tb_tunnel *tunnel; 1138 struct tb_port *out; 1139 1140 if (!usb4_dp_port_bw_mode_enabled(in)) 1141 continue; 1142 1143 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL); 1144 if (WARN_ON(!tunnel)) 1145 break; 1146 1147 if (!first_tunnel) { 1148 /* 1149 * Since USB3 bandwidth is shared by all DP 1150 * tunnels under the host router USB4 port, even 1151 * if they do not begin from the host router, we 1152 * can release USB3 bandwidth just once and not 1153 * for each tunnel separately. 1154 */ 1155 first_tunnel = tunnel; 1156 ret = tb_release_unused_usb3_bandwidth(tb, 1157 first_tunnel->src_port, first_tunnel->dst_port); 1158 if (ret) { 1159 tb_port_warn(in, 1160 "failed to release unused bandwidth\n"); 1161 break; 1162 } 1163 } 1164 1165 out = tunnel->dst_port; 1166 ret = tb_available_bandwidth(tb, in, out, &estimated_up, 1167 &estimated_down); 1168 if (ret) { 1169 tb_port_warn(in, 1170 "failed to re-calculate estimated bandwidth\n"); 1171 break; 1172 } 1173 1174 /* 1175 * Estimated bandwidth includes: 1176 * - already allocated bandwidth for the DP tunnel 1177 * - available bandwidth along the path 1178 * - bandwidth allocated for USB 3.x but not used. 1179 */ 1180 tb_port_dbg(in, "re-calculated estimated bandwidth %u/%u Mb/s\n", 1181 estimated_up, estimated_down); 1182 1183 if (in->sw->config.depth < out->sw->config.depth) 1184 estimated_bw = estimated_down; 1185 else 1186 estimated_bw = estimated_up; 1187 1188 if (usb4_dp_port_set_estimated_bw(in, estimated_bw)) 1189 tb_port_warn(in, "failed to update estimated bandwidth\n"); 1190 } 1191 1192 if (first_tunnel) 1193 tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port, 1194 first_tunnel->dst_port); 1195 1196 tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index); 1197 } 1198 1199 static void tb_recalc_estimated_bandwidth(struct tb *tb) 1200 { 1201 struct tb_cm *tcm = tb_priv(tb); 1202 int i; 1203 1204 tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n"); 1205 1206 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) { 1207 struct tb_bandwidth_group *group = &tcm->groups[i]; 1208 1209 if (!list_empty(&group->ports)) 1210 tb_recalc_estimated_bandwidth_for_group(group); 1211 } 1212 1213 tb_dbg(tb, "bandwidth re-calculation done\n"); 1214 } 1215 1216 static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in) 1217 { 1218 struct tb_port *host_port, *port; 1219 struct tb_cm *tcm = tb_priv(tb); 1220 1221 host_port = tb_route(in->sw) ? 
1222 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL; 1223 1224 list_for_each_entry(port, &tcm->dp_resources, list) { 1225 if (!tb_port_is_dpout(port)) 1226 continue; 1227 1228 if (tb_port_is_enabled(port)) { 1229 tb_port_dbg(port, "DP OUT in use\n"); 1230 continue; 1231 } 1232 1233 tb_port_dbg(port, "DP OUT available\n"); 1234 1235 /* 1236 * Keep the DP tunnel under the topology starting from 1237 * the same host router downstream port. 1238 */ 1239 if (host_port && tb_route(port->sw)) { 1240 struct tb_port *p; 1241 1242 p = tb_port_at(tb_route(port->sw), tb->root_switch); 1243 if (p != host_port) 1244 continue; 1245 } 1246 1247 return port; 1248 } 1249 1250 return NULL; 1251 } 1252 1253 static void tb_tunnel_dp(struct tb *tb) 1254 { 1255 int available_up, available_down, ret, link_nr; 1256 struct tb_cm *tcm = tb_priv(tb); 1257 struct tb_port *port, *in, *out; 1258 struct tb_tunnel *tunnel; 1259 1260 if (!tb_acpi_may_tunnel_dp()) { 1261 tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n"); 1262 return; 1263 } 1264 1265 /* 1266 * Find pair of inactive DP IN and DP OUT adapters and then 1267 * establish a DP tunnel between them. 1268 */ 1269 tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n"); 1270 1271 in = NULL; 1272 out = NULL; 1273 list_for_each_entry(port, &tcm->dp_resources, list) { 1274 if (!tb_port_is_dpin(port)) 1275 continue; 1276 1277 if (tb_port_is_enabled(port)) { 1278 tb_port_dbg(port, "DP IN in use\n"); 1279 continue; 1280 } 1281 1282 tb_port_dbg(port, "DP IN available\n"); 1283 1284 out = tb_find_dp_out(tb, port); 1285 if (out) { 1286 in = port; 1287 break; 1288 } 1289 } 1290 1291 if (!in) { 1292 tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n"); 1293 return; 1294 } 1295 if (!out) { 1296 tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n"); 1297 return; 1298 } 1299 1300 /* 1301 * This is only applicable to links that are not bonded (so 1302 * when Thunderbolt 1 hardware is involved somewhere in the 1303 * topology). For these try to share the DP bandwidth between 1304 * the two lanes. 1305 */ 1306 link_nr = 1; 1307 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { 1308 if (tb_tunnel_is_dp(tunnel)) { 1309 link_nr = 0; 1310 break; 1311 } 1312 } 1313 1314 /* 1315 * DP stream needs the domain to be active so runtime resume 1316 * both ends of the tunnel. 1317 * 1318 * This should bring the routers in the middle active as well 1319 * and keeps the domain from runtime suspending while the DP 1320 * tunnel is active. 
1321 */ 1322 pm_runtime_get_sync(&in->sw->dev); 1323 pm_runtime_get_sync(&out->sw->dev); 1324 1325 if (tb_switch_alloc_dp_resource(in->sw, in)) { 1326 tb_port_dbg(in, "no resource available for DP IN, not tunneling\n"); 1327 goto err_rpm_put; 1328 } 1329 1330 if (!tb_attach_bandwidth_group(tcm, in, out)) 1331 goto err_dealloc_dp; 1332 1333 /* Make all unused USB3 bandwidth available for the new DP tunnel */ 1334 ret = tb_release_unused_usb3_bandwidth(tb, in, out); 1335 if (ret) { 1336 tb_warn(tb, "failed to release unused bandwidth\n"); 1337 goto err_detach_group; 1338 } 1339 1340 ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down); 1341 if (ret) 1342 goto err_reclaim_usb; 1343 1344 tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n", 1345 available_up, available_down); 1346 1347 tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up, 1348 available_down); 1349 if (!tunnel) { 1350 tb_port_dbg(out, "could not allocate DP tunnel\n"); 1351 goto err_reclaim_usb; 1352 } 1353 1354 if (tb_tunnel_activate(tunnel)) { 1355 tb_port_info(out, "DP tunnel activation failed, aborting\n"); 1356 goto err_free; 1357 } 1358 1359 list_add_tail(&tunnel->list, &tcm->tunnel_list); 1360 tb_reclaim_usb3_bandwidth(tb, in, out); 1361 1362 /* Update the domain with the new bandwidth estimation */ 1363 tb_recalc_estimated_bandwidth(tb); 1364 1365 /* 1366 * In case of DP tunnel exists, change host router's 1st children 1367 * TMU mode to HiFi for CL0s to work. 1368 */ 1369 tb_increase_tmu_accuracy(tunnel); 1370 return; 1371 1372 err_free: 1373 tb_tunnel_free(tunnel); 1374 err_reclaim_usb: 1375 tb_reclaim_usb3_bandwidth(tb, in, out); 1376 err_detach_group: 1377 tb_detach_bandwidth_group(in); 1378 err_dealloc_dp: 1379 tb_switch_dealloc_dp_resource(in->sw, in); 1380 err_rpm_put: 1381 pm_runtime_mark_last_busy(&out->sw->dev); 1382 pm_runtime_put_autosuspend(&out->sw->dev); 1383 pm_runtime_mark_last_busy(&in->sw->dev); 1384 pm_runtime_put_autosuspend(&in->sw->dev); 1385 } 1386 1387 static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port) 1388 { 1389 struct tb_port *in, *out; 1390 struct tb_tunnel *tunnel; 1391 1392 if (tb_port_is_dpin(port)) { 1393 tb_port_dbg(port, "DP IN resource unavailable\n"); 1394 in = port; 1395 out = NULL; 1396 } else { 1397 tb_port_dbg(port, "DP OUT resource unavailable\n"); 1398 in = NULL; 1399 out = port; 1400 } 1401 1402 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out); 1403 tb_deactivate_and_free_tunnel(tunnel); 1404 list_del_init(&port->list); 1405 1406 /* 1407 * See if there is another DP OUT port that can be used for 1408 * to create another tunnel. 1409 */ 1410 tb_recalc_estimated_bandwidth(tb); 1411 tb_tunnel_dp(tb); 1412 } 1413 1414 static void tb_dp_resource_available(struct tb *tb, struct tb_port *port) 1415 { 1416 struct tb_cm *tcm = tb_priv(tb); 1417 struct tb_port *p; 1418 1419 if (tb_port_is_enabled(port)) 1420 return; 1421 1422 list_for_each_entry(p, &tcm->dp_resources, list) { 1423 if (p == port) 1424 return; 1425 } 1426 1427 tb_port_dbg(port, "DP %s resource available\n", 1428 tb_port_is_dpin(port) ? "IN" : "OUT"); 1429 list_add_tail(&port->list, &tcm->dp_resources); 1430 1431 /* Look for suitable DP IN <-> DP OUT pairs now */ 1432 tb_tunnel_dp(tb); 1433 } 1434 1435 static void tb_disconnect_and_release_dp(struct tb *tb) 1436 { 1437 struct tb_cm *tcm = tb_priv(tb); 1438 struct tb_tunnel *tunnel, *n; 1439 1440 /* 1441 * Tear down all DP tunnels and release their resources. 
They 1442 * will be re-established after resume based on plug events. 1443 */ 1444 list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) { 1445 if (tb_tunnel_is_dp(tunnel)) 1446 tb_deactivate_and_free_tunnel(tunnel); 1447 } 1448 1449 while (!list_empty(&tcm->dp_resources)) { 1450 struct tb_port *port; 1451 1452 port = list_first_entry(&tcm->dp_resources, 1453 struct tb_port, list); 1454 list_del_init(&port->list); 1455 } 1456 } 1457 1458 static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw) 1459 { 1460 struct tb_tunnel *tunnel; 1461 struct tb_port *up; 1462 1463 up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP); 1464 if (WARN_ON(!up)) 1465 return -ENODEV; 1466 1467 tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up); 1468 if (WARN_ON(!tunnel)) 1469 return -ENODEV; 1470 1471 tb_switch_xhci_disconnect(sw); 1472 1473 tb_tunnel_deactivate(tunnel); 1474 list_del(&tunnel->list); 1475 tb_tunnel_free(tunnel); 1476 return 0; 1477 } 1478 1479 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw) 1480 { 1481 struct tb_port *up, *down, *port; 1482 struct tb_cm *tcm = tb_priv(tb); 1483 struct tb_tunnel *tunnel; 1484 1485 up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP); 1486 if (!up) 1487 return 0; 1488 1489 /* 1490 * Look up available down port. Since we are chaining it should 1491 * be found right above this switch. 1492 */ 1493 port = tb_switch_downstream_port(sw); 1494 down = tb_find_pcie_down(tb_switch_parent(sw), port); 1495 if (!down) 1496 return 0; 1497 1498 tunnel = tb_tunnel_alloc_pci(tb, up, down); 1499 if (!tunnel) 1500 return -ENOMEM; 1501 1502 if (tb_tunnel_activate(tunnel)) { 1503 tb_port_info(up, 1504 "PCIe tunnel activation failed, aborting\n"); 1505 tb_tunnel_free(tunnel); 1506 return -EIO; 1507 } 1508 1509 /* 1510 * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it 1511 * here. 1512 */ 1513 if (tb_switch_pcie_l1_enable(sw)) 1514 tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n"); 1515 1516 if (tb_switch_xhci_connect(sw)) 1517 tb_sw_warn(sw, "failed to connect xHCI\n"); 1518 1519 list_add_tail(&tunnel->list, &tcm->tunnel_list); 1520 return 0; 1521 } 1522 1523 static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, 1524 int transmit_path, int transmit_ring, 1525 int receive_path, int receive_ring) 1526 { 1527 struct tb_cm *tcm = tb_priv(tb); 1528 struct tb_port *nhi_port, *dst_port; 1529 struct tb_tunnel *tunnel; 1530 struct tb_switch *sw; 1531 int ret; 1532 1533 sw = tb_to_switch(xd->dev.parent); 1534 dst_port = tb_port_at(xd->route, sw); 1535 nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI); 1536 1537 mutex_lock(&tb->lock); 1538 1539 /* 1540 * When tunneling DMA paths the link should not enter CL states 1541 * so disable them now. 
1542 */ 1543 tb_disable_clx(sw); 1544 1545 tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path, 1546 transmit_ring, receive_path, receive_ring); 1547 if (!tunnel) { 1548 ret = -ENOMEM; 1549 goto err_clx; 1550 } 1551 1552 if (tb_tunnel_activate(tunnel)) { 1553 tb_port_info(nhi_port, 1554 "DMA tunnel activation failed, aborting\n"); 1555 ret = -EIO; 1556 goto err_free; 1557 } 1558 1559 list_add_tail(&tunnel->list, &tcm->tunnel_list); 1560 mutex_unlock(&tb->lock); 1561 return 0; 1562 1563 err_free: 1564 tb_tunnel_free(tunnel); 1565 err_clx: 1566 tb_enable_clx(sw); 1567 mutex_unlock(&tb->lock); 1568 1569 return ret; 1570 } 1571 1572 static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, 1573 int transmit_path, int transmit_ring, 1574 int receive_path, int receive_ring) 1575 { 1576 struct tb_cm *tcm = tb_priv(tb); 1577 struct tb_port *nhi_port, *dst_port; 1578 struct tb_tunnel *tunnel, *n; 1579 struct tb_switch *sw; 1580 1581 sw = tb_to_switch(xd->dev.parent); 1582 dst_port = tb_port_at(xd->route, sw); 1583 nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI); 1584 1585 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { 1586 if (!tb_tunnel_is_dma(tunnel)) 1587 continue; 1588 if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port) 1589 continue; 1590 1591 if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring, 1592 receive_path, receive_ring)) 1593 tb_deactivate_and_free_tunnel(tunnel); 1594 } 1595 1596 /* 1597 * Try to re-enable CL states now, it is OK if this fails 1598 * because we may still have another DMA tunnel active through 1599 * the same host router USB4 downstream port. 1600 */ 1601 tb_enable_clx(sw); 1602 } 1603 1604 static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, 1605 int transmit_path, int transmit_ring, 1606 int receive_path, int receive_ring) 1607 { 1608 if (!xd->is_unplugged) { 1609 mutex_lock(&tb->lock); 1610 __tb_disconnect_xdomain_paths(tb, xd, transmit_path, 1611 transmit_ring, receive_path, 1612 receive_ring); 1613 mutex_unlock(&tb->lock); 1614 } 1615 return 0; 1616 } 1617 1618 /* hotplug handling */ 1619 1620 /* 1621 * tb_handle_hotplug() - handle hotplug event 1622 * 1623 * Executes on tb->wq. 
1624 */ 1625 static void tb_handle_hotplug(struct work_struct *work) 1626 { 1627 struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work); 1628 struct tb *tb = ev->tb; 1629 struct tb_cm *tcm = tb_priv(tb); 1630 struct tb_switch *sw; 1631 struct tb_port *port; 1632 1633 /* Bring the domain back from sleep if it was suspended */ 1634 pm_runtime_get_sync(&tb->dev); 1635 1636 mutex_lock(&tb->lock); 1637 if (!tcm->hotplug_active) 1638 goto out; /* during init, suspend or shutdown */ 1639 1640 sw = tb_switch_find_by_route(tb, ev->route); 1641 if (!sw) { 1642 tb_warn(tb, 1643 "hotplug event from non existent switch %llx:%x (unplug: %d)\n", 1644 ev->route, ev->port, ev->unplug); 1645 goto out; 1646 } 1647 if (ev->port > sw->config.max_port_number) { 1648 tb_warn(tb, 1649 "hotplug event from non existent port %llx:%x (unplug: %d)\n", 1650 ev->route, ev->port, ev->unplug); 1651 goto put_sw; 1652 } 1653 port = &sw->ports[ev->port]; 1654 if (tb_is_upstream_port(port)) { 1655 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n", 1656 ev->route, ev->port, ev->unplug); 1657 goto put_sw; 1658 } 1659 1660 pm_runtime_get_sync(&sw->dev); 1661 1662 if (ev->unplug) { 1663 tb_retimer_remove_all(port); 1664 1665 if (tb_port_has_remote(port)) { 1666 tb_port_dbg(port, "switch unplugged\n"); 1667 tb_sw_set_unplugged(port->remote->sw); 1668 tb_free_invalid_tunnels(tb); 1669 tb_remove_dp_resources(port->remote->sw); 1670 tb_switch_tmu_disable(port->remote->sw); 1671 tb_switch_unconfigure_link(port->remote->sw); 1672 tb_switch_lane_bonding_disable(port->remote->sw); 1673 tb_switch_remove(port->remote->sw); 1674 port->remote = NULL; 1675 if (port->dual_link_port) 1676 port->dual_link_port->remote = NULL; 1677 /* Maybe we can create another DP tunnel */ 1678 tb_recalc_estimated_bandwidth(tb); 1679 tb_tunnel_dp(tb); 1680 } else if (port->xdomain) { 1681 struct tb_xdomain *xd = tb_xdomain_get(port->xdomain); 1682 1683 tb_port_dbg(port, "xdomain unplugged\n"); 1684 /* 1685 * Service drivers are unbound during 1686 * tb_xdomain_remove() so setting XDomain as 1687 * unplugged here prevents deadlock if they call 1688 * tb_xdomain_disable_paths(). We will tear down 1689 * all the tunnels below. 
1690 */ 1691 xd->is_unplugged = true; 1692 tb_xdomain_remove(xd); 1693 port->xdomain = NULL; 1694 __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1); 1695 tb_xdomain_put(xd); 1696 tb_port_unconfigure_xdomain(port); 1697 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) { 1698 tb_dp_resource_unavailable(tb, port); 1699 } else if (!port->port) { 1700 tb_sw_dbg(sw, "xHCI disconnect request\n"); 1701 tb_switch_xhci_disconnect(sw); 1702 } else { 1703 tb_port_dbg(port, 1704 "got unplug event for disconnected port, ignoring\n"); 1705 } 1706 } else if (port->remote) { 1707 tb_port_dbg(port, "got plug event for connected port, ignoring\n"); 1708 } else if (!port->port && sw->authorized) { 1709 tb_sw_dbg(sw, "xHCI connect request\n"); 1710 tb_switch_xhci_connect(sw); 1711 } else { 1712 if (tb_port_is_null(port)) { 1713 tb_port_dbg(port, "hotplug: scanning\n"); 1714 tb_scan_port(port); 1715 if (!port->remote) 1716 tb_port_dbg(port, "hotplug: no switch found\n"); 1717 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) { 1718 tb_dp_resource_available(tb, port); 1719 } 1720 } 1721 1722 pm_runtime_mark_last_busy(&sw->dev); 1723 pm_runtime_put_autosuspend(&sw->dev); 1724 1725 put_sw: 1726 tb_switch_put(sw); 1727 out: 1728 mutex_unlock(&tb->lock); 1729 1730 pm_runtime_mark_last_busy(&tb->dev); 1731 pm_runtime_put_autosuspend(&tb->dev); 1732 1733 kfree(ev); 1734 } 1735 1736 static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up, 1737 int *requested_down) 1738 { 1739 int allocated_up, allocated_down, available_up, available_down, ret; 1740 int requested_up_corrected, requested_down_corrected, granularity; 1741 int max_up, max_down, max_up_rounded, max_down_rounded; 1742 struct tb *tb = tunnel->tb; 1743 struct tb_port *in, *out; 1744 1745 ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down); 1746 if (ret) 1747 return ret; 1748 1749 in = tunnel->src_port; 1750 out = tunnel->dst_port; 1751 1752 tb_port_dbg(in, "bandwidth allocated currently %d/%d Mb/s\n", 1753 allocated_up, allocated_down); 1754 1755 /* 1756 * If we get rounded up request from graphics side, say HBR2 x 4 1757 * that is 17500 instead of 17280 (this is because of the 1758 * granularity), we allow it too. Here the graphics has already 1759 * negotiated with the DPRX the maximum possible rates (which is 1760 * 17280 in this case). 1761 * 1762 * Since the link cannot go higher than 17280 we use that in our 1763 * calculations but the DP IN adapter Allocated BW write must be 1764 * the same value (17500) otherwise the adapter will mark it as 1765 * failed for graphics. 1766 */ 1767 ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down); 1768 if (ret) 1769 return ret; 1770 1771 ret = usb4_dp_port_granularity(in); 1772 if (ret < 0) 1773 return ret; 1774 granularity = ret; 1775 1776 max_up_rounded = roundup(max_up, granularity); 1777 max_down_rounded = roundup(max_down, granularity); 1778 1779 /* 1780 * This will "fix" the request down to the maximum supported 1781 * rate * lanes if it is at the maximum rounded up level. 
1782 */ 1783 requested_up_corrected = *requested_up; 1784 if (requested_up_corrected == max_up_rounded) 1785 requested_up_corrected = max_up; 1786 else if (requested_up_corrected < 0) 1787 requested_up_corrected = 0; 1788 requested_down_corrected = *requested_down; 1789 if (requested_down_corrected == max_down_rounded) 1790 requested_down_corrected = max_down; 1791 else if (requested_down_corrected < 0) 1792 requested_down_corrected = 0; 1793 1794 tb_port_dbg(in, "corrected bandwidth request %d/%d Mb/s\n", 1795 requested_up_corrected, requested_down_corrected); 1796 1797 if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) || 1798 (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) { 1799 tb_port_dbg(in, "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n", 1800 requested_up_corrected, requested_down_corrected, 1801 max_up_rounded, max_down_rounded); 1802 return -ENOBUFS; 1803 } 1804 1805 if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) || 1806 (*requested_down >= 0 && requested_down_corrected <= allocated_down)) { 1807 /* 1808 * If requested bandwidth is less or equal than what is 1809 * currently allocated to that tunnel we simply change 1810 * the reservation of the tunnel. Since all the tunnels 1811 * going out from the same USB4 port are in the same 1812 * group the released bandwidth will be taken into 1813 * account for the other tunnels automatically below. 1814 */ 1815 return tb_tunnel_alloc_bandwidth(tunnel, requested_up, 1816 requested_down); 1817 } 1818 1819 /* 1820 * More bandwidth is requested. Release all the potential 1821 * bandwidth from USB3 first. 1822 */ 1823 ret = tb_release_unused_usb3_bandwidth(tb, in, out); 1824 if (ret) 1825 return ret; 1826 1827 /* 1828 * Then go over all tunnels that cross the same USB4 ports (they 1829 * are also in the same group but we use the same function here 1830 * that we use with the normal bandwidth allocation). 
1831 */ 1832 ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down); 1833 if (ret) 1834 goto reclaim; 1835 1836 tb_port_dbg(in, "bandwidth available for allocation %d/%d Mb/s\n", 1837 available_up, available_down); 1838 1839 if ((*requested_up >= 0 && available_up >= requested_up_corrected) || 1840 (*requested_down >= 0 && available_down >= requested_down_corrected)) { 1841 ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up, 1842 requested_down); 1843 } else { 1844 ret = -ENOBUFS; 1845 } 1846 1847 reclaim: 1848 tb_reclaim_usb3_bandwidth(tb, in, out); 1849 return ret; 1850 } 1851 1852 static void tb_handle_dp_bandwidth_request(struct work_struct *work) 1853 { 1854 struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work); 1855 int requested_bw, requested_up, requested_down, ret; 1856 struct tb_port *in, *out; 1857 struct tb_tunnel *tunnel; 1858 struct tb *tb = ev->tb; 1859 struct tb_cm *tcm = tb_priv(tb); 1860 struct tb_switch *sw; 1861 1862 pm_runtime_get_sync(&tb->dev); 1863 1864 mutex_lock(&tb->lock); 1865 if (!tcm->hotplug_active) 1866 goto unlock; 1867 1868 sw = tb_switch_find_by_route(tb, ev->route); 1869 if (!sw) { 1870 tb_warn(tb, "bandwidth request from non-existent router %llx\n", 1871 ev->route); 1872 goto unlock; 1873 } 1874 1875 in = &sw->ports[ev->port]; 1876 if (!tb_port_is_dpin(in)) { 1877 tb_port_warn(in, "bandwidth request to non-DP IN adapter\n"); 1878 goto unlock; 1879 } 1880 1881 tb_port_dbg(in, "handling bandwidth allocation request\n"); 1882 1883 if (!usb4_dp_port_bw_mode_enabled(in)) { 1884 tb_port_warn(in, "bandwidth allocation mode not enabled\n"); 1885 goto unlock; 1886 } 1887 1888 ret = usb4_dp_port_requested_bw(in); 1889 if (ret < 0) { 1890 if (ret == -ENODATA) 1891 tb_port_dbg(in, "no bandwidth request active\n"); 1892 else 1893 tb_port_warn(in, "failed to read requested bandwidth\n"); 1894 goto unlock; 1895 } 1896 requested_bw = ret; 1897 1898 tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw); 1899 1900 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL); 1901 if (!tunnel) { 1902 tb_port_warn(in, "failed to find tunnel\n"); 1903 goto unlock; 1904 } 1905 1906 out = tunnel->dst_port; 1907 1908 if (in->sw->config.depth < out->sw->config.depth) { 1909 requested_up = -1; 1910 requested_down = requested_bw; 1911 } else { 1912 requested_up = requested_bw; 1913 requested_down = -1; 1914 } 1915 1916 ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down); 1917 if (ret) { 1918 if (ret == -ENOBUFS) 1919 tb_port_warn(in, "not enough bandwidth available\n"); 1920 else 1921 tb_port_warn(in, "failed to change bandwidth allocation\n"); 1922 } else { 1923 tb_port_dbg(in, "bandwidth allocation changed to %d/%d Mb/s\n", 1924 requested_up, requested_down); 1925 1926 /* Update other clients about the allocation change */ 1927 tb_recalc_estimated_bandwidth(tb); 1928 } 1929 1930 unlock: 1931 mutex_unlock(&tb->lock); 1932 1933 pm_runtime_mark_last_busy(&tb->dev); 1934 pm_runtime_put_autosuspend(&tb->dev); 1935 } 1936 1937 static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port) 1938 { 1939 struct tb_hotplug_event *ev; 1940 1941 ev = kmalloc(sizeof(*ev), GFP_KERNEL); 1942 if (!ev) 1943 return; 1944 1945 ev->tb = tb; 1946 ev->route = route; 1947 ev->port = port; 1948 INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request); 1949 queue_work(tb->wq, &ev->work); 1950 } 1951 1952 static void tb_handle_notification(struct tb *tb, u64 route, 1953 const struct cfg_error_pkg *error) 1954 { 1955 1956 switch (error->error) { 
1957 case TB_CFG_ERROR_PCIE_WAKE: 1958 case TB_CFG_ERROR_DP_CON_CHANGE: 1959 case TB_CFG_ERROR_DPTX_DISCOVERY: 1960 if (tb_cfg_ack_notification(tb->ctl, route, error)) 1961 tb_warn(tb, "could not ack notification on %llx\n", 1962 route); 1963 break; 1964 1965 case TB_CFG_ERROR_DP_BW: 1966 if (tb_cfg_ack_notification(tb->ctl, route, error)) 1967 tb_warn(tb, "could not ack notification on %llx\n", 1968 route); 1969 tb_queue_dp_bandwidth_request(tb, route, error->port); 1970 break; 1971 1972 default: 1973 /* Ignore for now */ 1974 break; 1975 } 1976 } 1977 1978 /* 1979 * tb_schedule_hotplug_handler() - callback function for the control channel 1980 * 1981 * Delegates to tb_handle_hotplug. 1982 */ 1983 static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type, 1984 const void *buf, size_t size) 1985 { 1986 const struct cfg_event_pkg *pkg = buf; 1987 u64 route = tb_cfg_get_route(&pkg->header); 1988 1989 switch (type) { 1990 case TB_CFG_PKG_ERROR: 1991 tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf); 1992 return; 1993 case TB_CFG_PKG_EVENT: 1994 break; 1995 default: 1996 tb_warn(tb, "unexpected event %#x, ignoring\n", type); 1997 return; 1998 } 1999 2000 if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) { 2001 tb_warn(tb, "could not ack plug event on %llx:%x\n", route, 2002 pkg->port); 2003 } 2004 2005 tb_queue_hotplug(tb, route, pkg->port, pkg->unplug); 2006 } 2007 2008 static void tb_stop(struct tb *tb) 2009 { 2010 struct tb_cm *tcm = tb_priv(tb); 2011 struct tb_tunnel *tunnel; 2012 struct tb_tunnel *n; 2013 2014 cancel_delayed_work(&tcm->remove_work); 2015 /* tunnels are only present after everything has been initialized */ 2016 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { 2017 /* 2018 * DMA tunnels require the driver to be functional so we 2019 * tear them down. Other protocol tunnels can be left 2020 * intact. 2021 */ 2022 if (tb_tunnel_is_dma(tunnel)) 2023 tb_tunnel_deactivate(tunnel); 2024 tb_tunnel_free(tunnel); 2025 } 2026 tb_switch_remove(tb->root_switch); 2027 tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */ 2028 } 2029 2030 static int tb_scan_finalize_switch(struct device *dev, void *data) 2031 { 2032 if (tb_is_switch(dev)) { 2033 struct tb_switch *sw = tb_to_switch(dev); 2034 2035 /* 2036 * If we found that the switch was already setup by the 2037 * boot firmware, mark it as authorized now before we 2038 * send uevent to userspace. 2039 */ 2040 if (sw->boot) 2041 sw->authorized = 1; 2042 2043 dev_set_uevent_suppress(dev, false); 2044 kobject_uevent(&dev->kobj, KOBJ_ADD); 2045 device_for_each_child(dev, NULL, tb_scan_finalize_switch); 2046 } 2047 2048 return 0; 2049 } 2050 2051 static int tb_start(struct tb *tb) 2052 { 2053 struct tb_cm *tcm = tb_priv(tb); 2054 int ret; 2055 2056 tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0); 2057 if (IS_ERR(tb->root_switch)) 2058 return PTR_ERR(tb->root_switch); 2059 2060 /* 2061 * ICM firmware upgrade needs running firmware and in native 2062 * mode that is not available so disable firmware upgrade of the 2063 * root switch. 2064 * 2065 * However, USB4 routers support NVM firmware upgrade if they 2066 * implement the necessary router operations. 
static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available, so disable firmware upgrade of
	 * the root switch.
	 *
	 * However, USB4 routers support NVM firmware upgrade if they
	 * implement the necessary router operations.
	 */
	tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
	/* All USB4 routers support runtime PM */
	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/*
	 * To support the highest CLx state, we set the host router's
	 * TMU to Normal mode.
	 */
	tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_NORMAL,
				false);
	/* Enable TMU if it is off */
	tb_switch_tmu_enable(tb->root_switch);
	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb);
	/* Add DP resources from the DP tunnels created by the boot firmware */
	tb_discover_dp_resources(tb);
	/*
	 * If the boot firmware did not create USB 3.x tunnels create them
	 * now for the whole topology.
	 */
	tb_create_usb3_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_disconnect_and_release_dp(tb);
	tb_switch_suspend(tb->root_switch, false);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}

static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;

	/* No need to restore if the router is already unplugged */
	if (sw->is_unplugged)
		return;

	if (tb_enable_clx(sw))
		tb_sw_warn(sw, "failed to re-enable CL states\n");

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (port->remote) {
			tb_switch_lane_bonding_enable(port->remote->sw);
			tb_switch_configure_link(port->remote->sw);

			tb_restore_children(port->remote->sw);
		} else if (port->xdomain) {
			tb_port_configure_xdomain(port, port->xdomain);
		}
	}
}
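
/*
 * System resume: reset the root switch to drop any PCIe configuration
 * left behind by the firmware, resume the routers, free tunnels and
 * children that are no longer valid, restore link/TMU/CLx settings and
 * then restart the tunnels we created before suspend.
 */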
static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;
	unsigned int usb3_delay = 0;
	LIST_HEAD(tunnels);

	tb_dbg(tb, "resuming...\n");

	/* Remove any PCIe devices the firmware might have set up */
	tb_switch_reset(tb->root_switch);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);

	/*
	 * If we get here from suspend to disk the boot firmware or the
	 * restore kernel might have created tunnels of its own. Since
	 * we cannot be sure they are usable for us we find and tear
	 * them down.
	 */
	tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
	list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
		if (tb_tunnel_is_usb3(tunnel))
			usb3_delay = 500;
		tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}

	/* Re-create our tunnels now */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/* USB3 requires a delay before it can be re-activated */
		if (tb_tunnel_is_usb3(tunnel)) {
			msleep(usb3_delay);
			/* Only need to do it once */
			usb3_delay = 0;
		}
		tb_tunnel_restart(tunnel);
	}
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * The PCIe links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}

static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_xdomain_remove(port->xdomain);
			tb_port_unconfigure_xdomain(port);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}

static int tb_freeze_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = false;
	return 0;
}

static int tb_thaw_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = true;
	return 0;
}

static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains. If another domain was swapped
	 * in place of an unplugged XDomain we need to run another
	 * rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}

static int tb_runtime_suspend(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	mutex_lock(&tb->lock);
	tb_switch_suspend(tb->root_switch, true);
	tcm->hotplug_active = false;
	mutex_unlock(&tb->lock);

	return 0;
}

static void tb_remove_work(struct work_struct *work)
{
	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
	struct tb *tb = tcm_to_tb(tcm);

	mutex_lock(&tb->lock);
	if (tb->root_switch) {
		tb_free_unplugged_children(tb->root_switch);
		tb_free_unplugged_xdomains(tb->root_switch);
	}
	mutex_unlock(&tb->lock);
}
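
/*
 * Runtime resume: bring the routers and our tunnels back up and then
 * schedule removal of anything that was unplugged while the domain was
 * runtime suspended.
 */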
static int tb_runtime_resume(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	mutex_lock(&tb->lock);
	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	tcm->hotplug_active = true;
	mutex_unlock(&tb->lock);

	/*
	 * Schedule cleanup of any unplugged devices. Run this in a
	 * separate thread to avoid a possible deadlock if the device
	 * removal runtime resumes the unplugged device.
	 */
	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
	return 0;
}

static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.freeze_noirq = tb_freeze_noirq,
	.thaw_noirq = tb_thaw_noirq,
	.complete = tb_complete,
	.runtime_suspend = tb_runtime_suspend,
	.runtime_resume = tb_runtime_resume,
	.handle_event = tb_handle_event,
	.disapprove_switch = tb_disconnect_pci,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

/*
 * During suspend the Thunderbolt controller is reset and all PCIe
 * tunnels are lost. The NHI driver will try to reestablish all tunnels
 * during resume. This adds device links between the tunneled PCIe
 * downstream ports and the NHI so that the device core will make sure
 * the NHI is resumed first before the rest.
 */
static void tb_apple_add_links(struct tb_nhi *nhi)
{
	struct pci_dev *upstream, *pdev;

	if (!x86_apple_machine)
		return;

	switch (nhi->pdev->device) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
		break;
	default:
		return;
	}

	upstream = pci_upstream_bridge(nhi->pdev);
	while (upstream) {
		if (!pci_is_pcie(upstream))
			return;
		if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
			break;
		upstream = pci_upstream_bridge(upstream);
	}

	if (!upstream)
		return;

	/*
	 * For each hotplug downstream port, add a device link back to
	 * the NHI so that PCIe tunnels can be re-established after
	 * sleep.
	 */
	for_each_pci_bridge(pdev, upstream->subordinate) {
		const struct device_link *link;

		if (!pci_is_pcie(pdev))
			continue;
		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
		    !pdev->is_hotplug_bridge)
			continue;

		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
				       DL_FLAG_AUTOREMOVE_SUPPLIER |
				       DL_FLAG_PM_RUNTIME);
		if (link) {
			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
				dev_name(&pdev->dev));
		} else {
			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
				 dev_name(&pdev->dev));
		}
	}
}

struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
	if (!tb)
		return NULL;

	if (tb_acpi_may_tunnel_pcie())
		tb->security_level = TB_SECURITY_USER;
	else
		tb->security_level = TB_SECURITY_NOPCIE;

	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);
	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
	tb_init_bandwidth_groups(tcm);

	tb_dbg(tb, "using software connection manager\n");

	tb_apple_add_links(nhi);
	tb_acpi_add_links(nhi);

	return tb;
}