1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Thunderbolt driver - bus logic (NHI independent) 4 * 5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> 6 * Copyright (C) 2019, Intel Corporation 7 */ 8 9 #include <linux/slab.h> 10 #include <linux/errno.h> 11 #include <linux/delay.h> 12 #include <linux/pm_runtime.h> 13 #include <linux/platform_data/x86/apple.h> 14 15 #include "tb.h" 16 #include "tb_regs.h" 17 #include "tunnel.h" 18 19 #define TB_TIMEOUT 100 /* ms */ 20 21 /** 22 * struct tb_cm - Simple Thunderbolt connection manager 23 * @tunnel_list: List of active tunnels 24 * @dp_resources: List of available DP resources for DP tunneling 25 * @hotplug_active: tb_handle_hotplug will stop progressing plug 26 * events and exit if this is not set (it needs to 27 * acquire the lock one more time). Used to drain wq 28 * after cfg has been paused. 29 * @remove_work: Work used to remove any unplugged routers after 30 * runtime resume 31 */ 32 struct tb_cm { 33 struct list_head tunnel_list; 34 struct list_head dp_resources; 35 bool hotplug_active; 36 struct delayed_work remove_work; 37 }; 38 39 static inline struct tb *tcm_to_tb(struct tb_cm *tcm) 40 { 41 return ((void *)tcm - sizeof(struct tb)); 42 } 43 44 struct tb_hotplug_event { 45 struct work_struct work; 46 struct tb *tb; 47 u64 route; 48 u8 port; 49 bool unplug; 50 }; 51 52 static void tb_handle_hotplug(struct work_struct *work); 53 54 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug) 55 { 56 struct tb_hotplug_event *ev; 57 58 ev = kmalloc(sizeof(*ev), GFP_KERNEL); 59 if (!ev) 60 return; 61 62 ev->tb = tb; 63 ev->route = route; 64 ev->port = port; 65 ev->unplug = unplug; 66 INIT_WORK(&ev->work, tb_handle_hotplug); 67 queue_work(tb->wq, &ev->work); 68 } 69 70 /* enumeration & hot plug handling */ 71 72 static void tb_add_dp_resources(struct tb_switch *sw) 73 { 74 struct tb_cm *tcm = tb_priv(sw->tb); 75 struct tb_port *port; 76 77 tb_switch_for_each_port(sw, port) { 78 if (!tb_port_is_dpin(port)) 79 continue; 80 81 if (!tb_switch_query_dp_resource(sw, port)) 82 continue; 83 84 list_add_tail(&port->list, &tcm->dp_resources); 85 tb_port_dbg(port, "DP IN resource available\n"); 86 } 87 } 88 89 static void tb_remove_dp_resources(struct tb_switch *sw) 90 { 91 struct tb_cm *tcm = tb_priv(sw->tb); 92 struct tb_port *port, *tmp; 93 94 /* Clear children resources first */ 95 tb_switch_for_each_port(sw, port) { 96 if (tb_port_has_remote(port)) 97 tb_remove_dp_resources(port->remote->sw); 98 } 99 100 list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) { 101 if (port->sw == sw) { 102 tb_port_dbg(port, "DP OUT resource unavailable\n"); 103 list_del_init(&port->list); 104 } 105 } 106 } 107 108 static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port) 109 { 110 struct tb_cm *tcm = tb_priv(tb); 111 struct tb_port *p; 112 113 list_for_each_entry(p, &tcm->dp_resources, list) { 114 if (p == port) 115 return; 116 } 117 118 tb_port_dbg(port, "DP %s resource available discovered\n", 119 tb_port_is_dpin(port) ? "IN" : "OUT"); 120 list_add_tail(&port->list, &tcm->dp_resources); 121 } 122 123 static void tb_discover_dp_resources(struct tb *tb) 124 { 125 struct tb_cm *tcm = tb_priv(tb); 126 struct tb_tunnel *tunnel; 127 128 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { 129 if (tb_tunnel_is_dp(tunnel)) 130 tb_discover_dp_resource(tb, tunnel->dst_port); 131 } 132 } 133 134 static void tb_switch_discover_tunnels(struct tb_switch *sw, 135 struct list_head *list, 136 bool alloc_hopids) 137 { 138 struct tb *tb = sw->tb; 139 struct tb_port *port; 140 141 tb_switch_for_each_port(sw, port) { 142 struct tb_tunnel *tunnel = NULL; 143 144 switch (port->config.type) { 145 case TB_TYPE_DP_HDMI_IN: 146 tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids); 147 break; 148 149 case TB_TYPE_PCIE_DOWN: 150 tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids); 151 break; 152 153 case TB_TYPE_USB3_DOWN: 154 tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids); 155 break; 156 157 default: 158 break; 159 } 160 161 if (tunnel) 162 list_add_tail(&tunnel->list, list); 163 } 164 165 tb_switch_for_each_port(sw, port) { 166 if (tb_port_has_remote(port)) { 167 tb_switch_discover_tunnels(port->remote->sw, list, 168 alloc_hopids); 169 } 170 } 171 } 172 173 static void tb_discover_tunnels(struct tb *tb) 174 { 175 struct tb_cm *tcm = tb_priv(tb); 176 struct tb_tunnel *tunnel; 177 178 tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true); 179 180 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { 181 if (tb_tunnel_is_pci(tunnel)) { 182 struct tb_switch *parent = tunnel->dst_port->sw; 183 184 while (parent != tunnel->src_port->sw) { 185 parent->boot = true; 186 parent = tb_switch_parent(parent); 187 } 188 } else if (tb_tunnel_is_dp(tunnel)) { 189 /* Keep the domain from powering down */ 190 pm_runtime_get_sync(&tunnel->src_port->sw->dev); 191 pm_runtime_get_sync(&tunnel->dst_port->sw->dev); 192 } 193 } 194 } 195 196 static int tb_port_configure_xdomain(struct tb_port *port) 197 { 198 /* 199 * XDomain paths currently only support single lane so we must 200 * disable the other lane according to USB4 spec. 201 */ 202 tb_port_disable(port->dual_link_port); 203 204 if (tb_switch_is_usb4(port->sw)) 205 return usb4_port_configure_xdomain(port); 206 return tb_lc_configure_xdomain(port); 207 } 208 209 static void tb_port_unconfigure_xdomain(struct tb_port *port) 210 { 211 if (tb_switch_is_usb4(port->sw)) 212 usb4_port_unconfigure_xdomain(port); 213 else 214 tb_lc_unconfigure_xdomain(port); 215 216 tb_port_enable(port->dual_link_port); 217 } 218 219 static void tb_scan_xdomain(struct tb_port *port) 220 { 221 struct tb_switch *sw = port->sw; 222 struct tb *tb = sw->tb; 223 struct tb_xdomain *xd; 224 u64 route; 225 226 if (!tb_is_xdomain_enabled()) 227 return; 228 229 route = tb_downstream_route(port); 230 xd = tb_xdomain_find_by_route(tb, route); 231 if (xd) { 232 tb_xdomain_put(xd); 233 return; 234 } 235 236 xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid, 237 NULL); 238 if (xd) { 239 tb_port_at(route, sw)->xdomain = xd; 240 tb_port_configure_xdomain(port); 241 tb_xdomain_add(xd); 242 } 243 } 244 245 static int tb_enable_tmu(struct tb_switch *sw) 246 { 247 int ret; 248 249 /* If it is already enabled in correct mode, don't touch it */ 250 if (tb_switch_tmu_is_enabled(sw)) 251 return 0; 252 253 ret = tb_switch_tmu_disable(sw); 254 if (ret) 255 return ret; 256 257 ret = tb_switch_tmu_post_time(sw); 258 if (ret) 259 return ret; 260 261 return tb_switch_tmu_enable(sw); 262 } 263 264 /** 265 * tb_find_unused_port() - return the first inactive port on @sw 266 * @sw: Switch to find the port on 267 * @type: Port type to look for 268 */ 269 static struct tb_port *tb_find_unused_port(struct tb_switch *sw, 270 enum tb_port_type type) 271 { 272 struct tb_port *port; 273 274 tb_switch_for_each_port(sw, port) { 275 if (tb_is_upstream_port(port)) 276 continue; 277 if (port->config.type != type) 278 continue; 279 if (!port->cap_adap) 280 continue; 281 if (tb_port_is_enabled(port)) 282 continue; 283 return port; 284 } 285 return NULL; 286 } 287 288 static struct tb_port *tb_find_usb3_down(struct tb_switch *sw, 289 const struct tb_port *port) 290 { 291 struct tb_port *down; 292 293 down = usb4_switch_map_usb3_down(sw, port); 294 if (down && !tb_usb3_port_is_enabled(down)) 295 return down; 296 return NULL; 297 } 298 299 static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type, 300 struct tb_port *src_port, 301 struct tb_port *dst_port) 302 { 303 struct tb_cm *tcm = tb_priv(tb); 304 struct tb_tunnel *tunnel; 305 306 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { 307 if (tunnel->type == type && 308 ((src_port && src_port == tunnel->src_port) || 309 (dst_port && dst_port == tunnel->dst_port))) { 310 return tunnel; 311 } 312 } 313 314 return NULL; 315 } 316 317 static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb, 318 struct tb_port *src_port, 319 struct tb_port *dst_port) 320 { 321 struct tb_port *port, *usb3_down; 322 struct tb_switch *sw; 323 324 /* Pick the router that is deepest in the topology */ 325 if (dst_port->sw->config.depth > src_port->sw->config.depth) 326 sw = dst_port->sw; 327 else 328 sw = src_port->sw; 329 330 /* Can't be the host router */ 331 if (sw == tb->root_switch) 332 return NULL; 333 334 /* Find the downstream USB4 port that leads to this router */ 335 port = tb_port_at(tb_route(sw), tb->root_switch); 336 /* Find the corresponding host router USB3 downstream port */ 337 usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port); 338 if (!usb3_down) 339 return NULL; 340 341 return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL); 342 } 343 344 static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port, 345 struct tb_port *dst_port, int *available_up, int *available_down) 346 { 347 int usb3_consumed_up, usb3_consumed_down, ret; 348 struct tb_cm *tcm = tb_priv(tb); 349 struct tb_tunnel *tunnel; 350 struct tb_port *port; 351 352 tb_port_dbg(dst_port, "calculating available bandwidth\n"); 353 354 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); 355 if (tunnel) { 356 ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up, 357 &usb3_consumed_down); 358 if (ret) 359 return ret; 360 } else { 361 usb3_consumed_up = 0; 362 usb3_consumed_down = 0; 363 } 364 365 *available_up = *available_down = 40000; 366 367 /* Find the minimum available bandwidth over all links */ 368 tb_for_each_port_on_path(src_port, dst_port, port) { 369 int link_speed, link_width, up_bw, down_bw; 370 371 if (!tb_port_is_null(port)) 372 continue; 373 374 if (tb_is_upstream_port(port)) { 375 link_speed = port->sw->link_speed; 376 } else { 377 link_speed = tb_port_get_link_speed(port); 378 if (link_speed < 0) 379 return link_speed; 380 } 381 382 link_width = port->bonded ? 2 : 1; 383 384 up_bw = link_speed * link_width * 1000; /* Mb/s */ 385 /* Leave 10% guard band */ 386 up_bw -= up_bw / 10; 387 down_bw = up_bw; 388 389 tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw); 390 391 /* 392 * Find all DP tunnels that cross the port and reduce 393 * their consumed bandwidth from the available. 394 */ 395 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { 396 int dp_consumed_up, dp_consumed_down; 397 398 if (!tb_tunnel_is_dp(tunnel)) 399 continue; 400 401 if (!tb_tunnel_port_on_path(tunnel, port)) 402 continue; 403 404 ret = tb_tunnel_consumed_bandwidth(tunnel, 405 &dp_consumed_up, 406 &dp_consumed_down); 407 if (ret) 408 return ret; 409 410 up_bw -= dp_consumed_up; 411 down_bw -= dp_consumed_down; 412 } 413 414 /* 415 * If USB3 is tunneled from the host router down to the 416 * branch leading to port we need to take USB3 consumed 417 * bandwidth into account regardless whether it actually 418 * crosses the port. 419 */ 420 up_bw -= usb3_consumed_up; 421 down_bw -= usb3_consumed_down; 422 423 if (up_bw < *available_up) 424 *available_up = up_bw; 425 if (down_bw < *available_down) 426 *available_down = down_bw; 427 } 428 429 if (*available_up < 0) 430 *available_up = 0; 431 if (*available_down < 0) 432 *available_down = 0; 433 434 return 0; 435 } 436 437 static int tb_release_unused_usb3_bandwidth(struct tb *tb, 438 struct tb_port *src_port, 439 struct tb_port *dst_port) 440 { 441 struct tb_tunnel *tunnel; 442 443 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); 444 return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0; 445 } 446 447 static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port, 448 struct tb_port *dst_port) 449 { 450 int ret, available_up, available_down; 451 struct tb_tunnel *tunnel; 452 453 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); 454 if (!tunnel) 455 return; 456 457 tb_dbg(tb, "reclaiming unused bandwidth for USB3\n"); 458 459 /* 460 * Calculate available bandwidth for the first hop USB3 tunnel. 461 * That determines the whole USB3 bandwidth for this branch. 462 */ 463 ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port, 464 &available_up, &available_down); 465 if (ret) { 466 tb_warn(tb, "failed to calculate available bandwidth\n"); 467 return; 468 } 469 470 tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n", 471 available_up, available_down); 472 473 tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down); 474 } 475 476 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw) 477 { 478 struct tb_switch *parent = tb_switch_parent(sw); 479 int ret, available_up, available_down; 480 struct tb_port *up, *down, *port; 481 struct tb_cm *tcm = tb_priv(tb); 482 struct tb_tunnel *tunnel; 483 484 if (!tb_acpi_may_tunnel_usb3()) { 485 tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n"); 486 return 0; 487 } 488 489 up = tb_switch_find_port(sw, TB_TYPE_USB3_UP); 490 if (!up) 491 return 0; 492 493 if (!sw->link_usb4) 494 return 0; 495 496 /* 497 * Look up available down port. Since we are chaining it should 498 * be found right above this switch. 499 */ 500 port = tb_port_at(tb_route(sw), parent); 501 down = tb_find_usb3_down(parent, port); 502 if (!down) 503 return 0; 504 505 if (tb_route(parent)) { 506 struct tb_port *parent_up; 507 /* 508 * Check first that the parent switch has its upstream USB3 509 * port enabled. Otherwise the chain is not complete and 510 * there is no point setting up a new tunnel. 511 */ 512 parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP); 513 if (!parent_up || !tb_port_is_enabled(parent_up)) 514 return 0; 515 516 /* Make all unused bandwidth available for the new tunnel */ 517 ret = tb_release_unused_usb3_bandwidth(tb, down, up); 518 if (ret) 519 return ret; 520 } 521 522 ret = tb_available_bandwidth(tb, down, up, &available_up, 523 &available_down); 524 if (ret) 525 goto err_reclaim; 526 527 tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n", 528 available_up, available_down); 529 530 tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up, 531 available_down); 532 if (!tunnel) { 533 ret = -ENOMEM; 534 goto err_reclaim; 535 } 536 537 if (tb_tunnel_activate(tunnel)) { 538 tb_port_info(up, 539 "USB3 tunnel activation failed, aborting\n"); 540 ret = -EIO; 541 goto err_free; 542 } 543 544 list_add_tail(&tunnel->list, &tcm->tunnel_list); 545 if (tb_route(parent)) 546 tb_reclaim_usb3_bandwidth(tb, down, up); 547 548 return 0; 549 550 err_free: 551 tb_tunnel_free(tunnel); 552 err_reclaim: 553 if (tb_route(parent)) 554 tb_reclaim_usb3_bandwidth(tb, down, up); 555 556 return ret; 557 } 558 559 static int tb_create_usb3_tunnels(struct tb_switch *sw) 560 { 561 struct tb_port *port; 562 int ret; 563 564 if (!tb_acpi_may_tunnel_usb3()) 565 return 0; 566 567 if (tb_route(sw)) { 568 ret = tb_tunnel_usb3(sw->tb, sw); 569 if (ret) 570 return ret; 571 } 572 573 tb_switch_for_each_port(sw, port) { 574 if (!tb_port_has_remote(port)) 575 continue; 576 ret = tb_create_usb3_tunnels(port->remote->sw); 577 if (ret) 578 return ret; 579 } 580 581 return 0; 582 } 583 584 static void tb_scan_port(struct tb_port *port); 585 586 /* 587 * tb_scan_switch() - scan for and initialize downstream switches 588 */ 589 static void tb_scan_switch(struct tb_switch *sw) 590 { 591 struct tb_port *port; 592 593 pm_runtime_get_sync(&sw->dev); 594 595 tb_switch_for_each_port(sw, port) 596 tb_scan_port(port); 597 598 pm_runtime_mark_last_busy(&sw->dev); 599 pm_runtime_put_autosuspend(&sw->dev); 600 } 601 602 /* 603 * tb_scan_port() - check for and initialize switches below port 604 */ 605 static void tb_scan_port(struct tb_port *port) 606 { 607 struct tb_cm *tcm = tb_priv(port->sw->tb); 608 struct tb_port *upstream_port; 609 struct tb_switch *sw; 610 611 if (tb_is_upstream_port(port)) 612 return; 613 614 if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 && 615 !tb_dp_port_is_enabled(port)) { 616 tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n"); 617 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port, 618 false); 619 return; 620 } 621 622 if (port->config.type != TB_TYPE_PORT) 623 return; 624 if (port->dual_link_port && port->link_nr) 625 return; /* 626 * Downstream switch is reachable through two ports. 627 * Only scan on the primary port (link_nr == 0). 628 */ 629 if (tb_wait_for_port(port, false) <= 0) 630 return; 631 if (port->remote) { 632 tb_port_dbg(port, "port already has a remote\n"); 633 return; 634 } 635 636 tb_retimer_scan(port, true); 637 638 sw = tb_switch_alloc(port->sw->tb, &port->sw->dev, 639 tb_downstream_route(port)); 640 if (IS_ERR(sw)) { 641 /* 642 * If there is an error accessing the connected switch 643 * it may be connected to another domain. Also we allow 644 * the other domain to be connected to a max depth switch. 645 */ 646 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL) 647 tb_scan_xdomain(port); 648 return; 649 } 650 651 if (tb_switch_configure(sw)) { 652 tb_switch_put(sw); 653 return; 654 } 655 656 /* 657 * If there was previously another domain connected remove it 658 * first. 659 */ 660 if (port->xdomain) { 661 tb_xdomain_remove(port->xdomain); 662 tb_port_unconfigure_xdomain(port); 663 port->xdomain = NULL; 664 } 665 666 /* 667 * Do not send uevents until we have discovered all existing 668 * tunnels and know which switches were authorized already by 669 * the boot firmware. 670 */ 671 if (!tcm->hotplug_active) 672 dev_set_uevent_suppress(&sw->dev, true); 673 674 /* 675 * At the moment Thunderbolt 2 and beyond (devices with LC) we 676 * can support runtime PM. 677 */ 678 sw->rpm = sw->generation > 1; 679 680 if (tb_switch_add(sw)) { 681 tb_switch_put(sw); 682 return; 683 } 684 685 /* Link the switches using both links if available */ 686 upstream_port = tb_upstream_port(sw); 687 port->remote = upstream_port; 688 upstream_port->remote = port; 689 if (port->dual_link_port && upstream_port->dual_link_port) { 690 port->dual_link_port->remote = upstream_port->dual_link_port; 691 upstream_port->dual_link_port->remote = port->dual_link_port; 692 } 693 694 /* Enable lane bonding if supported */ 695 tb_switch_lane_bonding_enable(sw); 696 /* Set the link configured */ 697 tb_switch_configure_link(sw); 698 699 if (tb_enable_tmu(sw)) 700 tb_sw_warn(sw, "failed to enable TMU\n"); 701 702 /* Scan upstream retimers */ 703 tb_retimer_scan(upstream_port, true); 704 705 /* 706 * Create USB 3.x tunnels only when the switch is plugged to the 707 * domain. This is because we scan the domain also during discovery 708 * and want to discover existing USB 3.x tunnels before we create 709 * any new. 710 */ 711 if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw)) 712 tb_sw_warn(sw, "USB3 tunnel creation failed\n"); 713 714 tb_add_dp_resources(sw); 715 tb_scan_switch(sw); 716 } 717 718 static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel) 719 { 720 struct tb_port *src_port, *dst_port; 721 struct tb *tb; 722 723 if (!tunnel) 724 return; 725 726 tb_tunnel_deactivate(tunnel); 727 list_del(&tunnel->list); 728 729 tb = tunnel->tb; 730 src_port = tunnel->src_port; 731 dst_port = tunnel->dst_port; 732 733 switch (tunnel->type) { 734 case TB_TUNNEL_DP: 735 /* 736 * In case of DP tunnel make sure the DP IN resource is 737 * deallocated properly. 738 */ 739 tb_switch_dealloc_dp_resource(src_port->sw, src_port); 740 /* Now we can allow the domain to runtime suspend again */ 741 pm_runtime_mark_last_busy(&dst_port->sw->dev); 742 pm_runtime_put_autosuspend(&dst_port->sw->dev); 743 pm_runtime_mark_last_busy(&src_port->sw->dev); 744 pm_runtime_put_autosuspend(&src_port->sw->dev); 745 fallthrough; 746 747 case TB_TUNNEL_USB3: 748 tb_reclaim_usb3_bandwidth(tb, src_port, dst_port); 749 break; 750 751 default: 752 /* 753 * PCIe and DMA tunnels do not consume guaranteed 754 * bandwidth. 755 */ 756 break; 757 } 758 759 tb_tunnel_free(tunnel); 760 } 761 762 /* 763 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away 764 */ 765 static void tb_free_invalid_tunnels(struct tb *tb) 766 { 767 struct tb_cm *tcm = tb_priv(tb); 768 struct tb_tunnel *tunnel; 769 struct tb_tunnel *n; 770 771 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { 772 if (tb_tunnel_is_invalid(tunnel)) 773 tb_deactivate_and_free_tunnel(tunnel); 774 } 775 } 776 777 /* 778 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches 779 */ 780 static void tb_free_unplugged_children(struct tb_switch *sw) 781 { 782 struct tb_port *port; 783 784 tb_switch_for_each_port(sw, port) { 785 if (!tb_port_has_remote(port)) 786 continue; 787 788 if (port->remote->sw->is_unplugged) { 789 tb_retimer_remove_all(port); 790 tb_remove_dp_resources(port->remote->sw); 791 tb_switch_unconfigure_link(port->remote->sw); 792 tb_switch_lane_bonding_disable(port->remote->sw); 793 tb_switch_remove(port->remote->sw); 794 port->remote = NULL; 795 if (port->dual_link_port) 796 port->dual_link_port->remote = NULL; 797 } else { 798 tb_free_unplugged_children(port->remote->sw); 799 } 800 } 801 } 802 803 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw, 804 const struct tb_port *port) 805 { 806 struct tb_port *down = NULL; 807 808 /* 809 * To keep plugging devices consistently in the same PCIe 810 * hierarchy, do mapping here for switch downstream PCIe ports. 811 */ 812 if (tb_switch_is_usb4(sw)) { 813 down = usb4_switch_map_pcie_down(sw, port); 814 } else if (!tb_route(sw)) { 815 int phy_port = tb_phy_port_from_link(port->port); 816 int index; 817 818 /* 819 * Hard-coded Thunderbolt port to PCIe down port mapping 820 * per controller. 821 */ 822 if (tb_switch_is_cactus_ridge(sw) || 823 tb_switch_is_alpine_ridge(sw)) 824 index = !phy_port ? 6 : 7; 825 else if (tb_switch_is_falcon_ridge(sw)) 826 index = !phy_port ? 6 : 8; 827 else if (tb_switch_is_titan_ridge(sw)) 828 index = !phy_port ? 8 : 9; 829 else 830 goto out; 831 832 /* Validate the hard-coding */ 833 if (WARN_ON(index > sw->config.max_port_number)) 834 goto out; 835 836 down = &sw->ports[index]; 837 } 838 839 if (down) { 840 if (WARN_ON(!tb_port_is_pcie_down(down))) 841 goto out; 842 if (tb_pci_port_is_enabled(down)) 843 goto out; 844 845 return down; 846 } 847 848 out: 849 return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN); 850 } 851 852 static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in) 853 { 854 struct tb_port *host_port, *port; 855 struct tb_cm *tcm = tb_priv(tb); 856 857 host_port = tb_route(in->sw) ? 858 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL; 859 860 list_for_each_entry(port, &tcm->dp_resources, list) { 861 if (!tb_port_is_dpout(port)) 862 continue; 863 864 if (tb_port_is_enabled(port)) { 865 tb_port_dbg(port, "in use\n"); 866 continue; 867 } 868 869 tb_port_dbg(port, "DP OUT available\n"); 870 871 /* 872 * Keep the DP tunnel under the topology starting from 873 * the same host router downstream port. 874 */ 875 if (host_port && tb_route(port->sw)) { 876 struct tb_port *p; 877 878 p = tb_port_at(tb_route(port->sw), tb->root_switch); 879 if (p != host_port) 880 continue; 881 } 882 883 return port; 884 } 885 886 return NULL; 887 } 888 889 static void tb_tunnel_dp(struct tb *tb) 890 { 891 int available_up, available_down, ret, link_nr; 892 struct tb_cm *tcm = tb_priv(tb); 893 struct tb_port *port, *in, *out; 894 struct tb_tunnel *tunnel; 895 896 if (!tb_acpi_may_tunnel_dp()) { 897 tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n"); 898 return; 899 } 900 901 /* 902 * Find pair of inactive DP IN and DP OUT adapters and then 903 * establish a DP tunnel between them. 904 */ 905 tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n"); 906 907 in = NULL; 908 out = NULL; 909 list_for_each_entry(port, &tcm->dp_resources, list) { 910 if (!tb_port_is_dpin(port)) 911 continue; 912 913 if (tb_port_is_enabled(port)) { 914 tb_port_dbg(port, "in use\n"); 915 continue; 916 } 917 918 tb_port_dbg(port, "DP IN available\n"); 919 920 out = tb_find_dp_out(tb, port); 921 if (out) { 922 in = port; 923 break; 924 } 925 } 926 927 if (!in) { 928 tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n"); 929 return; 930 } 931 if (!out) { 932 tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n"); 933 return; 934 } 935 936 /* 937 * This is only applicable to links that are not bonded (so 938 * when Thunderbolt 1 hardware is involved somewhere in the 939 * topology). For these try to share the DP bandwidth between 940 * the two lanes. 941 */ 942 link_nr = 1; 943 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { 944 if (tb_tunnel_is_dp(tunnel)) { 945 link_nr = 0; 946 break; 947 } 948 } 949 950 /* 951 * DP stream needs the domain to be active so runtime resume 952 * both ends of the tunnel. 953 * 954 * This should bring the routers in the middle active as well 955 * and keeps the domain from runtime suspending while the DP 956 * tunnel is active. 957 */ 958 pm_runtime_get_sync(&in->sw->dev); 959 pm_runtime_get_sync(&out->sw->dev); 960 961 if (tb_switch_alloc_dp_resource(in->sw, in)) { 962 tb_port_dbg(in, "no resource available for DP IN, not tunneling\n"); 963 goto err_rpm_put; 964 } 965 966 /* Make all unused USB3 bandwidth available for the new DP tunnel */ 967 ret = tb_release_unused_usb3_bandwidth(tb, in, out); 968 if (ret) { 969 tb_warn(tb, "failed to release unused bandwidth\n"); 970 goto err_dealloc_dp; 971 } 972 973 ret = tb_available_bandwidth(tb, in, out, &available_up, 974 &available_down); 975 if (ret) 976 goto err_reclaim; 977 978 tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n", 979 available_up, available_down); 980 981 tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up, 982 available_down); 983 if (!tunnel) { 984 tb_port_dbg(out, "could not allocate DP tunnel\n"); 985 goto err_reclaim; 986 } 987 988 if (tb_tunnel_activate(tunnel)) { 989 tb_port_info(out, "DP tunnel activation failed, aborting\n"); 990 goto err_free; 991 } 992 993 list_add_tail(&tunnel->list, &tcm->tunnel_list); 994 tb_reclaim_usb3_bandwidth(tb, in, out); 995 return; 996 997 err_free: 998 tb_tunnel_free(tunnel); 999 err_reclaim: 1000 tb_reclaim_usb3_bandwidth(tb, in, out); 1001 err_dealloc_dp: 1002 tb_switch_dealloc_dp_resource(in->sw, in); 1003 err_rpm_put: 1004 pm_runtime_mark_last_busy(&out->sw->dev); 1005 pm_runtime_put_autosuspend(&out->sw->dev); 1006 pm_runtime_mark_last_busy(&in->sw->dev); 1007 pm_runtime_put_autosuspend(&in->sw->dev); 1008 } 1009 1010 static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port) 1011 { 1012 struct tb_port *in, *out; 1013 struct tb_tunnel *tunnel; 1014 1015 if (tb_port_is_dpin(port)) { 1016 tb_port_dbg(port, "DP IN resource unavailable\n"); 1017 in = port; 1018 out = NULL; 1019 } else { 1020 tb_port_dbg(port, "DP OUT resource unavailable\n"); 1021 in = NULL; 1022 out = port; 1023 } 1024 1025 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out); 1026 tb_deactivate_and_free_tunnel(tunnel); 1027 list_del_init(&port->list); 1028 1029 /* 1030 * See if there is another DP OUT port that can be used for 1031 * to create another tunnel. 1032 */ 1033 tb_tunnel_dp(tb); 1034 } 1035 1036 static void tb_dp_resource_available(struct tb *tb, struct tb_port *port) 1037 { 1038 struct tb_cm *tcm = tb_priv(tb); 1039 struct tb_port *p; 1040 1041 if (tb_port_is_enabled(port)) 1042 return; 1043 1044 list_for_each_entry(p, &tcm->dp_resources, list) { 1045 if (p == port) 1046 return; 1047 } 1048 1049 tb_port_dbg(port, "DP %s resource available\n", 1050 tb_port_is_dpin(port) ? "IN" : "OUT"); 1051 list_add_tail(&port->list, &tcm->dp_resources); 1052 1053 /* Look for suitable DP IN <-> DP OUT pairs now */ 1054 tb_tunnel_dp(tb); 1055 } 1056 1057 static void tb_disconnect_and_release_dp(struct tb *tb) 1058 { 1059 struct tb_cm *tcm = tb_priv(tb); 1060 struct tb_tunnel *tunnel, *n; 1061 1062 /* 1063 * Tear down all DP tunnels and release their resources. They 1064 * will be re-established after resume based on plug events. 1065 */ 1066 list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) { 1067 if (tb_tunnel_is_dp(tunnel)) 1068 tb_deactivate_and_free_tunnel(tunnel); 1069 } 1070 1071 while (!list_empty(&tcm->dp_resources)) { 1072 struct tb_port *port; 1073 1074 port = list_first_entry(&tcm->dp_resources, 1075 struct tb_port, list); 1076 list_del_init(&port->list); 1077 } 1078 } 1079 1080 static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw) 1081 { 1082 struct tb_tunnel *tunnel; 1083 struct tb_port *up; 1084 1085 up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP); 1086 if (WARN_ON(!up)) 1087 return -ENODEV; 1088 1089 tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up); 1090 if (WARN_ON(!tunnel)) 1091 return -ENODEV; 1092 1093 tb_tunnel_deactivate(tunnel); 1094 list_del(&tunnel->list); 1095 tb_tunnel_free(tunnel); 1096 return 0; 1097 } 1098 1099 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw) 1100 { 1101 struct tb_port *up, *down, *port; 1102 struct tb_cm *tcm = tb_priv(tb); 1103 struct tb_switch *parent_sw; 1104 struct tb_tunnel *tunnel; 1105 1106 up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP); 1107 if (!up) 1108 return 0; 1109 1110 /* 1111 * Look up available down port. Since we are chaining it should 1112 * be found right above this switch. 1113 */ 1114 parent_sw = tb_to_switch(sw->dev.parent); 1115 port = tb_port_at(tb_route(sw), parent_sw); 1116 down = tb_find_pcie_down(parent_sw, port); 1117 if (!down) 1118 return 0; 1119 1120 tunnel = tb_tunnel_alloc_pci(tb, up, down); 1121 if (!tunnel) 1122 return -ENOMEM; 1123 1124 if (tb_tunnel_activate(tunnel)) { 1125 tb_port_info(up, 1126 "PCIe tunnel activation failed, aborting\n"); 1127 tb_tunnel_free(tunnel); 1128 return -EIO; 1129 } 1130 1131 list_add_tail(&tunnel->list, &tcm->tunnel_list); 1132 return 0; 1133 } 1134 1135 static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, 1136 int transmit_path, int transmit_ring, 1137 int receive_path, int receive_ring) 1138 { 1139 struct tb_cm *tcm = tb_priv(tb); 1140 struct tb_port *nhi_port, *dst_port; 1141 struct tb_tunnel *tunnel; 1142 struct tb_switch *sw; 1143 1144 sw = tb_to_switch(xd->dev.parent); 1145 dst_port = tb_port_at(xd->route, sw); 1146 nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI); 1147 1148 mutex_lock(&tb->lock); 1149 tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path, 1150 transmit_ring, receive_path, receive_ring); 1151 if (!tunnel) { 1152 mutex_unlock(&tb->lock); 1153 return -ENOMEM; 1154 } 1155 1156 if (tb_tunnel_activate(tunnel)) { 1157 tb_port_info(nhi_port, 1158 "DMA tunnel activation failed, aborting\n"); 1159 tb_tunnel_free(tunnel); 1160 mutex_unlock(&tb->lock); 1161 return -EIO; 1162 } 1163 1164 list_add_tail(&tunnel->list, &tcm->tunnel_list); 1165 mutex_unlock(&tb->lock); 1166 return 0; 1167 } 1168 1169 static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, 1170 int transmit_path, int transmit_ring, 1171 int receive_path, int receive_ring) 1172 { 1173 struct tb_cm *tcm = tb_priv(tb); 1174 struct tb_port *nhi_port, *dst_port; 1175 struct tb_tunnel *tunnel, *n; 1176 struct tb_switch *sw; 1177 1178 sw = tb_to_switch(xd->dev.parent); 1179 dst_port = tb_port_at(xd->route, sw); 1180 nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI); 1181 1182 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { 1183 if (!tb_tunnel_is_dma(tunnel)) 1184 continue; 1185 if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port) 1186 continue; 1187 1188 if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring, 1189 receive_path, receive_ring)) 1190 tb_deactivate_and_free_tunnel(tunnel); 1191 } 1192 } 1193 1194 static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, 1195 int transmit_path, int transmit_ring, 1196 int receive_path, int receive_ring) 1197 { 1198 if (!xd->is_unplugged) { 1199 mutex_lock(&tb->lock); 1200 __tb_disconnect_xdomain_paths(tb, xd, transmit_path, 1201 transmit_ring, receive_path, 1202 receive_ring); 1203 mutex_unlock(&tb->lock); 1204 } 1205 return 0; 1206 } 1207 1208 /* hotplug handling */ 1209 1210 /* 1211 * tb_handle_hotplug() - handle hotplug event 1212 * 1213 * Executes on tb->wq. 1214 */ 1215 static void tb_handle_hotplug(struct work_struct *work) 1216 { 1217 struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work); 1218 struct tb *tb = ev->tb; 1219 struct tb_cm *tcm = tb_priv(tb); 1220 struct tb_switch *sw; 1221 struct tb_port *port; 1222 1223 /* Bring the domain back from sleep if it was suspended */ 1224 pm_runtime_get_sync(&tb->dev); 1225 1226 mutex_lock(&tb->lock); 1227 if (!tcm->hotplug_active) 1228 goto out; /* during init, suspend or shutdown */ 1229 1230 sw = tb_switch_find_by_route(tb, ev->route); 1231 if (!sw) { 1232 tb_warn(tb, 1233 "hotplug event from non existent switch %llx:%x (unplug: %d)\n", 1234 ev->route, ev->port, ev->unplug); 1235 goto out; 1236 } 1237 if (ev->port > sw->config.max_port_number) { 1238 tb_warn(tb, 1239 "hotplug event from non existent port %llx:%x (unplug: %d)\n", 1240 ev->route, ev->port, ev->unplug); 1241 goto put_sw; 1242 } 1243 port = &sw->ports[ev->port]; 1244 if (tb_is_upstream_port(port)) { 1245 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n", 1246 ev->route, ev->port, ev->unplug); 1247 goto put_sw; 1248 } 1249 1250 pm_runtime_get_sync(&sw->dev); 1251 1252 if (ev->unplug) { 1253 tb_retimer_remove_all(port); 1254 1255 if (tb_port_has_remote(port)) { 1256 tb_port_dbg(port, "switch unplugged\n"); 1257 tb_sw_set_unplugged(port->remote->sw); 1258 tb_free_invalid_tunnels(tb); 1259 tb_remove_dp_resources(port->remote->sw); 1260 tb_switch_tmu_disable(port->remote->sw); 1261 tb_switch_unconfigure_link(port->remote->sw); 1262 tb_switch_lane_bonding_disable(port->remote->sw); 1263 tb_switch_remove(port->remote->sw); 1264 port->remote = NULL; 1265 if (port->dual_link_port) 1266 port->dual_link_port->remote = NULL; 1267 /* Maybe we can create another DP tunnel */ 1268 tb_tunnel_dp(tb); 1269 } else if (port->xdomain) { 1270 struct tb_xdomain *xd = tb_xdomain_get(port->xdomain); 1271 1272 tb_port_dbg(port, "xdomain unplugged\n"); 1273 /* 1274 * Service drivers are unbound during 1275 * tb_xdomain_remove() so setting XDomain as 1276 * unplugged here prevents deadlock if they call 1277 * tb_xdomain_disable_paths(). We will tear down 1278 * all the tunnels below. 1279 */ 1280 xd->is_unplugged = true; 1281 tb_xdomain_remove(xd); 1282 port->xdomain = NULL; 1283 __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1); 1284 tb_xdomain_put(xd); 1285 tb_port_unconfigure_xdomain(port); 1286 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) { 1287 tb_dp_resource_unavailable(tb, port); 1288 } else { 1289 tb_port_dbg(port, 1290 "got unplug event for disconnected port, ignoring\n"); 1291 } 1292 } else if (port->remote) { 1293 tb_port_dbg(port, "got plug event for connected port, ignoring\n"); 1294 } else { 1295 if (tb_port_is_null(port)) { 1296 tb_port_dbg(port, "hotplug: scanning\n"); 1297 tb_scan_port(port); 1298 if (!port->remote) 1299 tb_port_dbg(port, "hotplug: no switch found\n"); 1300 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) { 1301 tb_dp_resource_available(tb, port); 1302 } 1303 } 1304 1305 pm_runtime_mark_last_busy(&sw->dev); 1306 pm_runtime_put_autosuspend(&sw->dev); 1307 1308 put_sw: 1309 tb_switch_put(sw); 1310 out: 1311 mutex_unlock(&tb->lock); 1312 1313 pm_runtime_mark_last_busy(&tb->dev); 1314 pm_runtime_put_autosuspend(&tb->dev); 1315 1316 kfree(ev); 1317 } 1318 1319 /* 1320 * tb_schedule_hotplug_handler() - callback function for the control channel 1321 * 1322 * Delegates to tb_handle_hotplug. 1323 */ 1324 static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type, 1325 const void *buf, size_t size) 1326 { 1327 const struct cfg_event_pkg *pkg = buf; 1328 u64 route; 1329 1330 if (type != TB_CFG_PKG_EVENT) { 1331 tb_warn(tb, "unexpected event %#x, ignoring\n", type); 1332 return; 1333 } 1334 1335 route = tb_cfg_get_route(&pkg->header); 1336 1337 if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) { 1338 tb_warn(tb, "could not ack plug event on %llx:%x\n", route, 1339 pkg->port); 1340 } 1341 1342 tb_queue_hotplug(tb, route, pkg->port, pkg->unplug); 1343 } 1344 1345 static void tb_stop(struct tb *tb) 1346 { 1347 struct tb_cm *tcm = tb_priv(tb); 1348 struct tb_tunnel *tunnel; 1349 struct tb_tunnel *n; 1350 1351 cancel_delayed_work(&tcm->remove_work); 1352 /* tunnels are only present after everything has been initialized */ 1353 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { 1354 /* 1355 * DMA tunnels require the driver to be functional so we 1356 * tear them down. Other protocol tunnels can be left 1357 * intact. 1358 */ 1359 if (tb_tunnel_is_dma(tunnel)) 1360 tb_tunnel_deactivate(tunnel); 1361 tb_tunnel_free(tunnel); 1362 } 1363 tb_switch_remove(tb->root_switch); 1364 tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */ 1365 } 1366 1367 static int tb_scan_finalize_switch(struct device *dev, void *data) 1368 { 1369 if (tb_is_switch(dev)) { 1370 struct tb_switch *sw = tb_to_switch(dev); 1371 1372 /* 1373 * If we found that the switch was already setup by the 1374 * boot firmware, mark it as authorized now before we 1375 * send uevent to userspace. 1376 */ 1377 if (sw->boot) 1378 sw->authorized = 1; 1379 1380 dev_set_uevent_suppress(dev, false); 1381 kobject_uevent(&dev->kobj, KOBJ_ADD); 1382 device_for_each_child(dev, NULL, tb_scan_finalize_switch); 1383 } 1384 1385 return 0; 1386 } 1387 1388 static int tb_start(struct tb *tb) 1389 { 1390 struct tb_cm *tcm = tb_priv(tb); 1391 int ret; 1392 1393 tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0); 1394 if (IS_ERR(tb->root_switch)) 1395 return PTR_ERR(tb->root_switch); 1396 1397 /* 1398 * ICM firmware upgrade needs running firmware and in native 1399 * mode that is not available so disable firmware upgrade of the 1400 * root switch. 1401 */ 1402 tb->root_switch->no_nvm_upgrade = true; 1403 /* All USB4 routers support runtime PM */ 1404 tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch); 1405 1406 ret = tb_switch_configure(tb->root_switch); 1407 if (ret) { 1408 tb_switch_put(tb->root_switch); 1409 return ret; 1410 } 1411 1412 /* Announce the switch to the world */ 1413 ret = tb_switch_add(tb->root_switch); 1414 if (ret) { 1415 tb_switch_put(tb->root_switch); 1416 return ret; 1417 } 1418 1419 /* Enable TMU if it is off */ 1420 tb_switch_tmu_enable(tb->root_switch); 1421 /* Full scan to discover devices added before the driver was loaded. */ 1422 tb_scan_switch(tb->root_switch); 1423 /* Find out tunnels created by the boot firmware */ 1424 tb_discover_tunnels(tb); 1425 /* Add DP resources from the DP tunnels created by the boot firmware */ 1426 tb_discover_dp_resources(tb); 1427 /* 1428 * If the boot firmware did not create USB 3.x tunnels create them 1429 * now for the whole topology. 1430 */ 1431 tb_create_usb3_tunnels(tb->root_switch); 1432 /* Add DP IN resources for the root switch */ 1433 tb_add_dp_resources(tb->root_switch); 1434 /* Make the discovered switches available to the userspace */ 1435 device_for_each_child(&tb->root_switch->dev, NULL, 1436 tb_scan_finalize_switch); 1437 1438 /* Allow tb_handle_hotplug to progress events */ 1439 tcm->hotplug_active = true; 1440 return 0; 1441 } 1442 1443 static int tb_suspend_noirq(struct tb *tb) 1444 { 1445 struct tb_cm *tcm = tb_priv(tb); 1446 1447 tb_dbg(tb, "suspending...\n"); 1448 tb_disconnect_and_release_dp(tb); 1449 tb_switch_suspend(tb->root_switch, false); 1450 tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */ 1451 tb_dbg(tb, "suspend finished\n"); 1452 1453 return 0; 1454 } 1455 1456 static void tb_restore_children(struct tb_switch *sw) 1457 { 1458 struct tb_port *port; 1459 1460 /* No need to restore if the router is already unplugged */ 1461 if (sw->is_unplugged) 1462 return; 1463 1464 if (tb_enable_tmu(sw)) 1465 tb_sw_warn(sw, "failed to restore TMU configuration\n"); 1466 1467 tb_switch_for_each_port(sw, port) { 1468 if (!tb_port_has_remote(port) && !port->xdomain) 1469 continue; 1470 1471 if (port->remote) { 1472 tb_switch_lane_bonding_enable(port->remote->sw); 1473 tb_switch_configure_link(port->remote->sw); 1474 1475 tb_restore_children(port->remote->sw); 1476 } else if (port->xdomain) { 1477 tb_port_configure_xdomain(port); 1478 } 1479 } 1480 } 1481 1482 static int tb_resume_noirq(struct tb *tb) 1483 { 1484 struct tb_cm *tcm = tb_priv(tb); 1485 struct tb_tunnel *tunnel, *n; 1486 unsigned int usb3_delay = 0; 1487 LIST_HEAD(tunnels); 1488 1489 tb_dbg(tb, "resuming...\n"); 1490 1491 /* remove any pci devices the firmware might have setup */ 1492 tb_switch_reset(tb->root_switch); 1493 1494 tb_switch_resume(tb->root_switch); 1495 tb_free_invalid_tunnels(tb); 1496 tb_free_unplugged_children(tb->root_switch); 1497 tb_restore_children(tb->root_switch); 1498 1499 /* 1500 * If we get here from suspend to disk the boot firmware or the 1501 * restore kernel might have created tunnels of its own. Since 1502 * we cannot be sure they are usable for us we find and tear 1503 * them down. 1504 */ 1505 tb_switch_discover_tunnels(tb->root_switch, &tunnels, false); 1506 list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) { 1507 if (tb_tunnel_is_usb3(tunnel)) 1508 usb3_delay = 500; 1509 tb_tunnel_deactivate(tunnel); 1510 tb_tunnel_free(tunnel); 1511 } 1512 1513 /* Re-create our tunnels now */ 1514 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { 1515 /* USB3 requires delay before it can be re-activated */ 1516 if (tb_tunnel_is_usb3(tunnel)) { 1517 msleep(usb3_delay); 1518 /* Only need to do it once */ 1519 usb3_delay = 0; 1520 } 1521 tb_tunnel_restart(tunnel); 1522 } 1523 if (!list_empty(&tcm->tunnel_list)) { 1524 /* 1525 * the pcie links need some time to get going. 1526 * 100ms works for me... 1527 */ 1528 tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n"); 1529 msleep(100); 1530 } 1531 /* Allow tb_handle_hotplug to progress events */ 1532 tcm->hotplug_active = true; 1533 tb_dbg(tb, "resume finished\n"); 1534 1535 return 0; 1536 } 1537 1538 static int tb_free_unplugged_xdomains(struct tb_switch *sw) 1539 { 1540 struct tb_port *port; 1541 int ret = 0; 1542 1543 tb_switch_for_each_port(sw, port) { 1544 if (tb_is_upstream_port(port)) 1545 continue; 1546 if (port->xdomain && port->xdomain->is_unplugged) { 1547 tb_retimer_remove_all(port); 1548 tb_xdomain_remove(port->xdomain); 1549 tb_port_unconfigure_xdomain(port); 1550 port->xdomain = NULL; 1551 ret++; 1552 } else if (port->remote) { 1553 ret += tb_free_unplugged_xdomains(port->remote->sw); 1554 } 1555 } 1556 1557 return ret; 1558 } 1559 1560 static int tb_freeze_noirq(struct tb *tb) 1561 { 1562 struct tb_cm *tcm = tb_priv(tb); 1563 1564 tcm->hotplug_active = false; 1565 return 0; 1566 } 1567 1568 static int tb_thaw_noirq(struct tb *tb) 1569 { 1570 struct tb_cm *tcm = tb_priv(tb); 1571 1572 tcm->hotplug_active = true; 1573 return 0; 1574 } 1575 1576 static void tb_complete(struct tb *tb) 1577 { 1578 /* 1579 * Release any unplugged XDomains and if there is a case where 1580 * another domain is swapped in place of unplugged XDomain we 1581 * need to run another rescan. 1582 */ 1583 mutex_lock(&tb->lock); 1584 if (tb_free_unplugged_xdomains(tb->root_switch)) 1585 tb_scan_switch(tb->root_switch); 1586 mutex_unlock(&tb->lock); 1587 } 1588 1589 static int tb_runtime_suspend(struct tb *tb) 1590 { 1591 struct tb_cm *tcm = tb_priv(tb); 1592 1593 mutex_lock(&tb->lock); 1594 tb_switch_suspend(tb->root_switch, true); 1595 tcm->hotplug_active = false; 1596 mutex_unlock(&tb->lock); 1597 1598 return 0; 1599 } 1600 1601 static void tb_remove_work(struct work_struct *work) 1602 { 1603 struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work); 1604 struct tb *tb = tcm_to_tb(tcm); 1605 1606 mutex_lock(&tb->lock); 1607 if (tb->root_switch) { 1608 tb_free_unplugged_children(tb->root_switch); 1609 tb_free_unplugged_xdomains(tb->root_switch); 1610 } 1611 mutex_unlock(&tb->lock); 1612 } 1613 1614 static int tb_runtime_resume(struct tb *tb) 1615 { 1616 struct tb_cm *tcm = tb_priv(tb); 1617 struct tb_tunnel *tunnel, *n; 1618 1619 mutex_lock(&tb->lock); 1620 tb_switch_resume(tb->root_switch); 1621 tb_free_invalid_tunnels(tb); 1622 tb_restore_children(tb->root_switch); 1623 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) 1624 tb_tunnel_restart(tunnel); 1625 tcm->hotplug_active = true; 1626 mutex_unlock(&tb->lock); 1627 1628 /* 1629 * Schedule cleanup of any unplugged devices. Run this in a 1630 * separate thread to avoid possible deadlock if the device 1631 * removal runtime resumes the unplugged device. 1632 */ 1633 queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50)); 1634 return 0; 1635 } 1636 1637 static const struct tb_cm_ops tb_cm_ops = { 1638 .start = tb_start, 1639 .stop = tb_stop, 1640 .suspend_noirq = tb_suspend_noirq, 1641 .resume_noirq = tb_resume_noirq, 1642 .freeze_noirq = tb_freeze_noirq, 1643 .thaw_noirq = tb_thaw_noirq, 1644 .complete = tb_complete, 1645 .runtime_suspend = tb_runtime_suspend, 1646 .runtime_resume = tb_runtime_resume, 1647 .handle_event = tb_handle_event, 1648 .disapprove_switch = tb_disconnect_pci, 1649 .approve_switch = tb_tunnel_pci, 1650 .approve_xdomain_paths = tb_approve_xdomain_paths, 1651 .disconnect_xdomain_paths = tb_disconnect_xdomain_paths, 1652 }; 1653 1654 /* 1655 * During suspend the Thunderbolt controller is reset and all PCIe 1656 * tunnels are lost. The NHI driver will try to reestablish all tunnels 1657 * during resume. This adds device links between the tunneled PCIe 1658 * downstream ports and the NHI so that the device core will make sure 1659 * NHI is resumed first before the rest. 1660 */ 1661 static void tb_apple_add_links(struct tb_nhi *nhi) 1662 { 1663 struct pci_dev *upstream, *pdev; 1664 1665 if (!x86_apple_machine) 1666 return; 1667 1668 switch (nhi->pdev->device) { 1669 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE: 1670 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C: 1671 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI: 1672 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI: 1673 break; 1674 default: 1675 return; 1676 } 1677 1678 upstream = pci_upstream_bridge(nhi->pdev); 1679 while (upstream) { 1680 if (!pci_is_pcie(upstream)) 1681 return; 1682 if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM) 1683 break; 1684 upstream = pci_upstream_bridge(upstream); 1685 } 1686 1687 if (!upstream) 1688 return; 1689 1690 /* 1691 * For each hotplug downstream port, create add device link 1692 * back to NHI so that PCIe tunnels can be re-established after 1693 * sleep. 1694 */ 1695 for_each_pci_bridge(pdev, upstream->subordinate) { 1696 const struct device_link *link; 1697 1698 if (!pci_is_pcie(pdev)) 1699 continue; 1700 if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM || 1701 !pdev->is_hotplug_bridge) 1702 continue; 1703 1704 link = device_link_add(&pdev->dev, &nhi->pdev->dev, 1705 DL_FLAG_AUTOREMOVE_SUPPLIER | 1706 DL_FLAG_PM_RUNTIME); 1707 if (link) { 1708 dev_dbg(&nhi->pdev->dev, "created link from %s\n", 1709 dev_name(&pdev->dev)); 1710 } else { 1711 dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n", 1712 dev_name(&pdev->dev)); 1713 } 1714 } 1715 } 1716 1717 struct tb *tb_probe(struct tb_nhi *nhi) 1718 { 1719 struct tb_cm *tcm; 1720 struct tb *tb; 1721 1722 tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm)); 1723 if (!tb) 1724 return NULL; 1725 1726 if (tb_acpi_may_tunnel_pcie()) 1727 tb->security_level = TB_SECURITY_USER; 1728 else 1729 tb->security_level = TB_SECURITY_NOPCIE; 1730 1731 tb->cm_ops = &tb_cm_ops; 1732 1733 tcm = tb_priv(tb); 1734 INIT_LIST_HEAD(&tcm->tunnel_list); 1735 INIT_LIST_HEAD(&tcm->dp_resources); 1736 INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work); 1737 1738 tb_dbg(tb, "using software connection manager\n"); 1739 1740 tb_apple_add_links(nhi); 1741 tb_acpi_add_links(nhi); 1742 1743 return tb; 1744 } 1745