// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 * @remove_work: Work used to remove any unplugged routers after
 *		 runtime resume
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
	struct delayed_work remove_work;
};

static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
{
	return ((void *)tcm - sizeof(struct tb));
}

struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}

static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP OUT resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}

static void tb_discover_tunnels(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port);
			break;

		default:
			break;
		}

		if (!tunnel)
			continue;

		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		}

		list_add_tail(&tunnel->list, &tcm->tunnel_list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_discover_tunnels(port->remote->sw);
	}
}
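
/*
 * Note on the discovery above (summary, not new behavior): tunnels set
 * up by the boot firmware are only read back here, never modified. For
 * each discovered PCIe tunnel every router between the tunnel endpoints
 * is marked with sw->boot so that tb_scan_finalize_switch() can mark it
 * authorized before uevents are sent to userspace.
 */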

static int tb_port_configure_xdomain(struct tb_port *port)
{
	/*
	 * XDomain paths currently only support single lane so we must
	 * disable the other lane according to USB4 spec.
	 */
	tb_port_disable(port->dual_link_port);

	if (tb_switch_is_usb4(port->sw))
		return usb4_port_configure_xdomain(port);
	return tb_lc_configure_xdomain(port);
}

static void tb_port_unconfigure_xdomain(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		usb4_port_unconfigure_xdomain(port);
	else
		tb_lc_unconfigure_xdomain(port);

	tb_port_enable(port->dual_link_port);
}

static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_port_configure_xdomain(port);
		tb_xdomain_add(xd);
	}
}

static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down && !tb_usb3_port_is_enabled(down))
		return down;
	return NULL;
}

static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}

static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
						   struct tb_port *src_port,
						   struct tb_port *dst_port)
{
	struct tb_port *port, *usb3_down;
	struct tb_switch *sw;

	/* Pick the router that is deepest in the topology */
	if (dst_port->sw->config.depth > src_port->sw->config.depth)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	/* Can't be the host router */
	if (sw == tb->root_switch)
		return NULL;

	/* Find the downstream USB4 port that leads to this router */
	port = tb_port_at(tb_route(sw), tb->root_switch);
	/* Find the corresponding host router USB3 downstream port */
	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
	if (!usb3_down)
		return NULL;

	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
}

static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
				  struct tb_port *dst_port,
				  int *available_up, int *available_down)
{
	int usb3_consumed_up, usb3_consumed_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_port *port;

	tb_port_dbg(dst_port, "calculating available bandwidth\n");

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel) {
		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
						   &usb3_consumed_down);
		if (ret)
			return ret;
	} else {
		usb3_consumed_up = 0;
		usb3_consumed_down = 0;
	}

	*available_up = *available_down = 40000;

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int link_speed, link_width, up_bw, down_bw;

		if (!tb_port_is_null(port))
			continue;

		if (tb_is_upstream_port(port)) {
			link_speed = port->sw->link_speed;
		} else {
			link_speed = tb_port_get_link_speed(port);
			if (link_speed < 0)
				return link_speed;
		}

		link_width = port->bonded ? 2 : 1;

		up_bw = link_speed * link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		up_bw -= up_bw / 10;
		down_bw = up_bw;

		tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);

		/*
		 * Find all DP tunnels that cross the port and reduce
		 * their consumed bandwidth from the available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int dp_consumed_up, dp_consumed_down;

			if (!tb_tunnel_is_dp(tunnel))
				continue;

			if (!tb_tunnel_port_on_path(tunnel, port))
				continue;

			ret = tb_tunnel_consumed_bandwidth(tunnel,
							   &dp_consumed_up,
							   &dp_consumed_down);
			if (ret)
				return ret;

			up_bw -= dp_consumed_up;
			down_bw -= dp_consumed_down;
		}

		/*
		 * If USB3 is tunneled from the host router down to the
		 * branch leading to port we need to take USB3 consumed
		 * bandwidth into account regardless of whether it
		 * actually crosses the port.
		 */
		up_bw -= usb3_consumed_up;
		down_bw -= usb3_consumed_down;

		if (up_bw < *available_up)
			*available_up = up_bw;
		if (down_bw < *available_down)
			*available_down = down_bw;
	}

	if (*available_up < 0)
		*available_up = 0;
	if (*available_down < 0)
		*available_down = 0;

	return 0;
}
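
/*
 * Illustrative example of the calculation above (the numbers are
 * hypothetical, not taken from any specific device): a bonded link with
 * two 20 Gb/s lanes gives 2 * 20 * 1000 = 40000 Mb/s, and the 10% guard
 * band leaves 36000 Mb/s. If a DP tunnel crossing the same link already
 * consumes 17280 Mb/s downstream, at most 36000 - 17280 = 18720 Mb/s
 * remain for that link, further reduced by any USB3 tunnel on the
 * branch.
 */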

static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
}

static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
				      struct tb_port *dst_port)
{
	int ret, available_up, available_down;
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return;

	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");

	/*
	 * Calculate available bandwidth for the first hop USB3 tunnel.
	 * That determines the whole USB3 bandwidth for this branch.
	 */
	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
				     &available_up, &available_down);
	if (ret) {
		tb_warn(tb, "failed to calculate available bandwidth\n");
		return;
	}

	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
	       available_up, available_down);

	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
}

static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	if (!sw->link_usb4)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_port_at(tb_route(sw), parent);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;

		/* Make all unused bandwidth available for the new tunnel */
		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
		if (ret)
			return ret;
	}

	ret = tb_available_bandwidth(tb, down, up, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return ret;
}

static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}
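
/*
 * Summary of the USB3 bandwidth handling above: before a new tunnel is
 * created for a chained router, the unused bandwidth of the first hop
 * USB3 tunnel is released, the available bandwidth over the path is
 * calculated, the new tunnel is allocated within that budget, and
 * finally the first hop tunnel reclaims whatever is left. The same
 * reclaim runs on the error paths so the budget is not leaked.
 */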

static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 * @sw: Switch to scan
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	pm_runtime_get_sync(&sw->dev);

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}

/**
 * tb_scan_port() - check for and initialize switches below port
 * @port: Port to scan
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		return;
	}

	tb_retimer_scan(port);

	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		return;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		tb_port_unconfigure_xdomain(port);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	/*
	 * At the moment runtime PM is supported only for Thunderbolt 2
	 * and beyond (devices with LC).
	 */
	sw->rpm = sw->generation > 1;

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	tb_switch_lane_bonding_enable(sw);
	/* Set the link configured */
	tb_switch_configure_link(sw);

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/* Scan upstream retimers */
	tb_retimer_scan(upstream_port);

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * any new ones.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);
	tb_scan_switch(sw);
}
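
/*
 * Note on teardown ordering (mirrors the function below): a tunnel is
 * deactivated and removed from the tunnel list before any bandwidth is
 * given back, and a DP tunnel additionally releases its DP IN resource
 * and drops the runtime PM references taken when it was set up.
 */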

static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	struct tb_port *src_port, *dst_port;
	struct tb *tb;

	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	tb = tunnel->tb;
	src_port = tunnel->src_port;
	dst_port = tunnel->dst_port;

	switch (tunnel->type) {
	case TB_TUNNEL_DP:
		/*
		 * In case of DP tunnel make sure the DP IN resource is
		 * deallocated properly.
		 */
		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
		/* Now we can allow the domain to runtime suspend again */
		pm_runtime_mark_last_busy(&dst_port->sw->dev);
		pm_runtime_put_autosuspend(&dst_port->sw->dev);
		pm_runtime_mark_last_busy(&src_port->sw->dev);
		pm_runtime_put_autosuspend(&src_port->sw->dev);
		fallthrough;

	case TB_TUNNEL_USB3:
		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
		break;

	default:
		/*
		 * PCIe and DMA tunnels do not consume guaranteed
		 * bandwidth.
		 */
		break;
	}

	tb_tunnel_free(tunnel);
}

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 * @tb: Domain whose tunnels to check
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 * @sw: Switch whose children to traverse
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		if (tb_pci_port_is_enabled(down))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}
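
/*
 * Example of the hard-coded mapping above: on a Falcon Ridge host
 * router a device plugged to phy port 0 is mapped to PCIe down port 6
 * and a device plugged to phy port 1 to PCIe down port 8. When the
 * mapping cannot be used, the first unused PCIe down port is picked
 * instead.
 */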

static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
	struct tb_port *host_port, *port;
	struct tb_cm *tcm = tb_priv(tb);

	host_port = tb_route(in->sw) ?
		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;

	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpout(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP OUT available\n");

		/*
		 * Keep the DP tunnel under the topology starting from
		 * the same host router downstream port.
		 */
		if (host_port && tb_route(port->sw)) {
			struct tb_port *p;

			p = tb_port_at(tb_route(port->sw), tb->root_switch);
			if (p != host_port)
				continue;
		}

		return port;
	}

	return NULL;
}
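
/*
 * Overview of the function below: both ends of a prospective DP tunnel
 * are runtime resumed first, then the DP IN resource is allocated,
 * unused USB3 bandwidth is released, the available bandwidth is
 * calculated, and the tunnel is allocated and activated. Each error
 * label unwinds exactly the steps taken before it, in reverse order.
 */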

static void tb_tunnel_dp(struct tb *tb)
{
	int available_up, available_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpin(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP IN available\n");

		out = tb_find_dp_out(tb, port);
		if (out) {
			in = port;
			break;
		}
	}

	if (!in) {
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
		return;
	}
	if (!out) {
		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
		return;
	}

	/*
	 * DP stream needs the domain to be active so runtime resume
	 * both ends of the tunnel.
	 *
	 * This should bring the routers in the middle active as well
	 * and keeps the domain from runtime suspending while the DP
	 * tunnel is active.
	 */
	pm_runtime_get_sync(&in->sw->dev);
	pm_runtime_get_sync(&out->sw->dev);

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		goto err_rpm_put;
	}

	/* Make all unused USB3 bandwidth available for the new DP tunnel */
	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
	if (ret) {
		tb_warn(tb, "failed to release unused bandwidth\n");
		goto err_dealloc_dp;
	}

	ret = tb_available_bandwidth(tb, in, out, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
	       available_up, available_down);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	tb_reclaim_usb3_bandwidth(tb, in, out);
	return;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	tb_reclaim_usb3_bandwidth(tb, in, out);
err_dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
err_rpm_put:
	pm_runtime_mark_last_busy(&out->sw->dev);
	pm_runtime_put_autosuspend(&out->sw->dev);
	pm_runtime_mark_last_busy(&in->sw->dev);
	pm_runtime_put_autosuspend(&in->sw->dev);
}

static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	if (tb_port_is_dpin(port)) {
		tb_port_dbg(port, "DP IN resource unavailable\n");
		in = port;
		out = NULL;
	} else {
		tb_port_dbg(port, "DP OUT resource unavailable\n");
		in = NULL;
		out = port;
	}

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
	tb_deactivate_and_free_tunnel(tunnel);
	list_del_init(&port->list);

	/*
	 * See if there is another DP OUT port that can be used to
	 * create another tunnel.
	 */
	tb_tunnel_dp(tb);
}

static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	if (tb_port_is_enabled(port))
		return;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);

	/* Look for suitable DP IN <-> DP OUT pairs now */
	tb_tunnel_dp(tb);
}

static void tb_disconnect_and_release_dp(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	/*
	 * Tear down all DP tunnels and release their resources. They
	 * will be re-established after resume based on plug events.
	 */
	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}

	while (!list_empty(&tcm->dp_resources)) {
		struct tb_port *port;

		port = list_first_entry(&tcm->dp_resources,
					struct tb_port, list);
		list_del_init(&port->list);
	}
}

static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *parent_sw;
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	parent_sw = tb_to_switch(sw->dev.parent);
	port = tb_port_at(tb_route(sw), parent_sw);
	down = tb_find_pcie_down(parent_sw, port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	mutex_lock(&tb->lock);
	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
				     xd->transmit_path, xd->receive_ring,
				     xd->receive_path);
	if (!tunnel) {
		mutex_unlock(&tb->lock);
		return -ENOMEM;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(nhi_port,
			     "DMA tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		mutex_unlock(&tb->lock);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	mutex_unlock(&tb->lock);
	return 0;
}

static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_port *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);

	/*
	 * It is possible that the tunnel was already torn down (in
	 * case of cable disconnect) so it is fine if we cannot find it
	 * here anymore.
	 */
	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
	tb_deactivate_and_free_tunnel(tunnel);
}

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!xd->is_unplugged) {
		mutex_lock(&tb->lock);
		__tb_disconnect_xdomain_paths(tb, xd);
		mutex_unlock(&tb->lock);
	}
	return 0;
}

/* hotplug handling */
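
/*
 * Plug events are acknowledged and queued from tb_handle_event() and
 * processed on tb->wq under tb->lock. tcm->hotplug_active gates the
 * handler so that events queued during init, suspend or shutdown are
 * drained without touching the hardware; the event structure is freed
 * at the end of tb_handle_hotplug() in all cases.
 */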

/**
 * tb_handle_hotplug() - handle hotplug event
 * @work: Work item of the queued hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;

	/* Bring the domain back from sleep if it was suspended */
	pm_runtime_get_sync(&tb->dev);

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}

	pm_runtime_get_sync(&sw->dev);

	if (ev->unplug) {
		tb_retimer_remove_all(port);

		if (tb_port_has_remote(port)) {
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_tmu_disable(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
			/* Maybe we can create another DP tunnel */
			tb_tunnel_dp(tb);
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * the path below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd);
			tb_xdomain_put(xd);
			tb_port_unconfigure_xdomain(port);
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_unavailable(tb, port);
		} else {
			tb_port_dbg(port,
				    "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else {
		if (tb_port_is_null(port)) {
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_available(tb, port);
		}
	}

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);

	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);

	kfree(ev);
}

/**
 * tb_handle_event() - callback function for the control channel
 * @tb: Domain the event belongs to
 * @type: Type of the received packet
 * @buf: Packet contents
 * @size: Size of the packet in bytes
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route;

	if (type != TB_CFG_PKG_EVENT) {
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	route = tb_cfg_get_route(&pkg->header);

	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	cancel_delayed_work(&tcm->remove_work);
	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already setup by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}
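
/*
 * Note on the uevent handling above: routers discovered while
 * tcm->hotplug_active is not yet set have their uevents suppressed in
 * tb_scan_port(). Once discovery is done, tb_start() walks the device
 * tree with tb_scan_finalize_switch() so that routers already set up by
 * the boot firmware are marked authorized before KOBJ_ADD is sent.
 */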

static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available so disable firmware upgrade of the
	 * root switch.
	 */
	tb->root_switch->no_nvm_upgrade = true;
	/* All USB4 routers support runtime PM */
	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Enable TMU if it is off */
	tb_switch_tmu_enable(tb->root_switch);
	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb->root_switch);
	/*
	 * If the boot firmware did not create USB 3.x tunnels create them
	 * now for the whole topology.
	 */
	tb_create_usb3_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_disconnect_and_release_dp(tb);
	tb_switch_suspend(tb->root_switch, false);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}

static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;

	/* No need to restore if the router is already unplugged */
	if (sw->is_unplugged)
		return;

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (port->remote) {
			tb_switch_lane_bonding_enable(port->remote->sw);
			tb_switch_configure_link(port->remote->sw);

			tb_restore_children(port->remote->sw);
		} else if (port->xdomain) {
			tb_port_configure_xdomain(port);
		}
	}
}

static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	tb_dbg(tb, "resuming...\n");

	/* Remove any PCIe devices the firmware might have set up */
	tb_switch_reset(tb->root_switch);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * The PCIe links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}

static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_xdomain_remove(port->xdomain);
			tb_port_unconfigure_xdomain(port);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}

static int tb_freeze_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = false;
	return 0;
}

static int tb_thaw_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = true;
	return 0;
}

static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains and if there is a case where
	 * another domain is swapped in place of an unplugged XDomain we
	 * need to run another rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}

static int tb_runtime_suspend(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	mutex_lock(&tb->lock);
	tb_switch_suspend(tb->root_switch, true);
	tcm->hotplug_active = false;
	mutex_unlock(&tb->lock);

	return 0;
}

static void tb_remove_work(struct work_struct *work)
{
	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
	struct tb *tb = tcm_to_tb(tcm);

	mutex_lock(&tb->lock);
	if (tb->root_switch) {
		tb_free_unplugged_children(tb->root_switch);
		tb_free_unplugged_xdomains(tb->root_switch);
	}
	mutex_unlock(&tb->lock);
}

static int tb_runtime_resume(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	mutex_lock(&tb->lock);
	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	tcm->hotplug_active = true;
	mutex_unlock(&tb->lock);

	/*
	 * Schedule cleanup of any unplugged devices. Run this in a
	 * separate thread to avoid possible deadlock if the device
	 * removal runtime resumes the unplugged device.
	 */
	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
	return 0;
}

static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.freeze_noirq = tb_freeze_noirq,
	.thaw_noirq = tb_thaw_noirq,
	.complete = tb_complete,
	.runtime_suspend = tb_runtime_suspend,
	.runtime_resume = tb_runtime_resume,
	.handle_event = tb_handle_event,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, sizeof(*tcm));
	if (!tb)
		return NULL;

	tb->security_level = TB_SECURITY_USER;
	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);
	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);

	tb_dbg(tb, "using software connection manager\n");

	return tb;
}