1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Thunderbolt driver - bus logic (NHI independent) 4 * 5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> 6 * Copyright (C) 2019, Intel Corporation 7 */ 8 9 #include <linux/slab.h> 10 #include <linux/errno.h> 11 #include <linux/delay.h> 12 13 #include "tb.h" 14 #include "tb_regs.h" 15 #include "tunnel.h" 16 17 /** 18 * struct tb_cm - Simple Thunderbolt connection manager 19 * @tunnel_list: List of active tunnels 20 * @dp_resources: List of available DP resources for DP tunneling 21 * @hotplug_active: tb_handle_hotplug will stop progressing plug 22 * events and exit if this is not set (it needs to 23 * acquire the lock one more time). Used to drain wq 24 * after cfg has been paused. 25 */ 26 struct tb_cm { 27 struct list_head tunnel_list; 28 struct list_head dp_resources; 29 bool hotplug_active; 30 }; 31 32 struct tb_hotplug_event { 33 struct work_struct work; 34 struct tb *tb; 35 u64 route; 36 u8 port; 37 bool unplug; 38 }; 39 40 static void tb_handle_hotplug(struct work_struct *work); 41 42 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug) 43 { 44 struct tb_hotplug_event *ev; 45 46 ev = kmalloc(sizeof(*ev), GFP_KERNEL); 47 if (!ev) 48 return; 49 50 ev->tb = tb; 51 ev->route = route; 52 ev->port = port; 53 ev->unplug = unplug; 54 INIT_WORK(&ev->work, tb_handle_hotplug); 55 queue_work(tb->wq, &ev->work); 56 } 57 58 /* enumeration & hot plug handling */ 59 60 static void tb_add_dp_resources(struct tb_switch *sw) 61 { 62 struct tb_cm *tcm = tb_priv(sw->tb); 63 struct tb_port *port; 64 65 tb_switch_for_each_port(sw, port) { 66 if (!tb_port_is_dpin(port)) 67 continue; 68 69 if (!tb_switch_query_dp_resource(sw, port)) 70 continue; 71 72 list_add_tail(&port->list, &tcm->dp_resources); 73 tb_port_dbg(port, "DP IN resource available\n"); 74 } 75 } 76 77 static void tb_remove_dp_resources(struct tb_switch *sw) 78 { 79 struct tb_cm *tcm = tb_priv(sw->tb); 80 struct tb_port *port, *tmp; 81 82 /* Clear children resources first */ 83 tb_switch_for_each_port(sw, port) { 84 if (tb_port_has_remote(port)) 85 tb_remove_dp_resources(port->remote->sw); 86 } 87 88 list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) { 89 if (port->sw == sw) { 90 tb_port_dbg(port, "DP OUT resource unavailable\n"); 91 list_del_init(&port->list); 92 } 93 } 94 } 95 96 static void tb_discover_tunnels(struct tb_switch *sw) 97 { 98 struct tb *tb = sw->tb; 99 struct tb_cm *tcm = tb_priv(tb); 100 struct tb_port *port; 101 102 tb_switch_for_each_port(sw, port) { 103 struct tb_tunnel *tunnel = NULL; 104 105 switch (port->config.type) { 106 case TB_TYPE_DP_HDMI_IN: 107 tunnel = tb_tunnel_discover_dp(tb, port); 108 break; 109 110 case TB_TYPE_PCIE_DOWN: 111 tunnel = tb_tunnel_discover_pci(tb, port); 112 break; 113 114 case TB_TYPE_USB3_DOWN: 115 tunnel = tb_tunnel_discover_usb3(tb, port); 116 break; 117 118 default: 119 break; 120 } 121 122 if (!tunnel) 123 continue; 124 125 if (tb_tunnel_is_pci(tunnel)) { 126 struct tb_switch *parent = tunnel->dst_port->sw; 127 128 while (parent != tunnel->src_port->sw) { 129 parent->boot = true; 130 parent = tb_switch_parent(parent); 131 } 132 } 133 134 list_add_tail(&tunnel->list, &tcm->tunnel_list); 135 } 136 137 tb_switch_for_each_port(sw, port) { 138 if (tb_port_has_remote(port)) 139 tb_discover_tunnels(port->remote->sw); 140 } 141 } 142 143 static void tb_scan_xdomain(struct tb_port *port) 144 { 145 struct tb_switch *sw = port->sw; 146 struct tb *tb = sw->tb; 147 struct tb_xdomain *xd; 148 u64 route; 149 150 route = tb_downstream_route(port); 151 xd = tb_xdomain_find_by_route(tb, route); 152 if (xd) { 153 tb_xdomain_put(xd); 154 return; 155 } 156 157 xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid, 158 NULL); 159 if (xd) { 160 tb_port_at(route, sw)->xdomain = xd; 161 tb_xdomain_add(xd); 162 } 163 } 164 165 static int tb_enable_tmu(struct tb_switch *sw) 166 { 167 int ret; 168 169 /* If it is already enabled in correct mode, don't touch it */ 170 if (tb_switch_tmu_is_enabled(sw)) 171 return 0; 172 173 ret = tb_switch_tmu_disable(sw); 174 if (ret) 175 return ret; 176 177 ret = tb_switch_tmu_post_time(sw); 178 if (ret) 179 return ret; 180 181 return tb_switch_tmu_enable(sw); 182 } 183 184 /** 185 * tb_find_unused_port() - return the first inactive port on @sw 186 * @sw: Switch to find the port on 187 * @type: Port type to look for 188 */ 189 static struct tb_port *tb_find_unused_port(struct tb_switch *sw, 190 enum tb_port_type type) 191 { 192 struct tb_port *port; 193 194 tb_switch_for_each_port(sw, port) { 195 if (tb_is_upstream_port(port)) 196 continue; 197 if (port->config.type != type) 198 continue; 199 if (!port->cap_adap) 200 continue; 201 if (tb_port_is_enabled(port)) 202 continue; 203 return port; 204 } 205 return NULL; 206 } 207 208 static struct tb_port *tb_find_usb3_down(struct tb_switch *sw, 209 const struct tb_port *port) 210 { 211 struct tb_port *down; 212 213 down = usb4_switch_map_usb3_down(sw, port); 214 if (down && !tb_usb3_port_is_enabled(down)) 215 return down; 216 return NULL; 217 } 218 219 static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type, 220 struct tb_port *src_port, 221 struct tb_port *dst_port) 222 { 223 struct tb_cm *tcm = tb_priv(tb); 224 struct tb_tunnel *tunnel; 225 226 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { 227 if (tunnel->type == type && 228 ((src_port && src_port == tunnel->src_port) || 229 (dst_port && dst_port == tunnel->dst_port))) { 230 return tunnel; 231 } 232 } 233 234 return NULL; 235 } 236 237 static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb, 238 struct tb_port *src_port, 239 struct tb_port *dst_port) 240 { 241 struct tb_port *port, *usb3_down; 242 struct tb_switch *sw; 243 244 /* Pick the router that is deepest in the topology */ 245 if (dst_port->sw->config.depth > src_port->sw->config.depth) 246 sw = dst_port->sw; 247 else 248 sw = src_port->sw; 249 250 /* Can't be the host router */ 251 if (sw == tb->root_switch) 252 return NULL; 253 254 /* Find the downstream USB4 port that leads to this router */ 255 port = tb_port_at(tb_route(sw), tb->root_switch); 256 /* Find the corresponding host router USB3 downstream port */ 257 usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port); 258 if (!usb3_down) 259 return NULL; 260 261 return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL); 262 } 263 264 static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port, 265 struct tb_port *dst_port, int *available_up, int *available_down) 266 { 267 int usb3_consumed_up, usb3_consumed_down, ret; 268 struct tb_cm *tcm = tb_priv(tb); 269 struct tb_tunnel *tunnel; 270 struct tb_port *port; 271 272 tb_port_dbg(dst_port, "calculating available bandwidth\n"); 273 274 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); 275 if (tunnel) { 276 ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up, 277 &usb3_consumed_down); 278 if (ret) 279 return ret; 280 } else { 281 usb3_consumed_up = 0; 282 usb3_consumed_down = 0; 283 } 284 285 *available_up = *available_down = 40000; 286 287 /* Find the minimum available bandwidth over all links */ 288 tb_for_each_port_on_path(src_port, dst_port, port) { 289 int link_speed, link_width, up_bw, down_bw; 290 291 if (!tb_port_is_null(port)) 292 continue; 293 294 if (tb_is_upstream_port(port)) { 295 link_speed = port->sw->link_speed; 296 } else { 297 link_speed = tb_port_get_link_speed(port); 298 if (link_speed < 0) 299 return link_speed; 300 } 301 302 link_width = port->bonded ? 2 : 1; 303 304 up_bw = link_speed * link_width * 1000; /* Mb/s */ 305 /* Leave 10% guard band */ 306 up_bw -= up_bw / 10; 307 down_bw = up_bw; 308 309 tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw); 310 311 /* 312 * Find all DP tunnels that cross the port and reduce 313 * their consumed bandwidth from the available. 314 */ 315 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { 316 int dp_consumed_up, dp_consumed_down; 317 318 if (!tb_tunnel_is_dp(tunnel)) 319 continue; 320 321 if (!tb_tunnel_port_on_path(tunnel, port)) 322 continue; 323 324 ret = tb_tunnel_consumed_bandwidth(tunnel, 325 &dp_consumed_up, 326 &dp_consumed_down); 327 if (ret) 328 return ret; 329 330 up_bw -= dp_consumed_up; 331 down_bw -= dp_consumed_down; 332 } 333 334 /* 335 * If USB3 is tunneled from the host router down to the 336 * branch leading to port we need to take USB3 consumed 337 * bandwidth into account regardless whether it actually 338 * crosses the port. 339 */ 340 up_bw -= usb3_consumed_up; 341 down_bw -= usb3_consumed_down; 342 343 if (up_bw < *available_up) 344 *available_up = up_bw; 345 if (down_bw < *available_down) 346 *available_down = down_bw; 347 } 348 349 if (*available_up < 0) 350 *available_up = 0; 351 if (*available_down < 0) 352 *available_down = 0; 353 354 return 0; 355 } 356 357 static int tb_release_unused_usb3_bandwidth(struct tb *tb, 358 struct tb_port *src_port, 359 struct tb_port *dst_port) 360 { 361 struct tb_tunnel *tunnel; 362 363 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); 364 return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0; 365 } 366 367 static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port, 368 struct tb_port *dst_port) 369 { 370 int ret, available_up, available_down; 371 struct tb_tunnel *tunnel; 372 373 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); 374 if (!tunnel) 375 return; 376 377 tb_dbg(tb, "reclaiming unused bandwidth for USB3\n"); 378 379 /* 380 * Calculate available bandwidth for the first hop USB3 tunnel. 381 * That determines the whole USB3 bandwidth for this branch. 382 */ 383 ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port, 384 &available_up, &available_down); 385 if (ret) { 386 tb_warn(tb, "failed to calculate available bandwidth\n"); 387 return; 388 } 389 390 tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n", 391 available_up, available_down); 392 393 tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down); 394 } 395 396 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw) 397 { 398 struct tb_switch *parent = tb_switch_parent(sw); 399 int ret, available_up, available_down; 400 struct tb_port *up, *down, *port; 401 struct tb_cm *tcm = tb_priv(tb); 402 struct tb_tunnel *tunnel; 403 404 up = tb_switch_find_port(sw, TB_TYPE_USB3_UP); 405 if (!up) 406 return 0; 407 408 if (!sw->link_usb4) 409 return 0; 410 411 /* 412 * Look up available down port. Since we are chaining it should 413 * be found right above this switch. 414 */ 415 port = tb_port_at(tb_route(sw), parent); 416 down = tb_find_usb3_down(parent, port); 417 if (!down) 418 return 0; 419 420 if (tb_route(parent)) { 421 struct tb_port *parent_up; 422 /* 423 * Check first that the parent switch has its upstream USB3 424 * port enabled. Otherwise the chain is not complete and 425 * there is no point setting up a new tunnel. 426 */ 427 parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP); 428 if (!parent_up || !tb_port_is_enabled(parent_up)) 429 return 0; 430 431 /* Make all unused bandwidth available for the new tunnel */ 432 ret = tb_release_unused_usb3_bandwidth(tb, down, up); 433 if (ret) 434 return ret; 435 } 436 437 ret = tb_available_bandwidth(tb, down, up, &available_up, 438 &available_down); 439 if (ret) 440 goto err_reclaim; 441 442 tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n", 443 available_up, available_down); 444 445 tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up, 446 available_down); 447 if (!tunnel) { 448 ret = -ENOMEM; 449 goto err_reclaim; 450 } 451 452 if (tb_tunnel_activate(tunnel)) { 453 tb_port_info(up, 454 "USB3 tunnel activation failed, aborting\n"); 455 ret = -EIO; 456 goto err_free; 457 } 458 459 list_add_tail(&tunnel->list, &tcm->tunnel_list); 460 if (tb_route(parent)) 461 tb_reclaim_usb3_bandwidth(tb, down, up); 462 463 return 0; 464 465 err_free: 466 tb_tunnel_free(tunnel); 467 err_reclaim: 468 if (tb_route(parent)) 469 tb_reclaim_usb3_bandwidth(tb, down, up); 470 471 return ret; 472 } 473 474 static int tb_create_usb3_tunnels(struct tb_switch *sw) 475 { 476 struct tb_port *port; 477 int ret; 478 479 if (tb_route(sw)) { 480 ret = tb_tunnel_usb3(sw->tb, sw); 481 if (ret) 482 return ret; 483 } 484 485 tb_switch_for_each_port(sw, port) { 486 if (!tb_port_has_remote(port)) 487 continue; 488 ret = tb_create_usb3_tunnels(port->remote->sw); 489 if (ret) 490 return ret; 491 } 492 493 return 0; 494 } 495 496 static void tb_scan_port(struct tb_port *port); 497 498 /** 499 * tb_scan_switch() - scan for and initialize downstream switches 500 */ 501 static void tb_scan_switch(struct tb_switch *sw) 502 { 503 struct tb_port *port; 504 505 tb_switch_for_each_port(sw, port) 506 tb_scan_port(port); 507 } 508 509 /** 510 * tb_scan_port() - check for and initialize switches below port 511 */ 512 static void tb_scan_port(struct tb_port *port) 513 { 514 struct tb_cm *tcm = tb_priv(port->sw->tb); 515 struct tb_port *upstream_port; 516 struct tb_switch *sw; 517 518 if (tb_is_upstream_port(port)) 519 return; 520 521 if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 && 522 !tb_dp_port_is_enabled(port)) { 523 tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n"); 524 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port, 525 false); 526 return; 527 } 528 529 if (port->config.type != TB_TYPE_PORT) 530 return; 531 if (port->dual_link_port && port->link_nr) 532 return; /* 533 * Downstream switch is reachable through two ports. 534 * Only scan on the primary port (link_nr == 0). 535 */ 536 if (tb_wait_for_port(port, false) <= 0) 537 return; 538 if (port->remote) { 539 tb_port_dbg(port, "port already has a remote\n"); 540 return; 541 } 542 543 tb_retimer_scan(port); 544 545 sw = tb_switch_alloc(port->sw->tb, &port->sw->dev, 546 tb_downstream_route(port)); 547 if (IS_ERR(sw)) { 548 /* 549 * If there is an error accessing the connected switch 550 * it may be connected to another domain. Also we allow 551 * the other domain to be connected to a max depth switch. 552 */ 553 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL) 554 tb_scan_xdomain(port); 555 return; 556 } 557 558 if (tb_switch_configure(sw)) { 559 tb_switch_put(sw); 560 return; 561 } 562 563 /* 564 * If there was previously another domain connected remove it 565 * first. 566 */ 567 if (port->xdomain) { 568 tb_xdomain_remove(port->xdomain); 569 port->xdomain = NULL; 570 } 571 572 /* 573 * Do not send uevents until we have discovered all existing 574 * tunnels and know which switches were authorized already by 575 * the boot firmware. 576 */ 577 if (!tcm->hotplug_active) 578 dev_set_uevent_suppress(&sw->dev, true); 579 580 if (tb_switch_add(sw)) { 581 tb_switch_put(sw); 582 return; 583 } 584 585 /* Link the switches using both links if available */ 586 upstream_port = tb_upstream_port(sw); 587 port->remote = upstream_port; 588 upstream_port->remote = port; 589 if (port->dual_link_port && upstream_port->dual_link_port) { 590 port->dual_link_port->remote = upstream_port->dual_link_port; 591 upstream_port->dual_link_port->remote = port->dual_link_port; 592 } 593 594 /* Enable lane bonding if supported */ 595 if (tb_switch_lane_bonding_enable(sw)) 596 tb_sw_warn(sw, "failed to enable lane bonding\n"); 597 598 if (tb_enable_tmu(sw)) 599 tb_sw_warn(sw, "failed to enable TMU\n"); 600 601 /* Scan upstream retimers */ 602 tb_retimer_scan(upstream_port); 603 604 /* 605 * Create USB 3.x tunnels only when the switch is plugged to the 606 * domain. This is because we scan the domain also during discovery 607 * and want to discover existing USB 3.x tunnels before we create 608 * any new. 609 */ 610 if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw)) 611 tb_sw_warn(sw, "USB3 tunnel creation failed\n"); 612 613 tb_add_dp_resources(sw); 614 tb_scan_switch(sw); 615 } 616 617 static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel) 618 { 619 struct tb_port *src_port, *dst_port; 620 struct tb *tb; 621 622 if (!tunnel) 623 return; 624 625 tb_tunnel_deactivate(tunnel); 626 list_del(&tunnel->list); 627 628 tb = tunnel->tb; 629 src_port = tunnel->src_port; 630 dst_port = tunnel->dst_port; 631 632 switch (tunnel->type) { 633 case TB_TUNNEL_DP: 634 /* 635 * In case of DP tunnel make sure the DP IN resource is 636 * deallocated properly. 637 */ 638 tb_switch_dealloc_dp_resource(src_port->sw, src_port); 639 fallthrough; 640 641 case TB_TUNNEL_USB3: 642 tb_reclaim_usb3_bandwidth(tb, src_port, dst_port); 643 break; 644 645 default: 646 /* 647 * PCIe and DMA tunnels do not consume guaranteed 648 * bandwidth. 649 */ 650 break; 651 } 652 653 tb_tunnel_free(tunnel); 654 } 655 656 /** 657 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away 658 */ 659 static void tb_free_invalid_tunnels(struct tb *tb) 660 { 661 struct tb_cm *tcm = tb_priv(tb); 662 struct tb_tunnel *tunnel; 663 struct tb_tunnel *n; 664 665 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { 666 if (tb_tunnel_is_invalid(tunnel)) 667 tb_deactivate_and_free_tunnel(tunnel); 668 } 669 } 670 671 /** 672 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches 673 */ 674 static void tb_free_unplugged_children(struct tb_switch *sw) 675 { 676 struct tb_port *port; 677 678 tb_switch_for_each_port(sw, port) { 679 if (!tb_port_has_remote(port)) 680 continue; 681 682 if (port->remote->sw->is_unplugged) { 683 tb_retimer_remove_all(port); 684 tb_remove_dp_resources(port->remote->sw); 685 tb_switch_lane_bonding_disable(port->remote->sw); 686 tb_switch_remove(port->remote->sw); 687 port->remote = NULL; 688 if (port->dual_link_port) 689 port->dual_link_port->remote = NULL; 690 } else { 691 tb_free_unplugged_children(port->remote->sw); 692 } 693 } 694 } 695 696 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw, 697 const struct tb_port *port) 698 { 699 struct tb_port *down = NULL; 700 701 /* 702 * To keep plugging devices consistently in the same PCIe 703 * hierarchy, do mapping here for switch downstream PCIe ports. 704 */ 705 if (tb_switch_is_usb4(sw)) { 706 down = usb4_switch_map_pcie_down(sw, port); 707 } else if (!tb_route(sw)) { 708 int phy_port = tb_phy_port_from_link(port->port); 709 int index; 710 711 /* 712 * Hard-coded Thunderbolt port to PCIe down port mapping 713 * per controller. 714 */ 715 if (tb_switch_is_cactus_ridge(sw) || 716 tb_switch_is_alpine_ridge(sw)) 717 index = !phy_port ? 6 : 7; 718 else if (tb_switch_is_falcon_ridge(sw)) 719 index = !phy_port ? 6 : 8; 720 else if (tb_switch_is_titan_ridge(sw)) 721 index = !phy_port ? 8 : 9; 722 else 723 goto out; 724 725 /* Validate the hard-coding */ 726 if (WARN_ON(index > sw->config.max_port_number)) 727 goto out; 728 729 down = &sw->ports[index]; 730 } 731 732 if (down) { 733 if (WARN_ON(!tb_port_is_pcie_down(down))) 734 goto out; 735 if (tb_pci_port_is_enabled(down)) 736 goto out; 737 738 return down; 739 } 740 741 out: 742 return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN); 743 } 744 745 static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in) 746 { 747 struct tb_port *host_port, *port; 748 struct tb_cm *tcm = tb_priv(tb); 749 750 host_port = tb_route(in->sw) ? 751 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL; 752 753 list_for_each_entry(port, &tcm->dp_resources, list) { 754 if (!tb_port_is_dpout(port)) 755 continue; 756 757 if (tb_port_is_enabled(port)) { 758 tb_port_dbg(port, "in use\n"); 759 continue; 760 } 761 762 tb_port_dbg(port, "DP OUT available\n"); 763 764 /* 765 * Keep the DP tunnel under the topology starting from 766 * the same host router downstream port. 767 */ 768 if (host_port && tb_route(port->sw)) { 769 struct tb_port *p; 770 771 p = tb_port_at(tb_route(port->sw), tb->root_switch); 772 if (p != host_port) 773 continue; 774 } 775 776 return port; 777 } 778 779 return NULL; 780 } 781 782 static void tb_tunnel_dp(struct tb *tb) 783 { 784 int available_up, available_down, ret; 785 struct tb_cm *tcm = tb_priv(tb); 786 struct tb_port *port, *in, *out; 787 struct tb_tunnel *tunnel; 788 789 /* 790 * Find pair of inactive DP IN and DP OUT adapters and then 791 * establish a DP tunnel between them. 792 */ 793 tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n"); 794 795 in = NULL; 796 out = NULL; 797 list_for_each_entry(port, &tcm->dp_resources, list) { 798 if (!tb_port_is_dpin(port)) 799 continue; 800 801 if (tb_port_is_enabled(port)) { 802 tb_port_dbg(port, "in use\n"); 803 continue; 804 } 805 806 tb_port_dbg(port, "DP IN available\n"); 807 808 out = tb_find_dp_out(tb, port); 809 if (out) { 810 in = port; 811 break; 812 } 813 } 814 815 if (!in) { 816 tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n"); 817 return; 818 } 819 if (!out) { 820 tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n"); 821 return; 822 } 823 824 if (tb_switch_alloc_dp_resource(in->sw, in)) { 825 tb_port_dbg(in, "no resource available for DP IN, not tunneling\n"); 826 return; 827 } 828 829 /* Make all unused USB3 bandwidth available for the new DP tunnel */ 830 ret = tb_release_unused_usb3_bandwidth(tb, in, out); 831 if (ret) { 832 tb_warn(tb, "failed to release unused bandwidth\n"); 833 goto err_dealloc_dp; 834 } 835 836 ret = tb_available_bandwidth(tb, in, out, &available_up, 837 &available_down); 838 if (ret) 839 goto err_reclaim; 840 841 tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n", 842 available_up, available_down); 843 844 tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down); 845 if (!tunnel) { 846 tb_port_dbg(out, "could not allocate DP tunnel\n"); 847 goto err_reclaim; 848 } 849 850 if (tb_tunnel_activate(tunnel)) { 851 tb_port_info(out, "DP tunnel activation failed, aborting\n"); 852 goto err_free; 853 } 854 855 list_add_tail(&tunnel->list, &tcm->tunnel_list); 856 tb_reclaim_usb3_bandwidth(tb, in, out); 857 return; 858 859 err_free: 860 tb_tunnel_free(tunnel); 861 err_reclaim: 862 tb_reclaim_usb3_bandwidth(tb, in, out); 863 err_dealloc_dp: 864 tb_switch_dealloc_dp_resource(in->sw, in); 865 } 866 867 static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port) 868 { 869 struct tb_port *in, *out; 870 struct tb_tunnel *tunnel; 871 872 if (tb_port_is_dpin(port)) { 873 tb_port_dbg(port, "DP IN resource unavailable\n"); 874 in = port; 875 out = NULL; 876 } else { 877 tb_port_dbg(port, "DP OUT resource unavailable\n"); 878 in = NULL; 879 out = port; 880 } 881 882 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out); 883 tb_deactivate_and_free_tunnel(tunnel); 884 list_del_init(&port->list); 885 886 /* 887 * See if there is another DP OUT port that can be used for 888 * to create another tunnel. 889 */ 890 tb_tunnel_dp(tb); 891 } 892 893 static void tb_dp_resource_available(struct tb *tb, struct tb_port *port) 894 { 895 struct tb_cm *tcm = tb_priv(tb); 896 struct tb_port *p; 897 898 if (tb_port_is_enabled(port)) 899 return; 900 901 list_for_each_entry(p, &tcm->dp_resources, list) { 902 if (p == port) 903 return; 904 } 905 906 tb_port_dbg(port, "DP %s resource available\n", 907 tb_port_is_dpin(port) ? "IN" : "OUT"); 908 list_add_tail(&port->list, &tcm->dp_resources); 909 910 /* Look for suitable DP IN <-> DP OUT pairs now */ 911 tb_tunnel_dp(tb); 912 } 913 914 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw) 915 { 916 struct tb_port *up, *down, *port; 917 struct tb_cm *tcm = tb_priv(tb); 918 struct tb_switch *parent_sw; 919 struct tb_tunnel *tunnel; 920 921 up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP); 922 if (!up) 923 return 0; 924 925 /* 926 * Look up available down port. Since we are chaining it should 927 * be found right above this switch. 928 */ 929 parent_sw = tb_to_switch(sw->dev.parent); 930 port = tb_port_at(tb_route(sw), parent_sw); 931 down = tb_find_pcie_down(parent_sw, port); 932 if (!down) 933 return 0; 934 935 tunnel = tb_tunnel_alloc_pci(tb, up, down); 936 if (!tunnel) 937 return -ENOMEM; 938 939 if (tb_tunnel_activate(tunnel)) { 940 tb_port_info(up, 941 "PCIe tunnel activation failed, aborting\n"); 942 tb_tunnel_free(tunnel); 943 return -EIO; 944 } 945 946 list_add_tail(&tunnel->list, &tcm->tunnel_list); 947 return 0; 948 } 949 950 static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) 951 { 952 struct tb_cm *tcm = tb_priv(tb); 953 struct tb_port *nhi_port, *dst_port; 954 struct tb_tunnel *tunnel; 955 struct tb_switch *sw; 956 957 sw = tb_to_switch(xd->dev.parent); 958 dst_port = tb_port_at(xd->route, sw); 959 nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI); 960 961 mutex_lock(&tb->lock); 962 tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring, 963 xd->transmit_path, xd->receive_ring, 964 xd->receive_path); 965 if (!tunnel) { 966 mutex_unlock(&tb->lock); 967 return -ENOMEM; 968 } 969 970 if (tb_tunnel_activate(tunnel)) { 971 tb_port_info(nhi_port, 972 "DMA tunnel activation failed, aborting\n"); 973 tb_tunnel_free(tunnel); 974 mutex_unlock(&tb->lock); 975 return -EIO; 976 } 977 978 list_add_tail(&tunnel->list, &tcm->tunnel_list); 979 mutex_unlock(&tb->lock); 980 return 0; 981 } 982 983 static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) 984 { 985 struct tb_port *dst_port; 986 struct tb_tunnel *tunnel; 987 struct tb_switch *sw; 988 989 sw = tb_to_switch(xd->dev.parent); 990 dst_port = tb_port_at(xd->route, sw); 991 992 /* 993 * It is possible that the tunnel was already teared down (in 994 * case of cable disconnect) so it is fine if we cannot find it 995 * here anymore. 996 */ 997 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port); 998 tb_deactivate_and_free_tunnel(tunnel); 999 } 1000 1001 static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) 1002 { 1003 if (!xd->is_unplugged) { 1004 mutex_lock(&tb->lock); 1005 __tb_disconnect_xdomain_paths(tb, xd); 1006 mutex_unlock(&tb->lock); 1007 } 1008 return 0; 1009 } 1010 1011 /* hotplug handling */ 1012 1013 /** 1014 * tb_handle_hotplug() - handle hotplug event 1015 * 1016 * Executes on tb->wq. 1017 */ 1018 static void tb_handle_hotplug(struct work_struct *work) 1019 { 1020 struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work); 1021 struct tb *tb = ev->tb; 1022 struct tb_cm *tcm = tb_priv(tb); 1023 struct tb_switch *sw; 1024 struct tb_port *port; 1025 mutex_lock(&tb->lock); 1026 if (!tcm->hotplug_active) 1027 goto out; /* during init, suspend or shutdown */ 1028 1029 sw = tb_switch_find_by_route(tb, ev->route); 1030 if (!sw) { 1031 tb_warn(tb, 1032 "hotplug event from non existent switch %llx:%x (unplug: %d)\n", 1033 ev->route, ev->port, ev->unplug); 1034 goto out; 1035 } 1036 if (ev->port > sw->config.max_port_number) { 1037 tb_warn(tb, 1038 "hotplug event from non existent port %llx:%x (unplug: %d)\n", 1039 ev->route, ev->port, ev->unplug); 1040 goto put_sw; 1041 } 1042 port = &sw->ports[ev->port]; 1043 if (tb_is_upstream_port(port)) { 1044 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n", 1045 ev->route, ev->port, ev->unplug); 1046 goto put_sw; 1047 } 1048 if (ev->unplug) { 1049 tb_retimer_remove_all(port); 1050 1051 if (tb_port_has_remote(port)) { 1052 tb_port_dbg(port, "switch unplugged\n"); 1053 tb_sw_set_unplugged(port->remote->sw); 1054 tb_free_invalid_tunnels(tb); 1055 tb_remove_dp_resources(port->remote->sw); 1056 tb_switch_tmu_disable(port->remote->sw); 1057 tb_switch_lane_bonding_disable(port->remote->sw); 1058 tb_switch_remove(port->remote->sw); 1059 port->remote = NULL; 1060 if (port->dual_link_port) 1061 port->dual_link_port->remote = NULL; 1062 /* Maybe we can create another DP tunnel */ 1063 tb_tunnel_dp(tb); 1064 } else if (port->xdomain) { 1065 struct tb_xdomain *xd = tb_xdomain_get(port->xdomain); 1066 1067 tb_port_dbg(port, "xdomain unplugged\n"); 1068 /* 1069 * Service drivers are unbound during 1070 * tb_xdomain_remove() so setting XDomain as 1071 * unplugged here prevents deadlock if they call 1072 * tb_xdomain_disable_paths(). We will tear down 1073 * the path below. 1074 */ 1075 xd->is_unplugged = true; 1076 tb_xdomain_remove(xd); 1077 port->xdomain = NULL; 1078 __tb_disconnect_xdomain_paths(tb, xd); 1079 tb_xdomain_put(xd); 1080 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) { 1081 tb_dp_resource_unavailable(tb, port); 1082 } else { 1083 tb_port_dbg(port, 1084 "got unplug event for disconnected port, ignoring\n"); 1085 } 1086 } else if (port->remote) { 1087 tb_port_dbg(port, "got plug event for connected port, ignoring\n"); 1088 } else { 1089 if (tb_port_is_null(port)) { 1090 tb_port_dbg(port, "hotplug: scanning\n"); 1091 tb_scan_port(port); 1092 if (!port->remote) 1093 tb_port_dbg(port, "hotplug: no switch found\n"); 1094 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) { 1095 tb_dp_resource_available(tb, port); 1096 } 1097 } 1098 1099 put_sw: 1100 tb_switch_put(sw); 1101 out: 1102 mutex_unlock(&tb->lock); 1103 kfree(ev); 1104 } 1105 1106 /** 1107 * tb_schedule_hotplug_handler() - callback function for the control channel 1108 * 1109 * Delegates to tb_handle_hotplug. 1110 */ 1111 static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type, 1112 const void *buf, size_t size) 1113 { 1114 const struct cfg_event_pkg *pkg = buf; 1115 u64 route; 1116 1117 if (type != TB_CFG_PKG_EVENT) { 1118 tb_warn(tb, "unexpected event %#x, ignoring\n", type); 1119 return; 1120 } 1121 1122 route = tb_cfg_get_route(&pkg->header); 1123 1124 if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) { 1125 tb_warn(tb, "could not ack plug event on %llx:%x\n", route, 1126 pkg->port); 1127 } 1128 1129 tb_queue_hotplug(tb, route, pkg->port, pkg->unplug); 1130 } 1131 1132 static void tb_stop(struct tb *tb) 1133 { 1134 struct tb_cm *tcm = tb_priv(tb); 1135 struct tb_tunnel *tunnel; 1136 struct tb_tunnel *n; 1137 1138 /* tunnels are only present after everything has been initialized */ 1139 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { 1140 /* 1141 * DMA tunnels require the driver to be functional so we 1142 * tear them down. Other protocol tunnels can be left 1143 * intact. 1144 */ 1145 if (tb_tunnel_is_dma(tunnel)) 1146 tb_tunnel_deactivate(tunnel); 1147 tb_tunnel_free(tunnel); 1148 } 1149 tb_switch_remove(tb->root_switch); 1150 tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */ 1151 } 1152 1153 static int tb_scan_finalize_switch(struct device *dev, void *data) 1154 { 1155 if (tb_is_switch(dev)) { 1156 struct tb_switch *sw = tb_to_switch(dev); 1157 1158 /* 1159 * If we found that the switch was already setup by the 1160 * boot firmware, mark it as authorized now before we 1161 * send uevent to userspace. 1162 */ 1163 if (sw->boot) 1164 sw->authorized = 1; 1165 1166 dev_set_uevent_suppress(dev, false); 1167 kobject_uevent(&dev->kobj, KOBJ_ADD); 1168 device_for_each_child(dev, NULL, tb_scan_finalize_switch); 1169 } 1170 1171 return 0; 1172 } 1173 1174 static int tb_start(struct tb *tb) 1175 { 1176 struct tb_cm *tcm = tb_priv(tb); 1177 int ret; 1178 1179 tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0); 1180 if (IS_ERR(tb->root_switch)) 1181 return PTR_ERR(tb->root_switch); 1182 1183 /* 1184 * ICM firmware upgrade needs running firmware and in native 1185 * mode that is not available so disable firmware upgrade of the 1186 * root switch. 1187 */ 1188 tb->root_switch->no_nvm_upgrade = true; 1189 1190 ret = tb_switch_configure(tb->root_switch); 1191 if (ret) { 1192 tb_switch_put(tb->root_switch); 1193 return ret; 1194 } 1195 1196 /* Announce the switch to the world */ 1197 ret = tb_switch_add(tb->root_switch); 1198 if (ret) { 1199 tb_switch_put(tb->root_switch); 1200 return ret; 1201 } 1202 1203 /* Enable TMU if it is off */ 1204 tb_switch_tmu_enable(tb->root_switch); 1205 /* Full scan to discover devices added before the driver was loaded. */ 1206 tb_scan_switch(tb->root_switch); 1207 /* Find out tunnels created by the boot firmware */ 1208 tb_discover_tunnels(tb->root_switch); 1209 /* 1210 * If the boot firmware did not create USB 3.x tunnels create them 1211 * now for the whole topology. 1212 */ 1213 tb_create_usb3_tunnels(tb->root_switch); 1214 /* Add DP IN resources for the root switch */ 1215 tb_add_dp_resources(tb->root_switch); 1216 /* Make the discovered switches available to the userspace */ 1217 device_for_each_child(&tb->root_switch->dev, NULL, 1218 tb_scan_finalize_switch); 1219 1220 /* Allow tb_handle_hotplug to progress events */ 1221 tcm->hotplug_active = true; 1222 return 0; 1223 } 1224 1225 static int tb_suspend_noirq(struct tb *tb) 1226 { 1227 struct tb_cm *tcm = tb_priv(tb); 1228 1229 tb_dbg(tb, "suspending...\n"); 1230 tb_switch_suspend(tb->root_switch); 1231 tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */ 1232 tb_dbg(tb, "suspend finished\n"); 1233 1234 return 0; 1235 } 1236 1237 static void tb_restore_children(struct tb_switch *sw) 1238 { 1239 struct tb_port *port; 1240 1241 if (tb_enable_tmu(sw)) 1242 tb_sw_warn(sw, "failed to restore TMU configuration\n"); 1243 1244 tb_switch_for_each_port(sw, port) { 1245 if (!tb_port_has_remote(port)) 1246 continue; 1247 1248 if (tb_switch_lane_bonding_enable(port->remote->sw)) 1249 dev_warn(&sw->dev, "failed to restore lane bonding\n"); 1250 1251 tb_restore_children(port->remote->sw); 1252 } 1253 } 1254 1255 static int tb_resume_noirq(struct tb *tb) 1256 { 1257 struct tb_cm *tcm = tb_priv(tb); 1258 struct tb_tunnel *tunnel, *n; 1259 1260 tb_dbg(tb, "resuming...\n"); 1261 1262 /* remove any pci devices the firmware might have setup */ 1263 tb_switch_reset(tb, 0); 1264 1265 tb_switch_resume(tb->root_switch); 1266 tb_free_invalid_tunnels(tb); 1267 tb_free_unplugged_children(tb->root_switch); 1268 tb_restore_children(tb->root_switch); 1269 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) 1270 tb_tunnel_restart(tunnel); 1271 if (!list_empty(&tcm->tunnel_list)) { 1272 /* 1273 * the pcie links need some time to get going. 1274 * 100ms works for me... 1275 */ 1276 tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n"); 1277 msleep(100); 1278 } 1279 /* Allow tb_handle_hotplug to progress events */ 1280 tcm->hotplug_active = true; 1281 tb_dbg(tb, "resume finished\n"); 1282 1283 return 0; 1284 } 1285 1286 static int tb_free_unplugged_xdomains(struct tb_switch *sw) 1287 { 1288 struct tb_port *port; 1289 int ret = 0; 1290 1291 tb_switch_for_each_port(sw, port) { 1292 if (tb_is_upstream_port(port)) 1293 continue; 1294 if (port->xdomain && port->xdomain->is_unplugged) { 1295 tb_retimer_remove_all(port); 1296 tb_xdomain_remove(port->xdomain); 1297 port->xdomain = NULL; 1298 ret++; 1299 } else if (port->remote) { 1300 ret += tb_free_unplugged_xdomains(port->remote->sw); 1301 } 1302 } 1303 1304 return ret; 1305 } 1306 1307 static void tb_complete(struct tb *tb) 1308 { 1309 /* 1310 * Release any unplugged XDomains and if there is a case where 1311 * another domain is swapped in place of unplugged XDomain we 1312 * need to run another rescan. 1313 */ 1314 mutex_lock(&tb->lock); 1315 if (tb_free_unplugged_xdomains(tb->root_switch)) 1316 tb_scan_switch(tb->root_switch); 1317 mutex_unlock(&tb->lock); 1318 } 1319 1320 static const struct tb_cm_ops tb_cm_ops = { 1321 .start = tb_start, 1322 .stop = tb_stop, 1323 .suspend_noirq = tb_suspend_noirq, 1324 .resume_noirq = tb_resume_noirq, 1325 .complete = tb_complete, 1326 .handle_event = tb_handle_event, 1327 .approve_switch = tb_tunnel_pci, 1328 .approve_xdomain_paths = tb_approve_xdomain_paths, 1329 .disconnect_xdomain_paths = tb_disconnect_xdomain_paths, 1330 }; 1331 1332 struct tb *tb_probe(struct tb_nhi *nhi) 1333 { 1334 struct tb_cm *tcm; 1335 struct tb *tb; 1336 1337 tb = tb_domain_alloc(nhi, sizeof(*tcm)); 1338 if (!tb) 1339 return NULL; 1340 1341 tb->security_level = TB_SECURITY_USER; 1342 tb->cm_ops = &tb_cm_ops; 1343 1344 tcm = tb_priv(tb); 1345 INIT_LIST_HEAD(&tcm->tunnel_list); 1346 INIT_LIST_HEAD(&tcm->dp_resources); 1347 1348 return tb; 1349 } 1350