// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
};

struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}

static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP IN resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}

static void tb_discover_tunnels(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port);
			break;

		default:
			break;
		}

		if (!tunnel)
			continue;

		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		}

		list_add_tail(&tunnel->list, &tcm->tunnel_list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_discover_tunnels(port->remote->sw);
	}
}

static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

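	/*
	 * If an XDomain is already registered for this route the remote
	 * host is known; just drop the extra reference and bail out.
	 */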
	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_xdomain_add(xd);
	}
}

static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down && !tb_usb3_port_is_enabled(down))
		return down;
	return NULL;
}

static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	if (!sw->link_usb4)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_port_at(tb_route(sw), parent);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;
	}

	tunnel = tb_tunnel_alloc_usb3(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

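/**
 * tb_create_usb3_tunnels() - Create USB3 tunnels for the whole topology
 * @sw: Root of the subtree to walk
 *
 * Calls tb_tunnel_usb3() for @sw (unless it is the root switch) and
 * then recurses into every connected downstream switch.
 */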
static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);
}

/**
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		return;
	}
	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		return;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	if (tb_switch_lane_bonding_enable(sw))
		tb_sw_warn(sw, "failed to enable lane bonding\n");

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * any new.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);
	tb_scan_switch(sw);
}

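/**
 * tb_find_tunnel() - Find an active tunnel by type and endpoint
 * @tb: Domain structure
 * @type: Tunnel type to match
 * @src_port: Source adapter to match (%NULL matches any source)
 * @dst_port: Destination adapter to match (%NULL matches any destination)
 *
 * Returns the first tunnel of @type whose source or destination
 * adapter matches, or %NULL if there is no such tunnel.
 */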
static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}

static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	/*
	 * In case of DP tunnel make sure the DP IN resource is deallocated
	 * properly.
	 */
	if (tb_tunnel_is_dp(tunnel)) {
		struct tb_port *in = tunnel->src_port;

		tb_switch_dealloc_dp_resource(in->sw, in);
	}

	tb_tunnel_free(tunnel);
}

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		if (tb_pci_port_is_enabled(down))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

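/**
 * tb_available_bw() - Bandwidth available for a new DP tunnel
 * @tcm: Connection manager private data
 * @in: DP IN adapter
 * @out: DP OUT adapter
 *
 * Walks each link from @out towards @in, takes the link bandwidth
 * minus a 10% guard band and the bandwidth consumed by existing DP
 * tunnels on the path, and returns the smallest value found (capped
 * at 40000 Mb/s) or negative errno in case of failure.
 */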
static int tb_available_bw(struct tb_cm *tcm, struct tb_port *in,
			   struct tb_port *out)
{
	struct tb_switch *sw = out->sw;
	struct tb_tunnel *tunnel;
	int ret, bw, available_bw = 40000;

	while (sw && sw != in->sw) {
		bw = sw->link_speed * sw->link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		bw -= bw / 10;

		/*
		 * Check for any active DP tunnels that go through this
		 * switch and reduce their consumed bandwidth from
		 * available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int consumed_bw;

			if (!tb_tunnel_switch_on_path(tunnel, sw))
				continue;

			ret = tb_tunnel_consumed_bandwidth(tunnel, NULL,
							   &consumed_bw);
			if (ret)
				return ret;

			bw -= consumed_bw;
		}

		if (bw < available_bw)
			available_bw = bw;

		sw = tb_switch_parent(sw);
	}

	return available_bw;
}

static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
	struct tb_port *host_port, *port;
	struct tb_cm *tcm = tb_priv(tb);

	host_port = tb_route(in->sw) ?
		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;

	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpout(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP OUT available\n");

		/*
		 * Keep the DP tunnel under the topology starting from
		 * the same host router downstream port.
		 */
		if (host_port && tb_route(port->sw)) {
			struct tb_port *p;

			p = tb_port_at(tb_route(port->sw), tb->root_switch);
			if (p != host_port)
				continue;
		}

		return port;
	}

	return NULL;
}

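/**
 * tb_tunnel_dp() - Establish new DP tunnel if possible
 * @tb: Domain structure
 *
 * Picks an inactive DP IN/DP OUT adapter pair, allocates the DP IN
 * resource, checks the available bandwidth and activates the tunnel.
 */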
static void tb_tunnel_dp(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	struct tb_tunnel *tunnel;
	int available_bw;

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpin(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP IN available\n");

		out = tb_find_dp_out(tb, port);
		if (out) {
			in = port;
			break;
		}
	}

	if (!in) {
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
		return;
	}
	if (!out) {
		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
		return;
	}

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		return;
	}

	/* Calculate available bandwidth between in and out */
	available_bw = tb_available_bw(tcm, in, out);
	if (available_bw < 0) {
		tb_warn(tb, "failed to determine available bandwidth\n");
		return;
	}

	tb_dbg(tb, "available bandwidth for new DP tunnel %u Mb/s\n",
	       available_bw);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, available_bw);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto dealloc_dp;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		goto dealloc_dp;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return;

dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
}

static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	if (tb_port_is_dpin(port)) {
		tb_port_dbg(port, "DP IN resource unavailable\n");
		in = port;
		out = NULL;
	} else {
		tb_port_dbg(port, "DP OUT resource unavailable\n");
		in = NULL;
		out = port;
	}

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
	tb_deactivate_and_free_tunnel(tunnel);
	list_del_init(&port->list);

	/*
	 * See if there is another DP OUT resource that can be used to
	 * create another tunnel.
	 */
	tb_tunnel_dp(tb);
}

static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	if (tb_port_is_enabled(port))
		return;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);

	/* Look for suitable DP IN <-> DP OUT pairs now */
	tb_tunnel_dp(tb);
}

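/**
 * tb_tunnel_pci() - Tunnel PCIe from the parent switch down to @sw
 * @tb: Domain structure
 * @sw: Switch that was approved
 *
 * Used as the ->approve_switch callback. Sets up a PCIe tunnel between
 * the PCIe upstream adapter of @sw and a matching downstream adapter
 * on the parent switch.
 */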
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *parent_sw;
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	parent_sw = tb_to_switch(sw->dev.parent);
	port = tb_port_at(tb_route(sw), parent_sw);
	down = tb_find_pcie_down(parent_sw, port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	mutex_lock(&tb->lock);
	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
				     xd->transmit_path, xd->receive_ring,
				     xd->receive_path);
	if (!tunnel) {
		mutex_unlock(&tb->lock);
		return -ENOMEM;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(nhi_port,
			     "DMA tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		mutex_unlock(&tb->lock);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	mutex_unlock(&tb->lock);
	return 0;
}

static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_port *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);

	/*
	 * It is possible that the tunnel was already torn down (in
	 * case of cable disconnect) so it is fine if we cannot find it
	 * here anymore.
	 */
	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
	tb_deactivate_and_free_tunnel(tunnel);
}

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!xd->is_unplugged) {
		mutex_lock(&tb->lock);
		__tb_disconnect_xdomain_paths(tb, xd);
		mutex_unlock(&tb->lock);
	}
	return 0;
}

/* hotplug handling */

/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

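	/* Resolve the route to a switch and validate the event port */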
	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	if (ev->unplug) {
		if (tb_port_has_remote(port)) {
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_tmu_disable(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
			/* Maybe we can create another DP tunnel */
			tb_tunnel_dp(tb);
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * the path below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd);
			tb_xdomain_put(xd);
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_unavailable(tb, port);
		} else {
			tb_port_dbg(port,
				    "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else {
		if (tb_port_is_null(port)) {
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_available(tb, port);
		}
	}

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);
	kfree(ev);
}

/**
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route;

	if (type != TB_CFG_PKG_EVENT) {
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	route = tb_cfg_get_route(&pkg->header);

	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

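/**
 * tb_stop() - Stop the connection manager
 * @tb: Domain structure
 *
 * Deactivates the DMA tunnels (other protocol tunnels are left
 * intact), frees all tunnel structures and removes the root switch.
 */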
static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already setup by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}

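/**
 * tb_start() - Start the connection manager
 * @tb: Domain structure
 *
 * Adds and configures the root switch, scans the topology for devices
 * and tunnels set up by the boot firmware, creates any missing USB3
 * tunnels and finally enables hotplug event processing.
 */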
static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available so disable firmware upgrade of the
	 * root switch.
	 */
	tb->root_switch->no_nvm_upgrade = true;

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Enable TMU if it is off */
	tb_switch_tmu_enable(tb->root_switch);
	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb->root_switch);
	/*
	 * If the boot firmware did not create USB 3.x tunnels create them
	 * now for the whole topology.
	 */
	tb_create_usb3_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_switch_suspend(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}

static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (tb_switch_lane_bonding_enable(port->remote->sw))
			dev_warn(&sw->dev, "failed to restore lane bonding\n");

		tb_restore_children(port->remote->sw);
	}
}

static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have setup */
	tb_switch_reset(tb, 0);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * the pcie links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}

static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}

static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains and if there is a case where
	 * another domain is swapped in place of unplugged XDomain we
	 * need to run another rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}

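/* Operations for the native (software) connection manager */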
static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.complete = tb_complete,
	.handle_event = tb_handle_event,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, sizeof(*tcm));
	if (!tb)
		return NULL;

	tb->security_level = TB_SECURITY_USER;
	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);

	return tb;
}