// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
};

struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}
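/*
 * Note: if the allocation above fails the plug event is silently
 * dropped. tb_handle_hotplug() revalidates ev->route and ev->port
 * against the live topology when the work item finally runs, so a
 * stale event is detected and warned about there rather than acted
 * on blindly.
 */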
/* enumeration & hot plug handling */

static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}

static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP IN resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}

static void tb_discover_tunnels(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port);
			break;

		default:
			break;
		}

		if (!tunnel)
			continue;

		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		}

		list_add_tail(&tunnel->list, &tcm->tunnel_list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_discover_tunnels(port->remote->sw);
	}
}

static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_xdomain_add(xd);
	}
}

static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}
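/*
 * Sketch of the sequence above, assuming the usual TMU programming
 * model: the TMU is disabled first so that the time can be updated
 * consistently, tb_switch_tmu_post_time() then propagates the current
 * time from the root switch, and only afterwards is the TMU re-enabled
 * in the desired rate/mode.
 */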
static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);
}

/**
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		return;
	}
	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		return;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	if (tb_switch_lane_bonding_enable(sw))
		tb_sw_warn(sw, "failed to enable lane bonding\n");

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	tb_scan_switch(sw);
}

static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}

static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	/*
	 * In case of DP tunnel make sure the DP IN resource is
	 * deallocated properly.
	 */
	if (tb_tunnel_is_dp(tunnel)) {
		struct tb_port *in = tunnel->src_port;

		tb_switch_dealloc_dp_resource(in->sw, in);
	}

	tb_tunnel_free(tunnel);
}

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		if (WARN_ON(tb_pci_port_is_enabled(down)))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}
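/*
 * Illustrative mapping for the hard-coded table above: on a Titan
 * Ridge host router, physical port 0 (links 1-2) maps to PCIe down
 * adapter 8 and physical port 1 (links 3-4) to adapter 9, so replugged
 * devices keep ending up in the same PCIe hierarchy.
 */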
static int tb_available_bw(struct tb_cm *tcm, struct tb_port *in,
			   struct tb_port *out)
{
	struct tb_switch *sw = out->sw;
	struct tb_tunnel *tunnel;
	int bw, available_bw = 40000;

	while (sw && sw != in->sw) {
		bw = sw->link_speed * sw->link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		bw -= bw / 10;

		/*
		 * Check for any active DP tunnels that go through this
		 * switch and reduce their consumed bandwidth from
		 * available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int consumed_bw;

			if (!tb_tunnel_switch_on_path(tunnel, sw))
				continue;

			consumed_bw = tb_tunnel_consumed_bandwidth(tunnel);
			if (consumed_bw < 0)
				return consumed_bw;

			bw -= consumed_bw;
		}

		if (bw < available_bw)
			available_bw = bw;

		sw = tb_switch_parent(sw);
	}

	return available_bw;
}
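/*
 * Worked example (illustrative): a bonded Thunderbolt 3 link reports
 * link_speed == 20 and link_width == 2, giving 20 * 2 * 1000 ==
 * 40000 Mb/s; the 10% guard band leaves 36000 Mb/s. If an existing DP
 * tunnel crossing the same switch consumes 17280 Mb/s (four HBR2
 * lanes), 18720 Mb/s remain available on that hop.
 */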
static void tb_tunnel_dp(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	struct tb_tunnel *tunnel;
	int available_bw;

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "available\n");

		if (!in && tb_port_is_dpin(port))
			in = port;
		else if (!out && tb_port_is_dpout(port))
			out = port;
	}

	if (!in) {
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
		return;
	}
	if (!out) {
		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
		return;
	}

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		return;
	}

	/* Calculate available bandwidth between in and out */
	available_bw = tb_available_bw(tcm, in, out);
	if (available_bw < 0) {
		tb_warn(tb, "failed to determine available bandwidth\n");
		return;
	}

	tb_dbg(tb, "available bandwidth for new DP tunnel %u Mb/s\n",
	       available_bw);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, available_bw);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto dealloc_dp;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		goto dealloc_dp;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return;

dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
}

static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	if (tb_port_is_dpin(port)) {
		tb_port_dbg(port, "DP IN resource unavailable\n");
		in = port;
		out = NULL;
	} else {
		tb_port_dbg(port, "DP OUT resource unavailable\n");
		in = NULL;
		out = port;
	}

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
	tb_deactivate_and_free_tunnel(tunnel);
	list_del_init(&port->list);

	/*
	 * See if there is another DP OUT port that can be used to
	 * create another tunnel.
	 */
	tb_tunnel_dp(tb);
}

static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	if (tb_port_is_enabled(port))
		return;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);

	/* Look for suitable DP IN <-> DP OUT pairs now */
	tb_tunnel_dp(tb);
}
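/*
 * tb_tunnel_dp() is deliberately retried from several places: here
 * when a new resource appears, in tb_dp_resource_unavailable() after a
 * tunnel is torn down, and in tb_handle_hotplug() when a switch unplug
 * frees bandwidth, so a waiting DP IN/OUT pair gets tunneled as soon
 * as it becomes possible.
 */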
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *parent_sw;
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	parent_sw = tb_to_switch(sw->dev.parent);
	port = tb_port_at(tb_route(sw), parent_sw);
	down = tb_find_pcie_down(parent_sw, port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	mutex_lock(&tb->lock);
	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
				     xd->transmit_path, xd->receive_ring,
				     xd->receive_path);
	if (!tunnel) {
		mutex_unlock(&tb->lock);
		return -ENOMEM;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(nhi_port,
			     "DMA tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		mutex_unlock(&tb->lock);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	mutex_unlock(&tb->lock);
	return 0;
}

static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_port *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);

	/*
	 * It is possible that the tunnel was already torn down (in
	 * case of cable disconnect) so it is fine if we cannot find it
	 * here anymore.
	 */
	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
	tb_deactivate_and_free_tunnel(tunnel);
}

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!xd->is_unplugged) {
		mutex_lock(&tb->lock);
		__tb_disconnect_xdomain_paths(tb, xd);
		mutex_unlock(&tb->lock);
	}
	return 0;
}
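/*
 * Illustrative lifecycle of the two calls above: when an XDomain
 * connection is approved, tb_approve_xdomain_paths() sets up a DMA
 * tunnel between the local NHI and the peer host; the tunnel is torn
 * down again either explicitly via tb_disconnect_xdomain_paths() or
 * implicitly by a cable unplug handled in tb_handle_hotplug().
 */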
/* hotplug handling */

/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non-existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	if (ev->unplug) {
		if (tb_port_has_remote(port)) {
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_tmu_disable(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
			/* Maybe we can create another DP tunnel */
			tb_tunnel_dp(tb);
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * the path below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd);
			tb_xdomain_put(xd);
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_unavailable(tb, port);
		} else {
			tb_port_dbg(port,
				    "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else {
		if (tb_port_is_null(port)) {
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_available(tb, port);
		}
	}

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);
	kfree(ev);
}
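/*
 * Note: the event was already acknowledged in tb_handle_event() below
 * before this handler ran, so the hardware state may have changed
 * again in the meantime; that is why the route and port are looked up
 * fresh above instead of being trusted.
 */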
/**
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route;

	if (type != TB_CFG_PKG_EVENT) {
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	route = tb_cfg_get_route(&pkg->header);

	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already set up by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}
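/*
 * Uevent flow sketch: tb_scan_port() suppresses uevents for switches
 * discovered before hotplug is active; after tb_start() below has
 * discovered the boot-firmware tunnels, tb_scan_finalize_switch()
 * lifts the suppression and emits KOBJ_ADD, so userspace first sees
 * each switch with its authorization state already settled.
 */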
static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware, which is not
	 * available in native mode, so disable firmware upgrade of the
	 * root switch.
	 */
	tb->root_switch->no_nvm_upgrade = true;

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Enable TMU if it is off */
	tb_switch_tmu_enable(tb->root_switch);
	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_switch_suspend(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}

static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (tb_switch_lane_bonding_enable(port->remote->sw))
			dev_warn(&sw->dev, "failed to restore lane bonding\n");

		tb_restore_children(port->remote->sw);
	}
}

static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	tb_dbg(tb, "resuming...\n");

	/* Remove any PCIe devices the firmware might have set up */
	tb_switch_reset(tb, 0);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * The PCIe links need some time to get going;
		 * empirically 100ms is enough.
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}

static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}
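/*
 * The return value counts the XDomains released; tb_complete() below
 * treats a non-zero count as a cue to rescan, in case a different
 * domain was plugged into the same port while the host was suspended.
 */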
static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains; if another domain was swapped
	 * in place of an unplugged XDomain we need to run another
	 * rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}

static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.complete = tb_complete,
	.handle_event = tb_handle_event,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, sizeof(*tcm));
	if (!tb)
		return NULL;

	tb->security_level = TB_SECURITY_USER;
	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);

	return tb;
}
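/*
 * A minimal usage sketch (assuming the NHI driver's probe flow): the
 * native connection manager created here is used only when the ICM
 * firmware connection manager is not active, e.g.
 *
 *	tb = icm_probe(nhi);
 *	if (!tb)
 *		tb = tb_probe(nhi);
 */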