// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
};

struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}

static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP IN resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}

static void tb_discover_tunnels(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port);
			break;

		default:
			break;
		}

		if (!tunnel)
			continue;

		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		}

		list_add_tail(&tunnel->list, &tcm->tunnel_list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_discover_tunnels(port->remote->sw);
	}
}

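/*
 * Editorial example (not part of the driver): a minimal sketch of how
 * the boot-chain marking above walks a discovered PCIe tunnel. For a
 * hypothetical topology host -> A -> B with a firmware-created PCIe
 * tunnel whose src_port sits on the host router and dst_port on B, the
 * loop visits B and then A (via tb_switch_parent()) and sets ->boot on
 * both, so they can be marked authorized before uevents are sent:
 *
 *	parent = tunnel->dst_port->sw;		// B
 *	while (parent != tunnel->src_port->sw) {
 *		parent->boot = true;		// B, then A
 *		parent = tb_switch_parent(parent);
 *	}
 */
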
static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_xdomain_add(xd);
	}
}

static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);
}

/**
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		return;
	}
	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		return;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	if (tb_switch_lane_bonding_enable(sw))
		tb_sw_warn(sw, "failed to enable lane bonding\n");

	tb_scan_switch(sw);
}

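/*
 * Editorial note (not part of the driver): the scan above is mutually
 * recursive. A sketch of the call flow for one newly plugged chain:
 *
 *	tb_scan_switch(root)
 *	  tb_scan_port(down port)	// null port, primary link only
 *	    tb_switch_alloc() + tb_switch_configure() + tb_switch_add()
 *	    tb_scan_switch(new sw)	// recurse one level deeper
 *
 * Secondary lanes (port->dual_link_port && port->link_nr) are skipped
 * so each downstream router is scanned exactly once.
 */
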
static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}

static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	/*
	 * In case of DP tunnel make sure the DP IN resource is deallocated
	 * properly.
	 */
	if (tb_tunnel_is_dp(tunnel)) {
		struct tb_port *in = tunnel->src_port;

		tb_switch_dealloc_dp_resource(in->sw, in);
	}

	tb_tunnel_free(tunnel);
}

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

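/*
 * Editorial example (not part of the driver): typical lookups with the
 * helpers above. Either endpoint may be NULL, in which case only the
 * other one is matched:
 *
 *	// the DP tunnel whose IN adapter is @in, if any
 *	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
 *
 *	// first unused PCIe down adapter on @sw
 *	down = tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
 */
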
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for root switch downstream PCIe
	 * ports.
	 */
	if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;
		if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index])))
			goto out;
		if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index])))
			goto out;

		return &sw->ports[index];
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

static int tb_available_bw(struct tb_cm *tcm, struct tb_port *in,
			   struct tb_port *out)
{
	struct tb_switch *sw = out->sw;
	struct tb_tunnel *tunnel;
	int bw, available_bw = 40000;

	while (sw && sw != in->sw) {
		bw = sw->link_speed * sw->link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		bw -= bw / 10;

		/*
		 * Check for any active DP tunnels that go through this
		 * switch and reduce their consumed bandwidth from
		 * available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int consumed_bw;

			if (!tb_tunnel_switch_on_path(tunnel, sw))
				continue;

			consumed_bw = tb_tunnel_consumed_bandwidth(tunnel);
			if (consumed_bw < 0)
				return consumed_bw;

			bw -= consumed_bw;
		}

		if (bw < available_bw)
			available_bw = bw;

		sw = tb_switch_parent(sw);
	}

	return available_bw;
}

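/*
 * Editorial example (not part of the driver): worked numbers for the
 * calculation above. For a bonded Gen 3 link (link_speed = 20,
 * link_width = 2) already carrying one DP tunnel that consumes, say,
 * 17280 Mb/s:
 *
 *	bw = 20 * 2 * 1000 = 40000 Mb/s
 *	bw -= 40000 / 10   -> 36000 Mb/s (10% guard band)
 *	bw -= 17280        -> 18720 Mb/s left on this hop
 *
 * The walk repeats for every switch between @out and @in and the
 * minimum across all hops is returned.
 */
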
static void tb_tunnel_dp(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	struct tb_tunnel *tunnel;
	int available_bw;

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "available\n");

		if (!in && tb_port_is_dpin(port))
			in = port;
		else if (!out && tb_port_is_dpout(port))
			out = port;
	}

	if (!in) {
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
		return;
	}
	if (!out) {
		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
		return;
	}

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		return;
	}

	/* Calculate available bandwidth between in and out */
	available_bw = tb_available_bw(tcm, in, out);
	if (available_bw < 0) {
		tb_warn(tb, "failed to determine available bandwidth\n");
		goto dealloc_dp;
	}

	tb_dbg(tb, "available bandwidth for new DP tunnel %u Mb/s\n",
	       available_bw);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, available_bw);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto dealloc_dp;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		goto dealloc_dp;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return;

dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
}

static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	if (tb_port_is_dpin(port)) {
		tb_port_dbg(port, "DP IN resource unavailable\n");
		in = port;
		out = NULL;
	} else {
		tb_port_dbg(port, "DP OUT resource unavailable\n");
		in = NULL;
		out = port;
	}

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
	tb_deactivate_and_free_tunnel(tunnel);
	list_del_init(&port->list);

	/*
	 * See if there is another DP OUT port that can be used to
	 * create another tunnel.
	 */
	tb_tunnel_dp(tb);
}

static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	if (tb_port_is_enabled(port))
		return;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);

	/* Look for suitable DP IN <-> DP OUT pairs now */
	tb_tunnel_dp(tb);
}

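/*
 * Editorial example (not part of the driver): how the DP resource
 * handlers above fit together when a monitor is plugged into a dock.
 * The DP OUT adapter raises a hotplug event, which ends up here:
 *
 *	tb_dp_resource_available(tb, out);	// adds @out to dp_resources
 *	  tb_tunnel_dp(tb);			// pairs first free IN + OUT,
 *						// allocates the DP IN resource
 *						// and activates the tunnel
 *
 * On unplug the mirror image runs: tb_dp_resource_unavailable() tears
 * down the tunnel and retries tb_tunnel_dp() for any remaining pair.
 */
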
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *parent_sw;
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	parent_sw = tb_to_switch(sw->dev.parent);
	port = tb_port_at(tb_route(sw), parent_sw);
	down = tb_find_pcie_down(parent_sw, port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	mutex_lock(&tb->lock);
	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
				     xd->transmit_path, xd->receive_ring,
				     xd->receive_path);
	if (!tunnel) {
		mutex_unlock(&tb->lock);
		return -ENOMEM;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(nhi_port,
			     "DMA tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		mutex_unlock(&tb->lock);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	mutex_unlock(&tb->lock);
	return 0;
}

static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_port *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);

	/*
	 * It is possible that the tunnel was already torn down (in
	 * case of cable disconnect) so it is fine if we cannot find it
	 * here anymore.
	 */
	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
	tb_deactivate_and_free_tunnel(tunnel);
}

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!xd->is_unplugged) {
		mutex_lock(&tb->lock);
		__tb_disconnect_xdomain_paths(tb, xd);
		mutex_unlock(&tb->lock);
	}
	return 0;
}

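/*
 * Editorial example (not part of the driver): the DMA tunnel endpoints
 * above come from the XDomain connection. When a service driver
 * enables paths to a remote host, the rings and HopIDs negotiated over
 * the XDomain protocol are handed to the tunnel code roughly as:
 *
 *	tb_tunnel_alloc_dma(tb, nhi_port, dst_port,
 *			    xd->transmit_ring, xd->transmit_path,
 *			    xd->receive_ring, xd->receive_path);
 *
 * tb_disconnect_xdomain_paths() is the inverse and is safe to call
 * even after a cable pull has already invalidated the tunnel.
 */
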
/* hotplug handling */

/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	if (ev->unplug) {
		if (tb_port_has_remote(port)) {
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
			/* Maybe we can create another DP tunnel */
			tb_tunnel_dp(tb);
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * the path below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd);
			tb_xdomain_put(xd);
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_unavailable(tb, port);
		} else {
			tb_port_dbg(port,
				    "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else {
		if (tb_port_is_null(port)) {
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_available(tb, port);
		}
	}

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);
	kfree(ev);
}

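/*
 * Editorial note (not part of the driver): teardown order on unplug
 * matters. The handler above first marks the subtree unplugged so no
 * further config space access is attempted over the broken link, then
 * frees the tunnels that crossed it, and only then removes the devices:
 *
 *	tb_sw_set_unplugged(port->remote->sw);
 *	tb_free_invalid_tunnels(tb);
 *	tb_remove_dp_resources(port->remote->sw);
 *	tb_switch_lane_bonding_disable(port->remote->sw);
 *	tb_switch_remove(port->remote->sw);
 */
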
/**
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route;

	if (type != TB_CFG_PKG_EVENT) {
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	route = tb_cfg_get_route(&pkg->header);

	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already setup by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}

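/*
 * Editorial note (not part of the driver): uevent suppression is a
 * two-phase handshake across the initial scan. tb_scan_port() calls
 * dev_set_uevent_suppress(&sw->dev, true) while hotplug_active is
 * still false; once tunnels are discovered and ->boot is known,
 * tb_scan_finalize_switch() lifts the suppression and emits KOBJ_ADD,
 * so userspace first sees each switch with its final authorized state.
 */
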
static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available so disable firmware upgrade of the
	 * root switch.
	 */
	tb->root_switch->no_nvm_upgrade = true;

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_switch_suspend(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}

static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (tb_switch_lane_bonding_enable(port->remote->sw))
			dev_warn(&sw->dev, "failed to restore lane bonding\n");

		tb_restore_children(port->remote->sw);
	}
}

static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have setup */
	tb_switch_reset(tb, 0);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * the pcie links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}

static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}

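/*
 * Editorial note (not part of the driver): the resume path above is
 * order sensitive. A condensed view of tb_resume_noirq():
 *
 *	tb_switch_reset(tb, 0);		// drop firmware PCIe setup
 *	tb_switch_resume(root);		// re-enumerate, mark missing
 *					// devices unplugged
 *	tb_free_invalid_tunnels(tb);	// drop tunnels over dead links
 *	tb_free_unplugged_children(root);
 *	tb_restore_children(root);	// re-enable lane bonding
 *	tb_tunnel_restart(tunnel);	// for each remaining tunnel
 */
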
static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains and if there is a case where
	 * another domain is swapped in place of unplugged XDomain we
	 * need to run another rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}

static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.complete = tb_complete,
	.handle_event = tb_handle_event,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, sizeof(*tcm));
	if (!tb)
		return NULL;

	tb->security_level = TB_SECURITY_USER;
	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);

	return tb;
}

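/*
 * Editorial note (not part of the driver): tb_probe() only prepares the
 * domain structure; nothing runs until the NHI driver registers the
 * domain, at which point the ->start callback (tb_start() above) is
 * invoked. In the kernel the caller is the NHI PCI driver, roughly:
 *
 *	tb = tb_probe(nhi);
 *	...
 *	tb_domain_add(tb);	// ends up calling tb_cm_ops.start
 */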