// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_data/x86/apple.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 */
struct tb_cm {
	struct list_head tunnel_list;
	bool hotplug_active;
};

/* enumeration & hot plug handling */

/**
 * tb_discover_tunnels() - find tunnels created by the boot firmware
 * @sw: Switch whose downstream ports are examined
 *
 * Walks every port of @sw looking for tunnels the boot firmware set up
 * before the driver loaded (currently only PCIe is discovered). Each
 * discovered tunnel is added to the connection manager's tunnel list,
 * and every switch on the path between the tunnel end points is marked
 * as boot-configured (@boot) so it can later be auto-authorized.
 * Recurses into all connected child switches.
 */
static void tb_discover_tunnels(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port;
	int i;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_tunnel *tunnel = NULL;

		port = &sw->ports[i];
		switch (port->config.type) {
		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port);
			break;

		default:
			break;
		}

		if (tunnel) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			/*
			 * Mark every switch between the tunnel end
			 * points as set up by the boot firmware.
			 */
			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}

			list_add_tail(&tunnel->list, &tcm->tunnel_list);
		}
	}

	/* Descend into every connected child switch */
	for (i = 1; i <= sw->config.max_port_number; i++) {
		if (tb_port_has_remote(&sw->ports[i]))
			tb_discover_tunnels(sw->ports[i].remote->sw);
	}
}

static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	int i;
	for (i = 1; i <= sw->config.max_port_number; i++)
		tb_scan_port(&sw->ports[i]);
}

/**
 * tb_scan_port() - check for and initialize switches below port
 *
 * Allocates, configures and registers a switch found behind @port,
 * links the two sides (including the secondary link if both ends have
 * dual-link ports) and then recursively scans the new switch.
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;
	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_WARN(port, "port already has a remote!\n");
		return;
	}
	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (!sw)
		return;

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	tb_scan_switch(sw);
}

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 *
 * Deactivates, unlinks and frees every tunnel on the connection
 * manager's list that tb_tunnel_is_invalid() reports as stale.
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel)) {
			tb_tunnel_deactivate(tunnel);
			list_del(&tunnel->list);
			tb_tunnel_free(tunnel);
		}
	}
}

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	int i;
	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];

		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			/* Remove the switch and clear both link ports */
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}


/**
 * find_pci_up_port() - return the first PCIe up port on @sw or NULL
 */
static struct tb_port *tb_find_pci_up_port(struct tb_switch *sw)
{
	int i;
	for (i = 1; i <= sw->config.max_port_number; i++)
		if (sw->ports[i].config.type == TB_TYPE_PCIE_UP)
			return &sw->ports[i];
	return NULL;
}

/**
 * find_unused_down_port() - return the first inactive PCIe down port on @sw
 */
static struct tb_port *tb_find_unused_down_port(struct tb_switch *sw)
{
	int i;
	int cap;
	int res;
	int data;
	for (i = 1; i <= sw->config.max_port_number; i++) {
		if (tb_is_upstream_port(&sw->ports[i]))
			continue;
		if (sw->ports[i].config.type != TB_TYPE_PCIE_DOWN)
			continue;
		cap = sw->ports[i].cap_adap;
		if (!cap)
			continue;
		/* Read the first adapter config dword */
		res = tb_port_read(&sw->ports[i], &data, TB_CFG_PORT, cap, 1);
		if (res < 0)
			continue;
		/*
		 * Bit 31 set means the adapter is already enabled
		 * (in use by an existing tunnel) - skip it.
		 * NOTE(review): presumably matches TB_PCI_EN in
		 * tb_regs.h - confirm.
		 */
		if (data & 0x80000000)
			continue;
		return &sw->ports[i];
	}
	return NULL;
}

/**
 * tb_find_pcie_down() - pick the PCIe down port to pair with @port
 * @sw: Parent switch holding the candidate PCIe down adapters
 * @port: Thunderbolt port the new device was plugged into
 *
 * For the root switch a fixed Thunderbolt-port to PCIe-down-port
 * mapping (per controller generation) keeps devices in a stable PCIe
 * hierarchy across replugs; otherwise any unused down port is used.
 */
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for root switch downstream PCIe
	 * ports.
	 */
	if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cr(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_fr(sw))
			index = !phy_port ? 6 : 8;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;
		if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index])))
			goto out;
		if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index])))
			goto out;

		return &sw->ports[index];
	}

out:
	return tb_find_unused_down_port(sw);
}

/**
 * tb_tunnel_pci() - set up a PCIe tunnel to the newly approved switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to tunnel PCIe to
 *
 * Used as the connection manager's approve_switch hook (see tb_cm_ops).
 * Finds the PCIe up port on @sw and an available down port right above
 * it, allocates and activates the tunnel and puts it on the tunnel
 * list.
 *
 * Return: %0 on success (or when there is nothing to tunnel), negative
 * errno on allocation or activation failure.
 */
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *parent_sw;
	struct tb_tunnel *tunnel;

	/* No PCIe up adapter means nothing to tunnel */
	up = tb_find_pci_up_port(sw);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	parent_sw = tb_to_switch(sw->dev.parent);
	port = tb_port_at(tb_route(sw), parent_sw);
	down = tb_find_pcie_down(parent_sw, port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

/* hotplug handling */

/* Deferred hotplug event, queued on tb->wq by tb_handle_event() */
struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;
	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	/* Takes a reference; dropped at put_sw below */
	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	if (ev->unplug) {
		if (tb_port_has_remote(port)) {
			tb_port_info(port, "unplugged\n");
			/*
			 * Mark the subtree unplugged and tear down its
			 * tunnels before removing the switch itself.
			 */
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_port_info(port,
				     "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_info(port,
			     "got plug event for connected port, ignoring\n");
	} else {
		tb_port_info(port, "hotplug: scanning\n");
		tb_scan_port(port);
		if (!port->remote)
			tb_port_info(port, "hotplug: no switch found\n");
	}

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);
	kfree(ev);	/* event struct was allocated by tb_handle_event() */
}

/**
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	struct tb_hotplug_event *ev;
	u64 route;

	/* Only plug/unplug events are expected from the control channel */
	if (type != TB_CFG_PKG_EVENT) {
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	route = tb_cfg_get_route(&pkg->header);

	/* Ack the event so the firmware can send further ones */
	if (tb_cfg_error(tb->ctl, route, pkg->port,
			 TB_CFG_ERROR_ACK_PLUG_EVENT)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	/* Defer the actual handling to tb->wq; drop the event on OOM */
	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	ev->tb = tb;
	ev->route = route;
	ev->port = pkg->port;
	ev->unplug = pkg->unplug;
	queue_work(tb->wq, &ev->work);
}

static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already setup by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		/* Lift the suppression applied in tb_scan_port() */
		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}

static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (!tb->root_switch)
		return -ENOMEM;

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available so disable firmware upgrade of the
	 * root switch.
	 */
	tb->root_switch->no_nvm_upgrade = true;

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_switch_suspend(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}

static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have setup */
	tb_switch_reset(tb, 0);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	/* Re-activate tunnels that survived the suspend */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * the pcie links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}

/* Connection manager operations for the native (software) CM */
static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.handle_event = tb_handle_event,
	.approve_switch = tb_tunnel_pci,
};

struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	/* The native connection manager is only used on Apple hardware */
	if (!x86_apple_machine)
		return NULL;

	tb = tb_domain_alloc(nhi, sizeof(*tcm));
	if (!tb)
		return NULL;

	tb->security_level = TB_SECURITY_USER;
	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);

	return tb;
}