// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt Cactus Ridge driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_data/x86/apple.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel_pci.h"

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 */
struct tb_cm {
	struct list_head tunnel_list;
	bool hotplug_active;
};

/* enumeration & hot plug handling */


static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 * @sw: Switch whose ports are scanned
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	int i;
	for (i = 1; i <= sw->config.max_port_number; i++)
		tb_scan_port(&sw->ports[i]);
}

/**
 * tb_scan_port() - check for and initialize switches below port
 * @port: Port to scan for a downstream switch
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_switch *sw;
	if (tb_is_upstream_port(port))
		return;
	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_WARN(port, "port already has a remote!\n");
		return;
	}
	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (!sw)
		return;

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

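	/*
	 * The software connection manager does not implement security
	 * levels (tb->security_level is TB_SECURITY_NONE), so every
	 * discovered switch is authorized automatically.
	 */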
	sw->authorized = true;

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	port->remote = tb_upstream_port(sw);
	tb_upstream_port(sw)->remote = port;
	tb_scan_switch(sw);
}

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_pci_tunnel *tunnel;
	struct tb_pci_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_pci_is_invalid(tunnel)) {
			tb_pci_deactivate(tunnel);
			list_del(&tunnel->list);
			tb_pci_free(tunnel);
		}
	}
}

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	int i;
	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];
		if (tb_is_upstream_port(port))
			continue;
		if (!port->remote)
			continue;
		if (port->remote->sw->is_unplugged) {
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}


12823dd5bb4SAndreas Noever /**
1293364f0c1SAndreas Noever  * find_pci_up_port() - return the first PCIe up port on @sw or NULL
1303364f0c1SAndreas Noever  */
static struct tb_port *tb_find_pci_up_port(struct tb_switch *sw)
{
	int i;
	for (i = 1; i <= sw->config.max_port_number; i++)
		if (sw->ports[i].config.type == TB_TYPE_PCIE_UP)
			return &sw->ports[i];
	return NULL;
}

/**
 * tb_find_unused_down_port() - return the first inactive PCIe down port on @sw
 * @sw: Switch to search
 */
static struct tb_port *tb_find_unused_down_port(struct tb_switch *sw)
{
	int i;
	int cap;
	int res;
	int data;
	for (i = 1; i <= sw->config.max_port_number; i++) {
		if (tb_is_upstream_port(&sw->ports[i]))
			continue;
		if (sw->ports[i].config.type != TB_TYPE_PCIE_DOWN)
			continue;
		cap = tb_port_find_cap(&sw->ports[i], TB_PORT_CAP_ADAP);
		if (cap < 0)
			continue;
		res = tb_port_read(&sw->ports[i], &data, TB_CFG_PORT, cap, 1);
		if (res < 0)
			continue;
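		/*
		 * Bit 31 in the first dword of the adapter capability means
		 * the PCIe adapter is already activated; skip down ports
		 * that are in use (tb_activate_pcie_devices() performs the
		 * same check on the up port).
		 */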
		if (data & 0x80000000)
			continue;
		return &sw->ports[i];
	}
	return NULL;
}

/**
 * tb_activate_pcie_devices() - scan for and activate PCIe devices
 *
 * This method is somewhat ad hoc. For now it only supports one device
 * per port and only devices at depth 1.
 */
static void tb_activate_pcie_devices(struct tb *tb)
{
	int i;
	int cap;
	u32 data;
	struct tb_switch *sw;
	struct tb_port *up_port;
	struct tb_port *down_port;
	struct tb_pci_tunnel *tunnel;
	struct tb_cm *tcm = tb_priv(tb);

	/* scan for pcie devices at depth 1 */
	for (i = 1; i <= tb->root_switch->config.max_port_number; i++) {
		if (tb_is_upstream_port(&tb->root_switch->ports[i]))
			continue;
		if (tb->root_switch->ports[i].config.type != TB_TYPE_PORT)
			continue;
		if (!tb->root_switch->ports[i].remote)
			continue;
		sw = tb->root_switch->ports[i].remote->sw;
		up_port = tb_find_pci_up_port(sw);
		if (!up_port) {
			tb_sw_info(sw, "no PCIe devices found, aborting\n");
			continue;
		}

		/* check whether port is already activated */
		cap = tb_port_find_cap(up_port, TB_PORT_CAP_ADAP);
		if (cap < 0)
			continue;
		if (tb_port_read(up_port, &data, TB_CFG_PORT, cap, 1))
			continue;
		if (data & 0x80000000) {
			tb_port_info(up_port,
				     "PCIe port already activated, aborting\n");
			continue;
		}

		down_port = tb_find_unused_down_port(tb->root_switch);
		if (!down_port) {
			tb_port_info(up_port,
				     "All PCIe down ports are occupied, aborting\n");
			continue;
		}
		tunnel = tb_pci_alloc(tb, up_port, down_port);
		if (!tunnel) {
			tb_port_info(up_port,
				     "PCIe tunnel allocation failed, aborting\n");
			continue;
		}

		if (tb_pci_activate(tunnel)) {
			tb_port_info(up_port,
				     "PCIe tunnel activation failed, aborting\n");
			tb_pci_free(tunnel);
			continue;
		}

		list_add(&tunnel->list, &tcm->tunnel_list);
	}
}

/* hotplug handling */

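/**
 * struct tb_hotplug_event - Thunderbolt hotplug event
 * @work: Work item delivering the event to tb_handle_hotplug()
 * @tb: Pointer to the domain the event belongs to
 * @route: Route string of the switch the event originated from
 * @port: Port number of the switch the event originated from
 * @unplug: True for an unplug event, false for a plug event
 */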
struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;
	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

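	/*
	 * tb_switch_find_by_route() takes a reference to the switch;
	 * it is released at the put_sw label below.
	 */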
	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_warn(tb,
			"hotplug event for upstream port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	if (ev->unplug) {
		if (port->remote) {
			tb_port_info(port, "unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else {
			tb_port_info(port,
				     "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_info(port,
			     "got plug event for connected port, ignoring\n");
	} else {
		tb_port_info(port, "hotplug: scanning\n");
		tb_scan_port(port);
		if (!port->remote) {
			tb_port_info(port, "hotplug: no switch found\n");
		} else if (port->remote->sw->config.depth > 1) {
			tb_sw_warn(port->remote->sw,
				   "hotplug: chaining not supported\n");
		} else {
			tb_sw_info(port->remote->sw,
				   "hotplug: activating pcie devices\n");
			tb_activate_pcie_devices(tb);
		}
	}

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);
	kfree(ev);
}

/**
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	struct tb_hotplug_event *ev;
	u64 route;

	if (type != TB_CFG_PKG_EVENT) {
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	route = tb_cfg_get_route(&pkg->header);

	if (tb_cfg_error(tb->ctl, route, pkg->port,
			 TB_CFG_ERROR_ACK_PLUG_EVENT)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	ev->tb = tb;
	ev->route = route;
	ev->port = pkg->port;
	ev->unplug = pkg->unplug;
	queue_work(tb->wq, &ev->work);
}

static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_pci_tunnel *tunnel;
	struct tb_pci_tunnel *n;

	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		tb_pci_deactivate(tunnel);
		tb_pci_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (!tb->root_switch)
		return -ENOMEM;

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available so disable firmware upgrade of the
	 * root switch.
	 */
	tb->root_switch->no_nvm_upgrade = true;

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	tb_activate_pcie_devices(tb);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_switch_suspend(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}

static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_pci_tunnel *tunnel, *n;

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have setup */
	tb_switch_reset(tb, 0);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_pci_restart(tunnel);
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * the pcie links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}

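/* Connection manager callbacks invoked by the Thunderbolt domain core. */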
static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.handle_event = tb_handle_event,
};

struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

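	/*
	 * The software connection manager is only used on Apple hardware;
	 * other systems rely on the ICM firmware connection manager.
	 */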
	if (!x86_apple_machine)
		return NULL;

	tb = tb_domain_alloc(nhi, sizeof(*tcm));
	if (!tb)
		return NULL;

	tb->security_level = TB_SECURITY_NONE;
	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);

	return tb;
}