xref: /openbmc/linux/drivers/thunderbolt/tb.c (revision 62efe699)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2d6cc51cdSAndreas Noever /*
399cabbb0SMika Westerberg  * Thunderbolt driver - bus logic (NHI independent)
4d6cc51cdSAndreas Noever  *
5d6cc51cdSAndreas Noever  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
699cabbb0SMika Westerberg  * Copyright (C) 2019, Intel Corporation
7d6cc51cdSAndreas Noever  */
8d6cc51cdSAndreas Noever 
9d6cc51cdSAndreas Noever #include <linux/slab.h>
10d6cc51cdSAndreas Noever #include <linux/errno.h>
11d6cc51cdSAndreas Noever #include <linux/delay.h>
12630b3affSLukas Wunner #include <linux/platform_data/x86/apple.h>
13d6cc51cdSAndreas Noever 
14d6cc51cdSAndreas Noever #include "tb.h"
157adf6097SAndreas Noever #include "tb_regs.h"
161752b9f7SMika Westerberg #include "tunnel.h"
17d6cc51cdSAndreas Noever 
189d3cce0bSMika Westerberg /**
199d3cce0bSMika Westerberg  * struct tb_cm - Simple Thunderbolt connection manager
209d3cce0bSMika Westerberg  * @tunnel_list: List of active tunnels
219d3cce0bSMika Westerberg  * @hotplug_active: tb_handle_hotplug will stop progressing plug
229d3cce0bSMika Westerberg  *		    events and exit if this is not set (it needs to
239d3cce0bSMika Westerberg  *		    acquire the lock one more time). Used to drain wq
249d3cce0bSMika Westerberg  *		    after cfg has been paused.
259d3cce0bSMika Westerberg  */
269d3cce0bSMika Westerberg struct tb_cm {
279d3cce0bSMika Westerberg 	struct list_head tunnel_list;
289d3cce0bSMika Westerberg 	bool hotplug_active;
299d3cce0bSMika Westerberg };
309da672a4SAndreas Noever 
314f807e47SMika Westerberg struct tb_hotplug_event {
324f807e47SMika Westerberg 	struct work_struct work;
334f807e47SMika Westerberg 	struct tb *tb;
344f807e47SMika Westerberg 	u64 route;
354f807e47SMika Westerberg 	u8 port;
364f807e47SMika Westerberg 	bool unplug;
374f807e47SMika Westerberg };
384f807e47SMika Westerberg 
394f807e47SMika Westerberg static void tb_handle_hotplug(struct work_struct *work);
404f807e47SMika Westerberg 
414f807e47SMika Westerberg static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
424f807e47SMika Westerberg {
434f807e47SMika Westerberg 	struct tb_hotplug_event *ev;
444f807e47SMika Westerberg 
454f807e47SMika Westerberg 	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
464f807e47SMika Westerberg 	if (!ev)
474f807e47SMika Westerberg 		return;
484f807e47SMika Westerberg 
494f807e47SMika Westerberg 	ev->tb = tb;
504f807e47SMika Westerberg 	ev->route = route;
514f807e47SMika Westerberg 	ev->port = port;
524f807e47SMika Westerberg 	ev->unplug = unplug;
534f807e47SMika Westerberg 	INIT_WORK(&ev->work, tb_handle_hotplug);
544f807e47SMika Westerberg 	queue_work(tb->wq, &ev->work);
554f807e47SMika Westerberg }
564f807e47SMika Westerberg 
579da672a4SAndreas Noever /* enumeration & hot plug handling */
589da672a4SAndreas Noever 
590414bec5SMika Westerberg static void tb_discover_tunnels(struct tb_switch *sw)
600414bec5SMika Westerberg {
610414bec5SMika Westerberg 	struct tb *tb = sw->tb;
620414bec5SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
630414bec5SMika Westerberg 	struct tb_port *port;
640414bec5SMika Westerberg 	int i;
650414bec5SMika Westerberg 
660414bec5SMika Westerberg 	for (i = 1; i <= sw->config.max_port_number; i++) {
670414bec5SMika Westerberg 		struct tb_tunnel *tunnel = NULL;
680414bec5SMika Westerberg 
690414bec5SMika Westerberg 		port = &sw->ports[i];
700414bec5SMika Westerberg 		switch (port->config.type) {
714f807e47SMika Westerberg 		case TB_TYPE_DP_HDMI_IN:
724f807e47SMika Westerberg 			tunnel = tb_tunnel_discover_dp(tb, port);
734f807e47SMika Westerberg 			break;
744f807e47SMika Westerberg 
750414bec5SMika Westerberg 		case TB_TYPE_PCIE_DOWN:
760414bec5SMika Westerberg 			tunnel = tb_tunnel_discover_pci(tb, port);
770414bec5SMika Westerberg 			break;
780414bec5SMika Westerberg 
790414bec5SMika Westerberg 		default:
800414bec5SMika Westerberg 			break;
810414bec5SMika Westerberg 		}
820414bec5SMika Westerberg 
834f807e47SMika Westerberg 		if (!tunnel)
844f807e47SMika Westerberg 			continue;
854f807e47SMika Westerberg 
864f807e47SMika Westerberg 		if (tb_tunnel_is_pci(tunnel)) {
870414bec5SMika Westerberg 			struct tb_switch *parent = tunnel->dst_port->sw;
880414bec5SMika Westerberg 
890414bec5SMika Westerberg 			while (parent != tunnel->src_port->sw) {
900414bec5SMika Westerberg 				parent->boot = true;
910414bec5SMika Westerberg 				parent = tb_switch_parent(parent);
920414bec5SMika Westerberg 			}
934f807e47SMika Westerberg 		}
940414bec5SMika Westerberg 
950414bec5SMika Westerberg 		list_add_tail(&tunnel->list, &tcm->tunnel_list);
960414bec5SMika Westerberg 	}
970414bec5SMika Westerberg 
980414bec5SMika Westerberg 	for (i = 1; i <= sw->config.max_port_number; i++) {
990414bec5SMika Westerberg 		if (tb_port_has_remote(&sw->ports[i]))
1000414bec5SMika Westerberg 			tb_discover_tunnels(sw->ports[i].remote->sw);
1010414bec5SMika Westerberg 	}
1020414bec5SMika Westerberg }
1039da672a4SAndreas Noever 
1047ea4cd6bSMika Westerberg static void tb_scan_xdomain(struct tb_port *port)
1057ea4cd6bSMika Westerberg {
1067ea4cd6bSMika Westerberg 	struct tb_switch *sw = port->sw;
1077ea4cd6bSMika Westerberg 	struct tb *tb = sw->tb;
1087ea4cd6bSMika Westerberg 	struct tb_xdomain *xd;
1097ea4cd6bSMika Westerberg 	u64 route;
1107ea4cd6bSMika Westerberg 
1117ea4cd6bSMika Westerberg 	route = tb_downstream_route(port);
1127ea4cd6bSMika Westerberg 	xd = tb_xdomain_find_by_route(tb, route);
1137ea4cd6bSMika Westerberg 	if (xd) {
1147ea4cd6bSMika Westerberg 		tb_xdomain_put(xd);
1157ea4cd6bSMika Westerberg 		return;
1167ea4cd6bSMika Westerberg 	}
1177ea4cd6bSMika Westerberg 
1187ea4cd6bSMika Westerberg 	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
1197ea4cd6bSMika Westerberg 			      NULL);
1207ea4cd6bSMika Westerberg 	if (xd) {
1217ea4cd6bSMika Westerberg 		tb_port_at(route, sw)->xdomain = xd;
1227ea4cd6bSMika Westerberg 		tb_xdomain_add(xd);
1237ea4cd6bSMika Westerberg 	}
1247ea4cd6bSMika Westerberg }
1257ea4cd6bSMika Westerberg 
1269da672a4SAndreas Noever static void tb_scan_port(struct tb_port *port);
1279da672a4SAndreas Noever 
1289da672a4SAndreas Noever /**
1299da672a4SAndreas Noever  * tb_scan_switch() - scan for and initialize downstream switches
1309da672a4SAndreas Noever  */
1319da672a4SAndreas Noever static void tb_scan_switch(struct tb_switch *sw)
1329da672a4SAndreas Noever {
1339da672a4SAndreas Noever 	int i;
1349da672a4SAndreas Noever 	for (i = 1; i <= sw->config.max_port_number; i++)
1359da672a4SAndreas Noever 		tb_scan_port(&sw->ports[i]);
1369da672a4SAndreas Noever }
1379da672a4SAndreas Noever 
1389da672a4SAndreas Noever /**
1399da672a4SAndreas Noever  * tb_scan_port() - check for and initialize switches below port
1409da672a4SAndreas Noever  */
1419da672a4SAndreas Noever static void tb_scan_port(struct tb_port *port)
1429da672a4SAndreas Noever {
14399cabbb0SMika Westerberg 	struct tb_cm *tcm = tb_priv(port->sw->tb);
144dfe40ca4SMika Westerberg 	struct tb_port *upstream_port;
1459da672a4SAndreas Noever 	struct tb_switch *sw;
146dfe40ca4SMika Westerberg 
1479da672a4SAndreas Noever 	if (tb_is_upstream_port(port))
1489da672a4SAndreas Noever 		return;
1494f807e47SMika Westerberg 
1504f807e47SMika Westerberg 	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
1514f807e47SMika Westerberg 	    !tb_dp_port_is_enabled(port)) {
1524f807e47SMika Westerberg 		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
1534f807e47SMika Westerberg 		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
1544f807e47SMika Westerberg 				 false);
1554f807e47SMika Westerberg 		return;
1564f807e47SMika Westerberg 	}
1574f807e47SMika Westerberg 
1589da672a4SAndreas Noever 	if (port->config.type != TB_TYPE_PORT)
1599da672a4SAndreas Noever 		return;
160343fcb8cSAndreas Noever 	if (port->dual_link_port && port->link_nr)
161343fcb8cSAndreas Noever 		return; /*
162343fcb8cSAndreas Noever 			 * Downstream switch is reachable through two ports.
163343fcb8cSAndreas Noever 			 * Only scan on the primary port (link_nr == 0).
164343fcb8cSAndreas Noever 			 */
1659da672a4SAndreas Noever 	if (tb_wait_for_port(port, false) <= 0)
1669da672a4SAndreas Noever 		return;
1679da672a4SAndreas Noever 	if (port->remote) {
1687ea4cd6bSMika Westerberg 		tb_port_dbg(port, "port already has a remote\n");
1699da672a4SAndreas Noever 		return;
1709da672a4SAndreas Noever 	}
171bfe778acSMika Westerberg 	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
172bfe778acSMika Westerberg 			     tb_downstream_route(port));
1737ea4cd6bSMika Westerberg 	if (IS_ERR(sw)) {
1747ea4cd6bSMika Westerberg 		/*
1757ea4cd6bSMika Westerberg 		 * If there is an error accessing the connected switch
1767ea4cd6bSMika Westerberg 		 * it may be connected to another domain. Also we allow
1777ea4cd6bSMika Westerberg 		 * the other domain to be connected to a max depth switch.
1787ea4cd6bSMika Westerberg 		 */
1797ea4cd6bSMika Westerberg 		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
1807ea4cd6bSMika Westerberg 			tb_scan_xdomain(port);
1819da672a4SAndreas Noever 		return;
1827ea4cd6bSMika Westerberg 	}
183bfe778acSMika Westerberg 
184bfe778acSMika Westerberg 	if (tb_switch_configure(sw)) {
185bfe778acSMika Westerberg 		tb_switch_put(sw);
186bfe778acSMika Westerberg 		return;
187bfe778acSMika Westerberg 	}
188bfe778acSMika Westerberg 
18999cabbb0SMika Westerberg 	/*
1907ea4cd6bSMika Westerberg 	 * If there was previously another domain connected remove it
1917ea4cd6bSMika Westerberg 	 * first.
1927ea4cd6bSMika Westerberg 	 */
1937ea4cd6bSMika Westerberg 	if (port->xdomain) {
1947ea4cd6bSMika Westerberg 		tb_xdomain_remove(port->xdomain);
1957ea4cd6bSMika Westerberg 		port->xdomain = NULL;
1967ea4cd6bSMika Westerberg 	}
1977ea4cd6bSMika Westerberg 
1987ea4cd6bSMika Westerberg 	/*
19999cabbb0SMika Westerberg 	 * Do not send uevents until we have discovered all existing
20099cabbb0SMika Westerberg 	 * tunnels and know which switches were authorized already by
20199cabbb0SMika Westerberg 	 * the boot firmware.
20299cabbb0SMika Westerberg 	 */
20399cabbb0SMika Westerberg 	if (!tcm->hotplug_active)
20499cabbb0SMika Westerberg 		dev_set_uevent_suppress(&sw->dev, true);
205f67cf491SMika Westerberg 
206bfe778acSMika Westerberg 	if (tb_switch_add(sw)) {
207bfe778acSMika Westerberg 		tb_switch_put(sw);
208bfe778acSMika Westerberg 		return;
209bfe778acSMika Westerberg 	}
210bfe778acSMika Westerberg 
211dfe40ca4SMika Westerberg 	/* Link the switches using both links if available */
212dfe40ca4SMika Westerberg 	upstream_port = tb_upstream_port(sw);
213dfe40ca4SMika Westerberg 	port->remote = upstream_port;
214dfe40ca4SMika Westerberg 	upstream_port->remote = port;
215dfe40ca4SMika Westerberg 	if (port->dual_link_port && upstream_port->dual_link_port) {
216dfe40ca4SMika Westerberg 		port->dual_link_port->remote = upstream_port->dual_link_port;
217dfe40ca4SMika Westerberg 		upstream_port->dual_link_port->remote = port->dual_link_port;
218dfe40ca4SMika Westerberg 	}
219dfe40ca4SMika Westerberg 
2209da672a4SAndreas Noever 	tb_scan_switch(sw);
2219da672a4SAndreas Noever }
2229da672a4SAndreas Noever 
2234f807e47SMika Westerberg static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type,
2244f807e47SMika Westerberg 			  struct tb_port *src_port, struct tb_port *dst_port)
2254f807e47SMika Westerberg {
2264f807e47SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
2274f807e47SMika Westerberg 	struct tb_tunnel *tunnel;
2284f807e47SMika Westerberg 
2294f807e47SMika Westerberg 	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
2304f807e47SMika Westerberg 		if (tunnel->type == type &&
2314f807e47SMika Westerberg 		    ((src_port && src_port == tunnel->src_port) ||
2324f807e47SMika Westerberg 		     (dst_port && dst_port == tunnel->dst_port))) {
2334f807e47SMika Westerberg 			tb_tunnel_deactivate(tunnel);
2344f807e47SMika Westerberg 			list_del(&tunnel->list);
2354f807e47SMika Westerberg 			tb_tunnel_free(tunnel);
2364f807e47SMika Westerberg 			return 0;
2374f807e47SMika Westerberg 		}
2384f807e47SMika Westerberg 	}
2394f807e47SMika Westerberg 
2404f807e47SMika Westerberg 	return -ENODEV;
2414f807e47SMika Westerberg }
2424f807e47SMika Westerberg 
2433364f0c1SAndreas Noever /**
2443364f0c1SAndreas Noever  * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
2453364f0c1SAndreas Noever  */
2463364f0c1SAndreas Noever static void tb_free_invalid_tunnels(struct tb *tb)
2473364f0c1SAndreas Noever {
2489d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
24993f36adeSMika Westerberg 	struct tb_tunnel *tunnel;
25093f36adeSMika Westerberg 	struct tb_tunnel *n;
2519d3cce0bSMika Westerberg 
2529d3cce0bSMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
25393f36adeSMika Westerberg 		if (tb_tunnel_is_invalid(tunnel)) {
25493f36adeSMika Westerberg 			tb_tunnel_deactivate(tunnel);
2559d3cce0bSMika Westerberg 			list_del(&tunnel->list);
25693f36adeSMika Westerberg 			tb_tunnel_free(tunnel);
2573364f0c1SAndreas Noever 		}
2583364f0c1SAndreas Noever 	}
2593364f0c1SAndreas Noever }
2603364f0c1SAndreas Noever 
2613364f0c1SAndreas Noever /**
26223dd5bb4SAndreas Noever  * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
26323dd5bb4SAndreas Noever  */
26423dd5bb4SAndreas Noever static void tb_free_unplugged_children(struct tb_switch *sw)
26523dd5bb4SAndreas Noever {
26623dd5bb4SAndreas Noever 	int i;
26723dd5bb4SAndreas Noever 	for (i = 1; i <= sw->config.max_port_number; i++) {
26823dd5bb4SAndreas Noever 		struct tb_port *port = &sw->ports[i];
269dfe40ca4SMika Westerberg 
270dfe40ca4SMika Westerberg 		if (!tb_port_has_remote(port))
27123dd5bb4SAndreas Noever 			continue;
272dfe40ca4SMika Westerberg 
27323dd5bb4SAndreas Noever 		if (port->remote->sw->is_unplugged) {
274bfe778acSMika Westerberg 			tb_switch_remove(port->remote->sw);
27523dd5bb4SAndreas Noever 			port->remote = NULL;
276dfe40ca4SMika Westerberg 			if (port->dual_link_port)
277dfe40ca4SMika Westerberg 				port->dual_link_port->remote = NULL;
27823dd5bb4SAndreas Noever 		} else {
27923dd5bb4SAndreas Noever 			tb_free_unplugged_children(port->remote->sw);
28023dd5bb4SAndreas Noever 		}
28123dd5bb4SAndreas Noever 	}
28223dd5bb4SAndreas Noever }
28323dd5bb4SAndreas Noever 
28423dd5bb4SAndreas Noever /**
285e78db6f0SMika Westerberg  * tb_find_port() - return the first port of @type on @sw or NULL
286e78db6f0SMika Westerberg  * @sw: Switch to find the port from
287e78db6f0SMika Westerberg  * @type: Port type to look for
2883364f0c1SAndreas Noever  */
289e78db6f0SMika Westerberg static struct tb_port *tb_find_port(struct tb_switch *sw,
290e78db6f0SMika Westerberg 				    enum tb_port_type type)
2913364f0c1SAndreas Noever {
2923364f0c1SAndreas Noever 	int i;
2933364f0c1SAndreas Noever 	for (i = 1; i <= sw->config.max_port_number; i++)
294e78db6f0SMika Westerberg 		if (sw->ports[i].config.type == type)
2953364f0c1SAndreas Noever 			return &sw->ports[i];
2963364f0c1SAndreas Noever 	return NULL;
2973364f0c1SAndreas Noever }
2983364f0c1SAndreas Noever 
2993364f0c1SAndreas Noever /**
300e78db6f0SMika Westerberg  * tb_find_unused_port() - return the first inactive port on @sw
301e78db6f0SMika Westerberg  * @sw: Switch to find the port on
302e78db6f0SMika Westerberg  * @type: Port type to look for
3033364f0c1SAndreas Noever  */
304e78db6f0SMika Westerberg static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
305e78db6f0SMika Westerberg 					   enum tb_port_type type)
3063364f0c1SAndreas Noever {
3073364f0c1SAndreas Noever 	int i;
308e78db6f0SMika Westerberg 
3093364f0c1SAndreas Noever 	for (i = 1; i <= sw->config.max_port_number; i++) {
3103364f0c1SAndreas Noever 		if (tb_is_upstream_port(&sw->ports[i]))
3113364f0c1SAndreas Noever 			continue;
312e78db6f0SMika Westerberg 		if (sw->ports[i].config.type != type)
3133364f0c1SAndreas Noever 			continue;
314e78db6f0SMika Westerberg 		if (!sw->ports[i].cap_adap)
3153364f0c1SAndreas Noever 			continue;
316e78db6f0SMika Westerberg 		if (tb_port_is_enabled(&sw->ports[i]))
3173364f0c1SAndreas Noever 			continue;
3183364f0c1SAndreas Noever 		return &sw->ports[i];
3193364f0c1SAndreas Noever 	}
3203364f0c1SAndreas Noever 	return NULL;
3213364f0c1SAndreas Noever }
3223364f0c1SAndreas Noever 
32399cabbb0SMika Westerberg static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
32499cabbb0SMika Westerberg 					 const struct tb_port *port)
3253364f0c1SAndreas Noever {
32699cabbb0SMika Westerberg 	/*
32799cabbb0SMika Westerberg 	 * To keep plugging devices consistently in the same PCIe
32899cabbb0SMika Westerberg 	 * hierarchy, do mapping here for root switch downstream PCIe
32999cabbb0SMika Westerberg 	 * ports.
33099cabbb0SMika Westerberg 	 */
33199cabbb0SMika Westerberg 	if (!tb_route(sw)) {
33299cabbb0SMika Westerberg 		int phy_port = tb_phy_port_from_link(port->port);
33399cabbb0SMika Westerberg 		int index;
33499cabbb0SMika Westerberg 
33599cabbb0SMika Westerberg 		/*
33699cabbb0SMika Westerberg 		 * Hard-coded Thunderbolt port to PCIe down port mapping
33799cabbb0SMika Westerberg 		 * per controller.
33899cabbb0SMika Westerberg 		 */
33999cabbb0SMika Westerberg 		if (tb_switch_is_cr(sw))
34099cabbb0SMika Westerberg 			index = !phy_port ? 6 : 7;
34199cabbb0SMika Westerberg 		else if (tb_switch_is_fr(sw))
34299cabbb0SMika Westerberg 			index = !phy_port ? 6 : 8;
34399cabbb0SMika Westerberg 		else
34499cabbb0SMika Westerberg 			goto out;
34599cabbb0SMika Westerberg 
34699cabbb0SMika Westerberg 		/* Validate the hard-coding */
34799cabbb0SMika Westerberg 		if (WARN_ON(index > sw->config.max_port_number))
34899cabbb0SMika Westerberg 			goto out;
34999cabbb0SMika Westerberg 		if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index])))
35099cabbb0SMika Westerberg 			goto out;
35199cabbb0SMika Westerberg 		if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index])))
35299cabbb0SMika Westerberg 			goto out;
35399cabbb0SMika Westerberg 
35499cabbb0SMika Westerberg 		return &sw->ports[index];
35599cabbb0SMika Westerberg 	}
35699cabbb0SMika Westerberg 
35799cabbb0SMika Westerberg out:
358e78db6f0SMika Westerberg 	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
35999cabbb0SMika Westerberg }
36099cabbb0SMika Westerberg 
3614f807e47SMika Westerberg static int tb_tunnel_dp(struct tb *tb, struct tb_port *out)
3624f807e47SMika Westerberg {
3634f807e47SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
3644f807e47SMika Westerberg 	struct tb_switch *sw = out->sw;
3654f807e47SMika Westerberg 	struct tb_tunnel *tunnel;
3664f807e47SMika Westerberg 	struct tb_port *in;
3674f807e47SMika Westerberg 
3684f807e47SMika Westerberg 	if (tb_port_is_enabled(out))
3694f807e47SMika Westerberg 		return 0;
3704f807e47SMika Westerberg 
3714f807e47SMika Westerberg 	do {
3724f807e47SMika Westerberg 		sw = tb_to_switch(sw->dev.parent);
3734f807e47SMika Westerberg 		if (!sw)
3744f807e47SMika Westerberg 			return 0;
3754f807e47SMika Westerberg 		in = tb_find_unused_port(sw, TB_TYPE_DP_HDMI_IN);
3764f807e47SMika Westerberg 	} while (!in);
3774f807e47SMika Westerberg 
3784f807e47SMika Westerberg 	tunnel = tb_tunnel_alloc_dp(tb, in, out);
3794f807e47SMika Westerberg 	if (!tunnel) {
3804f807e47SMika Westerberg 		tb_port_dbg(out, "DP tunnel allocation failed\n");
3814f807e47SMika Westerberg 		return -ENOMEM;
3824f807e47SMika Westerberg 	}
3834f807e47SMika Westerberg 
3844f807e47SMika Westerberg 	if (tb_tunnel_activate(tunnel)) {
3854f807e47SMika Westerberg 		tb_port_info(out, "DP tunnel activation failed, aborting\n");
3864f807e47SMika Westerberg 		tb_tunnel_free(tunnel);
3874f807e47SMika Westerberg 		return -EIO;
3884f807e47SMika Westerberg 	}
3894f807e47SMika Westerberg 
3904f807e47SMika Westerberg 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
3914f807e47SMika Westerberg 	return 0;
3924f807e47SMika Westerberg }
3934f807e47SMika Westerberg 
3944f807e47SMika Westerberg static void tb_teardown_dp(struct tb *tb, struct tb_port *out)
3954f807e47SMika Westerberg {
3964f807e47SMika Westerberg 	tb_free_tunnel(tb, TB_TUNNEL_DP, NULL, out);
3974f807e47SMika Westerberg }
3984f807e47SMika Westerberg 
39999cabbb0SMika Westerberg static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
40099cabbb0SMika Westerberg {
40199cabbb0SMika Westerberg 	struct tb_port *up, *down, *port;
4029d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
40399cabbb0SMika Westerberg 	struct tb_switch *parent_sw;
40499cabbb0SMika Westerberg 	struct tb_tunnel *tunnel;
4059d3cce0bSMika Westerberg 
406e78db6f0SMika Westerberg 	up = tb_find_port(sw, TB_TYPE_PCIE_UP);
40799cabbb0SMika Westerberg 	if (!up)
40899cabbb0SMika Westerberg 		return 0;
4093364f0c1SAndreas Noever 
41099cabbb0SMika Westerberg 	/*
41199cabbb0SMika Westerberg 	 * Look up available down port. Since we are chaining it should
41299cabbb0SMika Westerberg 	 * be found right above this switch.
41399cabbb0SMika Westerberg 	 */
41499cabbb0SMika Westerberg 	parent_sw = tb_to_switch(sw->dev.parent);
41599cabbb0SMika Westerberg 	port = tb_port_at(tb_route(sw), parent_sw);
41699cabbb0SMika Westerberg 	down = tb_find_pcie_down(parent_sw, port);
41799cabbb0SMika Westerberg 	if (!down)
41899cabbb0SMika Westerberg 		return 0;
4193364f0c1SAndreas Noever 
42099cabbb0SMika Westerberg 	tunnel = tb_tunnel_alloc_pci(tb, up, down);
42199cabbb0SMika Westerberg 	if (!tunnel)
42299cabbb0SMika Westerberg 		return -ENOMEM;
4233364f0c1SAndreas Noever 
42493f36adeSMika Westerberg 	if (tb_tunnel_activate(tunnel)) {
42599cabbb0SMika Westerberg 		tb_port_info(up,
4263364f0c1SAndreas Noever 			     "PCIe tunnel activation failed, aborting\n");
42793f36adeSMika Westerberg 		tb_tunnel_free(tunnel);
42899cabbb0SMika Westerberg 		return -EIO;
4293364f0c1SAndreas Noever 	}
4303364f0c1SAndreas Noever 
43199cabbb0SMika Westerberg 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
43299cabbb0SMika Westerberg 	return 0;
4333364f0c1SAndreas Noever }
4349da672a4SAndreas Noever 
4357ea4cd6bSMika Westerberg static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
4367ea4cd6bSMika Westerberg {
4377ea4cd6bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
4387ea4cd6bSMika Westerberg 	struct tb_port *nhi_port, *dst_port;
4397ea4cd6bSMika Westerberg 	struct tb_tunnel *tunnel;
4407ea4cd6bSMika Westerberg 	struct tb_switch *sw;
4417ea4cd6bSMika Westerberg 
4427ea4cd6bSMika Westerberg 	sw = tb_to_switch(xd->dev.parent);
4437ea4cd6bSMika Westerberg 	dst_port = tb_port_at(xd->route, sw);
4447ea4cd6bSMika Westerberg 	nhi_port = tb_find_port(tb->root_switch, TB_TYPE_NHI);
4457ea4cd6bSMika Westerberg 
4467ea4cd6bSMika Westerberg 	mutex_lock(&tb->lock);
4477ea4cd6bSMika Westerberg 	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
4487ea4cd6bSMika Westerberg 				     xd->transmit_path, xd->receive_ring,
4497ea4cd6bSMika Westerberg 				     xd->receive_path);
4507ea4cd6bSMika Westerberg 	if (!tunnel) {
4517ea4cd6bSMika Westerberg 		mutex_unlock(&tb->lock);
4527ea4cd6bSMika Westerberg 		return -ENOMEM;
4537ea4cd6bSMika Westerberg 	}
4547ea4cd6bSMika Westerberg 
4557ea4cd6bSMika Westerberg 	if (tb_tunnel_activate(tunnel)) {
4567ea4cd6bSMika Westerberg 		tb_port_info(nhi_port,
4577ea4cd6bSMika Westerberg 			     "DMA tunnel activation failed, aborting\n");
4587ea4cd6bSMika Westerberg 		tb_tunnel_free(tunnel);
4597ea4cd6bSMika Westerberg 		mutex_unlock(&tb->lock);
4607ea4cd6bSMika Westerberg 		return -EIO;
4617ea4cd6bSMika Westerberg 	}
4627ea4cd6bSMika Westerberg 
4637ea4cd6bSMika Westerberg 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
4647ea4cd6bSMika Westerberg 	mutex_unlock(&tb->lock);
4657ea4cd6bSMika Westerberg 	return 0;
4667ea4cd6bSMika Westerberg }
4677ea4cd6bSMika Westerberg 
4687ea4cd6bSMika Westerberg static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
4697ea4cd6bSMika Westerberg {
4707ea4cd6bSMika Westerberg 	struct tb_port *dst_port;
4717ea4cd6bSMika Westerberg 	struct tb_switch *sw;
4727ea4cd6bSMika Westerberg 
4737ea4cd6bSMika Westerberg 	sw = tb_to_switch(xd->dev.parent);
4747ea4cd6bSMika Westerberg 	dst_port = tb_port_at(xd->route, sw);
4757ea4cd6bSMika Westerberg 
4767ea4cd6bSMika Westerberg 	/*
4777ea4cd6bSMika Westerberg 	 * It is possible that the tunnel was already teared down (in
4787ea4cd6bSMika Westerberg 	 * case of cable disconnect) so it is fine if we cannot find it
4797ea4cd6bSMika Westerberg 	 * here anymore.
4807ea4cd6bSMika Westerberg 	 */
4817ea4cd6bSMika Westerberg 	tb_free_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
4827ea4cd6bSMika Westerberg }
4837ea4cd6bSMika Westerberg 
4847ea4cd6bSMika Westerberg static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
4857ea4cd6bSMika Westerberg {
4867ea4cd6bSMika Westerberg 	if (!xd->is_unplugged) {
4877ea4cd6bSMika Westerberg 		mutex_lock(&tb->lock);
4887ea4cd6bSMika Westerberg 		__tb_disconnect_xdomain_paths(tb, xd);
4897ea4cd6bSMika Westerberg 		mutex_unlock(&tb->lock);
4907ea4cd6bSMika Westerberg 	}
4917ea4cd6bSMika Westerberg 	return 0;
4927ea4cd6bSMika Westerberg }
4937ea4cd6bSMika Westerberg 
494d6cc51cdSAndreas Noever /* hotplug handling */
495d6cc51cdSAndreas Noever 
496d6cc51cdSAndreas Noever /**
497d6cc51cdSAndreas Noever  * tb_handle_hotplug() - handle hotplug event
498d6cc51cdSAndreas Noever  *
499d6cc51cdSAndreas Noever  * Executes on tb->wq.
500d6cc51cdSAndreas Noever  */
501d6cc51cdSAndreas Noever static void tb_handle_hotplug(struct work_struct *work)
502d6cc51cdSAndreas Noever {
503d6cc51cdSAndreas Noever 	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
504d6cc51cdSAndreas Noever 	struct tb *tb = ev->tb;
5059d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
506053596d9SAndreas Noever 	struct tb_switch *sw;
507053596d9SAndreas Noever 	struct tb_port *port;
508d6cc51cdSAndreas Noever 	mutex_lock(&tb->lock);
5099d3cce0bSMika Westerberg 	if (!tcm->hotplug_active)
510d6cc51cdSAndreas Noever 		goto out; /* during init, suspend or shutdown */
511d6cc51cdSAndreas Noever 
5128f965efdSMika Westerberg 	sw = tb_switch_find_by_route(tb, ev->route);
513053596d9SAndreas Noever 	if (!sw) {
514053596d9SAndreas Noever 		tb_warn(tb,
515053596d9SAndreas Noever 			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
516053596d9SAndreas Noever 			ev->route, ev->port, ev->unplug);
517053596d9SAndreas Noever 		goto out;
518053596d9SAndreas Noever 	}
519053596d9SAndreas Noever 	if (ev->port > sw->config.max_port_number) {
520053596d9SAndreas Noever 		tb_warn(tb,
521053596d9SAndreas Noever 			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
522053596d9SAndreas Noever 			ev->route, ev->port, ev->unplug);
5238f965efdSMika Westerberg 		goto put_sw;
524053596d9SAndreas Noever 	}
525053596d9SAndreas Noever 	port = &sw->ports[ev->port];
526053596d9SAndreas Noever 	if (tb_is_upstream_port(port)) {
527dfe40ca4SMika Westerberg 		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
528053596d9SAndreas Noever 		       ev->route, ev->port, ev->unplug);
5298f965efdSMika Westerberg 		goto put_sw;
530053596d9SAndreas Noever 	}
531053596d9SAndreas Noever 	if (ev->unplug) {
532dfe40ca4SMika Westerberg 		if (tb_port_has_remote(port)) {
5337ea4cd6bSMika Westerberg 			tb_port_dbg(port, "switch unplugged\n");
534aae20bb6SLukas Wunner 			tb_sw_set_unplugged(port->remote->sw);
5353364f0c1SAndreas Noever 			tb_free_invalid_tunnels(tb);
536bfe778acSMika Westerberg 			tb_switch_remove(port->remote->sw);
537053596d9SAndreas Noever 			port->remote = NULL;
538dfe40ca4SMika Westerberg 			if (port->dual_link_port)
539dfe40ca4SMika Westerberg 				port->dual_link_port->remote = NULL;
5407ea4cd6bSMika Westerberg 		} else if (port->xdomain) {
5417ea4cd6bSMika Westerberg 			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
5427ea4cd6bSMika Westerberg 
5437ea4cd6bSMika Westerberg 			tb_port_dbg(port, "xdomain unplugged\n");
5447ea4cd6bSMika Westerberg 			/*
5457ea4cd6bSMika Westerberg 			 * Service drivers are unbound during
5467ea4cd6bSMika Westerberg 			 * tb_xdomain_remove() so setting XDomain as
5477ea4cd6bSMika Westerberg 			 * unplugged here prevents deadlock if they call
5487ea4cd6bSMika Westerberg 			 * tb_xdomain_disable_paths(). We will tear down
5497ea4cd6bSMika Westerberg 			 * the path below.
5507ea4cd6bSMika Westerberg 			 */
5517ea4cd6bSMika Westerberg 			xd->is_unplugged = true;
5527ea4cd6bSMika Westerberg 			tb_xdomain_remove(xd);
5537ea4cd6bSMika Westerberg 			port->xdomain = NULL;
5547ea4cd6bSMika Westerberg 			__tb_disconnect_xdomain_paths(tb, xd);
5557ea4cd6bSMika Westerberg 			tb_xdomain_put(xd);
5564f807e47SMika Westerberg 		} else if (tb_port_is_dpout(port)) {
5574f807e47SMika Westerberg 			tb_teardown_dp(tb, port);
558053596d9SAndreas Noever 		} else {
55962efe699SMika Westerberg 			tb_port_dbg(port,
560053596d9SAndreas Noever 				   "got unplug event for disconnected port, ignoring\n");
561053596d9SAndreas Noever 		}
562053596d9SAndreas Noever 	} else if (port->remote) {
56362efe699SMika Westerberg 		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
564053596d9SAndreas Noever 	} else {
565344e0643SMika Westerberg 		if (tb_port_is_null(port)) {
56662efe699SMika Westerberg 			tb_port_dbg(port, "hotplug: scanning\n");
567053596d9SAndreas Noever 			tb_scan_port(port);
56899cabbb0SMika Westerberg 			if (!port->remote)
56962efe699SMika Westerberg 				tb_port_dbg(port, "hotplug: no switch found\n");
5704f807e47SMika Westerberg 		} else if (tb_port_is_dpout(port)) {
5714f807e47SMika Westerberg 			tb_tunnel_dp(tb, port);
572053596d9SAndreas Noever 		}
573344e0643SMika Westerberg 	}
5748f965efdSMika Westerberg 
5758f965efdSMika Westerberg put_sw:
5768f965efdSMika Westerberg 	tb_switch_put(sw);
577d6cc51cdSAndreas Noever out:
578d6cc51cdSAndreas Noever 	mutex_unlock(&tb->lock);
579d6cc51cdSAndreas Noever 	kfree(ev);
580d6cc51cdSAndreas Noever }
581d6cc51cdSAndreas Noever 
582d6cc51cdSAndreas Noever /**
583d6cc51cdSAndreas Noever  * tb_schedule_hotplug_handler() - callback function for the control channel
584d6cc51cdSAndreas Noever  *
585d6cc51cdSAndreas Noever  * Delegates to tb_handle_hotplug.
586d6cc51cdSAndreas Noever  */
58781a54b5eSMika Westerberg static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
58881a54b5eSMika Westerberg 			    const void *buf, size_t size)
589d6cc51cdSAndreas Noever {
59081a54b5eSMika Westerberg 	const struct cfg_event_pkg *pkg = buf;
59181a54b5eSMika Westerberg 	u64 route;
59281a54b5eSMika Westerberg 
59381a54b5eSMika Westerberg 	if (type != TB_CFG_PKG_EVENT) {
59481a54b5eSMika Westerberg 		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
59581a54b5eSMika Westerberg 		return;
59681a54b5eSMika Westerberg 	}
59781a54b5eSMika Westerberg 
59881a54b5eSMika Westerberg 	route = tb_cfg_get_route(&pkg->header);
59981a54b5eSMika Westerberg 
60081a54b5eSMika Westerberg 	if (tb_cfg_error(tb->ctl, route, pkg->port,
60181a54b5eSMika Westerberg 			 TB_CFG_ERROR_ACK_PLUG_EVENT)) {
60281a54b5eSMika Westerberg 		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
60381a54b5eSMika Westerberg 			pkg->port);
60481a54b5eSMika Westerberg 	}
60581a54b5eSMika Westerberg 
6064f807e47SMika Westerberg 	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
607d6cc51cdSAndreas Noever }
608d6cc51cdSAndreas Noever 
6099d3cce0bSMika Westerberg static void tb_stop(struct tb *tb)
610d6cc51cdSAndreas Noever {
6119d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
61293f36adeSMika Westerberg 	struct tb_tunnel *tunnel;
61393f36adeSMika Westerberg 	struct tb_tunnel *n;
6143364f0c1SAndreas Noever 
6153364f0c1SAndreas Noever 	/* tunnels are only present after everything has been initialized */
6167ea4cd6bSMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
6177ea4cd6bSMika Westerberg 		/*
6187ea4cd6bSMika Westerberg 		 * DMA tunnels require the driver to be functional so we
6197ea4cd6bSMika Westerberg 		 * tear them down. Other protocol tunnels can be left
6207ea4cd6bSMika Westerberg 		 * intact.
6217ea4cd6bSMika Westerberg 		 */
6227ea4cd6bSMika Westerberg 		if (tb_tunnel_is_dma(tunnel))
6237ea4cd6bSMika Westerberg 			tb_tunnel_deactivate(tunnel);
62493f36adeSMika Westerberg 		tb_tunnel_free(tunnel);
6257ea4cd6bSMika Westerberg 	}
626bfe778acSMika Westerberg 	tb_switch_remove(tb->root_switch);
6279d3cce0bSMika Westerberg 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
628d6cc51cdSAndreas Noever }
629d6cc51cdSAndreas Noever 
63099cabbb0SMika Westerberg static int tb_scan_finalize_switch(struct device *dev, void *data)
63199cabbb0SMika Westerberg {
63299cabbb0SMika Westerberg 	if (tb_is_switch(dev)) {
63399cabbb0SMika Westerberg 		struct tb_switch *sw = tb_to_switch(dev);
63499cabbb0SMika Westerberg 
63599cabbb0SMika Westerberg 		/*
63699cabbb0SMika Westerberg 		 * If we found that the switch was already setup by the
63799cabbb0SMika Westerberg 		 * boot firmware, mark it as authorized now before we
63899cabbb0SMika Westerberg 		 * send uevent to userspace.
63999cabbb0SMika Westerberg 		 */
64099cabbb0SMika Westerberg 		if (sw->boot)
64199cabbb0SMika Westerberg 			sw->authorized = 1;
64299cabbb0SMika Westerberg 
64399cabbb0SMika Westerberg 		dev_set_uevent_suppress(dev, false);
64499cabbb0SMika Westerberg 		kobject_uevent(&dev->kobj, KOBJ_ADD);
64599cabbb0SMika Westerberg 		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
64699cabbb0SMika Westerberg 	}
64799cabbb0SMika Westerberg 
64899cabbb0SMika Westerberg 	return 0;
64999cabbb0SMika Westerberg }
65099cabbb0SMika Westerberg 
6519d3cce0bSMika Westerberg static int tb_start(struct tb *tb)
652d6cc51cdSAndreas Noever {
6539d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
654bfe778acSMika Westerberg 	int ret;
655d6cc51cdSAndreas Noever 
656bfe778acSMika Westerberg 	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
657444ac384SMika Westerberg 	if (IS_ERR(tb->root_switch))
658444ac384SMika Westerberg 		return PTR_ERR(tb->root_switch);
659a25c8b2fSAndreas Noever 
660e6b245ccSMika Westerberg 	/*
661e6b245ccSMika Westerberg 	 * ICM firmware upgrade needs running firmware and in native
662e6b245ccSMika Westerberg 	 * mode that is not available so disable firmware upgrade of the
663e6b245ccSMika Westerberg 	 * root switch.
664e6b245ccSMika Westerberg 	 */
665e6b245ccSMika Westerberg 	tb->root_switch->no_nvm_upgrade = true;
666e6b245ccSMika Westerberg 
667bfe778acSMika Westerberg 	ret = tb_switch_configure(tb->root_switch);
668bfe778acSMika Westerberg 	if (ret) {
669bfe778acSMika Westerberg 		tb_switch_put(tb->root_switch);
670bfe778acSMika Westerberg 		return ret;
671bfe778acSMika Westerberg 	}
672bfe778acSMika Westerberg 
673bfe778acSMika Westerberg 	/* Announce the switch to the world */
674bfe778acSMika Westerberg 	ret = tb_switch_add(tb->root_switch);
675bfe778acSMika Westerberg 	if (ret) {
676bfe778acSMika Westerberg 		tb_switch_put(tb->root_switch);
677bfe778acSMika Westerberg 		return ret;
678bfe778acSMika Westerberg 	}
679bfe778acSMika Westerberg 
6809da672a4SAndreas Noever 	/* Full scan to discover devices added before the driver was loaded. */
6819da672a4SAndreas Noever 	tb_scan_switch(tb->root_switch);
6820414bec5SMika Westerberg 	/* Find out tunnels created by the boot firmware */
6830414bec5SMika Westerberg 	tb_discover_tunnels(tb->root_switch);
68499cabbb0SMika Westerberg 	/* Make the discovered switches available to the userspace */
68599cabbb0SMika Westerberg 	device_for_each_child(&tb->root_switch->dev, NULL,
68699cabbb0SMika Westerberg 			      tb_scan_finalize_switch);
6879da672a4SAndreas Noever 
688d6cc51cdSAndreas Noever 	/* Allow tb_handle_hotplug to progress events */
6899d3cce0bSMika Westerberg 	tcm->hotplug_active = true;
6909d3cce0bSMika Westerberg 	return 0;
691d6cc51cdSAndreas Noever }
692d6cc51cdSAndreas Noever 
/*
 * tb_suspend_noirq() - prepare the domain for system sleep
 *
 * Suspends the root switch (and with it the whole switch tree) and
 * then stops tb_handle_hotplug() from processing further events.
 * Always returns 0.
 */
static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_switch_suspend(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}
70423dd5bb4SAndreas Noever 
/*
 * tb_resume_noirq() - restore the domain after system sleep
 *
 * Resets and resumes the switch tree, drops tunnels that are no longer
 * valid, frees children that went away during sleep, restarts the
 * remaining tunnels and finally re-enables hotplug event processing.
 * Always returns 0.
 */
static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have setup */
	tb_switch_reset(tb, 0);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	/* safe iteration: restarting a tunnel may drop it from the list */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * the pcie links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}
7349d3cce0bSMika Westerberg 
7357ea4cd6bSMika Westerberg static int tb_free_unplugged_xdomains(struct tb_switch *sw)
7367ea4cd6bSMika Westerberg {
7377ea4cd6bSMika Westerberg 	int i, ret = 0;
7387ea4cd6bSMika Westerberg 
7397ea4cd6bSMika Westerberg 	for (i = 1; i <= sw->config.max_port_number; i++) {
7407ea4cd6bSMika Westerberg 		struct tb_port *port = &sw->ports[i];
7417ea4cd6bSMika Westerberg 
7427ea4cd6bSMika Westerberg 		if (tb_is_upstream_port(port))
7437ea4cd6bSMika Westerberg 			continue;
7447ea4cd6bSMika Westerberg 		if (port->xdomain && port->xdomain->is_unplugged) {
7457ea4cd6bSMika Westerberg 			tb_xdomain_remove(port->xdomain);
7467ea4cd6bSMika Westerberg 			port->xdomain = NULL;
7477ea4cd6bSMika Westerberg 			ret++;
7487ea4cd6bSMika Westerberg 		} else if (port->remote) {
7497ea4cd6bSMika Westerberg 			ret += tb_free_unplugged_xdomains(port->remote->sw);
7507ea4cd6bSMika Westerberg 		}
7517ea4cd6bSMika Westerberg 	}
7527ea4cd6bSMika Westerberg 
7537ea4cd6bSMika Westerberg 	return ret;
7547ea4cd6bSMika Westerberg }
7557ea4cd6bSMika Westerberg 
7567ea4cd6bSMika Westerberg static void tb_complete(struct tb *tb)
7577ea4cd6bSMika Westerberg {
7587ea4cd6bSMika Westerberg 	/*
7597ea4cd6bSMika Westerberg 	 * Release any unplugged XDomains and if there is a case where
7607ea4cd6bSMika Westerberg 	 * another domain is swapped in place of unplugged XDomain we
7617ea4cd6bSMika Westerberg 	 * need to run another rescan.
7627ea4cd6bSMika Westerberg 	 */
7637ea4cd6bSMika Westerberg 	mutex_lock(&tb->lock);
7647ea4cd6bSMika Westerberg 	if (tb_free_unplugged_xdomains(tb->root_switch))
7657ea4cd6bSMika Westerberg 		tb_scan_switch(tb->root_switch);
7667ea4cd6bSMika Westerberg 	mutex_unlock(&tb->lock);
7677ea4cd6bSMika Westerberg }
7687ea4cd6bSMika Westerberg 
/* Domain callbacks implemented by the software connection manager */
static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.complete = tb_complete,
	.handle_event = tb_handle_event,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};
7809d3cce0bSMika Westerberg 
7819d3cce0bSMika Westerberg struct tb *tb_probe(struct tb_nhi *nhi)
7829d3cce0bSMika Westerberg {
7839d3cce0bSMika Westerberg 	struct tb_cm *tcm;
7849d3cce0bSMika Westerberg 	struct tb *tb;
7859d3cce0bSMika Westerberg 
786630b3affSLukas Wunner 	if (!x86_apple_machine)
787f67cf491SMika Westerberg 		return NULL;
788f67cf491SMika Westerberg 
7899d3cce0bSMika Westerberg 	tb = tb_domain_alloc(nhi, sizeof(*tcm));
7909d3cce0bSMika Westerberg 	if (!tb)
7919d3cce0bSMika Westerberg 		return NULL;
7929d3cce0bSMika Westerberg 
79399cabbb0SMika Westerberg 	tb->security_level = TB_SECURITY_USER;
7949d3cce0bSMika Westerberg 	tb->cm_ops = &tb_cm_ops;
7959d3cce0bSMika Westerberg 
7969d3cce0bSMika Westerberg 	tcm = tb_priv(tb);
7979d3cce0bSMika Westerberg 	INIT_LIST_HEAD(&tcm->tunnel_list);
7989d3cce0bSMika Westerberg 
7999d3cce0bSMika Westerberg 	return tb;
80023dd5bb4SAndreas Noever }
801