xref: /openbmc/linux/drivers/thunderbolt/tb.c (revision 9d2d0a5c)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2d6cc51cdSAndreas Noever /*
399cabbb0SMika Westerberg  * Thunderbolt driver - bus logic (NHI independent)
4d6cc51cdSAndreas Noever  *
5d6cc51cdSAndreas Noever  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
699cabbb0SMika Westerberg  * Copyright (C) 2019, Intel Corporation
7d6cc51cdSAndreas Noever  */
8d6cc51cdSAndreas Noever 
9d6cc51cdSAndreas Noever #include <linux/slab.h>
10d6cc51cdSAndreas Noever #include <linux/errno.h>
11d6cc51cdSAndreas Noever #include <linux/delay.h>
126ac6faeeSMika Westerberg #include <linux/pm_runtime.h>
13349bfe08SMika Westerberg #include <linux/platform_data/x86/apple.h>
14d6cc51cdSAndreas Noever 
15d6cc51cdSAndreas Noever #include "tb.h"
167adf6097SAndreas Noever #include "tb_regs.h"
171752b9f7SMika Westerberg #include "tunnel.h"
18d6cc51cdSAndreas Noever 
197f0a34d7SMika Westerberg #define TB_TIMEOUT	100 /* ms */
207f0a34d7SMika Westerberg 
/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 * @remove_work: Work used to remove any unplugged routers after
 *		 runtime resume
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
	struct delayed_work remove_work;
};
389da672a4SAndreas Noever 
static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
{
	/*
	 * Inverse of tb_priv(): the connection manager private data is
	 * laid out immediately after the struct tb it belongs to, so
	 * step back by sizeof(struct tb) to recover the domain pointer.
	 * NOTE(review): relies on that allocation layout — confirm
	 * against tb_domain_alloc() if it ever changes.
	 */
	return ((void *)tcm - sizeof(struct tb));
}
436ac6faeeSMika Westerberg 
/* Deferred hotplug event queued to the domain workqueue (see tb_queue_hotplug()) */
struct tb_hotplug_event {
	struct work_struct work;	/* Executed by tb_handle_hotplug() */
	struct tb *tb;			/* Domain the event belongs to */
	u64 route;			/* Route string of the router */
	u8 port;			/* Adapter number the event concerns */
	bool unplug;			/* true on unplug, false on plug */
};
514f807e47SMika Westerberg 
524f807e47SMika Westerberg static void tb_handle_hotplug(struct work_struct *work);
534f807e47SMika Westerberg 
544f807e47SMika Westerberg static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
554f807e47SMika Westerberg {
564f807e47SMika Westerberg 	struct tb_hotplug_event *ev;
574f807e47SMika Westerberg 
584f807e47SMika Westerberg 	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
594f807e47SMika Westerberg 	if (!ev)
604f807e47SMika Westerberg 		return;
614f807e47SMika Westerberg 
624f807e47SMika Westerberg 	ev->tb = tb;
634f807e47SMika Westerberg 	ev->route = route;
644f807e47SMika Westerberg 	ev->port = port;
654f807e47SMika Westerberg 	ev->unplug = unplug;
664f807e47SMika Westerberg 	INIT_WORK(&ev->work, tb_handle_hotplug);
674f807e47SMika Westerberg 	queue_work(tb->wq, &ev->work);
684f807e47SMika Westerberg }
694f807e47SMika Westerberg 
709da672a4SAndreas Noever /* enumeration & hot plug handling */
719da672a4SAndreas Noever 
728afe909bSMika Westerberg static void tb_add_dp_resources(struct tb_switch *sw)
738afe909bSMika Westerberg {
748afe909bSMika Westerberg 	struct tb_cm *tcm = tb_priv(sw->tb);
758afe909bSMika Westerberg 	struct tb_port *port;
768afe909bSMika Westerberg 
778afe909bSMika Westerberg 	tb_switch_for_each_port(sw, port) {
788afe909bSMika Westerberg 		if (!tb_port_is_dpin(port))
798afe909bSMika Westerberg 			continue;
808afe909bSMika Westerberg 
818afe909bSMika Westerberg 		if (!tb_switch_query_dp_resource(sw, port))
828afe909bSMika Westerberg 			continue;
838afe909bSMika Westerberg 
848afe909bSMika Westerberg 		list_add_tail(&port->list, &tcm->dp_resources);
858afe909bSMika Westerberg 		tb_port_dbg(port, "DP IN resource available\n");
868afe909bSMika Westerberg 	}
878afe909bSMika Westerberg }
888afe909bSMika Westerberg 
898afe909bSMika Westerberg static void tb_remove_dp_resources(struct tb_switch *sw)
908afe909bSMika Westerberg {
918afe909bSMika Westerberg 	struct tb_cm *tcm = tb_priv(sw->tb);
928afe909bSMika Westerberg 	struct tb_port *port, *tmp;
938afe909bSMika Westerberg 
948afe909bSMika Westerberg 	/* Clear children resources first */
958afe909bSMika Westerberg 	tb_switch_for_each_port(sw, port) {
968afe909bSMika Westerberg 		if (tb_port_has_remote(port))
978afe909bSMika Westerberg 			tb_remove_dp_resources(port->remote->sw);
988afe909bSMika Westerberg 	}
998afe909bSMika Westerberg 
1008afe909bSMika Westerberg 	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
1018afe909bSMika Westerberg 		if (port->sw == sw) {
1028afe909bSMika Westerberg 			tb_port_dbg(port, "DP OUT resource unavailable\n");
1038afe909bSMika Westerberg 			list_del_init(&port->list);
1048afe909bSMika Westerberg 		}
1058afe909bSMika Westerberg 	}
1068afe909bSMika Westerberg }
1078afe909bSMika Westerberg 
10843bddb26SMika Westerberg static void tb_switch_discover_tunnels(struct tb_switch *sw,
10943bddb26SMika Westerberg 				       struct list_head *list,
11043bddb26SMika Westerberg 				       bool alloc_hopids)
1110414bec5SMika Westerberg {
1120414bec5SMika Westerberg 	struct tb *tb = sw->tb;
1130414bec5SMika Westerberg 	struct tb_port *port;
1140414bec5SMika Westerberg 
115b433d010SMika Westerberg 	tb_switch_for_each_port(sw, port) {
1160414bec5SMika Westerberg 		struct tb_tunnel *tunnel = NULL;
1170414bec5SMika Westerberg 
1180414bec5SMika Westerberg 		switch (port->config.type) {
1194f807e47SMika Westerberg 		case TB_TYPE_DP_HDMI_IN:
12043bddb26SMika Westerberg 			tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
1214f807e47SMika Westerberg 			break;
1224f807e47SMika Westerberg 
1230414bec5SMika Westerberg 		case TB_TYPE_PCIE_DOWN:
12443bddb26SMika Westerberg 			tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
1250414bec5SMika Westerberg 			break;
1260414bec5SMika Westerberg 
127e6f81858SRajmohan Mani 		case TB_TYPE_USB3_DOWN:
12843bddb26SMika Westerberg 			tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
129e6f81858SRajmohan Mani 			break;
130e6f81858SRajmohan Mani 
1310414bec5SMika Westerberg 		default:
1320414bec5SMika Westerberg 			break;
1330414bec5SMika Westerberg 		}
1340414bec5SMika Westerberg 
13543bddb26SMika Westerberg 		if (tunnel)
13643bddb26SMika Westerberg 			list_add_tail(&tunnel->list, list);
13743bddb26SMika Westerberg 	}
1384f807e47SMika Westerberg 
13943bddb26SMika Westerberg 	tb_switch_for_each_port(sw, port) {
14043bddb26SMika Westerberg 		if (tb_port_has_remote(port)) {
14143bddb26SMika Westerberg 			tb_switch_discover_tunnels(port->remote->sw, list,
14243bddb26SMika Westerberg 						   alloc_hopids);
14343bddb26SMika Westerberg 		}
14443bddb26SMika Westerberg 	}
14543bddb26SMika Westerberg }
14643bddb26SMika Westerberg 
/*
 * Discover all tunnels created by the boot firmware starting from the
 * root router, then post-process them: mark routers on PCIe tunnel
 * paths as boot-configured and pin DP tunnel end points with runtime PM.
 */
static void tb_discover_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			/*
			 * Flag every router between the two tunnel end
			 * points as set up by the boot firmware.
			 */
			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		} else if (tb_tunnel_is_dp(tunnel)) {
			/* Keep the domain from powering down */
			pm_runtime_get_sync(&tunnel->src_port->sw->dev);
			pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
		}
	}
}
1699da672a4SAndreas Noever 
170284652a4SMika Westerberg static int tb_port_configure_xdomain(struct tb_port *port)
171284652a4SMika Westerberg {
172341d4518SMika Westerberg 	/*
173341d4518SMika Westerberg 	 * XDomain paths currently only support single lane so we must
174341d4518SMika Westerberg 	 * disable the other lane according to USB4 spec.
175341d4518SMika Westerberg 	 */
176341d4518SMika Westerberg 	tb_port_disable(port->dual_link_port);
177341d4518SMika Westerberg 
178284652a4SMika Westerberg 	if (tb_switch_is_usb4(port->sw))
179284652a4SMika Westerberg 		return usb4_port_configure_xdomain(port);
180284652a4SMika Westerberg 	return tb_lc_configure_xdomain(port);
181284652a4SMika Westerberg }
182284652a4SMika Westerberg 
183284652a4SMika Westerberg static void tb_port_unconfigure_xdomain(struct tb_port *port)
184284652a4SMika Westerberg {
185284652a4SMika Westerberg 	if (tb_switch_is_usb4(port->sw))
186284652a4SMika Westerberg 		usb4_port_unconfigure_xdomain(port);
187284652a4SMika Westerberg 	else
188284652a4SMika Westerberg 		tb_lc_unconfigure_xdomain(port);
189341d4518SMika Westerberg 
190341d4518SMika Westerberg 	tb_port_enable(port->dual_link_port);
191284652a4SMika Westerberg }
192284652a4SMika Westerberg 
1937ea4cd6bSMika Westerberg static void tb_scan_xdomain(struct tb_port *port)
1947ea4cd6bSMika Westerberg {
1957ea4cd6bSMika Westerberg 	struct tb_switch *sw = port->sw;
1967ea4cd6bSMika Westerberg 	struct tb *tb = sw->tb;
1977ea4cd6bSMika Westerberg 	struct tb_xdomain *xd;
1987ea4cd6bSMika Westerberg 	u64 route;
1997ea4cd6bSMika Westerberg 
2005ca67688SMika Westerberg 	if (!tb_is_xdomain_enabled())
2015ca67688SMika Westerberg 		return;
2025ca67688SMika Westerberg 
2037ea4cd6bSMika Westerberg 	route = tb_downstream_route(port);
2047ea4cd6bSMika Westerberg 	xd = tb_xdomain_find_by_route(tb, route);
2057ea4cd6bSMika Westerberg 	if (xd) {
2067ea4cd6bSMika Westerberg 		tb_xdomain_put(xd);
2077ea4cd6bSMika Westerberg 		return;
2087ea4cd6bSMika Westerberg 	}
2097ea4cd6bSMika Westerberg 
2107ea4cd6bSMika Westerberg 	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
2117ea4cd6bSMika Westerberg 			      NULL);
2127ea4cd6bSMika Westerberg 	if (xd) {
2137ea4cd6bSMika Westerberg 		tb_port_at(route, sw)->xdomain = xd;
214284652a4SMika Westerberg 		tb_port_configure_xdomain(port);
2157ea4cd6bSMika Westerberg 		tb_xdomain_add(xd);
2167ea4cd6bSMika Westerberg 	}
2177ea4cd6bSMika Westerberg }
2187ea4cd6bSMika Westerberg 
/*
 * Enable the TMU (Time Management Unit) of @sw in the requested mode.
 * Changing mode requires disabling the TMU, re-posting the time, and
 * enabling it again — the order of these calls matters.
 */
static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_hifi_is_enabled(sw, sw->tmu.unidirectional_request))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}
237cf29b9afSRajmohan Mani 
238e6f81858SRajmohan Mani /**
239e6f81858SRajmohan Mani  * tb_find_unused_port() - return the first inactive port on @sw
240e6f81858SRajmohan Mani  * @sw: Switch to find the port on
241e6f81858SRajmohan Mani  * @type: Port type to look for
242e6f81858SRajmohan Mani  */
243e6f81858SRajmohan Mani static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
244e6f81858SRajmohan Mani 					   enum tb_port_type type)
245e6f81858SRajmohan Mani {
246e6f81858SRajmohan Mani 	struct tb_port *port;
247e6f81858SRajmohan Mani 
248e6f81858SRajmohan Mani 	tb_switch_for_each_port(sw, port) {
249e6f81858SRajmohan Mani 		if (tb_is_upstream_port(port))
250e6f81858SRajmohan Mani 			continue;
251e6f81858SRajmohan Mani 		if (port->config.type != type)
252e6f81858SRajmohan Mani 			continue;
253e6f81858SRajmohan Mani 		if (!port->cap_adap)
254e6f81858SRajmohan Mani 			continue;
255e6f81858SRajmohan Mani 		if (tb_port_is_enabled(port))
256e6f81858SRajmohan Mani 			continue;
257e6f81858SRajmohan Mani 		return port;
258e6f81858SRajmohan Mani 	}
259e6f81858SRajmohan Mani 	return NULL;
260e6f81858SRajmohan Mani }
261e6f81858SRajmohan Mani 
262e6f81858SRajmohan Mani static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
263e6f81858SRajmohan Mani 					 const struct tb_port *port)
264e6f81858SRajmohan Mani {
265e6f81858SRajmohan Mani 	struct tb_port *down;
266e6f81858SRajmohan Mani 
267e6f81858SRajmohan Mani 	down = usb4_switch_map_usb3_down(sw, port);
26877cfa40fSMika Westerberg 	if (down && !tb_usb3_port_is_enabled(down))
269e6f81858SRajmohan Mani 		return down;
27077cfa40fSMika Westerberg 	return NULL;
271e6f81858SRajmohan Mani }
272e6f81858SRajmohan Mani 
2730bd680cdSMika Westerberg static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
2740bd680cdSMika Westerberg 					struct tb_port *src_port,
2750bd680cdSMika Westerberg 					struct tb_port *dst_port)
2760bd680cdSMika Westerberg {
2770bd680cdSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
2780bd680cdSMika Westerberg 	struct tb_tunnel *tunnel;
2790bd680cdSMika Westerberg 
2800bd680cdSMika Westerberg 	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
2810bd680cdSMika Westerberg 		if (tunnel->type == type &&
2820bd680cdSMika Westerberg 		    ((src_port && src_port == tunnel->src_port) ||
2830bd680cdSMika Westerberg 		     (dst_port && dst_port == tunnel->dst_port))) {
2840bd680cdSMika Westerberg 			return tunnel;
2850bd680cdSMika Westerberg 		}
2860bd680cdSMika Westerberg 	}
2870bd680cdSMika Westerberg 
2880bd680cdSMika Westerberg 	return NULL;
2890bd680cdSMika Westerberg }
2900bd680cdSMika Westerberg 
/*
 * Return the first hop USB3 tunnel — the one that starts at the host
 * router's USB3 downstream adapter on the branch leading towards
 * @src_port/@dst_port — or NULL if there is none.
 */
static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
						   struct tb_port *src_port,
						   struct tb_port *dst_port)
{
	struct tb_port *port, *usb3_down;
	struct tb_switch *sw;

	/* Pick the router that is deepest in the topology */
	if (dst_port->sw->config.depth > src_port->sw->config.depth)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	/* Can't be the host router */
	if (sw == tb->root_switch)
		return NULL;

	/* Find the downstream USB4 port that leads to this router */
	port = tb_port_at(tb_route(sw), tb->root_switch);
	/* Find the corresponding host router USB3 downstream port */
	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
	if (!usb3_down)
		return NULL;

	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
}
3170bd680cdSMika Westerberg 
/*
 * Compute the bandwidth (in Mb/s) still available over the path from
 * @src_port to @dst_port, in both directions. The result is the
 * per-link minimum after subtracting a 10% guard band, the bandwidth
 * consumed by DP tunnels crossing each link, and the bandwidth of the
 * first hop USB3 tunnel (which applies to the whole branch).
 * Returns 0 on success, negative errno otherwise.
 */
static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
	struct tb_port *dst_port, int *available_up, int *available_down)
{
	int usb3_consumed_up, usb3_consumed_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_port *port;

	tb_port_dbg(dst_port, "calculating available bandwidth\n");

	/*
	 * USB3 is tunneled from the host router; its consumed bandwidth
	 * is charged against every link on this branch below.
	 */
	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel) {
		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
						   &usb3_consumed_down);
		if (ret)
			return ret;
	} else {
		usb3_consumed_up = 0;
		usb3_consumed_down = 0;
	}

	/* Start from the maximum and narrow down per link */
	*available_up = *available_down = 40000;

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int link_speed, link_width, up_bw, down_bw;

		/* Only lane adapters represent physical links */
		if (!tb_port_is_null(port))
			continue;

		if (tb_is_upstream_port(port)) {
			link_speed = port->sw->link_speed;
		} else {
			link_speed = tb_port_get_link_speed(port);
			if (link_speed < 0)
				return link_speed;
		}

		link_width = port->bonded ? 2 : 1;

		up_bw = link_speed * link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		up_bw -= up_bw / 10;
		down_bw = up_bw;

		tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);

		/*
		 * Find all DP tunnels that cross the port and reduce
		 * their consumed bandwidth from the available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int dp_consumed_up, dp_consumed_down;

			if (!tb_tunnel_is_dp(tunnel))
				continue;

			if (!tb_tunnel_port_on_path(tunnel, port))
				continue;

			ret = tb_tunnel_consumed_bandwidth(tunnel,
							   &dp_consumed_up,
							   &dp_consumed_down);
			if (ret)
				return ret;

			up_bw -= dp_consumed_up;
			down_bw -= dp_consumed_down;
		}

		/*
		 * If USB3 is tunneled from the host router down to the
		 * branch leading to port we need to take USB3 consumed
		 * bandwidth into account regardless whether it actually
		 * crosses the port.
		 */
		up_bw -= usb3_consumed_up;
		down_bw -= usb3_consumed_down;

		if (up_bw < *available_up)
			*available_up = up_bw;
		if (down_bw < *available_down)
			*available_down = down_bw;
	}

	/* Clamp: consumers may have over-subscribed a link */
	if (*available_up < 0)
		*available_up = 0;
	if (*available_down < 0)
		*available_down = 0;

	return 0;
}
4100bd680cdSMika Westerberg 
/*
 * Release the bandwidth reserved but not used by the first hop USB3
 * tunnel of the branch containing @src_port/@dst_port, so it can be
 * handed to a new tunnel. No-op (returns 0) when no such tunnel exists.
 */
static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return 0;

	return tb_tunnel_release_unused_bandwidth(tunnel);
}
4200bd680cdSMika Westerberg 
4210bd680cdSMika Westerberg static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
4220bd680cdSMika Westerberg 				      struct tb_port *dst_port)
4230bd680cdSMika Westerberg {
4240bd680cdSMika Westerberg 	int ret, available_up, available_down;
4250bd680cdSMika Westerberg 	struct tb_tunnel *tunnel;
4260bd680cdSMika Westerberg 
4270bd680cdSMika Westerberg 	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
4280bd680cdSMika Westerberg 	if (!tunnel)
4290bd680cdSMika Westerberg 		return;
4300bd680cdSMika Westerberg 
4310bd680cdSMika Westerberg 	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");
4320bd680cdSMika Westerberg 
4330bd680cdSMika Westerberg 	/*
4340bd680cdSMika Westerberg 	 * Calculate available bandwidth for the first hop USB3 tunnel.
4350bd680cdSMika Westerberg 	 * That determines the whole USB3 bandwidth for this branch.
4360bd680cdSMika Westerberg 	 */
4370bd680cdSMika Westerberg 	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
4380bd680cdSMika Westerberg 				     &available_up, &available_down);
4390bd680cdSMika Westerberg 	if (ret) {
4400bd680cdSMika Westerberg 		tb_warn(tb, "failed to calculate available bandwidth\n");
4410bd680cdSMika Westerberg 		return;
4420bd680cdSMika Westerberg 	}
4430bd680cdSMika Westerberg 
4440bd680cdSMika Westerberg 	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
4450bd680cdSMika Westerberg 	       available_up, available_down);
4460bd680cdSMika Westerberg 
4470bd680cdSMika Westerberg 	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
4480bd680cdSMika Westerberg }
4490bd680cdSMika Westerberg 
/*
 * Try to set up a USB3 tunnel from the parent of @sw down to @sw.
 * Quietly returns 0 (no tunnel) when USB3 tunneling is disallowed by
 * ACPI policy, @sw has no USB3 upstream adapter, the link is not USB4,
 * no free downstream adapter exists, or the tunnel chain above @sw is
 * incomplete. Returns negative errno on real failures.
 */
static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_usb3()) {
		tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
		return 0;
	}

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	if (!sw->link_usb4)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_port_at(tb_route(sw), parent);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;

		/* Make all unused bandwidth available for the new tunnel */
		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
		if (ret)
			return ret;
	}

	ret = tb_available_bandwidth(tb, down, up, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	/* Hand whatever was left over back to the upstream USB3 tunnel */
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	/* Undo the earlier tb_release_unused_usb3_bandwidth() */
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return ret;
}
532e6f81858SRajmohan Mani 
533e6f81858SRajmohan Mani static int tb_create_usb3_tunnels(struct tb_switch *sw)
534e6f81858SRajmohan Mani {
535e6f81858SRajmohan Mani 	struct tb_port *port;
536e6f81858SRajmohan Mani 	int ret;
537e6f81858SRajmohan Mani 
538c6da62a2SMika Westerberg 	if (!tb_acpi_may_tunnel_usb3())
539c6da62a2SMika Westerberg 		return 0;
540c6da62a2SMika Westerberg 
541e6f81858SRajmohan Mani 	if (tb_route(sw)) {
542e6f81858SRajmohan Mani 		ret = tb_tunnel_usb3(sw->tb, sw);
543e6f81858SRajmohan Mani 		if (ret)
544e6f81858SRajmohan Mani 			return ret;
545e6f81858SRajmohan Mani 	}
546e6f81858SRajmohan Mani 
547e6f81858SRajmohan Mani 	tb_switch_for_each_port(sw, port) {
548e6f81858SRajmohan Mani 		if (!tb_port_has_remote(port))
549e6f81858SRajmohan Mani 			continue;
550e6f81858SRajmohan Mani 		ret = tb_create_usb3_tunnels(port->remote->sw);
551e6f81858SRajmohan Mani 		if (ret)
552e6f81858SRajmohan Mani 			return ret;
553e6f81858SRajmohan Mani 	}
554e6f81858SRajmohan Mani 
555e6f81858SRajmohan Mani 	return 0;
556e6f81858SRajmohan Mani }
557e6f81858SRajmohan Mani 
5589da672a4SAndreas Noever static void tb_scan_port(struct tb_port *port);
5599da672a4SAndreas Noever 
/*
 * tb_scan_switch() - scan for and initialize downstream switches
 *
 * Holds a runtime PM reference on @sw for the duration of the scan so
 * the router cannot suspend while its ports are being probed.
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	pm_runtime_get_sync(&sw->dev);

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}
5759da672a4SAndreas Noever 
576877e50b3SLee Jones /*
5779da672a4SAndreas Noever  * tb_scan_port() - check for and initialize switches below port
5789da672a4SAndreas Noever  */
5799da672a4SAndreas Noever static void tb_scan_port(struct tb_port *port)
5809da672a4SAndreas Noever {
58199cabbb0SMika Westerberg 	struct tb_cm *tcm = tb_priv(port->sw->tb);
582dfe40ca4SMika Westerberg 	struct tb_port *upstream_port;
5839da672a4SAndreas Noever 	struct tb_switch *sw;
584dfe40ca4SMika Westerberg 
5859da672a4SAndreas Noever 	if (tb_is_upstream_port(port))
5869da672a4SAndreas Noever 		return;
5874f807e47SMika Westerberg 
5884f807e47SMika Westerberg 	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
5894f807e47SMika Westerberg 	    !tb_dp_port_is_enabled(port)) {
5904f807e47SMika Westerberg 		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
5914f807e47SMika Westerberg 		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
5924f807e47SMika Westerberg 				 false);
5934f807e47SMika Westerberg 		return;
5944f807e47SMika Westerberg 	}
5954f807e47SMika Westerberg 
5969da672a4SAndreas Noever 	if (port->config.type != TB_TYPE_PORT)
5979da672a4SAndreas Noever 		return;
598343fcb8cSAndreas Noever 	if (port->dual_link_port && port->link_nr)
599343fcb8cSAndreas Noever 		return; /*
600343fcb8cSAndreas Noever 			 * Downstream switch is reachable through two ports.
601343fcb8cSAndreas Noever 			 * Only scan on the primary port (link_nr == 0).
602343fcb8cSAndreas Noever 			 */
6039da672a4SAndreas Noever 	if (tb_wait_for_port(port, false) <= 0)
6049da672a4SAndreas Noever 		return;
6059da672a4SAndreas Noever 	if (port->remote) {
6067ea4cd6bSMika Westerberg 		tb_port_dbg(port, "port already has a remote\n");
6079da672a4SAndreas Noever 		return;
6089da672a4SAndreas Noever 	}
609dacb1287SKranthi Kuntala 
6103fb10ea4SRajmohan Mani 	tb_retimer_scan(port, true);
611dacb1287SKranthi Kuntala 
612bfe778acSMika Westerberg 	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
613bfe778acSMika Westerberg 			     tb_downstream_route(port));
6147ea4cd6bSMika Westerberg 	if (IS_ERR(sw)) {
6157ea4cd6bSMika Westerberg 		/*
6167ea4cd6bSMika Westerberg 		 * If there is an error accessing the connected switch
6177ea4cd6bSMika Westerberg 		 * it may be connected to another domain. Also we allow
6187ea4cd6bSMika Westerberg 		 * the other domain to be connected to a max depth switch.
6197ea4cd6bSMika Westerberg 		 */
6207ea4cd6bSMika Westerberg 		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
6217ea4cd6bSMika Westerberg 			tb_scan_xdomain(port);
6229da672a4SAndreas Noever 		return;
6237ea4cd6bSMika Westerberg 	}
624bfe778acSMika Westerberg 
625bfe778acSMika Westerberg 	if (tb_switch_configure(sw)) {
626bfe778acSMika Westerberg 		tb_switch_put(sw);
627bfe778acSMika Westerberg 		return;
628bfe778acSMika Westerberg 	}
629bfe778acSMika Westerberg 
63099cabbb0SMika Westerberg 	/*
6317ea4cd6bSMika Westerberg 	 * If there was previously another domain connected remove it
6327ea4cd6bSMika Westerberg 	 * first.
6337ea4cd6bSMika Westerberg 	 */
6347ea4cd6bSMika Westerberg 	if (port->xdomain) {
6357ea4cd6bSMika Westerberg 		tb_xdomain_remove(port->xdomain);
636284652a4SMika Westerberg 		tb_port_unconfigure_xdomain(port);
6377ea4cd6bSMika Westerberg 		port->xdomain = NULL;
6387ea4cd6bSMika Westerberg 	}
6397ea4cd6bSMika Westerberg 
6407ea4cd6bSMika Westerberg 	/*
64199cabbb0SMika Westerberg 	 * Do not send uevents until we have discovered all existing
64299cabbb0SMika Westerberg 	 * tunnels and know which switches were authorized already by
64399cabbb0SMika Westerberg 	 * the boot firmware.
64499cabbb0SMika Westerberg 	 */
64599cabbb0SMika Westerberg 	if (!tcm->hotplug_active)
64699cabbb0SMika Westerberg 		dev_set_uevent_suppress(&sw->dev, true);
647f67cf491SMika Westerberg 
6486ac6faeeSMika Westerberg 	/*
6496ac6faeeSMika Westerberg 	 * At the moment Thunderbolt 2 and beyond (devices with LC) we
6506ac6faeeSMika Westerberg 	 * can support runtime PM.
6516ac6faeeSMika Westerberg 	 */
6526ac6faeeSMika Westerberg 	sw->rpm = sw->generation > 1;
6536ac6faeeSMika Westerberg 
654bfe778acSMika Westerberg 	if (tb_switch_add(sw)) {
655bfe778acSMika Westerberg 		tb_switch_put(sw);
656bfe778acSMika Westerberg 		return;
657bfe778acSMika Westerberg 	}
658bfe778acSMika Westerberg 
659dfe40ca4SMika Westerberg 	/* Link the switches using both links if available */
660dfe40ca4SMika Westerberg 	upstream_port = tb_upstream_port(sw);
661dfe40ca4SMika Westerberg 	port->remote = upstream_port;
662dfe40ca4SMika Westerberg 	upstream_port->remote = port;
663dfe40ca4SMika Westerberg 	if (port->dual_link_port && upstream_port->dual_link_port) {
664dfe40ca4SMika Westerberg 		port->dual_link_port->remote = upstream_port->dual_link_port;
665dfe40ca4SMika Westerberg 		upstream_port->dual_link_port->remote = port->dual_link_port;
666dfe40ca4SMika Westerberg 	}
667dfe40ca4SMika Westerberg 
66891c0c120SMika Westerberg 	/* Enable lane bonding if supported */
6692ca3263aSMika Westerberg 	tb_switch_lane_bonding_enable(sw);
670de462039SMika Westerberg 	/* Set the link configured */
671de462039SMika Westerberg 	tb_switch_configure_link(sw);
6728a90e4faSGil Fine 	if (tb_switch_enable_clx(sw, TB_CL0S))
6738a90e4faSGil Fine 		tb_sw_warn(sw, "failed to enable CLx on upstream port\n");
6748a90e4faSGil Fine 
6758a90e4faSGil Fine 	tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI,
6768a90e4faSGil Fine 				tb_switch_is_clx_enabled(sw));
67791c0c120SMika Westerberg 
678cf29b9afSRajmohan Mani 	if (tb_enable_tmu(sw))
679cf29b9afSRajmohan Mani 		tb_sw_warn(sw, "failed to enable TMU\n");
680cf29b9afSRajmohan Mani 
681dacb1287SKranthi Kuntala 	/* Scan upstream retimers */
6823fb10ea4SRajmohan Mani 	tb_retimer_scan(upstream_port, true);
683dacb1287SKranthi Kuntala 
684e6f81858SRajmohan Mani 	/*
685e6f81858SRajmohan Mani 	 * Create USB 3.x tunnels only when the switch is plugged to the
686e6f81858SRajmohan Mani 	 * domain. This is because we scan the domain also during discovery
687e6f81858SRajmohan Mani 	 * and want to discover existing USB 3.x tunnels before we create
688e6f81858SRajmohan Mani 	 * any new.
689e6f81858SRajmohan Mani 	 */
690e6f81858SRajmohan Mani 	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
691e6f81858SRajmohan Mani 		tb_sw_warn(sw, "USB3 tunnel creation failed\n");
692e6f81858SRajmohan Mani 
693e876f34aSMika Westerberg 	tb_add_dp_resources(sw);
6949da672a4SAndreas Noever 	tb_scan_switch(sw);
6959da672a4SAndreas Noever }
6969da672a4SAndreas Noever 
/*
 * tb_deactivate_and_free_tunnel() - deactivate a tunnel and release its
 *				     resources
 * @tunnel: Tunnel to tear down (%NULL is a no-op)
 *
 * Deactivates the tunnel hardware paths, unlinks the tunnel from the
 * connection manager tunnel list and, depending on the tunnel type,
 * returns any resources that were claimed when the tunnel was set up.
 * Finally frees the tunnel structure itself.
 */
static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	struct tb_port *src_port, *dst_port;
	struct tb *tb;

	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	/*
	 * Cache these before tb_tunnel_free() at the end releases the
	 * tunnel structure.
	 */
	tb = tunnel->tb;
	src_port = tunnel->src_port;
	dst_port = tunnel->dst_port;

	switch (tunnel->type) {
	case TB_TUNNEL_DP:
		/*
		 * In case of DP tunnel make sure the DP IN resource is
		 * deallocated properly.
		 */
		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
		/* Now we can allow the domain to runtime suspend again */
		pm_runtime_mark_last_busy(&dst_port->sw->dev);
		pm_runtime_put_autosuspend(&dst_port->sw->dev);
		pm_runtime_mark_last_busy(&src_port->sw->dev);
		pm_runtime_put_autosuspend(&src_port->sw->dev);
		/* DP also reserved USB3 bandwidth, so fall through to reclaim */
		fallthrough;

	case TB_TUNNEL_USB3:
		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
		break;

	default:
		/*
		 * PCIe and DMA tunnels do not consume guaranteed
		 * bandwidth.
		 */
		break;
	}

	tb_tunnel_free(tunnel);
}
7404f807e47SMika Westerberg 
741877e50b3SLee Jones /*
7423364f0c1SAndreas Noever  * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
7433364f0c1SAndreas Noever  */
7443364f0c1SAndreas Noever static void tb_free_invalid_tunnels(struct tb *tb)
7453364f0c1SAndreas Noever {
7469d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
74793f36adeSMika Westerberg 	struct tb_tunnel *tunnel;
74893f36adeSMika Westerberg 	struct tb_tunnel *n;
7499d3cce0bSMika Westerberg 
7509d3cce0bSMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
7518afe909bSMika Westerberg 		if (tb_tunnel_is_invalid(tunnel))
7528afe909bSMika Westerberg 			tb_deactivate_and_free_tunnel(tunnel);
7533364f0c1SAndreas Noever 	}
7543364f0c1SAndreas Noever }
7553364f0c1SAndreas Noever 
756877e50b3SLee Jones /*
75723dd5bb4SAndreas Noever  * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
75823dd5bb4SAndreas Noever  */
75923dd5bb4SAndreas Noever static void tb_free_unplugged_children(struct tb_switch *sw)
76023dd5bb4SAndreas Noever {
761b433d010SMika Westerberg 	struct tb_port *port;
762dfe40ca4SMika Westerberg 
763b433d010SMika Westerberg 	tb_switch_for_each_port(sw, port) {
764dfe40ca4SMika Westerberg 		if (!tb_port_has_remote(port))
76523dd5bb4SAndreas Noever 			continue;
766dfe40ca4SMika Westerberg 
76723dd5bb4SAndreas Noever 		if (port->remote->sw->is_unplugged) {
768dacb1287SKranthi Kuntala 			tb_retimer_remove_all(port);
7698afe909bSMika Westerberg 			tb_remove_dp_resources(port->remote->sw);
770de462039SMika Westerberg 			tb_switch_unconfigure_link(port->remote->sw);
77191c0c120SMika Westerberg 			tb_switch_lane_bonding_disable(port->remote->sw);
772bfe778acSMika Westerberg 			tb_switch_remove(port->remote->sw);
77323dd5bb4SAndreas Noever 			port->remote = NULL;
774dfe40ca4SMika Westerberg 			if (port->dual_link_port)
775dfe40ca4SMika Westerberg 				port->dual_link_port->remote = NULL;
77623dd5bb4SAndreas Noever 		} else {
77723dd5bb4SAndreas Noever 			tb_free_unplugged_children(port->remote->sw);
77823dd5bb4SAndreas Noever 		}
77923dd5bb4SAndreas Noever 	}
78023dd5bb4SAndreas Noever }
78123dd5bb4SAndreas Noever 
/*
 * tb_find_pcie_down() - Find a PCIe downstream adapter for tunneling
 * @sw: Switch whose PCIe downstream adapters are searched
 * @port: Lane adapter through which the device is connected
 *
 * Returns an available (not already enabled) PCIe downstream adapter
 * of @sw that should serve @port, or the result of the generic unused
 * port lookup as a fallback. USB4 routers provide the mapping via
 * router operations; legacy Thunderbolt host routers use hard-coded
 * per-controller tables.
 */
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		/* The mapped port must really be an unused PCIe down adapter */
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		if (tb_pci_port_is_enabled(down))
			goto out;

		return down;
	}

out:
	/* Fall back to any unused PCIe downstream adapter on the router */
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}
83099cabbb0SMika Westerberg 
831e876f34aSMika Westerberg static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
832e876f34aSMika Westerberg {
833e876f34aSMika Westerberg 	struct tb_port *host_port, *port;
834e876f34aSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
835e876f34aSMika Westerberg 
836e876f34aSMika Westerberg 	host_port = tb_route(in->sw) ?
837e876f34aSMika Westerberg 		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
838e876f34aSMika Westerberg 
839e876f34aSMika Westerberg 	list_for_each_entry(port, &tcm->dp_resources, list) {
840e876f34aSMika Westerberg 		if (!tb_port_is_dpout(port))
841e876f34aSMika Westerberg 			continue;
842e876f34aSMika Westerberg 
843e876f34aSMika Westerberg 		if (tb_port_is_enabled(port)) {
844e876f34aSMika Westerberg 			tb_port_dbg(port, "in use\n");
845e876f34aSMika Westerberg 			continue;
846e876f34aSMika Westerberg 		}
847e876f34aSMika Westerberg 
848e876f34aSMika Westerberg 		tb_port_dbg(port, "DP OUT available\n");
849e876f34aSMika Westerberg 
850e876f34aSMika Westerberg 		/*
851e876f34aSMika Westerberg 		 * Keep the DP tunnel under the topology starting from
852e876f34aSMika Westerberg 		 * the same host router downstream port.
853e876f34aSMika Westerberg 		 */
854e876f34aSMika Westerberg 		if (host_port && tb_route(port->sw)) {
855e876f34aSMika Westerberg 			struct tb_port *p;
856e876f34aSMika Westerberg 
857e876f34aSMika Westerberg 			p = tb_port_at(tb_route(port->sw), tb->root_switch);
858e876f34aSMika Westerberg 			if (p != host_port)
859e876f34aSMika Westerberg 				continue;
860e876f34aSMika Westerberg 		}
861e876f34aSMika Westerberg 
862e876f34aSMika Westerberg 		return port;
863e876f34aSMika Westerberg 	}
864e876f34aSMika Westerberg 
865e876f34aSMika Westerberg 	return NULL;
866e876f34aSMika Westerberg }
867e876f34aSMika Westerberg 
/*
 * tb_tunnel_dp() - Try to establish a new DP tunnel
 * @tb: Domain structure
 *
 * Finds a pair of inactive DP IN and DP OUT adapters from the tracked
 * DP resources, reserves the DP IN resource, releases unused USB3
 * bandwidth for the new stream, and activates the tunnel. Both ends
 * are runtime resumed for the lifetime of the tunnel; on any failure
 * everything acquired so far is undone in reverse order via the error
 * labels below.
 */
static void tb_tunnel_dp(struct tb *tb)
{
	int available_up, available_down, ret, link_nr;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_dp()) {
		tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
		return;
	}

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpin(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP IN available\n");

		/* Pair the first free DP IN with a matching DP OUT */
		out = tb_find_dp_out(tb, port);
		if (out) {
			in = port;
			break;
		}
	}

	if (!in) {
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
		return;
	}
	if (!out) {
		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
		return;
	}

	/*
	 * This is only applicable to links that are not bonded (so
	 * when Thunderbolt 1 hardware is involved somewhere in the
	 * topology). For these try to share the DP bandwidth between
	 * the two lanes.
	 */
	link_nr = 1;
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel)) {
			/* A DP tunnel exists already so use the other lane */
			link_nr = 0;
			break;
		}
	}

	/*
	 * DP stream needs the domain to be active so runtime resume
	 * both ends of the tunnel.
	 *
	 * This should bring the routers in the middle active as well
	 * and keeps the domain from runtime suspending while the DP
	 * tunnel is active.
	 */
	pm_runtime_get_sync(&in->sw->dev);
	pm_runtime_get_sync(&out->sw->dev);

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		goto err_rpm_put;
	}

	/* Make all unused USB3 bandwidth available for the new DP tunnel */
	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
	if (ret) {
		tb_warn(tb, "failed to release unused bandwidth\n");
		goto err_dealloc_dp;
	}

	ret = tb_available_bandwidth(tb, in, out, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
	       available_up, available_down);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
				    available_down);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	/* Give any bandwidth the DP tunnel did not need back to USB3 */
	tb_reclaim_usb3_bandwidth(tb, in, out);
	return;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	tb_reclaim_usb3_bandwidth(tb, in, out);
err_dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
err_rpm_put:
	/* Drop the runtime PM references taken above */
	pm_runtime_mark_last_busy(&out->sw->dev);
	pm_runtime_put_autosuspend(&out->sw->dev);
	pm_runtime_mark_last_busy(&in->sw->dev);
	pm_runtime_put_autosuspend(&in->sw->dev);
}
9884f807e47SMika Westerberg 
9898afe909bSMika Westerberg static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
9904f807e47SMika Westerberg {
9918afe909bSMika Westerberg 	struct tb_port *in, *out;
9928afe909bSMika Westerberg 	struct tb_tunnel *tunnel;
9938afe909bSMika Westerberg 
9948afe909bSMika Westerberg 	if (tb_port_is_dpin(port)) {
9958afe909bSMika Westerberg 		tb_port_dbg(port, "DP IN resource unavailable\n");
9968afe909bSMika Westerberg 		in = port;
9978afe909bSMika Westerberg 		out = NULL;
9988afe909bSMika Westerberg 	} else {
9998afe909bSMika Westerberg 		tb_port_dbg(port, "DP OUT resource unavailable\n");
10008afe909bSMika Westerberg 		in = NULL;
10018afe909bSMika Westerberg 		out = port;
10028afe909bSMika Westerberg 	}
10038afe909bSMika Westerberg 
10048afe909bSMika Westerberg 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
10058afe909bSMika Westerberg 	tb_deactivate_and_free_tunnel(tunnel);
10068afe909bSMika Westerberg 	list_del_init(&port->list);
10078afe909bSMika Westerberg 
10088afe909bSMika Westerberg 	/*
10098afe909bSMika Westerberg 	 * See if there is another DP OUT port that can be used for
10108afe909bSMika Westerberg 	 * to create another tunnel.
10118afe909bSMika Westerberg 	 */
10128afe909bSMika Westerberg 	tb_tunnel_dp(tb);
10138afe909bSMika Westerberg }
10148afe909bSMika Westerberg 
10158afe909bSMika Westerberg static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
10168afe909bSMika Westerberg {
10178afe909bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
10188afe909bSMika Westerberg 	struct tb_port *p;
10198afe909bSMika Westerberg 
10208afe909bSMika Westerberg 	if (tb_port_is_enabled(port))
10218afe909bSMika Westerberg 		return;
10228afe909bSMika Westerberg 
10238afe909bSMika Westerberg 	list_for_each_entry(p, &tcm->dp_resources, list) {
10248afe909bSMika Westerberg 		if (p == port)
10258afe909bSMika Westerberg 			return;
10268afe909bSMika Westerberg 	}
10278afe909bSMika Westerberg 
10288afe909bSMika Westerberg 	tb_port_dbg(port, "DP %s resource available\n",
10298afe909bSMika Westerberg 		    tb_port_is_dpin(port) ? "IN" : "OUT");
10308afe909bSMika Westerberg 	list_add_tail(&port->list, &tcm->dp_resources);
10318afe909bSMika Westerberg 
10328afe909bSMika Westerberg 	/* Look for suitable DP IN <-> DP OUT pairs now */
10338afe909bSMika Westerberg 	tb_tunnel_dp(tb);
10344f807e47SMika Westerberg }
10354f807e47SMika Westerberg 
103681a2e3e4SMika Westerberg static void tb_disconnect_and_release_dp(struct tb *tb)
103781a2e3e4SMika Westerberg {
103881a2e3e4SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
103981a2e3e4SMika Westerberg 	struct tb_tunnel *tunnel, *n;
104081a2e3e4SMika Westerberg 
104181a2e3e4SMika Westerberg 	/*
104281a2e3e4SMika Westerberg 	 * Tear down all DP tunnels and release their resources. They
104381a2e3e4SMika Westerberg 	 * will be re-established after resume based on plug events.
104481a2e3e4SMika Westerberg 	 */
104581a2e3e4SMika Westerberg 	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
104681a2e3e4SMika Westerberg 		if (tb_tunnel_is_dp(tunnel))
104781a2e3e4SMika Westerberg 			tb_deactivate_and_free_tunnel(tunnel);
104881a2e3e4SMika Westerberg 	}
104981a2e3e4SMika Westerberg 
105081a2e3e4SMika Westerberg 	while (!list_empty(&tcm->dp_resources)) {
105181a2e3e4SMika Westerberg 		struct tb_port *port;
105281a2e3e4SMika Westerberg 
105381a2e3e4SMika Westerberg 		port = list_first_entry(&tcm->dp_resources,
105481a2e3e4SMika Westerberg 					struct tb_port, list);
105581a2e3e4SMika Westerberg 		list_del_init(&port->list);
105681a2e3e4SMika Westerberg 	}
105781a2e3e4SMika Westerberg }
105881a2e3e4SMika Westerberg 
/*
 * tb_disconnect_pci() - Tear down the PCIe tunnel ending at @sw
 * @tb: Domain structure
 * @sw: Router whose PCIe upstream adapter terminates the tunnel
 *
 * Returns %0 on success or %-ENODEV (with a WARN) if the PCIe
 * upstream adapter or its tunnel cannot be found, which would
 * indicate a driver bug.
 */
static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_tunnel *tunnel;
	struct tb_port *up;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (WARN_ON(!up))
		return -ENODEV;

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
	if (WARN_ON(!tunnel))
		return -ENODEV;

	/* Disconnect the router's xHCI before the tunnel goes down */
	tb_switch_xhci_disconnect(sw);

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);
	tb_tunnel_free(tunnel);
	return 0;
}
10793da88be2SMika Westerberg 
/*
 * tb_tunnel_pci() - Establish a PCIe tunnel to @sw if possible
 * @tb: Domain structure
 * @sw: Router to tunnel PCIe to
 *
 * Pairs the router's PCIe upstream adapter with a downstream adapter
 * on the parent router and activates a tunnel between them. Returns
 * %0 when the tunnel was created or when there is nothing to do (no
 * PCIe adapters available), %-ENOMEM on allocation failure and %-EIO
 * if the tunnel could not be activated.
 */
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *parent_sw;
	struct tb_tunnel *tunnel;

	/* No PCIe upstream adapter means nothing to tunnel */
	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	parent_sw = tb_to_switch(sw->dev.parent);
	port = tb_port_at(tb_route(sw), parent_sw);
	down = tb_find_pcie_down(parent_sw, port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	/*
	 * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
	 * here.
	 */
	if (tb_switch_pcie_l1_enable(sw))
		tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");

	/* Integrated xHCI can be connected now that PCIe is up */
	if (tb_switch_xhci_connect(sw))
		tb_sw_warn(sw, "failed to connect xHCI\n");

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}
11259da672a4SAndreas Noever 
1126180b0689SMika Westerberg static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1127180b0689SMika Westerberg 				    int transmit_path, int transmit_ring,
1128180b0689SMika Westerberg 				    int receive_path, int receive_ring)
11297ea4cd6bSMika Westerberg {
11307ea4cd6bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
11317ea4cd6bSMika Westerberg 	struct tb_port *nhi_port, *dst_port;
11327ea4cd6bSMika Westerberg 	struct tb_tunnel *tunnel;
11337ea4cd6bSMika Westerberg 	struct tb_switch *sw;
11347ea4cd6bSMika Westerberg 
11357ea4cd6bSMika Westerberg 	sw = tb_to_switch(xd->dev.parent);
11367ea4cd6bSMika Westerberg 	dst_port = tb_port_at(xd->route, sw);
1137386e5e29SMika Westerberg 	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
11387ea4cd6bSMika Westerberg 
11397ea4cd6bSMika Westerberg 	mutex_lock(&tb->lock);
1140180b0689SMika Westerberg 	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
1141180b0689SMika Westerberg 				     transmit_ring, receive_path, receive_ring);
11427ea4cd6bSMika Westerberg 	if (!tunnel) {
11437ea4cd6bSMika Westerberg 		mutex_unlock(&tb->lock);
11447ea4cd6bSMika Westerberg 		return -ENOMEM;
11457ea4cd6bSMika Westerberg 	}
11467ea4cd6bSMika Westerberg 
11477ea4cd6bSMika Westerberg 	if (tb_tunnel_activate(tunnel)) {
11487ea4cd6bSMika Westerberg 		tb_port_info(nhi_port,
11497ea4cd6bSMika Westerberg 			     "DMA tunnel activation failed, aborting\n");
11507ea4cd6bSMika Westerberg 		tb_tunnel_free(tunnel);
11517ea4cd6bSMika Westerberg 		mutex_unlock(&tb->lock);
11527ea4cd6bSMika Westerberg 		return -EIO;
11537ea4cd6bSMika Westerberg 	}
11547ea4cd6bSMika Westerberg 
11557ea4cd6bSMika Westerberg 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
11567ea4cd6bSMika Westerberg 	mutex_unlock(&tb->lock);
11577ea4cd6bSMika Westerberg 	return 0;
11587ea4cd6bSMika Westerberg }
11597ea4cd6bSMika Westerberg 
1160180b0689SMika Westerberg static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1161180b0689SMika Westerberg 					  int transmit_path, int transmit_ring,
1162180b0689SMika Westerberg 					  int receive_path, int receive_ring)
11637ea4cd6bSMika Westerberg {
1164180b0689SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
1165180b0689SMika Westerberg 	struct tb_port *nhi_port, *dst_port;
1166180b0689SMika Westerberg 	struct tb_tunnel *tunnel, *n;
11677ea4cd6bSMika Westerberg 	struct tb_switch *sw;
11687ea4cd6bSMika Westerberg 
11697ea4cd6bSMika Westerberg 	sw = tb_to_switch(xd->dev.parent);
11707ea4cd6bSMika Westerberg 	dst_port = tb_port_at(xd->route, sw);
1171180b0689SMika Westerberg 	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
11727ea4cd6bSMika Westerberg 
1173180b0689SMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1174180b0689SMika Westerberg 		if (!tb_tunnel_is_dma(tunnel))
1175180b0689SMika Westerberg 			continue;
1176180b0689SMika Westerberg 		if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
1177180b0689SMika Westerberg 			continue;
1178180b0689SMika Westerberg 
1179180b0689SMika Westerberg 		if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
1180180b0689SMika Westerberg 					receive_path, receive_ring))
11818afe909bSMika Westerberg 			tb_deactivate_and_free_tunnel(tunnel);
11827ea4cd6bSMika Westerberg 	}
1183180b0689SMika Westerberg }
11847ea4cd6bSMika Westerberg 
/*
 * tb_disconnect_xdomain_paths() - Tear down DMA paths to an XDomain
 * @tb: Domain structure
 * @xd: XDomain whose DMA paths are torn down
 * @transmit_path: Transmit HopID to match
 * @transmit_ring: Transmit NHI ring to match
 * @receive_path: Receive HopID to match
 * @receive_ring: Receive NHI ring to match
 *
 * Locking wrapper around __tb_disconnect_xdomain_paths(). If the
 * XDomain is already unplugged the tunnels are torn down elsewhere so
 * there is nothing to do here. Always returns %0.
 */
static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				       int transmit_path, int transmit_ring,
				       int receive_path, int receive_ring)
{
	if (!xd->is_unplugged) {
		mutex_lock(&tb->lock);
		__tb_disconnect_xdomain_paths(tb, xd, transmit_path,
					      transmit_ring, receive_path,
					      receive_ring);
		mutex_unlock(&tb->lock);
	}
	return 0;
}
11987ea4cd6bSMika Westerberg 
1199d6cc51cdSAndreas Noever /* hotplug handling */
1200d6cc51cdSAndreas Noever 
/*
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq. Takes the domain lock and dispatches a single
 * plug or unplug event to the right handler: router add/remove,
 * XDomain connect/disconnect, DP resource tracking, or xHCI
 * connect/disconnect requests (signaled with port number 0).
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;

	/* Bring the domain back from sleep if it was suspended */
	pm_runtime_get_sync(&tb->dev);

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	/* Takes a reference to the router; released at put_sw below */
	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}

	/* Keep the router runtime resumed while we operate on its ports */
	pm_runtime_get_sync(&sw->dev);

	if (ev->unplug) {
		/* Any retimers on this port go away with the link */
		tb_retimer_remove_all(port);

		if (tb_port_has_remote(port)) {
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_tmu_disable(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
			/* Maybe we can create another DP tunnel */
			tb_tunnel_dp(tb);
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * all the tunnels below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
			tb_xdomain_put(xd);
			tb_port_unconfigure_xdomain(port);
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_unavailable(tb, port);
		} else if (!port->port) {
			tb_sw_dbg(sw, "xHCI disconnect request\n");
			tb_switch_xhci_disconnect(sw);
		} else {
			tb_port_dbg(port,
				   "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else if (!port->port && sw->authorized) {
		tb_sw_dbg(sw, "xHCI connect request\n");
		tb_switch_xhci_connect(sw);
	} else {
		if (tb_port_is_null(port)) {
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_available(tb, port);
		}
	}

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);

	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);

	kfree(ev);
}
1315d6cc51cdSAndreas Noever 
1316877e50b3SLee Jones /*
1317d6cc51cdSAndreas Noever  * tb_schedule_hotplug_handler() - callback function for the control channel
1318d6cc51cdSAndreas Noever  *
1319d6cc51cdSAndreas Noever  * Delegates to tb_handle_hotplug.
1320d6cc51cdSAndreas Noever  */
132181a54b5eSMika Westerberg static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
132281a54b5eSMika Westerberg 			    const void *buf, size_t size)
1323d6cc51cdSAndreas Noever {
132481a54b5eSMika Westerberg 	const struct cfg_event_pkg *pkg = buf;
132581a54b5eSMika Westerberg 	u64 route;
132681a54b5eSMika Westerberg 
132781a54b5eSMika Westerberg 	if (type != TB_CFG_PKG_EVENT) {
132881a54b5eSMika Westerberg 		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
132981a54b5eSMika Westerberg 		return;
133081a54b5eSMika Westerberg 	}
133181a54b5eSMika Westerberg 
133281a54b5eSMika Westerberg 	route = tb_cfg_get_route(&pkg->header);
133381a54b5eSMika Westerberg 
1334210e9f56SMika Westerberg 	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
133581a54b5eSMika Westerberg 		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
133681a54b5eSMika Westerberg 			pkg->port);
133781a54b5eSMika Westerberg 	}
133881a54b5eSMika Westerberg 
13394f807e47SMika Westerberg 	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
1340d6cc51cdSAndreas Noever }
1341d6cc51cdSAndreas Noever 
/*
 * tb_stop() - tear down connection manager state when the domain stops
 *
 * Cancels the deferred removal work, frees all tunnels (deactivating
 * DMA tunnels, which need a functional driver), removes the root
 * switch and stops hotplug handling.
 */
static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	cancel_delayed_work(&tcm->remove_work);
	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}
1363d6cc51cdSAndreas Noever 
136499cabbb0SMika Westerberg static int tb_scan_finalize_switch(struct device *dev, void *data)
136599cabbb0SMika Westerberg {
136699cabbb0SMika Westerberg 	if (tb_is_switch(dev)) {
136799cabbb0SMika Westerberg 		struct tb_switch *sw = tb_to_switch(dev);
136899cabbb0SMika Westerberg 
136999cabbb0SMika Westerberg 		/*
137099cabbb0SMika Westerberg 		 * If we found that the switch was already setup by the
137199cabbb0SMika Westerberg 		 * boot firmware, mark it as authorized now before we
137299cabbb0SMika Westerberg 		 * send uevent to userspace.
137399cabbb0SMika Westerberg 		 */
137499cabbb0SMika Westerberg 		if (sw->boot)
137599cabbb0SMika Westerberg 			sw->authorized = 1;
137699cabbb0SMika Westerberg 
137799cabbb0SMika Westerberg 		dev_set_uevent_suppress(dev, false);
137899cabbb0SMika Westerberg 		kobject_uevent(&dev->kobj, KOBJ_ADD);
137999cabbb0SMika Westerberg 		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
138099cabbb0SMika Westerberg 	}
138199cabbb0SMika Westerberg 
138299cabbb0SMika Westerberg 	return 0;
138399cabbb0SMika Westerberg }
138499cabbb0SMika Westerberg 
/*
 * tb_start() - allocate and configure the root switch, scan the
 * topology, discover boot-firmware tunnels and start handling hotplug
 * events.
 *
 * Return: %0 on success, negative errno otherwise.
 */
static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available so disable firmware upgrade of the
	 * root switch.
	 */
	tb->root_switch->no_nvm_upgrade = true;
	/* All USB4 routers support runtime PM */
	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_HIFI, false);
	/* Enable TMU if it is off */
	tb_switch_tmu_enable(tb->root_switch);
	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb);
	/*
	 * If the boot firmware did not create USB 3.x tunnels create them
	 * now for the whole topology.
	 */
	tb_create_usb3_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}
1438d6cc51cdSAndreas Noever 
/*
 * tb_suspend_noirq() - prepare the domain for system sleep
 *
 * Tears down DP tunnels, suspends the whole topology (second argument
 * false: system sleep, cf. tb_runtime_suspend() which passes true) and
 * stops hotplug handling until tb_resume_noirq().
 */
static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_disconnect_and_release_dp(tb);
	tb_switch_suspend(tb->root_switch, false);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}
145123dd5bb4SAndreas Noever 
/*
 * tb_restore_children() - restore link configuration after resume
 * @sw: Router whose downstream links to restore
 *
 * Re-enables CLx and TMU for @sw, then re-establishes lane bonding and
 * link/XDomain configuration for each connected port, recursing into
 * the child routers.
 */
static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;

	/* No need to restore if the router is already unplugged */
	if (sw->is_unplugged)
		return;

	if (tb_switch_enable_clx(sw, TB_CL0S))
		tb_sw_warn(sw, "failed to re-enable CLx on upstream port\n");

	/*
	 * tb_switch_tmu_configure() was already called when the switch was
	 * added before entering system sleep or runtime suspend,
	 * so no need to call it again before enabling TMU.
	 */
	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_for_each_port(sw, port) {
		/* Only ports with something behind them need restoring */
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (port->remote) {
			tb_switch_lane_bonding_enable(port->remote->sw);
			tb_switch_configure_link(port->remote->sw);

			tb_restore_children(port->remote->sw);
		} else if (port->xdomain) {
			tb_port_configure_xdomain(port);
		}
	}
}
148591c0c120SMika Westerberg 
/*
 * tb_resume_noirq() - bring the domain back from system sleep
 *
 * Resets and resumes the root switch, restores child router
 * configuration, tears down any tunnels left behind by the boot or
 * restore kernel firmware, and re-establishes our own tunnels before
 * re-enabling hotplug handling.
 */
static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;
	unsigned int usb3_delay = 0;
	LIST_HEAD(tunnels);

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have setup */
	tb_switch_reset(tb->root_switch);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);

	/*
	 * If we get here from suspend to disk the boot firmware or the
	 * restore kernel might have created tunnels of its own. Since
	 * we cannot be sure they are usable for us we find and tear
	 * them down.
	 */
	tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
	list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
		if (tb_tunnel_is_usb3(tunnel))
			usb3_delay = 500;
		tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}

	/* Re-create our tunnels now */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/* USB3 requires delay before it can be re-activated */
		if (tb_tunnel_is_usb3(tunnel)) {
			msleep(usb3_delay);
			/* Only need to do it once */
			usb3_delay = 0;
		}
		tb_tunnel_restart(tunnel);
	}
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * the pcie links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	 /* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}
15419d3cce0bSMika Westerberg 
15427ea4cd6bSMika Westerberg static int tb_free_unplugged_xdomains(struct tb_switch *sw)
15437ea4cd6bSMika Westerberg {
1544b433d010SMika Westerberg 	struct tb_port *port;
1545b433d010SMika Westerberg 	int ret = 0;
15467ea4cd6bSMika Westerberg 
1547b433d010SMika Westerberg 	tb_switch_for_each_port(sw, port) {
15487ea4cd6bSMika Westerberg 		if (tb_is_upstream_port(port))
15497ea4cd6bSMika Westerberg 			continue;
15507ea4cd6bSMika Westerberg 		if (port->xdomain && port->xdomain->is_unplugged) {
1551dacb1287SKranthi Kuntala 			tb_retimer_remove_all(port);
15527ea4cd6bSMika Westerberg 			tb_xdomain_remove(port->xdomain);
1553284652a4SMika Westerberg 			tb_port_unconfigure_xdomain(port);
15547ea4cd6bSMika Westerberg 			port->xdomain = NULL;
15557ea4cd6bSMika Westerberg 			ret++;
15567ea4cd6bSMika Westerberg 		} else if (port->remote) {
15577ea4cd6bSMika Westerberg 			ret += tb_free_unplugged_xdomains(port->remote->sw);
15587ea4cd6bSMika Westerberg 		}
15597ea4cd6bSMika Westerberg 	}
15607ea4cd6bSMika Westerberg 
15617ea4cd6bSMika Westerberg 	return ret;
15627ea4cd6bSMika Westerberg }
15637ea4cd6bSMika Westerberg 
/* Stop handling hotplug events for the freeze phase of hibernation */
static int tb_freeze_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = false;
	return 0;
}
1571884e4d57SMika Westerberg 
/* Resume handling hotplug events in the thaw phase of hibernation */
static int tb_thaw_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = true;
	return 0;
}
1579884e4d57SMika Westerberg 
/* PM complete callback: release unplugged XDomains and rescan if needed */
static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains and if there is a case where
	 * another domain is swapped in place of unplugged XDomain we
	 * need to run another rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}
15927ea4cd6bSMika Westerberg 
/*
 * tb_runtime_suspend() - runtime PM counterpart of tb_suspend_noirq()
 *
 * Suspends the topology (second argument true: runtime suspend, cf.
 * tb_suspend_noirq() which passes false) and stops hotplug handling.
 */
static int tb_runtime_suspend(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	mutex_lock(&tb->lock);
	tb_switch_suspend(tb->root_switch, true);
	tcm->hotplug_active = false;
	mutex_unlock(&tb->lock);

	return 0;
}
16046ac6faeeSMika Westerberg 
16056ac6faeeSMika Westerberg static void tb_remove_work(struct work_struct *work)
16066ac6faeeSMika Westerberg {
16076ac6faeeSMika Westerberg 	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
16086ac6faeeSMika Westerberg 	struct tb *tb = tcm_to_tb(tcm);
16096ac6faeeSMika Westerberg 
16106ac6faeeSMika Westerberg 	mutex_lock(&tb->lock);
16116ac6faeeSMika Westerberg 	if (tb->root_switch) {
16126ac6faeeSMika Westerberg 		tb_free_unplugged_children(tb->root_switch);
16136ac6faeeSMika Westerberg 		tb_free_unplugged_xdomains(tb->root_switch);
16146ac6faeeSMika Westerberg 	}
16156ac6faeeSMika Westerberg 	mutex_unlock(&tb->lock);
16166ac6faeeSMika Westerberg }
16176ac6faeeSMika Westerberg 
/*
 * tb_runtime_resume() - runtime PM resume
 *
 * Brings the topology back, restarts the tunnels and schedules
 * deferred cleanup of anything that was unplugged while the domain was
 * runtime suspended.
 */
static int tb_runtime_resume(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	mutex_lock(&tb->lock);
	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	tcm->hotplug_active = true;
	mutex_unlock(&tb->lock);

	/*
	 * Schedule cleanup of any unplugged devices. Run this in a
	 * separate thread to avoid possible deadlock if the device
	 * removal runtime resumes the unplugged device.
	 */
	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
	return 0;
}
16406ac6faeeSMika Westerberg 
/* Domain callbacks for the software connection manager */
static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.freeze_noirq = tb_freeze_noirq,
	.thaw_noirq = tb_thaw_noirq,
	.complete = tb_complete,
	.runtime_suspend = tb_runtime_suspend,
	.runtime_resume = tb_runtime_resume,
	.handle_event = tb_handle_event,
	.disapprove_switch = tb_disconnect_pci,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};
16579d3cce0bSMika Westerberg 
/*
 * During suspend the Thunderbolt controller is reset and all PCIe
 * tunnels are lost. The NHI driver will try to reestablish all tunnels
 * during resume. This adds device links between the tunneled PCIe
 * downstream ports and the NHI so that the device core will make sure
 * NHI is resumed first before the rest.
 */
static void tb_apple_add_links(struct tb_nhi *nhi)
{
	struct pci_dev *upstream, *pdev;

	/* Only relevant on Apple hardware */
	if (!x86_apple_machine)
		return;

	/* Restricted to these NHI device IDs */
	switch (nhi->pdev->device) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
		break;
	default:
		return;
	}

	/* Walk up until we find the PCIe upstream port of the controller */
	upstream = pci_upstream_bridge(nhi->pdev);
	while (upstream) {
		if (!pci_is_pcie(upstream))
			return;
		if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
			break;
		upstream = pci_upstream_bridge(upstream);
	}

	if (!upstream)
		return;

	/*
	 * For each hotplug downstream port, create add device link
	 * back to NHI so that PCIe tunnels can be re-established after
	 * sleep.
	 */
	for_each_pci_bridge(pdev, upstream->subordinate) {
		const struct device_link *link;

		if (!pci_is_pcie(pdev))
			continue;
		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
		    !pdev->is_hotplug_bridge)
			continue;

		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
				       DL_FLAG_AUTOREMOVE_SUPPLIER |
				       DL_FLAG_PM_RUNTIME);
		if (link) {
			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
				dev_name(&pdev->dev));
		} else {
			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
				 dev_name(&pdev->dev));
		}
	}
}
1720349bfe08SMika Westerberg 
/*
 * tb_probe() - create a software connection manager domain
 * @nhi: Native host interface the domain runs on
 *
 * Allocates the domain, selects the security level (NOPCIE when ACPI
 * does not allow PCIe tunneling), initializes the connection manager
 * state and adds the Apple/ACPI device links needed for resume
 * ordering.
 *
 * Return: Pointer to the new domain or %NULL on failure.
 */
struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
	if (!tb)
		return NULL;

	if (tb_acpi_may_tunnel_pcie())
		tb->security_level = TB_SECURITY_USER;
	else
		tb->security_level = TB_SECURITY_NOPCIE;

	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);
	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);

	tb_dbg(tb, "using software connection manager\n");

	tb_apple_add_links(nhi);
	tb_acpi_add_links(nhi);

	return tb;
}
1749