xref: /openbmc/linux/drivers/thunderbolt/tb.c (revision 43f977bc)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2d6cc51cdSAndreas Noever /*
399cabbb0SMika Westerberg  * Thunderbolt driver - bus logic (NHI independent)
4d6cc51cdSAndreas Noever  *
5d6cc51cdSAndreas Noever  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
699cabbb0SMika Westerberg  * Copyright (C) 2019, Intel Corporation
7d6cc51cdSAndreas Noever  */
8d6cc51cdSAndreas Noever 
9d6cc51cdSAndreas Noever #include <linux/slab.h>
10d6cc51cdSAndreas Noever #include <linux/errno.h>
11d6cc51cdSAndreas Noever #include <linux/delay.h>
126ac6faeeSMika Westerberg #include <linux/pm_runtime.h>
13349bfe08SMika Westerberg #include <linux/platform_data/x86/apple.h>
14d6cc51cdSAndreas Noever 
15d6cc51cdSAndreas Noever #include "tb.h"
167adf6097SAndreas Noever #include "tb_regs.h"
171752b9f7SMika Westerberg #include "tunnel.h"
18d6cc51cdSAndreas Noever 
197f0a34d7SMika Westerberg #define TB_TIMEOUT	100 /* ms */
207f0a34d7SMika Westerberg 
/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 * @remove_work: Work used to remove any unplugged routers after
 *		 runtime resume
 *
 * NOTE(review): tcm_to_tb() recovers the owning struct tb by
 * subtracting sizeof(struct tb) from a tb_cm pointer, which assumes
 * this structure is allocated immediately after struct tb (the
 * domain's private data) — confirm against tb_priv()/domain alloc.
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
	struct delayed_work remove_work;
};
389da672a4SAndreas Noever 
396ac6faeeSMika Westerberg static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
406ac6faeeSMika Westerberg {
416ac6faeeSMika Westerberg 	return ((void *)tcm - sizeof(struct tb));
426ac6faeeSMika Westerberg }
436ac6faeeSMika Westerberg 
/* Deferred hotplug event, executed from the domain workqueue */
struct tb_hotplug_event {
	struct work_struct work;	/* runs tb_handle_hotplug() */
	struct tb *tb;			/* domain the event belongs to */
	u64 route;			/* route string of the router that saw the event */
	u8 port;			/* adapter number on that router */
	bool unplug;			/* true for unplug, false for plug */
};
514f807e47SMika Westerberg 
524f807e47SMika Westerberg static void tb_handle_hotplug(struct work_struct *work);
534f807e47SMika Westerberg 
544f807e47SMika Westerberg static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
554f807e47SMika Westerberg {
564f807e47SMika Westerberg 	struct tb_hotplug_event *ev;
574f807e47SMika Westerberg 
584f807e47SMika Westerberg 	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
594f807e47SMika Westerberg 	if (!ev)
604f807e47SMika Westerberg 		return;
614f807e47SMika Westerberg 
624f807e47SMika Westerberg 	ev->tb = tb;
634f807e47SMika Westerberg 	ev->route = route;
644f807e47SMika Westerberg 	ev->port = port;
654f807e47SMika Westerberg 	ev->unplug = unplug;
664f807e47SMika Westerberg 	INIT_WORK(&ev->work, tb_handle_hotplug);
674f807e47SMika Westerberg 	queue_work(tb->wq, &ev->work);
684f807e47SMika Westerberg }
694f807e47SMika Westerberg 
709da672a4SAndreas Noever /* enumeration & hot plug handling */
719da672a4SAndreas Noever 
728afe909bSMika Westerberg static void tb_add_dp_resources(struct tb_switch *sw)
738afe909bSMika Westerberg {
748afe909bSMika Westerberg 	struct tb_cm *tcm = tb_priv(sw->tb);
758afe909bSMika Westerberg 	struct tb_port *port;
768afe909bSMika Westerberg 
778afe909bSMika Westerberg 	tb_switch_for_each_port(sw, port) {
788afe909bSMika Westerberg 		if (!tb_port_is_dpin(port))
798afe909bSMika Westerberg 			continue;
808afe909bSMika Westerberg 
818afe909bSMika Westerberg 		if (!tb_switch_query_dp_resource(sw, port))
828afe909bSMika Westerberg 			continue;
838afe909bSMika Westerberg 
848afe909bSMika Westerberg 		list_add_tail(&port->list, &tcm->dp_resources);
858afe909bSMika Westerberg 		tb_port_dbg(port, "DP IN resource available\n");
868afe909bSMika Westerberg 	}
878afe909bSMika Westerberg }
888afe909bSMika Westerberg 
898afe909bSMika Westerberg static void tb_remove_dp_resources(struct tb_switch *sw)
908afe909bSMika Westerberg {
918afe909bSMika Westerberg 	struct tb_cm *tcm = tb_priv(sw->tb);
928afe909bSMika Westerberg 	struct tb_port *port, *tmp;
938afe909bSMika Westerberg 
948afe909bSMika Westerberg 	/* Clear children resources first */
958afe909bSMika Westerberg 	tb_switch_for_each_port(sw, port) {
968afe909bSMika Westerberg 		if (tb_port_has_remote(port))
978afe909bSMika Westerberg 			tb_remove_dp_resources(port->remote->sw);
988afe909bSMika Westerberg 	}
998afe909bSMika Westerberg 
1008afe909bSMika Westerberg 	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
1018afe909bSMika Westerberg 		if (port->sw == sw) {
1028afe909bSMika Westerberg 			tb_port_dbg(port, "DP OUT resource unavailable\n");
1038afe909bSMika Westerberg 			list_del_init(&port->list);
1048afe909bSMika Westerberg 		}
1058afe909bSMika Westerberg 	}
1068afe909bSMika Westerberg }
1078afe909bSMika Westerberg 
10843bddb26SMika Westerberg static void tb_switch_discover_tunnels(struct tb_switch *sw,
10943bddb26SMika Westerberg 				       struct list_head *list,
11043bddb26SMika Westerberg 				       bool alloc_hopids)
1110414bec5SMika Westerberg {
1120414bec5SMika Westerberg 	struct tb *tb = sw->tb;
1130414bec5SMika Westerberg 	struct tb_port *port;
1140414bec5SMika Westerberg 
115b433d010SMika Westerberg 	tb_switch_for_each_port(sw, port) {
1160414bec5SMika Westerberg 		struct tb_tunnel *tunnel = NULL;
1170414bec5SMika Westerberg 
1180414bec5SMika Westerberg 		switch (port->config.type) {
1194f807e47SMika Westerberg 		case TB_TYPE_DP_HDMI_IN:
12043bddb26SMika Westerberg 			tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
1214f807e47SMika Westerberg 			break;
1224f807e47SMika Westerberg 
1230414bec5SMika Westerberg 		case TB_TYPE_PCIE_DOWN:
12443bddb26SMika Westerberg 			tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
1250414bec5SMika Westerberg 			break;
1260414bec5SMika Westerberg 
127e6f81858SRajmohan Mani 		case TB_TYPE_USB3_DOWN:
12843bddb26SMika Westerberg 			tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
129e6f81858SRajmohan Mani 			break;
130e6f81858SRajmohan Mani 
1310414bec5SMika Westerberg 		default:
1320414bec5SMika Westerberg 			break;
1330414bec5SMika Westerberg 		}
1340414bec5SMika Westerberg 
13543bddb26SMika Westerberg 		if (tunnel)
13643bddb26SMika Westerberg 			list_add_tail(&tunnel->list, list);
13743bddb26SMika Westerberg 	}
1384f807e47SMika Westerberg 
13943bddb26SMika Westerberg 	tb_switch_for_each_port(sw, port) {
14043bddb26SMika Westerberg 		if (tb_port_has_remote(port)) {
14143bddb26SMika Westerberg 			tb_switch_discover_tunnels(port->remote->sw, list,
14243bddb26SMika Westerberg 						   alloc_hopids);
14343bddb26SMika Westerberg 		}
14443bddb26SMika Westerberg 	}
14543bddb26SMika Westerberg }
14643bddb26SMika Westerberg 
14743bddb26SMika Westerberg static void tb_discover_tunnels(struct tb *tb)
14843bddb26SMika Westerberg {
14943bddb26SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
15043bddb26SMika Westerberg 	struct tb_tunnel *tunnel;
15143bddb26SMika Westerberg 
15243bddb26SMika Westerberg 	tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
15343bddb26SMika Westerberg 
15443bddb26SMika Westerberg 	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
1554f807e47SMika Westerberg 		if (tb_tunnel_is_pci(tunnel)) {
1560414bec5SMika Westerberg 			struct tb_switch *parent = tunnel->dst_port->sw;
1570414bec5SMika Westerberg 
1580414bec5SMika Westerberg 			while (parent != tunnel->src_port->sw) {
1590414bec5SMika Westerberg 				parent->boot = true;
1600414bec5SMika Westerberg 				parent = tb_switch_parent(parent);
1610414bec5SMika Westerberg 			}
162c94732bdSMika Westerberg 		} else if (tb_tunnel_is_dp(tunnel)) {
163c94732bdSMika Westerberg 			/* Keep the domain from powering down */
164c94732bdSMika Westerberg 			pm_runtime_get_sync(&tunnel->src_port->sw->dev);
165c94732bdSMika Westerberg 			pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
1664f807e47SMika Westerberg 		}
1670414bec5SMika Westerberg 	}
1680414bec5SMika Westerberg }
1699da672a4SAndreas Noever 
170284652a4SMika Westerberg static int tb_port_configure_xdomain(struct tb_port *port)
171284652a4SMika Westerberg {
172341d4518SMika Westerberg 	/*
173341d4518SMika Westerberg 	 * XDomain paths currently only support single lane so we must
174341d4518SMika Westerberg 	 * disable the other lane according to USB4 spec.
175341d4518SMika Westerberg 	 */
176341d4518SMika Westerberg 	tb_port_disable(port->dual_link_port);
177341d4518SMika Westerberg 
178284652a4SMika Westerberg 	if (tb_switch_is_usb4(port->sw))
179284652a4SMika Westerberg 		return usb4_port_configure_xdomain(port);
180284652a4SMika Westerberg 	return tb_lc_configure_xdomain(port);
181284652a4SMika Westerberg }
182284652a4SMika Westerberg 
/*
 * Undo tb_port_configure_xdomain(): tear down the XDomain
 * configuration of @port and re-enable the second lane that was
 * disabled when the XDomain path was set up.
 */
static void tb_port_unconfigure_xdomain(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		usb4_port_unconfigure_xdomain(port);
	else
		tb_lc_unconfigure_xdomain(port);

	/* Counterpart of the tb_port_disable() done at configure time */
	tb_port_enable(port->dual_link_port);
}
192284652a4SMika Westerberg 
1937ea4cd6bSMika Westerberg static void tb_scan_xdomain(struct tb_port *port)
1947ea4cd6bSMika Westerberg {
1957ea4cd6bSMika Westerberg 	struct tb_switch *sw = port->sw;
1967ea4cd6bSMika Westerberg 	struct tb *tb = sw->tb;
1977ea4cd6bSMika Westerberg 	struct tb_xdomain *xd;
1987ea4cd6bSMika Westerberg 	u64 route;
1997ea4cd6bSMika Westerberg 
2005ca67688SMika Westerberg 	if (!tb_is_xdomain_enabled())
2015ca67688SMika Westerberg 		return;
2025ca67688SMika Westerberg 
2037ea4cd6bSMika Westerberg 	route = tb_downstream_route(port);
2047ea4cd6bSMika Westerberg 	xd = tb_xdomain_find_by_route(tb, route);
2057ea4cd6bSMika Westerberg 	if (xd) {
2067ea4cd6bSMika Westerberg 		tb_xdomain_put(xd);
2077ea4cd6bSMika Westerberg 		return;
2087ea4cd6bSMika Westerberg 	}
2097ea4cd6bSMika Westerberg 
2107ea4cd6bSMika Westerberg 	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
2117ea4cd6bSMika Westerberg 			      NULL);
2127ea4cd6bSMika Westerberg 	if (xd) {
2137ea4cd6bSMika Westerberg 		tb_port_at(route, sw)->xdomain = xd;
214284652a4SMika Westerberg 		tb_port_configure_xdomain(port);
2157ea4cd6bSMika Westerberg 		tb_xdomain_add(xd);
2167ea4cd6bSMika Westerberg 	}
2177ea4cd6bSMika Westerberg }
2187ea4cd6bSMika Westerberg 
219cf29b9afSRajmohan Mani static int tb_enable_tmu(struct tb_switch *sw)
220cf29b9afSRajmohan Mani {
221cf29b9afSRajmohan Mani 	int ret;
222cf29b9afSRajmohan Mani 
223cf29b9afSRajmohan Mani 	/* If it is already enabled in correct mode, don't touch it */
224a28ec0e1SGil Fine 	if (tb_switch_tmu_hifi_is_enabled(sw, sw->tmu.unidirectional_request))
225cf29b9afSRajmohan Mani 		return 0;
226cf29b9afSRajmohan Mani 
227cf29b9afSRajmohan Mani 	ret = tb_switch_tmu_disable(sw);
228cf29b9afSRajmohan Mani 	if (ret)
229cf29b9afSRajmohan Mani 		return ret;
230cf29b9afSRajmohan Mani 
231cf29b9afSRajmohan Mani 	ret = tb_switch_tmu_post_time(sw);
232cf29b9afSRajmohan Mani 	if (ret)
233cf29b9afSRajmohan Mani 		return ret;
234cf29b9afSRajmohan Mani 
235cf29b9afSRajmohan Mani 	return tb_switch_tmu_enable(sw);
236cf29b9afSRajmohan Mani }
237cf29b9afSRajmohan Mani 
238e6f81858SRajmohan Mani /**
239e6f81858SRajmohan Mani  * tb_find_unused_port() - return the first inactive port on @sw
240e6f81858SRajmohan Mani  * @sw: Switch to find the port on
241e6f81858SRajmohan Mani  * @type: Port type to look for
242e6f81858SRajmohan Mani  */
243e6f81858SRajmohan Mani static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
244e6f81858SRajmohan Mani 					   enum tb_port_type type)
245e6f81858SRajmohan Mani {
246e6f81858SRajmohan Mani 	struct tb_port *port;
247e6f81858SRajmohan Mani 
248e6f81858SRajmohan Mani 	tb_switch_for_each_port(sw, port) {
249e6f81858SRajmohan Mani 		if (tb_is_upstream_port(port))
250e6f81858SRajmohan Mani 			continue;
251e6f81858SRajmohan Mani 		if (port->config.type != type)
252e6f81858SRajmohan Mani 			continue;
253e6f81858SRajmohan Mani 		if (!port->cap_adap)
254e6f81858SRajmohan Mani 			continue;
255e6f81858SRajmohan Mani 		if (tb_port_is_enabled(port))
256e6f81858SRajmohan Mani 			continue;
257e6f81858SRajmohan Mani 		return port;
258e6f81858SRajmohan Mani 	}
259e6f81858SRajmohan Mani 	return NULL;
260e6f81858SRajmohan Mani }
261e6f81858SRajmohan Mani 
262e6f81858SRajmohan Mani static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
263e6f81858SRajmohan Mani 					 const struct tb_port *port)
264e6f81858SRajmohan Mani {
265e6f81858SRajmohan Mani 	struct tb_port *down;
266e6f81858SRajmohan Mani 
267e6f81858SRajmohan Mani 	down = usb4_switch_map_usb3_down(sw, port);
26877cfa40fSMika Westerberg 	if (down && !tb_usb3_port_is_enabled(down))
269e6f81858SRajmohan Mani 		return down;
27077cfa40fSMika Westerberg 	return NULL;
271e6f81858SRajmohan Mani }
272e6f81858SRajmohan Mani 
2730bd680cdSMika Westerberg static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
2740bd680cdSMika Westerberg 					struct tb_port *src_port,
2750bd680cdSMika Westerberg 					struct tb_port *dst_port)
2760bd680cdSMika Westerberg {
2770bd680cdSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
2780bd680cdSMika Westerberg 	struct tb_tunnel *tunnel;
2790bd680cdSMika Westerberg 
2800bd680cdSMika Westerberg 	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
2810bd680cdSMika Westerberg 		if (tunnel->type == type &&
2820bd680cdSMika Westerberg 		    ((src_port && src_port == tunnel->src_port) ||
2830bd680cdSMika Westerberg 		     (dst_port && dst_port == tunnel->dst_port))) {
2840bd680cdSMika Westerberg 			return tunnel;
2850bd680cdSMika Westerberg 		}
2860bd680cdSMika Westerberg 	}
2870bd680cdSMika Westerberg 
2880bd680cdSMika Westerberg 	return NULL;
2890bd680cdSMika Westerberg }
2900bd680cdSMika Westerberg 
2910bd680cdSMika Westerberg static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
2920bd680cdSMika Westerberg 						   struct tb_port *src_port,
2930bd680cdSMika Westerberg 						   struct tb_port *dst_port)
2940bd680cdSMika Westerberg {
2950bd680cdSMika Westerberg 	struct tb_port *port, *usb3_down;
2960bd680cdSMika Westerberg 	struct tb_switch *sw;
2970bd680cdSMika Westerberg 
2980bd680cdSMika Westerberg 	/* Pick the router that is deepest in the topology */
2990bd680cdSMika Westerberg 	if (dst_port->sw->config.depth > src_port->sw->config.depth)
3000bd680cdSMika Westerberg 		sw = dst_port->sw;
3010bd680cdSMika Westerberg 	else
3020bd680cdSMika Westerberg 		sw = src_port->sw;
3030bd680cdSMika Westerberg 
3040bd680cdSMika Westerberg 	/* Can't be the host router */
3050bd680cdSMika Westerberg 	if (sw == tb->root_switch)
3060bd680cdSMika Westerberg 		return NULL;
3070bd680cdSMika Westerberg 
3080bd680cdSMika Westerberg 	/* Find the downstream USB4 port that leads to this router */
3090bd680cdSMika Westerberg 	port = tb_port_at(tb_route(sw), tb->root_switch);
3100bd680cdSMika Westerberg 	/* Find the corresponding host router USB3 downstream port */
3110bd680cdSMika Westerberg 	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
3120bd680cdSMika Westerberg 	if (!usb3_down)
3130bd680cdSMika Westerberg 		return NULL;
3140bd680cdSMika Westerberg 
3150bd680cdSMika Westerberg 	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
3160bd680cdSMika Westerberg }
3170bd680cdSMika Westerberg 
/*
 * Calculate the bandwidth still available between @src_port and
 * @dst_port, writing the result (in Mb/s, per direction) to
 * @available_up/@available_down. The result is the minimum over all
 * lane adapters on the path of the link capacity minus a 10% guard
 * band, minus bandwidth consumed by DP tunnels crossing each link and
 * by the branch's first-hop USB3 tunnel. Returns 0 on success or a
 * negative errno.
 */
static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
	struct tb_port *dst_port, int *available_up, int *available_down)
{
	int usb3_consumed_up, usb3_consumed_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_port *port;

	tb_port_dbg(dst_port, "calculating available bandwidth\n");

	/*
	 * USB3 consumption is charged against every link of the branch
	 * below (see the comment further down), so fetch it once here.
	 */
	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel) {
		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
						   &usb3_consumed_down);
		if (ret)
			return ret;
	} else {
		usb3_consumed_up = 0;
		usb3_consumed_down = 0;
	}

	/* Start from 40000 Mb/s and clamp down to the slowest link below */
	*available_up = *available_down = 40000;

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int link_speed, link_width, up_bw, down_bw;

		/* Only lane adapters carry the inter-router links */
		if (!tb_port_is_null(port))
			continue;

		if (tb_is_upstream_port(port)) {
			link_speed = port->sw->link_speed;
		} else {
			link_speed = tb_port_get_link_speed(port);
			if (link_speed < 0)
				return link_speed;
		}

		link_width = port->bonded ? 2 : 1;

		up_bw = link_speed * link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		up_bw -= up_bw / 10;
		down_bw = up_bw;

		tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);

		/*
		 * Find all DP tunnels that cross the port and reduce
		 * their consumed bandwidth from the available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int dp_consumed_up, dp_consumed_down;

			if (!tb_tunnel_is_dp(tunnel))
				continue;

			if (!tb_tunnel_port_on_path(tunnel, port))
				continue;

			ret = tb_tunnel_consumed_bandwidth(tunnel,
							   &dp_consumed_up,
							   &dp_consumed_down);
			if (ret)
				return ret;

			up_bw -= dp_consumed_up;
			down_bw -= dp_consumed_down;
		}

		/*
		 * If USB3 is tunneled from the host router down to the
		 * branch leading to port we need to take USB3 consumed
		 * bandwidth into account regardless whether it actually
		 * crosses the port.
		 */
		up_bw -= usb3_consumed_up;
		down_bw -= usb3_consumed_down;

		if (up_bw < *available_up)
			*available_up = up_bw;
		if (down_bw < *available_down)
			*available_down = down_bw;
	}

	/* Over-subscription can drive the result negative; clamp to 0 */
	if (*available_up < 0)
		*available_up = 0;
	if (*available_down < 0)
		*available_down = 0;

	return 0;
}
4100bd680cdSMika Westerberg 
/*
 * Shrink the branch's first-hop USB3 tunnel to its minimum so the
 * freed bandwidth can be allocated to a new tunnel. Returns 0 when
 * there is no USB3 tunnel on the branch.
 */
static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return 0;

	return tb_tunnel_release_unused_bandwidth(tunnel);
}
4200bd680cdSMika Westerberg 
4210bd680cdSMika Westerberg static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
4220bd680cdSMika Westerberg 				      struct tb_port *dst_port)
4230bd680cdSMika Westerberg {
4240bd680cdSMika Westerberg 	int ret, available_up, available_down;
4250bd680cdSMika Westerberg 	struct tb_tunnel *tunnel;
4260bd680cdSMika Westerberg 
4270bd680cdSMika Westerberg 	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
4280bd680cdSMika Westerberg 	if (!tunnel)
4290bd680cdSMika Westerberg 		return;
4300bd680cdSMika Westerberg 
4310bd680cdSMika Westerberg 	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");
4320bd680cdSMika Westerberg 
4330bd680cdSMika Westerberg 	/*
4340bd680cdSMika Westerberg 	 * Calculate available bandwidth for the first hop USB3 tunnel.
4350bd680cdSMika Westerberg 	 * That determines the whole USB3 bandwidth for this branch.
4360bd680cdSMika Westerberg 	 */
4370bd680cdSMika Westerberg 	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
4380bd680cdSMika Westerberg 				     &available_up, &available_down);
4390bd680cdSMika Westerberg 	if (ret) {
4400bd680cdSMika Westerberg 		tb_warn(tb, "failed to calculate available bandwidth\n");
4410bd680cdSMika Westerberg 		return;
4420bd680cdSMika Westerberg 	}
4430bd680cdSMika Westerberg 
4440bd680cdSMika Westerberg 	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
4450bd680cdSMika Westerberg 	       available_up, available_down);
4460bd680cdSMika Westerberg 
4470bd680cdSMika Westerberg 	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
4480bd680cdSMika Westerberg }
4490bd680cdSMika Westerberg 
/*
 * Set up a USB3 tunnel between @sw and its parent router. Returns 0
 * both on success and when no tunnel is needed/possible (tunneling
 * disabled, no USB3 adapters, link not USB4, or incomplete chain);
 * negative errno on real failures. On the error paths any bandwidth
 * released for the new tunnel is reclaimed back to the existing
 * first-hop USB3 tunnel.
 */
static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_usb3()) {
		tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
		return 0;
	}

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	/* USB3 tunneling here requires the upstream link to be USB4 */
	if (!sw->link_usb4)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_port_at(tb_route(sw), parent);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;

		/* Make all unused bandwidth available for the new tunnel */
		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
		if (ret)
			return ret;
	}

	ret = tb_available_bandwidth(tb, down, up, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	/*
	 * Reclaim is only meaningful when bandwidth was released above,
	 * i.e. when the parent is not the host router.
	 */
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return ret;
}
532e6f81858SRajmohan Mani 
533e6f81858SRajmohan Mani static int tb_create_usb3_tunnels(struct tb_switch *sw)
534e6f81858SRajmohan Mani {
535e6f81858SRajmohan Mani 	struct tb_port *port;
536e6f81858SRajmohan Mani 	int ret;
537e6f81858SRajmohan Mani 
538c6da62a2SMika Westerberg 	if (!tb_acpi_may_tunnel_usb3())
539c6da62a2SMika Westerberg 		return 0;
540c6da62a2SMika Westerberg 
541e6f81858SRajmohan Mani 	if (tb_route(sw)) {
542e6f81858SRajmohan Mani 		ret = tb_tunnel_usb3(sw->tb, sw);
543e6f81858SRajmohan Mani 		if (ret)
544e6f81858SRajmohan Mani 			return ret;
545e6f81858SRajmohan Mani 	}
546e6f81858SRajmohan Mani 
547e6f81858SRajmohan Mani 	tb_switch_for_each_port(sw, port) {
548e6f81858SRajmohan Mani 		if (!tb_port_has_remote(port))
549e6f81858SRajmohan Mani 			continue;
550e6f81858SRajmohan Mani 		ret = tb_create_usb3_tunnels(port->remote->sw);
551e6f81858SRajmohan Mani 		if (ret)
552e6f81858SRajmohan Mani 			return ret;
553e6f81858SRajmohan Mani 	}
554e6f81858SRajmohan Mani 
555e6f81858SRajmohan Mani 	return 0;
556e6f81858SRajmohan Mani }
557e6f81858SRajmohan Mani 
5589da672a4SAndreas Noever static void tb_scan_port(struct tb_port *port);
5599da672a4SAndreas Noever 
/*
 * tb_scan_switch() - scan for and initialize downstream switches
 *
 * Takes a runtime PM reference around the scan so the router stays
 * powered while its ports are walked.
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	pm_runtime_get_sync(&sw->dev);

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}
5759da672a4SAndreas Noever 
576877e50b3SLee Jones /*
5779da672a4SAndreas Noever  * tb_scan_port() - check for and initialize switches below port
5789da672a4SAndreas Noever  */
5799da672a4SAndreas Noever static void tb_scan_port(struct tb_port *port)
5809da672a4SAndreas Noever {
58199cabbb0SMika Westerberg 	struct tb_cm *tcm = tb_priv(port->sw->tb);
582dfe40ca4SMika Westerberg 	struct tb_port *upstream_port;
5839da672a4SAndreas Noever 	struct tb_switch *sw;
584dfe40ca4SMika Westerberg 
5859da672a4SAndreas Noever 	if (tb_is_upstream_port(port))
5869da672a4SAndreas Noever 		return;
5874f807e47SMika Westerberg 
5884f807e47SMika Westerberg 	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
5894f807e47SMika Westerberg 	    !tb_dp_port_is_enabled(port)) {
5904f807e47SMika Westerberg 		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
5914f807e47SMika Westerberg 		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
5924f807e47SMika Westerberg 				 false);
5934f807e47SMika Westerberg 		return;
5944f807e47SMika Westerberg 	}
5954f807e47SMika Westerberg 
5969da672a4SAndreas Noever 	if (port->config.type != TB_TYPE_PORT)
5979da672a4SAndreas Noever 		return;
598343fcb8cSAndreas Noever 	if (port->dual_link_port && port->link_nr)
599343fcb8cSAndreas Noever 		return; /*
600343fcb8cSAndreas Noever 			 * Downstream switch is reachable through two ports.
601343fcb8cSAndreas Noever 			 * Only scan on the primary port (link_nr == 0).
602343fcb8cSAndreas Noever 			 */
6039da672a4SAndreas Noever 	if (tb_wait_for_port(port, false) <= 0)
6049da672a4SAndreas Noever 		return;
6059da672a4SAndreas Noever 	if (port->remote) {
6067ea4cd6bSMika Westerberg 		tb_port_dbg(port, "port already has a remote\n");
6079da672a4SAndreas Noever 		return;
6089da672a4SAndreas Noever 	}
609dacb1287SKranthi Kuntala 
6103fb10ea4SRajmohan Mani 	tb_retimer_scan(port, true);
611dacb1287SKranthi Kuntala 
612bfe778acSMika Westerberg 	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
613bfe778acSMika Westerberg 			     tb_downstream_route(port));
6147ea4cd6bSMika Westerberg 	if (IS_ERR(sw)) {
6157ea4cd6bSMika Westerberg 		/*
6167ea4cd6bSMika Westerberg 		 * If there is an error accessing the connected switch
6177ea4cd6bSMika Westerberg 		 * it may be connected to another domain. Also we allow
6187ea4cd6bSMika Westerberg 		 * the other domain to be connected to a max depth switch.
6197ea4cd6bSMika Westerberg 		 */
6207ea4cd6bSMika Westerberg 		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
6217ea4cd6bSMika Westerberg 			tb_scan_xdomain(port);
6229da672a4SAndreas Noever 		return;
6237ea4cd6bSMika Westerberg 	}
624bfe778acSMika Westerberg 
625bfe778acSMika Westerberg 	if (tb_switch_configure(sw)) {
626bfe778acSMika Westerberg 		tb_switch_put(sw);
627bfe778acSMika Westerberg 		return;
628bfe778acSMika Westerberg 	}
629bfe778acSMika Westerberg 
63099cabbb0SMika Westerberg 	/*
6317ea4cd6bSMika Westerberg 	 * If there was previously another domain connected remove it
6327ea4cd6bSMika Westerberg 	 * first.
6337ea4cd6bSMika Westerberg 	 */
6347ea4cd6bSMika Westerberg 	if (port->xdomain) {
6357ea4cd6bSMika Westerberg 		tb_xdomain_remove(port->xdomain);
636284652a4SMika Westerberg 		tb_port_unconfigure_xdomain(port);
6377ea4cd6bSMika Westerberg 		port->xdomain = NULL;
6387ea4cd6bSMika Westerberg 	}
6397ea4cd6bSMika Westerberg 
6407ea4cd6bSMika Westerberg 	/*
64199cabbb0SMika Westerberg 	 * Do not send uevents until we have discovered all existing
64299cabbb0SMika Westerberg 	 * tunnels and know which switches were authorized already by
64399cabbb0SMika Westerberg 	 * the boot firmware.
64499cabbb0SMika Westerberg 	 */
64599cabbb0SMika Westerberg 	if (!tcm->hotplug_active)
64699cabbb0SMika Westerberg 		dev_set_uevent_suppress(&sw->dev, true);
647f67cf491SMika Westerberg 
6486ac6faeeSMika Westerberg 	/*
6496ac6faeeSMika Westerberg 	 * At the moment Thunderbolt 2 and beyond (devices with LC) we
6506ac6faeeSMika Westerberg 	 * can support runtime PM.
6516ac6faeeSMika Westerberg 	 */
6526ac6faeeSMika Westerberg 	sw->rpm = sw->generation > 1;
6536ac6faeeSMika Westerberg 
654bfe778acSMika Westerberg 	if (tb_switch_add(sw)) {
655bfe778acSMika Westerberg 		tb_switch_put(sw);
656bfe778acSMika Westerberg 		return;
657bfe778acSMika Westerberg 	}
658bfe778acSMika Westerberg 
659dfe40ca4SMika Westerberg 	/* Link the switches using both links if available */
660dfe40ca4SMika Westerberg 	upstream_port = tb_upstream_port(sw);
661dfe40ca4SMika Westerberg 	port->remote = upstream_port;
662dfe40ca4SMika Westerberg 	upstream_port->remote = port;
663dfe40ca4SMika Westerberg 	if (port->dual_link_port && upstream_port->dual_link_port) {
664dfe40ca4SMika Westerberg 		port->dual_link_port->remote = upstream_port->dual_link_port;
665dfe40ca4SMika Westerberg 		upstream_port->dual_link_port->remote = port->dual_link_port;
666dfe40ca4SMika Westerberg 	}
667dfe40ca4SMika Westerberg 
66891c0c120SMika Westerberg 	/* Enable lane bonding if supported */
6692ca3263aSMika Westerberg 	tb_switch_lane_bonding_enable(sw);
670de462039SMika Westerberg 	/* Set the link configured */
671de462039SMika Westerberg 	tb_switch_configure_link(sw);
6728a90e4faSGil Fine 	if (tb_switch_enable_clx(sw, TB_CL0S))
6738a90e4faSGil Fine 		tb_sw_warn(sw, "failed to enable CLx on upstream port\n");
6748a90e4faSGil Fine 
6758a90e4faSGil Fine 	tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI,
6768a90e4faSGil Fine 				tb_switch_is_clx_enabled(sw));
67791c0c120SMika Westerberg 
678cf29b9afSRajmohan Mani 	if (tb_enable_tmu(sw))
679cf29b9afSRajmohan Mani 		tb_sw_warn(sw, "failed to enable TMU\n");
680cf29b9afSRajmohan Mani 
681dacb1287SKranthi Kuntala 	/* Scan upstream retimers */
6823fb10ea4SRajmohan Mani 	tb_retimer_scan(upstream_port, true);
683dacb1287SKranthi Kuntala 
684e6f81858SRajmohan Mani 	/*
685e6f81858SRajmohan Mani 	 * Create USB 3.x tunnels only when the switch is plugged to the
686e6f81858SRajmohan Mani 	 * domain. This is because we scan the domain also during discovery
687e6f81858SRajmohan Mani 	 * and want to discover existing USB 3.x tunnels before we create
688e6f81858SRajmohan Mani 	 * any new.
689e6f81858SRajmohan Mani 	 */
690e6f81858SRajmohan Mani 	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
691e6f81858SRajmohan Mani 		tb_sw_warn(sw, "USB3 tunnel creation failed\n");
692e6f81858SRajmohan Mani 
693e876f34aSMika Westerberg 	tb_add_dp_resources(sw);
6949da672a4SAndreas Noever 	tb_scan_switch(sw);
6959da672a4SAndreas Noever }
6969da672a4SAndreas Noever 
/*
 * tb_deactivate_and_free_tunnel() - Deactivate a tunnel and release its
 *				     resources
 * @tunnel: Tunnel to tear down (%NULL is allowed and is a no-op)
 *
 * Deactivates the tunnel, removes it from the connection manager tunnel
 * list, returns any bandwidth/resources it held and finally frees the
 * tunnel structure itself.
 */
static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	struct tb_port *src_port, *dst_port;
	struct tb *tb;

	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	/* Cache the endpoints before the tunnel structure is freed below */
	tb = tunnel->tb;
	src_port = tunnel->src_port;
	dst_port = tunnel->dst_port;

	switch (tunnel->type) {
	case TB_TUNNEL_DP:
		/*
		 * In case of DP tunnel make sure the DP IN resource is
		 * deallocated properly.
		 */
		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
		/* Now we can allow the domain to runtime suspend again */
		pm_runtime_mark_last_busy(&dst_port->sw->dev);
		pm_runtime_put_autosuspend(&dst_port->sw->dev);
		pm_runtime_mark_last_busy(&src_port->sw->dev);
		pm_runtime_put_autosuspend(&src_port->sw->dev);
		/* DP tunnels also reclaim USB3 bandwidth, so fall through */
		fallthrough;

	case TB_TUNNEL_USB3:
		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
		break;

	default:
		/*
		 * PCIe and DMA tunnels do not consume guaranteed
		 * bandwidth.
		 */
		break;
	}

	tb_tunnel_free(tunnel);
}
7404f807e47SMika Westerberg 
741877e50b3SLee Jones /*
7423364f0c1SAndreas Noever  * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
7433364f0c1SAndreas Noever  */
7443364f0c1SAndreas Noever static void tb_free_invalid_tunnels(struct tb *tb)
7453364f0c1SAndreas Noever {
7469d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
74793f36adeSMika Westerberg 	struct tb_tunnel *tunnel;
74893f36adeSMika Westerberg 	struct tb_tunnel *n;
7499d3cce0bSMika Westerberg 
7509d3cce0bSMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
7518afe909bSMika Westerberg 		if (tb_tunnel_is_invalid(tunnel))
7528afe909bSMika Westerberg 			tb_deactivate_and_free_tunnel(tunnel);
7533364f0c1SAndreas Noever 	}
7543364f0c1SAndreas Noever }
7553364f0c1SAndreas Noever 
/*
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 * @sw: Switch whose children are checked
 *
 * Recursively walks the topology below @sw. Children marked as
 * unplugged are fully torn down (retimers, DP resources, link
 * configuration, lane bonding and finally the switch itself); children
 * still present are descended into.
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			/* Undo everything that was set up for this child */
			tb_retimer_remove_all(port);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			/* Both lanes of a dual-link pair point to the same switch */
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}
78123dd5bb4SAndreas Noever 
/*
 * tb_find_pcie_down() - Find the PCIe downstream adapter to tunnel through
 * @sw: Switch whose downstream PCIe adapter is looked up
 * @port: Lane adapter of @sw the device is connected to
 *
 * Returns the PCIe downstream adapter that should serve the device
 * behind @port, falling back to any unused PCIe downstream adapter of
 * @sw when no fixed mapping applies.
 */
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		/* USB4 routers provide the mapping through router operations */
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		/* Host router: mapping depends on the controller model */
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		/* An adapter that is already enabled cannot be re-used */
		if (tb_pci_port_is_enabled(down))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}
83099cabbb0SMika Westerberg 
831e876f34aSMika Westerberg static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
832e876f34aSMika Westerberg {
833e876f34aSMika Westerberg 	struct tb_port *host_port, *port;
834e876f34aSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
835e876f34aSMika Westerberg 
836e876f34aSMika Westerberg 	host_port = tb_route(in->sw) ?
837e876f34aSMika Westerberg 		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
838e876f34aSMika Westerberg 
839e876f34aSMika Westerberg 	list_for_each_entry(port, &tcm->dp_resources, list) {
840e876f34aSMika Westerberg 		if (!tb_port_is_dpout(port))
841e876f34aSMika Westerberg 			continue;
842e876f34aSMika Westerberg 
843e876f34aSMika Westerberg 		if (tb_port_is_enabled(port)) {
844e876f34aSMika Westerberg 			tb_port_dbg(port, "in use\n");
845e876f34aSMika Westerberg 			continue;
846e876f34aSMika Westerberg 		}
847e876f34aSMika Westerberg 
848e876f34aSMika Westerberg 		tb_port_dbg(port, "DP OUT available\n");
849e876f34aSMika Westerberg 
850e876f34aSMika Westerberg 		/*
851e876f34aSMika Westerberg 		 * Keep the DP tunnel under the topology starting from
852e876f34aSMika Westerberg 		 * the same host router downstream port.
853e876f34aSMika Westerberg 		 */
854e876f34aSMika Westerberg 		if (host_port && tb_route(port->sw)) {
855e876f34aSMika Westerberg 			struct tb_port *p;
856e876f34aSMika Westerberg 
857e876f34aSMika Westerberg 			p = tb_port_at(tb_route(port->sw), tb->root_switch);
858e876f34aSMika Westerberg 			if (p != host_port)
859e876f34aSMika Westerberg 				continue;
860e876f34aSMika Westerberg 		}
861e876f34aSMika Westerberg 
862e876f34aSMika Westerberg 		return port;
863e876f34aSMika Westerberg 	}
864e876f34aSMika Westerberg 
865e876f34aSMika Westerberg 	return NULL;
866e876f34aSMika Westerberg }
867e876f34aSMika Westerberg 
/*
 * tb_tunnel_dp() - Pair free DP IN and DP OUT adapters and tunnel them
 * @tb: Domain structure
 *
 * Picks the first inactive DP IN adapter from the DP resource list,
 * finds a matching DP OUT adapter and establishes a DP tunnel between
 * the two, borrowing unused USB3 bandwidth for the stream. Failures
 * unwind through the labeled error path below in reverse order of
 * acquisition.
 */
static void tb_tunnel_dp(struct tb *tb)
{
	int available_up, available_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_dp()) {
		tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
		return;
	}

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpin(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP IN available\n");

		out = tb_find_dp_out(tb, port);
		if (out) {
			in = port;
			break;
		}
	}

	if (!in) {
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
		return;
	}
	if (!out) {
		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
		return;
	}

	/*
	 * DP stream needs the domain to be active so runtime resume
	 * both ends of the tunnel.
	 *
	 * This should bring the routers in the middle active as well
	 * and keeps the domain from runtime suspending while the DP
	 * tunnel is active.
	 */
	pm_runtime_get_sync(&in->sw->dev);
	pm_runtime_get_sync(&out->sw->dev);

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		goto err_rpm_put;
	}

	/* Make all unused USB3 bandwidth available for the new DP tunnel */
	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
	if (ret) {
		tb_warn(tb, "failed to release unused bandwidth\n");
		goto err_dealloc_dp;
	}

	ret = tb_available_bandwidth(tb, in, out, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
	       available_up, available_down);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	/* Give back to USB3 whatever bandwidth the DP tunnel did not take */
	tb_reclaim_usb3_bandwidth(tb, in, out);
	return;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	tb_reclaim_usb3_bandwidth(tb, in, out);
err_dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
err_rpm_put:
	/* No DP tunnel created, so the domain may runtime suspend again */
	pm_runtime_mark_last_busy(&out->sw->dev);
	pm_runtime_put_autosuspend(&out->sw->dev);
	pm_runtime_mark_last_busy(&in->sw->dev);
	pm_runtime_put_autosuspend(&in->sw->dev);
}
9734f807e47SMika Westerberg 
9748afe909bSMika Westerberg static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
9754f807e47SMika Westerberg {
9768afe909bSMika Westerberg 	struct tb_port *in, *out;
9778afe909bSMika Westerberg 	struct tb_tunnel *tunnel;
9788afe909bSMika Westerberg 
9798afe909bSMika Westerberg 	if (tb_port_is_dpin(port)) {
9808afe909bSMika Westerberg 		tb_port_dbg(port, "DP IN resource unavailable\n");
9818afe909bSMika Westerberg 		in = port;
9828afe909bSMika Westerberg 		out = NULL;
9838afe909bSMika Westerberg 	} else {
9848afe909bSMika Westerberg 		tb_port_dbg(port, "DP OUT resource unavailable\n");
9858afe909bSMika Westerberg 		in = NULL;
9868afe909bSMika Westerberg 		out = port;
9878afe909bSMika Westerberg 	}
9888afe909bSMika Westerberg 
9898afe909bSMika Westerberg 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
9908afe909bSMika Westerberg 	tb_deactivate_and_free_tunnel(tunnel);
9918afe909bSMika Westerberg 	list_del_init(&port->list);
9928afe909bSMika Westerberg 
9938afe909bSMika Westerberg 	/*
9948afe909bSMika Westerberg 	 * See if there is another DP OUT port that can be used for
9958afe909bSMika Westerberg 	 * to create another tunnel.
9968afe909bSMika Westerberg 	 */
9978afe909bSMika Westerberg 	tb_tunnel_dp(tb);
9988afe909bSMika Westerberg }
9998afe909bSMika Westerberg 
10008afe909bSMika Westerberg static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
10018afe909bSMika Westerberg {
10028afe909bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
10038afe909bSMika Westerberg 	struct tb_port *p;
10048afe909bSMika Westerberg 
10058afe909bSMika Westerberg 	if (tb_port_is_enabled(port))
10068afe909bSMika Westerberg 		return;
10078afe909bSMika Westerberg 
10088afe909bSMika Westerberg 	list_for_each_entry(p, &tcm->dp_resources, list) {
10098afe909bSMika Westerberg 		if (p == port)
10108afe909bSMika Westerberg 			return;
10118afe909bSMika Westerberg 	}
10128afe909bSMika Westerberg 
10138afe909bSMika Westerberg 	tb_port_dbg(port, "DP %s resource available\n",
10148afe909bSMika Westerberg 		    tb_port_is_dpin(port) ? "IN" : "OUT");
10158afe909bSMika Westerberg 	list_add_tail(&port->list, &tcm->dp_resources);
10168afe909bSMika Westerberg 
10178afe909bSMika Westerberg 	/* Look for suitable DP IN <-> DP OUT pairs now */
10188afe909bSMika Westerberg 	tb_tunnel_dp(tb);
10194f807e47SMika Westerberg }
10204f807e47SMika Westerberg 
102181a2e3e4SMika Westerberg static void tb_disconnect_and_release_dp(struct tb *tb)
102281a2e3e4SMika Westerberg {
102381a2e3e4SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
102481a2e3e4SMika Westerberg 	struct tb_tunnel *tunnel, *n;
102581a2e3e4SMika Westerberg 
102681a2e3e4SMika Westerberg 	/*
102781a2e3e4SMika Westerberg 	 * Tear down all DP tunnels and release their resources. They
102881a2e3e4SMika Westerberg 	 * will be re-established after resume based on plug events.
102981a2e3e4SMika Westerberg 	 */
103081a2e3e4SMika Westerberg 	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
103181a2e3e4SMika Westerberg 		if (tb_tunnel_is_dp(tunnel))
103281a2e3e4SMika Westerberg 			tb_deactivate_and_free_tunnel(tunnel);
103381a2e3e4SMika Westerberg 	}
103481a2e3e4SMika Westerberg 
103581a2e3e4SMika Westerberg 	while (!list_empty(&tcm->dp_resources)) {
103681a2e3e4SMika Westerberg 		struct tb_port *port;
103781a2e3e4SMika Westerberg 
103881a2e3e4SMika Westerberg 		port = list_first_entry(&tcm->dp_resources,
103981a2e3e4SMika Westerberg 					struct tb_port, list);
104081a2e3e4SMika Westerberg 		list_del_init(&port->list);
104181a2e3e4SMika Westerberg 	}
104281a2e3e4SMika Westerberg }
104381a2e3e4SMika Westerberg 
10443da88be2SMika Westerberg static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
10453da88be2SMika Westerberg {
10463da88be2SMika Westerberg 	struct tb_tunnel *tunnel;
10473da88be2SMika Westerberg 	struct tb_port *up;
10483da88be2SMika Westerberg 
10493da88be2SMika Westerberg 	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
10503da88be2SMika Westerberg 	if (WARN_ON(!up))
10513da88be2SMika Westerberg 		return -ENODEV;
10523da88be2SMika Westerberg 
10533da88be2SMika Westerberg 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
10543da88be2SMika Westerberg 	if (WARN_ON(!tunnel))
10553da88be2SMika Westerberg 		return -ENODEV;
10563da88be2SMika Westerberg 
10573da88be2SMika Westerberg 	tb_tunnel_deactivate(tunnel);
10583da88be2SMika Westerberg 	list_del(&tunnel->list);
10593da88be2SMika Westerberg 	tb_tunnel_free(tunnel);
10603da88be2SMika Westerberg 	return 0;
10613da88be2SMika Westerberg }
10623da88be2SMika Westerberg 
106399cabbb0SMika Westerberg static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
106499cabbb0SMika Westerberg {
106599cabbb0SMika Westerberg 	struct tb_port *up, *down, *port;
10669d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
106799cabbb0SMika Westerberg 	struct tb_switch *parent_sw;
106899cabbb0SMika Westerberg 	struct tb_tunnel *tunnel;
10699d3cce0bSMika Westerberg 
1070386e5e29SMika Westerberg 	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
107199cabbb0SMika Westerberg 	if (!up)
107299cabbb0SMika Westerberg 		return 0;
10733364f0c1SAndreas Noever 
107499cabbb0SMika Westerberg 	/*
107599cabbb0SMika Westerberg 	 * Look up available down port. Since we are chaining it should
107699cabbb0SMika Westerberg 	 * be found right above this switch.
107799cabbb0SMika Westerberg 	 */
107899cabbb0SMika Westerberg 	parent_sw = tb_to_switch(sw->dev.parent);
107999cabbb0SMika Westerberg 	port = tb_port_at(tb_route(sw), parent_sw);
108099cabbb0SMika Westerberg 	down = tb_find_pcie_down(parent_sw, port);
108199cabbb0SMika Westerberg 	if (!down)
108299cabbb0SMika Westerberg 		return 0;
10833364f0c1SAndreas Noever 
108499cabbb0SMika Westerberg 	tunnel = tb_tunnel_alloc_pci(tb, up, down);
108599cabbb0SMika Westerberg 	if (!tunnel)
108699cabbb0SMika Westerberg 		return -ENOMEM;
10873364f0c1SAndreas Noever 
108893f36adeSMika Westerberg 	if (tb_tunnel_activate(tunnel)) {
108999cabbb0SMika Westerberg 		tb_port_info(up,
10903364f0c1SAndreas Noever 			     "PCIe tunnel activation failed, aborting\n");
109193f36adeSMika Westerberg 		tb_tunnel_free(tunnel);
109299cabbb0SMika Westerberg 		return -EIO;
10933364f0c1SAndreas Noever 	}
10943364f0c1SAndreas Noever 
1095*43f977bcSGil Fine 	/*
1096*43f977bcSGil Fine 	 * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
1097*43f977bcSGil Fine 	 * here.
1098*43f977bcSGil Fine 	 */
1099*43f977bcSGil Fine 	if (tb_switch_pcie_l1_enable(sw))
1100*43f977bcSGil Fine 		tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
1101*43f977bcSGil Fine 
110299cabbb0SMika Westerberg 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
110399cabbb0SMika Westerberg 	return 0;
11043364f0c1SAndreas Noever }
11059da672a4SAndreas Noever 
1106180b0689SMika Westerberg static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1107180b0689SMika Westerberg 				    int transmit_path, int transmit_ring,
1108180b0689SMika Westerberg 				    int receive_path, int receive_ring)
11097ea4cd6bSMika Westerberg {
11107ea4cd6bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
11117ea4cd6bSMika Westerberg 	struct tb_port *nhi_port, *dst_port;
11127ea4cd6bSMika Westerberg 	struct tb_tunnel *tunnel;
11137ea4cd6bSMika Westerberg 	struct tb_switch *sw;
11147ea4cd6bSMika Westerberg 
11157ea4cd6bSMika Westerberg 	sw = tb_to_switch(xd->dev.parent);
11167ea4cd6bSMika Westerberg 	dst_port = tb_port_at(xd->route, sw);
1117386e5e29SMika Westerberg 	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
11187ea4cd6bSMika Westerberg 
11197ea4cd6bSMika Westerberg 	mutex_lock(&tb->lock);
1120180b0689SMika Westerberg 	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
1121180b0689SMika Westerberg 				     transmit_ring, receive_path, receive_ring);
11227ea4cd6bSMika Westerberg 	if (!tunnel) {
11237ea4cd6bSMika Westerberg 		mutex_unlock(&tb->lock);
11247ea4cd6bSMika Westerberg 		return -ENOMEM;
11257ea4cd6bSMika Westerberg 	}
11267ea4cd6bSMika Westerberg 
11277ea4cd6bSMika Westerberg 	if (tb_tunnel_activate(tunnel)) {
11287ea4cd6bSMika Westerberg 		tb_port_info(nhi_port,
11297ea4cd6bSMika Westerberg 			     "DMA tunnel activation failed, aborting\n");
11307ea4cd6bSMika Westerberg 		tb_tunnel_free(tunnel);
11317ea4cd6bSMika Westerberg 		mutex_unlock(&tb->lock);
11327ea4cd6bSMika Westerberg 		return -EIO;
11337ea4cd6bSMika Westerberg 	}
11347ea4cd6bSMika Westerberg 
11357ea4cd6bSMika Westerberg 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
11367ea4cd6bSMika Westerberg 	mutex_unlock(&tb->lock);
11377ea4cd6bSMika Westerberg 	return 0;
11387ea4cd6bSMika Westerberg }
11397ea4cd6bSMika Westerberg 
1140180b0689SMika Westerberg static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1141180b0689SMika Westerberg 					  int transmit_path, int transmit_ring,
1142180b0689SMika Westerberg 					  int receive_path, int receive_ring)
11437ea4cd6bSMika Westerberg {
1144180b0689SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
1145180b0689SMika Westerberg 	struct tb_port *nhi_port, *dst_port;
1146180b0689SMika Westerberg 	struct tb_tunnel *tunnel, *n;
11477ea4cd6bSMika Westerberg 	struct tb_switch *sw;
11487ea4cd6bSMika Westerberg 
11497ea4cd6bSMika Westerberg 	sw = tb_to_switch(xd->dev.parent);
11507ea4cd6bSMika Westerberg 	dst_port = tb_port_at(xd->route, sw);
1151180b0689SMika Westerberg 	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
11527ea4cd6bSMika Westerberg 
1153180b0689SMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1154180b0689SMika Westerberg 		if (!tb_tunnel_is_dma(tunnel))
1155180b0689SMika Westerberg 			continue;
1156180b0689SMika Westerberg 		if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
1157180b0689SMika Westerberg 			continue;
1158180b0689SMika Westerberg 
1159180b0689SMika Westerberg 		if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
1160180b0689SMika Westerberg 					receive_path, receive_ring))
11618afe909bSMika Westerberg 			tb_deactivate_and_free_tunnel(tunnel);
11627ea4cd6bSMika Westerberg 	}
1163180b0689SMika Westerberg }
11647ea4cd6bSMika Westerberg 
1165180b0689SMika Westerberg static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1166180b0689SMika Westerberg 				       int transmit_path, int transmit_ring,
1167180b0689SMika Westerberg 				       int receive_path, int receive_ring)
11687ea4cd6bSMika Westerberg {
11697ea4cd6bSMika Westerberg 	if (!xd->is_unplugged) {
11707ea4cd6bSMika Westerberg 		mutex_lock(&tb->lock);
1171180b0689SMika Westerberg 		__tb_disconnect_xdomain_paths(tb, xd, transmit_path,
1172180b0689SMika Westerberg 					      transmit_ring, receive_path,
1173180b0689SMika Westerberg 					      receive_ring);
11747ea4cd6bSMika Westerberg 		mutex_unlock(&tb->lock);
11757ea4cd6bSMika Westerberg 	}
11767ea4cd6bSMika Westerberg 	return 0;
11777ea4cd6bSMika Westerberg }
11787ea4cd6bSMika Westerberg 
1179d6cc51cdSAndreas Noever /* hotplug handling */
1180d6cc51cdSAndreas Noever 
1181877e50b3SLee Jones /*
1182d6cc51cdSAndreas Noever  * tb_handle_hotplug() - handle hotplug event
1183d6cc51cdSAndreas Noever  *
1184d6cc51cdSAndreas Noever  * Executes on tb->wq.
1185d6cc51cdSAndreas Noever  */
1186d6cc51cdSAndreas Noever static void tb_handle_hotplug(struct work_struct *work)
1187d6cc51cdSAndreas Noever {
1188d6cc51cdSAndreas Noever 	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
1189d6cc51cdSAndreas Noever 	struct tb *tb = ev->tb;
11909d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
1191053596d9SAndreas Noever 	struct tb_switch *sw;
1192053596d9SAndreas Noever 	struct tb_port *port;
1193284652a4SMika Westerberg 
11946ac6faeeSMika Westerberg 	/* Bring the domain back from sleep if it was suspended */
11956ac6faeeSMika Westerberg 	pm_runtime_get_sync(&tb->dev);
11966ac6faeeSMika Westerberg 
1197d6cc51cdSAndreas Noever 	mutex_lock(&tb->lock);
11989d3cce0bSMika Westerberg 	if (!tcm->hotplug_active)
1199d6cc51cdSAndreas Noever 		goto out; /* during init, suspend or shutdown */
1200d6cc51cdSAndreas Noever 
12018f965efdSMika Westerberg 	sw = tb_switch_find_by_route(tb, ev->route);
1202053596d9SAndreas Noever 	if (!sw) {
1203053596d9SAndreas Noever 		tb_warn(tb,
1204053596d9SAndreas Noever 			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
1205053596d9SAndreas Noever 			ev->route, ev->port, ev->unplug);
1206053596d9SAndreas Noever 		goto out;
1207053596d9SAndreas Noever 	}
1208053596d9SAndreas Noever 	if (ev->port > sw->config.max_port_number) {
1209053596d9SAndreas Noever 		tb_warn(tb,
1210053596d9SAndreas Noever 			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
1211053596d9SAndreas Noever 			ev->route, ev->port, ev->unplug);
12128f965efdSMika Westerberg 		goto put_sw;
1213053596d9SAndreas Noever 	}
1214053596d9SAndreas Noever 	port = &sw->ports[ev->port];
1215053596d9SAndreas Noever 	if (tb_is_upstream_port(port)) {
1216dfe40ca4SMika Westerberg 		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
1217053596d9SAndreas Noever 		       ev->route, ev->port, ev->unplug);
12188f965efdSMika Westerberg 		goto put_sw;
1219053596d9SAndreas Noever 	}
12206ac6faeeSMika Westerberg 
12216ac6faeeSMika Westerberg 	pm_runtime_get_sync(&sw->dev);
12226ac6faeeSMika Westerberg 
1223053596d9SAndreas Noever 	if (ev->unplug) {
1224dacb1287SKranthi Kuntala 		tb_retimer_remove_all(port);
1225dacb1287SKranthi Kuntala 
1226dfe40ca4SMika Westerberg 		if (tb_port_has_remote(port)) {
12277ea4cd6bSMika Westerberg 			tb_port_dbg(port, "switch unplugged\n");
1228aae20bb6SLukas Wunner 			tb_sw_set_unplugged(port->remote->sw);
12293364f0c1SAndreas Noever 			tb_free_invalid_tunnels(tb);
12308afe909bSMika Westerberg 			tb_remove_dp_resources(port->remote->sw);
1231cf29b9afSRajmohan Mani 			tb_switch_tmu_disable(port->remote->sw);
1232de462039SMika Westerberg 			tb_switch_unconfigure_link(port->remote->sw);
123391c0c120SMika Westerberg 			tb_switch_lane_bonding_disable(port->remote->sw);
1234bfe778acSMika Westerberg 			tb_switch_remove(port->remote->sw);
1235053596d9SAndreas Noever 			port->remote = NULL;
1236dfe40ca4SMika Westerberg 			if (port->dual_link_port)
1237dfe40ca4SMika Westerberg 				port->dual_link_port->remote = NULL;
12388afe909bSMika Westerberg 			/* Maybe we can create another DP tunnel */
12398afe909bSMika Westerberg 			tb_tunnel_dp(tb);
12407ea4cd6bSMika Westerberg 		} else if (port->xdomain) {
12417ea4cd6bSMika Westerberg 			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
12427ea4cd6bSMika Westerberg 
12437ea4cd6bSMika Westerberg 			tb_port_dbg(port, "xdomain unplugged\n");
12447ea4cd6bSMika Westerberg 			/*
12457ea4cd6bSMika Westerberg 			 * Service drivers are unbound during
12467ea4cd6bSMika Westerberg 			 * tb_xdomain_remove() so setting XDomain as
12477ea4cd6bSMika Westerberg 			 * unplugged here prevents deadlock if they call
12487ea4cd6bSMika Westerberg 			 * tb_xdomain_disable_paths(). We will tear down
1249180b0689SMika Westerberg 			 * all the tunnels below.
12507ea4cd6bSMika Westerberg 			 */
12517ea4cd6bSMika Westerberg 			xd->is_unplugged = true;
12527ea4cd6bSMika Westerberg 			tb_xdomain_remove(xd);
12537ea4cd6bSMika Westerberg 			port->xdomain = NULL;
1254180b0689SMika Westerberg 			__tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
12557ea4cd6bSMika Westerberg 			tb_xdomain_put(xd);
1256284652a4SMika Westerberg 			tb_port_unconfigure_xdomain(port);
12578afe909bSMika Westerberg 		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
12588afe909bSMika Westerberg 			tb_dp_resource_unavailable(tb, port);
1259053596d9SAndreas Noever 		} else {
126062efe699SMika Westerberg 			tb_port_dbg(port,
1261053596d9SAndreas Noever 				   "got unplug event for disconnected port, ignoring\n");
1262053596d9SAndreas Noever 		}
1263053596d9SAndreas Noever 	} else if (port->remote) {
126462efe699SMika Westerberg 		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
1265053596d9SAndreas Noever 	} else {
1266344e0643SMika Westerberg 		if (tb_port_is_null(port)) {
126762efe699SMika Westerberg 			tb_port_dbg(port, "hotplug: scanning\n");
1268053596d9SAndreas Noever 			tb_scan_port(port);
126999cabbb0SMika Westerberg 			if (!port->remote)
127062efe699SMika Westerberg 				tb_port_dbg(port, "hotplug: no switch found\n");
12718afe909bSMika Westerberg 		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
12728afe909bSMika Westerberg 			tb_dp_resource_available(tb, port);
1273053596d9SAndreas Noever 		}
1274344e0643SMika Westerberg 	}
12758f965efdSMika Westerberg 
12766ac6faeeSMika Westerberg 	pm_runtime_mark_last_busy(&sw->dev);
12776ac6faeeSMika Westerberg 	pm_runtime_put_autosuspend(&sw->dev);
12786ac6faeeSMika Westerberg 
12798f965efdSMika Westerberg put_sw:
12808f965efdSMika Westerberg 	tb_switch_put(sw);
1281d6cc51cdSAndreas Noever out:
1282d6cc51cdSAndreas Noever 	mutex_unlock(&tb->lock);
12836ac6faeeSMika Westerberg 
12846ac6faeeSMika Westerberg 	pm_runtime_mark_last_busy(&tb->dev);
12856ac6faeeSMika Westerberg 	pm_runtime_put_autosuspend(&tb->dev);
12866ac6faeeSMika Westerberg 
1287d6cc51cdSAndreas Noever 	kfree(ev);
1288d6cc51cdSAndreas Noever }
1289d6cc51cdSAndreas Noever 
/*
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
129581a54b5eSMika Westerberg static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
129681a54b5eSMika Westerberg 			    const void *buf, size_t size)
1297d6cc51cdSAndreas Noever {
129881a54b5eSMika Westerberg 	const struct cfg_event_pkg *pkg = buf;
129981a54b5eSMika Westerberg 	u64 route;
130081a54b5eSMika Westerberg 
130181a54b5eSMika Westerberg 	if (type != TB_CFG_PKG_EVENT) {
130281a54b5eSMika Westerberg 		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
130381a54b5eSMika Westerberg 		return;
130481a54b5eSMika Westerberg 	}
130581a54b5eSMika Westerberg 
130681a54b5eSMika Westerberg 	route = tb_cfg_get_route(&pkg->header);
130781a54b5eSMika Westerberg 
1308210e9f56SMika Westerberg 	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
130981a54b5eSMika Westerberg 		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
131081a54b5eSMika Westerberg 			pkg->port);
131181a54b5eSMika Westerberg 	}
131281a54b5eSMika Westerberg 
13134f807e47SMika Westerberg 	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
1314d6cc51cdSAndreas Noever }
1315d6cc51cdSAndreas Noever 
/*
 * tb_stop() - Tear down the software connection manager
 *
 * Called when the domain goes away. Frees all tunnels (deactivating
 * only the DMA ones), removes the whole switch tree and stops further
 * hotplug processing.
 */
static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	/* Stop the delayed cleanup work scheduled from runtime resume */
	cancel_delayed_work(&tcm->remove_work);
	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}
1337d6cc51cdSAndreas Noever 
133899cabbb0SMika Westerberg static int tb_scan_finalize_switch(struct device *dev, void *data)
133999cabbb0SMika Westerberg {
134099cabbb0SMika Westerberg 	if (tb_is_switch(dev)) {
134199cabbb0SMika Westerberg 		struct tb_switch *sw = tb_to_switch(dev);
134299cabbb0SMika Westerberg 
134399cabbb0SMika Westerberg 		/*
134499cabbb0SMika Westerberg 		 * If we found that the switch was already setup by the
134599cabbb0SMika Westerberg 		 * boot firmware, mark it as authorized now before we
134699cabbb0SMika Westerberg 		 * send uevent to userspace.
134799cabbb0SMika Westerberg 		 */
134899cabbb0SMika Westerberg 		if (sw->boot)
134999cabbb0SMika Westerberg 			sw->authorized = 1;
135099cabbb0SMika Westerberg 
135199cabbb0SMika Westerberg 		dev_set_uevent_suppress(dev, false);
135299cabbb0SMika Westerberg 		kobject_uevent(&dev->kobj, KOBJ_ADD);
135399cabbb0SMika Westerberg 		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
135499cabbb0SMika Westerberg 	}
135599cabbb0SMika Westerberg 
135699cabbb0SMika Westerberg 	return 0;
135799cabbb0SMika Westerberg }
135899cabbb0SMika Westerberg 
/*
 * tb_start() - Bring up the software connection manager
 *
 * Allocates, configures and announces the root switch, enables TMU,
 * scans the existing topology and (re)creates tunnels before hotplug
 * event processing is allowed. Returns 0 on success, negative errno
 * otherwise.
 */
static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available so disable firmware upgrade of the
	 * root switch.
	 */
	tb->root_switch->no_nvm_upgrade = true;
	/* All USB4 routers support runtime PM */
	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_HIFI, false);
	/* Enable TMU if it is off */
	tb_switch_tmu_enable(tb->root_switch);
	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb);
	/*
	 * If the boot firmware did not create USB 3.x tunnels create them
	 * now for the whole topology.
	 */
	tb_create_usb3_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}
1412d6cc51cdSAndreas Noever 
/*
 * tb_suspend_noirq() - Prepare the domain for system sleep
 *
 * Releases DP resources, puts the whole switch tree to sleep and stops
 * hotplug event processing until resume. Always returns 0.
 */
static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_disconnect_and_release_dp(tb);
	tb_switch_suspend(tb->root_switch, false);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}
142523dd5bb4SAndreas Noever 
/*
 * tb_restore_children() - Re-establish per-router configuration after resume
 * @sw: Router whose downstream topology to restore
 *
 * Recursively re-enables CLx and TMU, restores lane bonding and link
 * configuration for connected routers, and reconfigures XDomain ports.
 */
static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;

	/* No need to restore if the router is already unplugged */
	if (sw->is_unplugged)
		return;

	if (tb_switch_enable_clx(sw, TB_CL0S))
		tb_sw_warn(sw, "failed to re-enable CLx on upstream port\n");

	/*
	 * tb_switch_tmu_configure() was already called when the switch was
	 * added before entering system sleep or runtime suspend,
	 * so no need to call it again before enabling TMU.
	 */
	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (port->remote) {
			/* Bonding and link config must precede descending */
			tb_switch_lane_bonding_enable(port->remote->sw);
			tb_switch_configure_link(port->remote->sw);

			tb_restore_children(port->remote->sw);
		} else if (port->xdomain) {
			tb_port_configure_xdomain(port);
		}
	}
}
145991c0c120SMika Westerberg 
/*
 * tb_resume_noirq() - Bring the domain back from system sleep
 *
 * Resets and resumes the switch tree, drops tunnels left behind by the
 * firmware or restore kernel, restarts our own tunnels and re-enables
 * hotplug event processing. Always returns 0.
 */
static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;
	unsigned int usb3_delay = 0;
	LIST_HEAD(tunnels);

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have setup */
	tb_switch_reset(tb->root_switch);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);

	/*
	 * If we get here from suspend to disk the boot firmware or the
	 * restore kernel might have created tunnels of its own. Since
	 * we cannot be sure they are usable for us we find and tear
	 * them down.
	 */
	tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
	list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
		if (tb_tunnel_is_usb3(tunnel))
			usb3_delay = 500;
		tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}

	/* Re-create our tunnels now */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/* USB3 requires delay before it can be re-activated */
		if (tb_tunnel_is_usb3(tunnel)) {
			msleep(usb3_delay);
			/* Only need to do it once */
			usb3_delay = 0;
		}
		tb_tunnel_restart(tunnel);
	}
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * the pcie links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	 /* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}
15159d3cce0bSMika Westerberg 
15167ea4cd6bSMika Westerberg static int tb_free_unplugged_xdomains(struct tb_switch *sw)
15177ea4cd6bSMika Westerberg {
1518b433d010SMika Westerberg 	struct tb_port *port;
1519b433d010SMika Westerberg 	int ret = 0;
15207ea4cd6bSMika Westerberg 
1521b433d010SMika Westerberg 	tb_switch_for_each_port(sw, port) {
15227ea4cd6bSMika Westerberg 		if (tb_is_upstream_port(port))
15237ea4cd6bSMika Westerberg 			continue;
15247ea4cd6bSMika Westerberg 		if (port->xdomain && port->xdomain->is_unplugged) {
1525dacb1287SKranthi Kuntala 			tb_retimer_remove_all(port);
15267ea4cd6bSMika Westerberg 			tb_xdomain_remove(port->xdomain);
1527284652a4SMika Westerberg 			tb_port_unconfigure_xdomain(port);
15287ea4cd6bSMika Westerberg 			port->xdomain = NULL;
15297ea4cd6bSMika Westerberg 			ret++;
15307ea4cd6bSMika Westerberg 		} else if (port->remote) {
15317ea4cd6bSMika Westerberg 			ret += tb_free_unplugged_xdomains(port->remote->sw);
15327ea4cd6bSMika Westerberg 		}
15337ea4cd6bSMika Westerberg 	}
15347ea4cd6bSMika Westerberg 
15357ea4cd6bSMika Westerberg 	return ret;
15367ea4cd6bSMika Westerberg }
15377ea4cd6bSMika Westerberg 
/*
 * tb_freeze_noirq() - Stop hotplug processing for hibernation image creation.
 * The hardware is left untouched; only event handling is paused.
 */
static int tb_freeze_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = false;
	return 0;
}
1545884e4d57SMika Westerberg 
/*
 * tb_thaw_noirq() - Resume hotplug processing after the hibernation
 * image has been created (counterpart of tb_freeze_noirq()).
 */
static int tb_thaw_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = true;
	return 0;
}
1553884e4d57SMika Westerberg 
/*
 * tb_complete() - Finish system resume by cleaning up unplugged XDomains.
 */
static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains and if there is a case where
	 * another domain is swapped in place of unplugged XDomain we
	 * need to run another rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}
15667ea4cd6bSMika Westerberg 
/*
 * tb_runtime_suspend() - Runtime suspend the domain
 *
 * Puts the switch tree to sleep (runtime variant) and pauses hotplug
 * processing, all under the domain lock. Always returns 0.
 */
static int tb_runtime_suspend(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	mutex_lock(&tb->lock);
	tb_switch_suspend(tb->root_switch, true);
	tcm->hotplug_active = false;
	mutex_unlock(&tb->lock);

	return 0;
}
15786ac6faeeSMika Westerberg 
/*
 * tb_remove_work() - Deferred removal of unplugged routers/XDomains
 *
 * Scheduled from tb_runtime_resume() so that device removal does not
 * deadlock by runtime resuming the device being removed.
 */
static void tb_remove_work(struct work_struct *work)
{
	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
	struct tb *tb = tcm_to_tb(tcm);

	mutex_lock(&tb->lock);
	/* Root switch may already be gone if the domain was stopped */
	if (tb->root_switch) {
		tb_free_unplugged_children(tb->root_switch);
		tb_free_unplugged_xdomains(tb->root_switch);
	}
	mutex_unlock(&tb->lock);
}
15916ac6faeeSMika Westerberg 
/*
 * tb_runtime_resume() - Runtime resume the domain
 *
 * Resumes the switch tree, restores links, restarts tunnels and
 * re-enables hotplug processing; schedules deferred cleanup of any
 * devices that went away while suspended. Always returns 0.
 */
static int tb_runtime_resume(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	mutex_lock(&tb->lock);
	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	tcm->hotplug_active = true;
	mutex_unlock(&tb->lock);

	/*
	 * Schedule cleanup of any unplugged devices. Run this in a
	 * separate thread to avoid possible deadlock if the device
	 * removal runtime resumes the unplugged device.
	 */
	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
	return 0;
}
16146ac6faeeSMika Westerberg 
/* Domain operations implemented by the software connection manager */
static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.freeze_noirq = tb_freeze_noirq,
	.thaw_noirq = tb_thaw_noirq,
	.complete = tb_complete,
	.runtime_suspend = tb_runtime_suspend,
	.runtime_resume = tb_runtime_resume,
	.handle_event = tb_handle_event,
	.disapprove_switch = tb_disconnect_pci,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};
16319d3cce0bSMika Westerberg 
1632349bfe08SMika Westerberg /*
1633349bfe08SMika Westerberg  * During suspend the Thunderbolt controller is reset and all PCIe
1634349bfe08SMika Westerberg  * tunnels are lost. The NHI driver will try to reestablish all tunnels
1635349bfe08SMika Westerberg  * during resume. This adds device links between the tunneled PCIe
1636349bfe08SMika Westerberg  * downstream ports and the NHI so that the device core will make sure
1637349bfe08SMika Westerberg  * NHI is resumed first before the rest.
1638349bfe08SMika Westerberg  */
1639349bfe08SMika Westerberg static void tb_apple_add_links(struct tb_nhi *nhi)
1640349bfe08SMika Westerberg {
1641349bfe08SMika Westerberg 	struct pci_dev *upstream, *pdev;
1642349bfe08SMika Westerberg 
1643349bfe08SMika Westerberg 	if (!x86_apple_machine)
1644349bfe08SMika Westerberg 		return;
1645349bfe08SMika Westerberg 
1646349bfe08SMika Westerberg 	switch (nhi->pdev->device) {
1647349bfe08SMika Westerberg 	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1648349bfe08SMika Westerberg 	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
1649349bfe08SMika Westerberg 	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
1650349bfe08SMika Westerberg 	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
1651349bfe08SMika Westerberg 		break;
1652349bfe08SMika Westerberg 	default:
1653349bfe08SMika Westerberg 		return;
1654349bfe08SMika Westerberg 	}
1655349bfe08SMika Westerberg 
1656349bfe08SMika Westerberg 	upstream = pci_upstream_bridge(nhi->pdev);
1657349bfe08SMika Westerberg 	while (upstream) {
1658349bfe08SMika Westerberg 		if (!pci_is_pcie(upstream))
1659349bfe08SMika Westerberg 			return;
1660349bfe08SMika Westerberg 		if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
1661349bfe08SMika Westerberg 			break;
1662349bfe08SMika Westerberg 		upstream = pci_upstream_bridge(upstream);
1663349bfe08SMika Westerberg 	}
1664349bfe08SMika Westerberg 
1665349bfe08SMika Westerberg 	if (!upstream)
1666349bfe08SMika Westerberg 		return;
1667349bfe08SMika Westerberg 
1668349bfe08SMika Westerberg 	/*
1669349bfe08SMika Westerberg 	 * For each hotplug downstream port, create add device link
1670349bfe08SMika Westerberg 	 * back to NHI so that PCIe tunnels can be re-established after
1671349bfe08SMika Westerberg 	 * sleep.
1672349bfe08SMika Westerberg 	 */
1673349bfe08SMika Westerberg 	for_each_pci_bridge(pdev, upstream->subordinate) {
1674349bfe08SMika Westerberg 		const struct device_link *link;
1675349bfe08SMika Westerberg 
1676349bfe08SMika Westerberg 		if (!pci_is_pcie(pdev))
1677349bfe08SMika Westerberg 			continue;
1678349bfe08SMika Westerberg 		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
1679349bfe08SMika Westerberg 		    !pdev->is_hotplug_bridge)
1680349bfe08SMika Westerberg 			continue;
1681349bfe08SMika Westerberg 
1682349bfe08SMika Westerberg 		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
1683349bfe08SMika Westerberg 				       DL_FLAG_AUTOREMOVE_SUPPLIER |
1684349bfe08SMika Westerberg 				       DL_FLAG_PM_RUNTIME);
1685349bfe08SMika Westerberg 		if (link) {
1686349bfe08SMika Westerberg 			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
1687349bfe08SMika Westerberg 				dev_name(&pdev->dev));
1688349bfe08SMika Westerberg 		} else {
1689349bfe08SMika Westerberg 			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
1690349bfe08SMika Westerberg 				 dev_name(&pdev->dev));
1691349bfe08SMika Westerberg 		}
1692349bfe08SMika Westerberg 	}
1693349bfe08SMika Westerberg }
1694349bfe08SMika Westerberg 
16959d3cce0bSMika Westerberg struct tb *tb_probe(struct tb_nhi *nhi)
16969d3cce0bSMika Westerberg {
16979d3cce0bSMika Westerberg 	struct tb_cm *tcm;
16989d3cce0bSMika Westerberg 	struct tb *tb;
16999d3cce0bSMika Westerberg 
17007f0a34d7SMika Westerberg 	tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
17019d3cce0bSMika Westerberg 	if (!tb)
17029d3cce0bSMika Westerberg 		return NULL;
17039d3cce0bSMika Westerberg 
1704c6da62a2SMika Westerberg 	if (tb_acpi_may_tunnel_pcie())
170599cabbb0SMika Westerberg 		tb->security_level = TB_SECURITY_USER;
1706c6da62a2SMika Westerberg 	else
1707c6da62a2SMika Westerberg 		tb->security_level = TB_SECURITY_NOPCIE;
1708c6da62a2SMika Westerberg 
17099d3cce0bSMika Westerberg 	tb->cm_ops = &tb_cm_ops;
17109d3cce0bSMika Westerberg 
17119d3cce0bSMika Westerberg 	tcm = tb_priv(tb);
17129d3cce0bSMika Westerberg 	INIT_LIST_HEAD(&tcm->tunnel_list);
17138afe909bSMika Westerberg 	INIT_LIST_HEAD(&tcm->dp_resources);
17146ac6faeeSMika Westerberg 	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
17159d3cce0bSMika Westerberg 
1716e0258805SMika Westerberg 	tb_dbg(tb, "using software connection manager\n");
1717e0258805SMika Westerberg 
1718349bfe08SMika Westerberg 	tb_apple_add_links(nhi);
1719349bfe08SMika Westerberg 	tb_acpi_add_links(nhi);
1720349bfe08SMika Westerberg 
17219d3cce0bSMika Westerberg 	return tb;
172223dd5bb4SAndreas Noever }
1723