xref: /openbmc/linux/drivers/thunderbolt/tb.c (revision 53ba2e16)
// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

#define TB_TIMEOUT	100	/* ms */
#define MAX_GROUPS	7	/* max Group_ID is 7 */

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 * @remove_work: Work used to remove any unplugged routers after
 *		 runtime resume
 * @groups: Bandwidth groups used in this domain.
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
	struct delayed_work remove_work;
	struct tb_bandwidth_group groups[MAX_GROUPS];
};

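/*
 * tb_priv() hands out the connection manager private data, which is
 * allocated immediately after struct tb itself, so the inverse mapping
 * back to the domain is plain pointer arithmetic.
 */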
static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
{
	return ((void *)tcm - sizeof(struct tb));
}

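/**
 * struct tb_hotplug_event - Queued hotplug event
 * @work: Work item that runs tb_handle_hotplug() from the domain wq
 * @tb: Domain the event belongs to
 * @route: Route string of the router generating the event
 * @port: Adapter number on that router
 * @unplug: %true for an unplug event, %false for a plug event
 */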
struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

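/*
 * Bandwidth groups: DP tunnels that go through the same USB4 links are
 * placed in the same group so their bandwidth can be managed together.
 * The Group_IDs handed to the hardware run from 1 to MAX_GROUPS.
 */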
static void tb_init_bandwidth_groups(struct tb_cm *tcm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
		struct tb_bandwidth_group *group = &tcm->groups[i];

		group->tb = tcm_to_tb(tcm);
		group->index = i + 1;
		INIT_LIST_HEAD(&group->ports);
	}
}

static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
					   struct tb_port *in)
{
	if (!group || WARN_ON(in->group))
		return;

	in->group = group;
	list_add_tail(&in->group_list, &group->ports);

	tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
}

static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
		struct tb_bandwidth_group *group = &tcm->groups[i];

		if (list_empty(&group->ports))
			return group;
	}

	return NULL;
}

static struct tb_bandwidth_group *
tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
			  struct tb_port *out)
{
	struct tb_bandwidth_group *group;
	struct tb_tunnel *tunnel;

	/*
	 * Find all DP tunnels that go through all the same USB4 links
	 * as this one. Because we always set up tunnels the same way we
	 * can just check the routers at both ends of the tunnels; if
	 * they are the same we have a match.
	 */
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (!tb_tunnel_is_dp(tunnel))
			continue;

		if (tunnel->src_port->sw == in->sw &&
		    tunnel->dst_port->sw == out->sw) {
			group = tunnel->src_port->group;
			if (group) {
				tb_bandwidth_group_attach_port(group, in);
				return group;
			}
		}
	}

	/* Otherwise pick up the next available group */
	group = tb_find_free_bandwidth_group(tcm);
	if (group)
		tb_bandwidth_group_attach_port(group, in);
	else
		tb_port_warn(in, "no available bandwidth groups\n");

	return group;
}

static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
					struct tb_port *out)
{
	if (usb4_dp_port_bw_mode_enabled(in)) {
		int index, i;

		/*
		 * The DP IN adapter already has bandwidth allocation
		 * mode enabled, so reuse the group its Group_ID points
		 * to instead of assigning a new one.
		 */
		index = usb4_dp_port_group_id(in);
		for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
			if (tcm->groups[i].index == index) {
				tb_bandwidth_group_attach_port(&tcm->groups[i], in);
				return;
			}
		}
	}

	tb_attach_bandwidth_group(tcm, in, out);
}

static void tb_detach_bandwidth_group(struct tb_port *in)
{
	struct tb_bandwidth_group *group = in->group;

	if (group) {
		in->group = NULL;
		list_del_init(&in->group_list);

		tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
	}
}

static void tb_handle_hotplug(struct work_struct *work);

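/*
 * Queue a hotplug event for processing in the domain workqueue. Note
 * that if the event structure cannot be allocated the event is
 * silently dropped.
 */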
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}

static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP IN resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}

static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available discovered\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);
}

static void tb_discover_dp_resources(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_discover_dp_resource(tb, tunnel->dst_port);
	}
}

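/*
 * CL states (CL0s, CL1, CL2) are low power states of the USB4 link;
 * the deeper the state, the more power is saved at the cost of longer
 * exit latency.
 */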
/* Enables CL states up to host router */
static int tb_enable_clx(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	const struct tb_tunnel *tunnel;
	int ret;

	/*
	 * Currently only enable CLx for the first link. This is enough
	 * to allow the CPU to save energy at least on Intel hardware
	 * and makes it slightly simpler to implement. We may change
	 * this in the future to cover the whole topology if it turns
	 * out to be beneficial.
	 */
	while (sw && sw->config.depth > 1)
		sw = tb_switch_parent(sw);

	if (!sw)
		return 0;

	if (sw->config.depth != 1)
		return 0;

	/*
	 * If we are re-enabling then check if there is an active DMA
	 * tunnel and in that case bail out.
	 */
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dma(tunnel)) {
			if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw)))
				return 0;
		}
	}

	/*
	 * CL0s and CL1 are enabled and supported together.
	 * Silently ignore CLx enabling in case CLx is not supported.
	 */
	ret = tb_switch_clx_enable(sw, TB_CL0S | TB_CL1);
	return ret == -EOPNOTSUPP ? 0 : ret;
}

/* Disables CL states up to the host router */
static void tb_disable_clx(struct tb_switch *sw)
{
	do {
		if (tb_switch_clx_disable(sw) < 0)
			tb_sw_warn(sw, "failed to disable CL states\n");
		sw = tb_switch_parent(sw);
	} while (sw);
}

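/*
 * device_for_each_child() callback: tb_to_switch() returns NULL for
 * child devices that are not routers, so only routers are touched.
 */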
static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data)
{
	struct tb_switch *sw;

	sw = tb_to_switch(dev);
	if (sw) {
		tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI,
					tb_switch_clx_is_enabled(sw, TB_CL1));
		if (tb_switch_tmu_enable(sw))
			tb_sw_warn(sw, "failed to increase TMU rate\n");
	}

	return 0;
}

static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel)
{
	struct tb_switch *sw;

	if (!tunnel)
		return;

	/*
	 * Once the first DP tunnel is established we change the TMU
	 * accuracy of the first depth child routers (and the host
	 * router) to the highest. This is needed for the DP tunneling
	 * to work but also allows CL0s.
	 */
	sw = tunnel->tb->root_switch;
	device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy);
}

static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/*
	 * If CL1 is enabled then we need to configure the TMU accuracy
	 * level to normal. Otherwise we keep the TMU running at the
	 * highest accuracy.
	 */
	if (tb_switch_clx_is_enabled(sw, TB_CL1))
		ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true);
	else
		ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);
	if (ret)
		return ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}

static void tb_switch_discover_tunnels(struct tb_switch *sw,
				       struct list_head *list,
				       bool alloc_hopids)
{
	struct tb *tb = sw->tb;
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
			tb_increase_tmu_accuracy(tunnel);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
			break;

		default:
			break;
		}

		if (tunnel)
			list_add_tail(&tunnel->list, list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_discover_tunnels(port->remote->sw, list,
						   alloc_hopids);
		}
	}
}

static void tb_discover_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		} else if (tb_tunnel_is_dp(tunnel)) {
			struct tb_port *in = tunnel->src_port;
			struct tb_port *out = tunnel->dst_port;

			/* Keep the domain from powering down */
			pm_runtime_get_sync(&in->sw->dev);
			pm_runtime_get_sync(&out->sw->dev);

			tb_discover_bandwidth_group(tcm, in, out);
		}
	}
}

static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_configure_xdomain(port, xd);
	return tb_lc_configure_xdomain(port);
}

static void tb_port_unconfigure_xdomain(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		usb4_port_unconfigure_xdomain(port);
	else
		tb_lc_unconfigure_xdomain(port);

	tb_port_enable(port->dual_link_port);
}

static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	if (!tb_is_xdomain_enabled())
		return;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_port_configure_xdomain(port, xd);
		tb_xdomain_add(xd);
	}
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down && !tb_usb3_port_is_enabled(down))
		return down;
	return NULL;
}

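/*
 * Returns the first tunnel of @type whose source or destination
 * adapter matches. Either @src_port or @dst_port can be %NULL in which
 * case it is ignored in the comparison.
 */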
static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}

static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
						   struct tb_port *src_port,
						   struct tb_port *dst_port)
{
	struct tb_port *port, *usb3_down;
	struct tb_switch *sw;

	/* Pick the router that is deepest in the topology */
	if (dst_port->sw->config.depth > src_port->sw->config.depth)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	/* Can't be the host router */
	if (sw == tb->root_switch)
		return NULL;

	/* Find the downstream USB4 port that leads to this router */
	port = tb_port_at(tb_route(sw), tb->root_switch);
	/* Find the corresponding host router USB3 downstream port */
	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
	if (!usb3_down)
		return NULL;

	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
}

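/*
 * Computes the worst-case available bandwidth over the path between
 * @src_port and @dst_port. As a worked example of the per-link math
 * below: a bonded Gen 3 link has link_speed 20 and link_width 2, so it
 * carries 20 * 2 * 1000 = 40000 Mb/s; the 10% guard band leaves
 * 36000 Mb/s, from which the bandwidth consumed by DP and USB3 tunnels
 * crossing the link is then subtracted.
 */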
static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
	struct tb_port *dst_port, int *available_up, int *available_down)
{
	int usb3_consumed_up, usb3_consumed_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_port *port;

	tb_dbg(tb, "calculating available bandwidth between %llx:%u <-> %llx:%u\n",
	       tb_route(src_port->sw), src_port->port, tb_route(dst_port->sw),
	       dst_port->port);

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel && tunnel->src_port != src_port &&
	    tunnel->dst_port != dst_port) {
		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
						   &usb3_consumed_down);
		if (ret)
			return ret;
	} else {
		usb3_consumed_up = 0;
		usb3_consumed_down = 0;
	}

	/* Start from the maximum possible link bandwidth (40 Gb/s) */
	*available_up = *available_down = 40000;

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int link_speed, link_width, up_bw, down_bw;

		if (!tb_port_is_null(port))
			continue;

		if (tb_is_upstream_port(port)) {
			link_speed = port->sw->link_speed;
		} else {
			link_speed = tb_port_get_link_speed(port);
			if (link_speed < 0)
				return link_speed;
		}

		link_width = port->bonded ? 2 : 1;

		up_bw = link_speed * link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		up_bw -= up_bw / 10;
		down_bw = up_bw;

		tb_port_dbg(port, "link total bandwidth %d/%d Mb/s\n", up_bw,
			    down_bw);

		/*
		 * Find all DP tunnels that cross the port and reduce
		 * their consumed bandwidth from the available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int dp_consumed_up, dp_consumed_down;

			if (tb_tunnel_is_invalid(tunnel))
				continue;

			if (!tb_tunnel_is_dp(tunnel))
				continue;

			if (!tb_tunnel_port_on_path(tunnel, port))
				continue;

			/*
			 * Ignore the DP tunnel between src_port and
			 * dst_port because it is the same tunnel and we
			 * may be re-calculating estimated bandwidth.
			 */
			if (tunnel->src_port == src_port &&
			    tunnel->dst_port == dst_port)
				continue;

			ret = tb_tunnel_consumed_bandwidth(tunnel,
							   &dp_consumed_up,
							   &dp_consumed_down);
			if (ret)
				return ret;

			up_bw -= dp_consumed_up;
			down_bw -= dp_consumed_down;
		}

		/*
		 * If USB3 is tunneled from the host router down to the
		 * branch leading to port we need to take USB3 consumed
		 * bandwidth into account regardless of whether it
		 * actually crosses the port.
		 */
		up_bw -= usb3_consumed_up;
		down_bw -= usb3_consumed_down;

		if (up_bw < *available_up)
			*available_up = up_bw;
		if (down_bw < *available_down)
			*available_down = down_bw;
	}

	if (*available_up < 0)
		*available_up = 0;
	if (*available_down < 0)
		*available_down = 0;

	return 0;
}

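/*
 * Temporarily releases the unused bandwidth of the first hop USB3
 * tunnel (if there is one) so it can be handed to a new tunnel. The
 * caller is expected to give it back with tb_reclaim_usb3_bandwidth()
 * afterwards.
 */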
static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
}

static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
				      struct tb_port *dst_port)
{
	int ret, available_up, available_down;
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return;

	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");

	/*
	 * Calculate available bandwidth for the first hop USB3 tunnel.
	 * That determines the whole USB3 bandwidth for this branch.
	 */
	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
				     &available_up, &available_down);
	if (ret) {
		tb_warn(tb, "failed to calculate available bandwidth\n");
		return;
	}

	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
	       available_up, available_down);

	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
}

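/*
 * Creates a USB3 tunnel between the USB3 downstream adapter of the
 * parent router and the USB3 upstream adapter of @sw, sized according
 * to the bandwidth currently available on the path.
 */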
static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_usb3()) {
		tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
		return 0;
	}

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	if (!sw->link_usb4)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_switch_downstream_port(sw);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;

		/* Make all unused bandwidth available for the new tunnel */
		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
		if (ret)
			return ret;
	}

	ret = tb_available_bandwidth(tb, down, up, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return ret;
}

static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (!tb_acpi_may_tunnel_usb3())
		return 0;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_scan_port(struct tb_port *port);

/*
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	pm_runtime_get_sync(&sw->dev);

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}

/*
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	bool discovery = false;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */

	if (port->usb4)
		pm_runtime_get_sync(&port->usb4->dev);

	if (tb_wait_for_port(port, false) <= 0)
		goto out_rpm_put;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		goto out_rpm_put;
	}

	tb_retimer_scan(port, true);

	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		goto out_rpm_put;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		goto out_rpm_put;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		tb_port_unconfigure_xdomain(port);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active) {
		dev_set_uevent_suppress(&sw->dev, true);
		discovery = true;
	}

	/*
	 * At the moment we can support runtime PM only for Thunderbolt 2
	 * and beyond (devices with LC).
	 */
	sw->rpm = sw->generation > 1;

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		goto out_rpm_put;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	tb_switch_lane_bonding_enable(sw);
	/* Set the link configured */
	tb_switch_configure_link(sw);
	/*
	 * CL0s and CL1 are enabled and supported together.
	 * Silently ignore CLx enabling in case CLx is not supported.
	 */
	if (discovery)
		tb_sw_dbg(sw, "discovery, not touching CL states\n");
	else if (tb_enable_clx(sw))
		tb_sw_warn(sw, "failed to enable CL states\n");

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/* Scan upstream retimers */
	tb_retimer_scan(upstream_port, true);

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * any new.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);
	tb_scan_switch(sw);

out_rpm_put:
	if (port->usb4) {
		pm_runtime_mark_last_busy(&port->usb4->dev);
		pm_runtime_put_autosuspend(&port->usb4->dev);
	}
}

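/*
 * Deactivates @tunnel, releases the resources it holds (the DP IN
 * resource, the runtime PM references and any reclaimable USB3
 * bandwidth) and frees it.
 */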
static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	struct tb_port *src_port, *dst_port;
	struct tb *tb;

	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	tb = tunnel->tb;
	src_port = tunnel->src_port;
	dst_port = tunnel->dst_port;

	switch (tunnel->type) {
	case TB_TUNNEL_DP:
		tb_detach_bandwidth_group(src_port);
		/*
		 * In case of DP tunnel make sure the DP IN resource is
		 * deallocated properly.
		 */
		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
		/* Now we can allow the domain to runtime suspend again */
		pm_runtime_mark_last_busy(&dst_port->sw->dev);
		pm_runtime_put_autosuspend(&dst_port->sw->dev);
		pm_runtime_mark_last_busy(&src_port->sw->dev);
		pm_runtime_put_autosuspend(&src_port->sw->dev);
		fallthrough;

	case TB_TUNNEL_USB3:
		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
		break;

	default:
		/*
		 * PCIe and DMA tunnels do not consume guaranteed
		 * bandwidth.
		 */
		break;
	}

	tb_tunnel_free(tunnel);
}

/*
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

/*
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		if (tb_pci_port_is_enabled(down))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

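/*
 * Re-calculates and programs the estimated bandwidth for each DP IN
 * adapter in @group that has the bandwidth allocation mode enabled.
 */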
static void
tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
{
	struct tb_tunnel *first_tunnel;
	struct tb *tb = group->tb;
	struct tb_port *in;
	int ret;

	tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
	       group->index);

	first_tunnel = NULL;
	list_for_each_entry(in, &group->ports, group_list) {
		int estimated_bw, estimated_up, estimated_down;
		struct tb_tunnel *tunnel;
		struct tb_port *out;

		if (!usb4_dp_port_bw_mode_enabled(in))
			continue;

		tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
		if (WARN_ON(!tunnel))
			break;

		if (!first_tunnel) {
			/*
			 * Since USB3 bandwidth is shared by all DP
			 * tunnels under the host router USB4 port, even
			 * if they do not begin from the host router, we
			 * can release USB3 bandwidth just once and not
			 * for each tunnel separately.
			 */
			first_tunnel = tunnel;
			ret = tb_release_unused_usb3_bandwidth(tb,
				first_tunnel->src_port, first_tunnel->dst_port);
			if (ret) {
				tb_port_warn(in,
					"failed to release unused bandwidth\n");
				break;
			}
		}

		out = tunnel->dst_port;
		ret = tb_available_bandwidth(tb, in, out, &estimated_up,
					     &estimated_down);
		if (ret) {
			tb_port_warn(in,
				"failed to re-calculate estimated bandwidth\n");
			break;
		}

		/*
		 * Estimated bandwidth includes:
		 *  - already allocated bandwidth for the DP tunnel
		 *  - available bandwidth along the path
		 *  - bandwidth allocated for USB 3.x but not used.
		 */
		tb_port_dbg(in, "re-calculated estimated bandwidth %u/%u Mb/s\n",
			    estimated_up, estimated_down);

		if (in->sw->config.depth < out->sw->config.depth)
			estimated_bw = estimated_down;
		else
			estimated_bw = estimated_up;

		if (usb4_dp_port_set_estimated_bw(in, estimated_bw))
11616ce35635SMika Westerberg 			tb_port_warn(in, "failed to update estimated bandwidth\n");
11626ce35635SMika Westerberg 	}
11636ce35635SMika Westerberg 
11646ce35635SMika Westerberg 	if (first_tunnel)
11656ce35635SMika Westerberg 		tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
11666ce35635SMika Westerberg 					  first_tunnel->dst_port);
11676ce35635SMika Westerberg 
11686ce35635SMika Westerberg 	tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
11696ce35635SMika Westerberg }
11706ce35635SMika Westerberg 
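/*
 * The depth comparison in the loop above decides which direction of
 * the path carries the DP stream. A minimal sketch of that rule as a
 * stand-alone helper (illustrative only, not part of the driver; the
 * name tb_pick_dp_bw() is made up):
 *
 *	static int tb_pick_dp_bw(const struct tb_port *in,
 *				 const struct tb_port *out,
 *				 int up, int down)
 *	{
 *		// DP flows downstream when DP IN sits closer to the
 *		// host router than DP OUT.
 *		if (in->sw->config.depth < out->sw->config.depth)
 *			return down;
 *		return up;
 *	}
 */
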
11716ce35635SMika Westerberg static void tb_recalc_estimated_bandwidth(struct tb *tb)
11726ce35635SMika Westerberg {
11736ce35635SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
11746ce35635SMika Westerberg 	int i;
11756ce35635SMika Westerberg 
11766ce35635SMika Westerberg 	tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
11776ce35635SMika Westerberg 
11786ce35635SMika Westerberg 	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
11796ce35635SMika Westerberg 		struct tb_bandwidth_group *group = &tcm->groups[i];
11806ce35635SMika Westerberg 
11816ce35635SMika Westerberg 		if (!list_empty(&group->ports))
11826ce35635SMika Westerberg 			tb_recalc_estimated_bandwidth_for_group(group);
11836ce35635SMika Westerberg 	}
11846ce35635SMika Westerberg 
11856ce35635SMika Westerberg 	tb_dbg(tb, "bandwidth re-calculation done\n");
11866ce35635SMika Westerberg }
11876ce35635SMika Westerberg 
1188e876f34aSMika Westerberg static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
1189e876f34aSMika Westerberg {
1190e876f34aSMika Westerberg 	struct tb_port *host_port, *port;
1191e876f34aSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
1192e876f34aSMika Westerberg 
1193e876f34aSMika Westerberg 	host_port = tb_route(in->sw) ?
1194e876f34aSMika Westerberg 		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
1195e876f34aSMika Westerberg 
1196e876f34aSMika Westerberg 	list_for_each_entry(port, &tcm->dp_resources, list) {
1197e876f34aSMika Westerberg 		if (!tb_port_is_dpout(port))
1198e876f34aSMika Westerberg 			continue;
1199e876f34aSMika Westerberg 
1200e876f34aSMika Westerberg 		if (tb_port_is_enabled(port)) {
1201b0ef48fcSMika Westerberg 			tb_port_dbg(port, "DP OUT in use\n");
1202e876f34aSMika Westerberg 			continue;
1203e876f34aSMika Westerberg 		}
1204e876f34aSMika Westerberg 
1205e876f34aSMika Westerberg 		tb_port_dbg(port, "DP OUT available\n");
1206e876f34aSMika Westerberg 
1207e876f34aSMika Westerberg 		/*
1208e876f34aSMika Westerberg 		 * Keep the DP tunnel under the topology starting from
1209e876f34aSMika Westerberg 		 * the same host router downstream port.
1210e876f34aSMika Westerberg 		 */
1211e876f34aSMika Westerberg 		if (host_port && tb_route(port->sw)) {
1212e876f34aSMika Westerberg 			struct tb_port *p;
1213e876f34aSMika Westerberg 
1214e876f34aSMika Westerberg 			p = tb_port_at(tb_route(port->sw), tb->root_switch);
1215e876f34aSMika Westerberg 			if (p != host_port)
1216e876f34aSMika Westerberg 				continue;
1217e876f34aSMika Westerberg 		}
1218e876f34aSMika Westerberg 
1219e876f34aSMika Westerberg 		return port;
1220e876f34aSMika Westerberg 	}
1221e876f34aSMika Westerberg 
1222e876f34aSMika Westerberg 	return NULL;
1223e876f34aSMika Westerberg }
1224e876f34aSMika Westerberg 
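/*
 * Illustrative example of the topology rule above (port numbers are
 * hypothetical): if the DP IN adapter is reached through host router
 * downstream port 1, only DP OUT adapters whose route also passes
 * through port 1 qualify; a DP OUT behind port 3 is skipped even if it
 * is free. A DP IN on the host router itself (host_port == NULL) can
 * pair with any available DP OUT.
 */
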
12258afe909bSMika Westerberg static void tb_tunnel_dp(struct tb *tb)
12264f807e47SMika Westerberg {
12279d2d0a5cSMika Westerberg 	int available_up, available_down, ret, link_nr;
12284f807e47SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
12298afe909bSMika Westerberg 	struct tb_port *port, *in, *out;
12304f807e47SMika Westerberg 	struct tb_tunnel *tunnel;
12314f807e47SMika Westerberg 
1232c6da62a2SMika Westerberg 	if (!tb_acpi_may_tunnel_dp()) {
1233c6da62a2SMika Westerberg 		tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
1234c6da62a2SMika Westerberg 		return;
1235c6da62a2SMika Westerberg 	}
1236c6da62a2SMika Westerberg 
12378afe909bSMika Westerberg 	/*
12388afe909bSMika Westerberg 	 * Find pair of inactive DP IN and DP OUT adapters and then
12398afe909bSMika Westerberg 	 * establish a DP tunnel between them.
12408afe909bSMika Westerberg 	 */
12418afe909bSMika Westerberg 	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
12424f807e47SMika Westerberg 
12438afe909bSMika Westerberg 	in = NULL;
12448afe909bSMika Westerberg 	out = NULL;
12458afe909bSMika Westerberg 	list_for_each_entry(port, &tcm->dp_resources, list) {
1246e876f34aSMika Westerberg 		if (!tb_port_is_dpin(port))
1247e876f34aSMika Westerberg 			continue;
1248e876f34aSMika Westerberg 
12498afe909bSMika Westerberg 		if (tb_port_is_enabled(port)) {
1250b0ef48fcSMika Westerberg 			tb_port_dbg(port, "DP IN in use\n");
12518afe909bSMika Westerberg 			continue;
12528afe909bSMika Westerberg 		}
12538afe909bSMika Westerberg 
1254e876f34aSMika Westerberg 		tb_port_dbg(port, "DP IN available\n");
12558afe909bSMika Westerberg 
1256e876f34aSMika Westerberg 		out = tb_find_dp_out(tb, port);
1257e876f34aSMika Westerberg 		if (out) {
12588afe909bSMika Westerberg 			in = port;
1259e876f34aSMika Westerberg 			break;
1260e876f34aSMika Westerberg 		}
12618afe909bSMika Westerberg 	}
12628afe909bSMika Westerberg 
12638afe909bSMika Westerberg 	if (!in) {
12648afe909bSMika Westerberg 		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
12658afe909bSMika Westerberg 		return;
12668afe909bSMika Westerberg 	}
12678afe909bSMika Westerberg 	if (!out) {
12688afe909bSMika Westerberg 		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
12698afe909bSMika Westerberg 		return;
12708afe909bSMika Westerberg 	}
12718afe909bSMika Westerberg 
12726ac6faeeSMika Westerberg 	/*
12739d2d0a5cSMika Westerberg 	 * This is only applicable to links that are not bonded (so
12749d2d0a5cSMika Westerberg 	 * when Thunderbolt 1 hardware is involved somewhere in the
12759d2d0a5cSMika Westerberg 	 * topology). For these, try to share the DP bandwidth between
12769d2d0a5cSMika Westerberg 	 * the two lanes.
12779d2d0a5cSMika Westerberg 	 */
12789d2d0a5cSMika Westerberg 	link_nr = 1;
12799d2d0a5cSMika Westerberg 	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
12809d2d0a5cSMika Westerberg 		if (tb_tunnel_is_dp(tunnel)) {
12819d2d0a5cSMika Westerberg 			link_nr = 0;
12829d2d0a5cSMika Westerberg 			break;
12839d2d0a5cSMika Westerberg 		}
12849d2d0a5cSMika Westerberg 	}
12859d2d0a5cSMika Westerberg 
12869d2d0a5cSMika Westerberg 	/*
12876ac6faeeSMika Westerberg 	 * The DP stream needs the domain to be active, so runtime
12886ac6faeeSMika Westerberg 	 * resume both ends of the tunnel.
12896ac6faeeSMika Westerberg 	 *
12906ac6faeeSMika Westerberg 	 * This should bring the routers in the middle active as well
12916ac6faeeSMika Westerberg 	 * and keep the domain from runtime suspending while the DP
12926ac6faeeSMika Westerberg 	 * tunnel is active.
12936ac6faeeSMika Westerberg 	 */
12946ac6faeeSMika Westerberg 	pm_runtime_get_sync(&in->sw->dev);
12956ac6faeeSMika Westerberg 	pm_runtime_get_sync(&out->sw->dev);
12966ac6faeeSMika Westerberg 
12978afe909bSMika Westerberg 	if (tb_switch_alloc_dp_resource(in->sw, in)) {
12988afe909bSMika Westerberg 		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
12996ac6faeeSMika Westerberg 		goto err_rpm_put;
13008afe909bSMika Westerberg 	}
13014f807e47SMika Westerberg 
13026ce35635SMika Westerberg 	if (!tb_attach_bandwidth_group(tcm, in, out))
13036ce35635SMika Westerberg 		goto err_dealloc_dp;
13046ce35635SMika Westerberg 
13050bd680cdSMika Westerberg 	/* Make all unused USB3 bandwidth available for the new DP tunnel */
13060bd680cdSMika Westerberg 	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
13070bd680cdSMika Westerberg 	if (ret) {
13080bd680cdSMika Westerberg 		tb_warn(tb, "failed to release unused bandwidth\n");
13096ce35635SMika Westerberg 		goto err_detach_group;
1310a11b88adSMika Westerberg 	}
1311a11b88adSMika Westerberg 
13126ce35635SMika Westerberg 	ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
13130bd680cdSMika Westerberg 	if (ret)
13146ce35635SMika Westerberg 		goto err_reclaim_usb;
1315a11b88adSMika Westerberg 
13160bd680cdSMika Westerberg 	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
13170bd680cdSMika Westerberg 	       available_up, available_down);
13180bd680cdSMika Westerberg 
13199d2d0a5cSMika Westerberg 	tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
13209d2d0a5cSMika Westerberg 				    available_down);
13214f807e47SMika Westerberg 	if (!tunnel) {
13228afe909bSMika Westerberg 		tb_port_dbg(out, "could not allocate DP tunnel\n");
13236ce35635SMika Westerberg 		goto err_reclaim_usb;
13244f807e47SMika Westerberg 	}
13254f807e47SMika Westerberg 
13264f807e47SMika Westerberg 	if (tb_tunnel_activate(tunnel)) {
13274f807e47SMika Westerberg 		tb_port_info(out, "DP tunnel activation failed, aborting\n");
13280bd680cdSMika Westerberg 		goto err_free;
13294f807e47SMika Westerberg 	}
13304f807e47SMika Westerberg 
13314f807e47SMika Westerberg 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
13320bd680cdSMika Westerberg 	tb_reclaim_usb3_bandwidth(tb, in, out);
13336ce35635SMika Westerberg 
13346ce35635SMika Westerberg 	/* Update the domain with the new bandwidth estimation */
13356ce35635SMika Westerberg 	tb_recalc_estimated_bandwidth(tb);
13366ce35635SMika Westerberg 
13373084b48fSGil Fine 	/*
13383084b48fSGil Fine 	 * If a DP tunnel exists, change the TMU mode of the host
13393084b48fSGil Fine 	 * router's first-depth children to HiFi so that CL0s keeps working.
13403084b48fSGil Fine 	 */
13417d283f41SMika Westerberg 	tb_increase_tmu_accuracy(tunnel);
13428afe909bSMika Westerberg 	return;
13438afe909bSMika Westerberg 
13440bd680cdSMika Westerberg err_free:
13450bd680cdSMika Westerberg 	tb_tunnel_free(tunnel);
13466ce35635SMika Westerberg err_reclaim_usb:
13470bd680cdSMika Westerberg 	tb_reclaim_usb3_bandwidth(tb, in, out);
13486ce35635SMika Westerberg err_detach_group:
13496ce35635SMika Westerberg 	tb_detach_bandwidth_group(in);
13500bd680cdSMika Westerberg err_dealloc_dp:
13518afe909bSMika Westerberg 	tb_switch_dealloc_dp_resource(in->sw, in);
13526ac6faeeSMika Westerberg err_rpm_put:
13536ac6faeeSMika Westerberg 	pm_runtime_mark_last_busy(&out->sw->dev);
13546ac6faeeSMika Westerberg 	pm_runtime_put_autosuspend(&out->sw->dev);
13556ac6faeeSMika Westerberg 	pm_runtime_mark_last_busy(&in->sw->dev);
13566ac6faeeSMika Westerberg 	pm_runtime_put_autosuspend(&in->sw->dev);
13574f807e47SMika Westerberg }
13584f807e47SMika Westerberg 
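/*
 * Summary of the setup sequence in tb_tunnel_dp() above: runtime
 * resume both tunnel ends, claim the DP IN resource, attach the
 * adapter to a bandwidth group, release unused USB3 bandwidth, size
 * the tunnel from tb_available_bandwidth(), activate it, then reclaim
 * leftover USB3 bandwidth and refresh the per-group estimates. The
 * error labels unwind exactly these steps in reverse order.
 */
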
13598afe909bSMika Westerberg static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
13604f807e47SMika Westerberg {
13618afe909bSMika Westerberg 	struct tb_port *in, *out;
13628afe909bSMika Westerberg 	struct tb_tunnel *tunnel;
13638afe909bSMika Westerberg 
13648afe909bSMika Westerberg 	if (tb_port_is_dpin(port)) {
13658afe909bSMika Westerberg 		tb_port_dbg(port, "DP IN resource unavailable\n");
13668afe909bSMika Westerberg 		in = port;
13678afe909bSMika Westerberg 		out = NULL;
13688afe909bSMika Westerberg 	} else {
13698afe909bSMika Westerberg 		tb_port_dbg(port, "DP OUT resource unavailable\n");
13708afe909bSMika Westerberg 		in = NULL;
13718afe909bSMika Westerberg 		out = port;
13728afe909bSMika Westerberg 	}
13738afe909bSMika Westerberg 
13748afe909bSMika Westerberg 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
13758afe909bSMika Westerberg 	tb_deactivate_and_free_tunnel(tunnel);
13768afe909bSMika Westerberg 	list_del_init(&port->list);
13778afe909bSMika Westerberg 
13788afe909bSMika Westerberg 	/*
13798afe909bSMika Westerberg 	 * See if there is another DP OUT port that can be used to
13808afe909bSMika Westerberg 	 * create another tunnel.
13818afe909bSMika Westerberg 	 */
13826ce35635SMika Westerberg 	tb_recalc_estimated_bandwidth(tb);
13838afe909bSMika Westerberg 	tb_tunnel_dp(tb);
13848afe909bSMika Westerberg }
13858afe909bSMika Westerberg 
13868afe909bSMika Westerberg static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
13878afe909bSMika Westerberg {
13888afe909bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
13898afe909bSMika Westerberg 	struct tb_port *p;
13908afe909bSMika Westerberg 
13918afe909bSMika Westerberg 	if (tb_port_is_enabled(port))
13928afe909bSMika Westerberg 		return;
13938afe909bSMika Westerberg 
13948afe909bSMika Westerberg 	list_for_each_entry(p, &tcm->dp_resources, list) {
13958afe909bSMika Westerberg 		if (p == port)
13968afe909bSMika Westerberg 			return;
13978afe909bSMika Westerberg 	}
13988afe909bSMika Westerberg 
13998afe909bSMika Westerberg 	tb_port_dbg(port, "DP %s resource available\n",
14008afe909bSMika Westerberg 		    tb_port_is_dpin(port) ? "IN" : "OUT");
14018afe909bSMika Westerberg 	list_add_tail(&port->list, &tcm->dp_resources);
14028afe909bSMika Westerberg 
14038afe909bSMika Westerberg 	/* Look for suitable DP IN <-> DP OUT pairs now */
14048afe909bSMika Westerberg 	tb_tunnel_dp(tb);
14054f807e47SMika Westerberg }
14064f807e47SMika Westerberg 
140781a2e3e4SMika Westerberg static void tb_disconnect_and_release_dp(struct tb *tb)
140881a2e3e4SMika Westerberg {
140981a2e3e4SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
141081a2e3e4SMika Westerberg 	struct tb_tunnel *tunnel, *n;
141181a2e3e4SMika Westerberg 
141281a2e3e4SMika Westerberg 	/*
141381a2e3e4SMika Westerberg 	 * Tear down all DP tunnels and release their resources. They
141481a2e3e4SMika Westerberg 	 * will be re-established after resume based on plug events.
141581a2e3e4SMika Westerberg 	 */
141681a2e3e4SMika Westerberg 	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
141781a2e3e4SMika Westerberg 		if (tb_tunnel_is_dp(tunnel))
141881a2e3e4SMika Westerberg 			tb_deactivate_and_free_tunnel(tunnel);
141981a2e3e4SMika Westerberg 	}
142081a2e3e4SMika Westerberg 
142181a2e3e4SMika Westerberg 	while (!list_empty(&tcm->dp_resources)) {
142281a2e3e4SMika Westerberg 		struct tb_port *port;
142381a2e3e4SMika Westerberg 
142481a2e3e4SMika Westerberg 		port = list_first_entry(&tcm->dp_resources,
142581a2e3e4SMika Westerberg 					struct tb_port, list);
142681a2e3e4SMika Westerberg 		list_del_init(&port->list);
142781a2e3e4SMika Westerberg 	}
142881a2e3e4SMika Westerberg }
142981a2e3e4SMika Westerberg 
14303da88be2SMika Westerberg static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
14313da88be2SMika Westerberg {
14323da88be2SMika Westerberg 	struct tb_tunnel *tunnel;
14333da88be2SMika Westerberg 	struct tb_port *up;
14343da88be2SMika Westerberg 
14353da88be2SMika Westerberg 	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
14363da88be2SMika Westerberg 	if (WARN_ON(!up))
14373da88be2SMika Westerberg 		return -ENODEV;
14383da88be2SMika Westerberg 
14393da88be2SMika Westerberg 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
14403da88be2SMika Westerberg 	if (WARN_ON(!tunnel))
14413da88be2SMika Westerberg 		return -ENODEV;
14423da88be2SMika Westerberg 
144330a4eca6SMika Westerberg 	tb_switch_xhci_disconnect(sw);
144430a4eca6SMika Westerberg 
14453da88be2SMika Westerberg 	tb_tunnel_deactivate(tunnel);
14463da88be2SMika Westerberg 	list_del(&tunnel->list);
14473da88be2SMika Westerberg 	tb_tunnel_free(tunnel);
14483da88be2SMika Westerberg 	return 0;
14493da88be2SMika Westerberg }
14503da88be2SMika Westerberg 
145199cabbb0SMika Westerberg static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
145299cabbb0SMika Westerberg {
145399cabbb0SMika Westerberg 	struct tb_port *up, *down, *port;
14549d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
145599cabbb0SMika Westerberg 	struct tb_tunnel *tunnel;
14569d3cce0bSMika Westerberg 
1457386e5e29SMika Westerberg 	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
145899cabbb0SMika Westerberg 	if (!up)
145999cabbb0SMika Westerberg 		return 0;
14603364f0c1SAndreas Noever 
146199cabbb0SMika Westerberg 	/*
146299cabbb0SMika Westerberg 	 * Look up an available down port. Since we are chaining, it
146399cabbb0SMika Westerberg 	 * should be found right above this switch.
146499cabbb0SMika Westerberg 	 */
14657ce54221SGil Fine 	port = tb_switch_downstream_port(sw);
14667ce54221SGil Fine 	down = tb_find_pcie_down(tb_switch_parent(sw), port);
146799cabbb0SMika Westerberg 	if (!down)
146899cabbb0SMika Westerberg 		return 0;
14693364f0c1SAndreas Noever 
147099cabbb0SMika Westerberg 	tunnel = tb_tunnel_alloc_pci(tb, up, down);
147199cabbb0SMika Westerberg 	if (!tunnel)
147299cabbb0SMika Westerberg 		return -ENOMEM;
14733364f0c1SAndreas Noever 
147493f36adeSMika Westerberg 	if (tb_tunnel_activate(tunnel)) {
147599cabbb0SMika Westerberg 		tb_port_info(up,
14763364f0c1SAndreas Noever 			     "PCIe tunnel activation failed, aborting\n");
147793f36adeSMika Westerberg 		tb_tunnel_free(tunnel);
147899cabbb0SMika Westerberg 		return -EIO;
14793364f0c1SAndreas Noever 	}
14803364f0c1SAndreas Noever 
148143f977bcSGil Fine 	/*
148243f977bcSGil Fine 	 * PCIe L1 is needed to enable CL0s for Titan Ridge, so enable
148343f977bcSGil Fine 	 * it here.
148443f977bcSGil Fine 	 */
148543f977bcSGil Fine 	if (tb_switch_pcie_l1_enable(sw))
148643f977bcSGil Fine 		tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
148743f977bcSGil Fine 
148830a4eca6SMika Westerberg 	if (tb_switch_xhci_connect(sw))
148930a4eca6SMika Westerberg 		tb_sw_warn(sw, "failed to connect xHCI\n");
149030a4eca6SMika Westerberg 
149199cabbb0SMika Westerberg 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
149299cabbb0SMika Westerberg 	return 0;
14933364f0c1SAndreas Noever }
14949da672a4SAndreas Noever 
1495180b0689SMika Westerberg static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1496180b0689SMika Westerberg 				    int transmit_path, int transmit_ring,
1497180b0689SMika Westerberg 				    int receive_path, int receive_ring)
14987ea4cd6bSMika Westerberg {
14997ea4cd6bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
15007ea4cd6bSMika Westerberg 	struct tb_port *nhi_port, *dst_port;
15017ea4cd6bSMika Westerberg 	struct tb_tunnel *tunnel;
15027ea4cd6bSMika Westerberg 	struct tb_switch *sw;
1503*53ba2e16SMika Westerberg 	int ret;
15047ea4cd6bSMika Westerberg 
15057ea4cd6bSMika Westerberg 	sw = tb_to_switch(xd->dev.parent);
15067ea4cd6bSMika Westerberg 	dst_port = tb_port_at(xd->route, sw);
1507386e5e29SMika Westerberg 	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
15087ea4cd6bSMika Westerberg 
15097ea4cd6bSMika Westerberg 	mutex_lock(&tb->lock);
1510*53ba2e16SMika Westerberg 
1511*53ba2e16SMika Westerberg 	/*
1512*53ba2e16SMika Westerberg 	 * When tunneling DMA paths, the link should not enter CL
1513*53ba2e16SMika Westerberg 	 * states, so disable them now.
1514*53ba2e16SMika Westerberg 	 */
1515*53ba2e16SMika Westerberg 	tb_disable_clx(sw);
1516*53ba2e16SMika Westerberg 
1517180b0689SMika Westerberg 	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
1518180b0689SMika Westerberg 				     transmit_ring, receive_path, receive_ring);
15197ea4cd6bSMika Westerberg 	if (!tunnel) {
1520*53ba2e16SMika Westerberg 		ret = -ENOMEM;
1521*53ba2e16SMika Westerberg 		goto err_clx;
15227ea4cd6bSMika Westerberg 	}
15237ea4cd6bSMika Westerberg 
15247ea4cd6bSMika Westerberg 	if (tb_tunnel_activate(tunnel)) {
15257ea4cd6bSMika Westerberg 		tb_port_info(nhi_port,
15267ea4cd6bSMika Westerberg 			     "DMA tunnel activation failed, aborting\n");
1527*53ba2e16SMika Westerberg 		ret = -EIO;
1528*53ba2e16SMika Westerberg 		goto err_free;
15297ea4cd6bSMika Westerberg 	}
15307ea4cd6bSMika Westerberg 
15317ea4cd6bSMika Westerberg 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
15327ea4cd6bSMika Westerberg 	mutex_unlock(&tb->lock);
15337ea4cd6bSMika Westerberg 	return 0;
1534*53ba2e16SMika Westerberg 
1535*53ba2e16SMika Westerberg err_free:
1536*53ba2e16SMika Westerberg 	tb_tunnel_free(tunnel);
1537*53ba2e16SMika Westerberg err_clx:
1538*53ba2e16SMika Westerberg 	tb_enable_clx(sw);
1539*53ba2e16SMika Westerberg 	mutex_unlock(&tb->lock);
1540*53ba2e16SMika Westerberg 
1541*53ba2e16SMika Westerberg 	return ret;
15427ea4cd6bSMika Westerberg }
15437ea4cd6bSMika Westerberg 
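/*
 * Note that CL states stay disabled for the whole lifetime of the DMA
 * tunnel: tb_disable_clx() above when the paths are approved, and the
 * matching tb_enable_clx() in __tb_disconnect_xdomain_paths() once the
 * tunnels are torn down.
 */
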
1544180b0689SMika Westerberg static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1545180b0689SMika Westerberg 					  int transmit_path, int transmit_ring,
1546180b0689SMika Westerberg 					  int receive_path, int receive_ring)
15477ea4cd6bSMika Westerberg {
1548180b0689SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
1549180b0689SMika Westerberg 	struct tb_port *nhi_port, *dst_port;
1550180b0689SMika Westerberg 	struct tb_tunnel *tunnel, *n;
15517ea4cd6bSMika Westerberg 	struct tb_switch *sw;
15527ea4cd6bSMika Westerberg 
15537ea4cd6bSMika Westerberg 	sw = tb_to_switch(xd->dev.parent);
15547ea4cd6bSMika Westerberg 	dst_port = tb_port_at(xd->route, sw);
1555180b0689SMika Westerberg 	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
15567ea4cd6bSMika Westerberg 
1557180b0689SMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1558180b0689SMika Westerberg 		if (!tb_tunnel_is_dma(tunnel))
1559180b0689SMika Westerberg 			continue;
1560180b0689SMika Westerberg 		if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
1561180b0689SMika Westerberg 			continue;
1562180b0689SMika Westerberg 
1563180b0689SMika Westerberg 		if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
1564180b0689SMika Westerberg 					receive_path, receive_ring))
15658afe909bSMika Westerberg 			tb_deactivate_and_free_tunnel(tunnel);
15667ea4cd6bSMika Westerberg 	}
1567*53ba2e16SMika Westerberg 
1568*53ba2e16SMika Westerberg 	/*
1569*53ba2e16SMika Westerberg 	 * Try to re-enable CL states now; it is OK if this fails
1570*53ba2e16SMika Westerberg 	 * because we may still have another DMA tunnel active through
1571*53ba2e16SMika Westerberg 	 * the same host router USB4 downstream port.
1572*53ba2e16SMika Westerberg 	 */
1573*53ba2e16SMika Westerberg 	tb_enable_clx(sw);
1574180b0689SMika Westerberg }
15757ea4cd6bSMika Westerberg 
1576180b0689SMika Westerberg static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1577180b0689SMika Westerberg 				       int transmit_path, int transmit_ring,
1578180b0689SMika Westerberg 				       int receive_path, int receive_ring)
15797ea4cd6bSMika Westerberg {
15807ea4cd6bSMika Westerberg 	if (!xd->is_unplugged) {
15817ea4cd6bSMika Westerberg 		mutex_lock(&tb->lock);
1582180b0689SMika Westerberg 		__tb_disconnect_xdomain_paths(tb, xd, transmit_path,
1583180b0689SMika Westerberg 					      transmit_ring, receive_path,
1584180b0689SMika Westerberg 					      receive_ring);
15857ea4cd6bSMika Westerberg 		mutex_unlock(&tb->lock);
15867ea4cd6bSMika Westerberg 	}
15877ea4cd6bSMika Westerberg 	return 0;
15887ea4cd6bSMika Westerberg }
15897ea4cd6bSMika Westerberg 
1590d6cc51cdSAndreas Noever /* hotplug handling */
1591d6cc51cdSAndreas Noever 
1592877e50b3SLee Jones /*
1593d6cc51cdSAndreas Noever  * tb_handle_hotplug() - handle hotplug event
1594d6cc51cdSAndreas Noever  *
1595d6cc51cdSAndreas Noever  * Executes on tb->wq.
1596d6cc51cdSAndreas Noever  */
1597d6cc51cdSAndreas Noever static void tb_handle_hotplug(struct work_struct *work)
1598d6cc51cdSAndreas Noever {
1599d6cc51cdSAndreas Noever 	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
1600d6cc51cdSAndreas Noever 	struct tb *tb = ev->tb;
16019d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
1602053596d9SAndreas Noever 	struct tb_switch *sw;
1603053596d9SAndreas Noever 	struct tb_port *port;
1604284652a4SMika Westerberg 
16056ac6faeeSMika Westerberg 	/* Bring the domain back from sleep if it was suspended */
16066ac6faeeSMika Westerberg 	pm_runtime_get_sync(&tb->dev);
16076ac6faeeSMika Westerberg 
1608d6cc51cdSAndreas Noever 	mutex_lock(&tb->lock);
16099d3cce0bSMika Westerberg 	if (!tcm->hotplug_active)
1610d6cc51cdSAndreas Noever 		goto out; /* during init, suspend or shutdown */
1611d6cc51cdSAndreas Noever 
16128f965efdSMika Westerberg 	sw = tb_switch_find_by_route(tb, ev->route);
1613053596d9SAndreas Noever 	if (!sw) {
1614053596d9SAndreas Noever 		tb_warn(tb,
1615053596d9SAndreas Noever 			"hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
1616053596d9SAndreas Noever 			ev->route, ev->port, ev->unplug);
1617053596d9SAndreas Noever 		goto out;
1618053596d9SAndreas Noever 	}
1619053596d9SAndreas Noever 	if (ev->port > sw->config.max_port_number) {
1620053596d9SAndreas Noever 		tb_warn(tb,
1621053596d9SAndreas Noever 			"hotplug event from non-existent port %llx:%x (unplug: %d)\n",
1622053596d9SAndreas Noever 			ev->route, ev->port, ev->unplug);
16238f965efdSMika Westerberg 		goto put_sw;
1624053596d9SAndreas Noever 	}
1625053596d9SAndreas Noever 	port = &sw->ports[ev->port];
1626053596d9SAndreas Noever 	if (tb_is_upstream_port(port)) {
1627dfe40ca4SMika Westerberg 		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
1628053596d9SAndreas Noever 		       ev->route, ev->port, ev->unplug);
16298f965efdSMika Westerberg 		goto put_sw;
1630053596d9SAndreas Noever 	}
16316ac6faeeSMika Westerberg 
16326ac6faeeSMika Westerberg 	pm_runtime_get_sync(&sw->dev);
16336ac6faeeSMika Westerberg 
1634053596d9SAndreas Noever 	if (ev->unplug) {
1635dacb1287SKranthi Kuntala 		tb_retimer_remove_all(port);
1636dacb1287SKranthi Kuntala 
1637dfe40ca4SMika Westerberg 		if (tb_port_has_remote(port)) {
16387ea4cd6bSMika Westerberg 			tb_port_dbg(port, "switch unplugged\n");
1639aae20bb6SLukas Wunner 			tb_sw_set_unplugged(port->remote->sw);
16403364f0c1SAndreas Noever 			tb_free_invalid_tunnels(tb);
16418afe909bSMika Westerberg 			tb_remove_dp_resources(port->remote->sw);
1642cf29b9afSRajmohan Mani 			tb_switch_tmu_disable(port->remote->sw);
1643de462039SMika Westerberg 			tb_switch_unconfigure_link(port->remote->sw);
164491c0c120SMika Westerberg 			tb_switch_lane_bonding_disable(port->remote->sw);
1645bfe778acSMika Westerberg 			tb_switch_remove(port->remote->sw);
1646053596d9SAndreas Noever 			port->remote = NULL;
1647dfe40ca4SMika Westerberg 			if (port->dual_link_port)
1648dfe40ca4SMika Westerberg 				port->dual_link_port->remote = NULL;
16498afe909bSMika Westerberg 			/* Maybe we can create another DP tunnel */
16506ce35635SMika Westerberg 			tb_recalc_estimated_bandwidth(tb);
16518afe909bSMika Westerberg 			tb_tunnel_dp(tb);
16527ea4cd6bSMika Westerberg 		} else if (port->xdomain) {
16537ea4cd6bSMika Westerberg 			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
16547ea4cd6bSMika Westerberg 
16557ea4cd6bSMika Westerberg 			tb_port_dbg(port, "xdomain unplugged\n");
16567ea4cd6bSMika Westerberg 			/*
16577ea4cd6bSMika Westerberg 			 * Service drivers are unbound during
16587ea4cd6bSMika Westerberg 			 * tb_xdomain_remove(), so setting XDomain as
16597ea4cd6bSMika Westerberg 			 * unplugged here prevents deadlock if they call
16607ea4cd6bSMika Westerberg 			 * tb_xdomain_disable_paths(). We will tear down
1661180b0689SMika Westerberg 			 * all the tunnels below.
16627ea4cd6bSMika Westerberg 			 */
16637ea4cd6bSMika Westerberg 			xd->is_unplugged = true;
16647ea4cd6bSMika Westerberg 			tb_xdomain_remove(xd);
16657ea4cd6bSMika Westerberg 			port->xdomain = NULL;
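			/*
			 * Passing -1 for all paths and rings below
			 * acts as a wildcard (see
			 * tb_tunnel_match_dma()), so every remaining
			 * DMA tunnel to this XDomain is torn down.
			 */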
1666180b0689SMika Westerberg 			__tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
16677ea4cd6bSMika Westerberg 			tb_xdomain_put(xd);
1668284652a4SMika Westerberg 			tb_port_unconfigure_xdomain(port);
16698afe909bSMika Westerberg 		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
16708afe909bSMika Westerberg 			tb_dp_resource_unavailable(tb, port);
167130a4eca6SMika Westerberg 		} else if (!port->port) {
167230a4eca6SMika Westerberg 			tb_sw_dbg(sw, "xHCI disconnect request\n");
167330a4eca6SMika Westerberg 			tb_switch_xhci_disconnect(sw);
1674053596d9SAndreas Noever 		} else {
167562efe699SMika Westerberg 			tb_port_dbg(port,
1676053596d9SAndreas Noever 				   "got unplug event for disconnected port, ignoring\n");
1677053596d9SAndreas Noever 		}
1678053596d9SAndreas Noever 	} else if (port->remote) {
167962efe699SMika Westerberg 		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
168030a4eca6SMika Westerberg 	} else if (!port->port && sw->authorized) {
168130a4eca6SMika Westerberg 		tb_sw_dbg(sw, "xHCI connect request\n");
168230a4eca6SMika Westerberg 		tb_switch_xhci_connect(sw);
1683053596d9SAndreas Noever 	} else {
1684344e0643SMika Westerberg 		if (tb_port_is_null(port)) {
168562efe699SMika Westerberg 			tb_port_dbg(port, "hotplug: scanning\n");
1686053596d9SAndreas Noever 			tb_scan_port(port);
168799cabbb0SMika Westerberg 			if (!port->remote)
168862efe699SMika Westerberg 				tb_port_dbg(port, "hotplug: no switch found\n");
16898afe909bSMika Westerberg 		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
16908afe909bSMika Westerberg 			tb_dp_resource_available(tb, port);
1691053596d9SAndreas Noever 		}
1692344e0643SMika Westerberg 	}
16938f965efdSMika Westerberg 
16946ac6faeeSMika Westerberg 	pm_runtime_mark_last_busy(&sw->dev);
16956ac6faeeSMika Westerberg 	pm_runtime_put_autosuspend(&sw->dev);
16966ac6faeeSMika Westerberg 
16978f965efdSMika Westerberg put_sw:
16988f965efdSMika Westerberg 	tb_switch_put(sw);
1699d6cc51cdSAndreas Noever out:
1700d6cc51cdSAndreas Noever 	mutex_unlock(&tb->lock);
17016ac6faeeSMika Westerberg 
17026ac6faeeSMika Westerberg 	pm_runtime_mark_last_busy(&tb->dev);
17036ac6faeeSMika Westerberg 	pm_runtime_put_autosuspend(&tb->dev);
17046ac6faeeSMika Westerberg 
1705d6cc51cdSAndreas Noever 	kfree(ev);
1706d6cc51cdSAndreas Noever }
1707d6cc51cdSAndreas Noever 
17086ce35635SMika Westerberg static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
17096ce35635SMika Westerberg 				 int *requested_down)
17106ce35635SMika Westerberg {
17116ce35635SMika Westerberg 	int allocated_up, allocated_down, available_up, available_down, ret;
17126ce35635SMika Westerberg 	int requested_up_corrected, requested_down_corrected, granularity;
17136ce35635SMika Westerberg 	int max_up, max_down, max_up_rounded, max_down_rounded;
17146ce35635SMika Westerberg 	struct tb *tb = tunnel->tb;
17156ce35635SMika Westerberg 	struct tb_port *in, *out;
17166ce35635SMika Westerberg 
17176ce35635SMika Westerberg 	ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
17186ce35635SMika Westerberg 	if (ret)
17196ce35635SMika Westerberg 		return ret;
17206ce35635SMika Westerberg 
17216ce35635SMika Westerberg 	in = tunnel->src_port;
17226ce35635SMika Westerberg 	out = tunnel->dst_port;
17236ce35635SMika Westerberg 
17246ce35635SMika Westerberg 	tb_port_dbg(in, "bandwidth allocated currently %d/%d Mb/s\n",
17256ce35635SMika Westerberg 		    allocated_up, allocated_down);
17266ce35635SMika Westerberg 
17276ce35635SMika Westerberg 	/*
17286ce35635SMika Westerberg 	 * If we get a rounded-up request from the graphics side, say
17296ce35635SMika Westerberg 	 * HBR2 x 4 that is 17500 instead of 17280 (this is because of
17306ce35635SMika Westerberg 	 * the granularity), we allow it too. Here the graphics has
17316ce35635SMika Westerberg 	 * already negotiated the maximum possible rates with the DPRX
17326ce35635SMika Westerberg 	 * (which is 17280 in this case).
17336ce35635SMika Westerberg 	 *
17346ce35635SMika Westerberg 	 * Since the link cannot go higher than 17280, we use that in
17356ce35635SMika Westerberg 	 * our calculations, but the DP IN adapter Allocated BW write
17366ce35635SMika Westerberg 	 * must be the same value (17500); otherwise the adapter will
17376ce35635SMika Westerberg 	 * mark it as failed for graphics.
17386ce35635SMika Westerberg 	 */
17396ce35635SMika Westerberg 	ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
17406ce35635SMika Westerberg 	if (ret)
17416ce35635SMika Westerberg 		return ret;
17426ce35635SMika Westerberg 
17436ce35635SMika Westerberg 	ret = usb4_dp_port_granularity(in);
17446ce35635SMika Westerberg 	if (ret < 0)
17456ce35635SMika Westerberg 		return ret;
17466ce35635SMika Westerberg 	granularity = ret;
17476ce35635SMika Westerberg 
17486ce35635SMika Westerberg 	max_up_rounded = roundup(max_up, granularity);
17496ce35635SMika Westerberg 	max_down_rounded = roundup(max_down, granularity);
17506ce35635SMika Westerberg 
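	/*
	 * Worked example of the rounding above: HBR2 x 4 gives a link
	 * maximum of 17280 Mb/s. With a granularity of, say, 250 Mb/s,
	 * roundup(17280, 250) = 17500 Mb/s, which is why a 17500 Mb/s
	 * request is accepted and corrected back down to 17280 below.
	 */
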
17516ce35635SMika Westerberg 	/*
17526ce35635SMika Westerberg 	 * This will "fix" the request down to the maximum supported
17536ce35635SMika Westerberg 	 * rate * lanes if it is at the maximum rounded-up level.
17546ce35635SMika Westerberg 	 */
17556ce35635SMika Westerberg 	requested_up_corrected = *requested_up;
17566ce35635SMika Westerberg 	if (requested_up_corrected == max_up_rounded)
17576ce35635SMika Westerberg 		requested_up_corrected = max_up;
17586ce35635SMika Westerberg 	else if (requested_up_corrected < 0)
17596ce35635SMika Westerberg 		requested_up_corrected = 0;
17606ce35635SMika Westerberg 	requested_down_corrected = *requested_down;
17616ce35635SMika Westerberg 	if (requested_down_corrected == max_down_rounded)
17626ce35635SMika Westerberg 		requested_down_corrected = max_down;
17636ce35635SMika Westerberg 	else if (requested_down_corrected < 0)
17646ce35635SMika Westerberg 		requested_down_corrected = 0;
17656ce35635SMika Westerberg 
17666ce35635SMika Westerberg 	tb_port_dbg(in, "corrected bandwidth request %d/%d Mb/s\n",
17676ce35635SMika Westerberg 		    requested_up_corrected, requested_down_corrected);
17686ce35635SMika Westerberg 
17696ce35635SMika Westerberg 	if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
17706ce35635SMika Westerberg 	    (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
17716ce35635SMika Westerberg 		tb_port_dbg(in, "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
17726ce35635SMika Westerberg 			    requested_up_corrected, requested_down_corrected,
17736ce35635SMika Westerberg 			    max_up_rounded, max_down_rounded);
17746ce35635SMika Westerberg 		return -ENOBUFS;
17756ce35635SMika Westerberg 	}
17766ce35635SMika Westerberg 
17776ce35635SMika Westerberg 	if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
17786ce35635SMika Westerberg 	    (*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
17796ce35635SMika Westerberg 		/*
17806ce35635SMika Westerberg 		 * If the requested bandwidth is less than or equal to what
17816ce35635SMika Westerberg 		 * is currently allocated to that tunnel, we simply change
17826ce35635SMika Westerberg 		 * the reservation of the tunnel. Since all the tunnels
17836ce35635SMika Westerberg 		 * going out from the same USB4 port are in the same
17846ce35635SMika Westerberg 		 * group the released bandwidth will be taken into
17856ce35635SMika Westerberg 		 * account for the other tunnels automatically below.
17866ce35635SMika Westerberg 		 */
17876ce35635SMika Westerberg 		return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
17886ce35635SMika Westerberg 						 requested_down);
17896ce35635SMika Westerberg 	}
17906ce35635SMika Westerberg 
17916ce35635SMika Westerberg 	/*
17926ce35635SMika Westerberg 	 * More bandwidth is requested. Release all the potential
17936ce35635SMika Westerberg 	 * bandwidth from USB3 first.
17946ce35635SMika Westerberg 	 */
17956ce35635SMika Westerberg 	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
17966ce35635SMika Westerberg 	if (ret)
17976ce35635SMika Westerberg 		return ret;
17986ce35635SMika Westerberg 
17996ce35635SMika Westerberg 	/*
18006ce35635SMika Westerberg 	 * Then go over all tunnels that cross the same USB4 ports (they
18016ce35635SMika Westerberg 	 * are also in the same group but we use the same function here
18026ce35635SMika Westerberg 	 * that we use with the normal bandwidth allocation).
18036ce35635SMika Westerberg 	 */
18046ce35635SMika Westerberg 	ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
18056ce35635SMika Westerberg 	if (ret)
18066ce35635SMika Westerberg 		goto reclaim;
18076ce35635SMika Westerberg 
18086ce35635SMika Westerberg 	tb_port_dbg(in, "bandwidth available for allocation %d/%d Mb/s\n",
18096ce35635SMika Westerberg 		    available_up, available_down);
18106ce35635SMika Westerberg 
18116ce35635SMika Westerberg 	if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
18126ce35635SMika Westerberg 	    (*requested_down >= 0 && available_down >= requested_down_corrected)) {
18136ce35635SMika Westerberg 		ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
18146ce35635SMika Westerberg 						requested_down);
18156ce35635SMika Westerberg 	} else {
18166ce35635SMika Westerberg 		ret = -ENOBUFS;
18176ce35635SMika Westerberg 	}
18186ce35635SMika Westerberg 
18196ce35635SMika Westerberg reclaim:
18206ce35635SMika Westerberg 	tb_reclaim_usb3_bandwidth(tb, in, out);
18216ce35635SMika Westerberg 	return ret;
18226ce35635SMika Westerberg }
18236ce35635SMika Westerberg 
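/*
 * To summarize the allocation policy above: a request at or below the
 * current reservation is granted directly (the freed bandwidth then
 * becomes visible to the rest of the group), while a larger request is
 * granted only if it still fits once all unused USB3 bandwidth on the
 * path has been released.
 */
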
18246ce35635SMika Westerberg static void tb_handle_dp_bandwidth_request(struct work_struct *work)
18256ce35635SMika Westerberg {
18266ce35635SMika Westerberg 	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
18276ce35635SMika Westerberg 	int requested_bw, requested_up, requested_down, ret;
18286ce35635SMika Westerberg 	struct tb_port *in, *out;
18296ce35635SMika Westerberg 	struct tb_tunnel *tunnel;
18306ce35635SMika Westerberg 	struct tb *tb = ev->tb;
18316ce35635SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
18326ce35635SMika Westerberg 	struct tb_switch *sw;
18336ce35635SMika Westerberg 
18346ce35635SMika Westerberg 	pm_runtime_get_sync(&tb->dev);
18356ce35635SMika Westerberg 
18366ce35635SMika Westerberg 	mutex_lock(&tb->lock);
18376ce35635SMika Westerberg 	if (!tcm->hotplug_active)
18386ce35635SMika Westerberg 		goto unlock;
18396ce35635SMika Westerberg 
18406ce35635SMika Westerberg 	sw = tb_switch_find_by_route(tb, ev->route);
18416ce35635SMika Westerberg 	if (!sw) {
18426ce35635SMika Westerberg 		tb_warn(tb, "bandwidth request from non-existent router %llx\n",
18436ce35635SMika Westerberg 			ev->route);
18446ce35635SMika Westerberg 		goto unlock;
18456ce35635SMika Westerberg 	}
18466ce35635SMika Westerberg 
18476ce35635SMika Westerberg 	in = &sw->ports[ev->port];
18486ce35635SMika Westerberg 	if (!tb_port_is_dpin(in)) {
18496ce35635SMika Westerberg 		tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
18506ce35635SMika Westerberg 		goto unlock;
18516ce35635SMika Westerberg 	}
18526ce35635SMika Westerberg 
18536ce35635SMika Westerberg 	tb_port_dbg(in, "handling bandwidth allocation request\n");
18546ce35635SMika Westerberg 
18556ce35635SMika Westerberg 	if (!usb4_dp_port_bw_mode_enabled(in)) {
18566ce35635SMika Westerberg 		tb_port_warn(in, "bandwidth allocation mode not enabled\n");
18576ce35635SMika Westerberg 		goto unlock;
18586ce35635SMika Westerberg 	}
18596ce35635SMika Westerberg 
1860ace75e18SMika Westerberg 	ret = usb4_dp_port_requested_bw(in);
1861ace75e18SMika Westerberg 	if (ret < 0) {
1862ace75e18SMika Westerberg 		if (ret == -ENODATA)
18636ce35635SMika Westerberg 			tb_port_dbg(in, "no bandwidth request active\n");
1864ace75e18SMika Westerberg 		else
1865ace75e18SMika Westerberg 			tb_port_warn(in, "failed to read requested bandwidth\n");
18666ce35635SMika Westerberg 		goto unlock;
18676ce35635SMika Westerberg 	}
1868ace75e18SMika Westerberg 	requested_bw = ret;
18696ce35635SMika Westerberg 
18706ce35635SMika Westerberg 	tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
18716ce35635SMika Westerberg 
18726ce35635SMika Westerberg 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
18736ce35635SMika Westerberg 	if (!tunnel) {
18746ce35635SMika Westerberg 		tb_port_warn(in, "failed to find tunnel\n");
18756ce35635SMika Westerberg 		goto unlock;
18766ce35635SMika Westerberg 	}
18776ce35635SMika Westerberg 
18786ce35635SMika Westerberg 	out = tunnel->dst_port;
18796ce35635SMika Westerberg 
18806ce35635SMika Westerberg 	if (in->sw->config.depth < out->sw->config.depth) {
18816ce35635SMika Westerberg 		requested_up = -1;
18826ce35635SMika Westerberg 		requested_down = requested_bw;
18836ce35635SMika Westerberg 	} else {
18846ce35635SMika Westerberg 		requested_up = requested_bw;
18856ce35635SMika Westerberg 		requested_down = -1;
18866ce35635SMika Westerberg 	}
18876ce35635SMika Westerberg 
18886ce35635SMika Westerberg 	ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
18896ce35635SMika Westerberg 	if (ret) {
18906ce35635SMika Westerberg 		if (ret == -ENOBUFS)
18916ce35635SMika Westerberg 			tb_port_warn(in, "not enough bandwidth available\n");
18926ce35635SMika Westerberg 		else
18936ce35635SMika Westerberg 			tb_port_warn(in, "failed to change bandwidth allocation\n");
18946ce35635SMika Westerberg 	} else {
18956ce35635SMika Westerberg 		tb_port_dbg(in, "bandwidth allocation changed to %d/%d Mb/s\n",
18966ce35635SMika Westerberg 			    requested_up, requested_down);
18976ce35635SMika Westerberg 
18986ce35635SMika Westerberg 		/* Update other clients about the allocation change */
18996ce35635SMika Westerberg 		tb_recalc_estimated_bandwidth(tb);
19006ce35635SMika Westerberg 	}
19016ce35635SMika Westerberg 
19026ce35635SMika Westerberg unlock:
19036ce35635SMika Westerberg 	mutex_unlock(&tb->lock);
19046ce35635SMika Westerberg 
19056ce35635SMika Westerberg 	pm_runtime_mark_last_busy(&tb->dev);
19066ce35635SMika Westerberg 	pm_runtime_put_autosuspend(&tb->dev);
19076ce35635SMika Westerberg }
19086ce35635SMika Westerberg 
19096ce35635SMika Westerberg static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
19106ce35635SMika Westerberg {
19116ce35635SMika Westerberg 	struct tb_hotplug_event *ev;
19126ce35635SMika Westerberg 
19136ce35635SMika Westerberg 	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
19146ce35635SMika Westerberg 	if (!ev)
19156ce35635SMika Westerberg 		return;
19166ce35635SMika Westerberg 
19176ce35635SMika Westerberg 	ev->tb = tb;
19186ce35635SMika Westerberg 	ev->route = route;
19196ce35635SMika Westerberg 	ev->port = port;
19206ce35635SMika Westerberg 	INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
19216ce35635SMika Westerberg 	queue_work(tb->wq, &ev->work);
19226ce35635SMika Westerberg }
19236ce35635SMika Westerberg 
19246ce35635SMika Westerberg static void tb_handle_notification(struct tb *tb, u64 route,
19256ce35635SMika Westerberg 				   const struct cfg_error_pkg *error)
19266ce35635SMika Westerberg {
19276ce35635SMika Westerberg 	if (tb_cfg_ack_notification(tb->ctl, route, error))
19286ce35635SMika Westerberg 		tb_warn(tb, "could not ack notification on %llx\n", route);
19296ce35635SMika Westerberg 
19306ce35635SMika Westerberg 	switch (error->error) {
19316ce35635SMika Westerberg 	case TB_CFG_ERROR_DP_BW:
19326ce35635SMika Westerberg 		tb_queue_dp_bandwidth_request(tb, route, error->port);
19336ce35635SMika Westerberg 		break;
19346ce35635SMika Westerberg 
19356ce35635SMika Westerberg 	default:
19366ce35635SMika Westerberg 		/* Ack is enough */
19376ce35635SMika Westerberg 		return;
19386ce35635SMika Westerberg 	}
19396ce35635SMika Westerberg }
19406ce35635SMika Westerberg 
1941877e50b3SLee Jones /*
1942d6cc51cdSAndreas Noever  * tb_handle_event() - callback function for the control channel
1943d6cc51cdSAndreas Noever  *
1944d6cc51cdSAndreas Noever  * Delegates to tb_handle_hotplug.
1945d6cc51cdSAndreas Noever  */
194681a54b5eSMika Westerberg static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
194781a54b5eSMika Westerberg 			    const void *buf, size_t size)
1948d6cc51cdSAndreas Noever {
194981a54b5eSMika Westerberg 	const struct cfg_event_pkg *pkg = buf;
19506ce35635SMika Westerberg 	u64 route = tb_cfg_get_route(&pkg->header);
195181a54b5eSMika Westerberg 
19526ce35635SMika Westerberg 	switch (type) {
19536ce35635SMika Westerberg 	case TB_CFG_PKG_ERROR:
19546ce35635SMika Westerberg 		tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
19556ce35635SMika Westerberg 		return;
19566ce35635SMika Westerberg 	case TB_CFG_PKG_EVENT:
19576ce35635SMika Westerberg 		break;
19586ce35635SMika Westerberg 	default:
195981a54b5eSMika Westerberg 		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
196081a54b5eSMika Westerberg 		return;
196181a54b5eSMika Westerberg 	}
196281a54b5eSMika Westerberg 
1963210e9f56SMika Westerberg 	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
196481a54b5eSMika Westerberg 		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
196581a54b5eSMika Westerberg 			pkg->port);
196681a54b5eSMika Westerberg 	}
196781a54b5eSMika Westerberg 
19684f807e47SMika Westerberg 	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
1969d6cc51cdSAndreas Noever }
1970d6cc51cdSAndreas Noever 
19719d3cce0bSMika Westerberg static void tb_stop(struct tb *tb)
1972d6cc51cdSAndreas Noever {
19739d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
197493f36adeSMika Westerberg 	struct tb_tunnel *tunnel;
197593f36adeSMika Westerberg 	struct tb_tunnel *n;
19763364f0c1SAndreas Noever 
19776ac6faeeSMika Westerberg 	cancel_delayed_work(&tcm->remove_work);
19783364f0c1SAndreas Noever 	/* tunnels are only present after everything has been initialized */
19797ea4cd6bSMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
19807ea4cd6bSMika Westerberg 		/*
19817ea4cd6bSMika Westerberg 		 * DMA tunnels require the driver to be functional so we
19827ea4cd6bSMika Westerberg 		 * tear them down. Other protocol tunnels can be left
19837ea4cd6bSMika Westerberg 		 * intact.
19847ea4cd6bSMika Westerberg 		 */
19857ea4cd6bSMika Westerberg 		if (tb_tunnel_is_dma(tunnel))
19867ea4cd6bSMika Westerberg 			tb_tunnel_deactivate(tunnel);
198793f36adeSMika Westerberg 		tb_tunnel_free(tunnel);
19887ea4cd6bSMika Westerberg 	}
1989bfe778acSMika Westerberg 	tb_switch_remove(tb->root_switch);
19909d3cce0bSMika Westerberg 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
1991d6cc51cdSAndreas Noever }
1992d6cc51cdSAndreas Noever 
199399cabbb0SMika Westerberg static int tb_scan_finalize_switch(struct device *dev, void *data)
199499cabbb0SMika Westerberg {
199599cabbb0SMika Westerberg 	if (tb_is_switch(dev)) {
199699cabbb0SMika Westerberg 		struct tb_switch *sw = tb_to_switch(dev);
199799cabbb0SMika Westerberg 
199899cabbb0SMika Westerberg 		/*
199999cabbb0SMika Westerberg 		 * If we found that the switch was already set up by the
200099cabbb0SMika Westerberg 		 * boot firmware, mark it as authorized now before we
200199cabbb0SMika Westerberg 		 * send the uevent to userspace.
200299cabbb0SMika Westerberg 		 */
200399cabbb0SMika Westerberg 		if (sw->boot)
200499cabbb0SMika Westerberg 			sw->authorized = 1;
200599cabbb0SMika Westerberg 
200699cabbb0SMika Westerberg 		dev_set_uevent_suppress(dev, false);
200799cabbb0SMika Westerberg 		kobject_uevent(&dev->kobj, KOBJ_ADD);
200899cabbb0SMika Westerberg 		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
200999cabbb0SMika Westerberg 	}
201099cabbb0SMika Westerberg 
201199cabbb0SMika Westerberg 	return 0;
201299cabbb0SMika Westerberg }
201399cabbb0SMika Westerberg 
20149d3cce0bSMika Westerberg static int tb_start(struct tb *tb)
2015d6cc51cdSAndreas Noever {
20169d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
2017bfe778acSMika Westerberg 	int ret;
2018d6cc51cdSAndreas Noever 
2019bfe778acSMika Westerberg 	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
2020444ac384SMika Westerberg 	if (IS_ERR(tb->root_switch))
2021444ac384SMika Westerberg 		return PTR_ERR(tb->root_switch);
2022a25c8b2fSAndreas Noever 
2023e6b245ccSMika Westerberg 	/*
2024e6b245ccSMika Westerberg 	 * ICM firmware upgrade needs running ICM firmware, which is
2025e6b245ccSMika Westerberg 	 * not available in native mode, so disable firmware upgrade of
2026e6b245ccSMika Westerberg 	 * the root switch.
20275172eb9aSSzuying Chen 	 *
20285172eb9aSSzuying Chen 	 * However, USB4 routers support NVM firmware upgrade if they
20295172eb9aSSzuying Chen 	 * implement the necessary router operations.
2030e6b245ccSMika Westerberg 	 */
20315172eb9aSSzuying Chen 	tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
20326ac6faeeSMika Westerberg 	/* All USB4 routers support runtime PM */
20336ac6faeeSMika Westerberg 	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
2034e6b245ccSMika Westerberg 
2035bfe778acSMika Westerberg 	ret = tb_switch_configure(tb->root_switch);
2036bfe778acSMika Westerberg 	if (ret) {
2037bfe778acSMika Westerberg 		tb_switch_put(tb->root_switch);
2038bfe778acSMika Westerberg 		return ret;
2039bfe778acSMika Westerberg 	}
2040bfe778acSMika Westerberg 
2041bfe778acSMika Westerberg 	/* Announce the switch to the world */
2042bfe778acSMika Westerberg 	ret = tb_switch_add(tb->root_switch);
2043bfe778acSMika Westerberg 	if (ret) {
2044bfe778acSMika Westerberg 		tb_switch_put(tb->root_switch);
2045bfe778acSMika Westerberg 		return ret;
2046bfe778acSMika Westerberg 	}
2047bfe778acSMika Westerberg 
2048b017a46dSGil Fine 	/*
2049b017a46dSGil Fine 	 * To support the highest CLx state, we set the host router's
2050b017a46dSGil Fine 	 * TMU to Normal mode.
2051b017a46dSGil Fine 	 */
2052b017a46dSGil Fine 	tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_NORMAL,
2053b017a46dSGil Fine 				false);
2054cf29b9afSRajmohan Mani 	/* Enable TMU if it is off */
2055cf29b9afSRajmohan Mani 	tb_switch_tmu_enable(tb->root_switch);
20569da672a4SAndreas Noever 	/* Full scan to discover devices added before the driver was loaded. */
20579da672a4SAndreas Noever 	tb_scan_switch(tb->root_switch);
20580414bec5SMika Westerberg 	/* Discover tunnels created by the boot firmware */
205943bddb26SMika Westerberg 	tb_discover_tunnels(tb);
2060b60e31bfSSanjay R Mehta 	/* Add DP resources from the DP tunnels created by the boot firmware */
2061b60e31bfSSanjay R Mehta 	tb_discover_dp_resources(tb);
2062e6f81858SRajmohan Mani 	/*
2063e6f81858SRajmohan Mani 	 * If the boot firmware did not create USB 3.x tunnels, create them
2064e6f81858SRajmohan Mani 	 * now for the whole topology.
2065e6f81858SRajmohan Mani 	 */
2066e6f81858SRajmohan Mani 	tb_create_usb3_tunnels(tb->root_switch);
20678afe909bSMika Westerberg 	/* Add DP IN resources for the root switch */
20688afe909bSMika Westerberg 	tb_add_dp_resources(tb->root_switch);
206999cabbb0SMika Westerberg 	/* Make the discovered switches available to the userspace */
207099cabbb0SMika Westerberg 	device_for_each_child(&tb->root_switch->dev, NULL,
207199cabbb0SMika Westerberg 			      tb_scan_finalize_switch);
20729da672a4SAndreas Noever 
2073d6cc51cdSAndreas Noever 	/* Allow tb_handle_hotplug to progress events */
20749d3cce0bSMika Westerberg 	tcm->hotplug_active = true;
20759d3cce0bSMika Westerberg 	return 0;
2076d6cc51cdSAndreas Noever }
2077d6cc51cdSAndreas Noever 
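/*
 * In short, tb_start() brings the domain up in this order: allocate
 * and configure the root switch, announce it, configure and enable
 * TMU, scan for devices, adopt tunnels and DP resources left by the
 * boot firmware, create USB3 tunnels where missing, expose the
 * discovered switches to userspace, and only then let hotplug events
 * through.
 */
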
20789d3cce0bSMika Westerberg static int tb_suspend_noirq(struct tb *tb)
207923dd5bb4SAndreas Noever {
20809d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
20819d3cce0bSMika Westerberg 
2082daa5140fSMika Westerberg 	tb_dbg(tb, "suspending...\n");
208381a2e3e4SMika Westerberg 	tb_disconnect_and_release_dp(tb);
20846ac6faeeSMika Westerberg 	tb_switch_suspend(tb->root_switch, false);
20859d3cce0bSMika Westerberg 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
2086daa5140fSMika Westerberg 	tb_dbg(tb, "suspend finished\n");
20879d3cce0bSMika Westerberg 
20889d3cce0bSMika Westerberg 	return 0;
208923dd5bb4SAndreas Noever }
209023dd5bb4SAndreas Noever 
209191c0c120SMika Westerberg static void tb_restore_children(struct tb_switch *sw)
209291c0c120SMika Westerberg {
209391c0c120SMika Westerberg 	struct tb_port *port;
209491c0c120SMika Westerberg 
20956ac6faeeSMika Westerberg 	/* No need to restore if the router is already unplugged */
20966ac6faeeSMika Westerberg 	if (sw->is_unplugged)
20976ac6faeeSMika Westerberg 		return;
20986ac6faeeSMika Westerberg 
20991a9b6cb8SMika Westerberg 	if (tb_enable_clx(sw))
21001a9b6cb8SMika Westerberg 		tb_sw_warn(sw, "failed to re-enable CL states\n");
2101b017a46dSGil Fine 
2102cf29b9afSRajmohan Mani 	if (tb_enable_tmu(sw))
2103cf29b9afSRajmohan Mani 		tb_sw_warn(sw, "failed to restore TMU configuration\n");
2104cf29b9afSRajmohan Mani 
210591c0c120SMika Westerberg 	tb_switch_for_each_port(sw, port) {
2106284652a4SMika Westerberg 		if (!tb_port_has_remote(port) && !port->xdomain)
210791c0c120SMika Westerberg 			continue;
210891c0c120SMika Westerberg 
2109284652a4SMika Westerberg 		if (port->remote) {
21102ca3263aSMika Westerberg 			tb_switch_lane_bonding_enable(port->remote->sw);
2111de462039SMika Westerberg 			tb_switch_configure_link(port->remote->sw);
211291c0c120SMika Westerberg 
211391c0c120SMika Westerberg 			tb_restore_children(port->remote->sw);
2114284652a4SMika Westerberg 		} else if (port->xdomain) {
2115f9cad07bSMika Westerberg 			tb_port_configure_xdomain(port, port->xdomain);
2116284652a4SMika Westerberg 		}
211791c0c120SMika Westerberg 	}
211891c0c120SMika Westerberg }
211991c0c120SMika Westerberg 
21209d3cce0bSMika Westerberg static int tb_resume_noirq(struct tb *tb)
212123dd5bb4SAndreas Noever {
21229d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
212393f36adeSMika Westerberg 	struct tb_tunnel *tunnel, *n;
212443bddb26SMika Westerberg 	unsigned int usb3_delay = 0;
212543bddb26SMika Westerberg 	LIST_HEAD(tunnels);
21269d3cce0bSMika Westerberg 
2127daa5140fSMika Westerberg 	tb_dbg(tb, "resuming...\n");
212823dd5bb4SAndreas Noever 
212923dd5bb4SAndreas Noever 	/* Remove any PCI devices the firmware might have set up */
2130356b6c4eSMika Westerberg 	tb_switch_reset(tb->root_switch);
213123dd5bb4SAndreas Noever 
213223dd5bb4SAndreas Noever 	tb_switch_resume(tb->root_switch);
213323dd5bb4SAndreas Noever 	tb_free_invalid_tunnels(tb);
213423dd5bb4SAndreas Noever 	tb_free_unplugged_children(tb->root_switch);
213591c0c120SMika Westerberg 	tb_restore_children(tb->root_switch);
213643bddb26SMika Westerberg 
213743bddb26SMika Westerberg 	/*
213843bddb26SMika Westerberg 	 * If we get here from suspend to disk, the boot firmware or the
213943bddb26SMika Westerberg 	 * restore kernel might have created tunnels of their own. Since
214043bddb26SMika Westerberg 	 * we cannot be sure they are usable for us, we find and tear
214143bddb26SMika Westerberg 	 * them down.
214243bddb26SMika Westerberg 	 */
214343bddb26SMika Westerberg 	tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
214443bddb26SMika Westerberg 	list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
214543bddb26SMika Westerberg 		if (tb_tunnel_is_usb3(tunnel))
214643bddb26SMika Westerberg 			usb3_delay = 500;
214743bddb26SMika Westerberg 		tb_tunnel_deactivate(tunnel);
214843bddb26SMika Westerberg 		tb_tunnel_free(tunnel);
214943bddb26SMika Westerberg 	}
215043bddb26SMika Westerberg 
215143bddb26SMika Westerberg 	/* Re-create our tunnels now */
215243bddb26SMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
215343bddb26SMika Westerberg 		/* USB3 requires a delay before it can be re-activated */
215443bddb26SMika Westerberg 		if (tb_tunnel_is_usb3(tunnel)) {
215543bddb26SMika Westerberg 			msleep(usb3_delay);
215643bddb26SMika Westerberg 			/* Only need to do it once */
215743bddb26SMika Westerberg 			usb3_delay = 0;
215843bddb26SMika Westerberg 		}
215993f36adeSMika Westerberg 		tb_tunnel_restart(tunnel);
216043bddb26SMika Westerberg 	}
21619d3cce0bSMika Westerberg 	if (!list_empty(&tcm->tunnel_list)) {
216223dd5bb4SAndreas Noever 		/*
216323dd5bb4SAndreas Noever 		 * The PCIe links need some time to come up.
216423dd5bb4SAndreas Noever 		 * 100ms has been enough in practice.
216523dd5bb4SAndreas Noever 		 */
2166daa5140fSMika Westerberg 		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
216723dd5bb4SAndreas Noever 		msleep(100);
216823dd5bb4SAndreas Noever 	}
216923dd5bb4SAndreas Noever 	 /* Allow tb_handle_hotplug to progress events */
21709d3cce0bSMika Westerberg 	tcm->hotplug_active = true;
2171daa5140fSMika Westerberg 	tb_dbg(tb, "resume finished\n");
21729d3cce0bSMika Westerberg 
21739d3cce0bSMika Westerberg 	return 0;
21749d3cce0bSMika Westerberg }
21759d3cce0bSMika Westerberg 
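/*
 * Recursively remove XDomain connections that were unplugged while the
 * domain was suspended. Returns the number of XDomains removed so the
 * caller can tell whether a rescan is warranted.
 */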
21767ea4cd6bSMika Westerberg static int tb_free_unplugged_xdomains(struct tb_switch *sw)
21777ea4cd6bSMika Westerberg {
2178b433d010SMika Westerberg 	struct tb_port *port;
2179b433d010SMika Westerberg 	int ret = 0;
21807ea4cd6bSMika Westerberg 
2181b433d010SMika Westerberg 	tb_switch_for_each_port(sw, port) {
21827ea4cd6bSMika Westerberg 		if (tb_is_upstream_port(port))
21837ea4cd6bSMika Westerberg 			continue;
21847ea4cd6bSMika Westerberg 		if (port->xdomain && port->xdomain->is_unplugged) {
2185dacb1287SKranthi Kuntala 			tb_retimer_remove_all(port);
21867ea4cd6bSMika Westerberg 			tb_xdomain_remove(port->xdomain);
2187284652a4SMika Westerberg 			tb_port_unconfigure_xdomain(port);
21887ea4cd6bSMika Westerberg 			port->xdomain = NULL;
21897ea4cd6bSMika Westerberg 			ret++;
21907ea4cd6bSMika Westerberg 		} else if (port->remote) {
21917ea4cd6bSMika Westerberg 			ret += tb_free_unplugged_xdomains(port->remote->sw);
21927ea4cd6bSMika Westerberg 		}
21937ea4cd6bSMika Westerberg 	}
21947ea4cd6bSMika Westerberg 
21957ea4cd6bSMika Westerberg 	return ret;
21967ea4cd6bSMika Westerberg }
21977ea4cd6bSMika Westerberg 
2198884e4d57SMika Westerberg static int tb_freeze_noirq(struct tb *tb)
2199884e4d57SMika Westerberg {
2200884e4d57SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
2201884e4d57SMika Westerberg 
2202884e4d57SMika Westerberg 	tcm->hotplug_active = false;
2203884e4d57SMika Westerberg 	return 0;
2204884e4d57SMika Westerberg }
2205884e4d57SMika Westerberg 
2206884e4d57SMika Westerberg static int tb_thaw_noirq(struct tb *tb)
2207884e4d57SMika Westerberg {
2208884e4d57SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
2209884e4d57SMika Westerberg 
2210884e4d57SMika Westerberg 	tcm->hotplug_active = true;
2211884e4d57SMika Westerberg 	return 0;
2212884e4d57SMika Westerberg }
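
/*
 * Note on freeze/thaw: while the hibernation image is being created
 * the hardware stays powered, so unlike tb_suspend_noirq() there is
 * nothing to tear down here; pausing hotplug event handling is
 * sufficient until the image has been written (or discarded).
 */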
2213884e4d57SMika Westerberg 
22147ea4cd6bSMika Westerberg static void tb_complete(struct tb *tb)
22157ea4cd6bSMika Westerberg {
22167ea4cd6bSMika Westerberg 	/*
22177ea4cd6bSMika Westerberg 	 * Release any unplugged XDomains. If another domain was swapped
22187ea4cd6bSMika Westerberg 	 * in place of an unplugged XDomain, we need to run another
22197ea4cd6bSMika Westerberg 	 * rescan to pick it up.
22207ea4cd6bSMika Westerberg 	 */
22217ea4cd6bSMika Westerberg 	mutex_lock(&tb->lock);
22227ea4cd6bSMika Westerberg 	if (tb_free_unplugged_xdomains(tb->root_switch))
22237ea4cd6bSMika Westerberg 		tb_scan_switch(tb->root_switch);
22247ea4cd6bSMika Westerberg 	mutex_unlock(&tb->lock);
22257ea4cd6bSMika Westerberg }
22267ea4cd6bSMika Westerberg 
22276ac6faeeSMika Westerberg static int tb_runtime_suspend(struct tb *tb)
22286ac6faeeSMika Westerberg {
22296ac6faeeSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
22306ac6faeeSMika Westerberg 
22316ac6faeeSMika Westerberg 	mutex_lock(&tb->lock);
22326ac6faeeSMika Westerberg 	tb_switch_suspend(tb->root_switch, true);
22336ac6faeeSMika Westerberg 	tcm->hotplug_active = false;
22346ac6faeeSMika Westerberg 	mutex_unlock(&tb->lock);
22356ac6faeeSMika Westerberg 
22366ac6faeeSMika Westerberg 	return 0;
22376ac6faeeSMika Westerberg }
22386ac6faeeSMika Westerberg 
22396ac6faeeSMika Westerberg static void tb_remove_work(struct work_struct *work)
22406ac6faeeSMika Westerberg {
22416ac6faeeSMika Westerberg 	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
22426ac6faeeSMika Westerberg 	struct tb *tb = tcm_to_tb(tcm);
22436ac6faeeSMika Westerberg 
22446ac6faeeSMika Westerberg 	mutex_lock(&tb->lock);
22456ac6faeeSMika Westerberg 	if (tb->root_switch) {
22466ac6faeeSMika Westerberg 		tb_free_unplugged_children(tb->root_switch);
22476ac6faeeSMika Westerberg 		tb_free_unplugged_xdomains(tb->root_switch);
22486ac6faeeSMika Westerberg 	}
22496ac6faeeSMika Westerberg 	mutex_unlock(&tb->lock);
22506ac6faeeSMika Westerberg }
22516ac6faeeSMika Westerberg 
22526ac6faeeSMika Westerberg static int tb_runtime_resume(struct tb *tb)
22536ac6faeeSMika Westerberg {
22546ac6faeeSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
22556ac6faeeSMika Westerberg 	struct tb_tunnel *tunnel, *n;
22566ac6faeeSMika Westerberg 
22576ac6faeeSMika Westerberg 	mutex_lock(&tb->lock);
22586ac6faeeSMika Westerberg 	tb_switch_resume(tb->root_switch);
22596ac6faeeSMika Westerberg 	tb_free_invalid_tunnels(tb);
22606ac6faeeSMika Westerberg 	tb_restore_children(tb->root_switch);
22616ac6faeeSMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
22626ac6faeeSMika Westerberg 		tb_tunnel_restart(tunnel);
22636ac6faeeSMika Westerberg 	tcm->hotplug_active = true;
22646ac6faeeSMika Westerberg 	mutex_unlock(&tb->lock);
22656ac6faeeSMika Westerberg 
22666ac6faeeSMika Westerberg 	/*
22676ac6faeeSMika Westerberg 	 * Schedule cleanup of any unplugged devices. Run this in a
22686ac6faeeSMika Westerberg 	 * separate thread to avoid a possible deadlock if the device
22696ac6faeeSMika Westerberg 	 * removal runtime resumes the unplugged device.
22706ac6faeeSMika Westerberg 	 */
22716ac6faeeSMika Westerberg 	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
22726ac6faeeSMika Westerberg 	return 0;
22736ac6faeeSMika Westerberg }
22746ac6faeeSMika Westerberg 
22759d3cce0bSMika Westerberg static const struct tb_cm_ops tb_cm_ops = {
22769d3cce0bSMika Westerberg 	.start = tb_start,
22779d3cce0bSMika Westerberg 	.stop = tb_stop,
22789d3cce0bSMika Westerberg 	.suspend_noirq = tb_suspend_noirq,
22799d3cce0bSMika Westerberg 	.resume_noirq = tb_resume_noirq,
2280884e4d57SMika Westerberg 	.freeze_noirq = tb_freeze_noirq,
2281884e4d57SMika Westerberg 	.thaw_noirq = tb_thaw_noirq,
22827ea4cd6bSMika Westerberg 	.complete = tb_complete,
22836ac6faeeSMika Westerberg 	.runtime_suspend = tb_runtime_suspend,
22846ac6faeeSMika Westerberg 	.runtime_resume = tb_runtime_resume,
228581a54b5eSMika Westerberg 	.handle_event = tb_handle_event,
22863da88be2SMika Westerberg 	.disapprove_switch = tb_disconnect_pci,
228799cabbb0SMika Westerberg 	.approve_switch = tb_tunnel_pci,
22887ea4cd6bSMika Westerberg 	.approve_xdomain_paths = tb_approve_xdomain_paths,
22897ea4cd6bSMika Westerberg 	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
22909d3cce0bSMika Westerberg };
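
/*
 * For reference: the domain core (domain.c) invokes these callbacks
 * from its driver model PM hooks. A minimal sketch of that dispatch
 * pattern, shown for illustration only (see domain.c for the real
 * helpers):
 *
 *	static int tb_domain_suspend_noirq(struct device *dev)
 *	{
 *		struct tb *tb = container_of(dev, struct tb, dev);
 *
 *		if (tb->cm_ops->suspend_noirq)
 *			return tb->cm_ops->suspend_noirq(tb);
 *		return 0;
 *	}
 */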
22919d3cce0bSMika Westerberg 
2292349bfe08SMika Westerberg /*
2293349bfe08SMika Westerberg  * During suspend, the Thunderbolt controller is reset and all PCIe
2294349bfe08SMika Westerberg  * tunnels are lost. The NHI driver will try to reestablish all tunnels
2295349bfe08SMika Westerberg  * during resume. This adds device links between the tunneled PCIe
2296349bfe08SMika Westerberg  * downstream ports and the NHI so that the device core will make sure
2297349bfe08SMika Westerberg  * the NHI is resumed before the rest.
2298349bfe08SMika Westerberg  */
2299349bfe08SMika Westerberg static void tb_apple_add_links(struct tb_nhi *nhi)
2300349bfe08SMika Westerberg {
2301349bfe08SMika Westerberg 	struct pci_dev *upstream, *pdev;
2302349bfe08SMika Westerberg 
2303349bfe08SMika Westerberg 	if (!x86_apple_machine)
2304349bfe08SMika Westerberg 		return;
2305349bfe08SMika Westerberg 
2306349bfe08SMika Westerberg 	switch (nhi->pdev->device) {
2307349bfe08SMika Westerberg 	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
2308349bfe08SMika Westerberg 	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
2309349bfe08SMika Westerberg 	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
2310349bfe08SMika Westerberg 	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
2311349bfe08SMika Westerberg 		break;
2312349bfe08SMika Westerberg 	default:
2313349bfe08SMika Westerberg 		return;
2314349bfe08SMika Westerberg 	}
2315349bfe08SMika Westerberg 
2316349bfe08SMika Westerberg 	upstream = pci_upstream_bridge(nhi->pdev);
2317349bfe08SMika Westerberg 	while (upstream) {
2318349bfe08SMika Westerberg 		if (!pci_is_pcie(upstream))
2319349bfe08SMika Westerberg 			return;
2320349bfe08SMika Westerberg 		if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
2321349bfe08SMika Westerberg 			break;
2322349bfe08SMika Westerberg 		upstream = pci_upstream_bridge(upstream);
2323349bfe08SMika Westerberg 	}
2324349bfe08SMika Westerberg 
2325349bfe08SMika Westerberg 	if (!upstream)
2326349bfe08SMika Westerberg 		return;
2327349bfe08SMika Westerberg 
2328349bfe08SMika Westerberg 	/*
2329349bfe08SMika Westerberg 	 * For each hotplug downstream port, add a device link back
2330349bfe08SMika Westerberg 	 * to the NHI so that PCIe tunnels can be re-established after
2331349bfe08SMika Westerberg 	 * sleep.
2332349bfe08SMika Westerberg 	 */
2333349bfe08SMika Westerberg 	for_each_pci_bridge(pdev, upstream->subordinate) {
2334349bfe08SMika Westerberg 		const struct device_link *link;
2335349bfe08SMika Westerberg 
2336349bfe08SMika Westerberg 		if (!pci_is_pcie(pdev))
2337349bfe08SMika Westerberg 			continue;
2338349bfe08SMika Westerberg 		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
2339349bfe08SMika Westerberg 		    !pdev->is_hotplug_bridge)
2340349bfe08SMika Westerberg 			continue;
2341349bfe08SMika Westerberg 
2342349bfe08SMika Westerberg 		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
2343349bfe08SMika Westerberg 				       DL_FLAG_AUTOREMOVE_SUPPLIER |
2344349bfe08SMika Westerberg 				       DL_FLAG_PM_RUNTIME);
2345349bfe08SMika Westerberg 		if (link) {
2346349bfe08SMika Westerberg 			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
2347349bfe08SMika Westerberg 				dev_name(&pdev->dev));
2348349bfe08SMika Westerberg 		} else {
2349349bfe08SMika Westerberg 			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
2350349bfe08SMika Westerberg 				 dev_name(&pdev->dev));
2351349bfe08SMika Westerberg 		}
2352349bfe08SMika Westerberg 	}
2353349bfe08SMika Westerberg }
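
/*
 * Note on the flags above: DL_FLAG_AUTOREMOVE_SUPPLIER makes the
 * device core drop the link automatically when the NHI driver unbinds,
 * and DL_FLAG_PM_RUNTIME extends the supplier/consumer ordering to
 * runtime PM, so the NHI is runtime resumed whenever one of the
 * tunneled downstream ports is.
 */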
2354349bfe08SMika Westerberg 
23559d3cce0bSMika Westerberg struct tb *tb_probe(struct tb_nhi *nhi)
23569d3cce0bSMika Westerberg {
23579d3cce0bSMika Westerberg 	struct tb_cm *tcm;
23589d3cce0bSMika Westerberg 	struct tb *tb;
23599d3cce0bSMika Westerberg 
23607f0a34d7SMika Westerberg 	tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
23619d3cce0bSMika Westerberg 	if (!tb)
23629d3cce0bSMika Westerberg 		return NULL;
23639d3cce0bSMika Westerberg 
2364c6da62a2SMika Westerberg 	if (tb_acpi_may_tunnel_pcie())
236599cabbb0SMika Westerberg 		tb->security_level = TB_SECURITY_USER;
2366c6da62a2SMika Westerberg 	else
2367c6da62a2SMika Westerberg 		tb->security_level = TB_SECURITY_NOPCIE;
2368c6da62a2SMika Westerberg 
23699d3cce0bSMika Westerberg 	tb->cm_ops = &tb_cm_ops;
23709d3cce0bSMika Westerberg 
23719d3cce0bSMika Westerberg 	tcm = tb_priv(tb);
23729d3cce0bSMika Westerberg 	INIT_LIST_HEAD(&tcm->tunnel_list);
23738afe909bSMika Westerberg 	INIT_LIST_HEAD(&tcm->dp_resources);
23746ac6faeeSMika Westerberg 	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
23756ce35635SMika Westerberg 	tb_init_bandwidth_groups(tcm);
23769d3cce0bSMika Westerberg 
2377e0258805SMika Westerberg 	tb_dbg(tb, "using software connection manager\n");
2378e0258805SMika Westerberg 
2379349bfe08SMika Westerberg 	tb_apple_add_links(nhi);
2380349bfe08SMika Westerberg 	tb_acpi_add_links(nhi);
2381349bfe08SMika Westerberg 
23829d3cce0bSMika Westerberg 	return tb;
238323dd5bb4SAndreas Noever }
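
/*
 * Usage sketch, simplified from the NHI driver and shown for
 * illustration only (the exact selection logic lives in nhi.c): the
 * firmware connection manager is tried first and this software
 * connection manager is the fallback:
 *
 *	tb = icm_probe(nhi);
 *	if (!tb)
 *		tb = tb_probe(nhi);
 *	if (!tb)
 *		return -ENODEV;
 *
 *	ret = tb_domain_add(tb);
 */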
2384