xref: /openbmc/linux/drivers/thunderbolt/tb.c (revision c67f926e)
// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

#define TB_TIMEOUT	100	/* ms */
#define MAX_GROUPS	7	/* max Group_ID is 7 */

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 * @remove_work: Work used to remove any unplugged routers after
 *		 runtime resume
 * @groups: Bandwidth groups used in this domain.
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
	struct delayed_work remove_work;
	struct tb_bandwidth_group groups[MAX_GROUPS];
};

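/*
 * The connection manager private data is allocated immediately after
 * struct tb (see tb_priv()), so stepping back one struct tb yields the
 * owning domain structure.
 */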
static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
{
	return ((void *)tcm - sizeof(struct tb));
}

struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

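/*
 * Bandwidth group indices run from 1 to MAX_GROUPS so that the index
 * can be programmed as-is as the DP IN adapter Group_ID.
 */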
static void tb_init_bandwidth_groups(struct tb_cm *tcm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
		struct tb_bandwidth_group *group = &tcm->groups[i];

		group->tb = tcm_to_tb(tcm);
		group->index = i + 1;
		INIT_LIST_HEAD(&group->ports);
	}
}

static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
					   struct tb_port *in)
{
	if (!group || WARN_ON(in->group))
		return;

	in->group = group;
	list_add_tail(&in->group_list, &group->ports);

	tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
}

static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
		struct tb_bandwidth_group *group = &tcm->groups[i];

		if (list_empty(&group->ports))
			return group;
	}

	return NULL;
}

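/*
 * Attach @in to the bandwidth group of an existing DP tunnel running
 * between the same pair of routers, or to the first unused group if
 * there is none.
 */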
static struct tb_bandwidth_group *
tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
			  struct tb_port *out)
{
	struct tb_bandwidth_group *group;
	struct tb_tunnel *tunnel;

	/*
	 * Find all DP tunnels that go through all the same USB4 links
	 * as this one. Because we always set up tunnels the same way we
	 * can just check for the routers at both ends of the tunnels
	 * and if they are the same we have a match.
	 */
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (!tb_tunnel_is_dp(tunnel))
			continue;

		if (tunnel->src_port->sw == in->sw &&
		    tunnel->dst_port->sw == out->sw) {
			group = tunnel->src_port->group;
			if (group) {
				tb_bandwidth_group_attach_port(group, in);
				return group;
			}
		}
	}

	/* Pick up the next available group then */
	group = tb_find_free_bandwidth_group(tcm);
	if (group)
		tb_bandwidth_group_attach_port(group, in);
	else
		tb_port_warn(in, "no available bandwidth groups\n");

	return group;
}

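/*
 * During discovery, prefer the Group_ID the boot firmware has already
 * programmed into the DP IN adapter; fall back to normal group
 * assignment otherwise.
 */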
static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
					struct tb_port *out)
{
	if (usb4_dp_port_bandwidth_mode_enabled(in)) {
		int index, i;

		index = usb4_dp_port_group_id(in);
		for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
			if (tcm->groups[i].index == index) {
				tb_bandwidth_group_attach_port(&tcm->groups[i], in);
				return;
			}
		}
	}

	tb_attach_bandwidth_group(tcm, in, out);
}

static void tb_detach_bandwidth_group(struct tb_port *in)
{
	struct tb_bandwidth_group *group = in->group;

	if (group) {
		in->group = NULL;
		list_del_init(&in->group_list);

		tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
	}
}

static void tb_handle_hotplug(struct work_struct *work);

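/*
 * Queues a hotplug event to be handled in the domain workqueue. The
 * event is dropped silently if the allocation fails.
 */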
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}

static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP IN resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}

static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available discovered\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);
}

static void tb_discover_dp_resources(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_discover_dp_resource(tb, tunnel->dst_port);
	}
}

/* Enables CL states up to the host router */
static int tb_enable_clx(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	unsigned int clx = TB_CL0S | TB_CL1;
	const struct tb_tunnel *tunnel;
	int ret;

	/*
	 * Currently only enable CLx for the first link. This is enough
	 * to allow the CPU to save energy at least on Intel hardware
	 * and makes it slightly simpler to implement. We may change
	 * this in the future to cover the whole topology if it turns
	 * out to be beneficial.
	 */
	while (sw && sw->config.depth > 1)
		sw = tb_switch_parent(sw);

	if (!sw)
		return 0;

	if (sw->config.depth != 1)
		return 0;

	/*
	 * If we are re-enabling then check if there is an active DMA
	 * tunnel and in that case bail out.
	 */
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dma(tunnel)) {
			if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw)))
				return 0;
		}
	}

	/*
	 * Initially try with CL2. If that's not supported by the
	 * topology try with CL0s and CL1 and then give up.
	 */
	ret = tb_switch_clx_enable(sw, clx | TB_CL2);
	if (ret == -EOPNOTSUPP)
		ret = tb_switch_clx_enable(sw, clx);
	return ret == -EOPNOTSUPP ? 0 : ret;
}

/* Disables CL states up to the host router */
static void tb_disable_clx(struct tb_switch *sw)
{
	do {
		if (tb_switch_clx_disable(sw) < 0)
			tb_sw_warn(sw, "failed to disable CL states\n");
		sw = tb_switch_parent(sw);
	} while (sw);
}

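/*
 * device_for_each_child() callback. Skips children that are not
 * routers (tb_to_switch() returns NULL for those) and raises the TMU
 * rate of the rest when they are still in the low resolution mode.
 */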
static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data)
{
	struct tb_switch *sw;

	sw = tb_to_switch(dev);
	if (!sw)
		return 0;

	if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) {
		enum tb_switch_tmu_mode mode;
		int ret;

		if (tb_switch_clx_is_enabled(sw, TB_CL1))
			mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
		else
			mode = TB_SWITCH_TMU_MODE_HIFI_BI;

		ret = tb_switch_tmu_configure(sw, mode);
		if (ret)
			return ret;

		return tb_switch_tmu_enable(sw);
	}

	return 0;
}

static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel)
{
	struct tb_switch *sw;

	if (!tunnel)
		return;

	/*
	 * Once the first DP tunnel is established we change the TMU
	 * accuracy of first depth child routers (and the host router)
	 * to the highest. This is needed for the DP tunneling to work
	 * but also allows CL0s.
	 *
	 * If both routers are v2 then we don't need to do anything as
	 * they are using enhanced TMU mode that allows all CLx.
	 */
	sw = tunnel->tb->root_switch;
	device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy);
}

static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/*
	 * If both routers at the end of the link are v2 we simply
	 * enable the enhanced uni-directional mode. That covers all
	 * the CL states. For v1 and before we need to use the normal
	 * rate to allow CL1 (when supported). Otherwise we keep the TMU
	 * running at the highest accuracy.
	 */
	ret = tb_switch_tmu_configure(sw,
			TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI);
	if (ret == -EOPNOTSUPP) {
		if (tb_switch_clx_is_enabled(sw, TB_CL1))
			ret = tb_switch_tmu_configure(sw,
					TB_SWITCH_TMU_MODE_LOWRES);
		else
			ret = tb_switch_tmu_configure(sw,
					TB_SWITCH_TMU_MODE_HIFI_BI);
	}
	if (ret)
		return ret;

	/* If it is already enabled in the correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}

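/*
 * Picks up tunnels that are already up (e.g. set up by the boot
 * firmware), first from this router's adapters and then recursively
 * from every router below it.
 */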
static void tb_switch_discover_tunnels(struct tb_switch *sw,
				       struct list_head *list,
				       bool alloc_hopids)
{
	struct tb *tb = sw->tb;
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
			tb_increase_tmu_accuracy(tunnel);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
			break;

		default:
			break;
		}

		if (tunnel)
			list_add_tail(&tunnel->list, list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_discover_tunnels(port->remote->sw, list,
						   alloc_hopids);
		}
	}
}

static void tb_discover_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		} else if (tb_tunnel_is_dp(tunnel)) {
			struct tb_port *in = tunnel->src_port;
			struct tb_port *out = tunnel->dst_port;

			/* Keep the domain from powering down */
			pm_runtime_get_sync(&in->sw->dev);
			pm_runtime_get_sync(&out->sw->dev);

			tb_discover_bandwidth_group(tcm, in, out);
		}
	}
}

static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_configure_xdomain(port, xd);
	return tb_lc_configure_xdomain(port);
}

static void tb_port_unconfigure_xdomain(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		usb4_port_unconfigure_xdomain(port);
	else
		tb_lc_unconfigure_xdomain(port);

	tb_port_enable(port->dual_link_port);
}

static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	if (!tb_is_xdomain_enabled())
		return;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_port_configure_xdomain(port, xd);
		tb_xdomain_add(xd);
	}
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down && !tb_usb3_port_is_enabled(down))
		return down;
	return NULL;
}

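/*
 * Returns the first tunnel of @type that starts at @src_port or ends
 * at @dst_port. Either port may be NULL to match only on the other
 * end.
 */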
tb_find_tunnel(struct tb * tb,enum tb_tunnel_type type,struct tb_port * src_port,struct tb_port * dst_port)5300bd680cdSMika Westerberg static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
5310bd680cdSMika Westerberg 					struct tb_port *src_port,
5320bd680cdSMika Westerberg 					struct tb_port *dst_port)
5330bd680cdSMika Westerberg {
5340bd680cdSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
5350bd680cdSMika Westerberg 	struct tb_tunnel *tunnel;
5360bd680cdSMika Westerberg 
5370bd680cdSMika Westerberg 	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
5380bd680cdSMika Westerberg 		if (tunnel->type == type &&
5390bd680cdSMika Westerberg 		    ((src_port && src_port == tunnel->src_port) ||
5400bd680cdSMika Westerberg 		     (dst_port && dst_port == tunnel->dst_port))) {
5410bd680cdSMika Westerberg 			return tunnel;
5420bd680cdSMika Westerberg 		}
5430bd680cdSMika Westerberg 	}
5440bd680cdSMika Westerberg 
5450bd680cdSMika Westerberg 	return NULL;
5460bd680cdSMika Westerberg }
5470bd680cdSMika Westerberg 
tb_find_first_usb3_tunnel(struct tb * tb,struct tb_port * src_port,struct tb_port * dst_port)5480bd680cdSMika Westerberg static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
5490bd680cdSMika Westerberg 						   struct tb_port *src_port,
5500bd680cdSMika Westerberg 						   struct tb_port *dst_port)
5510bd680cdSMika Westerberg {
5520bd680cdSMika Westerberg 	struct tb_port *port, *usb3_down;
5530bd680cdSMika Westerberg 	struct tb_switch *sw;
5540bd680cdSMika Westerberg 
5550bd680cdSMika Westerberg 	/* Pick the router that is deepest in the topology */
5560bd680cdSMika Westerberg 	if (dst_port->sw->config.depth > src_port->sw->config.depth)
5570bd680cdSMika Westerberg 		sw = dst_port->sw;
5580bd680cdSMika Westerberg 	else
5590bd680cdSMika Westerberg 		sw = src_port->sw;
5600bd680cdSMika Westerberg 
5610bd680cdSMika Westerberg 	/* Can't be the host router */
5620bd680cdSMika Westerberg 	if (sw == tb->root_switch)
5630bd680cdSMika Westerberg 		return NULL;
5640bd680cdSMika Westerberg 
5650bd680cdSMika Westerberg 	/* Find the downstream USB4 port that leads to this router */
5660bd680cdSMika Westerberg 	port = tb_port_at(tb_route(sw), tb->root_switch);
5670bd680cdSMika Westerberg 	/* Find the corresponding host router USB3 downstream port */
5680bd680cdSMika Westerberg 	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
5690bd680cdSMika Westerberg 	if (!usb3_down)
5700bd680cdSMika Westerberg 		return NULL;
5710bd680cdSMika Westerberg 
5720bd680cdSMika Westerberg 	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
5730bd680cdSMika Westerberg }
5740bd680cdSMika Westerberg 
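/*
 * Returns in @available_up and @available_down the bandwidth (in Mb/s)
 * still available for a new tunnel between @src_port and @dst_port:
 * the minimum over every link on the path after existing DP and USB3
 * consumption has been subtracted.
 */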
static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
	struct tb_port *dst_port, int *available_up, int *available_down)
{
	int usb3_consumed_up, usb3_consumed_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_port *port;

	tb_dbg(tb, "calculating available bandwidth between %llx:%u <-> %llx:%u\n",
	       tb_route(src_port->sw), src_port->port, tb_route(dst_port->sw),
	       dst_port->port);

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel && tunnel->src_port != src_port &&
	    tunnel->dst_port != dst_port) {
		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
						   &usb3_consumed_down);
		if (ret)
			return ret;
	} else {
		usb3_consumed_up = 0;
		usb3_consumed_down = 0;
	}

	/* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */
	*available_up = *available_down = 120000;
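	/* That is three Gen 4 lanes of 40 Gb/s each in one direction */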

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int link_speed, link_width, up_bw, down_bw;

		if (!tb_port_is_null(port))
			continue;

		if (tb_is_upstream_port(port)) {
			link_speed = port->sw->link_speed;
			/*
			 * sw->link_width is from upstream perspective
			 * so we use the opposite for downstream of the
			 * host router.
			 */
			if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
				up_bw = link_speed * 3 * 1000;
				down_bw = link_speed * 1 * 1000;
			} else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
				up_bw = link_speed * 1 * 1000;
				down_bw = link_speed * 3 * 1000;
			} else {
				up_bw = link_speed * port->sw->link_width * 1000;
				down_bw = up_bw;
			}
		} else {
			link_speed = tb_port_get_link_speed(port);
			if (link_speed < 0)
				return link_speed;

			link_width = tb_port_get_link_width(port);
			if (link_width < 0)
				return link_width;

			if (link_width == TB_LINK_WIDTH_ASYM_TX) {
				up_bw = link_speed * 1 * 1000;
				down_bw = link_speed * 3 * 1000;
			} else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
				up_bw = link_speed * 3 * 1000;
				down_bw = link_speed * 1 * 1000;
			} else {
				up_bw = link_speed * link_width * 1000;
				down_bw = up_bw;
			}
		}

		/* Leave 10% guard band */
		up_bw -= up_bw / 10;
		down_bw -= down_bw / 10;
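		/*
		 * For example a symmetric dual-lane Gen 3 link (2 x 20
		 * Gb/s) starts from 40000 Mb/s per direction and is
		 * left with 36000 Mb/s after the guard band.
		 */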

		tb_port_dbg(port, "link total bandwidth %d/%d Mb/s\n", up_bw,
			    down_bw);

		/*
		 * Find all DP tunnels that cross the port and reduce
		 * their consumed bandwidth from the available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int dp_consumed_up, dp_consumed_down;

			if (tb_tunnel_is_invalid(tunnel))
				continue;

			if (!tb_tunnel_is_dp(tunnel))
				continue;

			if (!tb_tunnel_port_on_path(tunnel, port))
				continue;

			/*
			 * Ignore the DP tunnel between src_port and
			 * dst_port because it is the same tunnel and we
			 * may be re-calculating estimated bandwidth.
			 */
			if (tunnel->src_port == src_port &&
			    tunnel->dst_port == dst_port)
				continue;

			ret = tb_tunnel_consumed_bandwidth(tunnel,
							   &dp_consumed_up,
							   &dp_consumed_down);
			if (ret)
				return ret;

			up_bw -= dp_consumed_up;
			down_bw -= dp_consumed_down;
		}

		/*
		 * If USB3 is tunneled from the host router down to the
		 * branch leading to port we need to take USB3 consumed
		 * bandwidth into account regardless of whether it
		 * actually crosses the port.
		 */
		up_bw -= usb3_consumed_up;
		down_bw -= usb3_consumed_down;

		if (up_bw < *available_up)
			*available_up = up_bw;
		if (down_bw < *available_down)
			*available_down = down_bw;
	}

	if (*available_up < 0)
		*available_up = 0;
	if (*available_down < 0)
		*available_down = 0;

	return 0;
}

static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
}

static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
				      struct tb_port *dst_port)
{
	int ret, available_up, available_down;
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return;

	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");

	/*
	 * Calculate available bandwidth for the first hop USB3 tunnel.
	 * That determines the whole USB3 bandwidth for this branch.
	 */
	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
				     &available_up, &available_down);
	if (ret) {
		tb_warn(tb, "failed to calculate available bandwidth\n");
		return;
	}

	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
	       available_up, available_down);

	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
}

static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_usb3()) {
		tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
		return 0;
	}

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

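	/* USB3 can be tunneled only over a USB4 link */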
	if (!sw->link_usb4)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_switch_downstream_port(sw);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;

		/* Make all unused bandwidth available for the new tunnel */
		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
		if (ret)
			return ret;
	}

	ret = tb_available_bandwidth(tb, down, up, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return ret;
}

static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (!tb_acpi_may_tunnel_usb3())
		return 0;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_scan_port(struct tb_port *port);

/*
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	pm_runtime_get_sync(&sw->dev);

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}

/*
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	bool discovery = false;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */

	if (port->usb4)
		pm_runtime_get_sync(&port->usb4->dev);

	if (tb_wait_for_port(port, false) <= 0)
		goto out_rpm_put;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		goto out_rpm_put;
	}

	tb_retimer_scan(port, true);

	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		goto out_rpm_put;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		goto out_rpm_put;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		tb_port_unconfigure_xdomain(port);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active) {
		dev_set_uevent_suppress(&sw->dev, true);
		discovery = true;
	}

	/*
	 * At the moment we can support runtime PM only for Thunderbolt
	 * 2 and beyond (devices with LC).
	 */
	sw->rpm = sw->generation > 1;

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		goto out_rpm_put;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	tb_switch_lane_bonding_enable(sw);
	/* Set the link configured */
	tb_switch_configure_link(sw);
	/*
	 * CL0s and CL1 are enabled and supported together.
	 * Silently ignore CLx enabling in case CLx is not supported.
	 */
	if (discovery)
		tb_sw_dbg(sw, "discovery, not touching CL states\n");
	else if (tb_enable_clx(sw))
		tb_sw_warn(sw, "failed to enable CL states\n");

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/*
	 * Configuration valid needs to be set after the TMU has been
	 * enabled for the upstream port of the router so we do it here.
	 */
	tb_switch_configuration_valid(sw);

	/* Scan upstream retimers */
	tb_retimer_scan(upstream_port, true);

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * any new.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);
	tb_scan_switch(sw);

out_rpm_put:
	if (port->usb4) {
		pm_runtime_mark_last_busy(&port->usb4->dev);
		pm_runtime_put_autosuspend(&port->usb4->dev);
	}
}

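/*
 * Deactivates the tunnel, releases whatever it was holding (DP IN
 * resource, bandwidth group, runtime PM references) and frees it.
 */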
static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	struct tb_port *src_port, *dst_port;
	struct tb *tb;

	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	tb = tunnel->tb;
	src_port = tunnel->src_port;
	dst_port = tunnel->dst_port;

	switch (tunnel->type) {
	case TB_TUNNEL_DP:
		tb_detach_bandwidth_group(src_port);
		/*
		 * In case of DP tunnel make sure the DP IN resource is
		 * deallocated properly.
		 */
		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
		/* Now we can allow the domain to runtime suspend again */
		pm_runtime_mark_last_busy(&dst_port->sw->dev);
		pm_runtime_put_autosuspend(&dst_port->sw->dev);
		pm_runtime_mark_last_busy(&src_port->sw->dev);
		pm_runtime_put_autosuspend(&src_port->sw->dev);
		fallthrough;

	case TB_TUNNEL_USB3:
		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
		break;

	default:
		/*
		 * PCIe and DMA tunnels do not consume guaranteed
		 * bandwidth.
		 */
		break;
	}

	tb_tunnel_free(tunnel);
}

1065877e50b3SLee Jones /*
10663364f0c1SAndreas Noever  * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
10673364f0c1SAndreas Noever  */
10683364f0c1SAndreas Noever static void tb_free_invalid_tunnels(struct tb *tb)
10693364f0c1SAndreas Noever {
10709d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
107193f36adeSMika Westerberg 	struct tb_tunnel *tunnel;
107293f36adeSMika Westerberg 	struct tb_tunnel *n;
10739d3cce0bSMika Westerberg 
10749d3cce0bSMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
10758afe909bSMika Westerberg 		if (tb_tunnel_is_invalid(tunnel))
10768afe909bSMika Westerberg 			tb_deactivate_and_free_tunnel(tunnel);
10773364f0c1SAndreas Noever 	}
10783364f0c1SAndreas Noever }
10793364f0c1SAndreas Noever 
1080877e50b3SLee Jones /*
108123dd5bb4SAndreas Noever  * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
108223dd5bb4SAndreas Noever  */
108323dd5bb4SAndreas Noever static void tb_free_unplugged_children(struct tb_switch *sw)
108423dd5bb4SAndreas Noever {
1085b433d010SMika Westerberg 	struct tb_port *port;
1086dfe40ca4SMika Westerberg 
1087b433d010SMika Westerberg 	tb_switch_for_each_port(sw, port) {
1088dfe40ca4SMika Westerberg 		if (!tb_port_has_remote(port))
108923dd5bb4SAndreas Noever 			continue;
1090dfe40ca4SMika Westerberg 
109123dd5bb4SAndreas Noever 		if (port->remote->sw->is_unplugged) {
1092dacb1287SKranthi Kuntala 			tb_retimer_remove_all(port);
10938afe909bSMika Westerberg 			tb_remove_dp_resources(port->remote->sw);
1094de462039SMika Westerberg 			tb_switch_unconfigure_link(port->remote->sw);
109591c0c120SMika Westerberg 			tb_switch_lane_bonding_disable(port->remote->sw);
1096bfe778acSMika Westerberg 			tb_switch_remove(port->remote->sw);
109723dd5bb4SAndreas Noever 			port->remote = NULL;
1098dfe40ca4SMika Westerberg 			if (port->dual_link_port)
1099dfe40ca4SMika Westerberg 				port->dual_link_port->remote = NULL;
110023dd5bb4SAndreas Noever 		} else {
110123dd5bb4SAndreas Noever 			tb_free_unplugged_children(port->remote->sw);
110223dd5bb4SAndreas Noever 		}
110323dd5bb4SAndreas Noever 	}
110423dd5bb4SAndreas Noever }
110523dd5bb4SAndreas Noever 
110699cabbb0SMika Westerberg static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
110799cabbb0SMika Westerberg 					 const struct tb_port *port)
11083364f0c1SAndreas Noever {
1109b0407983SMika Westerberg 	struct tb_port *down = NULL;
1110b0407983SMika Westerberg 
111199cabbb0SMika Westerberg 	/*
111299cabbb0SMika Westerberg 	 * To keep plugged-in devices consistently in the same PCIe
1113b0407983SMika Westerberg 	 * hierarchy, do the mapping here for switch downstream PCIe ports.
111499cabbb0SMika Westerberg 	 */
1115b0407983SMika Westerberg 	if (tb_switch_is_usb4(sw)) {
1116b0407983SMika Westerberg 		down = usb4_switch_map_pcie_down(sw, port);
1117b0407983SMika Westerberg 	} else if (!tb_route(sw)) {
111899cabbb0SMika Westerberg 		int phy_port = tb_phy_port_from_link(port->port);
111999cabbb0SMika Westerberg 		int index;
112099cabbb0SMika Westerberg 
112199cabbb0SMika Westerberg 		/*
112299cabbb0SMika Westerberg 		 * Hard-coded Thunderbolt port to PCIe down port mapping
112399cabbb0SMika Westerberg 		 * per controller.
112499cabbb0SMika Westerberg 		 */
11257bffd97eSMika Westerberg 		if (tb_switch_is_cactus_ridge(sw) ||
11267bffd97eSMika Westerberg 		    tb_switch_is_alpine_ridge(sw))
112799cabbb0SMika Westerberg 			index = !phy_port ? 6 : 7;
112817a8f815SMika Westerberg 		else if (tb_switch_is_falcon_ridge(sw))
112999cabbb0SMika Westerberg 			index = !phy_port ? 6 : 8;
11307bffd97eSMika Westerberg 		else if (tb_switch_is_titan_ridge(sw))
11317bffd97eSMika Westerberg 			index = !phy_port ? 8 : 9;
113299cabbb0SMika Westerberg 		else
113399cabbb0SMika Westerberg 			goto out;
113499cabbb0SMika Westerberg 
113599cabbb0SMika Westerberg 		/* Validate the hard-coding */
113699cabbb0SMika Westerberg 		if (WARN_ON(index > sw->config.max_port_number))
113799cabbb0SMika Westerberg 			goto out;
1138b0407983SMika Westerberg 
1139b0407983SMika Westerberg 		down = &sw->ports[index];
1140b0407983SMika Westerberg 	}
1141b0407983SMika Westerberg 
1142b0407983SMika Westerberg 	if (down) {
1143b0407983SMika Westerberg 		if (WARN_ON(!tb_port_is_pcie_down(down)))
114499cabbb0SMika Westerberg 			goto out;
11459cac51a0SMika Westerberg 		if (tb_pci_port_is_enabled(down))
114699cabbb0SMika Westerberg 			goto out;
114799cabbb0SMika Westerberg 
1148b0407983SMika Westerberg 		return down;
114999cabbb0SMika Westerberg 	}
115099cabbb0SMika Westerberg 
115199cabbb0SMika Westerberg out:
1152e78db6f0SMika Westerberg 	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
115399cabbb0SMika Westerberg }
115499cabbb0SMika Westerberg 
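/*
 * For reference, the hard-coded legacy mapping used above, expressed
 * as a lookup table indexed by [controller][phy_port]. This is an
 * illustrative sketch only (the driver keeps the if/else chain in
 * tb_find_pcie_down()) and the array name is hypothetical:
 */
static const int tb_legacy_pcie_down_index[][2] __maybe_unused = {
	{ 6, 7 },	/* Cactus Ridge / Alpine Ridge */
	{ 6, 8 },	/* Falcon Ridge */
	{ 8, 9 },	/* Titan Ridge */
};
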
11556ce35635SMika Westerberg static void
11566ce35635SMika Westerberg tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
11576ce35635SMika Westerberg {
11586ce35635SMika Westerberg 	struct tb_tunnel *first_tunnel;
11596ce35635SMika Westerberg 	struct tb *tb = group->tb;
11606ce35635SMika Westerberg 	struct tb_port *in;
11616ce35635SMika Westerberg 	int ret;
11626ce35635SMika Westerberg 
11636ce35635SMika Westerberg 	tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
11646ce35635SMika Westerberg 	       group->index);
11656ce35635SMika Westerberg 
11666ce35635SMika Westerberg 	first_tunnel = NULL;
11676ce35635SMika Westerberg 	list_for_each_entry(in, &group->ports, group_list) {
11686ce35635SMika Westerberg 		int estimated_bw, estimated_up, estimated_down;
11696ce35635SMika Westerberg 		struct tb_tunnel *tunnel;
11706ce35635SMika Westerberg 		struct tb_port *out;
11716ce35635SMika Westerberg 
11728d73f6b8SMika Westerberg 		if (!usb4_dp_port_bandwidth_mode_enabled(in))
11736ce35635SMika Westerberg 			continue;
11746ce35635SMika Westerberg 
11756ce35635SMika Westerberg 		tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
11766ce35635SMika Westerberg 		if (WARN_ON(!tunnel))
11776ce35635SMika Westerberg 			break;
11786ce35635SMika Westerberg 
11796ce35635SMika Westerberg 		if (!first_tunnel) {
11806ce35635SMika Westerberg 			/*
11816ce35635SMika Westerberg 			 * Since USB3 bandwidth is shared by all DP
11826ce35635SMika Westerberg 			 * tunnels under the host router USB4 port, even
11836ce35635SMika Westerberg 			 * if they do not begin from the host router, we
11846ce35635SMika Westerberg 			 * can release USB3 bandwidth just once and not
11856ce35635SMika Westerberg 			 * for each tunnel separately.
11866ce35635SMika Westerberg 			 */
11876ce35635SMika Westerberg 			first_tunnel = tunnel;
11886ce35635SMika Westerberg 			ret = tb_release_unused_usb3_bandwidth(tb,
11896ce35635SMika Westerberg 				first_tunnel->src_port, first_tunnel->dst_port);
11906ce35635SMika Westerberg 			if (ret) {
11916ce35635SMika Westerberg 				tb_port_warn(in,
11926ce35635SMika Westerberg 					"failed to release unused bandwidth\n");
11936ce35635SMika Westerberg 				break;
11946ce35635SMika Westerberg 			}
11956ce35635SMika Westerberg 		}
11966ce35635SMika Westerberg 
11976ce35635SMika Westerberg 		out = tunnel->dst_port;
11986ce35635SMika Westerberg 		ret = tb_available_bandwidth(tb, in, out, &estimated_up,
11996ce35635SMika Westerberg 					     &estimated_down);
12006ce35635SMika Westerberg 		if (ret) {
12016ce35635SMika Westerberg 			tb_port_warn(in,
12026ce35635SMika Westerberg 				"failed to re-calculate estimated bandwidth\n");
12036ce35635SMika Westerberg 			break;
12046ce35635SMika Westerberg 		}
12056ce35635SMika Westerberg 
12066ce35635SMika Westerberg 		/*
12076ce35635SMika Westerberg 		 * Estimated bandwidth includes:
12086ce35635SMika Westerberg 		 *  - already allocated bandwidth for the DP tunnel
12096ce35635SMika Westerberg 		 *  - available bandwidth along the path
12106ce35635SMika Westerberg 		 *  - bandwidth allocated for USB 3.x but not used.
12116ce35635SMika Westerberg 		 */
12126ce35635SMika Westerberg 		tb_port_dbg(in, "re-calculated estimated bandwidth %u/%u Mb/s\n",
12136ce35635SMika Westerberg 			    estimated_up, estimated_down);
12146ce35635SMika Westerberg 
12156ce35635SMika Westerberg 		if (in->sw->config.depth < out->sw->config.depth)
12166ce35635SMika Westerberg 			estimated_bw = estimated_down;
12176ce35635SMika Westerberg 		else
12186ce35635SMika Westerberg 			estimated_bw = estimated_up;
12196ce35635SMika Westerberg 
12208d73f6b8SMika Westerberg 		if (usb4_dp_port_set_estimated_bandwidth(in, estimated_bw))
12216ce35635SMika Westerberg 			tb_port_warn(in, "failed to update estimated bandwidth\n");
12226ce35635SMika Westerberg 	}
12236ce35635SMika Westerberg 
12246ce35635SMika Westerberg 	if (first_tunnel)
12256ce35635SMika Westerberg 		tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
12266ce35635SMika Westerberg 					  first_tunnel->dst_port);
12276ce35635SMika Westerberg 
12286ce35635SMika Westerberg 	tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
12296ce35635SMika Westerberg }
12306ce35635SMika Westerberg 
12316ce35635SMika Westerberg static void tb_recalc_estimated_bandwidth(struct tb *tb)
12326ce35635SMika Westerberg {
12336ce35635SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
12346ce35635SMika Westerberg 	int i;
12356ce35635SMika Westerberg 
12366ce35635SMika Westerberg 	tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
12376ce35635SMika Westerberg 
12386ce35635SMika Westerberg 	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
12396ce35635SMika Westerberg 		struct tb_bandwidth_group *group = &tcm->groups[i];
12406ce35635SMika Westerberg 
12416ce35635SMika Westerberg 		if (!list_empty(&group->ports))
12426ce35635SMika Westerberg 			tb_recalc_estimated_bandwidth_for_group(group);
12436ce35635SMika Westerberg 	}
12446ce35635SMika Westerberg 
12456ce35635SMika Westerberg 	tb_dbg(tb, "bandwidth re-calculation done\n");
12466ce35635SMika Westerberg }
12476ce35635SMika Westerberg 
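/*
 * A minimal sketch of the depth-based direction selection used above
 * (hypothetical helper, not part of the driver): when the DP IN
 * adapter sits closer to the host router than the DP OUT adapter the
 * interesting direction is downstream, otherwise upstream.
 */
static inline int tb_estimated_bw_for_dir(const struct tb_port *in,
					  const struct tb_port *out,
					  int up, int down)
{
	return in->sw->config.depth < out->sw->config.depth ? down : up;
}
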
1248e876f34aSMika Westerberg static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
1249e876f34aSMika Westerberg {
1250e876f34aSMika Westerberg 	struct tb_port *host_port, *port;
1251e876f34aSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
1252e876f34aSMika Westerberg 
1253e876f34aSMika Westerberg 	host_port = tb_route(in->sw) ?
1254e876f34aSMika Westerberg 		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
1255e876f34aSMika Westerberg 
1256e876f34aSMika Westerberg 	list_for_each_entry(port, &tcm->dp_resources, list) {
1257e876f34aSMika Westerberg 		if (!tb_port_is_dpout(port))
1258e876f34aSMika Westerberg 			continue;
1259e876f34aSMika Westerberg 
1260e876f34aSMika Westerberg 		if (tb_port_is_enabled(port)) {
1261b0ef48fcSMika Westerberg 			tb_port_dbg(port, "DP OUT in use\n");
1262e876f34aSMika Westerberg 			continue;
1263e876f34aSMika Westerberg 		}
1264e876f34aSMika Westerberg 
1265e876f34aSMika Westerberg 		tb_port_dbg(port, "DP OUT available\n");
1266e876f34aSMika Westerberg 
1267e876f34aSMika Westerberg 		/*
1268e876f34aSMika Westerberg 		 * Keep the DP tunnel under the topology starting from
1269e876f34aSMika Westerberg 		 * the same host router downstream port.
1270e876f34aSMika Westerberg 		 */
1271e876f34aSMika Westerberg 		if (host_port && tb_route(port->sw)) {
1272e876f34aSMika Westerberg 			struct tb_port *p;
1273e876f34aSMika Westerberg 
1274e876f34aSMika Westerberg 			p = tb_port_at(tb_route(port->sw), tb->root_switch);
1275e876f34aSMika Westerberg 			if (p != host_port)
1276e876f34aSMika Westerberg 				continue;
1277e876f34aSMika Westerberg 		}
1278e876f34aSMika Westerberg 
1279e876f34aSMika Westerberg 		return port;
1280e876f34aSMika Westerberg 	}
1281e876f34aSMika Westerberg 
1282e876f34aSMika Westerberg 	return NULL;
1283e876f34aSMika Westerberg }
1284e876f34aSMika Westerberg 
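/*
 * Example of the constraint above (illustrative): if the DP IN adapter
 * is reachable through host router downstream port A but a candidate
 * DP OUT adapter sits behind downstream port B, then
 * tb_port_at(tb_route(port->sw), tb->root_switch) != host_port and the
 * candidate is skipped, so the whole tunnel stays under a single host
 * router downstream port.
 */
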
12858afe909bSMika Westerberg static void tb_tunnel_dp(struct tb *tb)
12864f807e47SMika Westerberg {
12879d2d0a5cSMika Westerberg 	int available_up, available_down, ret, link_nr;
12884f807e47SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
12898afe909bSMika Westerberg 	struct tb_port *port, *in, *out;
12904f807e47SMika Westerberg 	struct tb_tunnel *tunnel;
12914f807e47SMika Westerberg 
1292c6da62a2SMika Westerberg 	if (!tb_acpi_may_tunnel_dp()) {
1293c6da62a2SMika Westerberg 		tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
1294c6da62a2SMika Westerberg 		return;
1295c6da62a2SMika Westerberg 	}
1296c6da62a2SMika Westerberg 
12978afe909bSMika Westerberg 	/*
12988afe909bSMika Westerberg 	 * Find a pair of inactive DP IN and DP OUT adapters and then
12998afe909bSMika Westerberg 	 * establish a DP tunnel between them.
13008afe909bSMika Westerberg 	 */
13018afe909bSMika Westerberg 	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
13024f807e47SMika Westerberg 
13038afe909bSMika Westerberg 	in = NULL;
13048afe909bSMika Westerberg 	out = NULL;
13058afe909bSMika Westerberg 	list_for_each_entry(port, &tcm->dp_resources, list) {
1306e876f34aSMika Westerberg 		if (!tb_port_is_dpin(port))
1307e876f34aSMika Westerberg 			continue;
1308e876f34aSMika Westerberg 
13098afe909bSMika Westerberg 		if (tb_port_is_enabled(port)) {
1310b0ef48fcSMika Westerberg 			tb_port_dbg(port, "DP IN in use\n");
13118afe909bSMika Westerberg 			continue;
13128afe909bSMika Westerberg 		}
13138afe909bSMika Westerberg 
1314e876f34aSMika Westerberg 		tb_port_dbg(port, "DP IN available\n");
13158afe909bSMika Westerberg 
1316e876f34aSMika Westerberg 		out = tb_find_dp_out(tb, port);
1317e876f34aSMika Westerberg 		if (out) {
13188afe909bSMika Westerberg 			in = port;
1319e876f34aSMika Westerberg 			break;
1320e876f34aSMika Westerberg 		}
13218afe909bSMika Westerberg 	}
13228afe909bSMika Westerberg 
13238afe909bSMika Westerberg 	if (!in) {
13248afe909bSMika Westerberg 		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
13258afe909bSMika Westerberg 		return;
13268afe909bSMika Westerberg 	}
13278afe909bSMika Westerberg 	if (!out) {
13288afe909bSMika Westerberg 		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
13298afe909bSMika Westerberg 		return;
13308afe909bSMika Westerberg 	}
13318afe909bSMika Westerberg 
13326ac6faeeSMika Westerberg 	/*
13339d2d0a5cSMika Westerberg 	 * This is only applicable to links that are not bonded (so
13349d2d0a5cSMika Westerberg 	 * when Thunderbolt 1 hardware is involved somewhere in the
13359d2d0a5cSMika Westerberg 	 * topology). For these, try to share the DP bandwidth between
13369d2d0a5cSMika Westerberg 	 * the two lanes.
13379d2d0a5cSMika Westerberg 	 */
13389d2d0a5cSMika Westerberg 	link_nr = 1;
13399d2d0a5cSMika Westerberg 	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
13409d2d0a5cSMika Westerberg 		if (tb_tunnel_is_dp(tunnel)) {
13419d2d0a5cSMika Westerberg 			link_nr = 0;
13429d2d0a5cSMika Westerberg 			break;
13439d2d0a5cSMika Westerberg 		}
13449d2d0a5cSMika Westerberg 	}
13459d2d0a5cSMika Westerberg 
13469d2d0a5cSMika Westerberg 	/*
13476ac6faeeSMika Westerberg 	 * The DP stream needs the domain to be active, so runtime
13486ac6faeeSMika Westerberg 	 * resume both ends of the tunnel.
13496ac6faeeSMika Westerberg 	 *
13506ac6faeeSMika Westerberg 	 * This should bring the routers in the middle active as well
13516ac6faeeSMika Westerberg 	 * and keep the domain from runtime suspending while the DP
13526ac6faeeSMika Westerberg 	 * tunnel is active.
13536ac6faeeSMika Westerberg 	 */
13546ac6faeeSMika Westerberg 	pm_runtime_get_sync(&in->sw->dev);
13556ac6faeeSMika Westerberg 	pm_runtime_get_sync(&out->sw->dev);
13566ac6faeeSMika Westerberg 
13578afe909bSMika Westerberg 	if (tb_switch_alloc_dp_resource(in->sw, in)) {
13588afe909bSMika Westerberg 		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
13596ac6faeeSMika Westerberg 		goto err_rpm_put;
13608afe909bSMika Westerberg 	}
13614f807e47SMika Westerberg 
13626ce35635SMika Westerberg 	if (!tb_attach_bandwidth_group(tcm, in, out))
13636ce35635SMika Westerberg 		goto err_dealloc_dp;
13646ce35635SMika Westerberg 
13650bd680cdSMika Westerberg 	/* Make all unused USB3 bandwidth available for the new DP tunnel */
13660bd680cdSMika Westerberg 	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
13670bd680cdSMika Westerberg 	if (ret) {
13680bd680cdSMika Westerberg 		tb_warn(tb, "failed to release unused bandwidth\n");
13696ce35635SMika Westerberg 		goto err_detach_group;
1370a11b88adSMika Westerberg 	}
1371a11b88adSMika Westerberg 
13726ce35635SMika Westerberg 	ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
13730bd680cdSMika Westerberg 	if (ret)
13746ce35635SMika Westerberg 		goto err_reclaim_usb;
1375a11b88adSMika Westerberg 
13760bd680cdSMika Westerberg 	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
13770bd680cdSMika Westerberg 	       available_up, available_down);
13780bd680cdSMika Westerberg 
13799d2d0a5cSMika Westerberg 	tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
13809d2d0a5cSMika Westerberg 				    available_down);
13814f807e47SMika Westerberg 	if (!tunnel) {
13828afe909bSMika Westerberg 		tb_port_dbg(out, "could not allocate DP tunnel\n");
13836ce35635SMika Westerberg 		goto err_reclaim_usb;
13844f807e47SMika Westerberg 	}
13854f807e47SMika Westerberg 
13864f807e47SMika Westerberg 	if (tb_tunnel_activate(tunnel)) {
13874f807e47SMika Westerberg 		tb_port_info(out, "DP tunnel activation failed, aborting\n");
13880bd680cdSMika Westerberg 		goto err_free;
13894f807e47SMika Westerberg 	}
13904f807e47SMika Westerberg 
13914f807e47SMika Westerberg 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
13920bd680cdSMika Westerberg 	tb_reclaim_usb3_bandwidth(tb, in, out);
13936ce35635SMika Westerberg 
13946ce35635SMika Westerberg 	/* Update the domain with the new bandwidth estimation */
13956ce35635SMika Westerberg 	tb_recalc_estimated_bandwidth(tb);
13966ce35635SMika Westerberg 
13973084b48fSGil Fine 	/*
13983084b48fSGil Fine 	 * If a DP tunnel exists, change the TMU mode of the host
13993084b48fSGil Fine 	 * router's first-level children to HiFi for CL0s to work.
14003084b48fSGil Fine 	 */
14017d283f41SMika Westerberg 	tb_increase_tmu_accuracy(tunnel);
14028afe909bSMika Westerberg 	return;
14038afe909bSMika Westerberg 
14040bd680cdSMika Westerberg err_free:
14050bd680cdSMika Westerberg 	tb_tunnel_free(tunnel);
14066ce35635SMika Westerberg err_reclaim_usb:
14070bd680cdSMika Westerberg 	tb_reclaim_usb3_bandwidth(tb, in, out);
14086ce35635SMika Westerberg err_detach_group:
14096ce35635SMika Westerberg 	tb_detach_bandwidth_group(in);
14100bd680cdSMika Westerberg err_dealloc_dp:
14118afe909bSMika Westerberg 	tb_switch_dealloc_dp_resource(in->sw, in);
14126ac6faeeSMika Westerberg err_rpm_put:
14136ac6faeeSMika Westerberg 	pm_runtime_mark_last_busy(&out->sw->dev);
14146ac6faeeSMika Westerberg 	pm_runtime_put_autosuspend(&out->sw->dev);
14156ac6faeeSMika Westerberg 	pm_runtime_mark_last_busy(&in->sw->dev);
14166ac6faeeSMika Westerberg 	pm_runtime_put_autosuspend(&in->sw->dev);
14174f807e47SMika Westerberg }
14184f807e47SMika Westerberg 
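/*
 * Note the unwind order in tb_tunnel_dp() above: the error labels
 * release resources in the exact reverse order of acquisition
 * (illustrative summary, not driver code):
 *
 *	pm_runtime_get_sync()              <-> err_rpm_put
 *	tb_switch_alloc_dp_resource()      <-> err_dealloc_dp
 *	tb_attach_bandwidth_group()        <-> err_detach_group
 *	tb_release_unused_usb3_bandwidth() <-> err_reclaim_usb
 *	tb_tunnel_alloc_dp()               <-> err_free
 */
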
1419d8518f05SMika Westerberg static void tb_enter_redrive(struct tb_port *port)
1420d8518f05SMika Westerberg {
1421d8518f05SMika Westerberg 	struct tb_switch *sw = port->sw;
1422d8518f05SMika Westerberg 
1423d8518f05SMika Westerberg 	if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
1424d8518f05SMika Westerberg 		return;
1425d8518f05SMika Westerberg 
1426d8518f05SMika Westerberg 	/*
1427d8518f05SMika Westerberg 	 * If we get hot-unplug for the DP IN port of the host router
1428d8518f05SMika Westerberg 	 * and the DP resource is not available anymore, it means there
1429d8518f05SMika Westerberg 	 * is a monitor connected directly to the Type-C port and we are
1430d8518f05SMika Westerberg 	 * in "redrive" mode. For this to work we cannot enter RTD3, so
1431d8518f05SMika Westerberg 	 * we bump up the runtime PM reference count here.
1432d8518f05SMika Westerberg 	 */
1433d8518f05SMika Westerberg 	if (!tb_port_is_dpin(port))
1434d8518f05SMika Westerberg 		return;
1435d8518f05SMika Westerberg 	if (tb_route(sw))
1436d8518f05SMika Westerberg 		return;
1437d8518f05SMika Westerberg 	if (!tb_switch_query_dp_resource(sw, port)) {
1438d8518f05SMika Westerberg 		port->redrive = true;
1439d8518f05SMika Westerberg 		pm_runtime_get(&sw->dev);
1440d8518f05SMika Westerberg 		tb_port_dbg(port, "enter redrive mode, keeping powered\n");
1441d8518f05SMika Westerberg 	}
1442d8518f05SMika Westerberg }
1443d8518f05SMika Westerberg 
1444d8518f05SMika Westerberg static void tb_exit_redrive(struct tb_port *port)
1445d8518f05SMika Westerberg {
1446d8518f05SMika Westerberg 	struct tb_switch *sw = port->sw;
1447d8518f05SMika Westerberg 
1448d8518f05SMika Westerberg 	if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
1449d8518f05SMika Westerberg 		return;
1450d8518f05SMika Westerberg 
1451d8518f05SMika Westerberg 	if (!tb_port_is_dpin(port))
1452d8518f05SMika Westerberg 		return;
1453d8518f05SMika Westerberg 	if (tb_route(sw))
1454d8518f05SMika Westerberg 		return;
1455d8518f05SMika Westerberg 	if (port->redrive && tb_switch_query_dp_resource(sw, port)) {
1456d8518f05SMika Westerberg 		port->redrive = false;
1457d8518f05SMika Westerberg 		pm_runtime_put(&sw->dev);
1458d8518f05SMika Westerberg 		tb_port_dbg(port, "exit redrive mode\n");
1459d8518f05SMika Westerberg 	}
1460d8518f05SMika Westerberg }
1461d8518f05SMika Westerberg 
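/*
 * Sketch of the redrive life cycle (illustrative): the two helpers
 * above keep the runtime PM reference count balanced across the
 * redrive window of a directly attached monitor:
 *
 *	DP IN resource lost -> tb_enter_redrive() -> pm_runtime_get()
 *	DP IN resource back -> tb_exit_redrive()  -> pm_runtime_put()
 */
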
14628afe909bSMika Westerberg static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
14634f807e47SMika Westerberg {
14648afe909bSMika Westerberg 	struct tb_port *in, *out;
14658afe909bSMika Westerberg 	struct tb_tunnel *tunnel;
14668afe909bSMika Westerberg 
14678afe909bSMika Westerberg 	if (tb_port_is_dpin(port)) {
14688afe909bSMika Westerberg 		tb_port_dbg(port, "DP IN resource unavailable\n");
14698afe909bSMika Westerberg 		in = port;
14708afe909bSMika Westerberg 		out = NULL;
14718afe909bSMika Westerberg 	} else {
14728afe909bSMika Westerberg 		tb_port_dbg(port, "DP OUT resource unavailable\n");
14738afe909bSMika Westerberg 		in = NULL;
14748afe909bSMika Westerberg 		out = port;
14758afe909bSMika Westerberg 	}
14768afe909bSMika Westerberg 
14778afe909bSMika Westerberg 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
1478d8518f05SMika Westerberg 	if (tunnel)
14798afe909bSMika Westerberg 		tb_deactivate_and_free_tunnel(tunnel);
1480d8518f05SMika Westerberg 	else
1481d8518f05SMika Westerberg 		tb_enter_redrive(port);
14828afe909bSMika Westerberg 	list_del_init(&port->list);
14838afe909bSMika Westerberg 
14848afe909bSMika Westerberg 	/*
14858afe909bSMika Westerberg 	 * See if there is another DP OUT port that can be used to
14868afe909bSMika Westerberg 	 * create another tunnel.
14878afe909bSMika Westerberg 	 */
14886ce35635SMika Westerberg 	tb_recalc_estimated_bandwidth(tb);
14898afe909bSMika Westerberg 	tb_tunnel_dp(tb);
14908afe909bSMika Westerberg }
14918afe909bSMika Westerberg 
14928afe909bSMika Westerberg static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
14938afe909bSMika Westerberg {
14948afe909bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
14958afe909bSMika Westerberg 	struct tb_port *p;
14968afe909bSMika Westerberg 
14978afe909bSMika Westerberg 	if (tb_port_is_enabled(port))
14988afe909bSMika Westerberg 		return;
14998afe909bSMika Westerberg 
15008afe909bSMika Westerberg 	list_for_each_entry(p, &tcm->dp_resources, list) {
15018afe909bSMika Westerberg 		if (p == port)
15028afe909bSMika Westerberg 			return;
15038afe909bSMika Westerberg 	}
15048afe909bSMika Westerberg 
15058afe909bSMika Westerberg 	tb_port_dbg(port, "DP %s resource available\n",
15068afe909bSMika Westerberg 		    tb_port_is_dpin(port) ? "IN" : "OUT");
15078afe909bSMika Westerberg 	list_add_tail(&port->list, &tcm->dp_resources);
1508d8518f05SMika Westerberg 	tb_exit_redrive(port);
15098afe909bSMika Westerberg 
15108afe909bSMika Westerberg 	/* Look for suitable DP IN <-> DP OUT pairs now */
15118afe909bSMika Westerberg 	tb_tunnel_dp(tb);
15124f807e47SMika Westerberg }
15134f807e47SMika Westerberg 
151581a2e3e4SMika Westerberg static void tb_disconnect_and_release_dp(struct tb *tb)
151581a2e3e4SMika Westerberg {
151681a2e3e4SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
151781a2e3e4SMika Westerberg 	struct tb_tunnel *tunnel, *n;
151881a2e3e4SMika Westerberg 
151981a2e3e4SMika Westerberg 	/*
152081a2e3e4SMika Westerberg 	 * Tear down all DP tunnels and release their resources. They
152181a2e3e4SMika Westerberg 	 * will be re-established after resume based on plug events.
152281a2e3e4SMika Westerberg 	 */
152381a2e3e4SMika Westerberg 	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
152481a2e3e4SMika Westerberg 		if (tb_tunnel_is_dp(tunnel))
152581a2e3e4SMika Westerberg 			tb_deactivate_and_free_tunnel(tunnel);
152681a2e3e4SMika Westerberg 	}
152781a2e3e4SMika Westerberg 
152881a2e3e4SMika Westerberg 	while (!list_empty(&tcm->dp_resources)) {
152981a2e3e4SMika Westerberg 		struct tb_port *port;
153081a2e3e4SMika Westerberg 
153181a2e3e4SMika Westerberg 		port = list_first_entry(&tcm->dp_resources,
153281a2e3e4SMika Westerberg 					struct tb_port, list);
153381a2e3e4SMika Westerberg 		list_del_init(&port->list);
153481a2e3e4SMika Westerberg 	}
153581a2e3e4SMika Westerberg }
153681a2e3e4SMika Westerberg 
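/*
 * The drain loop above uses the usual "pop until empty" list idiom; an
 * equivalent sketch (illustrative only) would be:
 *
 *	struct tb_port *port;
 *
 *	while ((port = list_first_entry_or_null(&tcm->dp_resources,
 *						struct tb_port, list)))
 *		list_del_init(&port->list);
 */
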
15373da88be2SMika Westerberg static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
15383da88be2SMika Westerberg {
15393da88be2SMika Westerberg 	struct tb_tunnel *tunnel;
15403da88be2SMika Westerberg 	struct tb_port *up;
15413da88be2SMika Westerberg 
15423da88be2SMika Westerberg 	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
15433da88be2SMika Westerberg 	if (WARN_ON(!up))
15443da88be2SMika Westerberg 		return -ENODEV;
15453da88be2SMika Westerberg 
15463da88be2SMika Westerberg 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
15473da88be2SMika Westerberg 	if (WARN_ON(!tunnel))
15483da88be2SMika Westerberg 		return -ENODEV;
15493da88be2SMika Westerberg 
155030a4eca6SMika Westerberg 	tb_switch_xhci_disconnect(sw);
155130a4eca6SMika Westerberg 
15523da88be2SMika Westerberg 	tb_tunnel_deactivate(tunnel);
15533da88be2SMika Westerberg 	list_del(&tunnel->list);
15543da88be2SMika Westerberg 	tb_tunnel_free(tunnel);
15553da88be2SMika Westerberg 	return 0;
15563da88be2SMika Westerberg }
15573da88be2SMika Westerberg 
155899cabbb0SMika Westerberg static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
155999cabbb0SMika Westerberg {
156099cabbb0SMika Westerberg 	struct tb_port *up, *down, *port;
15619d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
156299cabbb0SMika Westerberg 	struct tb_tunnel *tunnel;
15639d3cce0bSMika Westerberg 
1564386e5e29SMika Westerberg 	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
156599cabbb0SMika Westerberg 	if (!up)
156699cabbb0SMika Westerberg 		return 0;
15673364f0c1SAndreas Noever 
156899cabbb0SMika Westerberg 	/*
156999cabbb0SMika Westerberg 	 * Look up an available down port. Since we are chaining, it
157099cabbb0SMika Westerberg 	 * should be found right above this switch.
157199cabbb0SMika Westerberg 	 */
15727ce54221SGil Fine 	port = tb_switch_downstream_port(sw);
15737ce54221SGil Fine 	down = tb_find_pcie_down(tb_switch_parent(sw), port);
157499cabbb0SMika Westerberg 	if (!down)
157599cabbb0SMika Westerberg 		return 0;
15763364f0c1SAndreas Noever 
157799cabbb0SMika Westerberg 	tunnel = tb_tunnel_alloc_pci(tb, up, down);
157899cabbb0SMika Westerberg 	if (!tunnel)
157999cabbb0SMika Westerberg 		return -ENOMEM;
15803364f0c1SAndreas Noever 
158193f36adeSMika Westerberg 	if (tb_tunnel_activate(tunnel)) {
158299cabbb0SMika Westerberg 		tb_port_info(up,
15833364f0c1SAndreas Noever 			     "PCIe tunnel activation failed, aborting\n");
158493f36adeSMika Westerberg 		tb_tunnel_free(tunnel);
158599cabbb0SMika Westerberg 		return -EIO;
15863364f0c1SAndreas Noever 	}
15873364f0c1SAndreas Noever 
158843f977bcSGil Fine 	/*
158943f977bcSGil Fine 	 * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
159043f977bcSGil Fine 	 * here.
159143f977bcSGil Fine 	 */
159243f977bcSGil Fine 	if (tb_switch_pcie_l1_enable(sw))
159343f977bcSGil Fine 		tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
159443f977bcSGil Fine 
159530a4eca6SMika Westerberg 	if (tb_switch_xhci_connect(sw))
159630a4eca6SMika Westerberg 		tb_sw_warn(sw, "failed to connect xHCI\n");
159730a4eca6SMika Westerberg 
159899cabbb0SMika Westerberg 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
159999cabbb0SMika Westerberg 	return 0;
16003364f0c1SAndreas Noever }
16019da672a4SAndreas Noever 
1602180b0689SMika Westerberg static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1603180b0689SMika Westerberg 				    int transmit_path, int transmit_ring,
1604180b0689SMika Westerberg 				    int receive_path, int receive_ring)
16057ea4cd6bSMika Westerberg {
16067ea4cd6bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
16077ea4cd6bSMika Westerberg 	struct tb_port *nhi_port, *dst_port;
16087ea4cd6bSMika Westerberg 	struct tb_tunnel *tunnel;
16097ea4cd6bSMika Westerberg 	struct tb_switch *sw;
161053ba2e16SMika Westerberg 	int ret;
16117ea4cd6bSMika Westerberg 
16127ea4cd6bSMika Westerberg 	sw = tb_to_switch(xd->dev.parent);
16137ea4cd6bSMika Westerberg 	dst_port = tb_port_at(xd->route, sw);
1614386e5e29SMika Westerberg 	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
16157ea4cd6bSMika Westerberg 
16167ea4cd6bSMika Westerberg 	mutex_lock(&tb->lock);
161753ba2e16SMika Westerberg 
161853ba2e16SMika Westerberg 	/*
161953ba2e16SMika Westerberg 	 * When tunneling DMA paths the link should not enter CL states,
162053ba2e16SMika Westerberg 	 * so disable them now.
162153ba2e16SMika Westerberg 	 */
162253ba2e16SMika Westerberg 	tb_disable_clx(sw);
162353ba2e16SMika Westerberg 
1624180b0689SMika Westerberg 	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
1625180b0689SMika Westerberg 				     transmit_ring, receive_path, receive_ring);
16267ea4cd6bSMika Westerberg 	if (!tunnel) {
162753ba2e16SMika Westerberg 		ret = -ENOMEM;
162853ba2e16SMika Westerberg 		goto err_clx;
16297ea4cd6bSMika Westerberg 	}
16307ea4cd6bSMika Westerberg 
16317ea4cd6bSMika Westerberg 	if (tb_tunnel_activate(tunnel)) {
16327ea4cd6bSMika Westerberg 		tb_port_info(nhi_port,
16337ea4cd6bSMika Westerberg 			     "DMA tunnel activation failed, aborting\n");
163453ba2e16SMika Westerberg 		ret = -EIO;
163553ba2e16SMika Westerberg 		goto err_free;
16367ea4cd6bSMika Westerberg 	}
16377ea4cd6bSMika Westerberg 
16387ea4cd6bSMika Westerberg 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
16397ea4cd6bSMika Westerberg 	mutex_unlock(&tb->lock);
16407ea4cd6bSMika Westerberg 	return 0;
164153ba2e16SMika Westerberg 
164253ba2e16SMika Westerberg err_free:
164353ba2e16SMika Westerberg 	tb_tunnel_free(tunnel);
164453ba2e16SMika Westerberg err_clx:
164553ba2e16SMika Westerberg 	tb_enable_clx(sw);
164653ba2e16SMika Westerberg 	mutex_unlock(&tb->lock);
164753ba2e16SMika Westerberg 
164853ba2e16SMika Westerberg 	return ret;
16497ea4cd6bSMika Westerberg }
16507ea4cd6bSMika Westerberg 
1651180b0689SMika Westerberg static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1652180b0689SMika Westerberg 					  int transmit_path, int transmit_ring,
1653180b0689SMika Westerberg 					  int receive_path, int receive_ring)
16547ea4cd6bSMika Westerberg {
1655180b0689SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
1656180b0689SMika Westerberg 	struct tb_port *nhi_port, *dst_port;
1657180b0689SMika Westerberg 	struct tb_tunnel *tunnel, *n;
16587ea4cd6bSMika Westerberg 	struct tb_switch *sw;
16597ea4cd6bSMika Westerberg 
16607ea4cd6bSMika Westerberg 	sw = tb_to_switch(xd->dev.parent);
16617ea4cd6bSMika Westerberg 	dst_port = tb_port_at(xd->route, sw);
1662180b0689SMika Westerberg 	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
16637ea4cd6bSMika Westerberg 
1664180b0689SMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1665180b0689SMika Westerberg 		if (!tb_tunnel_is_dma(tunnel))
1666180b0689SMika Westerberg 			continue;
1667180b0689SMika Westerberg 		if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
1668180b0689SMika Westerberg 			continue;
1669180b0689SMika Westerberg 
1670180b0689SMika Westerberg 		if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
1671180b0689SMika Westerberg 					receive_path, receive_ring))
16728afe909bSMika Westerberg 			tb_deactivate_and_free_tunnel(tunnel);
16737ea4cd6bSMika Westerberg 	}
167453ba2e16SMika Westerberg 
167553ba2e16SMika Westerberg 	/*
167653ba2e16SMika Westerberg 	 * Try to re-enable CL states now; it is OK if this fails
167753ba2e16SMika Westerberg 	 * because we may still have another DMA tunnel active through
167853ba2e16SMika Westerberg 	 * the same host router USB4 downstream port.
167953ba2e16SMika Westerberg 	 */
168053ba2e16SMika Westerberg 	tb_enable_clx(sw);
1681180b0689SMika Westerberg }
16827ea4cd6bSMika Westerberg 
1683180b0689SMika Westerberg static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1684180b0689SMika Westerberg 				       int transmit_path, int transmit_ring,
1685180b0689SMika Westerberg 				       int receive_path, int receive_ring)
16867ea4cd6bSMika Westerberg {
16877ea4cd6bSMika Westerberg 	if (!xd->is_unplugged) {
16887ea4cd6bSMika Westerberg 		mutex_lock(&tb->lock);
1689180b0689SMika Westerberg 		__tb_disconnect_xdomain_paths(tb, xd, transmit_path,
1690180b0689SMika Westerberg 					      transmit_ring, receive_path,
1691180b0689SMika Westerberg 					      receive_ring);
16927ea4cd6bSMika Westerberg 		mutex_unlock(&tb->lock);
16937ea4cd6bSMika Westerberg 	}
16947ea4cd6bSMika Westerberg 	return 0;
16957ea4cd6bSMika Westerberg }
16967ea4cd6bSMika Westerberg 
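/*
 * Passing -1 for all paths and rings acts as a wildcard matching every
 * DMA tunnel to the XDomain; the hot-unplug path in tb_handle_hotplug()
 * below uses exactly that to tear everything down:
 *
 *	__tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
 */
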
1697d6cc51cdSAndreas Noever /* hotplug handling */
1698d6cc51cdSAndreas Noever 
1699877e50b3SLee Jones /*
1700d6cc51cdSAndreas Noever  * tb_handle_hotplug() - handle hotplug event
1701d6cc51cdSAndreas Noever  *
1702d6cc51cdSAndreas Noever  * Executes on tb->wq.
1703d6cc51cdSAndreas Noever  */
1704d6cc51cdSAndreas Noever static void tb_handle_hotplug(struct work_struct *work)
1705d6cc51cdSAndreas Noever {
1706d6cc51cdSAndreas Noever 	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
1707d6cc51cdSAndreas Noever 	struct tb *tb = ev->tb;
17089d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
1709053596d9SAndreas Noever 	struct tb_switch *sw;
1710053596d9SAndreas Noever 	struct tb_port *port;
1711284652a4SMika Westerberg 
17126ac6faeeSMika Westerberg 	/* Bring the domain back from sleep if it was suspended */
17136ac6faeeSMika Westerberg 	pm_runtime_get_sync(&tb->dev);
17146ac6faeeSMika Westerberg 
1715d6cc51cdSAndreas Noever 	mutex_lock(&tb->lock);
17169d3cce0bSMika Westerberg 	if (!tcm->hotplug_active)
1717d6cc51cdSAndreas Noever 		goto out; /* during init, suspend or shutdown */
1718d6cc51cdSAndreas Noever 
17198f965efdSMika Westerberg 	sw = tb_switch_find_by_route(tb, ev->route);
1720053596d9SAndreas Noever 	if (!sw) {
1721053596d9SAndreas Noever 		tb_warn(tb,
1722053596d9SAndreas Noever 			"hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
1723053596d9SAndreas Noever 			ev->route, ev->port, ev->unplug);
1724053596d9SAndreas Noever 		goto out;
1725053596d9SAndreas Noever 	}
1726053596d9SAndreas Noever 	if (ev->port > sw->config.max_port_number) {
1727053596d9SAndreas Noever 		tb_warn(tb,
1728053596d9SAndreas Noever 			"hotplug event from non-existent port %llx:%x (unplug: %d)\n",
1729053596d9SAndreas Noever 			ev->route, ev->port, ev->unplug);
17308f965efdSMika Westerberg 		goto put_sw;
1731053596d9SAndreas Noever 	}
1732053596d9SAndreas Noever 	port = &sw->ports[ev->port];
1733053596d9SAndreas Noever 	if (tb_is_upstream_port(port)) {
1734dfe40ca4SMika Westerberg 		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
1735053596d9SAndreas Noever 		       ev->route, ev->port, ev->unplug);
17368f965efdSMika Westerberg 		goto put_sw;
1737053596d9SAndreas Noever 	}
17386ac6faeeSMika Westerberg 
17396ac6faeeSMika Westerberg 	pm_runtime_get_sync(&sw->dev);
17406ac6faeeSMika Westerberg 
1741053596d9SAndreas Noever 	if (ev->unplug) {
1742dacb1287SKranthi Kuntala 		tb_retimer_remove_all(port);
1743dacb1287SKranthi Kuntala 
1744dfe40ca4SMika Westerberg 		if (tb_port_has_remote(port)) {
17457ea4cd6bSMika Westerberg 			tb_port_dbg(port, "switch unplugged\n");
1746aae20bb6SLukas Wunner 			tb_sw_set_unplugged(port->remote->sw);
17473364f0c1SAndreas Noever 			tb_free_invalid_tunnels(tb);
17488afe909bSMika Westerberg 			tb_remove_dp_resources(port->remote->sw);
1749cf29b9afSRajmohan Mani 			tb_switch_tmu_disable(port->remote->sw);
1750de462039SMika Westerberg 			tb_switch_unconfigure_link(port->remote->sw);
175191c0c120SMika Westerberg 			tb_switch_lane_bonding_disable(port->remote->sw);
1752bfe778acSMika Westerberg 			tb_switch_remove(port->remote->sw);
1753053596d9SAndreas Noever 			port->remote = NULL;
1754dfe40ca4SMika Westerberg 			if (port->dual_link_port)
1755dfe40ca4SMika Westerberg 				port->dual_link_port->remote = NULL;
17568afe909bSMika Westerberg 			/* Maybe we can create another DP tunnel */
17576ce35635SMika Westerberg 			tb_recalc_estimated_bandwidth(tb);
17588afe909bSMika Westerberg 			tb_tunnel_dp(tb);
17597ea4cd6bSMika Westerberg 		} else if (port->xdomain) {
17607ea4cd6bSMika Westerberg 			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
17617ea4cd6bSMika Westerberg 
17627ea4cd6bSMika Westerberg 			tb_port_dbg(port, "xdomain unplugged\n");
17637ea4cd6bSMika Westerberg 			/*
17647ea4cd6bSMika Westerberg 			 * Service drivers are unbound during
17657ea4cd6bSMika Westerberg 			 * tb_xdomain_remove() so setting XDomain as
17667ea4cd6bSMika Westerberg 			 * unplugged here prevents deadlock if they call
17677ea4cd6bSMika Westerberg 			 * tb_xdomain_disable_paths(). We will tear down
1768180b0689SMika Westerberg 			 * all the tunnels below.
17697ea4cd6bSMika Westerberg 			 */
17707ea4cd6bSMika Westerberg 			xd->is_unplugged = true;
17717ea4cd6bSMika Westerberg 			tb_xdomain_remove(xd);
17727ea4cd6bSMika Westerberg 			port->xdomain = NULL;
1773180b0689SMika Westerberg 			__tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
17747ea4cd6bSMika Westerberg 			tb_xdomain_put(xd);
1775284652a4SMika Westerberg 			tb_port_unconfigure_xdomain(port);
17768afe909bSMika Westerberg 		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
17778afe909bSMika Westerberg 			tb_dp_resource_unavailable(tb, port);
177830a4eca6SMika Westerberg 		} else if (!port->port) {
177930a4eca6SMika Westerberg 			tb_sw_dbg(sw, "xHCI disconnect request\n");
178030a4eca6SMika Westerberg 			tb_switch_xhci_disconnect(sw);
1781053596d9SAndreas Noever 		} else {
178262efe699SMika Westerberg 			tb_port_dbg(port,
1783053596d9SAndreas Noever 				   "got unplug event for disconnected port, ignoring\n");
1784053596d9SAndreas Noever 		}
1785053596d9SAndreas Noever 	} else if (port->remote) {
178662efe699SMika Westerberg 		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
178730a4eca6SMika Westerberg 	} else if (!port->port && sw->authorized) {
178830a4eca6SMika Westerberg 		tb_sw_dbg(sw, "xHCI connect request\n");
178930a4eca6SMika Westerberg 		tb_switch_xhci_connect(sw);
1790053596d9SAndreas Noever 	} else {
1791344e0643SMika Westerberg 		if (tb_port_is_null(port)) {
179262efe699SMika Westerberg 			tb_port_dbg(port, "hotplug: scanning\n");
1793053596d9SAndreas Noever 			tb_scan_port(port);
179499cabbb0SMika Westerberg 			if (!port->remote)
179562efe699SMika Westerberg 				tb_port_dbg(port, "hotplug: no switch found\n");
17968afe909bSMika Westerberg 		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
17978afe909bSMika Westerberg 			tb_dp_resource_available(tb, port);
1798053596d9SAndreas Noever 		}
1799344e0643SMika Westerberg 	}
18008f965efdSMika Westerberg 
18016ac6faeeSMika Westerberg 	pm_runtime_mark_last_busy(&sw->dev);
18026ac6faeeSMika Westerberg 	pm_runtime_put_autosuspend(&sw->dev);
18036ac6faeeSMika Westerberg 
18048f965efdSMika Westerberg put_sw:
18058f965efdSMika Westerberg 	tb_switch_put(sw);
1806d6cc51cdSAndreas Noever out:
1807d6cc51cdSAndreas Noever 	mutex_unlock(&tb->lock);
18086ac6faeeSMika Westerberg 
18096ac6faeeSMika Westerberg 	pm_runtime_mark_last_busy(&tb->dev);
18106ac6faeeSMika Westerberg 	pm_runtime_put_autosuspend(&tb->dev);
18116ac6faeeSMika Westerberg 
1812d6cc51cdSAndreas Noever 	kfree(ev);
1813d6cc51cdSAndreas Noever }
1814d6cc51cdSAndreas Noever 
18156ce35635SMika Westerberg static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
18166ce35635SMika Westerberg 				 int *requested_down)
18176ce35635SMika Westerberg {
18186ce35635SMika Westerberg 	int allocated_up, allocated_down, available_up, available_down, ret;
18196ce35635SMika Westerberg 	int requested_up_corrected, requested_down_corrected, granularity;
18206ce35635SMika Westerberg 	int max_up, max_down, max_up_rounded, max_down_rounded;
18216ce35635SMika Westerberg 	struct tb *tb = tunnel->tb;
18226ce35635SMika Westerberg 	struct tb_port *in, *out;
18236ce35635SMika Westerberg 
18246ce35635SMika Westerberg 	ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
18256ce35635SMika Westerberg 	if (ret)
18266ce35635SMika Westerberg 		return ret;
18276ce35635SMika Westerberg 
18286ce35635SMika Westerberg 	in = tunnel->src_port;
18296ce35635SMika Westerberg 	out = tunnel->dst_port;
18306ce35635SMika Westerberg 
18316ce35635SMika Westerberg 	tb_port_dbg(in, "bandwidth allocated currently %d/%d Mb/s\n",
18326ce35635SMika Westerberg 		    allocated_up, allocated_down);
18336ce35635SMika Westerberg 
18346ce35635SMika Westerberg 	/*
18356ce35635SMika Westerberg 	 * If we get a rounded-up request from the graphics side, say
18366ce35635SMika Westerberg 	 * HBR2 x 4 that is 17500 instead of 17280 (this is because of
18376ce35635SMika Westerberg 	 * the granularity), we allow it too. Here the graphics has
18386ce35635SMika Westerberg 	 * already negotiated with the DPRX the maximum possible rates
18396ce35635SMika Westerberg 	 * (which is 17280 in this case).
18406ce35635SMika Westerberg 	 *
18416ce35635SMika Westerberg 	 * Since the link cannot go higher than 17280, we use that in
18426ce35635SMika Westerberg 	 * our calculations, but the DP IN adapter Allocated BW write
18436ce35635SMika Westerberg 	 * must be the same value (17500); otherwise the adapter will
18446ce35635SMika Westerberg 	 * mark it as failed for graphics.
18456ce35635SMika Westerberg 	 */
18466ce35635SMika Westerberg 	ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
18476ce35635SMika Westerberg 	if (ret)
18486ce35635SMika Westerberg 		return ret;
18496ce35635SMika Westerberg 
18506ce35635SMika Westerberg 	ret = usb4_dp_port_granularity(in);
18516ce35635SMika Westerberg 	if (ret < 0)
18526ce35635SMika Westerberg 		return ret;
18536ce35635SMika Westerberg 	granularity = ret;
18546ce35635SMika Westerberg 
18556ce35635SMika Westerberg 	max_up_rounded = roundup(max_up, granularity);
18566ce35635SMika Westerberg 	max_down_rounded = roundup(max_down, granularity);
18576ce35635SMika Westerberg 
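	/*
	 * Worked example (assuming a granularity of 250 Mb/s): with
	 * HBR2 x 4 the link maximum is 17280 Mb/s, so
	 * roundup(17280, 250) = 70 * 250 = 17500 Mb/s, matching the
	 * rounded-up value the graphics side may request above.
	 */
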
18586ce35635SMika Westerberg 	/*
18596ce35635SMika Westerberg 	 * This will "fix" the request down to the maximum supported
18606ce35635SMika Westerberg 	 * rate * lanes if it is at the maximum rounded-up level.
18616ce35635SMika Westerberg 	 */
18626ce35635SMika Westerberg 	requested_up_corrected = *requested_up;
18636ce35635SMika Westerberg 	if (requested_up_corrected == max_up_rounded)
18646ce35635SMika Westerberg 		requested_up_corrected = max_up;
18656ce35635SMika Westerberg 	else if (requested_up_corrected < 0)
18666ce35635SMika Westerberg 		requested_up_corrected = 0;
18676ce35635SMika Westerberg 	requested_down_corrected = *requested_down;
18686ce35635SMika Westerberg 	if (requested_down_corrected == max_down_rounded)
18696ce35635SMika Westerberg 		requested_down_corrected = max_down;
18706ce35635SMika Westerberg 	else if (requested_down_corrected < 0)
18716ce35635SMika Westerberg 		requested_down_corrected = 0;
18726ce35635SMika Westerberg 
18736ce35635SMika Westerberg 	tb_port_dbg(in, "corrected bandwidth request %d/%d Mb/s\n",
18746ce35635SMika Westerberg 		    requested_up_corrected, requested_down_corrected);
18756ce35635SMika Westerberg 
18766ce35635SMika Westerberg 	if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
18776ce35635SMika Westerberg 	    (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
18786ce35635SMika Westerberg 		tb_port_dbg(in, "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
18796ce35635SMika Westerberg 			    requested_up_corrected, requested_down_corrected,
18806ce35635SMika Westerberg 			    max_up_rounded, max_down_rounded);
18816ce35635SMika Westerberg 		return -ENOBUFS;
18826ce35635SMika Westerberg 	}
18836ce35635SMika Westerberg 
18846ce35635SMika Westerberg 	if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
18856ce35635SMika Westerberg 	    (*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
18866ce35635SMika Westerberg 		/*
18876ce35635SMika Westerberg 		 * If the requested bandwidth is less than or equal to what
18886ce35635SMika Westerberg 		 * is currently allocated to that tunnel, we simply change
18896ce35635SMika Westerberg 		 * the reservation of the tunnel. Since all the tunnels
18906ce35635SMika Westerberg 		 * going out from the same USB4 port are in the same
18916ce35635SMika Westerberg 		 * group, the released bandwidth will be taken into
18926ce35635SMika Westerberg 		 * account for the other tunnels automatically below.
18936ce35635SMika Westerberg 		 */
18946ce35635SMika Westerberg 		return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
18956ce35635SMika Westerberg 						 requested_down);
18966ce35635SMika Westerberg 	}
18976ce35635SMika Westerberg 
18986ce35635SMika Westerberg 	/*
18996ce35635SMika Westerberg 	 * More bandwidth is requested. Release all the potential
19006ce35635SMika Westerberg 	 * bandwidth from USB3 first.
19016ce35635SMika Westerberg 	 */
19026ce35635SMika Westerberg 	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
19036ce35635SMika Westerberg 	if (ret)
19046ce35635SMika Westerberg 		return ret;
19056ce35635SMika Westerberg 
19066ce35635SMika Westerberg 	/*
19076ce35635SMika Westerberg 	 * Then go over all tunnels that cross the same USB4 ports (they
19086ce35635SMika Westerberg 	 * are also in the same group, but we use the same function here
19096ce35635SMika Westerberg 	 * that we use with the normal bandwidth allocation).
19106ce35635SMika Westerberg 	 */
19116ce35635SMika Westerberg 	ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
19126ce35635SMika Westerberg 	if (ret)
19136ce35635SMika Westerberg 		goto reclaim;
19146ce35635SMika Westerberg 
19156ce35635SMika Westerberg 	tb_port_dbg(in, "bandwidth available for allocation %d/%d Mb/s\n",
19166ce35635SMika Westerberg 		    available_up, available_down);
19176ce35635SMika Westerberg 
19186ce35635SMika Westerberg 	if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
19196ce35635SMika Westerberg 	    (*requested_down >= 0 && available_down >= requested_down_corrected)) {
19206ce35635SMika Westerberg 		ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
19216ce35635SMika Westerberg 						requested_down);
19226ce35635SMika Westerberg 	} else {
19236ce35635SMika Westerberg 		ret = -ENOBUFS;
19246ce35635SMika Westerberg 	}
19256ce35635SMika Westerberg 
19266ce35635SMika Westerberg reclaim:
19276ce35635SMika Westerberg 	tb_reclaim_usb3_bandwidth(tb, in, out);
19286ce35635SMika Westerberg 	return ret;
19296ce35635SMika Westerberg }
19306ce35635SMika Westerberg 
19316ce35635SMika Westerberg static void tb_handle_dp_bandwidth_request(struct work_struct *work)
19326ce35635SMika Westerberg {
19336ce35635SMika Westerberg 	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
19346ce35635SMika Westerberg 	int requested_bw, requested_up, requested_down, ret;
19356ce35635SMika Westerberg 	struct tb_port *in, *out;
19366ce35635SMika Westerberg 	struct tb_tunnel *tunnel;
19376ce35635SMika Westerberg 	struct tb *tb = ev->tb;
19386ce35635SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
19396ce35635SMika Westerberg 	struct tb_switch *sw;
19406ce35635SMika Westerberg 
19416ce35635SMika Westerberg 	pm_runtime_get_sync(&tb->dev);
19426ce35635SMika Westerberg 
19436ce35635SMika Westerberg 	mutex_lock(&tb->lock);
19446ce35635SMika Westerberg 	if (!tcm->hotplug_active)
19456ce35635SMika Westerberg 		goto unlock;
19466ce35635SMika Westerberg 
19476ce35635SMika Westerberg 	sw = tb_switch_find_by_route(tb, ev->route);
19486ce35635SMika Westerberg 	if (!sw) {
19496ce35635SMika Westerberg 		tb_warn(tb, "bandwidth request from non-existent router %llx\n",
19506ce35635SMika Westerberg 			ev->route);
19516ce35635SMika Westerberg 		goto unlock;
19526ce35635SMika Westerberg 	}
19536ce35635SMika Westerberg 
19546ce35635SMika Westerberg 	in = &sw->ports[ev->port];
19556ce35635SMika Westerberg 	if (!tb_port_is_dpin(in)) {
19566ce35635SMika Westerberg 		tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
1957ec4405edSGil Fine 		goto put_sw;
19586ce35635SMika Westerberg 	}
19596ce35635SMika Westerberg 
19606ce35635SMika Westerberg 	tb_port_dbg(in, "handling bandwidth allocation request\n");
19616ce35635SMika Westerberg 
19628d73f6b8SMika Westerberg 	if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
19636ce35635SMika Westerberg 		tb_port_warn(in, "bandwidth allocation mode not enabled\n");
1964ec4405edSGil Fine 		goto put_sw;
19656ce35635SMika Westerberg 	}
19666ce35635SMika Westerberg 
19678d73f6b8SMika Westerberg 	ret = usb4_dp_port_requested_bandwidth(in);
1968ace75e18SMika Westerberg 	if (ret < 0) {
1969ace75e18SMika Westerberg 		if (ret == -ENODATA)
19706ce35635SMika Westerberg 			tb_port_dbg(in, "no bandwidth request active\n");
1971ace75e18SMika Westerberg 		else
1972ace75e18SMika Westerberg 			tb_port_warn(in, "failed to read requested bandwidth\n");
1973ec4405edSGil Fine 		goto put_sw;
19746ce35635SMika Westerberg 	}
1975ace75e18SMika Westerberg 	requested_bw = ret;
19766ce35635SMika Westerberg 
19776ce35635SMika Westerberg 	tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
19786ce35635SMika Westerberg 
19796ce35635SMika Westerberg 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
19806ce35635SMika Westerberg 	if (!tunnel) {
19816ce35635SMika Westerberg 		tb_port_warn(in, "failed to find tunnel\n");
1982ec4405edSGil Fine 		goto put_sw;
19836ce35635SMika Westerberg 	}
19846ce35635SMika Westerberg 
19856ce35635SMika Westerberg 	out = tunnel->dst_port;
19866ce35635SMika Westerberg 
19876ce35635SMika Westerberg 	if (in->sw->config.depth < out->sw->config.depth) {
19886ce35635SMika Westerberg 		requested_up = -1;
19896ce35635SMika Westerberg 		requested_down = requested_bw;
19906ce35635SMika Westerberg 	} else {
19916ce35635SMika Westerberg 		requested_up = requested_bw;
19926ce35635SMika Westerberg 		requested_down = -1;
19936ce35635SMika Westerberg 	}
19946ce35635SMika Westerberg 
19956ce35635SMika Westerberg 	ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
19966ce35635SMika Westerberg 	if (ret) {
19976ce35635SMika Westerberg 		if (ret == -ENOBUFS)
19986ce35635SMika Westerberg 			tb_port_warn(in, "not enough bandwidth available\n");
19996ce35635SMika Westerberg 		else
20006ce35635SMika Westerberg 			tb_port_warn(in, "failed to change bandwidth allocation\n");
20016ce35635SMika Westerberg 	} else {
20026ce35635SMika Westerberg 		tb_port_dbg(in, "bandwidth allocation changed to %d/%d Mb/s\n",
20036ce35635SMika Westerberg 			    requested_up, requested_down);
20046ce35635SMika Westerberg 
20056ce35635SMika Westerberg 		/* Update other clients about the allocation change */
20066ce35635SMika Westerberg 		tb_recalc_estimated_bandwidth(tb);
20076ce35635SMika Westerberg 	}
20086ce35635SMika Westerberg 
2009ec4405edSGil Fine put_sw:
2010ec4405edSGil Fine 	tb_switch_put(sw);
20116ce35635SMika Westerberg unlock:
20126ce35635SMika Westerberg 	mutex_unlock(&tb->lock);
20136ce35635SMika Westerberg 
20146ce35635SMika Westerberg 	pm_runtime_mark_last_busy(&tb->dev);
20156ce35635SMika Westerberg 	pm_runtime_put_autosuspend(&tb->dev);
2016596a5123SMika Westerberg 
2017596a5123SMika Westerberg 	kfree(ev);
20186ce35635SMika Westerberg }
20196ce35635SMika Westerberg 
20206ce35635SMika Westerberg static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
20216ce35635SMika Westerberg {
20226ce35635SMika Westerberg 	struct tb_hotplug_event *ev;
20236ce35635SMika Westerberg 
20246ce35635SMika Westerberg 	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
20256ce35635SMika Westerberg 	if (!ev)
20266ce35635SMika Westerberg 		return;
20276ce35635SMika Westerberg 
20286ce35635SMika Westerberg 	ev->tb = tb;
20296ce35635SMika Westerberg 	ev->route = route;
20306ce35635SMika Westerberg 	ev->port = port;
20316ce35635SMika Westerberg 	INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
20326ce35635SMika Westerberg 	queue_work(tb->wq, &ev->work);
20336ce35635SMika Westerberg }
20346ce35635SMika Westerberg 
20356ce35635SMika Westerberg static void tb_handle_notification(struct tb *tb, u64 route,
20366ce35635SMika Westerberg 				   const struct cfg_error_pkg *error)
20376ce35635SMika Westerberg {
20386ce35635SMika Westerberg 
20396ce35635SMika Westerberg 	switch (error->error) {
2040235d0194SMika Westerberg 	case TB_CFG_ERROR_PCIE_WAKE:
2041235d0194SMika Westerberg 	case TB_CFG_ERROR_DP_CON_CHANGE:
2042235d0194SMika Westerberg 	case TB_CFG_ERROR_DPTX_DISCOVERY:
2043235d0194SMika Westerberg 		if (tb_cfg_ack_notification(tb->ctl, route, error))
2044235d0194SMika Westerberg 			tb_warn(tb, "could not ack notification on %llx\n",
2045235d0194SMika Westerberg 				route);
2046235d0194SMika Westerberg 		break;
2047235d0194SMika Westerberg 
20486ce35635SMika Westerberg 	case TB_CFG_ERROR_DP_BW:
2049235d0194SMika Westerberg 		if (tb_cfg_ack_notification(tb->ctl, route, error))
2050235d0194SMika Westerberg 			tb_warn(tb, "could not ack notification on %llx\n",
2051235d0194SMika Westerberg 				route);
20526ce35635SMika Westerberg 		tb_queue_dp_bandwidth_request(tb, route, error->port);
20536ce35635SMika Westerberg 		break;
20546ce35635SMika Westerberg 
20556ce35635SMika Westerberg 	default:
2056235d0194SMika Westerberg 		/* Ignore for now */
2057235d0194SMika Westerberg 		break;
20586ce35635SMika Westerberg 	}
20596ce35635SMika Westerberg }
20606ce35635SMika Westerberg 
2061877e50b3SLee Jones /*
2062d6cc51cdSAndreas Noever  * tb_handle_event() - callback function for the control channel
2063d6cc51cdSAndreas Noever  *
2064d6cc51cdSAndreas Noever  * Delegates to tb_handle_hotplug.
2065d6cc51cdSAndreas Noever  */
206681a54b5eSMika Westerberg static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
206781a54b5eSMika Westerberg 			    const void *buf, size_t size)
2068d6cc51cdSAndreas Noever {
206981a54b5eSMika Westerberg 	const struct cfg_event_pkg *pkg = buf;
20706ce35635SMika Westerberg 	u64 route = tb_cfg_get_route(&pkg->header);
207181a54b5eSMika Westerberg 
20726ce35635SMika Westerberg 	switch (type) {
20736ce35635SMika Westerberg 	case TB_CFG_PKG_ERROR:
20746ce35635SMika Westerberg 		tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
20756ce35635SMika Westerberg 		return;
20766ce35635SMika Westerberg 	case TB_CFG_PKG_EVENT:
20776ce35635SMika Westerberg 		break;
20786ce35635SMika Westerberg 	default:
207981a54b5eSMika Westerberg 		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
208081a54b5eSMika Westerberg 		return;
208181a54b5eSMika Westerberg 	}
208281a54b5eSMika Westerberg 
2083210e9f56SMika Westerberg 	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
208481a54b5eSMika Westerberg 		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
208581a54b5eSMika Westerberg 			pkg->port);
208681a54b5eSMika Westerberg 	}
208781a54b5eSMika Westerberg 
20884f807e47SMika Westerberg 	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
2089d6cc51cdSAndreas Noever }
2090d6cc51cdSAndreas Noever 
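/*
 * Called when the domain is stopped: tear down tunnels and remove the
 * root switch.
 */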
tb_stop(struct tb * tb)20919d3cce0bSMika Westerberg static void tb_stop(struct tb *tb)
2092d6cc51cdSAndreas Noever {
20939d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
209493f36adeSMika Westerberg 	struct tb_tunnel *tunnel;
209593f36adeSMika Westerberg 	struct tb_tunnel *n;
20963364f0c1SAndreas Noever 
20976ac6faeeSMika Westerberg 	cancel_delayed_work(&tcm->remove_work);
20983364f0c1SAndreas Noever 	/* tunnels are only present after everything has been initialized */
20997ea4cd6bSMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
21007ea4cd6bSMika Westerberg 		/*
21017ea4cd6bSMika Westerberg 		 * DMA tunnels require the driver to be functional so we
21027ea4cd6bSMika Westerberg 		 * tear them down. Other protocol tunnels can be left
21037ea4cd6bSMika Westerberg 		 * intact.
21047ea4cd6bSMika Westerberg 		 */
21057ea4cd6bSMika Westerberg 		if (tb_tunnel_is_dma(tunnel))
21067ea4cd6bSMika Westerberg 			tb_tunnel_deactivate(tunnel);
210793f36adeSMika Westerberg 		tb_tunnel_free(tunnel);
21087ea4cd6bSMika Westerberg 	}
2109bfe778acSMika Westerberg 	tb_switch_remove(tb->root_switch);
21109d3cce0bSMika Westerberg 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
2111d6cc51cdSAndreas Noever }
2112d6cc51cdSAndreas Noever 
tb_scan_finalize_switch(struct device * dev,void * data)211399cabbb0SMika Westerberg static int tb_scan_finalize_switch(struct device *dev, void *data)
211499cabbb0SMika Westerberg {
211599cabbb0SMika Westerberg 	if (tb_is_switch(dev)) {
211699cabbb0SMika Westerberg 		struct tb_switch *sw = tb_to_switch(dev);
211799cabbb0SMika Westerberg 
211899cabbb0SMika Westerberg 		/*
211999cabbb0SMika Westerberg 		 * If we found that the switch was already setup by the
212099cabbb0SMika Westerberg 		 * boot firmware, mark it as authorized now before we
212199cabbb0SMika Westerberg 		 * send uevent to userspace.
212299cabbb0SMika Westerberg 		 */
212399cabbb0SMika Westerberg 		if (sw->boot)
212499cabbb0SMika Westerberg 			sw->authorized = 1;
212599cabbb0SMika Westerberg 
212699cabbb0SMika Westerberg 		dev_set_uevent_suppress(dev, false);
212799cabbb0SMika Westerberg 		kobject_uevent(&dev->kobj, KOBJ_ADD);
212899cabbb0SMika Westerberg 		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
212999cabbb0SMika Westerberg 	}
213099cabbb0SMika Westerberg 
213199cabbb0SMika Westerberg 	return 0;
213299cabbb0SMika Westerberg }
213399cabbb0SMika Westerberg 
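/*
 * Called when the domain is started: set up the root switch, discover
 * (or reset) tunnels created by the boot firmware and start accepting
 * hotplug events.
 */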
tb_start(struct tb * tb,bool reset)21343c1d704dSSanath S static int tb_start(struct tb *tb, bool reset)
2135d6cc51cdSAndreas Noever {
21369d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
2137bfe778acSMika Westerberg 	int ret;
2138d6cc51cdSAndreas Noever 
2139bfe778acSMika Westerberg 	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
2140444ac384SMika Westerberg 	if (IS_ERR(tb->root_switch))
2141444ac384SMika Westerberg 		return PTR_ERR(tb->root_switch);
2142a25c8b2fSAndreas Noever 
2143e6b245ccSMika Westerberg 	/*
2144e6b245ccSMika Westerberg 	 * ICM firmware upgrade needs running ICM firmware, which is not
2145e6b245ccSMika Westerberg 	 * available in native mode, so disable firmware upgrade of the
2146e6b245ccSMika Westerberg 	 * root switch.
21475172eb9aSSzuying Chen 	 *
21485172eb9aSSzuying Chen 	 * However, USB4 routers support NVM firmware upgrade if they
21495172eb9aSSzuying Chen 	 * implement the necessary router operations.
2150e6b245ccSMika Westerberg 	 */
21515172eb9aSSzuying Chen 	tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
21526ac6faeeSMika Westerberg 	/* All USB4 routers support runtime PM */
21536ac6faeeSMika Westerberg 	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
2154e6b245ccSMika Westerberg 
2155bfe778acSMika Westerberg 	ret = tb_switch_configure(tb->root_switch);
2156bfe778acSMika Westerberg 	if (ret) {
2157bfe778acSMika Westerberg 		tb_switch_put(tb->root_switch);
2158bfe778acSMika Westerberg 		return ret;
2159bfe778acSMika Westerberg 	}
2160bfe778acSMika Westerberg 
2161bfe778acSMika Westerberg 	/* Announce the switch to the world */
2162bfe778acSMika Westerberg 	ret = tb_switch_add(tb->root_switch);
2163bfe778acSMika Westerberg 	if (ret) {
2164bfe778acSMika Westerberg 		tb_switch_put(tb->root_switch);
2165bfe778acSMika Westerberg 		return ret;
2166bfe778acSMika Westerberg 	}
2167bfe778acSMika Westerberg 
2168b017a46dSGil Fine 	/*
2169b017a46dSGil Fine 	 * To support the highest CLx state, we set the host router's TMU
2170b017a46dSGil Fine 	 * to LowRes mode.
2171b017a46dSGil Fine 	 */
2172d49b4f04SMika Westerberg 	tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
2173cf29b9afSRajmohan Mani 	/* Enable TMU if it is off */
2174cf29b9afSRajmohan Mani 	tb_switch_tmu_enable(tb->root_switch);
21753c1d704dSSanath S 
21763c1d704dSSanath S 	/*
21773c1d704dSSanath S 	 * Boot firmware might have created tunnels of its own. Since we
21783c1d704dSSanath S 	 * cannot be sure they are usable for us, tear them down and
21793c1d704dSSanath S 	 * reset the ports to handle it as new hotplug for USB4 v1
21803c1d704dSSanath S 	 * routers (for USB4 v2 and beyond we already do host reset).
21813c1d704dSSanath S 	 */
21823c1d704dSSanath S 	if (reset && usb4_switch_version(tb->root_switch) == 1) {
21833c1d704dSSanath S 		tb_switch_reset(tb->root_switch);
21843c1d704dSSanath S 	} else {
21859da672a4SAndreas Noever 		/* Full scan to discover devices added before the driver was loaded. */
21869da672a4SAndreas Noever 		tb_scan_switch(tb->root_switch);
21870414bec5SMika Westerberg 		/* Find out tunnels created by the boot firmware */
218843bddb26SMika Westerberg 		tb_discover_tunnels(tb);
2189b60e31bfSSanjay R Mehta 		/* Add DP resources from the DP tunnels created by the boot firmware */
2190b60e31bfSSanjay R Mehta 		tb_discover_dp_resources(tb);
21913c1d704dSSanath S 	}
21923c1d704dSSanath S 
2193e6f81858SRajmohan Mani 	/*
2194e6f81858SRajmohan Mani 	 * If the boot firmware did not create USB 3.x tunnels create them
2195e6f81858SRajmohan Mani 	 * now for the whole topology.
2196e6f81858SRajmohan Mani 	 */
2197e6f81858SRajmohan Mani 	tb_create_usb3_tunnels(tb->root_switch);
21988afe909bSMika Westerberg 	/* Add DP IN resources for the root switch */
21998afe909bSMika Westerberg 	tb_add_dp_resources(tb->root_switch);
220099cabbb0SMika Westerberg 	/* Make the discovered switches available to the userspace */
220199cabbb0SMika Westerberg 	device_for_each_child(&tb->root_switch->dev, NULL,
220299cabbb0SMika Westerberg 			      tb_scan_finalize_switch);
22039da672a4SAndreas Noever 
2204d6cc51cdSAndreas Noever 	/* Allow tb_handle_hotplug to progress events */
22059d3cce0bSMika Westerberg 	tcm->hotplug_active = true;
22069d3cce0bSMika Westerberg 	return 0;
2207d6cc51cdSAndreas Noever }
2208d6cc51cdSAndreas Noever 
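/*
 * System suspend: release DP resources, put the topology to sleep and
 * stop hotplug event handling until resume.
 */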
tb_suspend_noirq(struct tb * tb)22099d3cce0bSMika Westerberg static int tb_suspend_noirq(struct tb *tb)
221023dd5bb4SAndreas Noever {
22119d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
22129d3cce0bSMika Westerberg 
2213daa5140fSMika Westerberg 	tb_dbg(tb, "suspending...\n");
221481a2e3e4SMika Westerberg 	tb_disconnect_and_release_dp(tb);
22156ac6faeeSMika Westerberg 	tb_switch_suspend(tb->root_switch, false);
22169d3cce0bSMika Westerberg 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
2217daa5140fSMika Westerberg 	tb_dbg(tb, "suspend finished\n");
22189d3cce0bSMika Westerberg 
22199d3cce0bSMika Westerberg 	return 0;
222023dd5bb4SAndreas Noever }
222123dd5bb4SAndreas Noever 
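/*
 * Restore CL states, TMU configuration, lane bonding and link
 * configuration recursively for the topology below @sw.
 */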
tb_restore_children(struct tb_switch * sw)222291c0c120SMika Westerberg static void tb_restore_children(struct tb_switch *sw)
222391c0c120SMika Westerberg {
222491c0c120SMika Westerberg 	struct tb_port *port;
222591c0c120SMika Westerberg 
22266ac6faeeSMika Westerberg 	/* No need to restore if the router is already unplugged */
22276ac6faeeSMika Westerberg 	if (sw->is_unplugged)
22286ac6faeeSMika Westerberg 		return;
22296ac6faeeSMika Westerberg 
22301a9b6cb8SMika Westerberg 	if (tb_enable_clx(sw))
22311a9b6cb8SMika Westerberg 		tb_sw_warn(sw, "failed to re-enable CL states\n");
2232b017a46dSGil Fine 
2233cf29b9afSRajmohan Mani 	if (tb_enable_tmu(sw))
2234cf29b9afSRajmohan Mani 		tb_sw_warn(sw, "failed to restore TMU configuration\n");
2235cf29b9afSRajmohan Mani 
2236d49b4f04SMika Westerberg 	tb_switch_configuration_valid(sw);
2237d49b4f04SMika Westerberg 
223891c0c120SMika Westerberg 	tb_switch_for_each_port(sw, port) {
2239284652a4SMika Westerberg 		if (!tb_port_has_remote(port) && !port->xdomain)
224091c0c120SMika Westerberg 			continue;
224191c0c120SMika Westerberg 
2242284652a4SMika Westerberg 		if (port->remote) {
22432ca3263aSMika Westerberg 			tb_switch_lane_bonding_enable(port->remote->sw);
2244de462039SMika Westerberg 			tb_switch_configure_link(port->remote->sw);
224591c0c120SMika Westerberg 
224691c0c120SMika Westerberg 			tb_restore_children(port->remote->sw);
2247284652a4SMika Westerberg 		} else if (port->xdomain) {
2248f9cad07bSMika Westerberg 			tb_port_configure_xdomain(port, port->xdomain);
2249284652a4SMika Westerberg 		}
225091c0c120SMika Westerberg 	}
225191c0c120SMika Westerberg }
225291c0c120SMika Westerberg 
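/*
 * System resume: bring the topology back up, tear down tunnels left
 * behind by the boot or restore kernel and restart our own.
 */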
tb_resume_noirq(struct tb * tb)22539d3cce0bSMika Westerberg static int tb_resume_noirq(struct tb *tb)
225423dd5bb4SAndreas Noever {
22559d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
225693f36adeSMika Westerberg 	struct tb_tunnel *tunnel, *n;
225743bddb26SMika Westerberg 	unsigned int usb3_delay = 0;
225843bddb26SMika Westerberg 	LIST_HEAD(tunnels);
22599d3cce0bSMika Westerberg 
2260daa5140fSMika Westerberg 	tb_dbg(tb, "resuming...\n");
226123dd5bb4SAndreas Noever 
2262c67f926eSMika Westerberg 	/*
2263c67f926eSMika Westerberg 	 * For non-USB4 hosts (Apple systems) remove any PCIe devices
2264c67f926eSMika Westerberg 	 * the firmware might have setup.
2265c67f926eSMika Westerberg 	 * the firmware might have set up.
2266c67f926eSMika Westerberg 	if (!tb_switch_is_usb4(tb->root_switch))
2267356b6c4eSMika Westerberg 		tb_switch_reset(tb->root_switch);
226823dd5bb4SAndreas Noever 
22693238b23eSGil Fine 	tb_switch_resume(tb->root_switch, false);
227023dd5bb4SAndreas Noever 	tb_free_invalid_tunnels(tb);
227123dd5bb4SAndreas Noever 	tb_free_unplugged_children(tb->root_switch);
227291c0c120SMika Westerberg 	tb_restore_children(tb->root_switch);
227343bddb26SMika Westerberg 
227443bddb26SMika Westerberg 	/*
227543bddb26SMika Westerberg 	 * If we get here from suspend to disk the boot firmware or the
227643bddb26SMika Westerberg 	 * restore kernel might have created tunnels of its own. Since
227743bddb26SMika Westerberg 	 * we cannot be sure they are usable for us we find and tear
227843bddb26SMika Westerberg 	 * them down.
227943bddb26SMika Westerberg 	 */
228043bddb26SMika Westerberg 	tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
228143bddb26SMika Westerberg 	list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
228243bddb26SMika Westerberg 		if (tb_tunnel_is_usb3(tunnel))
228343bddb26SMika Westerberg 			usb3_delay = 500;
228443bddb26SMika Westerberg 		tb_tunnel_deactivate(tunnel);
228543bddb26SMika Westerberg 		tb_tunnel_free(tunnel);
228643bddb26SMika Westerberg 	}
228743bddb26SMika Westerberg 
228843bddb26SMika Westerberg 	/* Re-create our tunnels now */
228943bddb26SMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
229043bddb26SMika Westerberg 		/* USB3 requires delay before it can be re-activated */
229143bddb26SMika Westerberg 		if (tb_tunnel_is_usb3(tunnel)) {
229243bddb26SMika Westerberg 			msleep(usb3_delay);
229343bddb26SMika Westerberg 			/* Only need to do it once */
229443bddb26SMika Westerberg 			usb3_delay = 0;
229543bddb26SMika Westerberg 		}
229693f36adeSMika Westerberg 		tb_tunnel_restart(tunnel);
229743bddb26SMika Westerberg 	}
22989d3cce0bSMika Westerberg 	if (!list_empty(&tcm->tunnel_list)) {
229923dd5bb4SAndreas Noever 		/*
230023dd5bb4SAndreas Noever 		 * The PCIe links need some time to come up after the
230123dd5bb4SAndreas Noever 		 * tunnels are restarted; 100 ms has proven sufficient.
230223dd5bb4SAndreas Noever 		 */
2303daa5140fSMika Westerberg 		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
230423dd5bb4SAndreas Noever 		msleep(100);
230523dd5bb4SAndreas Noever 	}
230623dd5bb4SAndreas Noever 	/* Allow tb_handle_hotplug to progress events */
23079d3cce0bSMika Westerberg 	tcm->hotplug_active = true;
2308daa5140fSMika Westerberg 	tb_dbg(tb, "resume finished\n");
23099d3cce0bSMika Westerberg 
23109d3cce0bSMika Westerberg 	return 0;
23119d3cce0bSMika Westerberg }
23129d3cce0bSMika Westerberg 
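/*
 * Remove XDomain connections that went away while the domain was asleep.
 * Returns the number of XDomains removed so the caller can rescan.
 */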
tb_free_unplugged_xdomains(struct tb_switch * sw)23137ea4cd6bSMika Westerberg static int tb_free_unplugged_xdomains(struct tb_switch *sw)
23147ea4cd6bSMika Westerberg {
2315b433d010SMika Westerberg 	struct tb_port *port;
2316b433d010SMika Westerberg 	int ret = 0;
23177ea4cd6bSMika Westerberg 
2318b433d010SMika Westerberg 	tb_switch_for_each_port(sw, port) {
23197ea4cd6bSMika Westerberg 		if (tb_is_upstream_port(port))
23207ea4cd6bSMika Westerberg 			continue;
23217ea4cd6bSMika Westerberg 		if (port->xdomain && port->xdomain->is_unplugged) {
2322dacb1287SKranthi Kuntala 			tb_retimer_remove_all(port);
23237ea4cd6bSMika Westerberg 			tb_xdomain_remove(port->xdomain);
2324284652a4SMika Westerberg 			tb_port_unconfigure_xdomain(port);
23257ea4cd6bSMika Westerberg 			port->xdomain = NULL;
23267ea4cd6bSMika Westerberg 			ret++;
23277ea4cd6bSMika Westerberg 		} else if (port->remote) {
23287ea4cd6bSMika Westerberg 			ret += tb_free_unplugged_xdomains(port->remote->sw);
23297ea4cd6bSMika Westerberg 		}
23307ea4cd6bSMika Westerberg 	}
23317ea4cd6bSMika Westerberg 
23327ea4cd6bSMika Westerberg 	return ret;
23337ea4cd6bSMika Westerberg }
23347ea4cd6bSMika Westerberg 
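/*
 * For hibernation freeze/thaw only hotplug event handling is paused and
 * resumed; the hardware state is left untouched.
 */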
tb_freeze_noirq(struct tb * tb)2335884e4d57SMika Westerberg static int tb_freeze_noirq(struct tb *tb)
2336884e4d57SMika Westerberg {
2337884e4d57SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
2338884e4d57SMika Westerberg 
2339884e4d57SMika Westerberg 	tcm->hotplug_active = false;
2340884e4d57SMika Westerberg 	return 0;
2341884e4d57SMika Westerberg }
2342884e4d57SMika Westerberg 
tb_thaw_noirq(struct tb * tb)2343884e4d57SMika Westerberg static int tb_thaw_noirq(struct tb *tb)
2344884e4d57SMika Westerberg {
2345884e4d57SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
2346884e4d57SMika Westerberg 
2347884e4d57SMika Westerberg 	tcm->hotplug_active = true;
2348884e4d57SMika Westerberg 	return 0;
2349884e4d57SMika Westerberg }
2350884e4d57SMika Westerberg 
tb_complete(struct tb * tb)23517ea4cd6bSMika Westerberg static void tb_complete(struct tb *tb)
23527ea4cd6bSMika Westerberg {
23537ea4cd6bSMika Westerberg 	/*
23547ea4cd6bSMika Westerberg 	 * Release any unplugged XDomains and if there is a case where
23557ea4cd6bSMika Westerberg 	 * another domain is swapped in place of unplugged XDomain we
23567ea4cd6bSMika Westerberg 	 * need to run another rescan.
23577ea4cd6bSMika Westerberg 	 */
23587ea4cd6bSMika Westerberg 	mutex_lock(&tb->lock);
23597ea4cd6bSMika Westerberg 	if (tb_free_unplugged_xdomains(tb->root_switch))
23607ea4cd6bSMika Westerberg 		tb_scan_switch(tb->root_switch);
23617ea4cd6bSMika Westerberg 	mutex_unlock(&tb->lock);
23627ea4cd6bSMika Westerberg }
23637ea4cd6bSMika Westerberg 
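/* Runtime suspend: put the topology to sleep and pause hotplug handling */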
tb_runtime_suspend(struct tb * tb)23646ac6faeeSMika Westerberg static int tb_runtime_suspend(struct tb *tb)
23656ac6faeeSMika Westerberg {
23666ac6faeeSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
23676ac6faeeSMika Westerberg 
23686ac6faeeSMika Westerberg 	mutex_lock(&tb->lock);
23696ac6faeeSMika Westerberg 	tb_switch_suspend(tb->root_switch, true);
23706ac6faeeSMika Westerberg 	tcm->hotplug_active = false;
23716ac6faeeSMika Westerberg 	mutex_unlock(&tb->lock);
23726ac6faeeSMika Westerberg 
23736ac6faeeSMika Westerberg 	return 0;
23746ac6faeeSMika Westerberg }
23756ac6faeeSMika Westerberg 
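/*
 * Deferred removal of routers and XDomains unplugged during runtime
 * suspend; scheduled from tb_runtime_resume() to avoid deadlocks.
 */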
tb_remove_work(struct work_struct * work)23766ac6faeeSMika Westerberg static void tb_remove_work(struct work_struct *work)
23776ac6faeeSMika Westerberg {
23786ac6faeeSMika Westerberg 	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
23796ac6faeeSMika Westerberg 	struct tb *tb = tcm_to_tb(tcm);
23806ac6faeeSMika Westerberg 
23816ac6faeeSMika Westerberg 	mutex_lock(&tb->lock);
23826ac6faeeSMika Westerberg 	if (tb->root_switch) {
23836ac6faeeSMika Westerberg 		tb_free_unplugged_children(tb->root_switch);
23846ac6faeeSMika Westerberg 		tb_free_unplugged_xdomains(tb->root_switch);
23856ac6faeeSMika Westerberg 	}
23866ac6faeeSMika Westerberg 	mutex_unlock(&tb->lock);
23876ac6faeeSMika Westerberg }
23886ac6faeeSMika Westerberg 
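/*
 * Runtime resume: wake the topology, restart tunnels and resume hotplug
 * handling.
 */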
tb_runtime_resume(struct tb * tb)23896ac6faeeSMika Westerberg static int tb_runtime_resume(struct tb *tb)
23906ac6faeeSMika Westerberg {
23916ac6faeeSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
23926ac6faeeSMika Westerberg 	struct tb_tunnel *tunnel, *n;
23936ac6faeeSMika Westerberg 
23946ac6faeeSMika Westerberg 	mutex_lock(&tb->lock);
23953238b23eSGil Fine 	tb_switch_resume(tb->root_switch, true);
23966ac6faeeSMika Westerberg 	tb_free_invalid_tunnels(tb);
23976ac6faeeSMika Westerberg 	tb_restore_children(tb->root_switch);
23986ac6faeeSMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
23996ac6faeeSMika Westerberg 		tb_tunnel_restart(tunnel);
24006ac6faeeSMika Westerberg 	tcm->hotplug_active = true;
24016ac6faeeSMika Westerberg 	mutex_unlock(&tb->lock);
24026ac6faeeSMika Westerberg 
24036ac6faeeSMika Westerberg 	/*
24046ac6faeeSMika Westerberg 	 * Schedule cleanup of any unplugged devices. Run this in a
24056ac6faeeSMika Westerberg 	 * separate thread to avoid possible deadlock if the device
24066ac6faeeSMika Westerberg 	 * removal runtime resumes the unplugged device.
24076ac6faeeSMika Westerberg 	 */
24086ac6faeeSMika Westerberg 	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
24096ac6faeeSMika Westerberg 	return 0;
24106ac6faeeSMika Westerberg }
24116ac6faeeSMika Westerberg 
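/* Software connection manager operations */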
24129d3cce0bSMika Westerberg static const struct tb_cm_ops tb_cm_ops = {
24139d3cce0bSMika Westerberg 	.start = tb_start,
24149d3cce0bSMika Westerberg 	.stop = tb_stop,
24159d3cce0bSMika Westerberg 	.suspend_noirq = tb_suspend_noirq,
24169d3cce0bSMika Westerberg 	.resume_noirq = tb_resume_noirq,
2417884e4d57SMika Westerberg 	.freeze_noirq = tb_freeze_noirq,
2418884e4d57SMika Westerberg 	.thaw_noirq = tb_thaw_noirq,
24197ea4cd6bSMika Westerberg 	.complete = tb_complete,
24206ac6faeeSMika Westerberg 	.runtime_suspend = tb_runtime_suspend,
24216ac6faeeSMika Westerberg 	.runtime_resume = tb_runtime_resume,
242281a54b5eSMika Westerberg 	.handle_event = tb_handle_event,
24233da88be2SMika Westerberg 	.disapprove_switch = tb_disconnect_pci,
242499cabbb0SMika Westerberg 	.approve_switch = tb_tunnel_pci,
24257ea4cd6bSMika Westerberg 	.approve_xdomain_paths = tb_approve_xdomain_paths,
24267ea4cd6bSMika Westerberg 	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
24279d3cce0bSMika Westerberg };
24289d3cce0bSMika Westerberg 
2429349bfe08SMika Westerberg /*
2430349bfe08SMika Westerberg  * During suspend the Thunderbolt controller is reset and all PCIe
2431349bfe08SMika Westerberg  * tunnels are lost. The NHI driver will try to reestablish all tunnels
2432349bfe08SMika Westerberg  * during resume. This adds device links between the tunneled PCIe
2433349bfe08SMika Westerberg  * downstream ports and the NHI so that the device core will make sure
2434349bfe08SMika Westerberg  * NHI is resumed first before the rest.
2435349bfe08SMika Westerberg  */
tb_apple_add_links(struct tb_nhi * nhi)2436408e1d96SMika Westerberg static bool tb_apple_add_links(struct tb_nhi *nhi)
2437349bfe08SMika Westerberg {
2438349bfe08SMika Westerberg 	struct pci_dev *upstream, *pdev;
2439408e1d96SMika Westerberg 	bool ret;
2440349bfe08SMika Westerberg 
2441349bfe08SMika Westerberg 	if (!x86_apple_machine)
2442408e1d96SMika Westerberg 		return false;
2443349bfe08SMika Westerberg 
2444349bfe08SMika Westerberg 	switch (nhi->pdev->device) {
2445349bfe08SMika Westerberg 	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
2446349bfe08SMika Westerberg 	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
2447349bfe08SMika Westerberg 	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
2448349bfe08SMika Westerberg 	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
2449349bfe08SMika Westerberg 		break;
2450349bfe08SMika Westerberg 	default:
2451408e1d96SMika Westerberg 		return false;
2452349bfe08SMika Westerberg 	}
2453349bfe08SMika Westerberg 
2454349bfe08SMika Westerberg 	upstream = pci_upstream_bridge(nhi->pdev);
2455349bfe08SMika Westerberg 	while (upstream) {
2456349bfe08SMika Westerberg 		if (!pci_is_pcie(upstream))
2457408e1d96SMika Westerberg 			return false;
2458349bfe08SMika Westerberg 		if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
2459349bfe08SMika Westerberg 			break;
2460349bfe08SMika Westerberg 		upstream = pci_upstream_bridge(upstream);
2461349bfe08SMika Westerberg 	}
2462349bfe08SMika Westerberg 
2463349bfe08SMika Westerberg 	if (!upstream)
2464408e1d96SMika Westerberg 		return false;
2465349bfe08SMika Westerberg 
2466349bfe08SMika Westerberg 	/*
2467349bfe08SMika Westerberg 	 * For each hotplug downstream port, add a device link
2468349bfe08SMika Westerberg 	 * back to NHI so that PCIe tunnels can be re-established after
2469349bfe08SMika Westerberg 	 * sleep.
2470349bfe08SMika Westerberg 	 */
2471408e1d96SMika Westerberg 	ret = false;
2472349bfe08SMika Westerberg 	for_each_pci_bridge(pdev, upstream->subordinate) {
2473349bfe08SMika Westerberg 		const struct device_link *link;
2474349bfe08SMika Westerberg 
2475349bfe08SMika Westerberg 		if (!pci_is_pcie(pdev))
2476349bfe08SMika Westerberg 			continue;
2477349bfe08SMika Westerberg 		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
2478349bfe08SMika Westerberg 		    !pdev->is_hotplug_bridge)
2479349bfe08SMika Westerberg 			continue;
2480349bfe08SMika Westerberg 
2481349bfe08SMika Westerberg 		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
2482349bfe08SMika Westerberg 				       DL_FLAG_AUTOREMOVE_SUPPLIER |
2483349bfe08SMika Westerberg 				       DL_FLAG_PM_RUNTIME);
2484349bfe08SMika Westerberg 		if (link) {
2485349bfe08SMika Westerberg 			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
2486349bfe08SMika Westerberg 				dev_name(&pdev->dev));
2487408e1d96SMika Westerberg 			ret = true;
2488349bfe08SMika Westerberg 		} else {
2489349bfe08SMika Westerberg 			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
2490349bfe08SMika Westerberg 				 dev_name(&pdev->dev));
2491349bfe08SMika Westerberg 		}
2492349bfe08SMika Westerberg 	}
2493408e1d96SMika Westerberg 
2494408e1d96SMika Westerberg 	return ret;
2495349bfe08SMika Westerberg }
2496349bfe08SMika Westerberg 
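/*
 * Allocate and initialize a software connection manager domain for @nhi.
 * Returns the new domain or NULL if the allocation failed.
 */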
tb_probe(struct tb_nhi * nhi)24979d3cce0bSMika Westerberg struct tb *tb_probe(struct tb_nhi *nhi)
24989d3cce0bSMika Westerberg {
24999d3cce0bSMika Westerberg 	struct tb_cm *tcm;
25009d3cce0bSMika Westerberg 	struct tb *tb;
25019d3cce0bSMika Westerberg 
25027f0a34d7SMika Westerberg 	tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
25039d3cce0bSMika Westerberg 	if (!tb)
25049d3cce0bSMika Westerberg 		return NULL;
25059d3cce0bSMika Westerberg 
2506c6da62a2SMika Westerberg 	if (tb_acpi_may_tunnel_pcie())
250799cabbb0SMika Westerberg 		tb->security_level = TB_SECURITY_USER;
2508c6da62a2SMika Westerberg 	else
2509c6da62a2SMika Westerberg 		tb->security_level = TB_SECURITY_NOPCIE;
2510c6da62a2SMika Westerberg 
25119d3cce0bSMika Westerberg 	tb->cm_ops = &tb_cm_ops;
25129d3cce0bSMika Westerberg 
25139d3cce0bSMika Westerberg 	tcm = tb_priv(tb);
25149d3cce0bSMika Westerberg 	INIT_LIST_HEAD(&tcm->tunnel_list);
25158afe909bSMika Westerberg 	INIT_LIST_HEAD(&tcm->dp_resources);
25166ac6faeeSMika Westerberg 	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
25176ce35635SMika Westerberg 	tb_init_bandwidth_groups(tcm);
25189d3cce0bSMika Westerberg 
2519e0258805SMika Westerberg 	tb_dbg(tb, "using software connection manager\n");
2520e0258805SMika Westerberg 
2521408e1d96SMika Westerberg 	/*
2522408e1d96SMika Westerberg 	 * Device links are needed to make sure we establish tunnels
2523408e1d96SMika Westerberg 	 * before the PCIe/USB stack is resumed so complain here if we
2524408e1d96SMika Westerberg 	 * found them missing.
2525408e1d96SMika Westerberg 	 */
2526408e1d96SMika Westerberg 	if (!tb_apple_add_links(nhi) && !tb_acpi_add_links(nhi))
2527408e1d96SMika Westerberg 		tb_warn(tb, "device links to tunneled native ports are missing!\n");
2528349bfe08SMika Westerberg 
25299d3cce0bSMika Westerberg 	return tb;
253023dd5bb4SAndreas Noever }
2531