// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

#define TB_TIMEOUT	100	/* ms */
#define MAX_GROUPS	7	/* max Group_ID is 7 */

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 * @remove_work: Work used to remove any unplugged routers after
 *		 runtime resume
 * @groups: Bandwidth groups used in this domain.
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
	struct delayed_work remove_work;
	struct tb_bandwidth_group groups[MAX_GROUPS];
};

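/*
 * The connection manager private data is allocated right after struct
 * tb in the same allocation (see tb_domain_alloc() and tb_priv()), so
 * we can get back to the domain by walking back sizeof(struct tb)
 * bytes from the private data.
 */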
static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
{
	return ((void *)tcm - sizeof(struct tb));
}

struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

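/*
 * Bandwidth group indices are 1-based so that they match the Group_ID
 * value programmed to the DP IN adapter (see usb4_dp_port_group_id()).
 */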
static void tb_init_bandwidth_groups(struct tb_cm *tcm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
		struct tb_bandwidth_group *group = &tcm->groups[i];

		group->tb = tcm_to_tb(tcm);
		group->index = i + 1;
		INIT_LIST_HEAD(&group->ports);
	}
}

static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
					   struct tb_port *in)
{
	if (!group || WARN_ON(in->group))
		return;

	in->group = group;
	list_add_tail(&in->group_list, &group->ports);

	tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
}

static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
		struct tb_bandwidth_group *group = &tcm->groups[i];

		if (list_empty(&group->ports))
			return group;
	}

	return NULL;
}

static struct tb_bandwidth_group *
tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
			  struct tb_port *out)
{
	struct tb_bandwidth_group *group;
	struct tb_tunnel *tunnel;

	/*
	 * Find all DP tunnels that go through all the same USB4 links
	 * as this one. Because we always set up tunnels the same way
	 * we can just check the routers at both ends of the tunnels:
	 * if they are the same we have a match.
	 */
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (!tb_tunnel_is_dp(tunnel))
			continue;

		if (tunnel->src_port->sw == in->sw &&
		    tunnel->dst_port->sw == out->sw) {
			group = tunnel->src_port->group;
			if (group) {
				tb_bandwidth_group_attach_port(group, in);
				return group;
			}
		}
	}

	/* Pick up next available group then */
	group = tb_find_free_bandwidth_group(tcm);
	if (group)
		tb_bandwidth_group_attach_port(group, in);
	else
		tb_port_warn(in, "no available bandwidth groups\n");

	return group;
}

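/*
 * If the bandwidth allocation mode is already enabled for the DP IN
 * adapter (likely set up by the boot firmware), reuse the group that
 * matches the programmed Group_ID; otherwise attach normally.
 */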
static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
					struct tb_port *out)
{
	if (usb4_dp_port_bandwidth_mode_enabled(in)) {
		int index, i;

		index = usb4_dp_port_group_id(in);
		for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
			if (tcm->groups[i].index == index) {
				tb_bandwidth_group_attach_port(&tcm->groups[i], in);
				return;
			}
		}
	}

	tb_attach_bandwidth_group(tcm, in, out);
}

static void tb_detach_bandwidth_group(struct tb_port *in)
{
	struct tb_bandwidth_group *group = in->group;

	if (group) {
		in->group = NULL;
		list_del_init(&in->group_list);

		tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
	}
}

static void tb_handle_hotplug(struct work_struct *work);

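/*
 * Queues a hotplug event to be handled in the domain workqueue. The
 * event is freed by tb_handle_hotplug() once processed; if the
 * allocation fails the event is silently dropped.
 */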
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

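/*
 * Adds all DP IN adapters of @sw that are currently available as DP
 * resources to the domain resource list.
 */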
static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}

static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP IN resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}

static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available discovered\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);
}

static void tb_discover_dp_resources(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_discover_dp_resource(tb, tunnel->dst_port);
	}
}

/* Enables CL states up to host router */
static int tb_enable_clx(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	unsigned int clx = TB_CL0S | TB_CL1;
	const struct tb_tunnel *tunnel;
	int ret;

	/*
	 * Currently only enable CLx for the first link. This is enough
	 * to allow the CPU to save energy at least on Intel hardware
	 * and makes it slightly simpler to implement. We may change
	 * this in the future to cover the whole topology if it turns
	 * out to be beneficial.
	 */
	while (sw && sw->config.depth > 1)
		sw = tb_switch_parent(sw);

	if (!sw)
		return 0;

	if (sw->config.depth != 1)
		return 0;

	/*
	 * If we are re-enabling then check if there is an active DMA
	 * tunnel and in that case bail out.
	 */
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dma(tunnel)) {
			if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw)))
				return 0;
		}
	}

	/*
	 * Initially try with CL2. If that's not supported by the
	 * topology try with CL0s and CL1 and then give up.
	 */
	ret = tb_switch_clx_enable(sw, clx | TB_CL2);
	if (ret == -EOPNOTSUPP)
		ret = tb_switch_clx_enable(sw, clx);
	return ret == -EOPNOTSUPP ? 0 : ret;
}

/* Disables CL states up to the host router */
static void tb_disable_clx(struct tb_switch *sw)
{
	do {
		if (tb_switch_clx_disable(sw) < 0)
			tb_sw_warn(sw, "failed to disable CL states\n");
		sw = tb_switch_parent(sw);
	} while (sw);
}

static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data)
{
	struct tb_switch *sw;

	sw = tb_to_switch(dev);
	if (!sw)
		return 0;

	if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) {
		enum tb_switch_tmu_mode mode;
		int ret;

		if (tb_switch_clx_is_enabled(sw, TB_CL1))
			mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
		else
			mode = TB_SWITCH_TMU_MODE_HIFI_BI;

		ret = tb_switch_tmu_configure(sw, mode);
		if (ret)
			return ret;

		return tb_switch_tmu_enable(sw);
	}

	return 0;
}

static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel)
{
	struct tb_switch *sw;

	if (!tunnel)
		return;

	/*
	 * Once first DP tunnel is established we change the TMU
	 * accuracy of first depth child routers (and the host router)
	 * to the highest. This is needed for the DP tunneling to work
	 * but also allows CL0s.
	 *
	 * If both routers are v2 then we don't need to do anything as
	 * they are using enhanced TMU mode that allows all CLx.
	 */
	sw = tunnel->tb->root_switch;
	device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy);
}

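/*
 * Configures the router TMU in the mode that is compatible with the
 * currently enabled CL states and (re-)enables it if needed.
 */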
static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/*
	 * If both routers at the end of the link are v2 we simply
	 * enable the enhanced uni-directional mode. That covers all
	 * the CL states. For v1 and before we need to use the normal
	 * rate to allow CL1 (when supported). Otherwise we keep the TMU
	 * running at the highest accuracy.
	 */
	ret = tb_switch_tmu_configure(sw,
			TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI);
	if (ret == -EOPNOTSUPP) {
		if (tb_switch_clx_is_enabled(sw, TB_CL1))
			ret = tb_switch_tmu_configure(sw,
					TB_SWITCH_TMU_MODE_LOWRES);
		else
			ret = tb_switch_tmu_configure(sw,
					TB_SWITCH_TMU_MODE_HIFI_BI);
	}
	if (ret)
		return ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}

static void tb_switch_discover_tunnels(struct tb_switch *sw,
				       struct list_head *list,
				       bool alloc_hopids)
{
	struct tb *tb = sw->tb;
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
			tb_increase_tmu_accuracy(tunnel);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
			break;

		default:
			break;
		}

		if (tunnel)
			list_add_tail(&tunnel->list, list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_discover_tunnels(port->remote->sw, list,
						   alloc_hopids);
		}
	}
}

static void tb_discover_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		} else if (tb_tunnel_is_dp(tunnel)) {
			struct tb_port *in = tunnel->src_port;
			struct tb_port *out = tunnel->dst_port;

			/* Keep the domain from powering down */
			pm_runtime_get_sync(&in->sw->dev);
			pm_runtime_get_sync(&out->sw->dev);

			tb_discover_bandwidth_group(tcm, in, out);
		}
	}
}

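/*
 * USB4 routers configure the inter-domain link through the USB4 port,
 * whereas legacy Thunderbolt routers go through the link controller.
 */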
static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_configure_xdomain(port, xd);
	return tb_lc_configure_xdomain(port);
}

static void tb_port_unconfigure_xdomain(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		usb4_port_unconfigure_xdomain(port);
	else
		tb_lc_unconfigure_xdomain(port);

	tb_port_enable(port->dual_link_port);
}

static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	if (!tb_is_xdomain_enabled())
		return;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_port_configure_xdomain(port, xd);
		tb_xdomain_add(xd);
	}
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down && !tb_usb3_port_is_enabled(down))
		return down;
	return NULL;
}

static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}

static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
						   struct tb_port *src_port,
						   struct tb_port *dst_port)
{
	struct tb_port *port, *usb3_down;
	struct tb_switch *sw;

	/* Pick the router that is deepest in the topology */
	if (dst_port->sw->config.depth > src_port->sw->config.depth)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	/* Can't be the host router */
	if (sw == tb->root_switch)
		return NULL;

	/* Find the downstream USB4 port that leads to this router */
	port = tb_port_at(tb_route(sw), tb->root_switch);
	/* Find the corresponding host router USB3 downstream port */
	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
	if (!usb3_down)
		return NULL;

	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
}

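/*
 * Computes the minimum bandwidth (in Mb/s) available in both
 * directions over the path between @src_port and @dst_port,
 * subtracting the bandwidth consumed by existing DP and USB3 tunnels
 * and a 10% guard band on each link.
 */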
static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
	struct tb_port *dst_port, int *available_up, int *available_down)
{
	int usb3_consumed_up, usb3_consumed_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_port *port;

	tb_dbg(tb, "calculating available bandwidth between %llx:%u <-> %llx:%u\n",
	       tb_route(src_port->sw), src_port->port, tb_route(dst_port->sw),
	       dst_port->port);

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel && tunnel->src_port != src_port &&
	    tunnel->dst_port != dst_port) {
		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
						   &usb3_consumed_down);
		if (ret)
			return ret;
	} else {
		usb3_consumed_up = 0;
		usb3_consumed_down = 0;
	}

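	/*
	 * An asymmetric USB4 link transmits on three lanes in one
	 * direction and one in the other, hence the 3x/1x multipliers
	 * below.
	 */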
	/* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */
	*available_up = *available_down = 120000;

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int link_speed, link_width, up_bw, down_bw;

		if (!tb_port_is_null(port))
			continue;

		if (tb_is_upstream_port(port)) {
			link_speed = port->sw->link_speed;
			/*
			 * sw->link_width is from upstream perspective
			 * so we use the opposite for downstream of the
			 * host router.
			 */
			if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
				up_bw = link_speed * 3 * 1000;
				down_bw = link_speed * 1 * 1000;
			} else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
				up_bw = link_speed * 1 * 1000;
				down_bw = link_speed * 3 * 1000;
			} else {
				up_bw = link_speed * port->sw->link_width * 1000;
				down_bw = up_bw;
			}
		} else {
			link_speed = tb_port_get_link_speed(port);
			if (link_speed < 0)
				return link_speed;

			link_width = tb_port_get_link_width(port);
			if (link_width < 0)
				return link_width;

			if (link_width == TB_LINK_WIDTH_ASYM_TX) {
				up_bw = link_speed * 1 * 1000;
				down_bw = link_speed * 3 * 1000;
			} else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
				up_bw = link_speed * 3 * 1000;
				down_bw = link_speed * 1 * 1000;
			} else {
				up_bw = link_speed * link_width * 1000;
				down_bw = up_bw;
			}
		}

		/* Leave 10% guard band */
		up_bw -= up_bw / 10;
		down_bw -= down_bw / 10;

		tb_port_dbg(port, "link total bandwidth %d/%d Mb/s\n", up_bw,
			    down_bw);

		/*
		 * Find all DP tunnels that cross the port and reduce
		 * their consumed bandwidth from the available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int dp_consumed_up, dp_consumed_down;

			if (tb_tunnel_is_invalid(tunnel))
				continue;

			if (!tb_tunnel_is_dp(tunnel))
				continue;

			if (!tb_tunnel_port_on_path(tunnel, port))
				continue;

			/*
			 * Ignore the DP tunnel between src_port and
			 * dst_port because it is the same tunnel and we
			 * may be re-calculating estimated bandwidth.
			 */
			if (tunnel->src_port == src_port &&
			    tunnel->dst_port == dst_port)
				continue;

			ret = tb_tunnel_consumed_bandwidth(tunnel,
							   &dp_consumed_up,
							   &dp_consumed_down);
			if (ret)
				return ret;

			up_bw -= dp_consumed_up;
			down_bw -= dp_consumed_down;
		}

		/*
		 * If USB3 is tunneled from the host router down to the
		 * branch leading to port we need to take USB3 consumed
		 * bandwidth into account regardless of whether it
		 * actually crosses the port.
		 */
		up_bw -= usb3_consumed_up;
		down_bw -= usb3_consumed_down;

		if (up_bw < *available_up)
			*available_up = up_bw;
		if (down_bw < *available_down)
			*available_down = down_bw;
	}

	if (*available_up < 0)
		*available_up = 0;
	if (*available_down < 0)
		*available_down = 0;

	return 0;
}

static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
}

static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
				      struct tb_port *dst_port)
{
	int ret, available_up, available_down;
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return;

	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");

	/*
	 * Calculate available bandwidth for the first hop USB3 tunnel.
	 * That determines the whole USB3 bandwidth for this branch.
	 */
	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
				     &available_up, &available_down);
	if (ret) {
		tb_warn(tb, "failed to calculate available bandwidth\n");
		return;
	}

	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
	       available_up, available_down);

	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
}

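/*
 * Creates a USB3 tunnel between the USB3 upstream adapter of @sw and
 * the matching USB3 downstream adapter of its parent, sizing the
 * tunnel according to the currently available bandwidth.
 */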
static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_usb3()) {
		tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
		return 0;
	}

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	if (!sw->link_usb4)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_switch_downstream_port(sw);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;

		/* Make all unused bandwidth available for the new tunnel */
		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
		if (ret)
			return ret;
	}

	ret = tb_available_bandwidth(tb, down, up, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return ret;
}

static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (!tb_acpi_may_tunnel_usb3())
		return 0;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_scan_port(struct tb_port *port);

/*
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	pm_runtime_get_sync(&sw->dev);

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}

/*
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	bool discovery = false;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */

	if (port->usb4)
		pm_runtime_get_sync(&port->usb4->dev);

	if (tb_wait_for_port(port, false) <= 0)
		goto out_rpm_put;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		goto out_rpm_put;
	}

	tb_retimer_scan(port, true);

	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		goto out_rpm_put;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		goto out_rpm_put;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		tb_port_unconfigure_xdomain(port);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active) {
		dev_set_uevent_suppress(&sw->dev, true);
		discovery = true;
	}

	/*
	 * At the moment we can support runtime PM only for Thunderbolt 2
	 * and beyond (devices with LC).
	 */
	sw->rpm = sw->generation > 1;

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		goto out_rpm_put;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	tb_switch_lane_bonding_enable(sw);
	/* Set the link configured */
	tb_switch_configure_link(sw);
	/*
	 * CL0s and CL1 are enabled and supported together.
	 * Silently ignore CLx enabling in case CLx is not supported.
	 */
	if (discovery)
		tb_sw_dbg(sw, "discovery, not touching CL states\n");
	else if (tb_enable_clx(sw))
		tb_sw_warn(sw, "failed to enable CL states\n");

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/*
	 * Configuration valid needs to be set after the TMU has been
	 * enabled for the upstream port of the router so we do it here.
	 */
	tb_switch_configuration_valid(sw);

	/* Scan upstream retimers */
	tb_retimer_scan(upstream_port, true);

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * any new.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);
	tb_scan_switch(sw);

out_rpm_put:
	if (port->usb4) {
		pm_runtime_mark_last_busy(&port->usb4->dev);
		pm_runtime_put_autosuspend(&port->usb4->dev);
	}
}

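/*
 * Tears down @tunnel and releases the resources it held: DP tunnels
 * give up their bandwidth group, DP IN resource and runtime PM
 * references, and the freed bandwidth is reclaimed for USB3.
 */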
10208afe909bSMika Westerberg static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
10218afe909bSMika Westerberg {
10220bd680cdSMika Westerberg 	struct tb_port *src_port, *dst_port;
10230bd680cdSMika Westerberg 	struct tb *tb;
10240bd680cdSMika Westerberg 
10258afe909bSMika Westerberg 	if (!tunnel)
10268afe909bSMika Westerberg 		return;
10278afe909bSMika Westerberg 
10288afe909bSMika Westerberg 	tb_tunnel_deactivate(tunnel);
10298afe909bSMika Westerberg 	list_del(&tunnel->list);
10308afe909bSMika Westerberg 
10310bd680cdSMika Westerberg 	tb = tunnel->tb;
10320bd680cdSMika Westerberg 	src_port = tunnel->src_port;
10330bd680cdSMika Westerberg 	dst_port = tunnel->dst_port;
10348afe909bSMika Westerberg 
10350bd680cdSMika Westerberg 	switch (tunnel->type) {
10360bd680cdSMika Westerberg 	case TB_TUNNEL_DP:
10376ce35635SMika Westerberg 		tb_detach_bandwidth_group(src_port);
10380bd680cdSMika Westerberg 		/*
10390bd680cdSMika Westerberg 		 * In case of DP tunnel make sure the DP IN resource is
10400bd680cdSMika Westerberg 		 * deallocated properly.
10410bd680cdSMika Westerberg 		 */
10420bd680cdSMika Westerberg 		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
10436ac6faeeSMika Westerberg 		/* Now we can allow the domain to runtime suspend again */
10446ac6faeeSMika Westerberg 		pm_runtime_mark_last_busy(&dst_port->sw->dev);
10456ac6faeeSMika Westerberg 		pm_runtime_put_autosuspend(&dst_port->sw->dev);
10466ac6faeeSMika Westerberg 		pm_runtime_mark_last_busy(&src_port->sw->dev);
10476ac6faeeSMika Westerberg 		pm_runtime_put_autosuspend(&src_port->sw->dev);
10480bd680cdSMika Westerberg 		fallthrough;
10490bd680cdSMika Westerberg 
10500bd680cdSMika Westerberg 	case TB_TUNNEL_USB3:
10510bd680cdSMika Westerberg 		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
10520bd680cdSMika Westerberg 		break;
10530bd680cdSMika Westerberg 
10540bd680cdSMika Westerberg 	default:
10550bd680cdSMika Westerberg 		/*
10560bd680cdSMika Westerberg 		 * PCIe and DMA tunnels do not consume guaranteed
10570bd680cdSMika Westerberg 		 * bandwidth.
10580bd680cdSMika Westerberg 		 */
10590bd680cdSMika Westerberg 		break;
10608afe909bSMika Westerberg 	}
10618afe909bSMika Westerberg 
10628afe909bSMika Westerberg 	tb_tunnel_free(tunnel);
10634f807e47SMika Westerberg }
10644f807e47SMika Westerberg 
1065877e50b3SLee Jones /*
10663364f0c1SAndreas Noever  * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
10673364f0c1SAndreas Noever  */
10683364f0c1SAndreas Noever static void tb_free_invalid_tunnels(struct tb *tb)
10693364f0c1SAndreas Noever {
10709d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
107193f36adeSMika Westerberg 	struct tb_tunnel *tunnel;
107293f36adeSMika Westerberg 	struct tb_tunnel *n;
10739d3cce0bSMika Westerberg 
10749d3cce0bSMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
10758afe909bSMika Westerberg 		if (tb_tunnel_is_invalid(tunnel))
10768afe909bSMika Westerberg 			tb_deactivate_and_free_tunnel(tunnel);
10773364f0c1SAndreas Noever 	}
10783364f0c1SAndreas Noever }
10793364f0c1SAndreas Noever 
1080877e50b3SLee Jones /*
108123dd5bb4SAndreas Noever  * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
108223dd5bb4SAndreas Noever  */
108323dd5bb4SAndreas Noever static void tb_free_unplugged_children(struct tb_switch *sw)
108423dd5bb4SAndreas Noever {
1085b433d010SMika Westerberg 	struct tb_port *port;
1086dfe40ca4SMika Westerberg 
1087b433d010SMika Westerberg 	tb_switch_for_each_port(sw, port) {
1088dfe40ca4SMika Westerberg 		if (!tb_port_has_remote(port))
108923dd5bb4SAndreas Noever 			continue;
1090dfe40ca4SMika Westerberg 
109123dd5bb4SAndreas Noever 		if (port->remote->sw->is_unplugged) {
1092dacb1287SKranthi Kuntala 			tb_retimer_remove_all(port);
10938afe909bSMika Westerberg 			tb_remove_dp_resources(port->remote->sw);
1094de462039SMika Westerberg 			tb_switch_unconfigure_link(port->remote->sw);
109591c0c120SMika Westerberg 			tb_switch_lane_bonding_disable(port->remote->sw);
1096bfe778acSMika Westerberg 			tb_switch_remove(port->remote->sw);
109723dd5bb4SAndreas Noever 			port->remote = NULL;
1098dfe40ca4SMika Westerberg 			if (port->dual_link_port)
1099dfe40ca4SMika Westerberg 				port->dual_link_port->remote = NULL;
110023dd5bb4SAndreas Noever 		} else {
110123dd5bb4SAndreas Noever 			tb_free_unplugged_children(port->remote->sw);
110223dd5bb4SAndreas Noever 		}
110323dd5bb4SAndreas Noever 	}
110423dd5bb4SAndreas Noever }
110523dd5bb4SAndreas Noever 
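/*
 * tb_find_pcie_down() - Find the PCIe downstream port to pair with @port
 *
 * For USB4 routers the mapping is queried from the router itself. For
 * legacy Thunderbolt host routers a hard-coded per-controller mapping
 * is used instead. If neither yields a usable (disabled) PCIe
 * downstream adapter, fall back to any unused one.
 */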
110699cabbb0SMika Westerberg static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
110799cabbb0SMika Westerberg 					 const struct tb_port *port)
11083364f0c1SAndreas Noever {
1109b0407983SMika Westerberg 	struct tb_port *down = NULL;
1110b0407983SMika Westerberg 
111199cabbb0SMika Westerberg 	/*
111299cabbb0SMika Westerberg 	 * To keep plugging devices consistently in the same PCIe
1113b0407983SMika Westerberg 	 * hierarchy, do mapping here for switch downstream PCIe ports.
111499cabbb0SMika Westerberg 	 */
1115b0407983SMika Westerberg 	if (tb_switch_is_usb4(sw)) {
1116b0407983SMika Westerberg 		down = usb4_switch_map_pcie_down(sw, port);
1117b0407983SMika Westerberg 	} else if (!tb_route(sw)) {
111899cabbb0SMika Westerberg 		int phy_port = tb_phy_port_from_link(port->port);
111999cabbb0SMika Westerberg 		int index;
112099cabbb0SMika Westerberg 
112199cabbb0SMika Westerberg 		/*
112299cabbb0SMika Westerberg 		 * Hard-coded Thunderbolt port to PCIe down port mapping
112399cabbb0SMika Westerberg 		 * per controller.
112499cabbb0SMika Westerberg 		 */
11257bffd97eSMika Westerberg 		if (tb_switch_is_cactus_ridge(sw) ||
11267bffd97eSMika Westerberg 		    tb_switch_is_alpine_ridge(sw))
112799cabbb0SMika Westerberg 			index = !phy_port ? 6 : 7;
112817a8f815SMika Westerberg 		else if (tb_switch_is_falcon_ridge(sw))
112999cabbb0SMika Westerberg 			index = !phy_port ? 6 : 8;
11307bffd97eSMika Westerberg 		else if (tb_switch_is_titan_ridge(sw))
11317bffd97eSMika Westerberg 			index = !phy_port ? 8 : 9;
113299cabbb0SMika Westerberg 		else
113399cabbb0SMika Westerberg 			goto out;
113499cabbb0SMika Westerberg 
113599cabbb0SMika Westerberg 		/* Validate the hard-coding */
113699cabbb0SMika Westerberg 		if (WARN_ON(index > sw->config.max_port_number))
113799cabbb0SMika Westerberg 			goto out;
1138b0407983SMika Westerberg 
1139b0407983SMika Westerberg 		down = &sw->ports[index];
1140b0407983SMika Westerberg 	}
1141b0407983SMika Westerberg 
1142b0407983SMika Westerberg 	if (down) {
1143b0407983SMika Westerberg 		if (WARN_ON(!tb_port_is_pcie_down(down)))
114499cabbb0SMika Westerberg 			goto out;
11459cac51a0SMika Westerberg 		if (tb_pci_port_is_enabled(down))
114699cabbb0SMika Westerberg 			goto out;
114799cabbb0SMika Westerberg 
1148b0407983SMika Westerberg 		return down;
114999cabbb0SMika Westerberg 	}
115099cabbb0SMika Westerberg 
115199cabbb0SMika Westerberg out:
1152e78db6f0SMika Westerberg 	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
115399cabbb0SMika Westerberg }
115499cabbb0SMika Westerberg 
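/*
 * Re-calculate the estimated bandwidth reported to the DP IN adapters
 * in the group that have the bandwidth allocation mode enabled. Unused
 * USB3 bandwidth is released only once for the whole group, then the
 * available bandwidth along each tunnel is computed and written back
 * to the corresponding DP IN adapter.
 */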
11556ce35635SMika Westerberg static void
11566ce35635SMika Westerberg tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
11576ce35635SMika Westerberg {
11586ce35635SMika Westerberg 	struct tb_tunnel *first_tunnel;
11596ce35635SMika Westerberg 	struct tb *tb = group->tb;
11606ce35635SMika Westerberg 	struct tb_port *in;
11616ce35635SMika Westerberg 	int ret;
11626ce35635SMika Westerberg 
11636ce35635SMika Westerberg 	tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
11646ce35635SMika Westerberg 	       group->index);
11656ce35635SMika Westerberg 
11666ce35635SMika Westerberg 	first_tunnel = NULL;
11676ce35635SMika Westerberg 	list_for_each_entry(in, &group->ports, group_list) {
11686ce35635SMika Westerberg 		int estimated_bw, estimated_up, estimated_down;
11696ce35635SMika Westerberg 		struct tb_tunnel *tunnel;
11706ce35635SMika Westerberg 		struct tb_port *out;
11716ce35635SMika Westerberg 
1172*8d73f6b8SMika Westerberg 		if (!usb4_dp_port_bandwidth_mode_enabled(in))
11736ce35635SMika Westerberg 			continue;
11746ce35635SMika Westerberg 
11756ce35635SMika Westerberg 		tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
11766ce35635SMika Westerberg 		if (WARN_ON(!tunnel))
11776ce35635SMika Westerberg 			break;
11786ce35635SMika Westerberg 
11796ce35635SMika Westerberg 		if (!first_tunnel) {
11806ce35635SMika Westerberg 			/*
11816ce35635SMika Westerberg 			 * Since USB3 bandwidth is shared by all DP
11826ce35635SMika Westerberg 			 * tunnels under the host router USB4 port, even
11836ce35635SMika Westerberg 			 * if they do not begin from the host router, we
11846ce35635SMika Westerberg 			 * can release USB3 bandwidth just once and not
11856ce35635SMika Westerberg 			 * for each tunnel separately.
11866ce35635SMika Westerberg 			 */
11876ce35635SMika Westerberg 			first_tunnel = tunnel;
11886ce35635SMika Westerberg 			ret = tb_release_unused_usb3_bandwidth(tb,
11896ce35635SMika Westerberg 				first_tunnel->src_port, first_tunnel->dst_port);
11906ce35635SMika Westerberg 			if (ret) {
11916ce35635SMika Westerberg 				tb_port_warn(in,
11926ce35635SMika Westerberg 					"failed to release unused bandwidth\n");
11936ce35635SMika Westerberg 				break;
11946ce35635SMika Westerberg 			}
11956ce35635SMika Westerberg 		}
11966ce35635SMika Westerberg 
11976ce35635SMika Westerberg 		out = tunnel->dst_port;
11986ce35635SMika Westerberg 		ret = tb_available_bandwidth(tb, in, out, &estimated_up,
11996ce35635SMika Westerberg 					     &estimated_down);
12006ce35635SMika Westerberg 		if (ret) {
12016ce35635SMika Westerberg 			tb_port_warn(in,
12026ce35635SMika Westerberg 				"failed to re-calculate estimated bandwidth\n");
12036ce35635SMika Westerberg 			break;
12046ce35635SMika Westerberg 		}
12056ce35635SMika Westerberg 
12066ce35635SMika Westerberg 		/*
12076ce35635SMika Westerberg 		 * Estimated bandwidth includes:
12086ce35635SMika Westerberg 		 *  - already allocated bandwidth for the DP tunnel
12096ce35635SMika Westerberg 		 *  - available bandwidth along the path
12106ce35635SMika Westerberg 		 *  - bandwidth allocated for USB 3.x but not used.
12116ce35635SMika Westerberg 		 */
12126ce35635SMika Westerberg 		tb_port_dbg(in, "re-calculated estimated bandwidth %u/%u Mb/s\n",
12136ce35635SMika Westerberg 			    estimated_up, estimated_down);
12146ce35635SMika Westerberg 
12156ce35635SMika Westerberg 		if (in->sw->config.depth < out->sw->config.depth)
12166ce35635SMika Westerberg 			estimated_bw = estimated_down;
12176ce35635SMika Westerberg 		else
12186ce35635SMika Westerberg 			estimated_bw = estimated_up;
12196ce35635SMika Westerberg 
1220*8d73f6b8SMika Westerberg 		if (usb4_dp_port_set_estimated_bandwidth(in, estimated_bw))
12216ce35635SMika Westerberg 			tb_port_warn(in, "failed to update estimated bandwidth\n");
12226ce35635SMika Westerberg 	}
12236ce35635SMika Westerberg 
12246ce35635SMika Westerberg 	if (first_tunnel)
12256ce35635SMika Westerberg 		tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
12266ce35635SMika Westerberg 					  first_tunnel->dst_port);
12276ce35635SMika Westerberg 
12286ce35635SMika Westerberg 	tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
12296ce35635SMika Westerberg }
12306ce35635SMika Westerberg 
12316ce35635SMika Westerberg static void tb_recalc_estimated_bandwidth(struct tb *tb)
12326ce35635SMika Westerberg {
12336ce35635SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
12346ce35635SMika Westerberg 	int i;
12356ce35635SMika Westerberg 
12366ce35635SMika Westerberg 	tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
12376ce35635SMika Westerberg 
12386ce35635SMika Westerberg 	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
12396ce35635SMika Westerberg 		struct tb_bandwidth_group *group = &tcm->groups[i];
12406ce35635SMika Westerberg 
12416ce35635SMika Westerberg 		if (!list_empty(&group->ports))
12426ce35635SMika Westerberg 			tb_recalc_estimated_bandwidth_for_group(group);
12436ce35635SMika Westerberg 	}
12446ce35635SMika Westerberg 
12456ce35635SMika Westerberg 	tb_dbg(tb, "bandwidth re-calculation done\n");
12466ce35635SMika Westerberg }
12476ce35635SMika Westerberg 
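/*
 * Find an inactive DP OUT adapter that can be paired with @in. If @in
 * is not on the host router itself, only DP OUT adapters reachable
 * through the same host router downstream port are considered so that
 * the tunnel stays within one topology branch.
 */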
1248e876f34aSMika Westerberg static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
1249e876f34aSMika Westerberg {
1250e876f34aSMika Westerberg 	struct tb_port *host_port, *port;
1251e876f34aSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
1252e876f34aSMika Westerberg 
1253e876f34aSMika Westerberg 	host_port = tb_route(in->sw) ?
1254e876f34aSMika Westerberg 		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
1255e876f34aSMika Westerberg 
1256e876f34aSMika Westerberg 	list_for_each_entry(port, &tcm->dp_resources, list) {
1257e876f34aSMika Westerberg 		if (!tb_port_is_dpout(port))
1258e876f34aSMika Westerberg 			continue;
1259e876f34aSMika Westerberg 
1260e876f34aSMika Westerberg 		if (tb_port_is_enabled(port)) {
1261b0ef48fcSMika Westerberg 			tb_port_dbg(port, "DP OUT in use\n");
1262e876f34aSMika Westerberg 			continue;
1263e876f34aSMika Westerberg 		}
1264e876f34aSMika Westerberg 
1265e876f34aSMika Westerberg 		tb_port_dbg(port, "DP OUT available\n");
1266e876f34aSMika Westerberg 
1267e876f34aSMika Westerberg 		/*
1268e876f34aSMika Westerberg 		 * Keep the DP tunnel under the topology starting from
1269e876f34aSMika Westerberg 		 * the same host router downstream port.
1270e876f34aSMika Westerberg 		 */
1271e876f34aSMika Westerberg 		if (host_port && tb_route(port->sw)) {
1272e876f34aSMika Westerberg 			struct tb_port *p;
1273e876f34aSMika Westerberg 
1274e876f34aSMika Westerberg 			p = tb_port_at(tb_route(port->sw), tb->root_switch);
1275e876f34aSMika Westerberg 			if (p != host_port)
1276e876f34aSMika Westerberg 				continue;
1277e876f34aSMika Westerberg 		}
1278e876f34aSMika Westerberg 
1279e876f34aSMika Westerberg 		return port;
1280e876f34aSMika Westerberg 	}
1281e876f34aSMika Westerberg 
1282e876f34aSMika Westerberg 	return NULL;
1283e876f34aSMika Westerberg }
1284e876f34aSMika Westerberg 
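/*
 * Pick a free DP IN <-> DP OUT pair from the DP resource list and
 * establish a DP tunnel between them, within the bandwidth that is
 * currently available along the path.
 */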
12858afe909bSMika Westerberg static void tb_tunnel_dp(struct tb *tb)
12864f807e47SMika Westerberg {
12879d2d0a5cSMika Westerberg 	int available_up, available_down, ret, link_nr;
12884f807e47SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
12898afe909bSMika Westerberg 	struct tb_port *port, *in, *out;
12904f807e47SMika Westerberg 	struct tb_tunnel *tunnel;
12914f807e47SMika Westerberg 
1292c6da62a2SMika Westerberg 	if (!tb_acpi_may_tunnel_dp()) {
1293c6da62a2SMika Westerberg 		tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
1294c6da62a2SMika Westerberg 		return;
1295c6da62a2SMika Westerberg 	}
1296c6da62a2SMika Westerberg 
12978afe909bSMika Westerberg 	/*
12988afe909bSMika Westerberg 	 * Find a pair of inactive DP IN and DP OUT adapters and then
12998afe909bSMika Westerberg 	 * establish a DP tunnel between them.
13008afe909bSMika Westerberg 	 */
13018afe909bSMika Westerberg 	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
13024f807e47SMika Westerberg 
13038afe909bSMika Westerberg 	in = NULL;
13048afe909bSMika Westerberg 	out = NULL;
13058afe909bSMika Westerberg 	list_for_each_entry(port, &tcm->dp_resources, list) {
1306e876f34aSMika Westerberg 		if (!tb_port_is_dpin(port))
1307e876f34aSMika Westerberg 			continue;
1308e876f34aSMika Westerberg 
13098afe909bSMika Westerberg 		if (tb_port_is_enabled(port)) {
1310b0ef48fcSMika Westerberg 			tb_port_dbg(port, "DP IN in use\n");
13118afe909bSMika Westerberg 			continue;
13128afe909bSMika Westerberg 		}
13138afe909bSMika Westerberg 
1314e876f34aSMika Westerberg 		tb_port_dbg(port, "DP IN available\n");
13158afe909bSMika Westerberg 
1316e876f34aSMika Westerberg 		out = tb_find_dp_out(tb, port);
1317e876f34aSMika Westerberg 		if (out) {
13188afe909bSMika Westerberg 			in = port;
1319e876f34aSMika Westerberg 			break;
1320e876f34aSMika Westerberg 		}
13218afe909bSMika Westerberg 	}
13228afe909bSMika Westerberg 
13238afe909bSMika Westerberg 	if (!in) {
13248afe909bSMika Westerberg 		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
13258afe909bSMika Westerberg 		return;
13268afe909bSMika Westerberg 	}
13278afe909bSMika Westerberg 	if (!out) {
13288afe909bSMika Westerberg 		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
13298afe909bSMika Westerberg 		return;
13308afe909bSMika Westerberg 	}
13318afe909bSMika Westerberg 
13326ac6faeeSMika Westerberg 	/*
13339d2d0a5cSMika Westerberg 	 * This is only applicable to links that are not bonded (so
13349d2d0a5cSMika Westerberg 	 * when Thunderbolt 1 hardware is involved somewhere in the
13359d2d0a5cSMika Westerberg 	 * topology). For these, try to share the DP bandwidth between
13369d2d0a5cSMika Westerberg 	 * the two lanes.
13379d2d0a5cSMika Westerberg 	 */
13389d2d0a5cSMika Westerberg 	link_nr = 1;
13399d2d0a5cSMika Westerberg 	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
13409d2d0a5cSMika Westerberg 		if (tb_tunnel_is_dp(tunnel)) {
13419d2d0a5cSMika Westerberg 			link_nr = 0;
13429d2d0a5cSMika Westerberg 			break;
13439d2d0a5cSMika Westerberg 		}
13449d2d0a5cSMika Westerberg 	}
13459d2d0a5cSMika Westerberg 
13469d2d0a5cSMika Westerberg 	/*
13476ac6faeeSMika Westerberg 	 * The DP stream needs the domain to be active, so runtime
13486ac6faeeSMika Westerberg 	 * resume both ends of the tunnel.
13496ac6faeeSMika Westerberg 	 *
13506ac6faeeSMika Westerberg 	 * This should bring the routers in the middle active as well
13516ac6faeeSMika Westerberg 	 * and keeps the domain from runtime suspending while the DP
13526ac6faeeSMika Westerberg 	 * tunnel is active.
13536ac6faeeSMika Westerberg 	 */
13546ac6faeeSMika Westerberg 	pm_runtime_get_sync(&in->sw->dev);
13556ac6faeeSMika Westerberg 	pm_runtime_get_sync(&out->sw->dev);
13566ac6faeeSMika Westerberg 
13578afe909bSMika Westerberg 	if (tb_switch_alloc_dp_resource(in->sw, in)) {
13588afe909bSMika Westerberg 		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
13596ac6faeeSMika Westerberg 		goto err_rpm_put;
13608afe909bSMika Westerberg 	}
13614f807e47SMika Westerberg 
13626ce35635SMika Westerberg 	if (!tb_attach_bandwidth_group(tcm, in, out))
13636ce35635SMika Westerberg 		goto err_dealloc_dp;
13646ce35635SMika Westerberg 
13650bd680cdSMika Westerberg 	/* Make all unused USB3 bandwidth available for the new DP tunnel */
13660bd680cdSMika Westerberg 	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
13670bd680cdSMika Westerberg 	if (ret) {
13680bd680cdSMika Westerberg 		tb_warn(tb, "failed to release unused bandwidth\n");
13696ce35635SMika Westerberg 		goto err_detach_group;
1370a11b88adSMika Westerberg 	}
1371a11b88adSMika Westerberg 
13726ce35635SMika Westerberg 	ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
13730bd680cdSMika Westerberg 	if (ret)
13746ce35635SMika Westerberg 		goto err_reclaim_usb;
1375a11b88adSMika Westerberg 
13760bd680cdSMika Westerberg 	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
13770bd680cdSMika Westerberg 	       available_up, available_down);
13780bd680cdSMika Westerberg 
13799d2d0a5cSMika Westerberg 	tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
13809d2d0a5cSMika Westerberg 				    available_down);
13814f807e47SMika Westerberg 	if (!tunnel) {
13828afe909bSMika Westerberg 		tb_port_dbg(out, "could not allocate DP tunnel\n");
13836ce35635SMika Westerberg 		goto err_reclaim_usb;
13844f807e47SMika Westerberg 	}
13854f807e47SMika Westerberg 
13864f807e47SMika Westerberg 	if (tb_tunnel_activate(tunnel)) {
13874f807e47SMika Westerberg 		tb_port_info(out, "DP tunnel activation failed, aborting\n");
13880bd680cdSMika Westerberg 		goto err_free;
13894f807e47SMika Westerberg 	}
13904f807e47SMika Westerberg 
13914f807e47SMika Westerberg 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
13920bd680cdSMika Westerberg 	tb_reclaim_usb3_bandwidth(tb, in, out);
13936ce35635SMika Westerberg 
13946ce35635SMika Westerberg 	/* Update the domain with the new bandwidth estimation */
13956ce35635SMika Westerberg 	tb_recalc_estimated_bandwidth(tb);
13966ce35635SMika Westerberg 
13973084b48fSGil Fine 	/*
13983084b48fSGil Fine 	 * If a DP tunnel exists, change the TMU mode of the host
13993084b48fSGil Fine 	 * router's first-depth children to HiFi so that CL0s can work.
14003084b48fSGil Fine 	 */
14017d283f41SMika Westerberg 	tb_increase_tmu_accuracy(tunnel);
14028afe909bSMika Westerberg 	return;
14038afe909bSMika Westerberg 
14040bd680cdSMika Westerberg err_free:
14050bd680cdSMika Westerberg 	tb_tunnel_free(tunnel);
14066ce35635SMika Westerberg err_reclaim_usb:
14070bd680cdSMika Westerberg 	tb_reclaim_usb3_bandwidth(tb, in, out);
14086ce35635SMika Westerberg err_detach_group:
14096ce35635SMika Westerberg 	tb_detach_bandwidth_group(in);
14100bd680cdSMika Westerberg err_dealloc_dp:
14118afe909bSMika Westerberg 	tb_switch_dealloc_dp_resource(in->sw, in);
14126ac6faeeSMika Westerberg err_rpm_put:
14136ac6faeeSMika Westerberg 	pm_runtime_mark_last_busy(&out->sw->dev);
14146ac6faeeSMika Westerberg 	pm_runtime_put_autosuspend(&out->sw->dev);
14156ac6faeeSMika Westerberg 	pm_runtime_mark_last_busy(&in->sw->dev);
14166ac6faeeSMika Westerberg 	pm_runtime_put_autosuspend(&in->sw->dev);
14174f807e47SMika Westerberg }
14184f807e47SMika Westerberg 
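/*
 * A DP IN or DP OUT resource was taken away. Tear down the DP tunnel
 * using it, drop the port from the resource list and see if another
 * tunnel can be created from the remaining resources.
 */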
14198afe909bSMika Westerberg static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
14204f807e47SMika Westerberg {
14218afe909bSMika Westerberg 	struct tb_port *in, *out;
14228afe909bSMika Westerberg 	struct tb_tunnel *tunnel;
14238afe909bSMika Westerberg 
14248afe909bSMika Westerberg 	if (tb_port_is_dpin(port)) {
14258afe909bSMika Westerberg 		tb_port_dbg(port, "DP IN resource unavailable\n");
14268afe909bSMika Westerberg 		in = port;
14278afe909bSMika Westerberg 		out = NULL;
14288afe909bSMika Westerberg 	} else {
14298afe909bSMika Westerberg 		tb_port_dbg(port, "DP OUT resource unavailable\n");
14308afe909bSMika Westerberg 		in = NULL;
14318afe909bSMika Westerberg 		out = port;
14328afe909bSMika Westerberg 	}
14338afe909bSMika Westerberg 
14348afe909bSMika Westerberg 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
14358afe909bSMika Westerberg 	tb_deactivate_and_free_tunnel(tunnel);
14368afe909bSMika Westerberg 	list_del_init(&port->list);
14378afe909bSMika Westerberg 
14388afe909bSMika Westerberg 	/*
14398afe909bSMika Westerberg 	 * See if there is another DP OUT port that can be used to
14408afe909bSMika Westerberg 	 * create another tunnel.
14418afe909bSMika Westerberg 	 */
14426ce35635SMika Westerberg 	tb_recalc_estimated_bandwidth(tb);
14438afe909bSMika Westerberg 	tb_tunnel_dp(tb);
14448afe909bSMika Westerberg }
14458afe909bSMika Westerberg 
14468afe909bSMika Westerberg static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
14478afe909bSMika Westerberg {
14488afe909bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
14498afe909bSMika Westerberg 	struct tb_port *p;
14508afe909bSMika Westerberg 
14518afe909bSMika Westerberg 	if (tb_port_is_enabled(port))
14528afe909bSMika Westerberg 		return;
14538afe909bSMika Westerberg 
14548afe909bSMika Westerberg 	list_for_each_entry(p, &tcm->dp_resources, list) {
14558afe909bSMika Westerberg 		if (p == port)
14568afe909bSMika Westerberg 			return;
14578afe909bSMika Westerberg 	}
14588afe909bSMika Westerberg 
14598afe909bSMika Westerberg 	tb_port_dbg(port, "DP %s resource available\n",
14608afe909bSMika Westerberg 		    tb_port_is_dpin(port) ? "IN" : "OUT");
14618afe909bSMika Westerberg 	list_add_tail(&port->list, &tcm->dp_resources);
14628afe909bSMika Westerberg 
14638afe909bSMika Westerberg 	/* Look for suitable DP IN <-> DP OUT pairs now */
14648afe909bSMika Westerberg 	tb_tunnel_dp(tb);
14654f807e47SMika Westerberg }
14664f807e47SMika Westerberg 
146781a2e3e4SMika Westerberg static void tb_disconnect_and_release_dp(struct tb *tb)
146881a2e3e4SMika Westerberg {
146981a2e3e4SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
147081a2e3e4SMika Westerberg 	struct tb_tunnel *tunnel, *n;
147181a2e3e4SMika Westerberg 
147281a2e3e4SMika Westerberg 	/*
147381a2e3e4SMika Westerberg 	 * Tear down all DP tunnels and release their resources. They
147481a2e3e4SMika Westerberg 	 * will be re-established after resume based on plug events.
147581a2e3e4SMika Westerberg 	 */
147681a2e3e4SMika Westerberg 	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
147781a2e3e4SMika Westerberg 		if (tb_tunnel_is_dp(tunnel))
147881a2e3e4SMika Westerberg 			tb_deactivate_and_free_tunnel(tunnel);
147981a2e3e4SMika Westerberg 	}
148081a2e3e4SMika Westerberg 
148181a2e3e4SMika Westerberg 	while (!list_empty(&tcm->dp_resources)) {
148281a2e3e4SMika Westerberg 		struct tb_port *port;
148381a2e3e4SMika Westerberg 
148481a2e3e4SMika Westerberg 		port = list_first_entry(&tcm->dp_resources,
148581a2e3e4SMika Westerberg 					struct tb_port, list);
148681a2e3e4SMika Westerberg 		list_del_init(&port->list);
148781a2e3e4SMika Westerberg 	}
148881a2e3e4SMika Westerberg }
148981a2e3e4SMika Westerberg 
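/*
 * Tear down the PCIe tunnel going to @sw: disconnect the xHCI of the
 * router first and then deactivate and free the tunnel itself.
 */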
14903da88be2SMika Westerberg static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
14913da88be2SMika Westerberg {
14923da88be2SMika Westerberg 	struct tb_tunnel *tunnel;
14933da88be2SMika Westerberg 	struct tb_port *up;
14943da88be2SMika Westerberg 
14953da88be2SMika Westerberg 	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
14963da88be2SMika Westerberg 	if (WARN_ON(!up))
14973da88be2SMika Westerberg 		return -ENODEV;
14983da88be2SMika Westerberg 
14993da88be2SMika Westerberg 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
15003da88be2SMika Westerberg 	if (WARN_ON(!tunnel))
15013da88be2SMika Westerberg 		return -ENODEV;
15023da88be2SMika Westerberg 
150330a4eca6SMika Westerberg 	tb_switch_xhci_disconnect(sw);
150430a4eca6SMika Westerberg 
15053da88be2SMika Westerberg 	tb_tunnel_deactivate(tunnel);
15063da88be2SMika Westerberg 	list_del(&tunnel->list);
15073da88be2SMika Westerberg 	tb_tunnel_free(tunnel);
15083da88be2SMika Westerberg 	return 0;
15093da88be2SMika Westerberg }
15103da88be2SMika Westerberg 
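/*
 * Create a PCIe tunnel from the PCIe downstream port right above @sw
 * to the PCIe upstream port of @sw, then enable PCIe L1 and connect
 * the xHCI where the hardware supports it.
 */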
151199cabbb0SMika Westerberg static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
151299cabbb0SMika Westerberg {
151399cabbb0SMika Westerberg 	struct tb_port *up, *down, *port;
15149d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
151599cabbb0SMika Westerberg 	struct tb_tunnel *tunnel;
15169d3cce0bSMika Westerberg 
1517386e5e29SMika Westerberg 	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
151899cabbb0SMika Westerberg 	if (!up)
151999cabbb0SMika Westerberg 		return 0;
15203364f0c1SAndreas Noever 
152199cabbb0SMika Westerberg 	/*
152299cabbb0SMika Westerberg 	 * Look up the available down port. Since we are chaining, it
152399cabbb0SMika Westerberg 	 * should be found right above this switch.
152499cabbb0SMika Westerberg 	 */
15257ce54221SGil Fine 	port = tb_switch_downstream_port(sw);
15267ce54221SGil Fine 	down = tb_find_pcie_down(tb_switch_parent(sw), port);
152799cabbb0SMika Westerberg 	if (!down)
152899cabbb0SMika Westerberg 		return 0;
15293364f0c1SAndreas Noever 
153099cabbb0SMika Westerberg 	tunnel = tb_tunnel_alloc_pci(tb, up, down);
153199cabbb0SMika Westerberg 	if (!tunnel)
153299cabbb0SMika Westerberg 		return -ENOMEM;
15333364f0c1SAndreas Noever 
153493f36adeSMika Westerberg 	if (tb_tunnel_activate(tunnel)) {
153599cabbb0SMika Westerberg 		tb_port_info(up,
15363364f0c1SAndreas Noever 			     "PCIe tunnel activation failed, aborting\n");
153793f36adeSMika Westerberg 		tb_tunnel_free(tunnel);
153899cabbb0SMika Westerberg 		return -EIO;
15393364f0c1SAndreas Noever 	}
15403364f0c1SAndreas Noever 
154143f977bcSGil Fine 	/*
154243f977bcSGil Fine 	 * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
154343f977bcSGil Fine 	 * here.
154443f977bcSGil Fine 	 */
154543f977bcSGil Fine 	if (tb_switch_pcie_l1_enable(sw))
154643f977bcSGil Fine 		tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
154743f977bcSGil Fine 
154830a4eca6SMika Westerberg 	if (tb_switch_xhci_connect(sw))
154930a4eca6SMika Westerberg 		tb_sw_warn(sw, "failed to connect xHCI\n");
155030a4eca6SMika Westerberg 
155199cabbb0SMika Westerberg 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
155299cabbb0SMika Westerberg 	return 0;
15533364f0c1SAndreas Noever }
15549da672a4SAndreas Noever 
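/*
 * Establish a DMA tunnel between the host NHI and the XDomain
 * connection at @xd->route. CL states are disabled for as long as a
 * DMA tunnel is active on the link.
 */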
1555180b0689SMika Westerberg static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1556180b0689SMika Westerberg 				    int transmit_path, int transmit_ring,
1557180b0689SMika Westerberg 				    int receive_path, int receive_ring)
15587ea4cd6bSMika Westerberg {
15597ea4cd6bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
15607ea4cd6bSMika Westerberg 	struct tb_port *nhi_port, *dst_port;
15617ea4cd6bSMika Westerberg 	struct tb_tunnel *tunnel;
15627ea4cd6bSMika Westerberg 	struct tb_switch *sw;
156353ba2e16SMika Westerberg 	int ret;
15647ea4cd6bSMika Westerberg 
15657ea4cd6bSMika Westerberg 	sw = tb_to_switch(xd->dev.parent);
15667ea4cd6bSMika Westerberg 	dst_port = tb_port_at(xd->route, sw);
1567386e5e29SMika Westerberg 	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
15687ea4cd6bSMika Westerberg 
15697ea4cd6bSMika Westerberg 	mutex_lock(&tb->lock);
157053ba2e16SMika Westerberg 
157153ba2e16SMika Westerberg 	/*
157253ba2e16SMika Westerberg 	 * When tunneling DMA paths the link should not enter CL
157353ba2e16SMika Westerberg 	 * states, so disable them now.
157453ba2e16SMika Westerberg 	 */
157553ba2e16SMika Westerberg 	tb_disable_clx(sw);
157653ba2e16SMika Westerberg 
1577180b0689SMika Westerberg 	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
1578180b0689SMika Westerberg 				     transmit_ring, receive_path, receive_ring);
15797ea4cd6bSMika Westerberg 	if (!tunnel) {
158053ba2e16SMika Westerberg 		ret = -ENOMEM;
158153ba2e16SMika Westerberg 		goto err_clx;
15827ea4cd6bSMika Westerberg 	}
15837ea4cd6bSMika Westerberg 
15847ea4cd6bSMika Westerberg 	if (tb_tunnel_activate(tunnel)) {
15857ea4cd6bSMika Westerberg 		tb_port_info(nhi_port,
15867ea4cd6bSMika Westerberg 			     "DMA tunnel activation failed, aborting\n");
158753ba2e16SMika Westerberg 		ret = -EIO;
158853ba2e16SMika Westerberg 		goto err_free;
15897ea4cd6bSMika Westerberg 	}
15907ea4cd6bSMika Westerberg 
15917ea4cd6bSMika Westerberg 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
15927ea4cd6bSMika Westerberg 	mutex_unlock(&tb->lock);
15937ea4cd6bSMika Westerberg 	return 0;
159453ba2e16SMika Westerberg 
159553ba2e16SMika Westerberg err_free:
159653ba2e16SMika Westerberg 	tb_tunnel_free(tunnel);
159753ba2e16SMika Westerberg err_clx:
159853ba2e16SMika Westerberg 	tb_enable_clx(sw);
159953ba2e16SMika Westerberg 	mutex_unlock(&tb->lock);
160053ba2e16SMika Westerberg 
160153ba2e16SMika Westerberg 	return ret;
16027ea4cd6bSMika Westerberg }
16037ea4cd6bSMika Westerberg 
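/*
 * Tear down the DMA tunnels between the host NHI and @xd that match
 * the given paths and rings; passing -1 for all of them (as done on
 * unplug) tears down every DMA tunnel of the XDomain. Caller must
 * hold tb->lock.
 */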
1604180b0689SMika Westerberg static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1605180b0689SMika Westerberg 					  int transmit_path, int transmit_ring,
1606180b0689SMika Westerberg 					  int receive_path, int receive_ring)
16077ea4cd6bSMika Westerberg {
1608180b0689SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
1609180b0689SMika Westerberg 	struct tb_port *nhi_port, *dst_port;
1610180b0689SMika Westerberg 	struct tb_tunnel *tunnel, *n;
16117ea4cd6bSMika Westerberg 	struct tb_switch *sw;
16127ea4cd6bSMika Westerberg 
16137ea4cd6bSMika Westerberg 	sw = tb_to_switch(xd->dev.parent);
16147ea4cd6bSMika Westerberg 	dst_port = tb_port_at(xd->route, sw);
1615180b0689SMika Westerberg 	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
16167ea4cd6bSMika Westerberg 
1617180b0689SMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1618180b0689SMika Westerberg 		if (!tb_tunnel_is_dma(tunnel))
1619180b0689SMika Westerberg 			continue;
1620180b0689SMika Westerberg 		if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
1621180b0689SMika Westerberg 			continue;
1622180b0689SMika Westerberg 
1623180b0689SMika Westerberg 		if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
1624180b0689SMika Westerberg 					receive_path, receive_ring))
16258afe909bSMika Westerberg 			tb_deactivate_and_free_tunnel(tunnel);
16267ea4cd6bSMika Westerberg 	}
162753ba2e16SMika Westerberg 
162853ba2e16SMika Westerberg 	/*
162953ba2e16SMika Westerberg 	 * Try to re-enable CL states now; it is OK if this fails
163053ba2e16SMika Westerberg 	 * because we may still have another DMA tunnel active through
163153ba2e16SMika Westerberg 	 * the same host router USB4 downstream port.
163253ba2e16SMika Westerberg 	 */
163353ba2e16SMika Westerberg 	tb_enable_clx(sw);
1634180b0689SMika Westerberg }
16357ea4cd6bSMika Westerberg 
1636180b0689SMika Westerberg static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1637180b0689SMika Westerberg 				       int transmit_path, int transmit_ring,
1638180b0689SMika Westerberg 				       int receive_path, int receive_ring)
16397ea4cd6bSMika Westerberg {
16407ea4cd6bSMika Westerberg 	if (!xd->is_unplugged) {
16417ea4cd6bSMika Westerberg 		mutex_lock(&tb->lock);
1642180b0689SMika Westerberg 		__tb_disconnect_xdomain_paths(tb, xd, transmit_path,
1643180b0689SMika Westerberg 					      transmit_ring, receive_path,
1644180b0689SMika Westerberg 					      receive_ring);
16457ea4cd6bSMika Westerberg 		mutex_unlock(&tb->lock);
16467ea4cd6bSMika Westerberg 	}
16477ea4cd6bSMika Westerberg 	return 0;
16487ea4cd6bSMika Westerberg }
16497ea4cd6bSMika Westerberg 
1650d6cc51cdSAndreas Noever /* hotplug handling */
1651d6cc51cdSAndreas Noever 
1652877e50b3SLee Jones /*
1653d6cc51cdSAndreas Noever  * tb_handle_hotplug() - handle hotplug event
1654d6cc51cdSAndreas Noever  *
1655d6cc51cdSAndreas Noever  * Executes on tb->wq.
1656d6cc51cdSAndreas Noever  */
1657d6cc51cdSAndreas Noever static void tb_handle_hotplug(struct work_struct *work)
1658d6cc51cdSAndreas Noever {
1659d6cc51cdSAndreas Noever 	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
1660d6cc51cdSAndreas Noever 	struct tb *tb = ev->tb;
16619d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
1662053596d9SAndreas Noever 	struct tb_switch *sw;
1663053596d9SAndreas Noever 	struct tb_port *port;
1664284652a4SMika Westerberg 
16656ac6faeeSMika Westerberg 	/* Bring the domain back from sleep if it was suspended */
16666ac6faeeSMika Westerberg 	pm_runtime_get_sync(&tb->dev);
16676ac6faeeSMika Westerberg 
1668d6cc51cdSAndreas Noever 	mutex_lock(&tb->lock);
16699d3cce0bSMika Westerberg 	if (!tcm->hotplug_active)
1670d6cc51cdSAndreas Noever 		goto out; /* during init, suspend or shutdown */
1671d6cc51cdSAndreas Noever 
16728f965efdSMika Westerberg 	sw = tb_switch_find_by_route(tb, ev->route);
1673053596d9SAndreas Noever 	if (!sw) {
1674053596d9SAndreas Noever 		tb_warn(tb,
1675053596d9SAndreas Noever 			"hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
1676053596d9SAndreas Noever 			ev->route, ev->port, ev->unplug);
1677053596d9SAndreas Noever 		goto out;
1678053596d9SAndreas Noever 	}
1679053596d9SAndreas Noever 	if (ev->port > sw->config.max_port_number) {
1680053596d9SAndreas Noever 		tb_warn(tb,
1681053596d9SAndreas Noever 			"hotplug event from non-existent port %llx:%x (unplug: %d)\n",
1682053596d9SAndreas Noever 			ev->route, ev->port, ev->unplug);
16838f965efdSMika Westerberg 		goto put_sw;
1684053596d9SAndreas Noever 	}
1685053596d9SAndreas Noever 	port = &sw->ports[ev->port];
1686053596d9SAndreas Noever 	if (tb_is_upstream_port(port)) {
1687dfe40ca4SMika Westerberg 		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
1688053596d9SAndreas Noever 		       ev->route, ev->port, ev->unplug);
16898f965efdSMika Westerberg 		goto put_sw;
1690053596d9SAndreas Noever 	}
16916ac6faeeSMika Westerberg 
16926ac6faeeSMika Westerberg 	pm_runtime_get_sync(&sw->dev);
16936ac6faeeSMika Westerberg 
1694053596d9SAndreas Noever 	if (ev->unplug) {
1695dacb1287SKranthi Kuntala 		tb_retimer_remove_all(port);
1696dacb1287SKranthi Kuntala 
1697dfe40ca4SMika Westerberg 		if (tb_port_has_remote(port)) {
16987ea4cd6bSMika Westerberg 			tb_port_dbg(port, "switch unplugged\n");
1699aae20bb6SLukas Wunner 			tb_sw_set_unplugged(port->remote->sw);
17003364f0c1SAndreas Noever 			tb_free_invalid_tunnels(tb);
17018afe909bSMika Westerberg 			tb_remove_dp_resources(port->remote->sw);
1702cf29b9afSRajmohan Mani 			tb_switch_tmu_disable(port->remote->sw);
1703de462039SMika Westerberg 			tb_switch_unconfigure_link(port->remote->sw);
170491c0c120SMika Westerberg 			tb_switch_lane_bonding_disable(port->remote->sw);
1705bfe778acSMika Westerberg 			tb_switch_remove(port->remote->sw);
1706053596d9SAndreas Noever 			port->remote = NULL;
1707dfe40ca4SMika Westerberg 			if (port->dual_link_port)
1708dfe40ca4SMika Westerberg 				port->dual_link_port->remote = NULL;
17098afe909bSMika Westerberg 			/* Maybe we can create another DP tunnel */
17106ce35635SMika Westerberg 			tb_recalc_estimated_bandwidth(tb);
17118afe909bSMika Westerberg 			tb_tunnel_dp(tb);
17127ea4cd6bSMika Westerberg 		} else if (port->xdomain) {
17137ea4cd6bSMika Westerberg 			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
17147ea4cd6bSMika Westerberg 
17157ea4cd6bSMika Westerberg 			tb_port_dbg(port, "xdomain unplugged\n");
17167ea4cd6bSMika Westerberg 			/*
17177ea4cd6bSMika Westerberg 			 * Service drivers are unbound during
17187ea4cd6bSMika Westerberg 			 * tb_xdomain_remove() so setting XDomain as
17197ea4cd6bSMika Westerberg 			 * unplugged here prevents deadlock if they call
17207ea4cd6bSMika Westerberg 			 * tb_xdomain_disable_paths(). We will tear down
1721180b0689SMika Westerberg 			 * all the tunnels below.
17227ea4cd6bSMika Westerberg 			 */
17237ea4cd6bSMika Westerberg 			xd->is_unplugged = true;
17247ea4cd6bSMika Westerberg 			tb_xdomain_remove(xd);
17257ea4cd6bSMika Westerberg 			port->xdomain = NULL;
1726180b0689SMika Westerberg 			__tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
17277ea4cd6bSMika Westerberg 			tb_xdomain_put(xd);
1728284652a4SMika Westerberg 			tb_port_unconfigure_xdomain(port);
17298afe909bSMika Westerberg 		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
17308afe909bSMika Westerberg 			tb_dp_resource_unavailable(tb, port);
173130a4eca6SMika Westerberg 		} else if (!port->port) {
173230a4eca6SMika Westerberg 			tb_sw_dbg(sw, "xHCI disconnect request\n");
173330a4eca6SMika Westerberg 			tb_switch_xhci_disconnect(sw);
1734053596d9SAndreas Noever 		} else {
173562efe699SMika Westerberg 			tb_port_dbg(port,
1736053596d9SAndreas Noever 				   "got unplug event for disconnected port, ignoring\n");
1737053596d9SAndreas Noever 		}
1738053596d9SAndreas Noever 	} else if (port->remote) {
173962efe699SMika Westerberg 		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
174030a4eca6SMika Westerberg 	} else if (!port->port && sw->authorized) {
174130a4eca6SMika Westerberg 		tb_sw_dbg(sw, "xHCI connect request\n");
174230a4eca6SMika Westerberg 		tb_switch_xhci_connect(sw);
1743053596d9SAndreas Noever 	} else {
1744344e0643SMika Westerberg 		if (tb_port_is_null(port)) {
174562efe699SMika Westerberg 			tb_port_dbg(port, "hotplug: scanning\n");
1746053596d9SAndreas Noever 			tb_scan_port(port);
174799cabbb0SMika Westerberg 			if (!port->remote)
174862efe699SMika Westerberg 				tb_port_dbg(port, "hotplug: no switch found\n");
17498afe909bSMika Westerberg 		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
17508afe909bSMika Westerberg 			tb_dp_resource_available(tb, port);
1751053596d9SAndreas Noever 		}
1752344e0643SMika Westerberg 	}
17538f965efdSMika Westerberg 
17546ac6faeeSMika Westerberg 	pm_runtime_mark_last_busy(&sw->dev);
17556ac6faeeSMika Westerberg 	pm_runtime_put_autosuspend(&sw->dev);
17566ac6faeeSMika Westerberg 
17578f965efdSMika Westerberg put_sw:
17588f965efdSMika Westerberg 	tb_switch_put(sw);
1759d6cc51cdSAndreas Noever out:
1760d6cc51cdSAndreas Noever 	mutex_unlock(&tb->lock);
17616ac6faeeSMika Westerberg 
17626ac6faeeSMika Westerberg 	pm_runtime_mark_last_busy(&tb->dev);
17636ac6faeeSMika Westerberg 	pm_runtime_put_autosuspend(&tb->dev);
17646ac6faeeSMika Westerberg 
1765d6cc51cdSAndreas Noever 	kfree(ev);
1766d6cc51cdSAndreas Noever }
1767d6cc51cdSAndreas Noever 
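/*
 * Handle a bandwidth allocation request coming from a DP IN adapter.
 * The request is first corrected against the DPRX-negotiated maximum
 * rounded up to the port granularity. A request that fits within the
 * current reservation simply changes it; a larger one is granted only
 * if enough bandwidth is available once unused USB3 bandwidth has
 * been released.
 */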
17686ce35635SMika Westerberg static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
17696ce35635SMika Westerberg 				 int *requested_down)
17706ce35635SMika Westerberg {
17716ce35635SMika Westerberg 	int allocated_up, allocated_down, available_up, available_down, ret;
17726ce35635SMika Westerberg 	int requested_up_corrected, requested_down_corrected, granularity;
17736ce35635SMika Westerberg 	int max_up, max_down, max_up_rounded, max_down_rounded;
17746ce35635SMika Westerberg 	struct tb *tb = tunnel->tb;
17756ce35635SMika Westerberg 	struct tb_port *in, *out;
17766ce35635SMika Westerberg 
17776ce35635SMika Westerberg 	ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
17786ce35635SMika Westerberg 	if (ret)
17796ce35635SMika Westerberg 		return ret;
17806ce35635SMika Westerberg 
17816ce35635SMika Westerberg 	in = tunnel->src_port;
17826ce35635SMika Westerberg 	out = tunnel->dst_port;
17836ce35635SMika Westerberg 
17846ce35635SMika Westerberg 	tb_port_dbg(in, "bandwidth allocated currently %d/%d Mb/s\n",
17856ce35635SMika Westerberg 		    allocated_up, allocated_down);
17866ce35635SMika Westerberg 
17876ce35635SMika Westerberg 	/*
17886ce35635SMika Westerberg 	 * If we get a rounded-up request from the graphics side, say
17896ce35635SMika Westerberg 	 * HBR2 x 4 that is 17500 instead of 17280 (this is because of
17906ce35635SMika Westerberg 	 * the granularity), we allow it too. Here the graphics has
17916ce35635SMika Westerberg 	 * already negotiated the maximum possible rates with the DPRX
17926ce35635SMika Westerberg 	 * (which is 17280 in this case).
17936ce35635SMika Westerberg 	 *
17946ce35635SMika Westerberg 	 * Since the link cannot go higher than 17280, we use that in
17956ce35635SMika Westerberg 	 * our calculations, but the DP IN adapter Allocated BW write
17966ce35635SMika Westerberg 	 * must be the same value (17500), otherwise the adapter will
17976ce35635SMika Westerberg 	 * mark it as failed for graphics.
17986ce35635SMika Westerberg 	 */
17996ce35635SMika Westerberg 	ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
18006ce35635SMika Westerberg 	if (ret)
18016ce35635SMika Westerberg 		return ret;
18026ce35635SMika Westerberg 
18036ce35635SMika Westerberg 	ret = usb4_dp_port_granularity(in);
18046ce35635SMika Westerberg 	if (ret < 0)
18056ce35635SMika Westerberg 		return ret;
18066ce35635SMika Westerberg 	granularity = ret;
18076ce35635SMika Westerberg 
18086ce35635SMika Westerberg 	max_up_rounded = roundup(max_up, granularity);
18096ce35635SMika Westerberg 	max_down_rounded = roundup(max_down, granularity);
18106ce35635SMika Westerberg 
18116ce35635SMika Westerberg 	/*
18126ce35635SMika Westerberg 	 * This will "fix" the request down to the maximum supported
18136ce35635SMika Westerberg 	 * rate * lanes if it is at the maximum rounded up level.
18146ce35635SMika Westerberg 	 */
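	/*
	 * For example, with HBR2 x 4 (17280 Mb/s) and an assumed
	 * granularity of 250 Mb/s, the rounded-up maximum is
	 * roundup(17280, 250) = 17500 Mb/s, so a request of exactly
	 * 17500 is corrected back down to 17280 for the calculations.
	 */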
18156ce35635SMika Westerberg 	requested_up_corrected = *requested_up;
18166ce35635SMika Westerberg 	if (requested_up_corrected == max_up_rounded)
18176ce35635SMika Westerberg 		requested_up_corrected = max_up;
18186ce35635SMika Westerberg 	else if (requested_up_corrected < 0)
18196ce35635SMika Westerberg 		requested_up_corrected = 0;
18206ce35635SMika Westerberg 	requested_down_corrected = *requested_down;
18216ce35635SMika Westerberg 	if (requested_down_corrected == max_down_rounded)
18226ce35635SMika Westerberg 		requested_down_corrected = max_down;
18236ce35635SMika Westerberg 	else if (requested_down_corrected < 0)
18246ce35635SMika Westerberg 		requested_down_corrected = 0;
18256ce35635SMika Westerberg 
18266ce35635SMika Westerberg 	tb_port_dbg(in, "corrected bandwidth request %d/%d Mb/s\n",
18276ce35635SMika Westerberg 		    requested_up_corrected, requested_down_corrected);
18286ce35635SMika Westerberg 
18296ce35635SMika Westerberg 	if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
18306ce35635SMika Westerberg 	    (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
18316ce35635SMika Westerberg 		tb_port_dbg(in, "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
18326ce35635SMika Westerberg 			    requested_up_corrected, requested_down_corrected,
18336ce35635SMika Westerberg 			    max_up_rounded, max_down_rounded);
18346ce35635SMika Westerberg 		return -ENOBUFS;
18356ce35635SMika Westerberg 	}
18366ce35635SMika Westerberg 
18376ce35635SMika Westerberg 	if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
18386ce35635SMika Westerberg 	    (*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
18396ce35635SMika Westerberg 		/*
18406ce35635SMika Westerberg 		 * If the requested bandwidth is less than or equal to
18416ce35635SMika Westerberg 		 * what is currently allocated to that tunnel, we simply
18426ce35635SMika Westerberg 		 * change the reservation of the tunnel. Since all the
18436ce35635SMika Westerberg 		 * tunnels going out from the same USB4 port are in the
18446ce35635SMika Westerberg 		 * same group, the released bandwidth will be taken into
18456ce35635SMika Westerberg 		 * account for the other tunnels automatically below.
18466ce35635SMika Westerberg 		 */
18476ce35635SMika Westerberg 		return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
18486ce35635SMika Westerberg 						 requested_down);
18496ce35635SMika Westerberg 	}
18506ce35635SMika Westerberg 
18516ce35635SMika Westerberg 	/*
18526ce35635SMika Westerberg 	 * More bandwidth is requested. Release all the potential
18536ce35635SMika Westerberg 	 * bandwidth from USB3 first.
18546ce35635SMika Westerberg 	 */
18556ce35635SMika Westerberg 	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
18566ce35635SMika Westerberg 	if (ret)
18576ce35635SMika Westerberg 		return ret;
18586ce35635SMika Westerberg 
18596ce35635SMika Westerberg 	/*
18606ce35635SMika Westerberg 	 * Then go over all tunnels that cross the same USB4 ports (they
18616ce35635SMika Westerberg 	 * are also in the same group, but we use the same function here
18626ce35635SMika Westerberg 	 * that we use with the normal bandwidth allocation).
18636ce35635SMika Westerberg 	 */
18646ce35635SMika Westerberg 	ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
18656ce35635SMika Westerberg 	if (ret)
18666ce35635SMika Westerberg 		goto reclaim;
18676ce35635SMika Westerberg 
18686ce35635SMika Westerberg 	tb_port_dbg(in, "bandwidth available for allocation %d/%d Mb/s\n",
18696ce35635SMika Westerberg 		    available_up, available_down);
18706ce35635SMika Westerberg 
18716ce35635SMika Westerberg 	if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
18726ce35635SMika Westerberg 	    (*requested_down >= 0 && available_down >= requested_down_corrected)) {
18736ce35635SMika Westerberg 		ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
18746ce35635SMika Westerberg 						requested_down);
18756ce35635SMika Westerberg 	} else {
18766ce35635SMika Westerberg 		ret = -ENOBUFS;
18776ce35635SMika Westerberg 	}
18786ce35635SMika Westerberg 
18796ce35635SMika Westerberg reclaim:
18806ce35635SMika Westerberg 	tb_reclaim_usb3_bandwidth(tb, in, out);
18816ce35635SMika Westerberg 	return ret;
18826ce35635SMika Westerberg }
18836ce35635SMika Westerberg 
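/*
 * Executes on tb->wq. Reads the requested bandwidth from the DP IN
 * adapter that raised the notification, turns it into an upstream or
 * downstream request depending on the direction of the tunnel, and
 * tries to re-allocate the bandwidth accordingly.
 */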
18846ce35635SMika Westerberg static void tb_handle_dp_bandwidth_request(struct work_struct *work)
18856ce35635SMika Westerberg {
18866ce35635SMika Westerberg 	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
18876ce35635SMika Westerberg 	int requested_bw, requested_up, requested_down, ret;
18886ce35635SMika Westerberg 	struct tb_port *in, *out;
18896ce35635SMika Westerberg 	struct tb_tunnel *tunnel;
18906ce35635SMika Westerberg 	struct tb *tb = ev->tb;
18916ce35635SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
18926ce35635SMika Westerberg 	struct tb_switch *sw;
18936ce35635SMika Westerberg 
18946ce35635SMika Westerberg 	pm_runtime_get_sync(&tb->dev);
18956ce35635SMika Westerberg 
18966ce35635SMika Westerberg 	mutex_lock(&tb->lock);
18976ce35635SMika Westerberg 	if (!tcm->hotplug_active)
18986ce35635SMika Westerberg 		goto unlock;
18996ce35635SMika Westerberg 
19006ce35635SMika Westerberg 	sw = tb_switch_find_by_route(tb, ev->route);
19016ce35635SMika Westerberg 	if (!sw) {
19026ce35635SMika Westerberg 		tb_warn(tb, "bandwidth request from non-existent router %llx\n",
19036ce35635SMika Westerberg 			ev->route);
19046ce35635SMika Westerberg 		goto unlock;
19056ce35635SMika Westerberg 	}
19066ce35635SMika Westerberg 
19076ce35635SMika Westerberg 	in = &sw->ports[ev->port];
19086ce35635SMika Westerberg 	if (!tb_port_is_dpin(in)) {
19096ce35635SMika Westerberg 		tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
19106ce35635SMika Westerberg 		goto unlock;
19116ce35635SMika Westerberg 	}
19126ce35635SMika Westerberg 
19136ce35635SMika Westerberg 	tb_port_dbg(in, "handling bandwidth allocation request\n");
19146ce35635SMika Westerberg 
1915*8d73f6b8SMika Westerberg 	if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
19166ce35635SMika Westerberg 		tb_port_warn(in, "bandwidth allocation mode not enabled\n");
19176ce35635SMika Westerberg 		goto unlock;
19186ce35635SMika Westerberg 	}
19196ce35635SMika Westerberg 
1920*8d73f6b8SMika Westerberg 	ret = usb4_dp_port_requested_bandwidth(in);
1921ace75e18SMika Westerberg 	if (ret < 0) {
1922ace75e18SMika Westerberg 		if (ret == -ENODATA)
19236ce35635SMika Westerberg 			tb_port_dbg(in, "no bandwidth request active\n");
1924ace75e18SMika Westerberg 		else
1925ace75e18SMika Westerberg 			tb_port_warn(in, "failed to read requested bandwidth\n");
19266ce35635SMika Westerberg 		goto unlock;
19276ce35635SMika Westerberg 	}
1928ace75e18SMika Westerberg 	requested_bw = ret;
19296ce35635SMika Westerberg 
19306ce35635SMika Westerberg 	tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
19316ce35635SMika Westerberg 
19326ce35635SMika Westerberg 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
19336ce35635SMika Westerberg 	if (!tunnel) {
19346ce35635SMika Westerberg 		tb_port_warn(in, "failed to find tunnel\n");
19356ce35635SMika Westerberg 		goto unlock;
19366ce35635SMika Westerberg 	}
19376ce35635SMika Westerberg 
19386ce35635SMika Westerberg 	out = tunnel->dst_port;
19396ce35635SMika Westerberg 
19406ce35635SMika Westerberg 	if (in->sw->config.depth < out->sw->config.depth) {
19416ce35635SMika Westerberg 		requested_up = -1;
19426ce35635SMika Westerberg 		requested_down = requested_bw;
19436ce35635SMika Westerberg 	} else {
19446ce35635SMika Westerberg 		requested_up = requested_bw;
19456ce35635SMika Westerberg 		requested_down = -1;
19466ce35635SMika Westerberg 	}
19476ce35635SMika Westerberg 
19486ce35635SMika Westerberg 	ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
19496ce35635SMika Westerberg 	if (ret) {
19506ce35635SMika Westerberg 		if (ret == -ENOBUFS)
19516ce35635SMika Westerberg 			tb_port_warn(in, "not enough bandwidth available\n");
19526ce35635SMika Westerberg 		else
19536ce35635SMika Westerberg 			tb_port_warn(in, "failed to change bandwidth allocation\n");
19546ce35635SMika Westerberg 	} else {
19556ce35635SMika Westerberg 		tb_port_dbg(in, "bandwidth allocation changed to %d/%d Mb/s\n",
19566ce35635SMika Westerberg 			    requested_up, requested_down);
19576ce35635SMika Westerberg 
19586ce35635SMika Westerberg 		/* Update other clients about the allocation change */
19596ce35635SMika Westerberg 		tb_recalc_estimated_bandwidth(tb);
19606ce35635SMika Westerberg 	}
19616ce35635SMika Westerberg 
19626ce35635SMika Westerberg unlock:
19636ce35635SMika Westerberg 	mutex_unlock(&tb->lock);
19646ce35635SMika Westerberg 
19656ce35635SMika Westerberg 	pm_runtime_mark_last_busy(&tb->dev);
19666ce35635SMika Westerberg 	pm_runtime_put_autosuspend(&tb->dev);
19676ce35635SMika Westerberg }
19686ce35635SMika Westerberg 
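/* Queue the bandwidth allocation request to be handled in tb->wq context */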
19696ce35635SMika Westerberg static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
19706ce35635SMika Westerberg {
19716ce35635SMika Westerberg 	struct tb_hotplug_event *ev;
19726ce35635SMika Westerberg 
19736ce35635SMika Westerberg 	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
19746ce35635SMika Westerberg 	if (!ev)
19756ce35635SMika Westerberg 		return;
19766ce35635SMika Westerberg 
19776ce35635SMika Westerberg 	ev->tb = tb;
19786ce35635SMika Westerberg 	ev->route = route;
19796ce35635SMika Westerberg 	ev->port = port;
19806ce35635SMika Westerberg 	INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
19816ce35635SMika Westerberg 	queue_work(tb->wq, &ev->work);
19826ce35635SMika Westerberg }
19836ce35635SMika Westerberg 
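/*
 * Ack the notifications the connection manager handles itself. A
 * TB_CFG_ERROR_DP_BW notification additionally queues a bandwidth
 * allocation request for the DP IN adapter that sent it.
 */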
19846ce35635SMika Westerberg static void tb_handle_notification(struct tb *tb, u64 route,
19856ce35635SMika Westerberg 				   const struct cfg_error_pkg *error)
19866ce35635SMika Westerberg {
19886ce35635SMika Westerberg 	switch (error->error) {
1989235d0194SMika Westerberg 	case TB_CFG_ERROR_PCIE_WAKE:
1990235d0194SMika Westerberg 	case TB_CFG_ERROR_DP_CON_CHANGE:
1991235d0194SMika Westerberg 	case TB_CFG_ERROR_DPTX_DISCOVERY:
1992235d0194SMika Westerberg 		if (tb_cfg_ack_notification(tb->ctl, route, error))
1993235d0194SMika Westerberg 			tb_warn(tb, "could not ack notification on %llx\n",
1994235d0194SMika Westerberg 				route);
1995235d0194SMika Westerberg 		break;
1996235d0194SMika Westerberg 
19976ce35635SMika Westerberg 	case TB_CFG_ERROR_DP_BW:
1998235d0194SMika Westerberg 		if (tb_cfg_ack_notification(tb->ctl, route, error))
1999235d0194SMika Westerberg 			tb_warn(tb, "could not ack notification on %llx\n",
2000235d0194SMika Westerberg 				route);
20016ce35635SMika Westerberg 		tb_queue_dp_bandwidth_request(tb, route, error->port);
20026ce35635SMika Westerberg 		break;
20036ce35635SMika Westerberg 
20046ce35635SMika Westerberg 	default:
2005235d0194SMika Westerberg 		/* Ignore for now */
2006235d0194SMika Westerberg 		break;
20076ce35635SMika Westerberg 	}
20086ce35635SMika Westerberg }
20096ce35635SMika Westerberg 
2010877e50b3SLee Jones /*
2011d6cc51cdSAndreas Noever  * tb_handle_event() - callback function for the control channel
2012d6cc51cdSAndreas Noever  *
2013d6cc51cdSAndreas Noever  * Delegates to tb_handle_hotplug.
2014d6cc51cdSAndreas Noever  */
201581a54b5eSMika Westerberg static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
201681a54b5eSMika Westerberg 			    const void *buf, size_t size)
2017d6cc51cdSAndreas Noever {
201881a54b5eSMika Westerberg 	const struct cfg_event_pkg *pkg = buf;
20196ce35635SMika Westerberg 	u64 route = tb_cfg_get_route(&pkg->header);
202081a54b5eSMika Westerberg 
20216ce35635SMika Westerberg 	switch (type) {
20226ce35635SMika Westerberg 	case TB_CFG_PKG_ERROR:
20236ce35635SMika Westerberg 		tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
20246ce35635SMika Westerberg 		return;
20256ce35635SMika Westerberg 	case TB_CFG_PKG_EVENT:
20266ce35635SMika Westerberg 		break;
20276ce35635SMika Westerberg 	default:
202881a54b5eSMika Westerberg 		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
202981a54b5eSMika Westerberg 		return;
203081a54b5eSMika Westerberg 	}
203181a54b5eSMika Westerberg 
2032210e9f56SMika Westerberg 	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
203381a54b5eSMika Westerberg 		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
203481a54b5eSMika Westerberg 			pkg->port);
203581a54b5eSMika Westerberg 	}
203681a54b5eSMika Westerberg 
20374f807e47SMika Westerberg 	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
2038d6cc51cdSAndreas Noever }
2039d6cc51cdSAndreas Noever 
20409d3cce0bSMika Westerberg static void tb_stop(struct tb *tb)
2041d6cc51cdSAndreas Noever {
20429d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
204393f36adeSMika Westerberg 	struct tb_tunnel *tunnel;
204493f36adeSMika Westerberg 	struct tb_tunnel *n;
20453364f0c1SAndreas Noever 
20466ac6faeeSMika Westerberg 	cancel_delayed_work(&tcm->remove_work);
20473364f0c1SAndreas Noever 	/* tunnels are only present after everything has been initialized */
20487ea4cd6bSMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
20497ea4cd6bSMika Westerberg 		/*
20507ea4cd6bSMika Westerberg 		 * DMA tunnels require the driver to be functional, so we
20517ea4cd6bSMika Westerberg 		 * tear them down. Other protocol tunnels can be left
20527ea4cd6bSMika Westerberg 		 * intact.
20537ea4cd6bSMika Westerberg 		 */
20547ea4cd6bSMika Westerberg 		if (tb_tunnel_is_dma(tunnel))
20557ea4cd6bSMika Westerberg 			tb_tunnel_deactivate(tunnel);
205693f36adeSMika Westerberg 		tb_tunnel_free(tunnel);
20577ea4cd6bSMika Westerberg 	}
2058bfe778acSMika Westerberg 	tb_switch_remove(tb->root_switch);
20599d3cce0bSMika Westerberg 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
2060d6cc51cdSAndreas Noever }
2061d6cc51cdSAndreas Noever 
206299cabbb0SMika Westerberg static int tb_scan_finalize_switch(struct device *dev, void *data)
206399cabbb0SMika Westerberg {
206499cabbb0SMika Westerberg 	if (tb_is_switch(dev)) {
206599cabbb0SMika Westerberg 		struct tb_switch *sw = tb_to_switch(dev);
206699cabbb0SMika Westerberg 
206799cabbb0SMika Westerberg 		/*
206899cabbb0SMika Westerberg 		 * If we found that the switch was already setup by the
206999cabbb0SMika Westerberg 		 * boot firmware, mark it as authorized now before we
207099cabbb0SMika Westerberg 		 * send uevent to userspace.
207199cabbb0SMika Westerberg 		 */
207299cabbb0SMika Westerberg 		 * If we found that the switch was already set up by the
207399cabbb0SMika Westerberg 			sw->authorized = 1;
207499cabbb0SMika Westerberg 
207599cabbb0SMika Westerberg 		dev_set_uevent_suppress(dev, false);
207699cabbb0SMika Westerberg 		kobject_uevent(&dev->kobj, KOBJ_ADD);
207799cabbb0SMika Westerberg 		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
207899cabbb0SMika Westerberg 	}
207999cabbb0SMika Westerberg 
208099cabbb0SMika Westerberg 	return 0;
208199cabbb0SMika Westerberg }
208299cabbb0SMika Westerberg 
20839d3cce0bSMika Westerberg static int tb_start(struct tb *tb)
2084d6cc51cdSAndreas Noever {
20859d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
2086bfe778acSMika Westerberg 	int ret;
2087d6cc51cdSAndreas Noever 
2088bfe778acSMika Westerberg 	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
2089444ac384SMika Westerberg 	if (IS_ERR(tb->root_switch))
2090444ac384SMika Westerberg 		return PTR_ERR(tb->root_switch);
2091a25c8b2fSAndreas Noever 
2092e6b245ccSMika Westerberg 	/*
2093e6b245ccSMika Westerberg 	 * ICM firmware upgrade needs running firmware, and in native
2094e6b245ccSMika Westerberg 	 * mode that is not available, so disable firmware upgrade of
2095e6b245ccSMika Westerberg 	 * the root switch.
20965172eb9aSSzuying Chen 	 *
20975172eb9aSSzuying Chen 	 * However, USB4 routers support NVM firmware upgrade if they
20985172eb9aSSzuying Chen 	 * implement the necessary router operations.
2099e6b245ccSMika Westerberg 	 */
21005172eb9aSSzuying Chen 	tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
21016ac6faeeSMika Westerberg 	/* All USB4 routers support runtime PM */
21026ac6faeeSMika Westerberg 	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
2103e6b245ccSMika Westerberg 
2104bfe778acSMika Westerberg 	ret = tb_switch_configure(tb->root_switch);
2105bfe778acSMika Westerberg 	if (ret) {
2106bfe778acSMika Westerberg 		tb_switch_put(tb->root_switch);
2107bfe778acSMika Westerberg 		return ret;
2108bfe778acSMika Westerberg 	}
2109bfe778acSMika Westerberg 
2110bfe778acSMika Westerberg 	/* Announce the switch to the world */
2111bfe778acSMika Westerberg 	ret = tb_switch_add(tb->root_switch);
2112bfe778acSMika Westerberg 	if (ret) {
2113bfe778acSMika Westerberg 		tb_switch_put(tb->root_switch);
2114bfe778acSMika Westerberg 		return ret;
2115bfe778acSMika Westerberg 	}
2116bfe778acSMika Westerberg 
2117b017a46dSGil Fine 	/*
2118b017a46dSGil Fine 	 * To support the highest CLx state, we set the host router's
2119b017a46dSGil Fine 	 * TMU to LowRes mode.
2120b017a46dSGil Fine 	 */
2121d49b4f04SMika Westerberg 	tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
2122cf29b9afSRajmohan Mani 	/* Enable TMU if it is off */
2123cf29b9afSRajmohan Mani 	tb_switch_tmu_enable(tb->root_switch);
21249da672a4SAndreas Noever 	/* Full scan to discover devices added before the driver was loaded. */
21259da672a4SAndreas Noever 	tb_scan_switch(tb->root_switch);
21260414bec5SMika Westerberg 	/* Find out tunnels created by the boot firmware */
212743bddb26SMika Westerberg 	tb_discover_tunnels(tb);
2128b60e31bfSSanjay R Mehta 	/* Add DP resources from the DP tunnels created by the boot firmware */
2129b60e31bfSSanjay R Mehta 	tb_discover_dp_resources(tb);
2130e6f81858SRajmohan Mani 	/*
2131e6f81858SRajmohan Mani 	 * If the boot firmware did not create USB 3.x tunnels, create
2132e6f81858SRajmohan Mani 	 * them now for the whole topology.
2133e6f81858SRajmohan Mani 	 */
2134e6f81858SRajmohan Mani 	tb_create_usb3_tunnels(tb->root_switch);
21358afe909bSMika Westerberg 	/* Add DP IN resources for the root switch */
21368afe909bSMika Westerberg 	tb_add_dp_resources(tb->root_switch);
213799cabbb0SMika Westerberg 	/* Make the discovered switches available to userspace */
213899cabbb0SMika Westerberg 	device_for_each_child(&tb->root_switch->dev, NULL,
213999cabbb0SMika Westerberg 			      tb_scan_finalize_switch);
21409da672a4SAndreas Noever 
2141d6cc51cdSAndreas Noever 	/* Allow tb_handle_hotplug to progress events */
21429d3cce0bSMika Westerberg 	tcm->hotplug_active = true;
21439d3cce0bSMika Westerberg 	return 0;
2144d6cc51cdSAndreas Noever }
2145d6cc51cdSAndreas Noever 
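/*
 * Noirq phase of system suspend: tear down DP tunnels, put the whole
 * topology to sleep and stop processing hotplug events.
 */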
21469d3cce0bSMika Westerberg static int tb_suspend_noirq(struct tb *tb)
214723dd5bb4SAndreas Noever {
21489d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
21499d3cce0bSMika Westerberg 
2150daa5140fSMika Westerberg 	tb_dbg(tb, "suspending...\n");
215181a2e3e4SMika Westerberg 	tb_disconnect_and_release_dp(tb);
21526ac6faeeSMika Westerberg 	tb_switch_suspend(tb->root_switch, false);
21539d3cce0bSMika Westerberg 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
2154daa5140fSMika Westerberg 	tb_dbg(tb, "suspend finished\n");
21559d3cce0bSMika Westerberg 
21569d3cce0bSMika Westerberg 	return 0;
215723dd5bb4SAndreas Noever }
215823dd5bb4SAndreas Noever 
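/*
 * Recursively re-enable CL states, TMU, lane bonding and link/XDomain
 * configuration that the routers lost while powered down.
 */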
215991c0c120SMika Westerberg static void tb_restore_children(struct tb_switch *sw)
216091c0c120SMika Westerberg {
216191c0c120SMika Westerberg 	struct tb_port *port;
216291c0c120SMika Westerberg 
21636ac6faeeSMika Westerberg 	/* No need to restore if the router is already unplugged */
21646ac6faeeSMika Westerberg 	if (sw->is_unplugged)
21656ac6faeeSMika Westerberg 		return;
21666ac6faeeSMika Westerberg 
21671a9b6cb8SMika Westerberg 	if (tb_enable_clx(sw))
21681a9b6cb8SMika Westerberg 		tb_sw_warn(sw, "failed to re-enable CL states\n");
2169b017a46dSGil Fine 
2170cf29b9afSRajmohan Mani 	if (tb_enable_tmu(sw))
2171cf29b9afSRajmohan Mani 		tb_sw_warn(sw, "failed to restore TMU configuration\n");
2172cf29b9afSRajmohan Mani 
2173d49b4f04SMika Westerberg 	tb_switch_configuration_valid(sw);
2174d49b4f04SMika Westerberg 
217591c0c120SMika Westerberg 	tb_switch_for_each_port(sw, port) {
2176284652a4SMika Westerberg 		if (!tb_port_has_remote(port) && !port->xdomain)
217791c0c120SMika Westerberg 			continue;
217891c0c120SMika Westerberg 
2179284652a4SMika Westerberg 		if (port->remote) {
21802ca3263aSMika Westerberg 			tb_switch_lane_bonding_enable(port->remote->sw);
2181de462039SMika Westerberg 			tb_switch_configure_link(port->remote->sw);
218291c0c120SMika Westerberg 
218391c0c120SMika Westerberg 			tb_restore_children(port->remote->sw);
2184284652a4SMika Westerberg 		} else if (port->xdomain) {
2185f9cad07bSMika Westerberg 			tb_port_configure_xdomain(port, port->xdomain);
2186284652a4SMika Westerberg 		}
218791c0c120SMika Westerberg 	}
218891c0c120SMika Westerberg }
218991c0c120SMika Westerberg 
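/*
 * Noirq phase of system resume: reset the root switch to get rid of
 * anything the firmware set up, restore the child routers, tear down
 * tunnels we did not create ourselves and restart our own before
 * re-enabling hotplug handling.
 */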
21909d3cce0bSMika Westerberg static int tb_resume_noirq(struct tb *tb)
219123dd5bb4SAndreas Noever {
21929d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
219393f36adeSMika Westerberg 	struct tb_tunnel *tunnel, *n;
219443bddb26SMika Westerberg 	unsigned int usb3_delay = 0;
219543bddb26SMika Westerberg 	LIST_HEAD(tunnels);
21969d3cce0bSMika Westerberg 
2197daa5140fSMika Westerberg 	tb_dbg(tb, "resuming...\n");
219823dd5bb4SAndreas Noever 
219923dd5bb4SAndreas Noever 	/* Remove any PCI devices the firmware might have set up */
2200356b6c4eSMika Westerberg 	tb_switch_reset(tb->root_switch);
220123dd5bb4SAndreas Noever 
220223dd5bb4SAndreas Noever 	tb_switch_resume(tb->root_switch);
220323dd5bb4SAndreas Noever 	tb_free_invalid_tunnels(tb);
220423dd5bb4SAndreas Noever 	tb_free_unplugged_children(tb->root_switch);
220591c0c120SMika Westerberg 	tb_restore_children(tb->root_switch);
220643bddb26SMika Westerberg 
220743bddb26SMika Westerberg 	/*
220843bddb26SMika Westerberg 	 * If we get here from suspend to disk, the boot firmware or
220943bddb26SMika Westerberg 	 * the restore kernel might have created tunnels of their own.
221043bddb26SMika Westerberg 	 * Since we cannot be sure they are usable for us, find and
221143bddb26SMika Westerberg 	 * tear them down.
221243bddb26SMika Westerberg 	 */
221343bddb26SMika Westerberg 	tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
221443bddb26SMika Westerberg 	list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
221543bddb26SMika Westerberg 		if (tb_tunnel_is_usb3(tunnel))
221643bddb26SMika Westerberg 			usb3_delay = 500;
221743bddb26SMika Westerberg 		tb_tunnel_deactivate(tunnel);
221843bddb26SMika Westerberg 		tb_tunnel_free(tunnel);
221943bddb26SMika Westerberg 	}
222043bddb26SMika Westerberg 
222143bddb26SMika Westerberg 	/* Re-create our tunnels now */
222243bddb26SMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
222343bddb26SMika Westerberg 		/* USB3 requires a delay before it can be re-activated */
222443bddb26SMika Westerberg 		if (tb_tunnel_is_usb3(tunnel)) {
222543bddb26SMika Westerberg 			msleep(usb3_delay);
222643bddb26SMika Westerberg 			/* Only need to do it once */
222743bddb26SMika Westerberg 			usb3_delay = 0;
222843bddb26SMika Westerberg 		}
222993f36adeSMika Westerberg 		tb_tunnel_restart(tunnel);
223043bddb26SMika Westerberg 	}
22319d3cce0bSMika Westerberg 	if (!list_empty(&tcm->tunnel_list)) {
223223dd5bb4SAndreas Noever 		/*
223323dd5bb4SAndreas Noever 		 * The PCIe links need some time to come up after the
223423dd5bb4SAndreas Noever 		 * tunnels are restarted; 100 ms has been enough in
223523dd5bb4SAndreas Noever 		 * practice.
223623dd5bb4SAndreas Noever 		 */
2236daa5140fSMika Westerberg 		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
223723dd5bb4SAndreas Noever 		msleep(100);
223823dd5bb4SAndreas Noever 	}
223923dd5bb4SAndreas Noever 	/* Allow tb_handle_hotplug to progress events */
22409d3cce0bSMika Westerberg 	tcm->hotplug_active = true;
2241daa5140fSMika Westerberg 	tb_dbg(tb, "resume finished\n");
22429d3cce0bSMika Westerberg 
22439d3cce0bSMika Westerberg 	return 0;
22449d3cce0bSMika Westerberg }
22459d3cce0bSMika Westerberg 
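/*
 * Recursively remove XDomains that were unplugged while the domain was
 * suspended. Returns the number of XDomains removed.
 */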
22467ea4cd6bSMika Westerberg static int tb_free_unplugged_xdomains(struct tb_switch *sw)
22477ea4cd6bSMika Westerberg {
2248b433d010SMika Westerberg 	struct tb_port *port;
2249b433d010SMika Westerberg 	int ret = 0;
22507ea4cd6bSMika Westerberg 
2251b433d010SMika Westerberg 	tb_switch_for_each_port(sw, port) {
22527ea4cd6bSMika Westerberg 		if (tb_is_upstream_port(port))
22537ea4cd6bSMika Westerberg 			continue;
22547ea4cd6bSMika Westerberg 		if (port->xdomain && port->xdomain->is_unplugged) {
2255dacb1287SKranthi Kuntala 			tb_retimer_remove_all(port);
22567ea4cd6bSMika Westerberg 			tb_xdomain_remove(port->xdomain);
2257284652a4SMika Westerberg 			tb_port_unconfigure_xdomain(port);
22587ea4cd6bSMika Westerberg 			port->xdomain = NULL;
22597ea4cd6bSMika Westerberg 			ret++;
22607ea4cd6bSMika Westerberg 		} else if (port->remote) {
22617ea4cd6bSMika Westerberg 			ret += tb_free_unplugged_xdomains(port->remote->sw);
22627ea4cd6bSMika Westerberg 		}
22637ea4cd6bSMika Westerberg 	}
22647ea4cd6bSMika Westerberg 
22657ea4cd6bSMika Westerberg 	return ret;
22667ea4cd6bSMika Westerberg }
22677ea4cd6bSMika Westerberg 
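/*
 * Hibernation freeze/thaw only pause and resume hotplug event
 * processing; the hardware state is left untouched.
 */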
2268884e4d57SMika Westerberg static int tb_freeze_noirq(struct tb *tb)
2269884e4d57SMika Westerberg {
2270884e4d57SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
2271884e4d57SMika Westerberg 
2272884e4d57SMika Westerberg 	tcm->hotplug_active = false;
2273884e4d57SMika Westerberg 	return 0;
2274884e4d57SMika Westerberg }
2275884e4d57SMika Westerberg 
2276884e4d57SMika Westerberg static int tb_thaw_noirq(struct tb *tb)
2277884e4d57SMika Westerberg {
2278884e4d57SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
2279884e4d57SMika Westerberg 
2280884e4d57SMika Westerberg 	tcm->hotplug_active = true;
2281884e4d57SMika Westerberg 	return 0;
2282884e4d57SMika Westerberg }
2283884e4d57SMika Westerberg 
22847ea4cd6bSMika Westerberg static void tb_complete(struct tb *tb)
22857ea4cd6bSMika Westerberg {
22867ea4cd6bSMika Westerberg 	/*
22877ea4cd6bSMika Westerberg 	 * Release any unplugged XDomains. If another domain was
22887ea4cd6bSMika Westerberg 	 * swapped in place of an unplugged XDomain, run another
22897ea4cd6bSMika Westerberg 	 * rescan to pick it up.
22907ea4cd6bSMika Westerberg 	 */
22917ea4cd6bSMika Westerberg 	mutex_lock(&tb->lock);
22927ea4cd6bSMika Westerberg 	if (tb_free_unplugged_xdomains(tb->root_switch))
22937ea4cd6bSMika Westerberg 		tb_scan_switch(tb->root_switch);
22947ea4cd6bSMika Westerberg 	mutex_unlock(&tb->lock);
22957ea4cd6bSMika Westerberg }
22967ea4cd6bSMika Westerberg 
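/*
 * Runtime suspend: put the whole topology to sleep (the second
 * argument to tb_switch_suspend() marks this as runtime suspend) and
 * pause hotplug event processing.
 */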
22976ac6faeeSMika Westerberg static int tb_runtime_suspend(struct tb *tb)
22986ac6faeeSMika Westerberg {
22996ac6faeeSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
23006ac6faeeSMika Westerberg 
23016ac6faeeSMika Westerberg 	mutex_lock(&tb->lock);
23026ac6faeeSMika Westerberg 	tb_switch_suspend(tb->root_switch, true);
23036ac6faeeSMika Westerberg 	tcm->hotplug_active = false;
23046ac6faeeSMika Westerberg 	mutex_unlock(&tb->lock);
23056ac6faeeSMika Westerberg 
23066ac6faeeSMika Westerberg 	return 0;
23076ac6faeeSMika Westerberg }
23086ac6faeeSMika Westerberg 
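/*
 * Deferred cleanup scheduled from tb_runtime_resume(): removes routers
 * and XDomains that turned out to be unplugged while the domain was
 * suspended.
 */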
23096ac6faeeSMika Westerberg static void tb_remove_work(struct work_struct *work)
23106ac6faeeSMika Westerberg {
23116ac6faeeSMika Westerberg 	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
23126ac6faeeSMika Westerberg 	struct tb *tb = tcm_to_tb(tcm);
23136ac6faeeSMika Westerberg 
23146ac6faeeSMika Westerberg 	mutex_lock(&tb->lock);
23156ac6faeeSMika Westerberg 	if (tb->root_switch) {
23166ac6faeeSMika Westerberg 		tb_free_unplugged_children(tb->root_switch);
23176ac6faeeSMika Westerberg 		tb_free_unplugged_xdomains(tb->root_switch);
23186ac6faeeSMika Westerberg 	}
23196ac6faeeSMika Westerberg 	mutex_unlock(&tb->lock);
23206ac6faeeSMika Westerberg }
23216ac6faeeSMika Westerberg 
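/*
 * Runtime resume: wake the topology, restore the child routers,
 * restart tunnels and schedule deferred removal of anything that was
 * unplugged while suspended.
 */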
23226ac6faeeSMika Westerberg static int tb_runtime_resume(struct tb *tb)
23236ac6faeeSMika Westerberg {
23246ac6faeeSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
23256ac6faeeSMika Westerberg 	struct tb_tunnel *tunnel, *n;
23266ac6faeeSMika Westerberg 
23276ac6faeeSMika Westerberg 	mutex_lock(&tb->lock);
23286ac6faeeSMika Westerberg 	tb_switch_resume(tb->root_switch);
23296ac6faeeSMika Westerberg 	tb_free_invalid_tunnels(tb);
23306ac6faeeSMika Westerberg 	tb_restore_children(tb->root_switch);
23316ac6faeeSMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
23326ac6faeeSMika Westerberg 		tb_tunnel_restart(tunnel);
23336ac6faeeSMika Westerberg 	tcm->hotplug_active = true;
23346ac6faeeSMika Westerberg 	mutex_unlock(&tb->lock);
23356ac6faeeSMika Westerberg 
23366ac6faeeSMika Westerberg 	/*
23376ac6faeeSMika Westerberg 	 * Schedule cleanup of any unplugged devices. Run this in a
23386ac6faeeSMika Westerberg 	 * separate worker to avoid a possible deadlock if the device
23396ac6faeeSMika Westerberg 	 * removal runtime-resumes the unplugged device.
23406ac6faeeSMika Westerberg 	 */
23416ac6faeeSMika Westerberg 	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
23426ac6faeeSMika Westerberg 	return 0;
23436ac6faeeSMika Westerberg }
23446ac6faeeSMika Westerberg 
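/* Connection manager operations used in the software CM mode */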
23459d3cce0bSMika Westerberg static const struct tb_cm_ops tb_cm_ops = {
23469d3cce0bSMika Westerberg 	.start = tb_start,
23479d3cce0bSMika Westerberg 	.stop = tb_stop,
23489d3cce0bSMika Westerberg 	.suspend_noirq = tb_suspend_noirq,
23499d3cce0bSMika Westerberg 	.resume_noirq = tb_resume_noirq,
2350884e4d57SMika Westerberg 	.freeze_noirq = tb_freeze_noirq,
2351884e4d57SMika Westerberg 	.thaw_noirq = tb_thaw_noirq,
23527ea4cd6bSMika Westerberg 	.complete = tb_complete,
23536ac6faeeSMika Westerberg 	.runtime_suspend = tb_runtime_suspend,
23546ac6faeeSMika Westerberg 	.runtime_resume = tb_runtime_resume,
235581a54b5eSMika Westerberg 	.handle_event = tb_handle_event,
23563da88be2SMika Westerberg 	.disapprove_switch = tb_disconnect_pci,
235799cabbb0SMika Westerberg 	.approve_switch = tb_tunnel_pci,
23587ea4cd6bSMika Westerberg 	.approve_xdomain_paths = tb_approve_xdomain_paths,
23597ea4cd6bSMika Westerberg 	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
23609d3cce0bSMika Westerberg };
23619d3cce0bSMika Westerberg 
2362349bfe08SMika Westerberg /*
2363349bfe08SMika Westerberg  * During suspend the Thunderbolt controller is reset and all PCIe
2364349bfe08SMika Westerberg  * tunnels are lost. The NHI driver will try to reestablish all tunnels
2365349bfe08SMika Westerberg  * during resume. This adds device links between the tunneled PCIe
2366349bfe08SMika Westerberg  * downstream ports and the NHI so that the device core makes sure the
2367349bfe08SMika Westerberg  * NHI is resumed before the rest.
2368349bfe08SMika Westerberg  */
2369349bfe08SMika Westerberg static void tb_apple_add_links(struct tb_nhi *nhi)
2370349bfe08SMika Westerberg {
2371349bfe08SMika Westerberg 	struct pci_dev *upstream, *pdev;
2372349bfe08SMika Westerberg 
2373349bfe08SMika Westerberg 	if (!x86_apple_machine)
2374349bfe08SMika Westerberg 		return;
2375349bfe08SMika Westerberg 
2376349bfe08SMika Westerberg 	switch (nhi->pdev->device) {
2377349bfe08SMika Westerberg 	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
2378349bfe08SMika Westerberg 	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
2379349bfe08SMika Westerberg 	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
2380349bfe08SMika Westerberg 	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
2381349bfe08SMika Westerberg 		break;
2382349bfe08SMika Westerberg 	default:
2383349bfe08SMika Westerberg 		return;
2384349bfe08SMika Westerberg 	}
2385349bfe08SMika Westerberg 
2386349bfe08SMika Westerberg 	upstream = pci_upstream_bridge(nhi->pdev);
2387349bfe08SMika Westerberg 	while (upstream) {
2388349bfe08SMika Westerberg 		if (!pci_is_pcie(upstream))
2389349bfe08SMika Westerberg 			return;
2390349bfe08SMika Westerberg 		if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
2391349bfe08SMika Westerberg 			break;
2392349bfe08SMika Westerberg 		upstream = pci_upstream_bridge(upstream);
2393349bfe08SMika Westerberg 	}
2394349bfe08SMika Westerberg 
2395349bfe08SMika Westerberg 	if (!upstream)
2396349bfe08SMika Westerberg 		return;
2397349bfe08SMika Westerberg 
2398349bfe08SMika Westerberg 	/*
2399349bfe08SMika Westerberg 	 * For each hotplug-capable downstream port, add a device link
2400349bfe08SMika Westerberg 	 * back to the NHI so that PCIe tunnels can be re-established
2401349bfe08SMika Westerberg 	 * after sleep.
2402349bfe08SMika Westerberg 	 */
2403349bfe08SMika Westerberg 	for_each_pci_bridge(pdev, upstream->subordinate) {
2404349bfe08SMika Westerberg 		const struct device_link *link;
2405349bfe08SMika Westerberg 
2406349bfe08SMika Westerberg 		if (!pci_is_pcie(pdev))
2407349bfe08SMika Westerberg 			continue;
2408349bfe08SMika Westerberg 		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
2409349bfe08SMika Westerberg 		    !pdev->is_hotplug_bridge)
2410349bfe08SMika Westerberg 			continue;
2411349bfe08SMika Westerberg 
2412349bfe08SMika Westerberg 		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
2413349bfe08SMika Westerberg 				       DL_FLAG_AUTOREMOVE_SUPPLIER |
2414349bfe08SMika Westerberg 				       DL_FLAG_PM_RUNTIME);
2415349bfe08SMika Westerberg 		if (link) {
2416349bfe08SMika Westerberg 			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
2417349bfe08SMika Westerberg 				dev_name(&pdev->dev));
2418349bfe08SMika Westerberg 		} else {
2419349bfe08SMika Westerberg 			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
2420349bfe08SMika Westerberg 				 dev_name(&pdev->dev));
2421349bfe08SMika Westerberg 		}
2422349bfe08SMika Westerberg 	}
2423349bfe08SMika Westerberg }
2424349bfe08SMika Westerberg 
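/*
 * tb_probe() - Set up the software connection manager for @nhi
 *
 * Allocates the domain, selects the security level depending on
 * whether ACPI allows PCIe tunneling, initializes the connection
 * manager state and adds the device links needed for correct resume
 * ordering.
 */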
24259d3cce0bSMika Westerberg struct tb *tb_probe(struct tb_nhi *nhi)
24269d3cce0bSMika Westerberg {
24279d3cce0bSMika Westerberg 	struct tb_cm *tcm;
24289d3cce0bSMika Westerberg 	struct tb *tb;
24299d3cce0bSMika Westerberg 
24307f0a34d7SMika Westerberg 	tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
24319d3cce0bSMika Westerberg 	if (!tb)
24329d3cce0bSMika Westerberg 		return NULL;
24339d3cce0bSMika Westerberg 
2434c6da62a2SMika Westerberg 	if (tb_acpi_may_tunnel_pcie())
243599cabbb0SMika Westerberg 		tb->security_level = TB_SECURITY_USER;
2436c6da62a2SMika Westerberg 	else
2437c6da62a2SMika Westerberg 		tb->security_level = TB_SECURITY_NOPCIE;
2438c6da62a2SMika Westerberg 
24399d3cce0bSMika Westerberg 	tb->cm_ops = &tb_cm_ops;
24409d3cce0bSMika Westerberg 
24419d3cce0bSMika Westerberg 	tcm = tb_priv(tb);
24429d3cce0bSMika Westerberg 	INIT_LIST_HEAD(&tcm->tunnel_list);
24438afe909bSMika Westerberg 	INIT_LIST_HEAD(&tcm->dp_resources);
24446ac6faeeSMika Westerberg 	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
24456ce35635SMika Westerberg 	tb_init_bandwidth_groups(tcm);
24469d3cce0bSMika Westerberg 
2447e0258805SMika Westerberg 	tb_dbg(tb, "using software connection manager\n");
2448e0258805SMika Westerberg 
2449349bfe08SMika Westerberg 	tb_apple_add_links(nhi);
2450349bfe08SMika Westerberg 	tb_acpi_add_links(nhi);
2451349bfe08SMika Westerberg 
24529d3cce0bSMika Westerberg 	return tb;
245323dd5bb4SAndreas Noever }
2454