// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

#define TB_TIMEOUT	100	/* ms */
#define MAX_GROUPS	7	/* max Group_ID is 7 */

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 * @remove_work: Work used to remove any unplugged routers after
 *		 runtime resume
 * @groups: Bandwidth groups used in this domain.
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
	struct delayed_work remove_work;
	struct tb_bandwidth_group groups[MAX_GROUPS];
};

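/*
 * The connection manager private data is allocated right after struct
 * tb in the same allocation (tb_probe() passes sizeof(struct tb_cm) as
 * the private size to tb_domain_alloc()), so stepping back
 * sizeof(struct tb) bytes from the private data recovers the domain
 * structure.
 */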
static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
{
	return ((void *)tcm - sizeof(struct tb));
}

struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_init_bandwidth_groups(struct tb_cm *tcm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
		struct tb_bandwidth_group *group = &tcm->groups[i];

		group->tb = tcm_to_tb(tcm);
		group->index = i + 1;
		INIT_LIST_HEAD(&group->ports);
	}
}

static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
					   struct tb_port *in)
{
	if (!group || WARN_ON(in->group))
		return;

	in->group = group;
	list_add_tail(&in->group_list, &group->ports);

	tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
}

static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
		struct tb_bandwidth_group *group = &tcm->groups[i];

		if (list_empty(&group->ports))
			return group;
	}

	return NULL;
}

static struct tb_bandwidth_group *
tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
			  struct tb_port *out)
{
	struct tb_bandwidth_group *group;
	struct tb_tunnel *tunnel;

	/*
	 * Find all DP tunnels that go through all the same USB4 links
	 * as this one. Because we always set up tunnels the same way we
	 * can just check for the routers at both ends of the tunnels
	 * and if they are the same we have a match.
	 */
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (!tb_tunnel_is_dp(tunnel))
			continue;

		if (tunnel->src_port->sw == in->sw &&
		    tunnel->dst_port->sw == out->sw) {
			group = tunnel->src_port->group;
			if (group) {
				tb_bandwidth_group_attach_port(group, in);
				return group;
			}
		}
	}

	/* Otherwise pick up the next available group */
	group = tb_find_free_bandwidth_group(tcm);
	if (group)
		tb_bandwidth_group_attach_port(group, in);
	else
		tb_port_warn(in, "no available bandwidth groups\n");

	return group;
}

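/*
 * During discovery the boot firmware may have already enabled the
 * bandwidth allocation mode on the DP IN adapter. If so, re-use the
 * Group_ID it programmed instead of picking a free group.
 */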
static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
					struct tb_port *out)
{
	if (usb4_dp_port_bw_mode_enabled(in)) {
		int index, i;

		index = usb4_dp_port_group_id(in);
		for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
			if (tcm->groups[i].index == index) {
				tb_bandwidth_group_attach_port(&tcm->groups[i], in);
				return;
			}
		}
	}

	tb_attach_bandwidth_group(tcm, in, out);
}

static void tb_detach_bandwidth_group(struct tb_port *in)
{
	struct tb_bandwidth_group *group = in->group;

	if (group) {
		in->group = NULL;
		list_del_init(&in->group_list);

		tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
	}
}

static void tb_handle_hotplug(struct work_struct *work);

static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
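	/* The event is freed at the end of tb_handle_hotplug() */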
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}

static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP IN resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}

static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available discovered\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);
}

static void tb_discover_dp_resources(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_discover_dp_resource(tb, tunnel->dst_port);
	}
}

static int tb_enable_clx(struct tb_switch *sw)
{
	int ret;

	/*
	 * Currently only enable CLx for the first link. This is enough
	 * to allow the CPU to save energy at least on Intel hardware
	 * and makes it slightly simpler to implement. We may change
	 * this in the future to cover the whole topology if it turns
	 * out to be beneficial.
	 */
	if (sw->config.depth != 1)
		return 0;

	/*
	 * CL0s and CL1 are enabled and supported together.
	 * Silently ignore CLx enabling in case CLx is not supported.
	 */
	ret = tb_switch_clx_enable(sw, TB_CL0S | TB_CL1);
	return ret == -EOPNOTSUPP ? 0 : ret;
}

static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data)
{
	struct tb_switch *sw;

	sw = tb_to_switch(dev);
	if (sw) {
		tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI,
					tb_switch_clx_is_enabled(sw, TB_CL1));
		if (tb_switch_tmu_enable(sw))
			tb_sw_warn(sw, "failed to increase TMU rate\n");
	}

	return 0;
}

static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel)
{
	struct tb_switch *sw;

	if (!tunnel)
		return;

	/*
	 * Once the first DP tunnel is established we change the TMU
	 * accuracy of first depth child routers (and the host router)
	 * to the highest. This is needed for the DP tunneling to work
	 * but also allows CL0s.
	 */
	sw = tunnel->tb->root_switch;
	device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy);
}

static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/*
	 * If CL1 is enabled then we need to configure the TMU accuracy
	 * level to normal. Otherwise we keep the TMU running at the
	 * highest accuracy.
	 */
	if (tb_switch_clx_is_enabled(sw, TB_CL1))
		ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true);
	else
		ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);
	if (ret)
		return ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}

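/*
 * Walk the topology depth-first and pick up tunnels that the boot
 * firmware (or a previous driver instance) has already established,
 * so they can be managed the same way as tunnels created here.
 */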
static void tb_switch_discover_tunnels(struct tb_switch *sw,
				       struct list_head *list,
				       bool alloc_hopids)
{
	struct tb *tb = sw->tb;
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
			tb_increase_tmu_accuracy(tunnel);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
			break;

		default:
			break;
		}

		if (tunnel)
			list_add_tail(&tunnel->list, list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_discover_tunnels(port->remote->sw, list,
						   alloc_hopids);
		}
	}
}

static void tb_discover_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		} else if (tb_tunnel_is_dp(tunnel)) {
			struct tb_port *in = tunnel->src_port;
			struct tb_port *out = tunnel->dst_port;

			/* Keep the domain from powering down */
			pm_runtime_get_sync(&in->sw->dev);
			pm_runtime_get_sync(&out->sw->dev);

			tb_discover_bandwidth_group(tcm, in, out);
		}
	}
}

static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_configure_xdomain(port, xd);
	return tb_lc_configure_xdomain(port);
}

static void tb_port_unconfigure_xdomain(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		usb4_port_unconfigure_xdomain(port);
	else
		tb_lc_unconfigure_xdomain(port);

	tb_port_enable(port->dual_link_port);
}

static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	if (!tb_is_xdomain_enabled())
		return;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_port_configure_xdomain(port, xd);
		tb_xdomain_add(xd);
	}
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down && !tb_usb3_port_is_enabled(down))
		return down;
	return NULL;
}

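/*
 * Either @src_port or @dst_port may be NULL in which case it is not
 * used for matching; a tunnel matches when the type is right and at
 * least one of the non-NULL ports is one of its ends.
 */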
static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}

static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
						   struct tb_port *src_port,
						   struct tb_port *dst_port)
{
	struct tb_port *port, *usb3_down;
	struct tb_switch *sw;

	/* Pick the router that is deepest in the topology */
	if (dst_port->sw->config.depth > src_port->sw->config.depth)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	/* Can't be the host router */
	if (sw == tb->root_switch)
		return NULL;

	/* Find the downstream USB4 port that leads to this router */
	port = tb_port_at(tb_route(sw), tb->root_switch);
	/* Find the corresponding host router USB3 downstream port */
	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
	if (!usb3_down)
		return NULL;

	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
}

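/*
 * Walks every lane adapter on the path between @src_port and
 * @dst_port, reduces each link's capacity by the guard band and by
 * what the existing DP and USB3 tunnels already consume, and returns
 * the minimum over all links as the bandwidth available for a new
 * tunnel.
 */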
static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
	struct tb_port *dst_port, int *available_up, int *available_down)
{
	int usb3_consumed_up, usb3_consumed_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_port *port;

	tb_dbg(tb, "calculating available bandwidth between %llx:%u <-> %llx:%u\n",
	       tb_route(src_port->sw), src_port->port, tb_route(dst_port->sw),
	       dst_port->port);

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel && tunnel->src_port != src_port &&
	    tunnel->dst_port != dst_port) {
		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
						   &usb3_consumed_down);
		if (ret)
			return ret;
	} else {
		usb3_consumed_up = 0;
		usb3_consumed_down = 0;
	}

	*available_up = *available_down = 40000;
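	/*
	 * Start from the 40 Gb/s link maximum; the links on the path
	 * below can only lower this.
	 */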

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int link_speed, link_width, up_bw, down_bw;

		if (!tb_port_is_null(port))
			continue;

		if (tb_is_upstream_port(port)) {
			link_speed = port->sw->link_speed;
		} else {
			link_speed = tb_port_get_link_speed(port);
			if (link_speed < 0)
				return link_speed;
		}

		link_width = port->bonded ? 2 : 1;

		up_bw = link_speed * link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		up_bw -= up_bw / 10;
		down_bw = up_bw;
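		/*
		 * For example, a bonded Gen 3 link (two 20 Gb/s lanes)
		 * starts at 20 * 2 * 1000 = 40000 Mb/s and 36000 Mb/s
		 * is left once the 10% guard band is taken off.
		 */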

		tb_port_dbg(port, "link total bandwidth %d/%d Mb/s\n", up_bw,
			    down_bw);

		/*
		 * Find all DP tunnels that cross the port and subtract
		 * their consumed bandwidth from what is available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int dp_consumed_up, dp_consumed_down;

			if (tb_tunnel_is_invalid(tunnel))
				continue;

			if (!tb_tunnel_is_dp(tunnel))
				continue;

			if (!tb_tunnel_port_on_path(tunnel, port))
				continue;

			/*
			 * Ignore the DP tunnel between src_port and
			 * dst_port because it is the same tunnel and we
			 * may be re-calculating estimated bandwidth.
			 */
			if (tunnel->src_port == src_port &&
			    tunnel->dst_port == dst_port)
				continue;

			ret = tb_tunnel_consumed_bandwidth(tunnel,
							   &dp_consumed_up,
							   &dp_consumed_down);
			if (ret)
				return ret;

			up_bw -= dp_consumed_up;
			down_bw -= dp_consumed_down;
		}

		/*
		 * If USB3 is tunneled from the host router down to the
		 * branch leading to port we need to take USB3 consumed
		 * bandwidth into account regardless of whether it actually
		 * crosses the port.
		 */
		up_bw -= usb3_consumed_up;
		down_bw -= usb3_consumed_down;

		if (up_bw < *available_up)
			*available_up = up_bw;
		if (down_bw < *available_down)
			*available_down = down_bw;
	}

	if (*available_up < 0)
		*available_up = 0;
	if (*available_down < 0)
		*available_down = 0;

	return 0;
}

static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
}

static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
				      struct tb_port *dst_port)
{
	int ret, available_up, available_down;
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return;

	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");

	/*
	 * Calculate available bandwidth for the first hop USB3 tunnel.
	 * That determines the whole USB3 bandwidth for this branch.
	 */
	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
				     &available_up, &available_down);
	if (ret) {
		tb_warn(tb, "failed to calculate available bandwidth\n");
		return;
	}

	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
	       available_up, available_down);

	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
}

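/*
 * Creates a USB3 tunnel between the USB3 downstream adapter of the
 * parent router and the USB3 upstream adapter of @sw. Unused first
 * hop USB3 bandwidth is released for the calculation and reclaimed
 * once the new tunnel has been established.
 */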
static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_usb3()) {
		tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
		return 0;
	}

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	if (!sw->link_usb4)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_switch_downstream_port(sw);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;

		/* Make all unused bandwidth available for the new tunnel */
		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
		if (ret)
			return ret;
	}

	ret = tb_available_bandwidth(tb, down, up, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return ret;
}

static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (!tb_acpi_may_tunnel_usb3())
		return 0;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_scan_port(struct tb_port *port);

/*
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	pm_runtime_get_sync(&sw->dev);

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}

/*
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	bool discovery = false;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */

	if (port->usb4)
		pm_runtime_get_sync(&port->usb4->dev);

	if (tb_wait_for_port(port, false) <= 0)
		goto out_rpm_put;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		goto out_rpm_put;
	}

	tb_retimer_scan(port, true);

	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		goto out_rpm_put;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		goto out_rpm_put;
	}

	/*
	 * If there was previously another domain connected, remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		tb_port_unconfigure_xdomain(port);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active) {
		dev_set_uevent_suppress(&sw->dev, true);
		discovery = true;
	}

	/*
	 * At the moment we can support runtime PM only for Thunderbolt 2
	 * and beyond (devices with LC).
	 */
	sw->rpm = sw->generation > 1;

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		goto out_rpm_put;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	tb_switch_lane_bonding_enable(sw);
	/* Set the link configured */
	tb_switch_configure_link(sw);
	/*
	 * CL0s and CL1 are enabled and supported together.
	 * Silently ignore CLx enabling in case CLx is not supported.
	 */
	if (discovery)
		tb_sw_dbg(sw, "discovery, not touching CL states\n");
	else if (tb_enable_clx(sw))
		tb_sw_warn(sw, "failed to enable CL states\n");

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/* Scan upstream retimers */
	tb_retimer_scan(upstream_port, true);

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * any new.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);
	tb_scan_switch(sw);

out_rpm_put:
	if (port->usb4) {
		pm_runtime_mark_last_busy(&port->usb4->dev);
		pm_runtime_put_autosuspend(&port->usb4->dev);
	}
}

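/*
 * Tears the tunnel down and releases whatever the tunnel type holds:
 * for DP the DP IN resource, the bandwidth group and the runtime PM
 * references, and for both DP and USB3 the reclaimable USB3 bandwidth
 * of the branch.
 */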
static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	struct tb_port *src_port, *dst_port;
	struct tb *tb;

	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	tb = tunnel->tb;
	src_port = tunnel->src_port;
	dst_port = tunnel->dst_port;

	switch (tunnel->type) {
	case TB_TUNNEL_DP:
		tb_detach_bandwidth_group(src_port);
		/*
		 * In case of DP tunnel make sure the DP IN resource is
		 * deallocated properly.
		 */
		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
		/* Now we can allow the domain to runtime suspend again */
		pm_runtime_mark_last_busy(&dst_port->sw->dev);
		pm_runtime_put_autosuspend(&dst_port->sw->dev);
		pm_runtime_mark_last_busy(&src_port->sw->dev);
		pm_runtime_put_autosuspend(&src_port->sw->dev);
		fallthrough;

	case TB_TUNNEL_USB3:
		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
		break;

	default:
		/*
		 * PCIe and DMA tunnels do not consume guaranteed
		 * bandwidth.
		 */
		break;
	}

	tb_tunnel_free(tunnel);
}

/*
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

/*
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

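/*
 * USB4 routers provide the PCIe downstream adapter mapping themselves
 * while for pre-USB4 host routers the mapping is hard-coded below per
 * controller generation.
 */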
101699cabbb0SMika Westerberg static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
101799cabbb0SMika Westerberg 					 const struct tb_port *port)
10183364f0c1SAndreas Noever {
1019b0407983SMika Westerberg 	struct tb_port *down = NULL;
1020b0407983SMika Westerberg 
102199cabbb0SMika Westerberg 	/*
102299cabbb0SMika Westerberg 	 * To keep plugging devices consistently in the same PCIe
1023b0407983SMika Westerberg 	 * hierarchy, do mapping here for switch downstream PCIe ports.
102499cabbb0SMika Westerberg 	 */
1025b0407983SMika Westerberg 	if (tb_switch_is_usb4(sw)) {
1026b0407983SMika Westerberg 		down = usb4_switch_map_pcie_down(sw, port);
1027b0407983SMika Westerberg 	} else if (!tb_route(sw)) {
102899cabbb0SMika Westerberg 		int phy_port = tb_phy_port_from_link(port->port);
102999cabbb0SMika Westerberg 		int index;
103099cabbb0SMika Westerberg 
103199cabbb0SMika Westerberg 		/*
103299cabbb0SMika Westerberg 		 * Hard-coded Thunderbolt port to PCIe down port mapping
103399cabbb0SMika Westerberg 		 * per controller.
103499cabbb0SMika Westerberg 		 */
10357bffd97eSMika Westerberg 		if (tb_switch_is_cactus_ridge(sw) ||
10367bffd97eSMika Westerberg 		    tb_switch_is_alpine_ridge(sw))
103799cabbb0SMika Westerberg 			index = !phy_port ? 6 : 7;
103817a8f815SMika Westerberg 		else if (tb_switch_is_falcon_ridge(sw))
103999cabbb0SMika Westerberg 			index = !phy_port ? 6 : 8;
10407bffd97eSMika Westerberg 		else if (tb_switch_is_titan_ridge(sw))
10417bffd97eSMika Westerberg 			index = !phy_port ? 8 : 9;
104299cabbb0SMika Westerberg 		else
104399cabbb0SMika Westerberg 			goto out;
104499cabbb0SMika Westerberg 
104599cabbb0SMika Westerberg 		/* Validate the hard-coding */
104699cabbb0SMika Westerberg 		if (WARN_ON(index > sw->config.max_port_number))
104799cabbb0SMika Westerberg 			goto out;
1048b0407983SMika Westerberg 
1049b0407983SMika Westerberg 		down = &sw->ports[index];
1050b0407983SMika Westerberg 	}
1051b0407983SMika Westerberg 
1052b0407983SMika Westerberg 	if (down) {
1053b0407983SMika Westerberg 		if (WARN_ON(!tb_port_is_pcie_down(down)))
105499cabbb0SMika Westerberg 			goto out;
10559cac51a0SMika Westerberg 		if (tb_pci_port_is_enabled(down))
105699cabbb0SMika Westerberg 			goto out;
105799cabbb0SMika Westerberg 
1058b0407983SMika Westerberg 		return down;
105999cabbb0SMika Westerberg 	}
106099cabbb0SMika Westerberg 
106199cabbb0SMika Westerberg out:
1062e78db6f0SMika Westerberg 	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
106399cabbb0SMika Westerberg }
106499cabbb0SMika Westerberg 
10656ce35635SMika Westerberg static void
10666ce35635SMika Westerberg tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
10676ce35635SMika Westerberg {
10686ce35635SMika Westerberg 	struct tb_tunnel *first_tunnel;
10696ce35635SMika Westerberg 	struct tb *tb = group->tb;
10706ce35635SMika Westerberg 	struct tb_port *in;
10716ce35635SMika Westerberg 	int ret;
10726ce35635SMika Westerberg 
10736ce35635SMika Westerberg 	tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
10746ce35635SMika Westerberg 	       group->index);
10756ce35635SMika Westerberg 
10766ce35635SMika Westerberg 	first_tunnel = NULL;
10776ce35635SMika Westerberg 	list_for_each_entry(in, &group->ports, group_list) {
10786ce35635SMika Westerberg 		int estimated_bw, estimated_up, estimated_down;
10796ce35635SMika Westerberg 		struct tb_tunnel *tunnel;
10806ce35635SMika Westerberg 		struct tb_port *out;
10816ce35635SMika Westerberg 
10826ce35635SMika Westerberg 		if (!usb4_dp_port_bw_mode_enabled(in))
10836ce35635SMika Westerberg 			continue;
10846ce35635SMika Westerberg 
10856ce35635SMika Westerberg 		tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
10866ce35635SMika Westerberg 		if (WARN_ON(!tunnel))
10876ce35635SMika Westerberg 			break;
10886ce35635SMika Westerberg 
10896ce35635SMika Westerberg 		if (!first_tunnel) {
10906ce35635SMika Westerberg 			/*
10916ce35635SMika Westerberg 			 * Since USB3 bandwidth is shared by all DP
10926ce35635SMika Westerberg 			 * tunnels under the host router USB4 port, even
10936ce35635SMika Westerberg 			 * if they do not begin from the host router, we
10946ce35635SMika Westerberg 			 * can release USB3 bandwidth just once and not
10956ce35635SMika Westerberg 			 * for each tunnel separately.
10966ce35635SMika Westerberg 			 */
10976ce35635SMika Westerberg 			first_tunnel = tunnel;
10986ce35635SMika Westerberg 			ret = tb_release_unused_usb3_bandwidth(tb,
10996ce35635SMika Westerberg 				first_tunnel->src_port, first_tunnel->dst_port);
11006ce35635SMika Westerberg 			if (ret) {
11016ce35635SMika Westerberg 				tb_port_warn(in,
11026ce35635SMika Westerberg 					"failed to release unused bandwidth\n");
11036ce35635SMika Westerberg 				break;
11046ce35635SMika Westerberg 			}
11056ce35635SMika Westerberg 		}
11066ce35635SMika Westerberg 
11076ce35635SMika Westerberg 		out = tunnel->dst_port;
11086ce35635SMika Westerberg 		ret = tb_available_bandwidth(tb, in, out, &estimated_up,
11096ce35635SMika Westerberg 					     &estimated_down);
11106ce35635SMika Westerberg 		if (ret) {
11116ce35635SMika Westerberg 			tb_port_warn(in,
11126ce35635SMika Westerberg 				"failed to re-calculate estimated bandwidth\n");
11136ce35635SMika Westerberg 			break;
11146ce35635SMika Westerberg 		}
11156ce35635SMika Westerberg 
11166ce35635SMika Westerberg 		/*
11176ce35635SMika Westerberg 		 * Estimated bandwidth includes:
11186ce35635SMika Westerberg 		 *  - already allocated bandwidth for the DP tunnel
11196ce35635SMika Westerberg 		 *  - available bandwidth along the path
11206ce35635SMika Westerberg 		 *  - bandwidth allocated for USB 3.x but not used.
11216ce35635SMika Westerberg 		 */
11226ce35635SMika Westerberg 		tb_port_dbg(in, "re-calculated estimated bandwidth %u/%u Mb/s\n",
11236ce35635SMika Westerberg 			    estimated_up, estimated_down);
11246ce35635SMika Westerberg 
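		/*
		 * Report the estimate for the direction the DP stream
		 * flows: downstream when the DP IN adapter is closer to
		 * the host router than the DP OUT adapter, upstream
		 * otherwise.
		 */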
11256ce35635SMika Westerberg 		if (in->sw->config.depth < out->sw->config.depth)
11266ce35635SMika Westerberg 			estimated_bw = estimated_down;
11276ce35635SMika Westerberg 		else
11286ce35635SMika Westerberg 			estimated_bw = estimated_up;
11296ce35635SMika Westerberg 
11306ce35635SMika Westerberg 		if (usb4_dp_port_set_estimated_bw(in, estimated_bw))
11316ce35635SMika Westerberg 			tb_port_warn(in, "failed to update estimated bandwidth\n");
11326ce35635SMika Westerberg 	}
11336ce35635SMika Westerberg 
11346ce35635SMika Westerberg 	if (first_tunnel)
11356ce35635SMika Westerberg 		tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
11366ce35635SMika Westerberg 					  first_tunnel->dst_port);
11376ce35635SMika Westerberg 
11386ce35635SMika Westerberg 	tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
11396ce35635SMika Westerberg }
11406ce35635SMika Westerberg 
11416ce35635SMika Westerberg static void tb_recalc_estimated_bandwidth(struct tb *tb)
11426ce35635SMika Westerberg {
11436ce35635SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
11446ce35635SMika Westerberg 	int i;
11456ce35635SMika Westerberg 
11466ce35635SMika Westerberg 	tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
11476ce35635SMika Westerberg 
11486ce35635SMika Westerberg 	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
11496ce35635SMika Westerberg 		struct tb_bandwidth_group *group = &tcm->groups[i];
11506ce35635SMika Westerberg 
11516ce35635SMika Westerberg 		if (!list_empty(&group->ports))
11526ce35635SMika Westerberg 			tb_recalc_estimated_bandwidth_for_group(group);
11536ce35635SMika Westerberg 	}
11546ce35635SMika Westerberg 
11556ce35635SMika Westerberg 	tb_dbg(tb, "bandwidth re-calculation done\n");
11566ce35635SMika Westerberg }
11576ce35635SMika Westerberg 
1158e876f34aSMika Westerberg static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
1159e876f34aSMika Westerberg {
1160e876f34aSMika Westerberg 	struct tb_port *host_port, *port;
1161e876f34aSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
1162e876f34aSMika Westerberg 
1163e876f34aSMika Westerberg 	host_port = tb_route(in->sw) ?
1164e876f34aSMika Westerberg 		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
1165e876f34aSMika Westerberg 
1166e876f34aSMika Westerberg 	list_for_each_entry(port, &tcm->dp_resources, list) {
1167e876f34aSMika Westerberg 		if (!tb_port_is_dpout(port))
1168e876f34aSMika Westerberg 			continue;
1169e876f34aSMika Westerberg 
1170e876f34aSMika Westerberg 		if (tb_port_is_enabled(port)) {
1171b0ef48fcSMika Westerberg 			tb_port_dbg(port, "DP OUT in use\n");
1172e876f34aSMika Westerberg 			continue;
1173e876f34aSMika Westerberg 		}
1174e876f34aSMika Westerberg 
1175e876f34aSMika Westerberg 		tb_port_dbg(port, "DP OUT available\n");
1176e876f34aSMika Westerberg 
1177e876f34aSMika Westerberg 		/*
1178e876f34aSMika Westerberg 		 * Keep the DP tunnel under the topology starting from
1179e876f34aSMika Westerberg 		 * the same host router downstream port.
1180e876f34aSMika Westerberg 		 */
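		/*
		 * That is, if the DP IN adapter sits behind host router
		 * downstream port A, only DP OUT adapters reached
		 * through that same port A are considered.
		 */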
1181e876f34aSMika Westerberg 		if (host_port && tb_route(port->sw)) {
1182e876f34aSMika Westerberg 			struct tb_port *p;
1183e876f34aSMika Westerberg 
1184e876f34aSMika Westerberg 			p = tb_port_at(tb_route(port->sw), tb->root_switch);
1185e876f34aSMika Westerberg 			if (p != host_port)
1186e876f34aSMika Westerberg 				continue;
1187e876f34aSMika Westerberg 		}
1188e876f34aSMika Westerberg 
1189e876f34aSMika Westerberg 		return port;
1190e876f34aSMika Westerberg 	}
1191e876f34aSMika Westerberg 
1192e876f34aSMika Westerberg 	return NULL;
1193e876f34aSMika Westerberg }
1194e876f34aSMika Westerberg 
11958afe909bSMika Westerberg static void tb_tunnel_dp(struct tb *tb)
11964f807e47SMika Westerberg {
11979d2d0a5cSMika Westerberg 	int available_up, available_down, ret, link_nr;
11984f807e47SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
11998afe909bSMika Westerberg 	struct tb_port *port, *in, *out;
12004f807e47SMika Westerberg 	struct tb_tunnel *tunnel;
12014f807e47SMika Westerberg 
1202c6da62a2SMika Westerberg 	if (!tb_acpi_may_tunnel_dp()) {
1203c6da62a2SMika Westerberg 		tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
1204c6da62a2SMika Westerberg 		return;
1205c6da62a2SMika Westerberg 	}
1206c6da62a2SMika Westerberg 
12078afe909bSMika Westerberg 	/*
12088afe909bSMika Westerberg 	 * Find a pair of inactive DP IN and DP OUT adapters and then
12098afe909bSMika Westerberg 	 * establish a DP tunnel between them.
12108afe909bSMika Westerberg 	 */
12118afe909bSMika Westerberg 	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
12124f807e47SMika Westerberg 
12138afe909bSMika Westerberg 	in = NULL;
12148afe909bSMika Westerberg 	out = NULL;
12158afe909bSMika Westerberg 	list_for_each_entry(port, &tcm->dp_resources, list) {
1216e876f34aSMika Westerberg 		if (!tb_port_is_dpin(port))
1217e876f34aSMika Westerberg 			continue;
1218e876f34aSMika Westerberg 
12198afe909bSMika Westerberg 		if (tb_port_is_enabled(port)) {
1220b0ef48fcSMika Westerberg 			tb_port_dbg(port, "DP IN in use\n");
12218afe909bSMika Westerberg 			continue;
12228afe909bSMika Westerberg 		}
12238afe909bSMika Westerberg 
1224e876f34aSMika Westerberg 		tb_port_dbg(port, "DP IN available\n");
12258afe909bSMika Westerberg 
1226e876f34aSMika Westerberg 		out = tb_find_dp_out(tb, port);
1227e876f34aSMika Westerberg 		if (out) {
12288afe909bSMika Westerberg 			in = port;
1229e876f34aSMika Westerberg 			break;
1230e876f34aSMika Westerberg 		}
12318afe909bSMika Westerberg 	}
12328afe909bSMika Westerberg 
12338afe909bSMika Westerberg 	if (!in) {
12348afe909bSMika Westerberg 		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
12358afe909bSMika Westerberg 		return;
12368afe909bSMika Westerberg 	}
12378afe909bSMika Westerberg 	if (!out) {
12388afe909bSMika Westerberg 		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
12398afe909bSMika Westerberg 		return;
12408afe909bSMika Westerberg 	}
12418afe909bSMika Westerberg 
12426ac6faeeSMika Westerberg 	/*
12439d2d0a5cSMika Westerberg 	 * This is only applicable to links that are not bonded (so
12449d2d0a5cSMika Westerberg 	 * when Thunderbolt 1 hardware is involved somewhere in the
12459d2d0a5cSMika Westerberg 	 * topology). For these, try to share the DP bandwidth between
12469d2d0a5cSMika Westerberg 	 * the two lanes.
12479d2d0a5cSMika Westerberg 	 */
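	/*
	 * The first DP tunnel thus defaults to link 1 and, once a DP
	 * tunnel already exists, the next one falls back to link 0, so
	 * the two streams end up on different lanes.
	 */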
12489d2d0a5cSMika Westerberg 	link_nr = 1;
12499d2d0a5cSMika Westerberg 	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
12509d2d0a5cSMika Westerberg 		if (tb_tunnel_is_dp(tunnel)) {
12519d2d0a5cSMika Westerberg 			link_nr = 0;
12529d2d0a5cSMika Westerberg 			break;
12539d2d0a5cSMika Westerberg 		}
12549d2d0a5cSMika Westerberg 	}
12559d2d0a5cSMika Westerberg 
12569d2d0a5cSMika Westerberg 	/*
12576ac6faeeSMika Westerberg 	 * DP stream needs the domain to be active so runtime resume
12586ac6faeeSMika Westerberg 	 * both ends of the tunnel.
12596ac6faeeSMika Westerberg 	 *
12606ac6faeeSMika Westerberg 	 * This should bring the routers in the middle active as well
12616ac6faeeSMika Westerberg 	 * and keeps the domain from runtime suspending while the DP
12626ac6faeeSMika Westerberg 	 * tunnel is active.
12636ac6faeeSMika Westerberg 	 */
12646ac6faeeSMika Westerberg 	pm_runtime_get_sync(&in->sw->dev);
12656ac6faeeSMika Westerberg 	pm_runtime_get_sync(&out->sw->dev);
12666ac6faeeSMika Westerberg 
12678afe909bSMika Westerberg 	if (tb_switch_alloc_dp_resource(in->sw, in)) {
12688afe909bSMika Westerberg 		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
12696ac6faeeSMika Westerberg 		goto err_rpm_put;
12708afe909bSMika Westerberg 	}
12714f807e47SMika Westerberg 
12726ce35635SMika Westerberg 	if (!tb_attach_bandwidth_group(tcm, in, out))
12736ce35635SMika Westerberg 		goto err_dealloc_dp;
12746ce35635SMika Westerberg 
12750bd680cdSMika Westerberg 	/* Make all unused USB3 bandwidth available for the new DP tunnel */
12760bd680cdSMika Westerberg 	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
12770bd680cdSMika Westerberg 	if (ret) {
12780bd680cdSMika Westerberg 		tb_warn(tb, "failed to release unused bandwidth\n");
12796ce35635SMika Westerberg 		goto err_detach_group;
1280a11b88adSMika Westerberg 	}
1281a11b88adSMika Westerberg 
12826ce35635SMika Westerberg 	ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
12830bd680cdSMika Westerberg 	if (ret)
12846ce35635SMika Westerberg 		goto err_reclaim_usb;
1285a11b88adSMika Westerberg 
12860bd680cdSMika Westerberg 	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
12870bd680cdSMika Westerberg 	       available_up, available_down);
12880bd680cdSMika Westerberg 
12899d2d0a5cSMika Westerberg 	tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
12909d2d0a5cSMika Westerberg 				    available_down);
12914f807e47SMika Westerberg 	if (!tunnel) {
12928afe909bSMika Westerberg 		tb_port_dbg(out, "could not allocate DP tunnel\n");
12936ce35635SMika Westerberg 		goto err_reclaim_usb;
12944f807e47SMika Westerberg 	}
12954f807e47SMika Westerberg 
12964f807e47SMika Westerberg 	if (tb_tunnel_activate(tunnel)) {
12974f807e47SMika Westerberg 		tb_port_info(out, "DP tunnel activation failed, aborting\n");
12980bd680cdSMika Westerberg 		goto err_free;
12994f807e47SMika Westerberg 	}
13004f807e47SMika Westerberg 
13014f807e47SMika Westerberg 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
13020bd680cdSMika Westerberg 	tb_reclaim_usb3_bandwidth(tb, in, out);
13036ce35635SMika Westerberg 
13046ce35635SMika Westerberg 	/* Update the domain with the new bandwidth estimation */
13056ce35635SMika Westerberg 	tb_recalc_estimated_bandwidth(tb);
13066ce35635SMika Westerberg 
13073084b48fSGil Fine 	/*
13083084b48fSGil Fine 	 * If a DP tunnel exists, change the TMU mode of the host
13093084b48fSGil Fine 	 * router's first-depth children to HiFi so that CL0s can work.
13103084b48fSGil Fine 	 */
13117d283f41SMika Westerberg 	tb_increase_tmu_accuracy(tunnel);
13128afe909bSMika Westerberg 	return;
13138afe909bSMika Westerberg 
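	/*
	 * Error unwind: undo the steps above in reverse order, freeing
	 * the tunnel, reclaiming USB3 bandwidth, detaching the
	 * bandwidth group, releasing the DP IN resource and dropping
	 * the runtime PM references.
	 */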
13140bd680cdSMika Westerberg err_free:
13150bd680cdSMika Westerberg 	tb_tunnel_free(tunnel);
13166ce35635SMika Westerberg err_reclaim_usb:
13170bd680cdSMika Westerberg 	tb_reclaim_usb3_bandwidth(tb, in, out);
13186ce35635SMika Westerberg err_detach_group:
13196ce35635SMika Westerberg 	tb_detach_bandwidth_group(in);
13200bd680cdSMika Westerberg err_dealloc_dp:
13218afe909bSMika Westerberg 	tb_switch_dealloc_dp_resource(in->sw, in);
13226ac6faeeSMika Westerberg err_rpm_put:
13236ac6faeeSMika Westerberg 	pm_runtime_mark_last_busy(&out->sw->dev);
13246ac6faeeSMika Westerberg 	pm_runtime_put_autosuspend(&out->sw->dev);
13256ac6faeeSMika Westerberg 	pm_runtime_mark_last_busy(&in->sw->dev);
13266ac6faeeSMika Westerberg 	pm_runtime_put_autosuspend(&in->sw->dev);
13274f807e47SMika Westerberg }
13284f807e47SMika Westerberg 
13298afe909bSMika Westerberg static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
13304f807e47SMika Westerberg {
13318afe909bSMika Westerberg 	struct tb_port *in, *out;
13328afe909bSMika Westerberg 	struct tb_tunnel *tunnel;
13338afe909bSMika Westerberg 
13348afe909bSMika Westerberg 	if (tb_port_is_dpin(port)) {
13358afe909bSMika Westerberg 		tb_port_dbg(port, "DP IN resource unavailable\n");
13368afe909bSMika Westerberg 		in = port;
13378afe909bSMika Westerberg 		out = NULL;
13388afe909bSMika Westerberg 	} else {
13398afe909bSMika Westerberg 		tb_port_dbg(port, "DP OUT resource unavailable\n");
13408afe909bSMika Westerberg 		in = NULL;
13418afe909bSMika Westerberg 		out = port;
13428afe909bSMika Westerberg 	}
13438afe909bSMika Westerberg 
13448afe909bSMika Westerberg 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
13458afe909bSMika Westerberg 	tb_deactivate_and_free_tunnel(tunnel);
13468afe909bSMika Westerberg 	list_del_init(&port->list);
13478afe909bSMika Westerberg 
13488afe909bSMika Westerberg 	/*
13498afe909bSMika Westerberg 	 * See if there is another DP OUT port that can be used
13508afe909bSMika Westerberg 	 * to create another tunnel.
13518afe909bSMika Westerberg 	 */
13526ce35635SMika Westerberg 	tb_recalc_estimated_bandwidth(tb);
13538afe909bSMika Westerberg 	tb_tunnel_dp(tb);
13548afe909bSMika Westerberg }
13558afe909bSMika Westerberg 
13568afe909bSMika Westerberg static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
13578afe909bSMika Westerberg {
13588afe909bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
13598afe909bSMika Westerberg 	struct tb_port *p;
13608afe909bSMika Westerberg 
13618afe909bSMika Westerberg 	if (tb_port_is_enabled(port))
13628afe909bSMika Westerberg 		return;
13638afe909bSMika Westerberg 
13648afe909bSMika Westerberg 	list_for_each_entry(p, &tcm->dp_resources, list) {
13658afe909bSMika Westerberg 		if (p == port)
13668afe909bSMika Westerberg 			return;
13678afe909bSMika Westerberg 	}
13688afe909bSMika Westerberg 
13698afe909bSMika Westerberg 	tb_port_dbg(port, "DP %s resource available\n",
13708afe909bSMika Westerberg 		    tb_port_is_dpin(port) ? "IN" : "OUT");
13718afe909bSMika Westerberg 	list_add_tail(&port->list, &tcm->dp_resources);
13728afe909bSMika Westerberg 
13738afe909bSMika Westerberg 	/* Look for suitable DP IN <-> DP OUT pairs now */
13748afe909bSMika Westerberg 	tb_tunnel_dp(tb);
13754f807e47SMika Westerberg }
13764f807e47SMika Westerberg 
137781a2e3e4SMika Westerberg static void tb_disconnect_and_release_dp(struct tb *tb)
137881a2e3e4SMika Westerberg {
137981a2e3e4SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
138081a2e3e4SMika Westerberg 	struct tb_tunnel *tunnel, *n;
138181a2e3e4SMika Westerberg 
138281a2e3e4SMika Westerberg 	/*
138381a2e3e4SMika Westerberg 	 * Tear down all DP tunnels and release their resources. They
138481a2e3e4SMika Westerberg 	 * will be re-established after resume based on plug events.
138581a2e3e4SMika Westerberg 	 */
138681a2e3e4SMika Westerberg 	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
138781a2e3e4SMika Westerberg 		if (tb_tunnel_is_dp(tunnel))
138881a2e3e4SMika Westerberg 			tb_deactivate_and_free_tunnel(tunnel);
138981a2e3e4SMika Westerberg 	}
139081a2e3e4SMika Westerberg 
139181a2e3e4SMika Westerberg 	while (!list_empty(&tcm->dp_resources)) {
139281a2e3e4SMika Westerberg 		struct tb_port *port;
139381a2e3e4SMika Westerberg 
139481a2e3e4SMika Westerberg 		port = list_first_entry(&tcm->dp_resources,
139581a2e3e4SMika Westerberg 					struct tb_port, list);
139681a2e3e4SMika Westerberg 		list_del_init(&port->list);
139781a2e3e4SMika Westerberg 	}
139881a2e3e4SMika Westerberg }
139981a2e3e4SMika Westerberg 
14003da88be2SMika Westerberg static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
14013da88be2SMika Westerberg {
14023da88be2SMika Westerberg 	struct tb_tunnel *tunnel;
14033da88be2SMika Westerberg 	struct tb_port *up;
14043da88be2SMika Westerberg 
14053da88be2SMika Westerberg 	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
14063da88be2SMika Westerberg 	if (WARN_ON(!up))
14073da88be2SMika Westerberg 		return -ENODEV;
14083da88be2SMika Westerberg 
14093da88be2SMika Westerberg 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
14103da88be2SMika Westerberg 	if (WARN_ON(!tunnel))
14113da88be2SMika Westerberg 		return -ENODEV;
14123da88be2SMika Westerberg 
141330a4eca6SMika Westerberg 	tb_switch_xhci_disconnect(sw);
141430a4eca6SMika Westerberg 
14153da88be2SMika Westerberg 	tb_tunnel_deactivate(tunnel);
14163da88be2SMika Westerberg 	list_del(&tunnel->list);
14173da88be2SMika Westerberg 	tb_tunnel_free(tunnel);
14183da88be2SMika Westerberg 	return 0;
14193da88be2SMika Westerberg }
14203da88be2SMika Westerberg 
142199cabbb0SMika Westerberg static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
142299cabbb0SMika Westerberg {
142399cabbb0SMika Westerberg 	struct tb_port *up, *down, *port;
14249d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
142599cabbb0SMika Westerberg 	struct tb_tunnel *tunnel;
14269d3cce0bSMika Westerberg 
1427386e5e29SMika Westerberg 	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
142899cabbb0SMika Westerberg 	if (!up)
142999cabbb0SMika Westerberg 		return 0;
14303364f0c1SAndreas Noever 
143199cabbb0SMika Westerberg 	/*
143299cabbb0SMika Westerberg 	 * Look up an available down port. Since we are chaining, it
143399cabbb0SMika Westerberg 	 * should be found right above this switch.
143499cabbb0SMika Westerberg 	 */
14357ce54221SGil Fine 	port = tb_switch_downstream_port(sw);
14367ce54221SGil Fine 	down = tb_find_pcie_down(tb_switch_parent(sw), port);
143799cabbb0SMika Westerberg 	if (!down)
143899cabbb0SMika Westerberg 		return 0;
14393364f0c1SAndreas Noever 
144099cabbb0SMika Westerberg 	tunnel = tb_tunnel_alloc_pci(tb, up, down);
144199cabbb0SMika Westerberg 	if (!tunnel)
144299cabbb0SMika Westerberg 		return -ENOMEM;
14433364f0c1SAndreas Noever 
144493f36adeSMika Westerberg 	if (tb_tunnel_activate(tunnel)) {
144599cabbb0SMika Westerberg 		tb_port_info(up,
14463364f0c1SAndreas Noever 			     "PCIe tunnel activation failed, aborting\n");
144793f36adeSMika Westerberg 		tb_tunnel_free(tunnel);
144899cabbb0SMika Westerberg 		return -EIO;
14493364f0c1SAndreas Noever 	}
14503364f0c1SAndreas Noever 
145143f977bcSGil Fine 	/*
145243f977bcSGil Fine 	 * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
145343f977bcSGil Fine 	 * here.
145443f977bcSGil Fine 	 */
145543f977bcSGil Fine 	if (tb_switch_pcie_l1_enable(sw))
145643f977bcSGil Fine 		tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
145743f977bcSGil Fine 
145830a4eca6SMika Westerberg 	if (tb_switch_xhci_connect(sw))
145930a4eca6SMika Westerberg 		tb_sw_warn(sw, "failed to connect xHCI\n");
146030a4eca6SMika Westerberg 
146199cabbb0SMika Westerberg 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
146299cabbb0SMika Westerberg 	return 0;
14633364f0c1SAndreas Noever }
14649da672a4SAndreas Noever 
1465180b0689SMika Westerberg static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1466180b0689SMika Westerberg 				    int transmit_path, int transmit_ring,
1467180b0689SMika Westerberg 				    int receive_path, int receive_ring)
14687ea4cd6bSMika Westerberg {
14697ea4cd6bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
14707ea4cd6bSMika Westerberg 	struct tb_port *nhi_port, *dst_port;
14717ea4cd6bSMika Westerberg 	struct tb_tunnel *tunnel;
14727ea4cd6bSMika Westerberg 	struct tb_switch *sw;
14737ea4cd6bSMika Westerberg 
14747ea4cd6bSMika Westerberg 	sw = tb_to_switch(xd->dev.parent);
14757ea4cd6bSMika Westerberg 	dst_port = tb_port_at(xd->route, sw);
1476386e5e29SMika Westerberg 	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
14777ea4cd6bSMika Westerberg 
14787ea4cd6bSMika Westerberg 	mutex_lock(&tb->lock);
1479180b0689SMika Westerberg 	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
1480180b0689SMika Westerberg 				     transmit_ring, receive_path, receive_ring);
14817ea4cd6bSMika Westerberg 	if (!tunnel) {
14827ea4cd6bSMika Westerberg 		mutex_unlock(&tb->lock);
14837ea4cd6bSMika Westerberg 		return -ENOMEM;
14847ea4cd6bSMika Westerberg 	}
14857ea4cd6bSMika Westerberg 
14867ea4cd6bSMika Westerberg 	if (tb_tunnel_activate(tunnel)) {
14877ea4cd6bSMika Westerberg 		tb_port_info(nhi_port,
14887ea4cd6bSMika Westerberg 			     "DMA tunnel activation failed, aborting\n");
14897ea4cd6bSMika Westerberg 		tb_tunnel_free(tunnel);
14907ea4cd6bSMika Westerberg 		mutex_unlock(&tb->lock);
14917ea4cd6bSMika Westerberg 		return -EIO;
14927ea4cd6bSMika Westerberg 	}
14937ea4cd6bSMika Westerberg 
14947ea4cd6bSMika Westerberg 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
14957ea4cd6bSMika Westerberg 	mutex_unlock(&tb->lock);
14967ea4cd6bSMika Westerberg 	return 0;
14977ea4cd6bSMika Westerberg }
14987ea4cd6bSMika Westerberg 
1499180b0689SMika Westerberg static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1500180b0689SMika Westerberg 					  int transmit_path, int transmit_ring,
1501180b0689SMika Westerberg 					  int receive_path, int receive_ring)
15027ea4cd6bSMika Westerberg {
1503180b0689SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
1504180b0689SMika Westerberg 	struct tb_port *nhi_port, *dst_port;
1505180b0689SMika Westerberg 	struct tb_tunnel *tunnel, *n;
15067ea4cd6bSMika Westerberg 	struct tb_switch *sw;
15077ea4cd6bSMika Westerberg 
15087ea4cd6bSMika Westerberg 	sw = tb_to_switch(xd->dev.parent);
15097ea4cd6bSMika Westerberg 	dst_port = tb_port_at(xd->route, sw);
1510180b0689SMika Westerberg 	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
15117ea4cd6bSMika Westerberg 
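	/*
	 * A negative path/ring is treated as a wildcard by
	 * tb_tunnel_match_dma(), so passing -1 for all four values (as
	 * the hotplug unplug path does) tears down every DMA tunnel
	 * going to this XDomain.
	 */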
1512180b0689SMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1513180b0689SMika Westerberg 		if (!tb_tunnel_is_dma(tunnel))
1514180b0689SMika Westerberg 			continue;
1515180b0689SMika Westerberg 		if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
1516180b0689SMika Westerberg 			continue;
1517180b0689SMika Westerberg 
1518180b0689SMika Westerberg 		if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
1519180b0689SMika Westerberg 					receive_path, receive_ring))
15208afe909bSMika Westerberg 			tb_deactivate_and_free_tunnel(tunnel);
15217ea4cd6bSMika Westerberg 	}
1522180b0689SMika Westerberg }
15237ea4cd6bSMika Westerberg 
1524180b0689SMika Westerberg static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1525180b0689SMika Westerberg 				       int transmit_path, int transmit_ring,
1526180b0689SMika Westerberg 				       int receive_path, int receive_ring)
15277ea4cd6bSMika Westerberg {
15287ea4cd6bSMika Westerberg 	if (!xd->is_unplugged) {
15297ea4cd6bSMika Westerberg 		mutex_lock(&tb->lock);
1530180b0689SMika Westerberg 		__tb_disconnect_xdomain_paths(tb, xd, transmit_path,
1531180b0689SMika Westerberg 					      transmit_ring, receive_path,
1532180b0689SMika Westerberg 					      receive_ring);
15337ea4cd6bSMika Westerberg 		mutex_unlock(&tb->lock);
15347ea4cd6bSMika Westerberg 	}
15357ea4cd6bSMika Westerberg 	return 0;
15367ea4cd6bSMika Westerberg }
15377ea4cd6bSMika Westerberg 
1538d6cc51cdSAndreas Noever /* hotplug handling */
1539d6cc51cdSAndreas Noever 
1540877e50b3SLee Jones /*
1541d6cc51cdSAndreas Noever  * tb_handle_hotplug() - handle hotplug event
1542d6cc51cdSAndreas Noever  *
1543d6cc51cdSAndreas Noever  * Executes on tb->wq.
1544d6cc51cdSAndreas Noever  */
1545d6cc51cdSAndreas Noever static void tb_handle_hotplug(struct work_struct *work)
1546d6cc51cdSAndreas Noever {
1547d6cc51cdSAndreas Noever 	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
1548d6cc51cdSAndreas Noever 	struct tb *tb = ev->tb;
15499d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
1550053596d9SAndreas Noever 	struct tb_switch *sw;
1551053596d9SAndreas Noever 	struct tb_port *port;
1552284652a4SMika Westerberg 
15536ac6faeeSMika Westerberg 	/* Bring the domain back from sleep if it was suspended */
15546ac6faeeSMika Westerberg 	pm_runtime_get_sync(&tb->dev);
15556ac6faeeSMika Westerberg 
1556d6cc51cdSAndreas Noever 	mutex_lock(&tb->lock);
15579d3cce0bSMika Westerberg 	if (!tcm->hotplug_active)
1558d6cc51cdSAndreas Noever 		goto out; /* during init, suspend or shutdown */
1559d6cc51cdSAndreas Noever 
15608f965efdSMika Westerberg 	sw = tb_switch_find_by_route(tb, ev->route);
1561053596d9SAndreas Noever 	if (!sw) {
1562053596d9SAndreas Noever 		tb_warn(tb,
1563053596d9SAndreas Noever 			"hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
1564053596d9SAndreas Noever 			ev->route, ev->port, ev->unplug);
1565053596d9SAndreas Noever 		goto out;
1566053596d9SAndreas Noever 	}
1567053596d9SAndreas Noever 	if (ev->port > sw->config.max_port_number) {
1568053596d9SAndreas Noever 		tb_warn(tb,
1569053596d9SAndreas Noever 			"hotplug event from non-existent port %llx:%x (unplug: %d)\n",
1570053596d9SAndreas Noever 			ev->route, ev->port, ev->unplug);
15718f965efdSMika Westerberg 		goto put_sw;
1572053596d9SAndreas Noever 	}
1573053596d9SAndreas Noever 	port = &sw->ports[ev->port];
1574053596d9SAndreas Noever 	if (tb_is_upstream_port(port)) {
1575dfe40ca4SMika Westerberg 		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
1576053596d9SAndreas Noever 		       ev->route, ev->port, ev->unplug);
15778f965efdSMika Westerberg 		goto put_sw;
1578053596d9SAndreas Noever 	}
15796ac6faeeSMika Westerberg 
15806ac6faeeSMika Westerberg 	pm_runtime_get_sync(&sw->dev);
15816ac6faeeSMika Westerberg 
1582053596d9SAndreas Noever 	if (ev->unplug) {
1583dacb1287SKranthi Kuntala 		tb_retimer_remove_all(port);
1584dacb1287SKranthi Kuntala 
1585dfe40ca4SMika Westerberg 		if (tb_port_has_remote(port)) {
15867ea4cd6bSMika Westerberg 			tb_port_dbg(port, "switch unplugged\n");
1587aae20bb6SLukas Wunner 			tb_sw_set_unplugged(port->remote->sw);
15883364f0c1SAndreas Noever 			tb_free_invalid_tunnels(tb);
15898afe909bSMika Westerberg 			tb_remove_dp_resources(port->remote->sw);
1590cf29b9afSRajmohan Mani 			tb_switch_tmu_disable(port->remote->sw);
1591de462039SMika Westerberg 			tb_switch_unconfigure_link(port->remote->sw);
159291c0c120SMika Westerberg 			tb_switch_lane_bonding_disable(port->remote->sw);
1593bfe778acSMika Westerberg 			tb_switch_remove(port->remote->sw);
1594053596d9SAndreas Noever 			port->remote = NULL;
1595dfe40ca4SMika Westerberg 			if (port->dual_link_port)
1596dfe40ca4SMika Westerberg 				port->dual_link_port->remote = NULL;
15978afe909bSMika Westerberg 			/* Maybe we can create another DP tunnel */
15986ce35635SMika Westerberg 			tb_recalc_estimated_bandwidth(tb);
15998afe909bSMika Westerberg 			tb_tunnel_dp(tb);
16007ea4cd6bSMika Westerberg 		} else if (port->xdomain) {
16017ea4cd6bSMika Westerberg 			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
16027ea4cd6bSMika Westerberg 
16037ea4cd6bSMika Westerberg 			tb_port_dbg(port, "xdomain unplugged\n");
16047ea4cd6bSMika Westerberg 			/*
16057ea4cd6bSMika Westerberg 			 * Service drivers are unbound during
16067ea4cd6bSMika Westerberg 			 * tb_xdomain_remove(), so setting the XDomain as
16077ea4cd6bSMika Westerberg 			 * unplugged here prevents a deadlock if they call
16087ea4cd6bSMika Westerberg 			 * tb_xdomain_disable_paths(). We will tear down
1609180b0689SMika Westerberg 			 * all the tunnels below.
16107ea4cd6bSMika Westerberg 			 */
16117ea4cd6bSMika Westerberg 			xd->is_unplugged = true;
16127ea4cd6bSMika Westerberg 			tb_xdomain_remove(xd);
16137ea4cd6bSMika Westerberg 			port->xdomain = NULL;
1614180b0689SMika Westerberg 			__tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
16157ea4cd6bSMika Westerberg 			tb_xdomain_put(xd);
1616284652a4SMika Westerberg 			tb_port_unconfigure_xdomain(port);
16178afe909bSMika Westerberg 		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
16188afe909bSMika Westerberg 			tb_dp_resource_unavailable(tb, port);
161930a4eca6SMika Westerberg 		} else if (!port->port) {
162030a4eca6SMika Westerberg 			tb_sw_dbg(sw, "xHCI disconnect request\n");
162130a4eca6SMika Westerberg 			tb_switch_xhci_disconnect(sw);
1622053596d9SAndreas Noever 		} else {
162362efe699SMika Westerberg 			tb_port_dbg(port,
1624053596d9SAndreas Noever 				   "got unplug event for disconnected port, ignoring\n");
1625053596d9SAndreas Noever 		}
1626053596d9SAndreas Noever 	} else if (port->remote) {
162762efe699SMika Westerberg 		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
162830a4eca6SMika Westerberg 	} else if (!port->port && sw->authorized) {
162930a4eca6SMika Westerberg 		tb_sw_dbg(sw, "xHCI connect request\n");
163030a4eca6SMika Westerberg 		tb_switch_xhci_connect(sw);
1631053596d9SAndreas Noever 	} else {
1632344e0643SMika Westerberg 		if (tb_port_is_null(port)) {
163362efe699SMika Westerberg 			tb_port_dbg(port, "hotplug: scanning\n");
1634053596d9SAndreas Noever 			tb_scan_port(port);
163599cabbb0SMika Westerberg 			if (!port->remote)
163662efe699SMika Westerberg 				tb_port_dbg(port, "hotplug: no switch found\n");
16378afe909bSMika Westerberg 		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
16388afe909bSMika Westerberg 			tb_dp_resource_available(tb, port);
1639053596d9SAndreas Noever 		}
1640344e0643SMika Westerberg 	}
16418f965efdSMika Westerberg 
16426ac6faeeSMika Westerberg 	pm_runtime_mark_last_busy(&sw->dev);
16436ac6faeeSMika Westerberg 	pm_runtime_put_autosuspend(&sw->dev);
16446ac6faeeSMika Westerberg 
16458f965efdSMika Westerberg put_sw:
16468f965efdSMika Westerberg 	tb_switch_put(sw);
1647d6cc51cdSAndreas Noever out:
1648d6cc51cdSAndreas Noever 	mutex_unlock(&tb->lock);
16496ac6faeeSMika Westerberg 
16506ac6faeeSMika Westerberg 	pm_runtime_mark_last_busy(&tb->dev);
16516ac6faeeSMika Westerberg 	pm_runtime_put_autosuspend(&tb->dev);
16526ac6faeeSMika Westerberg 
1653d6cc51cdSAndreas Noever 	kfree(ev);
1654d6cc51cdSAndreas Noever }
1655d6cc51cdSAndreas Noever 
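/*
 * Handle a DP IN adapter bandwidth allocation request: if the
 * (corrected) request fits within the current reservation, simply
 * resize the reservation; otherwise release unused USB3 bandwidth
 * first, re-check what is available along the path and grow the
 * reservation if the request fits.
 */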
16566ce35635SMika Westerberg static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
16576ce35635SMika Westerberg 				 int *requested_down)
16586ce35635SMika Westerberg {
16596ce35635SMika Westerberg 	int allocated_up, allocated_down, available_up, available_down, ret;
16606ce35635SMika Westerberg 	int requested_up_corrected, requested_down_corrected, granularity;
16616ce35635SMika Westerberg 	int max_up, max_down, max_up_rounded, max_down_rounded;
16626ce35635SMika Westerberg 	struct tb *tb = tunnel->tb;
16636ce35635SMika Westerberg 	struct tb_port *in, *out;
16646ce35635SMika Westerberg 
16656ce35635SMika Westerberg 	ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
16666ce35635SMika Westerberg 	if (ret)
16676ce35635SMika Westerberg 		return ret;
16686ce35635SMika Westerberg 
16696ce35635SMika Westerberg 	in = tunnel->src_port;
16706ce35635SMika Westerberg 	out = tunnel->dst_port;
16716ce35635SMika Westerberg 
16726ce35635SMika Westerberg 	tb_port_dbg(in, "bandwidth allocated currently %d/%d Mb/s\n",
16736ce35635SMika Westerberg 		    allocated_up, allocated_down);
16746ce35635SMika Westerberg 
16756ce35635SMika Westerberg 	/*
16766ce35635SMika Westerberg 	 * If we get a rounded-up request from the graphics side, say
16776ce35635SMika Westerberg 	 * HBR2 x 4, that is 17500 instead of 17280 (because of the
16786ce35635SMika Westerberg 	 * granularity), we allow it too. Here the graphics driver has
16796ce35635SMika Westerberg 	 * already negotiated the maximum possible rates with the DPRX
16806ce35635SMika Westerberg 	 * (17280 in this case).
16816ce35635SMika Westerberg 	 *
16826ce35635SMika Westerberg 	 * Since the link cannot go higher than 17280 we use that in our
16836ce35635SMika Westerberg 	 * calculations, but the DP IN adapter Allocated BW write must be
16846ce35635SMika Westerberg 	 * the same value (17500); otherwise the adapter will mark it as
16856ce35635SMika Westerberg 	 * failed for graphics.
16866ce35635SMika Westerberg 	 */
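	/*
	 * Worked example (illustrative; assumes the adapter reports a
	 * granularity of 250 Mb/s): HBR2 x 4 lanes carries
	 * 4 * 5400 Mb/s * 8/10 = 17280 Mb/s of usable bandwidth, and
	 * roundup(17280, 250) = 17500, so a 17500 Mb/s request is
	 * computed against 17280 internally while 17500 is what gets
	 * written back to the adapter.
	 */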
16876ce35635SMika Westerberg 	ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
16886ce35635SMika Westerberg 	if (ret)
16896ce35635SMika Westerberg 		return ret;
16906ce35635SMika Westerberg 
16916ce35635SMika Westerberg 	ret = usb4_dp_port_granularity(in);
16926ce35635SMika Westerberg 	if (ret < 0)
16936ce35635SMika Westerberg 		return ret;
16946ce35635SMika Westerberg 	granularity = ret;
16956ce35635SMika Westerberg 
16966ce35635SMika Westerberg 	max_up_rounded = roundup(max_up, granularity);
16976ce35635SMika Westerberg 	max_down_rounded = roundup(max_down, granularity);
16986ce35635SMika Westerberg 
16996ce35635SMika Westerberg 	/*
17006ce35635SMika Westerberg 	 * This will "fix" the request down to the maximum supported
17016ce35635SMika Westerberg 	 * rate * lanes if it is at the maximum rounded-up level.
17026ce35635SMika Westerberg 	 */
17036ce35635SMika Westerberg 	requested_up_corrected = *requested_up;
17046ce35635SMika Westerberg 	if (requested_up_corrected == max_up_rounded)
17056ce35635SMika Westerberg 		requested_up_corrected = max_up;
17066ce35635SMika Westerberg 	else if (requested_up_corrected < 0)
17076ce35635SMika Westerberg 		requested_up_corrected = 0;
17086ce35635SMika Westerberg 	requested_down_corrected = *requested_down;
17096ce35635SMika Westerberg 	if (requested_down_corrected == max_down_rounded)
17106ce35635SMika Westerberg 		requested_down_corrected = max_down;
17116ce35635SMika Westerberg 	else if (requested_down_corrected < 0)
17126ce35635SMika Westerberg 		requested_down_corrected = 0;
17136ce35635SMika Westerberg 
17146ce35635SMika Westerberg 	tb_port_dbg(in, "corrected bandwidth request %d/%d Mb/s\n",
17156ce35635SMika Westerberg 		    requested_up_corrected, requested_down_corrected);
17166ce35635SMika Westerberg 
17176ce35635SMika Westerberg 	if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
17186ce35635SMika Westerberg 	    (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
17196ce35635SMika Westerberg 		tb_port_dbg(in, "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
17206ce35635SMika Westerberg 			    requested_up_corrected, requested_down_corrected,
17216ce35635SMika Westerberg 			    max_up_rounded, max_down_rounded);
17226ce35635SMika Westerberg 		return -ENOBUFS;
17236ce35635SMika Westerberg 	}
17246ce35635SMika Westerberg 
17256ce35635SMika Westerberg 	if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
17266ce35635SMika Westerberg 	    (*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
17276ce35635SMika Westerberg 		/*
17286ce35635SMika Westerberg 		 * If the requested bandwidth is less than or equal to
17296ce35635SMika Westerberg 		 * what is currently allocated to that tunnel we simply
17306ce35635SMika Westerberg 		 * change the reservation of the tunnel. Since all the
17316ce35635SMika Westerberg 		 * tunnels going out from the same USB4 port are in the
17326ce35635SMika Westerberg 		 * same group, the released bandwidth will be taken into
17336ce35635SMika Westerberg 		 * account for the other tunnels automatically below.
17346ce35635SMika Westerberg 		 */
17356ce35635SMika Westerberg 		return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
17366ce35635SMika Westerberg 						 requested_down);
17376ce35635SMika Westerberg 	}
17386ce35635SMika Westerberg 
17396ce35635SMika Westerberg 	/*
17406ce35635SMika Westerberg 	 * More bandwidth is requested. Release all the potential
17416ce35635SMika Westerberg 	 * bandwidth from USB3 first.
17426ce35635SMika Westerberg 	 */
17436ce35635SMika Westerberg 	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
17446ce35635SMika Westerberg 	if (ret)
17456ce35635SMika Westerberg 		return ret;
17466ce35635SMika Westerberg 
17476ce35635SMika Westerberg 	/*
17486ce35635SMika Westerberg 	 * Then go over all tunnels that cross the same USB4 ports (they
17496ce35635SMika Westerberg 	 * are also in the same group but we use the same function here
17506ce35635SMika Westerberg 	 * that we use with the normal bandwidth allocation).
17516ce35635SMika Westerberg 	 */
17526ce35635SMika Westerberg 	ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
17536ce35635SMika Westerberg 	if (ret)
17546ce35635SMika Westerberg 		goto reclaim;
17556ce35635SMika Westerberg 
17566ce35635SMika Westerberg 	tb_port_dbg(in, "bandwidth available for allocation %d/%d Mb/s\n",
17576ce35635SMika Westerberg 		    available_up, available_down);
17586ce35635SMika Westerberg 
17596ce35635SMika Westerberg 	if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
17606ce35635SMika Westerberg 	    (*requested_down >= 0 && available_down >= requested_down_corrected)) {
17616ce35635SMika Westerberg 		ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
17626ce35635SMika Westerberg 						requested_down);
17636ce35635SMika Westerberg 	} else {
17646ce35635SMika Westerberg 		ret = -ENOBUFS;
17656ce35635SMika Westerberg 	}
17666ce35635SMika Westerberg 
17676ce35635SMika Westerberg reclaim:
17686ce35635SMika Westerberg 	tb_reclaim_usb3_bandwidth(tb, in, out);
17696ce35635SMika Westerberg 	return ret;
17706ce35635SMika Westerberg }
17716ce35635SMika Westerberg 
17726ce35635SMika Westerberg static void tb_handle_dp_bandwidth_request(struct work_struct *work)
17736ce35635SMika Westerberg {
17746ce35635SMika Westerberg 	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
17756ce35635SMika Westerberg 	int requested_bw, requested_up, requested_down, ret;
17766ce35635SMika Westerberg 	struct tb_port *in, *out;
17776ce35635SMika Westerberg 	struct tb_tunnel *tunnel;
17786ce35635SMika Westerberg 	struct tb *tb = ev->tb;
17796ce35635SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
17806ce35635SMika Westerberg 	struct tb_switch *sw;
17816ce35635SMika Westerberg 
17826ce35635SMika Westerberg 	pm_runtime_get_sync(&tb->dev);
17836ce35635SMika Westerberg 
17846ce35635SMika Westerberg 	mutex_lock(&tb->lock);
17856ce35635SMika Westerberg 	if (!tcm->hotplug_active)
17866ce35635SMika Westerberg 		goto unlock;
17876ce35635SMika Westerberg 
17886ce35635SMika Westerberg 	sw = tb_switch_find_by_route(tb, ev->route);
17896ce35635SMika Westerberg 	if (!sw) {
17906ce35635SMika Westerberg 		tb_warn(tb, "bandwidth request from non-existent router %llx\n",
17916ce35635SMika Westerberg 			ev->route);
17926ce35635SMika Westerberg 		goto unlock;
17936ce35635SMika Westerberg 	}
17946ce35635SMika Westerberg 
17956ce35635SMika Westerberg 	in = &sw->ports[ev->port];
17966ce35635SMika Westerberg 	if (!tb_port_is_dpin(in)) {
17976ce35635SMika Westerberg 		tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
17986ce35635SMika Westerberg 		goto unlock;
17996ce35635SMika Westerberg 	}
18006ce35635SMika Westerberg 
18016ce35635SMika Westerberg 	tb_port_dbg(in, "handling bandwidth allocation request\n");
18026ce35635SMika Westerberg 
18036ce35635SMika Westerberg 	if (!usb4_dp_port_bw_mode_enabled(in)) {
18046ce35635SMika Westerberg 		tb_port_warn(in, "bandwidth allocation mode not enabled\n");
18056ce35635SMika Westerberg 		goto unlock;
18066ce35635SMika Westerberg 	}
18076ce35635SMika Westerberg 
1808ace75e18SMika Westerberg 	ret = usb4_dp_port_requested_bw(in);
1809ace75e18SMika Westerberg 	if (ret < 0) {
1810ace75e18SMika Westerberg 		if (ret == -ENODATA)
18116ce35635SMika Westerberg 			tb_port_dbg(in, "no bandwidth request active\n");
1812ace75e18SMika Westerberg 		else
1813ace75e18SMika Westerberg 			tb_port_warn(in, "failed to read requested bandwidth\n");
18146ce35635SMika Westerberg 		goto unlock;
18156ce35635SMika Westerberg 	}
1816ace75e18SMika Westerberg 	requested_bw = ret;
18176ce35635SMika Westerberg 
18186ce35635SMika Westerberg 	tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
18196ce35635SMika Westerberg 
18206ce35635SMika Westerberg 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
18216ce35635SMika Westerberg 	if (!tunnel) {
18226ce35635SMika Westerberg 		tb_port_warn(in, "failed to find tunnel\n");
18236ce35635SMika Westerberg 		goto unlock;
18246ce35635SMika Westerberg 	}
18256ce35635SMika Westerberg 
18266ce35635SMika Westerberg 	out = tunnel->dst_port;
18276ce35635SMika Westerberg 
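	/*
	 * The request applies to the direction the DP stream flows:
	 * downstream when the DP IN adapter is closer to the host
	 * router than the DP OUT adapter, upstream otherwise.
	 */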
18286ce35635SMika Westerberg 	if (in->sw->config.depth < out->sw->config.depth) {
18296ce35635SMika Westerberg 		requested_up = -1;
18306ce35635SMika Westerberg 		requested_down = requested_bw;
18316ce35635SMika Westerberg 	} else {
18326ce35635SMika Westerberg 		requested_up = requested_bw;
18336ce35635SMika Westerberg 		requested_down = -1;
18346ce35635SMika Westerberg 	}
18356ce35635SMika Westerberg 
18366ce35635SMika Westerberg 	ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
18376ce35635SMika Westerberg 	if (ret) {
18386ce35635SMika Westerberg 		if (ret == -ENOBUFS)
18396ce35635SMika Westerberg 			tb_port_warn(in, "not enough bandwidth available\n");
18406ce35635SMika Westerberg 		else
18416ce35635SMika Westerberg 			tb_port_warn(in, "failed to change bandwidth allocation\n");
18426ce35635SMika Westerberg 	} else {
18436ce35635SMika Westerberg 		tb_port_dbg(in, "bandwidth allocation changed to %d/%d Mb/s\n",
18446ce35635SMika Westerberg 			    requested_up, requested_down);
18456ce35635SMika Westerberg 
18466ce35635SMika Westerberg 		/* Update other clients about the allocation change */
18476ce35635SMika Westerberg 		tb_recalc_estimated_bandwidth(tb);
18486ce35635SMika Westerberg 	}
18496ce35635SMika Westerberg 
18506ce35635SMika Westerberg unlock:
18516ce35635SMika Westerberg 	mutex_unlock(&tb->lock);
18526ce35635SMika Westerberg 
18536ce35635SMika Westerberg 	pm_runtime_mark_last_busy(&tb->dev);
18546ce35635SMika Westerberg 	pm_runtime_put_autosuspend(&tb->dev);
18556ce35635SMika Westerberg }
18566ce35635SMika Westerberg 
18576ce35635SMika Westerberg static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
18586ce35635SMika Westerberg {
18596ce35635SMika Westerberg 	struct tb_hotplug_event *ev;
18606ce35635SMika Westerberg 
18616ce35635SMika Westerberg 	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
18626ce35635SMika Westerberg 	if (!ev)
18636ce35635SMika Westerberg 		return;
18646ce35635SMika Westerberg 
18656ce35635SMika Westerberg 	ev->tb = tb;
18666ce35635SMika Westerberg 	ev->route = route;
18676ce35635SMika Westerberg 	ev->port = port;
18686ce35635SMika Westerberg 	INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
18696ce35635SMika Westerberg 	queue_work(tb->wq, &ev->work);
18706ce35635SMika Westerberg }
18716ce35635SMika Westerberg 
18726ce35635SMika Westerberg static void tb_handle_notification(struct tb *tb, u64 route,
18736ce35635SMika Westerberg 				   const struct cfg_error_pkg *error)
18746ce35635SMika Westerberg {
18756ce35635SMika Westerberg 	if (tb_cfg_ack_notification(tb->ctl, route, error))
18766ce35635SMika Westerberg 		tb_warn(tb, "could not ack notification on %llx\n", route);
18776ce35635SMika Westerberg 
18786ce35635SMika Westerberg 	switch (error->error) {
18796ce35635SMika Westerberg 	case TB_CFG_ERROR_DP_BW:
18806ce35635SMika Westerberg 		tb_queue_dp_bandwidth_request(tb, route, error->port);
18816ce35635SMika Westerberg 		break;
18826ce35635SMika Westerberg 
18836ce35635SMika Westerberg 	default:
18846ce35635SMika Westerberg 		/* Ack is enough */
18856ce35635SMika Westerberg 		return;
18866ce35635SMika Westerberg 	}
18876ce35635SMika Westerberg }
18886ce35635SMika Westerberg 
1889877e50b3SLee Jones /*
1890d6cc51cdSAndreas Noever  * tb_handle_event() - callback function for the control channel
1891d6cc51cdSAndreas Noever  *
1892d6cc51cdSAndreas Noever  * Delegates to tb_handle_hotplug.
1893d6cc51cdSAndreas Noever  */
189481a54b5eSMika Westerberg static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
189581a54b5eSMika Westerberg 			    const void *buf, size_t size)
1896d6cc51cdSAndreas Noever {
189781a54b5eSMika Westerberg 	const struct cfg_event_pkg *pkg = buf;
18986ce35635SMika Westerberg 	u64 route = tb_cfg_get_route(&pkg->header);
189981a54b5eSMika Westerberg 
19006ce35635SMika Westerberg 	switch (type) {
19016ce35635SMika Westerberg 	case TB_CFG_PKG_ERROR:
19026ce35635SMika Westerberg 		tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
19036ce35635SMika Westerberg 		return;
19046ce35635SMika Westerberg 	case TB_CFG_PKG_EVENT:
19056ce35635SMika Westerberg 		break;
19066ce35635SMika Westerberg 	default:
190781a54b5eSMika Westerberg 		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
190881a54b5eSMika Westerberg 		return;
190981a54b5eSMika Westerberg 	}
191081a54b5eSMika Westerberg 
1911210e9f56SMika Westerberg 	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
191281a54b5eSMika Westerberg 		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
191381a54b5eSMika Westerberg 			pkg->port);
191481a54b5eSMika Westerberg 	}
191581a54b5eSMika Westerberg 
19164f807e47SMika Westerberg 	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
1917d6cc51cdSAndreas Noever }
1918d6cc51cdSAndreas Noever 
19199d3cce0bSMika Westerberg static void tb_stop(struct tb *tb)
1920d6cc51cdSAndreas Noever {
19219d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
192293f36adeSMika Westerberg 	struct tb_tunnel *tunnel;
192393f36adeSMika Westerberg 	struct tb_tunnel *n;
19243364f0c1SAndreas Noever 
19256ac6faeeSMika Westerberg 	cancel_delayed_work(&tcm->remove_work);
19263364f0c1SAndreas Noever 	/* tunnels are only present after everything has been initialized */
19277ea4cd6bSMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
19287ea4cd6bSMika Westerberg 		/*
19297ea4cd6bSMika Westerberg 		 * DMA tunnels require the driver to be functional, so we
19307ea4cd6bSMika Westerberg 		 * tear them down. Other protocol tunnels can be left
19317ea4cd6bSMika Westerberg 		 * intact.
19327ea4cd6bSMika Westerberg 		 */
19337ea4cd6bSMika Westerberg 		if (tb_tunnel_is_dma(tunnel))
19347ea4cd6bSMika Westerberg 			tb_tunnel_deactivate(tunnel);
193593f36adeSMika Westerberg 		tb_tunnel_free(tunnel);
19367ea4cd6bSMika Westerberg 	}
1937bfe778acSMika Westerberg 	tb_switch_remove(tb->root_switch);
19389d3cce0bSMika Westerberg 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
1939d6cc51cdSAndreas Noever }
1940d6cc51cdSAndreas Noever 
194199cabbb0SMika Westerberg static int tb_scan_finalize_switch(struct device *dev, void *data)
194299cabbb0SMika Westerberg {
194399cabbb0SMika Westerberg 	if (tb_is_switch(dev)) {
194499cabbb0SMika Westerberg 		struct tb_switch *sw = tb_to_switch(dev);
194599cabbb0SMika Westerberg 
194699cabbb0SMika Westerberg 		/*
194799cabbb0SMika Westerberg 		 * If we found that the switch was already set up by the
194899cabbb0SMika Westerberg 		 * boot firmware, mark it as authorized now before we
194999cabbb0SMika Westerberg 		 * send the uevent to userspace.
195099cabbb0SMika Westerberg 		 */
195199cabbb0SMika Westerberg 		if (sw->boot)
195299cabbb0SMika Westerberg 			sw->authorized = 1;
195399cabbb0SMika Westerberg 
195499cabbb0SMika Westerberg 		dev_set_uevent_suppress(dev, false);
195599cabbb0SMika Westerberg 		kobject_uevent(&dev->kobj, KOBJ_ADD);
195699cabbb0SMika Westerberg 		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
195799cabbb0SMika Westerberg 	}
195899cabbb0SMika Westerberg 
195999cabbb0SMika Westerberg 	return 0;
196099cabbb0SMika Westerberg }
196199cabbb0SMika Westerberg 
19629d3cce0bSMika Westerberg static int tb_start(struct tb *tb)
1963d6cc51cdSAndreas Noever {
19649d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
1965bfe778acSMika Westerberg 	int ret;
1966d6cc51cdSAndreas Noever 
1967bfe778acSMika Westerberg 	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
1968444ac384SMika Westerberg 	if (IS_ERR(tb->root_switch))
1969444ac384SMika Westerberg 		return PTR_ERR(tb->root_switch);
1970a25c8b2fSAndreas Noever 
1971e6b245ccSMika Westerberg 	/*
1972e6b245ccSMika Westerberg 	 * ICM firmware upgrade needs running firmware, and in native
1973e6b245ccSMika Westerberg 	 * mode that is not available, so disable firmware upgrade of
1974e6b245ccSMika Westerberg 	 * the root switch.
19755172eb9aSSzuying Chen 	 *
19765172eb9aSSzuying Chen 	 * However, USB4 routers support NVM firmware upgrade if they
19775172eb9aSSzuying Chen 	 * implement the necessary router operations.
1978e6b245ccSMika Westerberg 	 */
19795172eb9aSSzuying Chen 	tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
19806ac6faeeSMika Westerberg 	/* All USB4 routers support runtime PM */
19816ac6faeeSMika Westerberg 	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
1982e6b245ccSMika Westerberg 
1983bfe778acSMika Westerberg 	ret = tb_switch_configure(tb->root_switch);
1984bfe778acSMika Westerberg 	if (ret) {
1985bfe778acSMika Westerberg 		tb_switch_put(tb->root_switch);
1986bfe778acSMika Westerberg 		return ret;
1987bfe778acSMika Westerberg 	}
1988bfe778acSMika Westerberg 
1989bfe778acSMika Westerberg 	/* Announce the switch to the world */
1990bfe778acSMika Westerberg 	ret = tb_switch_add(tb->root_switch);
1991bfe778acSMika Westerberg 	if (ret) {
1992bfe778acSMika Westerberg 		tb_switch_put(tb->root_switch);
1993bfe778acSMika Westerberg 		return ret;
1994bfe778acSMika Westerberg 	}
1995bfe778acSMika Westerberg 
1996b017a46dSGil Fine 	/*
1997b017a46dSGil Fine 	 * To support the highest CLx state, we set the host router's
1998b017a46dSGil Fine 	 * TMU to Normal mode.
1999b017a46dSGil Fine 	 */
2000b017a46dSGil Fine 	tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_NORMAL,
2001b017a46dSGil Fine 				false);
2002cf29b9afSRajmohan Mani 	/* Enable TMU if it is off */
2003cf29b9afSRajmohan Mani 	tb_switch_tmu_enable(tb->root_switch);
20049da672a4SAndreas Noever 	/* Full scan to discover devices added before the driver was loaded. */
20059da672a4SAndreas Noever 	tb_scan_switch(tb->root_switch);
20060414bec5SMika Westerberg 	/* Find out tunnels created by the boot firmware */
200743bddb26SMika Westerberg 	tb_discover_tunnels(tb);
2008b60e31bfSSanjay R Mehta 	/* Add DP resources from the DP tunnels created by the boot firmware */
2009b60e31bfSSanjay R Mehta 	tb_discover_dp_resources(tb);
2010e6f81858SRajmohan Mani 	/*
2011e6f81858SRajmohan Mani 	 * If the boot firmware did not create USB 3.x tunnels create them
2012e6f81858SRajmohan Mani 	 * now for the whole topology.
2013e6f81858SRajmohan Mani 	 */
2014e6f81858SRajmohan Mani 	tb_create_usb3_tunnels(tb->root_switch);
20158afe909bSMika Westerberg 	/* Add DP IN resources for the root switch */
20168afe909bSMika Westerberg 	tb_add_dp_resources(tb->root_switch);
201799cabbb0SMika Westerberg 	/* Make the discovered switches available to the userspace */
201899cabbb0SMika Westerberg 	device_for_each_child(&tb->root_switch->dev, NULL,
201999cabbb0SMika Westerberg 			      tb_scan_finalize_switch);
20209da672a4SAndreas Noever 
2021d6cc51cdSAndreas Noever 	/* Allow tb_handle_hotplug to progress events */
20229d3cce0bSMika Westerberg 	tcm->hotplug_active = true;
20239d3cce0bSMika Westerberg 	return 0;
2024d6cc51cdSAndreas Noever }
2025d6cc51cdSAndreas Noever 
20269d3cce0bSMika Westerberg static int tb_suspend_noirq(struct tb *tb)
202723dd5bb4SAndreas Noever {
20289d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
20299d3cce0bSMika Westerberg 
2030daa5140fSMika Westerberg 	tb_dbg(tb, "suspending...\n");
203181a2e3e4SMika Westerberg 	tb_disconnect_and_release_dp(tb);
20326ac6faeeSMika Westerberg 	tb_switch_suspend(tb->root_switch, false);
20339d3cce0bSMika Westerberg 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
2034daa5140fSMika Westerberg 	tb_dbg(tb, "suspend finished\n");
20359d3cce0bSMika Westerberg 
20369d3cce0bSMika Westerberg 	return 0;
203723dd5bb4SAndreas Noever }
203823dd5bb4SAndreas Noever 
203991c0c120SMika Westerberg static void tb_restore_children(struct tb_switch *sw)
204091c0c120SMika Westerberg {
204191c0c120SMika Westerberg 	struct tb_port *port;
204291c0c120SMika Westerberg 
20436ac6faeeSMika Westerberg 	/* No need to restore if the router is already unplugged */
20446ac6faeeSMika Westerberg 	if (sw->is_unplugged)
20456ac6faeeSMika Westerberg 		return;
20466ac6faeeSMika Westerberg 
20471a9b6cb8SMika Westerberg 	if (tb_enable_clx(sw))
20481a9b6cb8SMika Westerberg 		tb_sw_warn(sw, "failed to re-enable CL states\n");
2049b017a46dSGil Fine 
2050cf29b9afSRajmohan Mani 	if (tb_enable_tmu(sw))
2051cf29b9afSRajmohan Mani 		tb_sw_warn(sw, "failed to restore TMU configuration\n");
2052cf29b9afSRajmohan Mani 
205391c0c120SMika Westerberg 	tb_switch_for_each_port(sw, port) {
2054284652a4SMika Westerberg 		if (!tb_port_has_remote(port) && !port->xdomain)
205591c0c120SMika Westerberg 			continue;
205691c0c120SMika Westerberg 
2057284652a4SMika Westerberg 		if (port->remote) {
20582ca3263aSMika Westerberg 			tb_switch_lane_bonding_enable(port->remote->sw);
2059de462039SMika Westerberg 			tb_switch_configure_link(port->remote->sw);
206091c0c120SMika Westerberg 
206191c0c120SMika Westerberg 			tb_restore_children(port->remote->sw);
2062284652a4SMika Westerberg 		} else if (port->xdomain) {
2063f9cad07bSMika Westerberg 			tb_port_configure_xdomain(port, port->xdomain);
2064284652a4SMika Westerberg 		}
206591c0c120SMika Westerberg 	}
206691c0c120SMika Westerberg }
206791c0c120SMika Westerberg 
20689d3cce0bSMika Westerberg static int tb_resume_noirq(struct tb *tb)
206923dd5bb4SAndreas Noever {
20709d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
207193f36adeSMika Westerberg 	struct tb_tunnel *tunnel, *n;
207243bddb26SMika Westerberg 	unsigned int usb3_delay = 0;
207343bddb26SMika Westerberg 	LIST_HEAD(tunnels);
20749d3cce0bSMika Westerberg 
2075daa5140fSMika Westerberg 	tb_dbg(tb, "resuming...\n");
207623dd5bb4SAndreas Noever 
207723dd5bb4SAndreas Noever 	/* remove any pci devices the firmware might have setup */
2078356b6c4eSMika Westerberg 	tb_switch_reset(tb->root_switch);
207923dd5bb4SAndreas Noever 
208023dd5bb4SAndreas Noever 	tb_switch_resume(tb->root_switch);
208123dd5bb4SAndreas Noever 	tb_free_invalid_tunnels(tb);
208223dd5bb4SAndreas Noever 	tb_free_unplugged_children(tb->root_switch);
208391c0c120SMika Westerberg 	tb_restore_children(tb->root_switch);
208443bddb26SMika Westerberg 
208543bddb26SMika Westerberg 	/*
208643bddb26SMika Westerberg 	 * If we get here from suspend to disk, the boot firmware or the
208743bddb26SMika Westerberg 	 * restore kernel might have created tunnels of their own. Since
208843bddb26SMika Westerberg 	 * we cannot be sure they are usable for us, we find and tear
208943bddb26SMika Westerberg 	 * them down.
209043bddb26SMika Westerberg 	 */
209143bddb26SMika Westerberg 	tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
209243bddb26SMika Westerberg 	list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
209343bddb26SMika Westerberg 		if (tb_tunnel_is_usb3(tunnel))
209443bddb26SMika Westerberg 			usb3_delay = 500;
209543bddb26SMika Westerberg 		tb_tunnel_deactivate(tunnel);
209643bddb26SMika Westerberg 		tb_tunnel_free(tunnel);
209743bddb26SMika Westerberg 	}
209843bddb26SMika Westerberg 
209943bddb26SMika Westerberg 	/* Re-create our tunnels now */
210043bddb26SMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
210143bddb26SMika Westerberg 		/* USB3 requires delay before it can be re-activated */
210243bddb26SMika Westerberg 		/* USB3 requires a delay before it can be re-activated */
210343bddb26SMika Westerberg 			msleep(usb3_delay);
210443bddb26SMika Westerberg 			/* Only need to do it once */
210543bddb26SMika Westerberg 			usb3_delay = 0;
210643bddb26SMika Westerberg 		}
210793f36adeSMika Westerberg 		tb_tunnel_restart(tunnel);
210843bddb26SMika Westerberg 	}
21099d3cce0bSMika Westerberg 	if (!list_empty(&tcm->tunnel_list)) {
211023dd5bb4SAndreas Noever 		/*
211123dd5bb4SAndreas Noever 		 * The PCIe links need some time to get going.
211223dd5bb4SAndreas Noever 		 * 100ms works for me...
211323dd5bb4SAndreas Noever 		 */
2114daa5140fSMika Westerberg 		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
211523dd5bb4SAndreas Noever 		msleep(100);
211623dd5bb4SAndreas Noever 	}
211723dd5bb4SAndreas Noever 	 /* Allow tb_handle_hotplug to progress events */
21189d3cce0bSMika Westerberg 	tcm->hotplug_active = true;
2119daa5140fSMika Westerberg 	tb_dbg(tb, "resume finished\n");
21209d3cce0bSMika Westerberg 
21219d3cce0bSMika Westerberg 	return 0;
21229d3cce0bSMika Westerberg }
21239d3cce0bSMika Westerberg 
21247ea4cd6bSMika Westerberg static int tb_free_unplugged_xdomains(struct tb_switch *sw)
21257ea4cd6bSMika Westerberg {
2126b433d010SMika Westerberg 	struct tb_port *port;
2127b433d010SMika Westerberg 	int ret = 0;
21287ea4cd6bSMika Westerberg 
2129b433d010SMika Westerberg 	tb_switch_for_each_port(sw, port) {
21307ea4cd6bSMika Westerberg 		if (tb_is_upstream_port(port))
21317ea4cd6bSMika Westerberg 			continue;
21327ea4cd6bSMika Westerberg 		if (port->xdomain && port->xdomain->is_unplugged) {
2133dacb1287SKranthi Kuntala 			tb_retimer_remove_all(port);
21347ea4cd6bSMika Westerberg 			tb_xdomain_remove(port->xdomain);
2135284652a4SMika Westerberg 			tb_port_unconfigure_xdomain(port);
21367ea4cd6bSMika Westerberg 			port->xdomain = NULL;
21377ea4cd6bSMika Westerberg 			ret++;
21387ea4cd6bSMika Westerberg 		} else if (port->remote) {
21397ea4cd6bSMika Westerberg 			ret += tb_free_unplugged_xdomains(port->remote->sw);
21407ea4cd6bSMika Westerberg 		}
21417ea4cd6bSMika Westerberg 	}
21427ea4cd6bSMika Westerberg 
21437ea4cd6bSMika Westerberg 	return ret;
21447ea4cd6bSMika Westerberg }
21457ea4cd6bSMika Westerberg 
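/*
 * Freeze/thaw bracket hibernation image creation. The hardware is not
 * reset on this path, so pausing hotplug event processing while the
 * image is written, and re-enabling it afterwards, is assumed to be
 * sufficient here.
 */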
2146884e4d57SMika Westerberg static int tb_freeze_noirq(struct tb *tb)
2147884e4d57SMika Westerberg {
2148884e4d57SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
2149884e4d57SMika Westerberg 
2150884e4d57SMika Westerberg 	tcm->hotplug_active = false;
2151884e4d57SMika Westerberg 	return 0;
2152884e4d57SMika Westerberg }
2153884e4d57SMika Westerberg 
2154884e4d57SMika Westerberg static int tb_thaw_noirq(struct tb *tb)
2155884e4d57SMika Westerberg {
2156884e4d57SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
2157884e4d57SMika Westerberg 
2158884e4d57SMika Westerberg 	tcm->hotplug_active = true;
2159884e4d57SMika Westerberg 	return 0;
2160884e4d57SMika Westerberg }
2161884e4d57SMika Westerberg 
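/*
 * The ->complete() hook runs at the end of system resume, after every
 * device has been resumed, which makes it a safe point to drop
 * XDomains that disappeared while the system was asleep.
 */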
21627ea4cd6bSMika Westerberg static void tb_complete(struct tb *tb)
21637ea4cd6bSMika Westerberg {
21647ea4cd6bSMika Westerberg 	/*
21657ea4cd6bSMika Westerberg 	 * Release any unplugged XDomains. If another domain was
21667ea4cd6bSMika Westerberg 	 * swapped in place of an unplugged XDomain, run another
21677ea4cd6bSMika Westerberg 	 * rescan to pick it up.
21687ea4cd6bSMika Westerberg 	 */
21697ea4cd6bSMika Westerberg 	mutex_lock(&tb->lock);
21707ea4cd6bSMika Westerberg 	if (tb_free_unplugged_xdomains(tb->root_switch))
21717ea4cd6bSMika Westerberg 		tb_scan_switch(tb->root_switch);
21727ea4cd6bSMika Westerberg 	mutex_unlock(&tb->lock);
21737ea4cd6bSMika Westerberg }
21747ea4cd6bSMika Westerberg 
21756ac6faeeSMika Westerberg static int tb_runtime_suspend(struct tb *tb)
21766ac6faeeSMika Westerberg {
21776ac6faeeSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
21786ac6faeeSMika Westerberg 
21796ac6faeeSMika Westerberg 	mutex_lock(&tb->lock);
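	/*
	 * The boolean argument is assumed to tell tb_switch_suspend()
	 * that this is a runtime suspend rather than system sleep.
	 */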
21806ac6faeeSMika Westerberg 	tb_switch_suspend(tb->root_switch, true);
21816ac6faeeSMika Westerberg 	tcm->hotplug_active = false;
21826ac6faeeSMika Westerberg 	mutex_unlock(&tb->lock);
21836ac6faeeSMika Westerberg 
21846ac6faeeSMika Westerberg 	return 0;
21856ac6faeeSMika Westerberg }
21866ac6faeeSMika Westerberg 
21876ac6faeeSMika Westerberg static void tb_remove_work(struct work_struct *work)
21886ac6faeeSMika Westerberg {
21896ac6faeeSMika Westerberg 	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
21906ac6faeeSMika Westerberg 	struct tb *tb = tcm_to_tb(tcm);
21916ac6faeeSMika Westerberg 
21926ac6faeeSMika Westerberg 	mutex_lock(&tb->lock);
21936ac6faeeSMika Westerberg 	if (tb->root_switch) {
21946ac6faeeSMika Westerberg 		tb_free_unplugged_children(tb->root_switch);
21956ac6faeeSMika Westerberg 		tb_free_unplugged_xdomains(tb->root_switch);
21966ac6faeeSMika Westerberg 	}
21976ac6faeeSMika Westerberg 	mutex_unlock(&tb->lock);
21986ac6faeeSMika Westerberg }
21996ac6faeeSMika Westerberg 
22006ac6faeeSMika Westerberg static int tb_runtime_resume(struct tb *tb)
22016ac6faeeSMika Westerberg {
22026ac6faeeSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
22036ac6faeeSMika Westerberg 	struct tb_tunnel *tunnel, *n;
22046ac6faeeSMika Westerberg 
22056ac6faeeSMika Westerberg 	mutex_lock(&tb->lock);
22066ac6faeeSMika Westerberg 	tb_switch_resume(tb->root_switch);
22076ac6faeeSMika Westerberg 	tb_free_invalid_tunnels(tb);
22086ac6faeeSMika Westerberg 	tb_restore_children(tb->root_switch);
22096ac6faeeSMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
22106ac6faeeSMika Westerberg 		tb_tunnel_restart(tunnel);
22116ac6faeeSMika Westerberg 	tcm->hotplug_active = true;
22126ac6faeeSMika Westerberg 	mutex_unlock(&tb->lock);
22136ac6faeeSMika Westerberg 
22146ac6faeeSMika Westerberg 	/*
22156ac6faeeSMika Westerberg 	 * Schedule cleanup of any unplugged devices. Run this from a
22166ac6faeeSMika Westerberg 	 * separate work item to avoid a possible deadlock if the
22176ac6faeeSMika Westerberg 	 * device removal runtime resumes the unplugged device.
22186ac6faeeSMika Westerberg 	 */
22196ac6faeeSMika Westerberg 	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
22206ac6faeeSMika Westerberg 	return 0;
22216ac6faeeSMika Westerberg }
22226ac6faeeSMika Westerberg 
22239d3cce0bSMika Westerberg static const struct tb_cm_ops tb_cm_ops = {
22249d3cce0bSMika Westerberg 	.start = tb_start,
22259d3cce0bSMika Westerberg 	.stop = tb_stop,
22269d3cce0bSMika Westerberg 	.suspend_noirq = tb_suspend_noirq,
22279d3cce0bSMika Westerberg 	.resume_noirq = tb_resume_noirq,
2228884e4d57SMika Westerberg 	.freeze_noirq = tb_freeze_noirq,
2229884e4d57SMika Westerberg 	.thaw_noirq = tb_thaw_noirq,
22307ea4cd6bSMika Westerberg 	.complete = tb_complete,
22316ac6faeeSMika Westerberg 	.runtime_suspend = tb_runtime_suspend,
22326ac6faeeSMika Westerberg 	.runtime_resume = tb_runtime_resume,
223381a54b5eSMika Westerberg 	.handle_event = tb_handle_event,
22343da88be2SMika Westerberg 	.disapprove_switch = tb_disconnect_pci,
223599cabbb0SMika Westerberg 	.approve_switch = tb_tunnel_pci,
22367ea4cd6bSMika Westerberg 	.approve_xdomain_paths = tb_approve_xdomain_paths,
22377ea4cd6bSMika Westerberg 	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
22389d3cce0bSMika Westerberg };
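/*
 * A minimal sketch of how the domain core is assumed to dispatch into
 * the ops table above (the exact helper lives in domain.c and may
 * differ in detail):
 *
 *	static int tb_domain_runtime_resume(struct device *dev)
 *	{
 *		struct tb *tb = container_of(dev, struct tb, dev);
 *
 *		if (tb->cm_ops->runtime_resume)
 *			return tb->cm_ops->runtime_resume(tb);
 *		return 0;
 *	}
 */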
22399d3cce0bSMika Westerberg 
2240349bfe08SMika Westerberg /*
2241349bfe08SMika Westerberg  * During suspend the Thunderbolt controller is reset and all PCIe
2242349bfe08SMika Westerberg  * tunnels are lost. The NHI driver will try to reestablish all tunnels
2243349bfe08SMika Westerberg  * during resume. This adds device links between the tunneled PCIe
2244349bfe08SMika Westerberg  * downstream ports and the NHI so that the device core makes sure the
2245349bfe08SMika Westerberg  * NHI is resumed before the tunneled ports.
2246349bfe08SMika Westerberg  */
2247349bfe08SMika Westerberg static void tb_apple_add_links(struct tb_nhi *nhi)
2248349bfe08SMika Westerberg {
2249349bfe08SMika Westerberg 	struct pci_dev *upstream, *pdev;
2250349bfe08SMika Westerberg 
2251349bfe08SMika Westerberg 	if (!x86_apple_machine)
2252349bfe08SMika Westerberg 		return;
2253349bfe08SMika Westerberg 
2254349bfe08SMika Westerberg 	switch (nhi->pdev->device) {
2255349bfe08SMika Westerberg 	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
2256349bfe08SMika Westerberg 	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
2257349bfe08SMika Westerberg 	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
2258349bfe08SMika Westerberg 	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
2259349bfe08SMika Westerberg 		break;
2260349bfe08SMika Westerberg 	default:
2261349bfe08SMika Westerberg 		return;
2262349bfe08SMika Westerberg 	}
2263349bfe08SMika Westerberg 
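	/*
	 * Walk up from the NHI until we hit the PCIe upstream port of
	 * the Thunderbolt controller; its subordinate bus carries the
	 * hotplug downstream ports handled below.
	 */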
2264349bfe08SMika Westerberg 	upstream = pci_upstream_bridge(nhi->pdev);
2265349bfe08SMika Westerberg 	while (upstream) {
2266349bfe08SMika Westerberg 		if (!pci_is_pcie(upstream))
2267349bfe08SMika Westerberg 			return;
2268349bfe08SMika Westerberg 		if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
2269349bfe08SMika Westerberg 			break;
2270349bfe08SMika Westerberg 		upstream = pci_upstream_bridge(upstream);
2271349bfe08SMika Westerberg 	}
2272349bfe08SMika Westerberg 
2273349bfe08SMika Westerberg 	if (!upstream)
2274349bfe08SMika Westerberg 		return;
2275349bfe08SMika Westerberg 
2276349bfe08SMika Westerberg 	/*
2277349bfe08SMika Westerberg 	 * For each hotplug downstream port, add a device link back
2278349bfe08SMika Westerberg 	 * to the NHI so that PCIe tunnels can be re-established after
2279349bfe08SMika Westerberg 	 * sleep.
2280349bfe08SMika Westerberg 	 */
2281349bfe08SMika Westerberg 	for_each_pci_bridge(pdev, upstream->subordinate) {
2282349bfe08SMika Westerberg 		const struct device_link *link;
2283349bfe08SMika Westerberg 
2284349bfe08SMika Westerberg 		if (!pci_is_pcie(pdev))
2285349bfe08SMika Westerberg 			continue;
2286349bfe08SMika Westerberg 		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
2287349bfe08SMika Westerberg 		    !pdev->is_hotplug_bridge)
2288349bfe08SMika Westerberg 			continue;
2289349bfe08SMika Westerberg 
2290349bfe08SMika Westerberg 		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
2291349bfe08SMika Westerberg 				       DL_FLAG_AUTOREMOVE_SUPPLIER |
2292349bfe08SMika Westerberg 				       DL_FLAG_PM_RUNTIME);
2293349bfe08SMika Westerberg 		if (link) {
2294349bfe08SMika Westerberg 			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
2295349bfe08SMika Westerberg 				dev_name(&pdev->dev));
2296349bfe08SMika Westerberg 		} else {
2297349bfe08SMika Westerberg 			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
2298349bfe08SMika Westerberg 				 dev_name(&pdev->dev));
2299349bfe08SMika Westerberg 		}
2300349bfe08SMika Westerberg 	}
2301349bfe08SMika Westerberg }
2302349bfe08SMika Westerberg 
23039d3cce0bSMika Westerberg struct tb *tb_probe(struct tb_nhi *nhi)
23049d3cce0bSMika Westerberg {
23059d3cce0bSMika Westerberg 	struct tb_cm *tcm;
23069d3cce0bSMika Westerberg 	struct tb *tb;
23079d3cce0bSMika Westerberg 
23087f0a34d7SMika Westerberg 	tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
23099d3cce0bSMika Westerberg 	if (!tb)
23109d3cce0bSMika Westerberg 		return NULL;
23119d3cce0bSMika Westerberg 
2312c6da62a2SMika Westerberg 	if (tb_acpi_may_tunnel_pcie())
231399cabbb0SMika Westerberg 		tb->security_level = TB_SECURITY_USER;
2314c6da62a2SMika Westerberg 	else
2315c6da62a2SMika Westerberg 		tb->security_level = TB_SECURITY_NOPCIE;
2316c6da62a2SMika Westerberg 
23179d3cce0bSMika Westerberg 	tb->cm_ops = &tb_cm_ops;
23189d3cce0bSMika Westerberg 
23199d3cce0bSMika Westerberg 	tcm = tb_priv(tb);
23209d3cce0bSMika Westerberg 	INIT_LIST_HEAD(&tcm->tunnel_list);
23218afe909bSMika Westerberg 	INIT_LIST_HEAD(&tcm->dp_resources);
23226ac6faeeSMika Westerberg 	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
23236ce35635SMika Westerberg 	tb_init_bandwidth_groups(tcm);
23249d3cce0bSMika Westerberg 
2325e0258805SMika Westerberg 	tb_dbg(tb, "using software connection manager\n");
2326e0258805SMika Westerberg 
2327349bfe08SMika Westerberg 	tb_apple_add_links(nhi);
2328349bfe08SMika Westerberg 	tb_acpi_add_links(nhi);
2329349bfe08SMika Westerberg 
23309d3cce0bSMika Westerberg 	return tb;
233123dd5bb4SAndreas Noever }
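/*
 * A hedged usage sketch: the NHI driver is assumed to try the firmware
 * connection manager first and fall back to this software one, along
 * the lines of:
 *
 *	tb = icm_probe(nhi);
 *	if (!tb)
 *		tb = tb_probe(nhi);
 */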
2332