// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

#define TB_TIMEOUT	100	/* ms */
#define MAX_GROUPS	7	/* max Group_ID is 7 */

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 * @remove_work: Work used to remove any unplugged routers after
 *		 runtime resume
 * @groups: Bandwidth groups used in this domain.
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
	struct delayed_work remove_work;
	struct tb_bandwidth_group groups[MAX_GROUPS];
};

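/*
 * Note: the pointer arithmetic below relies on how tb_domain_alloc()
 * lays out the allocation: the connection manager private data that
 * tb_priv() returns lives in the trailing flexible array directly
 * after struct tb itself. A rough sketch of the assumed layout:
 *
 *	+----------------+  <- struct tb *
 *	| struct tb      |
 *	+----------------+  <- tb_priv() == struct tb_cm *
 *	| struct tb_cm   |
 *	+----------------+
 *
 * so stepping back sizeof(struct tb) bytes recovers the domain pointer.
 */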
static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
{
	return ((void *)tcm - sizeof(struct tb));
}

struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

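/*
 * DP tunnels whose paths run over the same USB4 links are placed into
 * the same bandwidth group so that bandwidth can be re-distributed
 * between them. The group index is what ends up in the DP IN adapter
 * Group_ID field which, as the MAX_GROUPS define above suggests, can
 * hold values 1..7; index 0 is never used.
 */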
static void tb_init_bandwidth_groups(struct tb_cm *tcm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
		struct tb_bandwidth_group *group = &tcm->groups[i];

		group->tb = tcm_to_tb(tcm);
		group->index = i + 1;
		INIT_LIST_HEAD(&group->ports);
	}
}

static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
					   struct tb_port *in)
{
	if (!group || WARN_ON(in->group))
		return;

	in->group = group;
	list_add_tail(&in->group_list, &group->ports);

	tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
}

static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
		struct tb_bandwidth_group *group = &tcm->groups[i];

		if (list_empty(&group->ports))
			return group;
	}

	return NULL;
}

static struct tb_bandwidth_group *
tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
			  struct tb_port *out)
{
	struct tb_bandwidth_group *group;
	struct tb_tunnel *tunnel;

	/*
	 * Find all DP tunnels that go through all the same USB4 links
	 * as this one. Because we always set up tunnels the same way we
	 * can just check for the routers at both ends of the tunnels
	 * and if they are the same we have a match.
	 */
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (!tb_tunnel_is_dp(tunnel))
			continue;

		if (tunnel->src_port->sw == in->sw &&
		    tunnel->dst_port->sw == out->sw) {
			group = tunnel->src_port->group;
			if (group) {
				tb_bandwidth_group_attach_port(group, in);
				return group;
			}
		}
	}

	/* Pick up next available group then */
	group = tb_find_free_bandwidth_group(tcm);
	if (group)
		tb_bandwidth_group_attach_port(group, in);
	else
		tb_port_warn(in, "no available bandwidth groups\n");

	return group;
}

static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
					struct tb_port *out)
{
	if (usb4_dp_port_bw_mode_enabled(in)) {
		int index, i;

		index = usb4_dp_port_group_id(in);
		for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
			if (tcm->groups[i].index == index) {
				tb_bandwidth_group_attach_port(&tcm->groups[i], in);
				return;
			}
		}
	}

	tb_attach_bandwidth_group(tcm, in, out);
}

static void tb_detach_bandwidth_group(struct tb_port *in)
{
	struct tb_bandwidth_group *group = in->group;

	if (group) {
		in->group = NULL;
		list_del_init(&in->group_list);

		tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
	}
}

static void tb_handle_hotplug(struct work_struct *work);

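/*
 * Plug events arrive from the NHI in a context where we cannot do the
 * heavy lifting directly, so the handling is deferred to the domain
 * workqueue where sleeping is allowed. A minimal sketch of the assumed
 * flow (the caller is the domain's event callback elsewhere in this
 * file):
 *
 *	NHI event notification -> tb_queue_hotplug()
 *	    -> tb->wq runs tb_handle_hotplug() -> scan or teardown
 */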
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}

static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP IN resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}

static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available discovered\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);
}

static void tb_discover_dp_resources(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_discover_dp_resource(tb, tunnel->dst_port);
	}
}

static int tb_enable_clx(struct tb_switch *sw)
{
	int ret;

	/*
	 * CL0s and CL1 are enabled and supported together.
	 * Silently ignore CLx enabling in case CLx is not supported.
	 */
	ret = tb_switch_clx_enable(sw, TB_CL0S | TB_CL1);
	return ret == -EOPNOTSUPP ? 0 : ret;
}

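/*
 * Used as a device_for_each_child() callback below. The children of
 * the host router device are not necessarily routers (USB4 ports and
 * XDomain connections hang off the same device, for example), in which
 * case tb_to_switch() returns NULL and the callback skips them.
 */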
static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data)
{
	struct tb_switch *sw;

	sw = tb_to_switch(dev);
	if (sw) {
		tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI,
					tb_switch_clx_is_enabled(sw, TB_CL1));
		if (tb_switch_tmu_enable(sw))
			tb_sw_warn(sw, "failed to increase TMU rate\n");
	}

	return 0;
}

static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel)
{
	struct tb_switch *sw;

	if (!tunnel)
		return;

	/*
	 * Once first DP tunnel is established we change the TMU
	 * accuracy of first depth child routers (and the host router)
	 * to the highest. This is needed for the DP tunneling to work
	 * but also allows CL0s.
	 */
	sw = tunnel->tb->root_switch;
	device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy);
}

static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/*
	 * If CL1 is enabled then we need to configure the TMU accuracy
	 * level to normal. Otherwise we keep the TMU running at the
	 * highest accuracy.
	 */
	if (tb_switch_clx_is_enabled(sw, TB_CL1))
		ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true);
	else
		ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);
	if (ret)
		return ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}

static void tb_switch_discover_tunnels(struct tb_switch *sw,
				       struct list_head *list,
				       bool alloc_hopids)
{
	struct tb *tb = sw->tb;
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
			tb_increase_tmu_accuracy(tunnel);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
			break;

		default:
			break;
		}

		if (tunnel)
			list_add_tail(&tunnel->list, list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_discover_tunnels(port->remote->sw, list,
						   alloc_hopids);
		}
	}
}

static void tb_discover_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		} else if (tb_tunnel_is_dp(tunnel)) {
			struct tb_port *in = tunnel->src_port;
			struct tb_port *out = tunnel->dst_port;

			/* Keep the domain from powering down */
			pm_runtime_get_sync(&in->sw->dev);
			pm_runtime_get_sync(&out->sw->dev);

			tb_discover_bandwidth_group(tcm, in, out);
		}
	}
}

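/*
 * Note that the runtime PM references taken for discovered DP tunnels
 * above are dropped again in tb_deactivate_and_free_tunnel() when the
 * tunnel goes away, so the two functions need to stay in sync.
 */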
static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_configure_xdomain(port, xd);
	return tb_lc_configure_xdomain(port);
}

static void tb_port_unconfigure_xdomain(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		usb4_port_unconfigure_xdomain(port);
	else
		tb_lc_unconfigure_xdomain(port);

	tb_port_enable(port->dual_link_port);
}

static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	if (!tb_is_xdomain_enabled())
		return;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_port_configure_xdomain(port, xd);
		tb_xdomain_add(xd);
	}
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down && !tb_usb3_port_is_enabled(down))
		return down;
	return NULL;
}

static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}

static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
						   struct tb_port *src_port,
						   struct tb_port *dst_port)
{
	struct tb_port *port, *usb3_down;
	struct tb_switch *sw;

	/* Pick the router that is deepest in the topology */
	if (dst_port->sw->config.depth > src_port->sw->config.depth)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	/* Can't be the host router */
	if (sw == tb->root_switch)
		return NULL;

	/* Find the downstream USB4 port that leads to this router */
	port = tb_port_at(tb_route(sw), tb->root_switch);
	/* Find the corresponding host router USB3 downstream port */
	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
	if (!usb3_down)
		return NULL;

	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
}

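/*
 * A worked example of the per-link arithmetic done below (numbers are
 * illustrative): a bonded Gen 3 link runs at 20 Gb/s per lane, so
 * link_speed = 20 and link_width = 2, giving
 *
 *	up_bw = 20 * 2 * 1000 = 40000 Mb/s
 *	up_bw -= 40000 / 10 (10% guard band) -> 36000 Mb/s
 *
 * from which the bandwidth consumed by DP tunnels crossing the link
 * (and possibly USB3) is still subtracted. The initial 40000 Mb/s cap
 * simply matches the fastest link this arithmetic can produce.
 */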
static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
	struct tb_port *dst_port, int *available_up, int *available_down)
{
	int usb3_consumed_up, usb3_consumed_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_port *port;

	tb_dbg(tb, "calculating available bandwidth between %llx:%u <-> %llx:%u\n",
	       tb_route(src_port->sw), src_port->port, tb_route(dst_port->sw),
	       dst_port->port);

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel && tunnel->src_port != src_port &&
	    tunnel->dst_port != dst_port) {
		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
						   &usb3_consumed_down);
		if (ret)
			return ret;
	} else {
		usb3_consumed_up = 0;
		usb3_consumed_down = 0;
	}

	*available_up = *available_down = 40000;

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int link_speed, link_width, up_bw, down_bw;

		if (!tb_port_is_null(port))
			continue;

		if (tb_is_upstream_port(port)) {
			link_speed = port->sw->link_speed;
		} else {
			link_speed = tb_port_get_link_speed(port);
			if (link_speed < 0)
				return link_speed;
		}

		link_width = port->bonded ? 2 : 1;

		up_bw = link_speed * link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		up_bw -= up_bw / 10;
		down_bw = up_bw;

		tb_port_dbg(port, "link total bandwidth %d/%d Mb/s\n", up_bw,
			    down_bw);

		/*
		 * Find all DP tunnels that cross the port and reduce
		 * their consumed bandwidth from the available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int dp_consumed_up, dp_consumed_down;

			if (tb_tunnel_is_invalid(tunnel))
				continue;

			if (!tb_tunnel_is_dp(tunnel))
				continue;

			if (!tb_tunnel_port_on_path(tunnel, port))
				continue;

			/*
			 * Ignore the DP tunnel between src_port and
			 * dst_port because it is the same tunnel and we
			 * may be re-calculating estimated bandwidth.
			 */
			if (tunnel->src_port == src_port &&
			    tunnel->dst_port == dst_port)
				continue;

			ret = tb_tunnel_consumed_bandwidth(tunnel,
							   &dp_consumed_up,
							   &dp_consumed_down);
			if (ret)
				return ret;

			up_bw -= dp_consumed_up;
			down_bw -= dp_consumed_down;
		}

		/*
		 * If USB3 is tunneled from the host router down to the
		 * branch leading to port we need to take USB3 consumed
		 * bandwidth into account regardless of whether it
		 * actually crosses the port.
		 */
		up_bw -= usb3_consumed_up;
		down_bw -= usb3_consumed_down;

		if (up_bw < *available_up)
			*available_up = up_bw;
		if (down_bw < *available_down)
			*available_down = down_bw;
	}

	if (*available_up < 0)
		*available_up = 0;
	if (*available_down < 0)
		*available_down = 0;

	return 0;
}

static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
}

static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
				      struct tb_port *dst_port)
{
	int ret, available_up, available_down;
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return;

	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");

	/*
	 * Calculate available bandwidth for the first hop USB3 tunnel.
	 * That determines the whole USB3 bandwidth for this branch.
	 */
	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
				     &available_up, &available_down);
	if (ret) {
		tb_warn(tb, "failed to calculate available bandwidth\n");
		return;
	}

	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
	       available_up, available_down);

	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
}

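/*
 * The overall flow when tunneling USB3 below is, roughly:
 *
 *	1. tb_release_unused_usb3_bandwidth() - shrink the parent USB3
 *	   tunnel to what it actually consumes
 *	2. tb_available_bandwidth()           - see what is left
 *	3. tb_tunnel_alloc_usb3() + activate  - create the new tunnel
 *	4. tb_reclaim_usb3_bandwidth()        - hand unused bandwidth
 *	   back to the first hop USB3 tunnel
 *
 * This ordering mirrors what the error paths undo.
 */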
static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_usb3()) {
		tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
		return 0;
	}

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	if (!sw->link_usb4)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_switch_downstream_port(sw);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;

		/* Make all unused bandwidth available for the new tunnel */
		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
		if (ret)
			return ret;
	}

	ret = tb_available_bandwidth(tb, down, up, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return ret;
}

static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (!tb_acpi_may_tunnel_usb3())
		return 0;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_scan_port(struct tb_port *port);

/*
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	pm_runtime_get_sync(&sw->dev);

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}

/*
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	bool discovery = false;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */

	if (port->usb4)
		pm_runtime_get_sync(&port->usb4->dev);

	if (tb_wait_for_port(port, false) <= 0)
		goto out_rpm_put;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		goto out_rpm_put;
	}

	tb_retimer_scan(port, true);

	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		goto out_rpm_put;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		goto out_rpm_put;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		tb_port_unconfigure_xdomain(port);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active) {
		dev_set_uevent_suppress(&sw->dev, true);
		discovery = true;
	}

	/*
	 * At the moment runtime PM is only supported on Thunderbolt 2
	 * and beyond (devices with LC).
	 */
	sw->rpm = sw->generation > 1;

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		goto out_rpm_put;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	tb_switch_lane_bonding_enable(sw);
	/* Set the link configured */
	tb_switch_configure_link(sw);
	/*
	 * CL0s and CL1 are enabled and supported together.
	 * Silently ignore CLx enabling in case CLx is not supported.
	 */
	if (discovery)
		tb_sw_dbg(sw, "discovery, not touching CL states\n");
	else if (tb_enable_clx(sw))
		tb_sw_warn(sw, "failed to enable CL states\n");

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/* Scan upstream retimers */
	tb_retimer_scan(upstream_port, true);

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * any new.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);
	tb_scan_switch(sw);

out_rpm_put:
	if (port->usb4) {
		pm_runtime_mark_last_busy(&port->usb4->dev);
		pm_runtime_put_autosuspend(&port->usb4->dev);
	}
}

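/*
 * Tunnel teardown order below matters: a DP tunnel first detaches from
 * its bandwidth group and releases the DP IN resource, then falls
 * through to the USB3 case so that the bandwidth it consumed can be
 * handed back to the first hop USB3 tunnel of the branch.
 */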
static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	struct tb_port *src_port, *dst_port;
	struct tb *tb;

	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	tb = tunnel->tb;
	src_port = tunnel->src_port;
	dst_port = tunnel->dst_port;

	switch (tunnel->type) {
	case TB_TUNNEL_DP:
		tb_detach_bandwidth_group(src_port);
		/*
		 * In case of DP tunnel make sure the DP IN resource is
		 * deallocated properly.
		 */
		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
		/* Now we can allow the domain to runtime suspend again */
		pm_runtime_mark_last_busy(&dst_port->sw->dev);
		pm_runtime_put_autosuspend(&dst_port->sw->dev);
		pm_runtime_mark_last_busy(&src_port->sw->dev);
		pm_runtime_put_autosuspend(&src_port->sw->dev);
		fallthrough;

	case TB_TUNNEL_USB3:
		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
		break;

	default:
		/*
		 * PCIe and DMA tunnels do not consume guaranteed
		 * bandwidth.
		 */
		break;
	}

	tb_tunnel_free(tunnel);
}

/*
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

/*
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

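/*
 * For reference, the hard-coded adapter mapping used below (a summary
 * drawn from the code itself, not from a spec table):
 *
 *	Controller		phy port 0	phy port 1
 *	Cactus/Alpine Ridge	6		7
 *	Falcon Ridge		6		8
 *	Titan Ridge		8		9
 *
 * where tb_phy_port_from_link() folds the two lanes of a link into a
 * single physical port number.
 */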
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		if (tb_pci_port_is_enabled(down))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

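/*
 * On choosing between estimated_up and estimated_down below: when the
 * DP IN adapter sits closer to the host than the DP OUT adapter
 * (smaller depth), the video flows downstream and the downstream
 * estimate is the one that matters; otherwise the upstream estimate is
 * used.
 */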
10556ce35635SMika Westerberg static void
10566ce35635SMika Westerberg tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
10576ce35635SMika Westerberg {
10586ce35635SMika Westerberg 	struct tb_tunnel *first_tunnel;
10596ce35635SMika Westerberg 	struct tb *tb = group->tb;
10606ce35635SMika Westerberg 	struct tb_port *in;
10616ce35635SMika Westerberg 	int ret;
10626ce35635SMika Westerberg 
10636ce35635SMika Westerberg 	tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
10646ce35635SMika Westerberg 	       group->index);
10656ce35635SMika Westerberg 
10666ce35635SMika Westerberg 	first_tunnel = NULL;
10676ce35635SMika Westerberg 	list_for_each_entry(in, &group->ports, group_list) {
10686ce35635SMika Westerberg 		int estimated_bw, estimated_up, estimated_down;
10696ce35635SMika Westerberg 		struct tb_tunnel *tunnel;
10706ce35635SMika Westerberg 		struct tb_port *out;
10716ce35635SMika Westerberg 
10726ce35635SMika Westerberg 		if (!usb4_dp_port_bw_mode_enabled(in))
10736ce35635SMika Westerberg 			continue;
10746ce35635SMika Westerberg 
10756ce35635SMika Westerberg 		tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
10766ce35635SMika Westerberg 		if (WARN_ON(!tunnel))
10776ce35635SMika Westerberg 			break;
10786ce35635SMika Westerberg 
10796ce35635SMika Westerberg 		if (!first_tunnel) {
10806ce35635SMika Westerberg 			/*
10816ce35635SMika Westerberg 			 * Since USB3 bandwidth is shared by all DP
10826ce35635SMika Westerberg 			 * tunnels under the host router USB4 port, even
10836ce35635SMika Westerberg 			 * if they do not begin from the host router, we
10846ce35635SMika Westerberg 			 * can release USB3 bandwidth just once and not
10856ce35635SMika Westerberg 			 * for each tunnel separately.
10866ce35635SMika Westerberg 			 */
10876ce35635SMika Westerberg 			first_tunnel = tunnel;
10886ce35635SMika Westerberg 			ret = tb_release_unused_usb3_bandwidth(tb,
10896ce35635SMika Westerberg 				first_tunnel->src_port, first_tunnel->dst_port);
10906ce35635SMika Westerberg 			if (ret) {
10916ce35635SMika Westerberg 				tb_port_warn(in,
10926ce35635SMika Westerberg 					"failed to release unused bandwidth\n");
10936ce35635SMika Westerberg 				break;
10946ce35635SMika Westerberg 			}
10956ce35635SMika Westerberg 		}
10966ce35635SMika Westerberg 
10976ce35635SMika Westerberg 		out = tunnel->dst_port;
10986ce35635SMika Westerberg 		ret = tb_available_bandwidth(tb, in, out, &estimated_up,
10996ce35635SMika Westerberg 					     &estimated_down);
11006ce35635SMika Westerberg 		if (ret) {
11016ce35635SMika Westerberg 			tb_port_warn(in,
11026ce35635SMika Westerberg 				"failed to re-calculate estimated bandwidth\n");
11036ce35635SMika Westerberg 			break;
11046ce35635SMika Westerberg 		}
11056ce35635SMika Westerberg 
11066ce35635SMika Westerberg 		/*
11076ce35635SMika Westerberg 		 * Estimated bandwidth includes:
11086ce35635SMika Westerberg 		 *  - already allocated bandwidth for the DP tunnel
11096ce35635SMika Westerberg 		 *  - available bandwidth along the path
11106ce35635SMika Westerberg 		 *  - bandwidth allocated for USB 3.x but not used.
11116ce35635SMika Westerberg 		 */
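		/*
		 * Illustrative arithmetic (the numbers are hypothetical,
		 * not read from hardware): a tunnel with 8000 Mb/s
		 * allocated downstream, 9000 Mb/s free along the path and
		 * 2000 Mb/s of unused USB3 allocation would be estimated
		 * at 8000 + 9000 + 2000 = 19000 Mb/s downstream.
		 */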
11126ce35635SMika Westerberg 		tb_port_dbg(in, "re-calculated estimated bandwidth %u/%u Mb/s\n",
11136ce35635SMika Westerberg 			    estimated_up, estimated_down);
11146ce35635SMika Westerberg 
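		/*
		 * DP flows from IN towards OUT: when the DP IN adapter
		 * sits closer to the host (smaller depth) the stream runs
		 * downstream, otherwise upstream.
		 */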
11156ce35635SMika Westerberg 		if (in->sw->config.depth < out->sw->config.depth)
11166ce35635SMika Westerberg 			estimated_bw = estimated_down;
11176ce35635SMika Westerberg 		else
11186ce35635SMika Westerberg 			estimated_bw = estimated_up;
11196ce35635SMika Westerberg 
11206ce35635SMika Westerberg 		if (usb4_dp_port_set_estimated_bw(in, estimated_bw))
11216ce35635SMika Westerberg 			tb_port_warn(in, "failed to update estimated bandwidth\n");
11226ce35635SMika Westerberg 	}
11236ce35635SMika Westerberg 
11246ce35635SMika Westerberg 	if (first_tunnel)
11256ce35635SMika Westerberg 		tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
11266ce35635SMika Westerberg 					  first_tunnel->dst_port);
11276ce35635SMika Westerberg 
11286ce35635SMika Westerberg 	tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
11296ce35635SMika Westerberg }
11306ce35635SMika Westerberg 
11316ce35635SMika Westerberg static void tb_recalc_estimated_bandwidth(struct tb *tb)
11326ce35635SMika Westerberg {
11336ce35635SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
11346ce35635SMika Westerberg 	int i;
11356ce35635SMika Westerberg 
11366ce35635SMika Westerberg 	tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
11376ce35635SMika Westerberg 
11386ce35635SMika Westerberg 	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
11396ce35635SMika Westerberg 		struct tb_bandwidth_group *group = &tcm->groups[i];
11406ce35635SMika Westerberg 
11416ce35635SMika Westerberg 		if (!list_empty(&group->ports))
11426ce35635SMika Westerberg 			tb_recalc_estimated_bandwidth_for_group(group);
11436ce35635SMika Westerberg 	}
11446ce35635SMika Westerberg 
11456ce35635SMika Westerberg 	tb_dbg(tb, "bandwidth re-calculation done\n");
11466ce35635SMika Westerberg }
11476ce35635SMika Westerberg 
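/*
 * tb_find_dp_out() - Find a free DP OUT adapter to pair with @in
 *
 * Prefers a DP OUT that hangs under the same host router downstream
 * port as the DP IN so that the tunnel stays within a single branch of
 * the topology.
 */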
1148e876f34aSMika Westerberg static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
1149e876f34aSMika Westerberg {
1150e876f34aSMika Westerberg 	struct tb_port *host_port, *port;
1151e876f34aSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
1152e876f34aSMika Westerberg 
1153e876f34aSMika Westerberg 	host_port = tb_route(in->sw) ?
1154e876f34aSMika Westerberg 		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
1155e876f34aSMika Westerberg 
1156e876f34aSMika Westerberg 	list_for_each_entry(port, &tcm->dp_resources, list) {
1157e876f34aSMika Westerberg 		if (!tb_port_is_dpout(port))
1158e876f34aSMika Westerberg 			continue;
1159e876f34aSMika Westerberg 
1160e876f34aSMika Westerberg 		if (tb_port_is_enabled(port)) {
1161b0ef48fcSMika Westerberg 			tb_port_dbg(port, "DP OUT in use\n");
1162e876f34aSMika Westerberg 			continue;
1163e876f34aSMika Westerberg 		}
1164e876f34aSMika Westerberg 
1165e876f34aSMika Westerberg 		tb_port_dbg(port, "DP OUT available\n");
1166e876f34aSMika Westerberg 
1167e876f34aSMika Westerberg 		/*
1168e876f34aSMika Westerberg 		 * Keep the DP tunnel under the topology starting from
1169e876f34aSMika Westerberg 		 * the same host router downstream port.
1170e876f34aSMika Westerberg 		 */
1171e876f34aSMika Westerberg 		if (host_port && tb_route(port->sw)) {
1172e876f34aSMika Westerberg 			struct tb_port *p;
1173e876f34aSMika Westerberg 
1174e876f34aSMika Westerberg 			p = tb_port_at(tb_route(port->sw), tb->root_switch);
1175e876f34aSMika Westerberg 			if (p != host_port)
1176e876f34aSMika Westerberg 				continue;
1177e876f34aSMika Westerberg 		}
1178e876f34aSMika Westerberg 
1179e876f34aSMika Westerberg 		return port;
1180e876f34aSMika Westerberg 	}
1181e876f34aSMika Westerberg 
1182e876f34aSMika Westerberg 	return NULL;
1183e876f34aSMika Westerberg }
1184e876f34aSMika Westerberg 
11858afe909bSMika Westerberg static void tb_tunnel_dp(struct tb *tb)
11864f807e47SMika Westerberg {
11879d2d0a5cSMika Westerberg 	int available_up, available_down, ret, link_nr;
11884f807e47SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
11898afe909bSMika Westerberg 	struct tb_port *port, *in, *out;
11904f807e47SMika Westerberg 	struct tb_tunnel *tunnel;
11914f807e47SMika Westerberg 
1192c6da62a2SMika Westerberg 	if (!tb_acpi_may_tunnel_dp()) {
1193c6da62a2SMika Westerberg 		tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
1194c6da62a2SMika Westerberg 		return;
1195c6da62a2SMika Westerberg 	}
1196c6da62a2SMika Westerberg 
11978afe909bSMika Westerberg 	/*
11988afe909bSMika Westerberg 	 * Find a pair of inactive DP IN and DP OUT adapters and then
11998afe909bSMika Westerberg 	 * establish a DP tunnel between them.
12008afe909bSMika Westerberg 	 */
12018afe909bSMika Westerberg 	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
12024f807e47SMika Westerberg 
12038afe909bSMika Westerberg 	in = NULL;
12048afe909bSMika Westerberg 	out = NULL;
12058afe909bSMika Westerberg 	list_for_each_entry(port, &tcm->dp_resources, list) {
1206e876f34aSMika Westerberg 		if (!tb_port_is_dpin(port))
1207e876f34aSMika Westerberg 			continue;
1208e876f34aSMika Westerberg 
12098afe909bSMika Westerberg 		if (tb_port_is_enabled(port)) {
1210b0ef48fcSMika Westerberg 			tb_port_dbg(port, "DP IN in use\n");
12118afe909bSMika Westerberg 			continue;
12128afe909bSMika Westerberg 		}
12138afe909bSMika Westerberg 
1214e876f34aSMika Westerberg 		tb_port_dbg(port, "DP IN available\n");
12158afe909bSMika Westerberg 
1216e876f34aSMika Westerberg 		out = tb_find_dp_out(tb, port);
1217e876f34aSMika Westerberg 		if (out) {
12188afe909bSMika Westerberg 			in = port;
1219e876f34aSMika Westerberg 			break;
1220e876f34aSMika Westerberg 		}
12218afe909bSMika Westerberg 	}
12228afe909bSMika Westerberg 
12238afe909bSMika Westerberg 	if (!in) {
12248afe909bSMika Westerberg 		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
12258afe909bSMika Westerberg 		return;
12268afe909bSMika Westerberg 	}
12278afe909bSMika Westerberg 	if (!out) {
12288afe909bSMika Westerberg 		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
12298afe909bSMika Westerberg 		return;
12308afe909bSMika Westerberg 	}
12318afe909bSMika Westerberg 
12326ac6faeeSMika Westerberg 	/*
12339d2d0a5cSMika Westerberg 	 * This is only applicable to links that are not bonded (so
12349d2d0a5cSMika Westerberg 	 * when Thunderbolt 1 hardware is involved somewhere in the
12359d2d0a5cSMika Westerberg 	 * topology). For these, try to share the DP bandwidth between
12369d2d0a5cSMika Westerberg 	 * the two lanes.
12379d2d0a5cSMika Westerberg 	 */
12389d2d0a5cSMika Westerberg 	link_nr = 1;
12399d2d0a5cSMika Westerberg 	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
12409d2d0a5cSMika Westerberg 		if (tb_tunnel_is_dp(tunnel)) {
12419d2d0a5cSMika Westerberg 			link_nr = 0;
12429d2d0a5cSMika Westerberg 			break;
12439d2d0a5cSMika Westerberg 		}
12449d2d0a5cSMika Westerberg 	}
12459d2d0a5cSMika Westerberg 
12469d2d0a5cSMika Westerberg 	/*
12476ac6faeeSMika Westerberg 	 * The DP stream needs the domain to be active so runtime resume
12486ac6faeeSMika Westerberg 	 * both ends of the tunnel.
12496ac6faeeSMika Westerberg 	 *
12506ac6faeeSMika Westerberg 	 * This should bring the routers in the middle active as well
12516ac6faeeSMika Westerberg 	 * and keep the domain from runtime suspending while the DP
12526ac6faeeSMika Westerberg 	 * tunnel is active.
12536ac6faeeSMika Westerberg 	 */
12546ac6faeeSMika Westerberg 	pm_runtime_get_sync(&in->sw->dev);
12556ac6faeeSMika Westerberg 	pm_runtime_get_sync(&out->sw->dev);
12566ac6faeeSMika Westerberg 
12578afe909bSMika Westerberg 	if (tb_switch_alloc_dp_resource(in->sw, in)) {
12588afe909bSMika Westerberg 		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
12596ac6faeeSMika Westerberg 		goto err_rpm_put;
12608afe909bSMika Westerberg 	}
12614f807e47SMika Westerberg 
12626ce35635SMika Westerberg 	if (!tb_attach_bandwidth_group(tcm, in, out))
12636ce35635SMika Westerberg 		goto err_dealloc_dp;
12646ce35635SMika Westerberg 
12650bd680cdSMika Westerberg 	/* Make all unused USB3 bandwidth available for the new DP tunnel */
12660bd680cdSMika Westerberg 	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
12670bd680cdSMika Westerberg 	if (ret) {
12680bd680cdSMika Westerberg 		tb_warn(tb, "failed to release unused bandwidth\n");
12696ce35635SMika Westerberg 		goto err_detach_group;
1270a11b88adSMika Westerberg 	}
1271a11b88adSMika Westerberg 
12726ce35635SMika Westerberg 	ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
12730bd680cdSMika Westerberg 	if (ret)
12746ce35635SMika Westerberg 		goto err_reclaim_usb;
1275a11b88adSMika Westerberg 
12760bd680cdSMika Westerberg 	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
12770bd680cdSMika Westerberg 	       available_up, available_down);
12780bd680cdSMika Westerberg 
12799d2d0a5cSMika Westerberg 	tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
12809d2d0a5cSMika Westerberg 				    available_down);
12814f807e47SMika Westerberg 	if (!tunnel) {
12828afe909bSMika Westerberg 		tb_port_dbg(out, "could not allocate DP tunnel\n");
12836ce35635SMika Westerberg 		goto err_reclaim_usb;
12844f807e47SMika Westerberg 	}
12854f807e47SMika Westerberg 
12864f807e47SMika Westerberg 	if (tb_tunnel_activate(tunnel)) {
12874f807e47SMika Westerberg 		tb_port_info(out, "DP tunnel activation failed, aborting\n");
12880bd680cdSMika Westerberg 		goto err_free;
12894f807e47SMika Westerberg 	}
12904f807e47SMika Westerberg 
12914f807e47SMika Westerberg 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
12920bd680cdSMika Westerberg 	tb_reclaim_usb3_bandwidth(tb, in, out);
12936ce35635SMika Westerberg 
12946ce35635SMika Westerberg 	/* Update the domain with the new bandwidth estimation */
12956ce35635SMika Westerberg 	tb_recalc_estimated_bandwidth(tb);
12966ce35635SMika Westerberg 
12973084b48fSGil Fine 	/*
12983084b48fSGil Fine 	 * If a DP tunnel exists, change the TMU mode of the host
12993084b48fSGil Fine 	 * router's first-depth children to HiFi for CL0s to work.
13003084b48fSGil Fine 	 */
13017d283f41SMika Westerberg 	tb_increase_tmu_accuracy(tunnel);
13028afe909bSMika Westerberg 	return;
13038afe909bSMika Westerberg 
13040bd680cdSMika Westerberg err_free:
13050bd680cdSMika Westerberg 	tb_tunnel_free(tunnel);
13066ce35635SMika Westerberg err_reclaim_usb:
13070bd680cdSMika Westerberg 	tb_reclaim_usb3_bandwidth(tb, in, out);
13086ce35635SMika Westerberg err_detach_group:
13096ce35635SMika Westerberg 	tb_detach_bandwidth_group(in);
13100bd680cdSMika Westerberg err_dealloc_dp:
13118afe909bSMika Westerberg 	tb_switch_dealloc_dp_resource(in->sw, in);
13126ac6faeeSMika Westerberg err_rpm_put:
13136ac6faeeSMika Westerberg 	pm_runtime_mark_last_busy(&out->sw->dev);
13146ac6faeeSMika Westerberg 	pm_runtime_put_autosuspend(&out->sw->dev);
13156ac6faeeSMika Westerberg 	pm_runtime_mark_last_busy(&in->sw->dev);
13166ac6faeeSMika Westerberg 	pm_runtime_put_autosuspend(&in->sw->dev);
13174f807e47SMika Westerberg }
13184f807e47SMika Westerberg 
13198afe909bSMika Westerberg static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
13204f807e47SMika Westerberg {
13218afe909bSMika Westerberg 	struct tb_port *in, *out;
13228afe909bSMika Westerberg 	struct tb_tunnel *tunnel;
13238afe909bSMika Westerberg 
13248afe909bSMika Westerberg 	if (tb_port_is_dpin(port)) {
13258afe909bSMika Westerberg 		tb_port_dbg(port, "DP IN resource unavailable\n");
13268afe909bSMika Westerberg 		in = port;
13278afe909bSMika Westerberg 		out = NULL;
13288afe909bSMika Westerberg 	} else {
13298afe909bSMika Westerberg 		tb_port_dbg(port, "DP OUT resource unavailable\n");
13308afe909bSMika Westerberg 		in = NULL;
13318afe909bSMika Westerberg 		out = port;
13328afe909bSMika Westerberg 	}
13338afe909bSMika Westerberg 
13348afe909bSMika Westerberg 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
13358afe909bSMika Westerberg 	tb_deactivate_and_free_tunnel(tunnel);
13368afe909bSMika Westerberg 	list_del_init(&port->list);
13378afe909bSMika Westerberg 
13388afe909bSMika Westerberg 	/*
13398afe909bSMika Westerberg 	 * See if there is another DP OUT port that can be used to
13408afe909bSMika Westerberg 	 * create another tunnel.
13418afe909bSMika Westerberg 	 */
13426ce35635SMika Westerberg 	tb_recalc_estimated_bandwidth(tb);
13438afe909bSMika Westerberg 	tb_tunnel_dp(tb);
13448afe909bSMika Westerberg }
13458afe909bSMika Westerberg 
13468afe909bSMika Westerberg static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
13478afe909bSMika Westerberg {
13488afe909bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
13498afe909bSMika Westerberg 	struct tb_port *p;
13508afe909bSMika Westerberg 
13518afe909bSMika Westerberg 	if (tb_port_is_enabled(port))
13528afe909bSMika Westerberg 		return;
13538afe909bSMika Westerberg 
13548afe909bSMika Westerberg 	list_for_each_entry(p, &tcm->dp_resources, list) {
13558afe909bSMika Westerberg 		if (p == port)
13568afe909bSMika Westerberg 			return;
13578afe909bSMika Westerberg 	}
13588afe909bSMika Westerberg 
13598afe909bSMika Westerberg 	tb_port_dbg(port, "DP %s resource available\n",
13608afe909bSMika Westerberg 		    tb_port_is_dpin(port) ? "IN" : "OUT");
13618afe909bSMika Westerberg 	list_add_tail(&port->list, &tcm->dp_resources);
13628afe909bSMika Westerberg 
13638afe909bSMika Westerberg 	/* Look for suitable DP IN <-> DP OUT pairs now */
13648afe909bSMika Westerberg 	tb_tunnel_dp(tb);
13654f807e47SMika Westerberg }
13664f807e47SMika Westerberg 
136781a2e3e4SMika Westerberg static void tb_disconnect_and_release_dp(struct tb *tb)
136881a2e3e4SMika Westerberg {
136981a2e3e4SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
137081a2e3e4SMika Westerberg 	struct tb_tunnel *tunnel, *n;
137181a2e3e4SMika Westerberg 
137281a2e3e4SMika Westerberg 	/*
137381a2e3e4SMika Westerberg 	 * Tear down all DP tunnels and release their resources. They
137481a2e3e4SMika Westerberg 	 * will be re-established after resume based on plug events.
137581a2e3e4SMika Westerberg 	 */
137681a2e3e4SMika Westerberg 	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
137781a2e3e4SMika Westerberg 		if (tb_tunnel_is_dp(tunnel))
137881a2e3e4SMika Westerberg 			tb_deactivate_and_free_tunnel(tunnel);
137981a2e3e4SMika Westerberg 	}
138081a2e3e4SMika Westerberg 
138181a2e3e4SMika Westerberg 	while (!list_empty(&tcm->dp_resources)) {
138281a2e3e4SMika Westerberg 		struct tb_port *port;
138381a2e3e4SMika Westerberg 
138481a2e3e4SMika Westerberg 		port = list_first_entry(&tcm->dp_resources,
138581a2e3e4SMika Westerberg 					struct tb_port, list);
138681a2e3e4SMika Westerberg 		list_del_init(&port->list);
138781a2e3e4SMika Westerberg 	}
138881a2e3e4SMika Westerberg }
138981a2e3e4SMika Westerberg 
13903da88be2SMika Westerberg static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
13913da88be2SMika Westerberg {
13923da88be2SMika Westerberg 	struct tb_tunnel *tunnel;
13933da88be2SMika Westerberg 	struct tb_port *up;
13943da88be2SMika Westerberg 
13953da88be2SMika Westerberg 	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
13963da88be2SMika Westerberg 	if (WARN_ON(!up))
13973da88be2SMika Westerberg 		return -ENODEV;
13983da88be2SMika Westerberg 
13993da88be2SMika Westerberg 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
14003da88be2SMika Westerberg 	if (WARN_ON(!tunnel))
14013da88be2SMika Westerberg 		return -ENODEV;
14023da88be2SMika Westerberg 
140330a4eca6SMika Westerberg 	tb_switch_xhci_disconnect(sw);
140430a4eca6SMika Westerberg 
14053da88be2SMika Westerberg 	tb_tunnel_deactivate(tunnel);
14063da88be2SMika Westerberg 	list_del(&tunnel->list);
14073da88be2SMika Westerberg 	tb_tunnel_free(tunnel);
14083da88be2SMika Westerberg 	return 0;
14093da88be2SMika Westerberg }
14103da88be2SMika Westerberg 
141199cabbb0SMika Westerberg static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
141299cabbb0SMika Westerberg {
141399cabbb0SMika Westerberg 	struct tb_port *up, *down, *port;
14149d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
141599cabbb0SMika Westerberg 	struct tb_tunnel *tunnel;
14169d3cce0bSMika Westerberg 
1417386e5e29SMika Westerberg 	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
141899cabbb0SMika Westerberg 	if (!up)
141999cabbb0SMika Westerberg 		return 0;
14203364f0c1SAndreas Noever 
142199cabbb0SMika Westerberg 	/*
142299cabbb0SMika Westerberg 	 * Look up an available down port. Since we are chaining, it
142399cabbb0SMika Westerberg 	 * should be found right above this switch.
142499cabbb0SMika Westerberg 	 */
14257ce54221SGil Fine 	port = tb_switch_downstream_port(sw);
14267ce54221SGil Fine 	down = tb_find_pcie_down(tb_switch_parent(sw), port);
142799cabbb0SMika Westerberg 	if (!down)
142899cabbb0SMika Westerberg 		return 0;
14293364f0c1SAndreas Noever 
143099cabbb0SMika Westerberg 	tunnel = tb_tunnel_alloc_pci(tb, up, down);
143199cabbb0SMika Westerberg 	if (!tunnel)
143299cabbb0SMika Westerberg 		return -ENOMEM;
14333364f0c1SAndreas Noever 
143493f36adeSMika Westerberg 	if (tb_tunnel_activate(tunnel)) {
143599cabbb0SMika Westerberg 		tb_port_info(up,
14363364f0c1SAndreas Noever 			     "PCIe tunnel activation failed, aborting\n");
143793f36adeSMika Westerberg 		tb_tunnel_free(tunnel);
143899cabbb0SMika Westerberg 		return -EIO;
14393364f0c1SAndreas Noever 	}
14403364f0c1SAndreas Noever 
144143f977bcSGil Fine 	/*
144243f977bcSGil Fine 	 * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
144343f977bcSGil Fine 	 * here.
144443f977bcSGil Fine 	 */
144543f977bcSGil Fine 	if (tb_switch_pcie_l1_enable(sw))
144643f977bcSGil Fine 		tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
144743f977bcSGil Fine 
144830a4eca6SMika Westerberg 	if (tb_switch_xhci_connect(sw))
144930a4eca6SMika Westerberg 		tb_sw_warn(sw, "failed to connect xHCI\n");
145030a4eca6SMika Westerberg 
145199cabbb0SMika Westerberg 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
145299cabbb0SMika Westerberg 	return 0;
14533364f0c1SAndreas Noever }
14549da672a4SAndreas Noever 
1455180b0689SMika Westerberg static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1456180b0689SMika Westerberg 				    int transmit_path, int transmit_ring,
1457180b0689SMika Westerberg 				    int receive_path, int receive_ring)
14587ea4cd6bSMika Westerberg {
14597ea4cd6bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
14607ea4cd6bSMika Westerberg 	struct tb_port *nhi_port, *dst_port;
14617ea4cd6bSMika Westerberg 	struct tb_tunnel *tunnel;
14627ea4cd6bSMika Westerberg 	struct tb_switch *sw;
14637ea4cd6bSMika Westerberg 
14647ea4cd6bSMika Westerberg 	sw = tb_to_switch(xd->dev.parent);
14657ea4cd6bSMika Westerberg 	dst_port = tb_port_at(xd->route, sw);
1466386e5e29SMika Westerberg 	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
14677ea4cd6bSMika Westerberg 
14687ea4cd6bSMika Westerberg 	mutex_lock(&tb->lock);
1469180b0689SMika Westerberg 	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
1470180b0689SMika Westerberg 				     transmit_ring, receive_path, receive_ring);
14717ea4cd6bSMika Westerberg 	if (!tunnel) {
14727ea4cd6bSMika Westerberg 		mutex_unlock(&tb->lock);
14737ea4cd6bSMika Westerberg 		return -ENOMEM;
14747ea4cd6bSMika Westerberg 	}
14757ea4cd6bSMika Westerberg 
14767ea4cd6bSMika Westerberg 	if (tb_tunnel_activate(tunnel)) {
14777ea4cd6bSMika Westerberg 		tb_port_info(nhi_port,
14787ea4cd6bSMika Westerberg 			     "DMA tunnel activation failed, aborting\n");
14797ea4cd6bSMika Westerberg 		tb_tunnel_free(tunnel);
14807ea4cd6bSMika Westerberg 		mutex_unlock(&tb->lock);
14817ea4cd6bSMika Westerberg 		return -EIO;
14827ea4cd6bSMika Westerberg 	}
14837ea4cd6bSMika Westerberg 
14847ea4cd6bSMika Westerberg 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
14857ea4cd6bSMika Westerberg 	mutex_unlock(&tb->lock);
14867ea4cd6bSMika Westerberg 	return 0;
14877ea4cd6bSMika Westerberg }
14887ea4cd6bSMika Westerberg 
1489180b0689SMika Westerberg static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1490180b0689SMika Westerberg 					  int transmit_path, int transmit_ring,
1491180b0689SMika Westerberg 					  int receive_path, int receive_ring)
14927ea4cd6bSMika Westerberg {
1493180b0689SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
1494180b0689SMika Westerberg 	struct tb_port *nhi_port, *dst_port;
1495180b0689SMika Westerberg 	struct tb_tunnel *tunnel, *n;
14967ea4cd6bSMika Westerberg 	struct tb_switch *sw;
14977ea4cd6bSMika Westerberg 
14987ea4cd6bSMika Westerberg 	sw = tb_to_switch(xd->dev.parent);
14997ea4cd6bSMika Westerberg 	dst_port = tb_port_at(xd->route, sw);
1500180b0689SMika Westerberg 	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
15017ea4cd6bSMika Westerberg 
1502180b0689SMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1503180b0689SMika Westerberg 		if (!tb_tunnel_is_dma(tunnel))
1504180b0689SMika Westerberg 			continue;
1505180b0689SMika Westerberg 		if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
1506180b0689SMika Westerberg 			continue;
1507180b0689SMika Westerberg 
1508180b0689SMika Westerberg 		if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
1509180b0689SMika Westerberg 					receive_path, receive_ring))
15108afe909bSMika Westerberg 			tb_deactivate_and_free_tunnel(tunnel);
15117ea4cd6bSMika Westerberg 	}
1512180b0689SMika Westerberg }
15137ea4cd6bSMika Westerberg 
1514180b0689SMika Westerberg static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1515180b0689SMika Westerberg 				       int transmit_path, int transmit_ring,
1516180b0689SMika Westerberg 				       int receive_path, int receive_ring)
15177ea4cd6bSMika Westerberg {
15187ea4cd6bSMika Westerberg 	if (!xd->is_unplugged) {
15197ea4cd6bSMika Westerberg 		mutex_lock(&tb->lock);
1520180b0689SMika Westerberg 		__tb_disconnect_xdomain_paths(tb, xd, transmit_path,
1521180b0689SMika Westerberg 					      transmit_ring, receive_path,
1522180b0689SMika Westerberg 					      receive_ring);
15237ea4cd6bSMika Westerberg 		mutex_unlock(&tb->lock);
15247ea4cd6bSMika Westerberg 	}
15257ea4cd6bSMika Westerberg 	return 0;
15267ea4cd6bSMika Westerberg }
15277ea4cd6bSMika Westerberg 
1528d6cc51cdSAndreas Noever /* hotplug handling */
1529d6cc51cdSAndreas Noever 
1530877e50b3SLee Jones /*
1531d6cc51cdSAndreas Noever  * tb_handle_hotplug() - handle hotplug event
1532d6cc51cdSAndreas Noever  *
1533d6cc51cdSAndreas Noever  * Executes on tb->wq.
1534d6cc51cdSAndreas Noever  */
1535d6cc51cdSAndreas Noever static void tb_handle_hotplug(struct work_struct *work)
1536d6cc51cdSAndreas Noever {
1537d6cc51cdSAndreas Noever 	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
1538d6cc51cdSAndreas Noever 	struct tb *tb = ev->tb;
15399d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
1540053596d9SAndreas Noever 	struct tb_switch *sw;
1541053596d9SAndreas Noever 	struct tb_port *port;
1542284652a4SMika Westerberg 
15436ac6faeeSMika Westerberg 	/* Bring the domain back from sleep if it was suspended */
15446ac6faeeSMika Westerberg 	pm_runtime_get_sync(&tb->dev);
15456ac6faeeSMika Westerberg 
1546d6cc51cdSAndreas Noever 	mutex_lock(&tb->lock);
15479d3cce0bSMika Westerberg 	if (!tcm->hotplug_active)
1548d6cc51cdSAndreas Noever 		goto out; /* during init, suspend or shutdown */
1549d6cc51cdSAndreas Noever 
15508f965efdSMika Westerberg 	sw = tb_switch_find_by_route(tb, ev->route);
1551053596d9SAndreas Noever 	if (!sw) {
1552053596d9SAndreas Noever 		tb_warn(tb,
1553053596d9SAndreas Noever 			"hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
1554053596d9SAndreas Noever 			ev->route, ev->port, ev->unplug);
1555053596d9SAndreas Noever 		goto out;
1556053596d9SAndreas Noever 	}
1557053596d9SAndreas Noever 	if (ev->port > sw->config.max_port_number) {
1558053596d9SAndreas Noever 		tb_warn(tb,
1559053596d9SAndreas Noever 			"hotplug event from non-existent port %llx:%x (unplug: %d)\n",
1560053596d9SAndreas Noever 			ev->route, ev->port, ev->unplug);
15618f965efdSMika Westerberg 		goto put_sw;
1562053596d9SAndreas Noever 	}
1563053596d9SAndreas Noever 	port = &sw->ports[ev->port];
1564053596d9SAndreas Noever 	if (tb_is_upstream_port(port)) {
1565dfe40ca4SMika Westerberg 		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
1566053596d9SAndreas Noever 		       ev->route, ev->port, ev->unplug);
15678f965efdSMika Westerberg 		goto put_sw;
1568053596d9SAndreas Noever 	}
15696ac6faeeSMika Westerberg 
15706ac6faeeSMika Westerberg 	pm_runtime_get_sync(&sw->dev);
15716ac6faeeSMika Westerberg 
1572053596d9SAndreas Noever 	if (ev->unplug) {
1573dacb1287SKranthi Kuntala 		tb_retimer_remove_all(port);
1574dacb1287SKranthi Kuntala 
1575dfe40ca4SMika Westerberg 		if (tb_port_has_remote(port)) {
15767ea4cd6bSMika Westerberg 			tb_port_dbg(port, "switch unplugged\n");
1577aae20bb6SLukas Wunner 			tb_sw_set_unplugged(port->remote->sw);
15783364f0c1SAndreas Noever 			tb_free_invalid_tunnels(tb);
15798afe909bSMika Westerberg 			tb_remove_dp_resources(port->remote->sw);
1580cf29b9afSRajmohan Mani 			tb_switch_tmu_disable(port->remote->sw);
1581de462039SMika Westerberg 			tb_switch_unconfigure_link(port->remote->sw);
158291c0c120SMika Westerberg 			tb_switch_lane_bonding_disable(port->remote->sw);
1583bfe778acSMika Westerberg 			tb_switch_remove(port->remote->sw);
1584053596d9SAndreas Noever 			port->remote = NULL;
1585dfe40ca4SMika Westerberg 			if (port->dual_link_port)
1586dfe40ca4SMika Westerberg 				port->dual_link_port->remote = NULL;
15878afe909bSMika Westerberg 			/* Maybe we can create another DP tunnel */
15886ce35635SMika Westerberg 			tb_recalc_estimated_bandwidth(tb);
15898afe909bSMika Westerberg 			tb_tunnel_dp(tb);
15907ea4cd6bSMika Westerberg 		} else if (port->xdomain) {
15917ea4cd6bSMika Westerberg 			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
15927ea4cd6bSMika Westerberg 
15937ea4cd6bSMika Westerberg 			tb_port_dbg(port, "xdomain unplugged\n");
15947ea4cd6bSMika Westerberg 			/*
15957ea4cd6bSMika Westerberg 			 * Service drivers are unbound during
15967ea4cd6bSMika Westerberg 			 * tb_xdomain_remove() so setting XDomain as
15977ea4cd6bSMika Westerberg 			 * unplugged here prevents deadlock if they call
15987ea4cd6bSMika Westerberg 			 * tb_xdomain_disable_paths(). We will tear down
1599180b0689SMika Westerberg 			 * all the tunnels below.
16007ea4cd6bSMika Westerberg 			 */
16017ea4cd6bSMika Westerberg 			xd->is_unplugged = true;
16027ea4cd6bSMika Westerberg 			tb_xdomain_remove(xd);
16037ea4cd6bSMika Westerberg 			port->xdomain = NULL;
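			/*
			 * Passing -1 for the paths and rings acts as a
			 * wildcard here, so every DMA tunnel towards
			 * this XDomain gets torn down.
			 */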
1604180b0689SMika Westerberg 			__tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
16057ea4cd6bSMika Westerberg 			tb_xdomain_put(xd);
1606284652a4SMika Westerberg 			tb_port_unconfigure_xdomain(port);
16078afe909bSMika Westerberg 		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
16088afe909bSMika Westerberg 			tb_dp_resource_unavailable(tb, port);
160930a4eca6SMika Westerberg 		} else if (!port->port) {
161030a4eca6SMika Westerberg 			tb_sw_dbg(sw, "xHCI disconnect request\n");
161130a4eca6SMika Westerberg 			tb_switch_xhci_disconnect(sw);
1612053596d9SAndreas Noever 		} else {
161362efe699SMika Westerberg 			tb_port_dbg(port,
1614053596d9SAndreas Noever 				   "got unplug event for disconnected port, ignoring\n");
1615053596d9SAndreas Noever 		}
1616053596d9SAndreas Noever 	} else if (port->remote) {
161762efe699SMika Westerberg 		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
161830a4eca6SMika Westerberg 	} else if (!port->port && sw->authorized) {
161930a4eca6SMika Westerberg 		tb_sw_dbg(sw, "xHCI connect request\n");
162030a4eca6SMika Westerberg 		tb_switch_xhci_connect(sw);
1621053596d9SAndreas Noever 	} else {
1622344e0643SMika Westerberg 		if (tb_port_is_null(port)) {
162362efe699SMika Westerberg 			tb_port_dbg(port, "hotplug: scanning\n");
1624053596d9SAndreas Noever 			tb_scan_port(port);
162599cabbb0SMika Westerberg 			if (!port->remote)
162662efe699SMika Westerberg 				tb_port_dbg(port, "hotplug: no switch found\n");
16278afe909bSMika Westerberg 		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
16288afe909bSMika Westerberg 			tb_dp_resource_available(tb, port);
1629053596d9SAndreas Noever 		}
1630344e0643SMika Westerberg 	}
16318f965efdSMika Westerberg 
16326ac6faeeSMika Westerberg 	pm_runtime_mark_last_busy(&sw->dev);
16336ac6faeeSMika Westerberg 	pm_runtime_put_autosuspend(&sw->dev);
16346ac6faeeSMika Westerberg 
16358f965efdSMika Westerberg put_sw:
16368f965efdSMika Westerberg 	tb_switch_put(sw);
1637d6cc51cdSAndreas Noever out:
1638d6cc51cdSAndreas Noever 	mutex_unlock(&tb->lock);
16396ac6faeeSMika Westerberg 
16406ac6faeeSMika Westerberg 	pm_runtime_mark_last_busy(&tb->dev);
16416ac6faeeSMika Westerberg 	pm_runtime_put_autosuspend(&tb->dev);
16426ac6faeeSMika Westerberg 
1643d6cc51cdSAndreas Noever 	kfree(ev);
1644d6cc51cdSAndreas Noever }
1645d6cc51cdSAndreas Noever 
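/*
 * tb_alloc_dp_bandwidth() - Apply a DP bandwidth allocation request
 *
 * Summary of the flow below: if the (granularity corrected) request
 * does not exceed what the tunnel already has, only the reservation is
 * changed. Otherwise unused USB3 bandwidth is released first, the
 * remaining bandwidth on the path is checked against the request, and
 * the USB3 bandwidth is reclaimed again before returning.
 */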
16466ce35635SMika Westerberg static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
16476ce35635SMika Westerberg 				 int *requested_down)
16486ce35635SMika Westerberg {
16496ce35635SMika Westerberg 	int allocated_up, allocated_down, available_up, available_down, ret;
16506ce35635SMika Westerberg 	int requested_up_corrected, requested_down_corrected, granularity;
16516ce35635SMika Westerberg 	int max_up, max_down, max_up_rounded, max_down_rounded;
16526ce35635SMika Westerberg 	struct tb *tb = tunnel->tb;
16536ce35635SMika Westerberg 	struct tb_port *in, *out;
16546ce35635SMika Westerberg 
16556ce35635SMika Westerberg 	ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
16566ce35635SMika Westerberg 	if (ret)
16576ce35635SMika Westerberg 		return ret;
16586ce35635SMika Westerberg 
16596ce35635SMika Westerberg 	in = tunnel->src_port;
16606ce35635SMika Westerberg 	out = tunnel->dst_port;
16616ce35635SMika Westerberg 
16626ce35635SMika Westerberg 	tb_port_dbg(in, "bandwidth allocated currently %d/%d Mb/s\n",
16636ce35635SMika Westerberg 		    allocated_up, allocated_down);
16646ce35635SMika Westerberg 
16656ce35635SMika Westerberg 	/*
16666ce35635SMika Westerberg 	 * If we get a rounded-up request from the graphics side, say
16676ce35635SMika Westerberg 	 * HBR2 x 4 that is 17500 instead of 17280 (this is because of
16686ce35635SMika Westerberg 	 * the granularity), we allow it too. Here the graphics has
16696ce35635SMika Westerberg 	 * already negotiated with the DPRX the maximum possible rates
16706ce35635SMika Westerberg 	 * (which is 17280 in this case).
16716ce35635SMika Westerberg 	 *
16726ce35635SMika Westerberg 	 * Since the link cannot go higher than 17280, we use that in
16736ce35635SMika Westerberg 	 * our calculations but the DP IN adapter Allocated BW write
16746ce35635SMika Westerberg 	 * must be the same value (17500), otherwise the adapter will
16756ce35635SMika Westerberg 	 * mark it as failed for graphics.
16766ce35635SMika Westerberg 	 */
16776ce35635SMika Westerberg 	ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
16786ce35635SMika Westerberg 	if (ret)
16796ce35635SMika Westerberg 		return ret;
16806ce35635SMika Westerberg 
16816ce35635SMika Westerberg 	ret = usb4_dp_port_granularity(in);
16826ce35635SMika Westerberg 	if (ret < 0)
16836ce35635SMika Westerberg 		return ret;
16846ce35635SMika Westerberg 	granularity = ret;
16856ce35635SMika Westerberg 
16866ce35635SMika Westerberg 	max_up_rounded = roundup(max_up, granularity);
16876ce35635SMika Westerberg 	max_down_rounded = roundup(max_down, granularity);
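	/*
	 * Worked example, assuming a granularity of 250 Mb/s (the
	 * actual value comes from the adapter above): roundup(17280,
	 * 250) == 17500, matching the rounded-up value the graphics
	 * side writes for HBR2 x 4.
	 */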
16886ce35635SMika Westerberg 
16896ce35635SMika Westerberg 	/*
16906ce35635SMika Westerberg 	 * This will "fix" the request down to the maximum supported
16916ce35635SMika Westerberg 	 * rate * lanes if it is at the maximum rounded-up level.
16926ce35635SMika Westerberg 	 */
16936ce35635SMika Westerberg 	requested_up_corrected = *requested_up;
16946ce35635SMika Westerberg 	if (requested_up_corrected == max_up_rounded)
16956ce35635SMika Westerberg 		requested_up_corrected = max_up;
16966ce35635SMika Westerberg 	else if (requested_up_corrected < 0)
16976ce35635SMika Westerberg 		requested_up_corrected = 0;
16986ce35635SMika Westerberg 	requested_down_corrected = *requested_down;
16996ce35635SMika Westerberg 	if (requested_down_corrected == max_down_rounded)
17006ce35635SMika Westerberg 		requested_down_corrected = max_down;
17016ce35635SMika Westerberg 	else if (requested_down_corrected < 0)
17026ce35635SMika Westerberg 		requested_down_corrected = 0;
17036ce35635SMika Westerberg 
17046ce35635SMika Westerberg 	tb_port_dbg(in, "corrected bandwidth request %d/%d Mb/s\n",
17056ce35635SMika Westerberg 		    requested_up_corrected, requested_down_corrected);
17066ce35635SMika Westerberg 
17076ce35635SMika Westerberg 	if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
17086ce35635SMika Westerberg 	    (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
17096ce35635SMika Westerberg 		tb_port_dbg(in, "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
17106ce35635SMika Westerberg 			    requested_up_corrected, requested_down_corrected,
17116ce35635SMika Westerberg 			    max_up_rounded, max_down_rounded);
17126ce35635SMika Westerberg 		return -ENOBUFS;
17136ce35635SMika Westerberg 	}
17146ce35635SMika Westerberg 
17156ce35635SMika Westerberg 	if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
17166ce35635SMika Westerberg 	    (*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
17176ce35635SMika Westerberg 		/*
17186ce35635SMika Westerberg 		 * If the requested bandwidth is less than or equal to what
17196ce35635SMika Westerberg 		 * is currently allocated to that tunnel, we simply change
17206ce35635SMika Westerberg 		 * the reservation of the tunnel. Since all the tunnels
17216ce35635SMika Westerberg 		 * going out from the same USB4 port are in the same
17226ce35635SMika Westerberg 		 * group the released bandwidth will be taken into
17236ce35635SMika Westerberg 		 * account for the other tunnels automatically below.
17246ce35635SMika Westerberg 		 */
17256ce35635SMika Westerberg 		return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
17266ce35635SMika Westerberg 						 requested_down);
17276ce35635SMika Westerberg 	}
17286ce35635SMika Westerberg 
17296ce35635SMika Westerberg 	/*
17306ce35635SMika Westerberg 	 * More bandwidth is requested. Release all the potential
17316ce35635SMika Westerberg 	 * bandwidth from USB3 first.
17326ce35635SMika Westerberg 	 */
17336ce35635SMika Westerberg 	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
17346ce35635SMika Westerberg 	if (ret)
17356ce35635SMika Westerberg 		return ret;
17366ce35635SMika Westerberg 
17376ce35635SMika Westerberg 	/*
17386ce35635SMika Westerberg 	 * Then go over all tunnels that cross the same USB4 ports (they
17396ce35635SMika Westerberg 	 * are also in the same group but we use the same function here
17406ce35635SMika Westerberg 	 * that we use with the normal bandwidth allocation).
17416ce35635SMika Westerberg 	 */
17426ce35635SMika Westerberg 	ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
17436ce35635SMika Westerberg 	if (ret)
17446ce35635SMika Westerberg 		goto reclaim;
17456ce35635SMika Westerberg 
17466ce35635SMika Westerberg 	tb_port_dbg(in, "bandwidth available for allocation %d/%d Mb/s\n",
17476ce35635SMika Westerberg 		    available_up, available_down);
17486ce35635SMika Westerberg 
17496ce35635SMika Westerberg 	if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
17506ce35635SMika Westerberg 	    (*requested_down >= 0 && available_down >= requested_down_corrected)) {
17516ce35635SMika Westerberg 		ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
17526ce35635SMika Westerberg 						requested_down);
17536ce35635SMika Westerberg 	} else {
17546ce35635SMika Westerberg 		ret = -ENOBUFS;
17556ce35635SMika Westerberg 	}
17566ce35635SMika Westerberg 
17576ce35635SMika Westerberg reclaim:
17586ce35635SMika Westerberg 	tb_reclaim_usb3_bandwidth(tb, in, out);
17596ce35635SMika Westerberg 	return ret;
17606ce35635SMika Westerberg }
17616ce35635SMika Westerberg 
17626ce35635SMika Westerberg static void tb_handle_dp_bandwidth_request(struct work_struct *work)
17636ce35635SMika Westerberg {
17646ce35635SMika Westerberg 	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
17656ce35635SMika Westerberg 	int requested_bw, requested_up, requested_down, ret;
17666ce35635SMika Westerberg 	struct tb_port *in, *out;
17676ce35635SMika Westerberg 	struct tb_tunnel *tunnel;
17686ce35635SMika Westerberg 	struct tb *tb = ev->tb;
17696ce35635SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
17706ce35635SMika Westerberg 	struct tb_switch *sw;
17716ce35635SMika Westerberg 
17726ce35635SMika Westerberg 	pm_runtime_get_sync(&tb->dev);
17736ce35635SMika Westerberg 
17746ce35635SMika Westerberg 	mutex_lock(&tb->lock);
17756ce35635SMika Westerberg 	if (!tcm->hotplug_active)
17766ce35635SMika Westerberg 		goto unlock;
17776ce35635SMika Westerberg 
17786ce35635SMika Westerberg 	sw = tb_switch_find_by_route(tb, ev->route);
17796ce35635SMika Westerberg 	if (!sw) {
17806ce35635SMika Westerberg 		tb_warn(tb, "bandwidth request from non-existent router %llx\n",
17816ce35635SMika Westerberg 			ev->route);
17826ce35635SMika Westerberg 		goto unlock;
17836ce35635SMika Westerberg 	}
17846ce35635SMika Westerberg 
17856ce35635SMika Westerberg 	in = &sw->ports[ev->port];
17866ce35635SMika Westerberg 	if (!tb_port_is_dpin(in)) {
17876ce35635SMika Westerberg 		tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
17886ce35635SMika Westerberg 		goto unlock;
17896ce35635SMika Westerberg 	}
17906ce35635SMika Westerberg 
17916ce35635SMika Westerberg 	tb_port_dbg(in, "handling bandwidth allocation request\n");
17926ce35635SMika Westerberg 
17936ce35635SMika Westerberg 	if (!usb4_dp_port_bw_mode_enabled(in)) {
17946ce35635SMika Westerberg 		tb_port_warn(in, "bandwidth allocation mode not enabled\n");
17956ce35635SMika Westerberg 		goto unlock;
17966ce35635SMika Westerberg 	}
17976ce35635SMika Westerberg 
1798ace75e18SMika Westerberg 	ret = usb4_dp_port_requested_bw(in);
1799ace75e18SMika Westerberg 	if (ret < 0) {
1800ace75e18SMika Westerberg 		if (ret == -ENODATA)
18016ce35635SMika Westerberg 			tb_port_dbg(in, "no bandwidth request active\n");
1802ace75e18SMika Westerberg 		else
1803ace75e18SMika Westerberg 			tb_port_warn(in, "failed to read requested bandwidth\n");
18046ce35635SMika Westerberg 		goto unlock;
18056ce35635SMika Westerberg 	}
1806ace75e18SMika Westerberg 	requested_bw = ret;
18076ce35635SMika Westerberg 
18086ce35635SMika Westerberg 	tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
18096ce35635SMika Westerberg 
18106ce35635SMika Westerberg 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
18116ce35635SMika Westerberg 	if (!tunnel) {
18126ce35635SMika Westerberg 		tb_port_warn(in, "failed to find tunnel\n");
18136ce35635SMika Westerberg 		goto unlock;
18146ce35635SMika Westerberg 	}
18156ce35635SMika Westerberg 
18166ce35635SMika Westerberg 	out = tunnel->dst_port;
18176ce35635SMika Westerberg 
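	/*
	 * The request applies to one direction only: downstream when
	 * the DP IN adapter is closer to the host than the DP OUT
	 * adapter, upstream otherwise; -1 marks the unused direction.
	 */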
18186ce35635SMika Westerberg 	if (in->sw->config.depth < out->sw->config.depth) {
18196ce35635SMika Westerberg 		requested_up = -1;
18206ce35635SMika Westerberg 		requested_down = requested_bw;
18216ce35635SMika Westerberg 	} else {
18226ce35635SMika Westerberg 		requested_up = requested_bw;
18236ce35635SMika Westerberg 		requested_down = -1;
18246ce35635SMika Westerberg 	}
18256ce35635SMika Westerberg 
18266ce35635SMika Westerberg 	ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
18276ce35635SMika Westerberg 	if (ret) {
18286ce35635SMika Westerberg 		if (ret == -ENOBUFS)
18296ce35635SMika Westerberg 			tb_port_warn(in, "not enough bandwidth available\n");
18306ce35635SMika Westerberg 		else
18316ce35635SMika Westerberg 			tb_port_warn(in, "failed to change bandwidth allocation\n");
18326ce35635SMika Westerberg 	} else {
18336ce35635SMika Westerberg 		tb_port_dbg(in, "bandwidth allocation changed to %d/%d Mb/s\n",
18346ce35635SMika Westerberg 			    requested_up, requested_down);
18356ce35635SMika Westerberg 
18366ce35635SMika Westerberg 		/* Update other clients about the allocation change */
18376ce35635SMika Westerberg 		tb_recalc_estimated_bandwidth(tb);
18386ce35635SMika Westerberg 	}
18396ce35635SMika Westerberg 
18406ce35635SMika Westerberg unlock:
18416ce35635SMika Westerberg 	mutex_unlock(&tb->lock);
18426ce35635SMika Westerberg 
18436ce35635SMika Westerberg 	pm_runtime_mark_last_busy(&tb->dev);
18446ce35635SMika Westerberg 	pm_runtime_put_autosuspend(&tb->dev);
18456ce35635SMika Westerberg }
18466ce35635SMika Westerberg 
18476ce35635SMika Westerberg static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
18486ce35635SMika Westerberg {
18496ce35635SMika Westerberg 	struct tb_hotplug_event *ev;
18506ce35635SMika Westerberg 
18516ce35635SMika Westerberg 	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
18526ce35635SMika Westerberg 	if (!ev)
18536ce35635SMika Westerberg 		return;
18546ce35635SMika Westerberg 
18556ce35635SMika Westerberg 	ev->tb = tb;
18566ce35635SMika Westerberg 	ev->route = route;
18576ce35635SMika Westerberg 	ev->port = port;
18586ce35635SMika Westerberg 	INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
18596ce35635SMika Westerberg 	queue_work(tb->wq, &ev->work);
18606ce35635SMika Westerberg }
18616ce35635SMika Westerberg 
18626ce35635SMika Westerberg static void tb_handle_notification(struct tb *tb, u64 route,
18636ce35635SMika Westerberg 				   const struct cfg_error_pkg *error)
18646ce35635SMika Westerberg {
18656ce35635SMika Westerberg 	if (tb_cfg_ack_notification(tb->ctl, route, error))
18666ce35635SMika Westerberg 		tb_warn(tb, "could not ack notification on %llx\n", route);
18676ce35635SMika Westerberg 
18686ce35635SMika Westerberg 	switch (error->error) {
18696ce35635SMika Westerberg 	case TB_CFG_ERROR_DP_BW:
18706ce35635SMika Westerberg 		tb_queue_dp_bandwidth_request(tb, route, error->port);
18716ce35635SMika Westerberg 		break;
18726ce35635SMika Westerberg 
18736ce35635SMika Westerberg 	default:
18746ce35635SMika Westerberg 		/* Ack is enough */
18756ce35635SMika Westerberg 		return;
18766ce35635SMika Westerberg 	}
18776ce35635SMika Westerberg }
18786ce35635SMika Westerberg 
1879877e50b3SLee Jones /*
1880d6cc51cdSAndreas Noever  * tb_handle_event() - callback function for the control channel
1881d6cc51cdSAndreas Noever  *
1882d6cc51cdSAndreas Noever  * Delegates to tb_handle_hotplug.
1883d6cc51cdSAndreas Noever  */
188481a54b5eSMika Westerberg static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
188581a54b5eSMika Westerberg 			    const void *buf, size_t size)
1886d6cc51cdSAndreas Noever {
188781a54b5eSMika Westerberg 	const struct cfg_event_pkg *pkg = buf;
18886ce35635SMika Westerberg 	u64 route = tb_cfg_get_route(&pkg->header);
188981a54b5eSMika Westerberg 
18906ce35635SMika Westerberg 	switch (type) {
18916ce35635SMika Westerberg 	case TB_CFG_PKG_ERROR:
18926ce35635SMika Westerberg 		tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
18936ce35635SMika Westerberg 		return;
18946ce35635SMika Westerberg 	case TB_CFG_PKG_EVENT:
18956ce35635SMika Westerberg 		break;
18966ce35635SMika Westerberg 	default:
189781a54b5eSMika Westerberg 		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
189881a54b5eSMika Westerberg 		return;
189981a54b5eSMika Westerberg 	}
190081a54b5eSMika Westerberg 
1901210e9f56SMika Westerberg 	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
190281a54b5eSMika Westerberg 		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
190381a54b5eSMika Westerberg 			pkg->port);
190481a54b5eSMika Westerberg 	}
190581a54b5eSMika Westerberg 
19064f807e47SMika Westerberg 	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
1907d6cc51cdSAndreas Noever }
1908d6cc51cdSAndreas Noever 
19099d3cce0bSMika Westerberg static void tb_stop(struct tb *tb)
1910d6cc51cdSAndreas Noever {
19119d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
191293f36adeSMika Westerberg 	struct tb_tunnel *tunnel;
191393f36adeSMika Westerberg 	struct tb_tunnel *n;
19143364f0c1SAndreas Noever 
19156ac6faeeSMika Westerberg 	cancel_delayed_work(&tcm->remove_work);
19163364f0c1SAndreas Noever 	/* tunnels are only present after everything has been initialized */
19177ea4cd6bSMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
19187ea4cd6bSMika Westerberg 		/*
19197ea4cd6bSMika Westerberg 		 * DMA tunnels require the driver to be functional so we
19207ea4cd6bSMika Westerberg 		 * tear them down. Other protocol tunnels can be left
19217ea4cd6bSMika Westerberg 		 * intact.
19227ea4cd6bSMika Westerberg 		 */
19237ea4cd6bSMika Westerberg 		if (tb_tunnel_is_dma(tunnel))
19247ea4cd6bSMika Westerberg 			tb_tunnel_deactivate(tunnel);
192593f36adeSMika Westerberg 		tb_tunnel_free(tunnel);
19267ea4cd6bSMika Westerberg 	}
1927bfe778acSMika Westerberg 	tb_switch_remove(tb->root_switch);
19289d3cce0bSMika Westerberg 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
1929d6cc51cdSAndreas Noever }
1930d6cc51cdSAndreas Noever 
193199cabbb0SMika Westerberg static int tb_scan_finalize_switch(struct device *dev, void *data)
193299cabbb0SMika Westerberg {
193399cabbb0SMika Westerberg 	if (tb_is_switch(dev)) {
193499cabbb0SMika Westerberg 		struct tb_switch *sw = tb_to_switch(dev);
193599cabbb0SMika Westerberg 
193699cabbb0SMika Westerberg 		/*
193799cabbb0SMika Westerberg 		 * If we found that the switch was already set up by the
193899cabbb0SMika Westerberg 		 * boot firmware, mark it as authorized now before we
193999cabbb0SMika Westerberg 		 * send uevent to userspace.
194099cabbb0SMika Westerberg 		 */
194199cabbb0SMika Westerberg 		if (sw->boot)
194299cabbb0SMika Westerberg 			sw->authorized = 1;
194399cabbb0SMika Westerberg 
194499cabbb0SMika Westerberg 		dev_set_uevent_suppress(dev, false);
194599cabbb0SMika Westerberg 		kobject_uevent(&dev->kobj, KOBJ_ADD);
194699cabbb0SMika Westerberg 		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
194799cabbb0SMika Westerberg 	}
194899cabbb0SMika Westerberg 
194999cabbb0SMika Westerberg 	return 0;
195099cabbb0SMika Westerberg }
195199cabbb0SMika Westerberg 
19529d3cce0bSMika Westerberg static int tb_start(struct tb *tb)
1953d6cc51cdSAndreas Noever {
19549d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
1955bfe778acSMika Westerberg 	int ret;
1956d6cc51cdSAndreas Noever 
1957bfe778acSMika Westerberg 	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
1958444ac384SMika Westerberg 	if (IS_ERR(tb->root_switch))
1959444ac384SMika Westerberg 		return PTR_ERR(tb->root_switch);
1960a25c8b2fSAndreas Noever 
1961e6b245ccSMika Westerberg 	/*
1962e6b245ccSMika Westerberg 	 * ICM firmware upgrade needs running firmware, and in native
1963e6b245ccSMika Westerberg 	 * mode that is not available, so disable firmware upgrade of
1964e6b245ccSMika Westerberg 	 * the root switch.
19655172eb9aSSzuying Chen 	 *
19665172eb9aSSzuying Chen 	 * However, USB4 routers support NVM firmware upgrade if they
19675172eb9aSSzuying Chen 	 * implement the necessary router operations.
1968e6b245ccSMika Westerberg 	 */
19695172eb9aSSzuying Chen 	tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
19706ac6faeeSMika Westerberg 	/* All USB4 routers support runtime PM */
19716ac6faeeSMika Westerberg 	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
1972e6b245ccSMika Westerberg 
1973bfe778acSMika Westerberg 	ret = tb_switch_configure(tb->root_switch);
1974bfe778acSMika Westerberg 	if (ret) {
1975bfe778acSMika Westerberg 		tb_switch_put(tb->root_switch);
1976bfe778acSMika Westerberg 		return ret;
1977bfe778acSMika Westerberg 	}
1978bfe778acSMika Westerberg 
1979bfe778acSMika Westerberg 	/* Announce the switch to the world */
1980bfe778acSMika Westerberg 	ret = tb_switch_add(tb->root_switch);
1981bfe778acSMika Westerberg 	if (ret) {
1982bfe778acSMika Westerberg 		tb_switch_put(tb->root_switch);
1983bfe778acSMika Westerberg 		return ret;
1984bfe778acSMika Westerberg 	}
1985bfe778acSMika Westerberg 
1986b017a46dSGil Fine 	/*
1987b017a46dSGil Fine 	 * To support the highest CLx state, we set the host router's
1988b017a46dSGil Fine 	 * TMU to Normal mode.
1989b017a46dSGil Fine 	 */
1990b017a46dSGil Fine 	tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_NORMAL,
1991b017a46dSGil Fine 				false);
1992cf29b9afSRajmohan Mani 	/* Enable TMU if it is off */
1993cf29b9afSRajmohan Mani 	tb_switch_tmu_enable(tb->root_switch);
19949da672a4SAndreas Noever 	/* Full scan to discover devices added before the driver was loaded. */
19959da672a4SAndreas Noever 	tb_scan_switch(tb->root_switch);
19960414bec5SMika Westerberg 	/* Find out tunnels created by the boot firmware */
199743bddb26SMika Westerberg 	tb_discover_tunnels(tb);
1998b60e31bfSSanjay R Mehta 	/* Add DP resources from the DP tunnels created by the boot firmware */
1999b60e31bfSSanjay R Mehta 	tb_discover_dp_resources(tb);
2000e6f81858SRajmohan Mani 	/*
2001e6f81858SRajmohan Mani 	 * If the boot firmware did not create USB 3.x tunnels, create
2002e6f81858SRajmohan Mani 	 * them now for the whole topology.
2003e6f81858SRajmohan Mani 	 */
2004e6f81858SRajmohan Mani 	tb_create_usb3_tunnels(tb->root_switch);
20058afe909bSMika Westerberg 	/* Add DP IN resources for the root switch */
20068afe909bSMika Westerberg 	tb_add_dp_resources(tb->root_switch);
200799cabbb0SMika Westerberg 	/* Make the discovered switches available to the userspace */
200899cabbb0SMika Westerberg 	device_for_each_child(&tb->root_switch->dev, NULL,
200999cabbb0SMika Westerberg 			      tb_scan_finalize_switch);
20109da672a4SAndreas Noever 
2011d6cc51cdSAndreas Noever 	/* Allow tb_handle_hotplug to progress events */
20129d3cce0bSMika Westerberg 	tcm->hotplug_active = true;
20139d3cce0bSMika Westerberg 	return 0;
2014d6cc51cdSAndreas Noever }
2015d6cc51cdSAndreas Noever 
20169d3cce0bSMika Westerberg static int tb_suspend_noirq(struct tb *tb)
201723dd5bb4SAndreas Noever {
20189d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
20199d3cce0bSMika Westerberg 
2020daa5140fSMika Westerberg 	tb_dbg(tb, "suspending...\n");
202181a2e3e4SMika Westerberg 	tb_disconnect_and_release_dp(tb);
20226ac6faeeSMika Westerberg 	tb_switch_suspend(tb->root_switch, false);
20239d3cce0bSMika Westerberg 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
2024daa5140fSMika Westerberg 	tb_dbg(tb, "suspend finished\n");
20259d3cce0bSMika Westerberg 
20269d3cce0bSMika Westerberg 	return 0;
202723dd5bb4SAndreas Noever }
202823dd5bb4SAndreas Noever 
202991c0c120SMika Westerberg static void tb_restore_children(struct tb_switch *sw)
203091c0c120SMika Westerberg {
203191c0c120SMika Westerberg 	struct tb_port *port;
203291c0c120SMika Westerberg 
20336ac6faeeSMika Westerberg 	/* No need to restore if the router is already unplugged */
20346ac6faeeSMika Westerberg 	if (sw->is_unplugged)
20356ac6faeeSMika Westerberg 		return;
20366ac6faeeSMika Westerberg 
20371a9b6cb8SMika Westerberg 	if (tb_enable_clx(sw))
20381a9b6cb8SMika Westerberg 		tb_sw_warn(sw, "failed to re-enable CL states\n");
2039b017a46dSGil Fine 
2040cf29b9afSRajmohan Mani 	if (tb_enable_tmu(sw))
2041cf29b9afSRajmohan Mani 		tb_sw_warn(sw, "failed to restore TMU configuration\n");
2042cf29b9afSRajmohan Mani 
204391c0c120SMika Westerberg 	tb_switch_for_each_port(sw, port) {
2044284652a4SMika Westerberg 		if (!tb_port_has_remote(port) && !port->xdomain)
204591c0c120SMika Westerberg 			continue;
204691c0c120SMika Westerberg 
2047284652a4SMika Westerberg 		if (port->remote) {
20482ca3263aSMika Westerberg 			tb_switch_lane_bonding_enable(port->remote->sw);
2049de462039SMika Westerberg 			tb_switch_configure_link(port->remote->sw);
205091c0c120SMika Westerberg 
205191c0c120SMika Westerberg 			tb_restore_children(port->remote->sw);
2052284652a4SMika Westerberg 		} else if (port->xdomain) {
2053f9cad07bSMika Westerberg 			tb_port_configure_xdomain(port, port->xdomain);
2054284652a4SMika Westerberg 		}
205591c0c120SMika Westerberg 	}
205691c0c120SMika Westerberg }
205791c0c120SMika Westerberg 
20589d3cce0bSMika Westerberg static int tb_resume_noirq(struct tb *tb)
205923dd5bb4SAndreas Noever {
20609d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
206193f36adeSMika Westerberg 	struct tb_tunnel *tunnel, *n;
206243bddb26SMika Westerberg 	unsigned int usb3_delay = 0;
206343bddb26SMika Westerberg 	LIST_HEAD(tunnels);
20649d3cce0bSMika Westerberg 
2065daa5140fSMika Westerberg 	tb_dbg(tb, "resuming...\n");
206623dd5bb4SAndreas Noever 
206723dd5bb4SAndreas Noever 	/* remove any pci devices the firmware might have setup */
2068356b6c4eSMika Westerberg 	tb_switch_reset(tb->root_switch);
206923dd5bb4SAndreas Noever 
207023dd5bb4SAndreas Noever 	tb_switch_resume(tb->root_switch);
207123dd5bb4SAndreas Noever 	tb_free_invalid_tunnels(tb);
207223dd5bb4SAndreas Noever 	tb_free_unplugged_children(tb->root_switch);
207391c0c120SMika Westerberg 	tb_restore_children(tb->root_switch);
207443bddb26SMika Westerberg 
207543bddb26SMika Westerberg 	/*
207643bddb26SMika Westerberg 	 * If we get here from suspend to disk, the boot firmware or the
207743bddb26SMika Westerberg 	 * restore kernel might have created tunnels of their own. Since
207843bddb26SMika Westerberg 	 * we cannot be sure they are usable for us, we find and tear
207943bddb26SMika Westerberg 	 * them down.
208043bddb26SMika Westerberg 	 */
208143bddb26SMika Westerberg 	tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
208243bddb26SMika Westerberg 	list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
208343bddb26SMika Westerberg 		if (tb_tunnel_is_usb3(tunnel))
208443bddb26SMika Westerberg 			usb3_delay = 500;
208543bddb26SMika Westerberg 		tb_tunnel_deactivate(tunnel);
208643bddb26SMika Westerberg 		tb_tunnel_free(tunnel);
208743bddb26SMika Westerberg 	}
208843bddb26SMika Westerberg 
208943bddb26SMika Westerberg 	/* Re-create our tunnels now */
209043bddb26SMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
209143bddb26SMika Westerberg 		/* USB3 requires delay before it can be re-activated */
209243bddb26SMika Westerberg 		if (tb_tunnel_is_usb3(tunnel)) {
209343bddb26SMika Westerberg 			msleep(usb3_delay);
209443bddb26SMika Westerberg 			/* Only need to do it once */
209543bddb26SMika Westerberg 			usb3_delay = 0;
209643bddb26SMika Westerberg 		}
209793f36adeSMika Westerberg 		tb_tunnel_restart(tunnel);
209843bddb26SMika Westerberg 	}
20999d3cce0bSMika Westerberg 	if (!list_empty(&tcm->tunnel_list)) {
210023dd5bb4SAndreas Noever 		/*
210123dd5bb4SAndreas Noever 		 * The PCIe links need some time to get going.
210223dd5bb4SAndreas Noever 		 * 100ms works for me...
210323dd5bb4SAndreas Noever 		 */
2104daa5140fSMika Westerberg 		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
210523dd5bb4SAndreas Noever 		msleep(100);
210623dd5bb4SAndreas Noever 	}
210723dd5bb4SAndreas Noever 	/* Allow tb_handle_hotplug to progress events */
21089d3cce0bSMika Westerberg 	tcm->hotplug_active = true;
2109daa5140fSMika Westerberg 	tb_dbg(tb, "resume finished\n");
21109d3cce0bSMika Westerberg 
21119d3cce0bSMika Westerberg 	return 0;
21129d3cce0bSMika Westerberg }
21139d3cce0bSMika Westerberg 
21147ea4cd6bSMika Westerberg static int tb_free_unplugged_xdomains(struct tb_switch *sw)
21157ea4cd6bSMika Westerberg {
2116b433d010SMika Westerberg 	struct tb_port *port;
2117b433d010SMika Westerberg 	int ret = 0;
21187ea4cd6bSMika Westerberg 
2119b433d010SMika Westerberg 	tb_switch_for_each_port(sw, port) {
21207ea4cd6bSMika Westerberg 		if (tb_is_upstream_port(port))
21217ea4cd6bSMika Westerberg 			continue;
21227ea4cd6bSMika Westerberg 		if (port->xdomain && port->xdomain->is_unplugged) {
2123dacb1287SKranthi Kuntala 			tb_retimer_remove_all(port);
21247ea4cd6bSMika Westerberg 			tb_xdomain_remove(port->xdomain);
2125284652a4SMika Westerberg 			tb_port_unconfigure_xdomain(port);
21267ea4cd6bSMika Westerberg 			port->xdomain = NULL;
21277ea4cd6bSMika Westerberg 			ret++;
21287ea4cd6bSMika Westerberg 		} else if (port->remote) {
21297ea4cd6bSMika Westerberg 			ret += tb_free_unplugged_xdomains(port->remote->sw);
21307ea4cd6bSMika Westerberg 		}
21317ea4cd6bSMika Westerberg 	}
21327ea4cd6bSMika Westerberg 
21337ea4cd6bSMika Westerberg 	return ret;
21347ea4cd6bSMika Westerberg }
21357ea4cd6bSMika Westerberg 
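/*
 * For hibernation only hotplug event processing is paused across the
 * image creation (freeze) and resumed again on thaw; tunnels are left
 * untouched.
 */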
2136884e4d57SMika Westerberg static int tb_freeze_noirq(struct tb *tb)
2137884e4d57SMika Westerberg {
2138884e4d57SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
2139884e4d57SMika Westerberg 
2140884e4d57SMika Westerberg 	tcm->hotplug_active = false;
2141884e4d57SMika Westerberg 	return 0;
2142884e4d57SMika Westerberg }
2143884e4d57SMika Westerberg 
2144884e4d57SMika Westerberg static int tb_thaw_noirq(struct tb *tb)
2145884e4d57SMika Westerberg {
2146884e4d57SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
2147884e4d57SMika Westerberg 
2148884e4d57SMika Westerberg 	tcm->hotplug_active = true;
2149884e4d57SMika Westerberg 	return 0;
2150884e4d57SMika Westerberg }
2151884e4d57SMika Westerberg 
21527ea4cd6bSMika Westerberg static void tb_complete(struct tb *tb)
21537ea4cd6bSMika Westerberg {
21547ea4cd6bSMika Westerberg 	/*
21557ea4cd6bSMika Westerberg 	 * Release any unplugged XDomains and, in case another domain
21567ea4cd6bSMika Westerberg 	 * has been swapped in place of an unplugged XDomain, run
21577ea4cd6bSMika Westerberg 	 * another rescan.
21587ea4cd6bSMika Westerberg 	 */
21597ea4cd6bSMika Westerberg 	mutex_lock(&tb->lock);
21607ea4cd6bSMika Westerberg 	if (tb_free_unplugged_xdomains(tb->root_switch))
21617ea4cd6bSMika Westerberg 		tb_scan_switch(tb->root_switch);
21627ea4cd6bSMika Westerberg 	mutex_unlock(&tb->lock);
21637ea4cd6bSMika Westerberg }
21647ea4cd6bSMika Westerberg 
21656ac6faeeSMika Westerberg static int tb_runtime_suspend(struct tb *tb)
21666ac6faeeSMika Westerberg {
21676ac6faeeSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
21686ac6faeeSMika Westerberg 
21696ac6faeeSMika Westerberg 	mutex_lock(&tb->lock);
21706ac6faeeSMika Westerberg 	tb_switch_suspend(tb->root_switch, true);
21716ac6faeeSMika Westerberg 	tcm->hotplug_active = false;
21726ac6faeeSMika Westerberg 	mutex_unlock(&tb->lock);
21736ac6faeeSMika Westerberg 
21746ac6faeeSMika Westerberg 	return 0;
21756ac6faeeSMika Westerberg }
21766ac6faeeSMika Westerberg 
21776ac6faeeSMika Westerberg static void tb_remove_work(struct work_struct *work)
21786ac6faeeSMika Westerberg {
21796ac6faeeSMika Westerberg 	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
21806ac6faeeSMika Westerberg 	struct tb *tb = tcm_to_tb(tcm);
21816ac6faeeSMika Westerberg 
21826ac6faeeSMika Westerberg 	mutex_lock(&tb->lock);
21836ac6faeeSMika Westerberg 	if (tb->root_switch) {
21846ac6faeeSMika Westerberg 		tb_free_unplugged_children(tb->root_switch);
21856ac6faeeSMika Westerberg 		tb_free_unplugged_xdomains(tb->root_switch);
21866ac6faeeSMika Westerberg 	}
21876ac6faeeSMika Westerberg 	mutex_unlock(&tb->lock);
21886ac6faeeSMika Westerberg }
21896ac6faeeSMika Westerberg 
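/*
 * tb_remove_work() above uses the standard delayed-work pattern:
 * container_of() recovers the enclosing structure from the embedded
 * work item. A minimal self-contained sketch of the same pattern
 * (every name below is hypothetical, not part of this driver):
 */
#if 0	/* illustrative sketch only */
struct example_ctx {
	struct delayed_work work;
};

static void example_work_fn(struct work_struct *work)
{
	/* The callback gets &dwork->work, hence the work.work member */
	struct example_ctx *ctx =
		container_of(work, struct example_ctx, work.work);

	/* deferred cleanup using ctx goes here */
}

/* Arm with: queue_delayed_work(wq, &ctx->work, msecs_to_jiffies(50)); */
#endif
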
21906ac6faeeSMika Westerberg static int tb_runtime_resume(struct tb *tb)
21916ac6faeeSMika Westerberg {
21926ac6faeeSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
21936ac6faeeSMika Westerberg 	struct tb_tunnel *tunnel, *n;
21946ac6faeeSMika Westerberg 
21956ac6faeeSMika Westerberg 	mutex_lock(&tb->lock);
21966ac6faeeSMika Westerberg 	tb_switch_resume(tb->root_switch);
21976ac6faeeSMika Westerberg 	tb_free_invalid_tunnels(tb);
21986ac6faeeSMika Westerberg 	tb_restore_children(tb->root_switch);
21996ac6faeeSMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
22006ac6faeeSMika Westerberg 		tb_tunnel_restart(tunnel);
22016ac6faeeSMika Westerberg 	tcm->hotplug_active = true;
22026ac6faeeSMika Westerberg 	mutex_unlock(&tb->lock);
22036ac6faeeSMika Westerberg 
22046ac6faeeSMika Westerberg 	/*
22056ac6faeeSMika Westerberg 	 * Schedule cleanup of any unplugged devices. Run this in a
22066ac6faeeSMika Westerberg 	 * separate thread to avoid possible deadlock if the device
22076ac6faeeSMika Westerberg 	 * removal runtime resumes the unplugged device.
22086ac6faeeSMika Westerberg 	 */
22096ac6faeeSMika Westerberg 	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
22106ac6faeeSMika Westerberg 	return 0;
22116ac6faeeSMika Westerberg }
22126ac6faeeSMika Westerberg 
22139d3cce0bSMika Westerberg static const struct tb_cm_ops tb_cm_ops = {
22149d3cce0bSMika Westerberg 	.start = tb_start,
22159d3cce0bSMika Westerberg 	.stop = tb_stop,
22169d3cce0bSMika Westerberg 	.suspend_noirq = tb_suspend_noirq,
22179d3cce0bSMika Westerberg 	.resume_noirq = tb_resume_noirq,
2218884e4d57SMika Westerberg 	.freeze_noirq = tb_freeze_noirq,
2219884e4d57SMika Westerberg 	.thaw_noirq = tb_thaw_noirq,
22207ea4cd6bSMika Westerberg 	.complete = tb_complete,
22216ac6faeeSMika Westerberg 	.runtime_suspend = tb_runtime_suspend,
22226ac6faeeSMika Westerberg 	.runtime_resume = tb_runtime_resume,
222381a54b5eSMika Westerberg 	.handle_event = tb_handle_event,
22243da88be2SMika Westerberg 	.disapprove_switch = tb_disconnect_pci,
222599cabbb0SMika Westerberg 	.approve_switch = tb_tunnel_pci,
22267ea4cd6bSMika Westerberg 	.approve_xdomain_paths = tb_approve_xdomain_paths,
22277ea4cd6bSMika Westerberg 	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
22289d3cce0bSMika Westerberg };
22299d3cce0bSMika Westerberg 
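/*
 * These callbacks are not called directly; the Thunderbolt domain
 * core dispatches into them. A hedged sketch of that dispatch (the
 * wrapper name below is hypothetical; the real wrappers live in
 * domain.c):
 */
#if 0	/* illustrative sketch only */
static int example_domain_runtime_resume(struct tb *tb)
{
	if (tb->cm_ops->runtime_resume)
		return tb->cm_ops->runtime_resume(tb);
	return 0;
}
#endif
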
2230349bfe08SMika Westerberg /*
2231349bfe08SMika Westerberg  * During suspend the Thunderbolt controller is reset and all PCIe
2232349bfe08SMika Westerberg  * tunnels are lost. The NHI driver will try to reestablish all tunnels
2233349bfe08SMika Westerberg  * during resume. This adds device links between the tunneled PCIe
2234349bfe08SMika Westerberg  * downstream ports and the NHI so that the device core will make sure
2235349bfe08SMika Westerberg  * the NHI is resumed before the rest.
2236349bfe08SMika Westerberg  */
2237349bfe08SMika Westerberg static void tb_apple_add_links(struct tb_nhi *nhi)
2238349bfe08SMika Westerberg {
2239349bfe08SMika Westerberg 	struct pci_dev *upstream, *pdev;
2240349bfe08SMika Westerberg 
2241349bfe08SMika Westerberg 	if (!x86_apple_machine)
2242349bfe08SMika Westerberg 		return;
2243349bfe08SMika Westerberg 
2244349bfe08SMika Westerberg 	switch (nhi->pdev->device) {
2245349bfe08SMika Westerberg 	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
2246349bfe08SMika Westerberg 	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
2247349bfe08SMika Westerberg 	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
2248349bfe08SMika Westerberg 	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
2249349bfe08SMika Westerberg 		break;
2250349bfe08SMika Westerberg 	default:
2251349bfe08SMika Westerberg 		return;
2252349bfe08SMika Westerberg 	}
2253349bfe08SMika Westerberg 
2254349bfe08SMika Westerberg 	upstream = pci_upstream_bridge(nhi->pdev);
2255349bfe08SMika Westerberg 	while (upstream) {
2256349bfe08SMika Westerberg 		if (!pci_is_pcie(upstream))
2257349bfe08SMika Westerberg 			return;
2258349bfe08SMika Westerberg 		if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
2259349bfe08SMika Westerberg 			break;
2260349bfe08SMika Westerberg 		upstream = pci_upstream_bridge(upstream);
2261349bfe08SMika Westerberg 	}
2262349bfe08SMika Westerberg 
2263349bfe08SMika Westerberg 	if (!upstream)
2264349bfe08SMika Westerberg 		return;
2265349bfe08SMika Westerberg 
2266349bfe08SMika Westerberg 	/*
2267349bfe08SMika Westerberg 	 * For each hotplug downstream port, add a device link
2268349bfe08SMika Westerberg 	 * back to NHI so that PCIe tunnels can be re-established after
2269349bfe08SMika Westerberg 	 * sleep.
2270349bfe08SMika Westerberg 	 */
2271349bfe08SMika Westerberg 	for_each_pci_bridge(pdev, upstream->subordinate) {
2272349bfe08SMika Westerberg 		const struct device_link *link;
2273349bfe08SMika Westerberg 
2274349bfe08SMika Westerberg 		if (!pci_is_pcie(pdev))
2275349bfe08SMika Westerberg 			continue;
2276349bfe08SMika Westerberg 		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
2277349bfe08SMika Westerberg 		    !pdev->is_hotplug_bridge)
2278349bfe08SMika Westerberg 			continue;
2279349bfe08SMika Westerberg 
2280349bfe08SMika Westerberg 		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
2281349bfe08SMika Westerberg 				       DL_FLAG_AUTOREMOVE_SUPPLIER |
2282349bfe08SMika Westerberg 				       DL_FLAG_PM_RUNTIME);
2283349bfe08SMika Westerberg 		if (link) {
2284349bfe08SMika Westerberg 			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
2285349bfe08SMika Westerberg 				dev_name(&pdev->dev));
2286349bfe08SMika Westerberg 		} else {
2287349bfe08SMika Westerberg 			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
2288349bfe08SMika Westerberg 				 dev_name(&pdev->dev));
2289349bfe08SMika Westerberg 		}
2290349bfe08SMika Westerberg 	}
2291349bfe08SMika Westerberg }
2292349bfe08SMika Westerberg 
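/*
 * DL_FLAG_PM_RUNTIME above makes the link order runtime PM (the
 * supplier, here the NHI, is resumed before the consumer) and
 * DL_FLAG_AUTOREMOVE_SUPPLIER drops the link automatically when the
 * supplier driver is unbound. A hedged, generic sketch of the same
 * consumer/supplier pattern (names below are hypothetical):
 */
#if 0	/* illustrative sketch only */
static void example_link_to_supplier(struct device *consumer,
				     struct device *supplier)
{
	struct device_link *link;

	link = device_link_add(consumer, supplier,
			       DL_FLAG_AUTOREMOVE_SUPPLIER |
			       DL_FLAG_PM_RUNTIME);
	if (!link)
		dev_warn(supplier, "link from %s failed\n",
			 dev_name(consumer));
}
#endif
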
22939d3cce0bSMika Westerberg struct tb *tb_probe(struct tb_nhi *nhi)
22949d3cce0bSMika Westerberg {
22959d3cce0bSMika Westerberg 	struct tb_cm *tcm;
22969d3cce0bSMika Westerberg 	struct tb *tb;
22979d3cce0bSMika Westerberg 
22987f0a34d7SMika Westerberg 	tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
22999d3cce0bSMika Westerberg 	if (!tb)
23009d3cce0bSMika Westerberg 		return NULL;
23019d3cce0bSMika Westerberg 
2302c6da62a2SMika Westerberg 	if (tb_acpi_may_tunnel_pcie())
230399cabbb0SMika Westerberg 		tb->security_level = TB_SECURITY_USER;
2304c6da62a2SMika Westerberg 	else
2305c6da62a2SMika Westerberg 		tb->security_level = TB_SECURITY_NOPCIE;
2306c6da62a2SMika Westerberg 
23079d3cce0bSMika Westerberg 	tb->cm_ops = &tb_cm_ops;
23089d3cce0bSMika Westerberg 
23099d3cce0bSMika Westerberg 	tcm = tb_priv(tb);
23109d3cce0bSMika Westerberg 	INIT_LIST_HEAD(&tcm->tunnel_list);
23118afe909bSMika Westerberg 	INIT_LIST_HEAD(&tcm->dp_resources);
23126ac6faeeSMika Westerberg 	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
23136ce35635SMika Westerberg 	tb_init_bandwidth_groups(tcm);
23149d3cce0bSMika Westerberg 
2315e0258805SMika Westerberg 	tb_dbg(tb, "using software connection manager\n");
2316e0258805SMika Westerberg 
2317349bfe08SMika Westerberg 	tb_apple_add_links(nhi);
2318349bfe08SMika Westerberg 	tb_acpi_add_links(nhi);
2319349bfe08SMika Westerberg 
23209d3cce0bSMika Westerberg 	return tb;
232123dd5bb4SAndreas Noever }
2322
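/*
 * Layout assumed by tb_priv() and tcm_to_tb(): passing sizeof(*tcm)
 * to tb_domain_alloc() places the connection-manager private data
 * directly after struct tb in a single allocation (a sketch of the
 * assumed layout, not a quote of tb_domain_alloc()):
 *
 *   +-----------+---------------+
 *   | struct tb | struct tb_cm  |
 *   +-----------+---------------+
 *   ^           ^
 *   tb          tb_priv(tb)
 */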