xref: /openbmc/linux/drivers/thunderbolt/tb.c (revision c94732bd)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2d6cc51cdSAndreas Noever /*
399cabbb0SMika Westerberg  * Thunderbolt driver - bus logic (NHI independent)
4d6cc51cdSAndreas Noever  *
5d6cc51cdSAndreas Noever  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
699cabbb0SMika Westerberg  * Copyright (C) 2019, Intel Corporation
7d6cc51cdSAndreas Noever  */
8d6cc51cdSAndreas Noever 
9d6cc51cdSAndreas Noever #include <linux/slab.h>
10d6cc51cdSAndreas Noever #include <linux/errno.h>
11d6cc51cdSAndreas Noever #include <linux/delay.h>
126ac6faeeSMika Westerberg #include <linux/pm_runtime.h>
13d6cc51cdSAndreas Noever 
14d6cc51cdSAndreas Noever #include "tb.h"
157adf6097SAndreas Noever #include "tb_regs.h"
161752b9f7SMika Westerberg #include "tunnel.h"
17d6cc51cdSAndreas Noever 
/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 * @remove_work: Work used to remove any unplugged routers after
 *		 runtime resume
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
	struct delayed_work remove_work;
};
359da672a4SAndreas Noever 
366ac6faeeSMika Westerberg static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
376ac6faeeSMika Westerberg {
386ac6faeeSMika Westerberg 	return ((void *)tcm - sizeof(struct tb));
396ac6faeeSMika Westerberg }
406ac6faeeSMika Westerberg 
/*
 * struct tb_hotplug_event - Queued plug/unplug notification
 * @work: Work item executing tb_handle_hotplug() on the domain wq
 * @tb: Domain the event belongs to
 * @route: Route string of the router the event originated from
 * @port: Adapter number on that router
 * @unplug: %true for an unplug event, %false for a plug event
 */
struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};
484f807e47SMika Westerberg 
494f807e47SMika Westerberg static void tb_handle_hotplug(struct work_struct *work);
504f807e47SMika Westerberg 
514f807e47SMika Westerberg static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
524f807e47SMika Westerberg {
534f807e47SMika Westerberg 	struct tb_hotplug_event *ev;
544f807e47SMika Westerberg 
554f807e47SMika Westerberg 	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
564f807e47SMika Westerberg 	if (!ev)
574f807e47SMika Westerberg 		return;
584f807e47SMika Westerberg 
594f807e47SMika Westerberg 	ev->tb = tb;
604f807e47SMika Westerberg 	ev->route = route;
614f807e47SMika Westerberg 	ev->port = port;
624f807e47SMika Westerberg 	ev->unplug = unplug;
634f807e47SMika Westerberg 	INIT_WORK(&ev->work, tb_handle_hotplug);
644f807e47SMika Westerberg 	queue_work(tb->wq, &ev->work);
654f807e47SMika Westerberg }
664f807e47SMika Westerberg 
679da672a4SAndreas Noever /* enumeration & hot plug handling */
689da672a4SAndreas Noever 
698afe909bSMika Westerberg static void tb_add_dp_resources(struct tb_switch *sw)
708afe909bSMika Westerberg {
718afe909bSMika Westerberg 	struct tb_cm *tcm = tb_priv(sw->tb);
728afe909bSMika Westerberg 	struct tb_port *port;
738afe909bSMika Westerberg 
748afe909bSMika Westerberg 	tb_switch_for_each_port(sw, port) {
758afe909bSMika Westerberg 		if (!tb_port_is_dpin(port))
768afe909bSMika Westerberg 			continue;
778afe909bSMika Westerberg 
788afe909bSMika Westerberg 		if (!tb_switch_query_dp_resource(sw, port))
798afe909bSMika Westerberg 			continue;
808afe909bSMika Westerberg 
818afe909bSMika Westerberg 		list_add_tail(&port->list, &tcm->dp_resources);
828afe909bSMika Westerberg 		tb_port_dbg(port, "DP IN resource available\n");
838afe909bSMika Westerberg 	}
848afe909bSMika Westerberg }
858afe909bSMika Westerberg 
868afe909bSMika Westerberg static void tb_remove_dp_resources(struct tb_switch *sw)
878afe909bSMika Westerberg {
888afe909bSMika Westerberg 	struct tb_cm *tcm = tb_priv(sw->tb);
898afe909bSMika Westerberg 	struct tb_port *port, *tmp;
908afe909bSMika Westerberg 
918afe909bSMika Westerberg 	/* Clear children resources first */
928afe909bSMika Westerberg 	tb_switch_for_each_port(sw, port) {
938afe909bSMika Westerberg 		if (tb_port_has_remote(port))
948afe909bSMika Westerberg 			tb_remove_dp_resources(port->remote->sw);
958afe909bSMika Westerberg 	}
968afe909bSMika Westerberg 
978afe909bSMika Westerberg 	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
988afe909bSMika Westerberg 		if (port->sw == sw) {
998afe909bSMika Westerberg 			tb_port_dbg(port, "DP OUT resource unavailable\n");
1008afe909bSMika Westerberg 			list_del_init(&port->list);
1018afe909bSMika Westerberg 		}
1028afe909bSMika Westerberg 	}
1038afe909bSMika Westerberg }
1048afe909bSMika Westerberg 
/*
 * tb_discover_tunnels() - Pick up tunnels created by the boot firmware
 * @sw: Router to start the discovery from
 *
 * Walks every adapter of @sw, tries to discover an existing DP, PCIe
 * or USB3 tunnel starting from it and adds each found tunnel to the
 * connection manager tunnel list. Recurses into all connected child
 * routers at the end so the whole topology below @sw gets covered.
 */
static void tb_discover_tunnels(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		/* Tunnel type is determined by the adapter it starts from */
		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port);
			break;

		default:
			break;
		}

		if (!tunnel)
			continue;

		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			/*
			 * Mark every router between the two tunnel end
			 * points as boot-time connected.
			 */
			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		} else if (tb_tunnel_is_dp(tunnel)) {
			/* Keep the domain from powering down */
			pm_runtime_get_sync(&tunnel->src_port->sw->dev);
			pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
		}

		list_add_tail(&tunnel->list, &tcm->tunnel_list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_discover_tunnels(port->remote->sw);
	}
}
1559da672a4SAndreas Noever 
156284652a4SMika Westerberg static int tb_port_configure_xdomain(struct tb_port *port)
157284652a4SMika Westerberg {
158341d4518SMika Westerberg 	/*
159341d4518SMika Westerberg 	 * XDomain paths currently only support single lane so we must
160341d4518SMika Westerberg 	 * disable the other lane according to USB4 spec.
161341d4518SMika Westerberg 	 */
162341d4518SMika Westerberg 	tb_port_disable(port->dual_link_port);
163341d4518SMika Westerberg 
164284652a4SMika Westerberg 	if (tb_switch_is_usb4(port->sw))
165284652a4SMika Westerberg 		return usb4_port_configure_xdomain(port);
166284652a4SMika Westerberg 	return tb_lc_configure_xdomain(port);
167284652a4SMika Westerberg }
168284652a4SMika Westerberg 
169284652a4SMika Westerberg static void tb_port_unconfigure_xdomain(struct tb_port *port)
170284652a4SMika Westerberg {
171284652a4SMika Westerberg 	if (tb_switch_is_usb4(port->sw))
172284652a4SMika Westerberg 		usb4_port_unconfigure_xdomain(port);
173284652a4SMika Westerberg 	else
174284652a4SMika Westerberg 		tb_lc_unconfigure_xdomain(port);
175341d4518SMika Westerberg 
176341d4518SMika Westerberg 	tb_port_enable(port->dual_link_port);
177284652a4SMika Westerberg }
178284652a4SMika Westerberg 
/*
 * tb_scan_xdomain() - Check for a host-to-host (XDomain) connection
 * @port: Downstream port behind which the other host may sit
 *
 * Allocates and registers a new XDomain device for the route behind
 * @port unless XDomain support is disabled or one already exists for
 * that route.
 */
static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	if (!tb_is_xdomain_enabled())
		return;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		/* Already known; drop the reference taken by the lookup */
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_port_configure_xdomain(port);
		tb_xdomain_add(xd);
	}
}
2047ea4cd6bSMika Westerberg 
/*
 * Bring the router's TMU (time management unit) up in the correct
 * mode: disable it first, post a fresh time stamp and then re-enable.
 * Already correctly enabled TMUs are left untouched.
 */
static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (!ret)
		ret = tb_switch_tmu_post_time(sw);
	if (!ret)
		ret = tb_switch_tmu_enable(sw);

	return ret;
}
223cf29b9afSRajmohan Mani 
224e6f81858SRajmohan Mani /**
225e6f81858SRajmohan Mani  * tb_find_unused_port() - return the first inactive port on @sw
226e6f81858SRajmohan Mani  * @sw: Switch to find the port on
227e6f81858SRajmohan Mani  * @type: Port type to look for
228e6f81858SRajmohan Mani  */
229e6f81858SRajmohan Mani static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
230e6f81858SRajmohan Mani 					   enum tb_port_type type)
231e6f81858SRajmohan Mani {
232e6f81858SRajmohan Mani 	struct tb_port *port;
233e6f81858SRajmohan Mani 
234e6f81858SRajmohan Mani 	tb_switch_for_each_port(sw, port) {
235e6f81858SRajmohan Mani 		if (tb_is_upstream_port(port))
236e6f81858SRajmohan Mani 			continue;
237e6f81858SRajmohan Mani 		if (port->config.type != type)
238e6f81858SRajmohan Mani 			continue;
239e6f81858SRajmohan Mani 		if (!port->cap_adap)
240e6f81858SRajmohan Mani 			continue;
241e6f81858SRajmohan Mani 		if (tb_port_is_enabled(port))
242e6f81858SRajmohan Mani 			continue;
243e6f81858SRajmohan Mani 		return port;
244e6f81858SRajmohan Mani 	}
245e6f81858SRajmohan Mani 	return NULL;
246e6f81858SRajmohan Mani }
247e6f81858SRajmohan Mani 
248e6f81858SRajmohan Mani static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
249e6f81858SRajmohan Mani 					 const struct tb_port *port)
250e6f81858SRajmohan Mani {
251e6f81858SRajmohan Mani 	struct tb_port *down;
252e6f81858SRajmohan Mani 
253e6f81858SRajmohan Mani 	down = usb4_switch_map_usb3_down(sw, port);
25477cfa40fSMika Westerberg 	if (down && !tb_usb3_port_is_enabled(down))
255e6f81858SRajmohan Mani 		return down;
25677cfa40fSMika Westerberg 	return NULL;
257e6f81858SRajmohan Mani }
258e6f81858SRajmohan Mani 
2590bd680cdSMika Westerberg static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
2600bd680cdSMika Westerberg 					struct tb_port *src_port,
2610bd680cdSMika Westerberg 					struct tb_port *dst_port)
2620bd680cdSMika Westerberg {
2630bd680cdSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
2640bd680cdSMika Westerberg 	struct tb_tunnel *tunnel;
2650bd680cdSMika Westerberg 
2660bd680cdSMika Westerberg 	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
2670bd680cdSMika Westerberg 		if (tunnel->type == type &&
2680bd680cdSMika Westerberg 		    ((src_port && src_port == tunnel->src_port) ||
2690bd680cdSMika Westerberg 		     (dst_port && dst_port == tunnel->dst_port))) {
2700bd680cdSMika Westerberg 			return tunnel;
2710bd680cdSMika Westerberg 		}
2720bd680cdSMika Westerberg 	}
2730bd680cdSMika Westerberg 
2740bd680cdSMika Westerberg 	return NULL;
2750bd680cdSMika Westerberg }
2760bd680cdSMika Westerberg 
/*
 * tb_find_first_usb3_tunnel() - USB3 tunnel on the first hop of a path
 * @tb: Domain structure
 * @src_port: One end of the path
 * @dst_port: Other end of the path
 *
 * Returns the USB3 tunnel whose source is the host router USB3
 * downstream adapter feeding the branch that contains the deeper of
 * @src_port/@dst_port, or %NULL if the deeper router is the host
 * itself or no such tunnel exists.
 */
static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
						   struct tb_port *src_port,
						   struct tb_port *dst_port)
{
	struct tb_port *port, *usb3_down;
	struct tb_switch *sw;

	/* Pick the router that is deepest in the topology */
	if (dst_port->sw->config.depth > src_port->sw->config.depth)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	/* Can't be the host router */
	if (sw == tb->root_switch)
		return NULL;

	/* Find the downstream USB4 port that leads to this router */
	port = tb_port_at(tb_route(sw), tb->root_switch);
	/* Find the corresponding host router USB3 downstream port */
	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
	if (!usb3_down)
		return NULL;

	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
}
3030bd680cdSMika Westerberg 
/*
 * tb_available_bandwidth() - Bandwidth still available for tunneling
 * @tb: Domain structure
 * @src_port: One end of the prospective tunnel path
 * @dst_port: Other end of the prospective tunnel path
 * @available_up: Minimum available upstream bandwidth (Mb/s) returned here
 * @available_down: Minimum available downstream bandwidth (Mb/s) returned here
 *
 * Walks each link on the path between @src_port and @dst_port and
 * computes the smallest bandwidth still available after subtracting
 * what existing DP tunnels crossing the link consume, plus what a
 * possible USB3 tunnel of this branch consumes. Results are clamped
 * to zero; returns %0 on success or a negative error code.
 */
static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
	struct tb_port *dst_port, int *available_up, int *available_down)
{
	int usb3_consumed_up, usb3_consumed_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_port *port;

	tb_port_dbg(dst_port, "calculating available bandwidth\n");

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel) {
		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
						   &usb3_consumed_down);
		if (ret)
			return ret;
	} else {
		usb3_consumed_up = 0;
		usb3_consumed_down = 0;
	}

	/* Start from the maximum (40000 Mb/s) and reduce per link below */
	*available_up = *available_down = 40000;

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int link_speed, link_width, up_bw, down_bw;

		/* Only lane adapters represent the physical links */
		if (!tb_port_is_null(port))
			continue;

		if (tb_is_upstream_port(port)) {
			link_speed = port->sw->link_speed;
		} else {
			link_speed = tb_port_get_link_speed(port);
			if (link_speed < 0)
				return link_speed;
		}

		link_width = port->bonded ? 2 : 1;

		up_bw = link_speed * link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		up_bw -= up_bw / 10;
		down_bw = up_bw;

		tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);

		/*
		 * Find all DP tunnels that cross the port and reduce
		 * their consumed bandwidth from the available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int dp_consumed_up, dp_consumed_down;

			if (!tb_tunnel_is_dp(tunnel))
				continue;

			if (!tb_tunnel_port_on_path(tunnel, port))
				continue;

			ret = tb_tunnel_consumed_bandwidth(tunnel,
							   &dp_consumed_up,
							   &dp_consumed_down);
			if (ret)
				return ret;

			up_bw -= dp_consumed_up;
			down_bw -= dp_consumed_down;
		}

		/*
		 * If USB3 is tunneled from the host router down to the
		 * branch leading to port we need to take USB3 consumed
		 * bandwidth into account regardless whether it actually
		 * crosses the port.
		 */
		up_bw -= usb3_consumed_up;
		down_bw -= usb3_consumed_down;

		if (up_bw < *available_up)
			*available_up = up_bw;
		if (down_bw < *available_down)
			*available_down = down_bw;
	}

	/* Never report negative available bandwidth */
	if (*available_up < 0)
		*available_up = 0;
	if (*available_down < 0)
		*available_down = 0;

	return 0;
}
3960bd680cdSMika Westerberg 
/*
 * Ask the first hop USB3 tunnel of the branch containing
 * @src_port/@dst_port to give up any bandwidth it is not using.
 * Returns 0 when there is no USB3 tunnel on the branch.
 */
static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return 0;

	return tb_tunnel_release_unused_bandwidth(tunnel);
}
4060bd680cdSMika Westerberg 
/*
 * tb_reclaim_usb3_bandwidth() - Hand unused bandwidth back to USB3
 * @tb: Domain structure
 * @src_port: One end of the affected path
 * @dst_port: Other end of the affected path
 *
 * Lets the first hop USB3 tunnel of this branch consume whatever
 * bandwidth is available again (e.g. after another tunnel was torn
 * down or failed to come up). No-op when the branch has no USB3
 * tunnel; a failing bandwidth calculation is only logged.
 */
static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
				      struct tb_port *dst_port)
{
	int ret, available_up, available_down;
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return;

	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");

	/*
	 * Calculate available bandwidth for the first hop USB3 tunnel.
	 * That determines the whole USB3 bandwidth for this branch.
	 */
	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
				     &available_up, &available_down);
	if (ret) {
		tb_warn(tb, "failed to calculate available bandwidth\n");
		return;
	}

	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
	       available_up, available_down);

	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
}
4350bd680cdSMika Westerberg 
/*
 * tb_tunnel_usb3() - Establish a USB3 tunnel from the parent to @sw
 * @tb: Domain structure
 * @sw: Router whose USB3 upstream adapter should be tunneled
 *
 * Creates and activates a USB3 tunnel between the parent router's
 * USB3 downstream adapter and the USB3 upstream adapter of @sw,
 * sized according to the currently available bandwidth. Returns %0
 * when the tunnel was created or when tunneling is not possible for
 * a benign reason (disabled by policy, no adapters, incomplete
 * chain); returns a negative error code on real failures.
 */
static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_usb3()) {
		tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
		return 0;
	}

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	/* Only tunnel over a USB4 link */
	if (!sw->link_usb4)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_port_at(tb_route(sw), parent);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;

		/* Make all unused bandwidth available for the new tunnel */
		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
		if (ret)
			return ret;
	}

	ret = tb_available_bandwidth(tb, down, up, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return 0;

	/*
	 * On error hand any bandwidth released above back to the
	 * existing first hop USB3 tunnel (if the parent is not the
	 * host router).
	 */
err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return ret;
}
518e6f81858SRajmohan Mani 
519e6f81858SRajmohan Mani static int tb_create_usb3_tunnels(struct tb_switch *sw)
520e6f81858SRajmohan Mani {
521e6f81858SRajmohan Mani 	struct tb_port *port;
522e6f81858SRajmohan Mani 	int ret;
523e6f81858SRajmohan Mani 
524c6da62a2SMika Westerberg 	if (!tb_acpi_may_tunnel_usb3())
525c6da62a2SMika Westerberg 		return 0;
526c6da62a2SMika Westerberg 
527e6f81858SRajmohan Mani 	if (tb_route(sw)) {
528e6f81858SRajmohan Mani 		ret = tb_tunnel_usb3(sw->tb, sw);
529e6f81858SRajmohan Mani 		if (ret)
530e6f81858SRajmohan Mani 			return ret;
531e6f81858SRajmohan Mani 	}
532e6f81858SRajmohan Mani 
533e6f81858SRajmohan Mani 	tb_switch_for_each_port(sw, port) {
534e6f81858SRajmohan Mani 		if (!tb_port_has_remote(port))
535e6f81858SRajmohan Mani 			continue;
536e6f81858SRajmohan Mani 		ret = tb_create_usb3_tunnels(port->remote->sw);
537e6f81858SRajmohan Mani 		if (ret)
538e6f81858SRajmohan Mani 			return ret;
539e6f81858SRajmohan Mani 	}
540e6f81858SRajmohan Mani 
541e6f81858SRajmohan Mani 	return 0;
542e6f81858SRajmohan Mani }
543e6f81858SRajmohan Mani 
5449da672a4SAndreas Noever static void tb_scan_port(struct tb_port *port);
5459da672a4SAndreas Noever 
/*
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	/* Keep the router runtime-resumed while its ports are scanned */
	pm_runtime_get_sync(&sw->dev);

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);

	/* Allow the router to autosuspend again */
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}
5619da672a4SAndreas Noever 
562877e50b3SLee Jones /*
5639da672a4SAndreas Noever  * tb_scan_port() - check for and initialize switches below port
5649da672a4SAndreas Noever  */
5659da672a4SAndreas Noever static void tb_scan_port(struct tb_port *port)
5669da672a4SAndreas Noever {
56799cabbb0SMika Westerberg 	struct tb_cm *tcm = tb_priv(port->sw->tb);
568dfe40ca4SMika Westerberg 	struct tb_port *upstream_port;
5699da672a4SAndreas Noever 	struct tb_switch *sw;
570dfe40ca4SMika Westerberg 
5719da672a4SAndreas Noever 	if (tb_is_upstream_port(port))
5729da672a4SAndreas Noever 		return;
5734f807e47SMika Westerberg 
5744f807e47SMika Westerberg 	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
5754f807e47SMika Westerberg 	    !tb_dp_port_is_enabled(port)) {
5764f807e47SMika Westerberg 		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
5774f807e47SMika Westerberg 		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
5784f807e47SMika Westerberg 				 false);
5794f807e47SMika Westerberg 		return;
5804f807e47SMika Westerberg 	}
5814f807e47SMika Westerberg 
5829da672a4SAndreas Noever 	if (port->config.type != TB_TYPE_PORT)
5839da672a4SAndreas Noever 		return;
584343fcb8cSAndreas Noever 	if (port->dual_link_port && port->link_nr)
585343fcb8cSAndreas Noever 		return; /*
586343fcb8cSAndreas Noever 			 * Downstream switch is reachable through two ports.
587343fcb8cSAndreas Noever 			 * Only scan on the primary port (link_nr == 0).
588343fcb8cSAndreas Noever 			 */
5899da672a4SAndreas Noever 	if (tb_wait_for_port(port, false) <= 0)
5909da672a4SAndreas Noever 		return;
5919da672a4SAndreas Noever 	if (port->remote) {
5927ea4cd6bSMika Westerberg 		tb_port_dbg(port, "port already has a remote\n");
5939da672a4SAndreas Noever 		return;
5949da672a4SAndreas Noever 	}
595dacb1287SKranthi Kuntala 
596dacb1287SKranthi Kuntala 	tb_retimer_scan(port);
597dacb1287SKranthi Kuntala 
598bfe778acSMika Westerberg 	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
599bfe778acSMika Westerberg 			     tb_downstream_route(port));
6007ea4cd6bSMika Westerberg 	if (IS_ERR(sw)) {
6017ea4cd6bSMika Westerberg 		/*
6027ea4cd6bSMika Westerberg 		 * If there is an error accessing the connected switch
6037ea4cd6bSMika Westerberg 		 * it may be connected to another domain. Also we allow
6047ea4cd6bSMika Westerberg 		 * the other domain to be connected to a max depth switch.
6057ea4cd6bSMika Westerberg 		 */
6067ea4cd6bSMika Westerberg 		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
6077ea4cd6bSMika Westerberg 			tb_scan_xdomain(port);
6089da672a4SAndreas Noever 		return;
6097ea4cd6bSMika Westerberg 	}
610bfe778acSMika Westerberg 
611bfe778acSMika Westerberg 	if (tb_switch_configure(sw)) {
612bfe778acSMika Westerberg 		tb_switch_put(sw);
613bfe778acSMika Westerberg 		return;
614bfe778acSMika Westerberg 	}
615bfe778acSMika Westerberg 
61699cabbb0SMika Westerberg 	/*
6177ea4cd6bSMika Westerberg 	 * If there was previously another domain connected remove it
6187ea4cd6bSMika Westerberg 	 * first.
6197ea4cd6bSMika Westerberg 	 */
6207ea4cd6bSMika Westerberg 	if (port->xdomain) {
6217ea4cd6bSMika Westerberg 		tb_xdomain_remove(port->xdomain);
622284652a4SMika Westerberg 		tb_port_unconfigure_xdomain(port);
6237ea4cd6bSMika Westerberg 		port->xdomain = NULL;
6247ea4cd6bSMika Westerberg 	}
6257ea4cd6bSMika Westerberg 
6267ea4cd6bSMika Westerberg 	/*
62799cabbb0SMika Westerberg 	 * Do not send uevents until we have discovered all existing
62899cabbb0SMika Westerberg 	 * tunnels and know which switches were authorized already by
62999cabbb0SMika Westerberg 	 * the boot firmware.
63099cabbb0SMika Westerberg 	 */
63199cabbb0SMika Westerberg 	if (!tcm->hotplug_active)
63299cabbb0SMika Westerberg 		dev_set_uevent_suppress(&sw->dev, true);
633f67cf491SMika Westerberg 
6346ac6faeeSMika Westerberg 	/*
6356ac6faeeSMika Westerberg 	 * At the moment Thunderbolt 2 and beyond (devices with LC) we
6366ac6faeeSMika Westerberg 	 * can support runtime PM.
6376ac6faeeSMika Westerberg 	 */
6386ac6faeeSMika Westerberg 	sw->rpm = sw->generation > 1;
6396ac6faeeSMika Westerberg 
640bfe778acSMika Westerberg 	if (tb_switch_add(sw)) {
641bfe778acSMika Westerberg 		tb_switch_put(sw);
642bfe778acSMika Westerberg 		return;
643bfe778acSMika Westerberg 	}
644bfe778acSMika Westerberg 
645dfe40ca4SMika Westerberg 	/* Link the switches using both links if available */
646dfe40ca4SMika Westerberg 	upstream_port = tb_upstream_port(sw);
647dfe40ca4SMika Westerberg 	port->remote = upstream_port;
648dfe40ca4SMika Westerberg 	upstream_port->remote = port;
649dfe40ca4SMika Westerberg 	if (port->dual_link_port && upstream_port->dual_link_port) {
650dfe40ca4SMika Westerberg 		port->dual_link_port->remote = upstream_port->dual_link_port;
651dfe40ca4SMika Westerberg 		upstream_port->dual_link_port->remote = port->dual_link_port;
652dfe40ca4SMika Westerberg 	}
653dfe40ca4SMika Westerberg 
65491c0c120SMika Westerberg 	/* Enable lane bonding if supported */
6552ca3263aSMika Westerberg 	tb_switch_lane_bonding_enable(sw);
656de462039SMika Westerberg 	/* Set the link configured */
657de462039SMika Westerberg 	tb_switch_configure_link(sw);
65891c0c120SMika Westerberg 
659cf29b9afSRajmohan Mani 	if (tb_enable_tmu(sw))
660cf29b9afSRajmohan Mani 		tb_sw_warn(sw, "failed to enable TMU\n");
661cf29b9afSRajmohan Mani 
662dacb1287SKranthi Kuntala 	/* Scan upstream retimers */
663dacb1287SKranthi Kuntala 	tb_retimer_scan(upstream_port);
664dacb1287SKranthi Kuntala 
665e6f81858SRajmohan Mani 	/*
666e6f81858SRajmohan Mani 	 * Create USB 3.x tunnels only when the switch is plugged to the
667e6f81858SRajmohan Mani 	 * domain. This is because we scan the domain also during discovery
668e6f81858SRajmohan Mani 	 * and want to discover existing USB 3.x tunnels before we create
669e6f81858SRajmohan Mani 	 * any new.
670e6f81858SRajmohan Mani 	 */
671e6f81858SRajmohan Mani 	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
672e6f81858SRajmohan Mani 		tb_sw_warn(sw, "USB3 tunnel creation failed\n");
673e6f81858SRajmohan Mani 
674e876f34aSMika Westerberg 	tb_add_dp_resources(sw);
6759da672a4SAndreas Noever 	tb_scan_switch(sw);
6769da672a4SAndreas Noever }
6779da672a4SAndreas Noever 
6788afe909bSMika Westerberg static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
6798afe909bSMika Westerberg {
6800bd680cdSMika Westerberg 	struct tb_port *src_port, *dst_port;
6810bd680cdSMika Westerberg 	struct tb *tb;
6820bd680cdSMika Westerberg 
6838afe909bSMika Westerberg 	if (!tunnel)
6848afe909bSMika Westerberg 		return;
6858afe909bSMika Westerberg 
6868afe909bSMika Westerberg 	tb_tunnel_deactivate(tunnel);
6878afe909bSMika Westerberg 	list_del(&tunnel->list);
6888afe909bSMika Westerberg 
6890bd680cdSMika Westerberg 	tb = tunnel->tb;
6900bd680cdSMika Westerberg 	src_port = tunnel->src_port;
6910bd680cdSMika Westerberg 	dst_port = tunnel->dst_port;
6928afe909bSMika Westerberg 
6930bd680cdSMika Westerberg 	switch (tunnel->type) {
6940bd680cdSMika Westerberg 	case TB_TUNNEL_DP:
6950bd680cdSMika Westerberg 		/*
6960bd680cdSMika Westerberg 		 * In case of DP tunnel make sure the DP IN resource is
6970bd680cdSMika Westerberg 		 * deallocated properly.
6980bd680cdSMika Westerberg 		 */
6990bd680cdSMika Westerberg 		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
7006ac6faeeSMika Westerberg 		/* Now we can allow the domain to runtime suspend again */
7016ac6faeeSMika Westerberg 		pm_runtime_mark_last_busy(&dst_port->sw->dev);
7026ac6faeeSMika Westerberg 		pm_runtime_put_autosuspend(&dst_port->sw->dev);
7036ac6faeeSMika Westerberg 		pm_runtime_mark_last_busy(&src_port->sw->dev);
7046ac6faeeSMika Westerberg 		pm_runtime_put_autosuspend(&src_port->sw->dev);
7050bd680cdSMika Westerberg 		fallthrough;
7060bd680cdSMika Westerberg 
7070bd680cdSMika Westerberg 	case TB_TUNNEL_USB3:
7080bd680cdSMika Westerberg 		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
7090bd680cdSMika Westerberg 		break;
7100bd680cdSMika Westerberg 
7110bd680cdSMika Westerberg 	default:
7120bd680cdSMika Westerberg 		/*
7130bd680cdSMika Westerberg 		 * PCIe and DMA tunnels do not consume guaranteed
7140bd680cdSMika Westerberg 		 * bandwidth.
7150bd680cdSMika Westerberg 		 */
7160bd680cdSMika Westerberg 		break;
7178afe909bSMika Westerberg 	}
7188afe909bSMika Westerberg 
7198afe909bSMika Westerberg 	tb_tunnel_free(tunnel);
7204f807e47SMika Westerberg }
7214f807e47SMika Westerberg 
722877e50b3SLee Jones /*
7233364f0c1SAndreas Noever  * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
7243364f0c1SAndreas Noever  */
7253364f0c1SAndreas Noever static void tb_free_invalid_tunnels(struct tb *tb)
7263364f0c1SAndreas Noever {
7279d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
72893f36adeSMika Westerberg 	struct tb_tunnel *tunnel;
72993f36adeSMika Westerberg 	struct tb_tunnel *n;
7309d3cce0bSMika Westerberg 
7319d3cce0bSMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
7328afe909bSMika Westerberg 		if (tb_tunnel_is_invalid(tunnel))
7338afe909bSMika Westerberg 			tb_deactivate_and_free_tunnel(tunnel);
7343364f0c1SAndreas Noever 	}
7353364f0c1SAndreas Noever }
7363364f0c1SAndreas Noever 
737877e50b3SLee Jones /*
73823dd5bb4SAndreas Noever  * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
73923dd5bb4SAndreas Noever  */
74023dd5bb4SAndreas Noever static void tb_free_unplugged_children(struct tb_switch *sw)
74123dd5bb4SAndreas Noever {
742b433d010SMika Westerberg 	struct tb_port *port;
743dfe40ca4SMika Westerberg 
744b433d010SMika Westerberg 	tb_switch_for_each_port(sw, port) {
745dfe40ca4SMika Westerberg 		if (!tb_port_has_remote(port))
74623dd5bb4SAndreas Noever 			continue;
747dfe40ca4SMika Westerberg 
74823dd5bb4SAndreas Noever 		if (port->remote->sw->is_unplugged) {
749dacb1287SKranthi Kuntala 			tb_retimer_remove_all(port);
7508afe909bSMika Westerberg 			tb_remove_dp_resources(port->remote->sw);
751de462039SMika Westerberg 			tb_switch_unconfigure_link(port->remote->sw);
75291c0c120SMika Westerberg 			tb_switch_lane_bonding_disable(port->remote->sw);
753bfe778acSMika Westerberg 			tb_switch_remove(port->remote->sw);
75423dd5bb4SAndreas Noever 			port->remote = NULL;
755dfe40ca4SMika Westerberg 			if (port->dual_link_port)
756dfe40ca4SMika Westerberg 				port->dual_link_port->remote = NULL;
75723dd5bb4SAndreas Noever 		} else {
75823dd5bb4SAndreas Noever 			tb_free_unplugged_children(port->remote->sw);
75923dd5bb4SAndreas Noever 		}
76023dd5bb4SAndreas Noever 	}
76123dd5bb4SAndreas Noever }
76223dd5bb4SAndreas Noever 
76399cabbb0SMika Westerberg static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
76499cabbb0SMika Westerberg 					 const struct tb_port *port)
7653364f0c1SAndreas Noever {
766b0407983SMika Westerberg 	struct tb_port *down = NULL;
767b0407983SMika Westerberg 
76899cabbb0SMika Westerberg 	/*
76999cabbb0SMika Westerberg 	 * To keep plugging devices consistently in the same PCIe
770b0407983SMika Westerberg 	 * hierarchy, do mapping here for switch downstream PCIe ports.
77199cabbb0SMika Westerberg 	 */
772b0407983SMika Westerberg 	if (tb_switch_is_usb4(sw)) {
773b0407983SMika Westerberg 		down = usb4_switch_map_pcie_down(sw, port);
774b0407983SMika Westerberg 	} else if (!tb_route(sw)) {
77599cabbb0SMika Westerberg 		int phy_port = tb_phy_port_from_link(port->port);
77699cabbb0SMika Westerberg 		int index;
77799cabbb0SMika Westerberg 
77899cabbb0SMika Westerberg 		/*
77999cabbb0SMika Westerberg 		 * Hard-coded Thunderbolt port to PCIe down port mapping
78099cabbb0SMika Westerberg 		 * per controller.
78199cabbb0SMika Westerberg 		 */
7827bffd97eSMika Westerberg 		if (tb_switch_is_cactus_ridge(sw) ||
7837bffd97eSMika Westerberg 		    tb_switch_is_alpine_ridge(sw))
78499cabbb0SMika Westerberg 			index = !phy_port ? 6 : 7;
78517a8f815SMika Westerberg 		else if (tb_switch_is_falcon_ridge(sw))
78699cabbb0SMika Westerberg 			index = !phy_port ? 6 : 8;
7877bffd97eSMika Westerberg 		else if (tb_switch_is_titan_ridge(sw))
7887bffd97eSMika Westerberg 			index = !phy_port ? 8 : 9;
78999cabbb0SMika Westerberg 		else
79099cabbb0SMika Westerberg 			goto out;
79199cabbb0SMika Westerberg 
79299cabbb0SMika Westerberg 		/* Validate the hard-coding */
79399cabbb0SMika Westerberg 		if (WARN_ON(index > sw->config.max_port_number))
79499cabbb0SMika Westerberg 			goto out;
795b0407983SMika Westerberg 
796b0407983SMika Westerberg 		down = &sw->ports[index];
797b0407983SMika Westerberg 	}
798b0407983SMika Westerberg 
799b0407983SMika Westerberg 	if (down) {
800b0407983SMika Westerberg 		if (WARN_ON(!tb_port_is_pcie_down(down)))
80199cabbb0SMika Westerberg 			goto out;
8029cac51a0SMika Westerberg 		if (tb_pci_port_is_enabled(down))
80399cabbb0SMika Westerberg 			goto out;
80499cabbb0SMika Westerberg 
805b0407983SMika Westerberg 		return down;
80699cabbb0SMika Westerberg 	}
80799cabbb0SMika Westerberg 
80899cabbb0SMika Westerberg out:
809e78db6f0SMika Westerberg 	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
81099cabbb0SMika Westerberg }
81199cabbb0SMika Westerberg 
812e876f34aSMika Westerberg static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
813e876f34aSMika Westerberg {
814e876f34aSMika Westerberg 	struct tb_port *host_port, *port;
815e876f34aSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
816e876f34aSMika Westerberg 
817e876f34aSMika Westerberg 	host_port = tb_route(in->sw) ?
818e876f34aSMika Westerberg 		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
819e876f34aSMika Westerberg 
820e876f34aSMika Westerberg 	list_for_each_entry(port, &tcm->dp_resources, list) {
821e876f34aSMika Westerberg 		if (!tb_port_is_dpout(port))
822e876f34aSMika Westerberg 			continue;
823e876f34aSMika Westerberg 
824e876f34aSMika Westerberg 		if (tb_port_is_enabled(port)) {
825e876f34aSMika Westerberg 			tb_port_dbg(port, "in use\n");
826e876f34aSMika Westerberg 			continue;
827e876f34aSMika Westerberg 		}
828e876f34aSMika Westerberg 
829e876f34aSMika Westerberg 		tb_port_dbg(port, "DP OUT available\n");
830e876f34aSMika Westerberg 
831e876f34aSMika Westerberg 		/*
832e876f34aSMika Westerberg 		 * Keep the DP tunnel under the topology starting from
833e876f34aSMika Westerberg 		 * the same host router downstream port.
834e876f34aSMika Westerberg 		 */
835e876f34aSMika Westerberg 		if (host_port && tb_route(port->sw)) {
836e876f34aSMika Westerberg 			struct tb_port *p;
837e876f34aSMika Westerberg 
838e876f34aSMika Westerberg 			p = tb_port_at(tb_route(port->sw), tb->root_switch);
839e876f34aSMika Westerberg 			if (p != host_port)
840e876f34aSMika Westerberg 				continue;
841e876f34aSMika Westerberg 		}
842e876f34aSMika Westerberg 
843e876f34aSMika Westerberg 		return port;
844e876f34aSMika Westerberg 	}
845e876f34aSMika Westerberg 
846e876f34aSMika Westerberg 	return NULL;
847e876f34aSMika Westerberg }
848e876f34aSMika Westerberg 
/*
 * tb_tunnel_dp() - try to establish a new DP tunnel
 * @tb: Domain structure
 *
 * Picks the first inactive DP IN adapter from the tracked DP resources,
 * finds a matching free DP OUT adapter and sets up a DP tunnel between
 * them, sized by the bandwidth left after unused USB3 bandwidth has
 * been released. Returns silently when tunneling is administratively
 * disabled or no suitable adapter pair exists. On any later failure the
 * goto chain unwinds exactly the resources acquired so far (tunnel,
 * USB3 bandwidth, DP IN resource, runtime PM references) in reverse
 * order.
 */
static void tb_tunnel_dp(struct tb *tb)
{
	int available_up, available_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_dp()) {
		tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
		return;
	}

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpin(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP IN available\n");

		/* Pair is complete once a matching free DP OUT is found */
		out = tb_find_dp_out(tb, port);
		if (out) {
			in = port;
			break;
		}
	}

	if (!in) {
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
		return;
	}
	if (!out) {
		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
		return;
	}

	/*
	 * DP stream needs the domain to be active so runtime resume
	 * both ends of the tunnel.
	 *
	 * This should bring the routers in the middle active as well
	 * and keeps the domain from runtime suspending while the DP
	 * tunnel is active.
	 */
	pm_runtime_get_sync(&in->sw->dev);
	pm_runtime_get_sync(&out->sw->dev);

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		goto err_rpm_put;
	}

	/* Make all unused USB3 bandwidth available for the new DP tunnel */
	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
	if (ret) {
		tb_warn(tb, "failed to release unused bandwidth\n");
		goto err_dealloc_dp;
	}

	ret = tb_available_bandwidth(tb, in, out, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
	       available_up, available_down);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	/* Give the USB3 tunnels back whatever the DP tunnel did not take */
	tb_reclaim_usb3_bandwidth(tb, in, out);
	return;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	tb_reclaim_usb3_bandwidth(tb, in, out);
err_dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
err_rpm_put:
	pm_runtime_mark_last_busy(&out->sw->dev);
	pm_runtime_put_autosuspend(&out->sw->dev);
	pm_runtime_mark_last_busy(&in->sw->dev);
	pm_runtime_put_autosuspend(&in->sw->dev);
}
9544f807e47SMika Westerberg 
9558afe909bSMika Westerberg static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
9564f807e47SMika Westerberg {
9578afe909bSMika Westerberg 	struct tb_port *in, *out;
9588afe909bSMika Westerberg 	struct tb_tunnel *tunnel;
9598afe909bSMika Westerberg 
9608afe909bSMika Westerberg 	if (tb_port_is_dpin(port)) {
9618afe909bSMika Westerberg 		tb_port_dbg(port, "DP IN resource unavailable\n");
9628afe909bSMika Westerberg 		in = port;
9638afe909bSMika Westerberg 		out = NULL;
9648afe909bSMika Westerberg 	} else {
9658afe909bSMika Westerberg 		tb_port_dbg(port, "DP OUT resource unavailable\n");
9668afe909bSMika Westerberg 		in = NULL;
9678afe909bSMika Westerberg 		out = port;
9688afe909bSMika Westerberg 	}
9698afe909bSMika Westerberg 
9708afe909bSMika Westerberg 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
9718afe909bSMika Westerberg 	tb_deactivate_and_free_tunnel(tunnel);
9728afe909bSMika Westerberg 	list_del_init(&port->list);
9738afe909bSMika Westerberg 
9748afe909bSMika Westerberg 	/*
9758afe909bSMika Westerberg 	 * See if there is another DP OUT port that can be used for
9768afe909bSMika Westerberg 	 * to create another tunnel.
9778afe909bSMika Westerberg 	 */
9788afe909bSMika Westerberg 	tb_tunnel_dp(tb);
9798afe909bSMika Westerberg }
9808afe909bSMika Westerberg 
9818afe909bSMika Westerberg static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
9828afe909bSMika Westerberg {
9838afe909bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
9848afe909bSMika Westerberg 	struct tb_port *p;
9858afe909bSMika Westerberg 
9868afe909bSMika Westerberg 	if (tb_port_is_enabled(port))
9878afe909bSMika Westerberg 		return;
9888afe909bSMika Westerberg 
9898afe909bSMika Westerberg 	list_for_each_entry(p, &tcm->dp_resources, list) {
9908afe909bSMika Westerberg 		if (p == port)
9918afe909bSMika Westerberg 			return;
9928afe909bSMika Westerberg 	}
9938afe909bSMika Westerberg 
9948afe909bSMika Westerberg 	tb_port_dbg(port, "DP %s resource available\n",
9958afe909bSMika Westerberg 		    tb_port_is_dpin(port) ? "IN" : "OUT");
9968afe909bSMika Westerberg 	list_add_tail(&port->list, &tcm->dp_resources);
9978afe909bSMika Westerberg 
9988afe909bSMika Westerberg 	/* Look for suitable DP IN <-> DP OUT pairs now */
9998afe909bSMika Westerberg 	tb_tunnel_dp(tb);
10004f807e47SMika Westerberg }
10014f807e47SMika Westerberg 
100281a2e3e4SMika Westerberg static void tb_disconnect_and_release_dp(struct tb *tb)
100381a2e3e4SMika Westerberg {
100481a2e3e4SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
100581a2e3e4SMika Westerberg 	struct tb_tunnel *tunnel, *n;
100681a2e3e4SMika Westerberg 
100781a2e3e4SMika Westerberg 	/*
100881a2e3e4SMika Westerberg 	 * Tear down all DP tunnels and release their resources. They
100981a2e3e4SMika Westerberg 	 * will be re-established after resume based on plug events.
101081a2e3e4SMika Westerberg 	 */
101181a2e3e4SMika Westerberg 	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
101281a2e3e4SMika Westerberg 		if (tb_tunnel_is_dp(tunnel))
101381a2e3e4SMika Westerberg 			tb_deactivate_and_free_tunnel(tunnel);
101481a2e3e4SMika Westerberg 	}
101581a2e3e4SMika Westerberg 
101681a2e3e4SMika Westerberg 	while (!list_empty(&tcm->dp_resources)) {
101781a2e3e4SMika Westerberg 		struct tb_port *port;
101881a2e3e4SMika Westerberg 
101981a2e3e4SMika Westerberg 		port = list_first_entry(&tcm->dp_resources,
102081a2e3e4SMika Westerberg 					struct tb_port, list);
102181a2e3e4SMika Westerberg 		list_del_init(&port->list);
102281a2e3e4SMika Westerberg 	}
102381a2e3e4SMika Westerberg }
102481a2e3e4SMika Westerberg 
10253da88be2SMika Westerberg static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
10263da88be2SMika Westerberg {
10273da88be2SMika Westerberg 	struct tb_tunnel *tunnel;
10283da88be2SMika Westerberg 	struct tb_port *up;
10293da88be2SMika Westerberg 
10303da88be2SMika Westerberg 	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
10313da88be2SMika Westerberg 	if (WARN_ON(!up))
10323da88be2SMika Westerberg 		return -ENODEV;
10333da88be2SMika Westerberg 
10343da88be2SMika Westerberg 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
10353da88be2SMika Westerberg 	if (WARN_ON(!tunnel))
10363da88be2SMika Westerberg 		return -ENODEV;
10373da88be2SMika Westerberg 
10383da88be2SMika Westerberg 	tb_tunnel_deactivate(tunnel);
10393da88be2SMika Westerberg 	list_del(&tunnel->list);
10403da88be2SMika Westerberg 	tb_tunnel_free(tunnel);
10413da88be2SMika Westerberg 	return 0;
10423da88be2SMika Westerberg }
10433da88be2SMika Westerberg 
104499cabbb0SMika Westerberg static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
104599cabbb0SMika Westerberg {
104699cabbb0SMika Westerberg 	struct tb_port *up, *down, *port;
10479d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
104899cabbb0SMika Westerberg 	struct tb_switch *parent_sw;
104999cabbb0SMika Westerberg 	struct tb_tunnel *tunnel;
10509d3cce0bSMika Westerberg 
1051386e5e29SMika Westerberg 	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
105299cabbb0SMika Westerberg 	if (!up)
105399cabbb0SMika Westerberg 		return 0;
10543364f0c1SAndreas Noever 
105599cabbb0SMika Westerberg 	/*
105699cabbb0SMika Westerberg 	 * Look up available down port. Since we are chaining it should
105799cabbb0SMika Westerberg 	 * be found right above this switch.
105899cabbb0SMika Westerberg 	 */
105999cabbb0SMika Westerberg 	parent_sw = tb_to_switch(sw->dev.parent);
106099cabbb0SMika Westerberg 	port = tb_port_at(tb_route(sw), parent_sw);
106199cabbb0SMika Westerberg 	down = tb_find_pcie_down(parent_sw, port);
106299cabbb0SMika Westerberg 	if (!down)
106399cabbb0SMika Westerberg 		return 0;
10643364f0c1SAndreas Noever 
106599cabbb0SMika Westerberg 	tunnel = tb_tunnel_alloc_pci(tb, up, down);
106699cabbb0SMika Westerberg 	if (!tunnel)
106799cabbb0SMika Westerberg 		return -ENOMEM;
10683364f0c1SAndreas Noever 
106993f36adeSMika Westerberg 	if (tb_tunnel_activate(tunnel)) {
107099cabbb0SMika Westerberg 		tb_port_info(up,
10713364f0c1SAndreas Noever 			     "PCIe tunnel activation failed, aborting\n");
107293f36adeSMika Westerberg 		tb_tunnel_free(tunnel);
107399cabbb0SMika Westerberg 		return -EIO;
10743364f0c1SAndreas Noever 	}
10753364f0c1SAndreas Noever 
107699cabbb0SMika Westerberg 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
107799cabbb0SMika Westerberg 	return 0;
10783364f0c1SAndreas Noever }
10799da672a4SAndreas Noever 
10807ea4cd6bSMika Westerberg static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
10817ea4cd6bSMika Westerberg {
10827ea4cd6bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
10837ea4cd6bSMika Westerberg 	struct tb_port *nhi_port, *dst_port;
10847ea4cd6bSMika Westerberg 	struct tb_tunnel *tunnel;
10857ea4cd6bSMika Westerberg 	struct tb_switch *sw;
10867ea4cd6bSMika Westerberg 
10877ea4cd6bSMika Westerberg 	sw = tb_to_switch(xd->dev.parent);
10887ea4cd6bSMika Westerberg 	dst_port = tb_port_at(xd->route, sw);
1089386e5e29SMika Westerberg 	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
10907ea4cd6bSMika Westerberg 
10917ea4cd6bSMika Westerberg 	mutex_lock(&tb->lock);
10927ea4cd6bSMika Westerberg 	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
10937ea4cd6bSMika Westerberg 				     xd->transmit_path, xd->receive_ring,
10947ea4cd6bSMika Westerberg 				     xd->receive_path);
10957ea4cd6bSMika Westerberg 	if (!tunnel) {
10967ea4cd6bSMika Westerberg 		mutex_unlock(&tb->lock);
10977ea4cd6bSMika Westerberg 		return -ENOMEM;
10987ea4cd6bSMika Westerberg 	}
10997ea4cd6bSMika Westerberg 
11007ea4cd6bSMika Westerberg 	if (tb_tunnel_activate(tunnel)) {
11017ea4cd6bSMika Westerberg 		tb_port_info(nhi_port,
11027ea4cd6bSMika Westerberg 			     "DMA tunnel activation failed, aborting\n");
11037ea4cd6bSMika Westerberg 		tb_tunnel_free(tunnel);
11047ea4cd6bSMika Westerberg 		mutex_unlock(&tb->lock);
11057ea4cd6bSMika Westerberg 		return -EIO;
11067ea4cd6bSMika Westerberg 	}
11077ea4cd6bSMika Westerberg 
11087ea4cd6bSMika Westerberg 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
11097ea4cd6bSMika Westerberg 	mutex_unlock(&tb->lock);
11107ea4cd6bSMika Westerberg 	return 0;
11117ea4cd6bSMika Westerberg }
11127ea4cd6bSMika Westerberg 
11137ea4cd6bSMika Westerberg static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
11147ea4cd6bSMika Westerberg {
11157ea4cd6bSMika Westerberg 	struct tb_port *dst_port;
11168afe909bSMika Westerberg 	struct tb_tunnel *tunnel;
11177ea4cd6bSMika Westerberg 	struct tb_switch *sw;
11187ea4cd6bSMika Westerberg 
11197ea4cd6bSMika Westerberg 	sw = tb_to_switch(xd->dev.parent);
11207ea4cd6bSMika Westerberg 	dst_port = tb_port_at(xd->route, sw);
11217ea4cd6bSMika Westerberg 
11227ea4cd6bSMika Westerberg 	/*
11237ea4cd6bSMika Westerberg 	 * It is possible that the tunnel was already teared down (in
11247ea4cd6bSMika Westerberg 	 * case of cable disconnect) so it is fine if we cannot find it
11257ea4cd6bSMika Westerberg 	 * here anymore.
11267ea4cd6bSMika Westerberg 	 */
11278afe909bSMika Westerberg 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
11288afe909bSMika Westerberg 	tb_deactivate_and_free_tunnel(tunnel);
11297ea4cd6bSMika Westerberg }
11307ea4cd6bSMika Westerberg 
11317ea4cd6bSMika Westerberg static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
11327ea4cd6bSMika Westerberg {
11337ea4cd6bSMika Westerberg 	if (!xd->is_unplugged) {
11347ea4cd6bSMika Westerberg 		mutex_lock(&tb->lock);
11357ea4cd6bSMika Westerberg 		__tb_disconnect_xdomain_paths(tb, xd);
11367ea4cd6bSMika Westerberg 		mutex_unlock(&tb->lock);
11377ea4cd6bSMika Westerberg 	}
11387ea4cd6bSMika Westerberg 	return 0;
11397ea4cd6bSMika Westerberg }
11407ea4cd6bSMika Westerberg 
1141d6cc51cdSAndreas Noever /* hotplug handling */
1142d6cc51cdSAndreas Noever 
/*
 * tb_handle_hotplug() - handle hotplug event
 * @work: Work item embedded in the queued &struct tb_hotplug_event
 *
 * Executes on tb->wq. Resolves the event's route/port to a switch port
 * and either tears down the unplugged topology (switch, XDomain or DP
 * resource) or scans for new devices / DP resources. Holds tb->lock for
 * the whole operation and keeps a runtime PM reference on the domain
 * (and the affected switch) so nothing suspends underneath us. Frees
 * @work's containing event on exit.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;

	/* Bring the domain back from sleep if it was suspended */
	pm_runtime_get_sync(&tb->dev);

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	/* Takes a reference on the switch; dropped at put_sw */
	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}

	/* Keep the affected switch powered while we reconfigure it */
	pm_runtime_get_sync(&sw->dev);

	if (ev->unplug) {
		tb_retimer_remove_all(port);

		if (tb_port_has_remote(port)) {
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_tmu_disable(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
			/* Maybe we can create another DP tunnel */
			tb_tunnel_dp(tb);
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * the path below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd);
			tb_xdomain_put(xd);
			tb_port_unconfigure_xdomain(port);
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_unavailable(tb, port);
		} else {
			tb_port_dbg(port,
				   "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else {
		/* Plug event on a free port: scan it or record DP resource */
		if (tb_port_is_null(port)) {
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_available(tb, port);
		}
	}

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);

	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);

	kfree(ev);
}
1251d6cc51cdSAndreas Noever 
/*
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates plug events to tb_handle_hotplug.
 */
125781a54b5eSMika Westerberg static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
125881a54b5eSMika Westerberg 			    const void *buf, size_t size)
1259d6cc51cdSAndreas Noever {
126081a54b5eSMika Westerberg 	const struct cfg_event_pkg *pkg = buf;
126181a54b5eSMika Westerberg 	u64 route;
126281a54b5eSMika Westerberg 
126381a54b5eSMika Westerberg 	if (type != TB_CFG_PKG_EVENT) {
126481a54b5eSMika Westerberg 		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
126581a54b5eSMika Westerberg 		return;
126681a54b5eSMika Westerberg 	}
126781a54b5eSMika Westerberg 
126881a54b5eSMika Westerberg 	route = tb_cfg_get_route(&pkg->header);
126981a54b5eSMika Westerberg 
1270210e9f56SMika Westerberg 	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
127181a54b5eSMika Westerberg 		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
127281a54b5eSMika Westerberg 			pkg->port);
127381a54b5eSMika Westerberg 	}
127481a54b5eSMika Westerberg 
12754f807e47SMika Westerberg 	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
1276d6cc51cdSAndreas Noever }
1277d6cc51cdSAndreas Noever 
12789d3cce0bSMika Westerberg static void tb_stop(struct tb *tb)
1279d6cc51cdSAndreas Noever {
12809d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
128193f36adeSMika Westerberg 	struct tb_tunnel *tunnel;
128293f36adeSMika Westerberg 	struct tb_tunnel *n;
12833364f0c1SAndreas Noever 
12846ac6faeeSMika Westerberg 	cancel_delayed_work(&tcm->remove_work);
12853364f0c1SAndreas Noever 	/* tunnels are only present after everything has been initialized */
12867ea4cd6bSMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
12877ea4cd6bSMika Westerberg 		/*
12887ea4cd6bSMika Westerberg 		 * DMA tunnels require the driver to be functional so we
12897ea4cd6bSMika Westerberg 		 * tear them down. Other protocol tunnels can be left
12907ea4cd6bSMika Westerberg 		 * intact.
12917ea4cd6bSMika Westerberg 		 */
12927ea4cd6bSMika Westerberg 		if (tb_tunnel_is_dma(tunnel))
12937ea4cd6bSMika Westerberg 			tb_tunnel_deactivate(tunnel);
129493f36adeSMika Westerberg 		tb_tunnel_free(tunnel);
12957ea4cd6bSMika Westerberg 	}
1296bfe778acSMika Westerberg 	tb_switch_remove(tb->root_switch);
12979d3cce0bSMika Westerberg 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
1298d6cc51cdSAndreas Noever }
1299d6cc51cdSAndreas Noever 
130099cabbb0SMika Westerberg static int tb_scan_finalize_switch(struct device *dev, void *data)
130199cabbb0SMika Westerberg {
130299cabbb0SMika Westerberg 	if (tb_is_switch(dev)) {
130399cabbb0SMika Westerberg 		struct tb_switch *sw = tb_to_switch(dev);
130499cabbb0SMika Westerberg 
130599cabbb0SMika Westerberg 		/*
130699cabbb0SMika Westerberg 		 * If we found that the switch was already setup by the
130799cabbb0SMika Westerberg 		 * boot firmware, mark it as authorized now before we
130899cabbb0SMika Westerberg 		 * send uevent to userspace.
130999cabbb0SMika Westerberg 		 */
131099cabbb0SMika Westerberg 		if (sw->boot)
131199cabbb0SMika Westerberg 			sw->authorized = 1;
131299cabbb0SMika Westerberg 
131399cabbb0SMika Westerberg 		dev_set_uevent_suppress(dev, false);
131499cabbb0SMika Westerberg 		kobject_uevent(&dev->kobj, KOBJ_ADD);
131599cabbb0SMika Westerberg 		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
131699cabbb0SMika Westerberg 	}
131799cabbb0SMika Westerberg 
131899cabbb0SMika Westerberg 	return 0;
131999cabbb0SMika Westerberg }
132099cabbb0SMika Westerberg 
13219d3cce0bSMika Westerberg static int tb_start(struct tb *tb)
1322d6cc51cdSAndreas Noever {
13239d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
1324bfe778acSMika Westerberg 	int ret;
1325d6cc51cdSAndreas Noever 
1326bfe778acSMika Westerberg 	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
1327444ac384SMika Westerberg 	if (IS_ERR(tb->root_switch))
1328444ac384SMika Westerberg 		return PTR_ERR(tb->root_switch);
1329a25c8b2fSAndreas Noever 
1330e6b245ccSMika Westerberg 	/*
1331e6b245ccSMika Westerberg 	 * ICM firmware upgrade needs running firmware and in native
1332e6b245ccSMika Westerberg 	 * mode that is not available so disable firmware upgrade of the
1333e6b245ccSMika Westerberg 	 * root switch.
1334e6b245ccSMika Westerberg 	 */
1335e6b245ccSMika Westerberg 	tb->root_switch->no_nvm_upgrade = true;
13366ac6faeeSMika Westerberg 	/* All USB4 routers support runtime PM */
13376ac6faeeSMika Westerberg 	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
1338e6b245ccSMika Westerberg 
1339bfe778acSMika Westerberg 	ret = tb_switch_configure(tb->root_switch);
1340bfe778acSMika Westerberg 	if (ret) {
1341bfe778acSMika Westerberg 		tb_switch_put(tb->root_switch);
1342bfe778acSMika Westerberg 		return ret;
1343bfe778acSMika Westerberg 	}
1344bfe778acSMika Westerberg 
1345bfe778acSMika Westerberg 	/* Announce the switch to the world */
1346bfe778acSMika Westerberg 	ret = tb_switch_add(tb->root_switch);
1347bfe778acSMika Westerberg 	if (ret) {
1348bfe778acSMika Westerberg 		tb_switch_put(tb->root_switch);
1349bfe778acSMika Westerberg 		return ret;
1350bfe778acSMika Westerberg 	}
1351bfe778acSMika Westerberg 
1352cf29b9afSRajmohan Mani 	/* Enable TMU if it is off */
1353cf29b9afSRajmohan Mani 	tb_switch_tmu_enable(tb->root_switch);
13549da672a4SAndreas Noever 	/* Full scan to discover devices added before the driver was loaded. */
13559da672a4SAndreas Noever 	tb_scan_switch(tb->root_switch);
13560414bec5SMika Westerberg 	/* Find out tunnels created by the boot firmware */
13570414bec5SMika Westerberg 	tb_discover_tunnels(tb->root_switch);
1358e6f81858SRajmohan Mani 	/*
1359e6f81858SRajmohan Mani 	 * If the boot firmware did not create USB 3.x tunnels create them
1360e6f81858SRajmohan Mani 	 * now for the whole topology.
1361e6f81858SRajmohan Mani 	 */
1362e6f81858SRajmohan Mani 	tb_create_usb3_tunnels(tb->root_switch);
13638afe909bSMika Westerberg 	/* Add DP IN resources for the root switch */
13648afe909bSMika Westerberg 	tb_add_dp_resources(tb->root_switch);
136599cabbb0SMika Westerberg 	/* Make the discovered switches available to the userspace */
136699cabbb0SMika Westerberg 	device_for_each_child(&tb->root_switch->dev, NULL,
136799cabbb0SMika Westerberg 			      tb_scan_finalize_switch);
13689da672a4SAndreas Noever 
1369d6cc51cdSAndreas Noever 	/* Allow tb_handle_hotplug to progress events */
13709d3cce0bSMika Westerberg 	tcm->hotplug_active = true;
13719d3cce0bSMika Westerberg 	return 0;
1372d6cc51cdSAndreas Noever }
1373d6cc51cdSAndreas Noever 
13749d3cce0bSMika Westerberg static int tb_suspend_noirq(struct tb *tb)
137523dd5bb4SAndreas Noever {
13769d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
13779d3cce0bSMika Westerberg 
1378daa5140fSMika Westerberg 	tb_dbg(tb, "suspending...\n");
137981a2e3e4SMika Westerberg 	tb_disconnect_and_release_dp(tb);
13806ac6faeeSMika Westerberg 	tb_switch_suspend(tb->root_switch, false);
13819d3cce0bSMika Westerberg 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
1382daa5140fSMika Westerberg 	tb_dbg(tb, "suspend finished\n");
13839d3cce0bSMika Westerberg 
13849d3cce0bSMika Westerberg 	return 0;
138523dd5bb4SAndreas Noever }
138623dd5bb4SAndreas Noever 
/*
 * Re-apply link configuration to the subtree under @sw after resume:
 * restores the TMU configuration of @sw, then re-enables lane bonding
 * and link configuration on each connected downstream router
 * (recursing into it) and reconfigures any inter-domain (XDomain)
 * ports.
 */
static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;

	/* No need to restore if the router is already unplugged */
	if (sw->is_unplugged)
		return;

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_for_each_port(sw, port) {
		/* Only ports with a downstream router or an XDomain need work */
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (port->remote) {
			tb_switch_lane_bonding_enable(port->remote->sw);
			tb_switch_configure_link(port->remote->sw);

			/* Recurse into the downstream router */
			tb_restore_children(port->remote->sw);
		} else if (port->xdomain) {
			tb_port_configure_xdomain(port);
		}
	}
}
141291c0c120SMika Westerberg 
14139d3cce0bSMika Westerberg static int tb_resume_noirq(struct tb *tb)
141423dd5bb4SAndreas Noever {
14159d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
141693f36adeSMika Westerberg 	struct tb_tunnel *tunnel, *n;
14179d3cce0bSMika Westerberg 
1418daa5140fSMika Westerberg 	tb_dbg(tb, "resuming...\n");
141923dd5bb4SAndreas Noever 
142023dd5bb4SAndreas Noever 	/* remove any pci devices the firmware might have setup */
1421356b6c4eSMika Westerberg 	tb_switch_reset(tb->root_switch);
142223dd5bb4SAndreas Noever 
142323dd5bb4SAndreas Noever 	tb_switch_resume(tb->root_switch);
142423dd5bb4SAndreas Noever 	tb_free_invalid_tunnels(tb);
142523dd5bb4SAndreas Noever 	tb_free_unplugged_children(tb->root_switch);
142691c0c120SMika Westerberg 	tb_restore_children(tb->root_switch);
14279d3cce0bSMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
142893f36adeSMika Westerberg 		tb_tunnel_restart(tunnel);
14299d3cce0bSMika Westerberg 	if (!list_empty(&tcm->tunnel_list)) {
143023dd5bb4SAndreas Noever 		/*
143123dd5bb4SAndreas Noever 		 * the pcie links need some time to get going.
143223dd5bb4SAndreas Noever 		 * 100ms works for me...
143323dd5bb4SAndreas Noever 		 */
1434daa5140fSMika Westerberg 		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
143523dd5bb4SAndreas Noever 		msleep(100);
143623dd5bb4SAndreas Noever 	}
143723dd5bb4SAndreas Noever 	 /* Allow tb_handle_hotplug to progress events */
14389d3cce0bSMika Westerberg 	tcm->hotplug_active = true;
1439daa5140fSMika Westerberg 	tb_dbg(tb, "resume finished\n");
14409d3cce0bSMika Westerberg 
14419d3cce0bSMika Westerberg 	return 0;
14429d3cce0bSMika Westerberg }
14439d3cce0bSMika Westerberg 
14447ea4cd6bSMika Westerberg static int tb_free_unplugged_xdomains(struct tb_switch *sw)
14457ea4cd6bSMika Westerberg {
1446b433d010SMika Westerberg 	struct tb_port *port;
1447b433d010SMika Westerberg 	int ret = 0;
14487ea4cd6bSMika Westerberg 
1449b433d010SMika Westerberg 	tb_switch_for_each_port(sw, port) {
14507ea4cd6bSMika Westerberg 		if (tb_is_upstream_port(port))
14517ea4cd6bSMika Westerberg 			continue;
14527ea4cd6bSMika Westerberg 		if (port->xdomain && port->xdomain->is_unplugged) {
1453dacb1287SKranthi Kuntala 			tb_retimer_remove_all(port);
14547ea4cd6bSMika Westerberg 			tb_xdomain_remove(port->xdomain);
1455284652a4SMika Westerberg 			tb_port_unconfigure_xdomain(port);
14567ea4cd6bSMika Westerberg 			port->xdomain = NULL;
14577ea4cd6bSMika Westerberg 			ret++;
14587ea4cd6bSMika Westerberg 		} else if (port->remote) {
14597ea4cd6bSMika Westerberg 			ret += tb_free_unplugged_xdomains(port->remote->sw);
14607ea4cd6bSMika Westerberg 		}
14617ea4cd6bSMika Westerberg 	}
14627ea4cd6bSMika Westerberg 
14637ea4cd6bSMika Westerberg 	return ret;
14647ea4cd6bSMika Westerberg }
14657ea4cd6bSMika Westerberg 
1466884e4d57SMika Westerberg static int tb_freeze_noirq(struct tb *tb)
1467884e4d57SMika Westerberg {
1468884e4d57SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
1469884e4d57SMika Westerberg 
1470884e4d57SMika Westerberg 	tcm->hotplug_active = false;
1471884e4d57SMika Westerberg 	return 0;
1472884e4d57SMika Westerberg }
1473884e4d57SMika Westerberg 
1474884e4d57SMika Westerberg static int tb_thaw_noirq(struct tb *tb)
1475884e4d57SMika Westerberg {
1476884e4d57SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
1477884e4d57SMika Westerberg 
1478884e4d57SMika Westerberg 	tcm->hotplug_active = true;
1479884e4d57SMika Westerberg 	return 0;
1480884e4d57SMika Westerberg }
1481884e4d57SMika Westerberg 
14827ea4cd6bSMika Westerberg static void tb_complete(struct tb *tb)
14837ea4cd6bSMika Westerberg {
14847ea4cd6bSMika Westerberg 	/*
14857ea4cd6bSMika Westerberg 	 * Release any unplugged XDomains and if there is a case where
14867ea4cd6bSMika Westerberg 	 * another domain is swapped in place of unplugged XDomain we
14877ea4cd6bSMika Westerberg 	 * need to run another rescan.
14887ea4cd6bSMika Westerberg 	 */
14897ea4cd6bSMika Westerberg 	mutex_lock(&tb->lock);
14907ea4cd6bSMika Westerberg 	if (tb_free_unplugged_xdomains(tb->root_switch))
14917ea4cd6bSMika Westerberg 		tb_scan_switch(tb->root_switch);
14927ea4cd6bSMika Westerberg 	mutex_unlock(&tb->lock);
14937ea4cd6bSMika Westerberg }
14947ea4cd6bSMika Westerberg 
14956ac6faeeSMika Westerberg static int tb_runtime_suspend(struct tb *tb)
14966ac6faeeSMika Westerberg {
14976ac6faeeSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
14986ac6faeeSMika Westerberg 
14996ac6faeeSMika Westerberg 	mutex_lock(&tb->lock);
15006ac6faeeSMika Westerberg 	tb_switch_suspend(tb->root_switch, true);
15016ac6faeeSMika Westerberg 	tcm->hotplug_active = false;
15026ac6faeeSMika Westerberg 	mutex_unlock(&tb->lock);
15036ac6faeeSMika Westerberg 
15046ac6faeeSMika Westerberg 	return 0;
15056ac6faeeSMika Westerberg }
15066ac6faeeSMika Westerberg 
/*
 * Worker that drops any routers and XDomains found unplugged after a
 * runtime resume. Scheduled from tb_runtime_resume() on tb->wq so the
 * removal runs outside the resume path (removal could otherwise
 * runtime resume the very device being removed and deadlock).
 */
static void tb_remove_work(struct work_struct *work)
{
	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
	struct tb *tb = tcm_to_tb(tcm);

	mutex_lock(&tb->lock);
	/*
	 * NOTE(review): root_switch is NULL-checked here — presumably it
	 * can be gone if the domain is being torn down; confirm against
	 * the domain lifecycle.
	 */
	if (tb->root_switch) {
		tb_free_unplugged_children(tb->root_switch);
		tb_free_unplugged_xdomains(tb->root_switch);
	}
	mutex_unlock(&tb->lock);
}
15196ac6faeeSMika Westerberg 
15206ac6faeeSMika Westerberg static int tb_runtime_resume(struct tb *tb)
15216ac6faeeSMika Westerberg {
15226ac6faeeSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
15236ac6faeeSMika Westerberg 	struct tb_tunnel *tunnel, *n;
15246ac6faeeSMika Westerberg 
15256ac6faeeSMika Westerberg 	mutex_lock(&tb->lock);
15266ac6faeeSMika Westerberg 	tb_switch_resume(tb->root_switch);
15276ac6faeeSMika Westerberg 	tb_free_invalid_tunnels(tb);
15286ac6faeeSMika Westerberg 	tb_restore_children(tb->root_switch);
15296ac6faeeSMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
15306ac6faeeSMika Westerberg 		tb_tunnel_restart(tunnel);
15316ac6faeeSMika Westerberg 	tcm->hotplug_active = true;
15326ac6faeeSMika Westerberg 	mutex_unlock(&tb->lock);
15336ac6faeeSMika Westerberg 
15346ac6faeeSMika Westerberg 	/*
15356ac6faeeSMika Westerberg 	 * Schedule cleanup of any unplugged devices. Run this in a
15366ac6faeeSMika Westerberg 	 * separate thread to avoid possible deadlock if the device
15376ac6faeeSMika Westerberg 	 * removal runtime resumes the unplugged device.
15386ac6faeeSMika Westerberg 	 */
15396ac6faeeSMika Westerberg 	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
15406ac6faeeSMika Westerberg 	return 0;
15416ac6faeeSMika Westerberg }
15426ac6faeeSMika Westerberg 
/* Domain operations implemented by the software connection manager */
static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.freeze_noirq = tb_freeze_noirq,
	.thaw_noirq = tb_thaw_noirq,
	.complete = tb_complete,
	.runtime_suspend = tb_runtime_suspend,
	.runtime_resume = tb_runtime_resume,
	.handle_event = tb_handle_event,
	.disapprove_switch = tb_disconnect_pci,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};
15599d3cce0bSMika Westerberg 
15609d3cce0bSMika Westerberg struct tb *tb_probe(struct tb_nhi *nhi)
15619d3cce0bSMika Westerberg {
15629d3cce0bSMika Westerberg 	struct tb_cm *tcm;
15639d3cce0bSMika Westerberg 	struct tb *tb;
15649d3cce0bSMika Westerberg 
15659d3cce0bSMika Westerberg 	tb = tb_domain_alloc(nhi, sizeof(*tcm));
15669d3cce0bSMika Westerberg 	if (!tb)
15679d3cce0bSMika Westerberg 		return NULL;
15689d3cce0bSMika Westerberg 
1569c6da62a2SMika Westerberg 	if (tb_acpi_may_tunnel_pcie())
157099cabbb0SMika Westerberg 		tb->security_level = TB_SECURITY_USER;
1571c6da62a2SMika Westerberg 	else
1572c6da62a2SMika Westerberg 		tb->security_level = TB_SECURITY_NOPCIE;
1573c6da62a2SMika Westerberg 
15749d3cce0bSMika Westerberg 	tb->cm_ops = &tb_cm_ops;
15759d3cce0bSMika Westerberg 
15769d3cce0bSMika Westerberg 	tcm = tb_priv(tb);
15779d3cce0bSMika Westerberg 	INIT_LIST_HEAD(&tcm->tunnel_list);
15788afe909bSMika Westerberg 	INIT_LIST_HEAD(&tcm->dp_resources);
15796ac6faeeSMika Westerberg 	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
15809d3cce0bSMika Westerberg 
1581e0258805SMika Westerberg 	tb_dbg(tb, "using software connection manager\n");
1582e0258805SMika Westerberg 
15839d3cce0bSMika Westerberg 	return tb;
158423dd5bb4SAndreas Noever }
1585