xref: /openbmc/linux/drivers/thunderbolt/tb.c (revision 81a2e3e4)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2d6cc51cdSAndreas Noever /*
399cabbb0SMika Westerberg  * Thunderbolt driver - bus logic (NHI independent)
4d6cc51cdSAndreas Noever  *
5d6cc51cdSAndreas Noever  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
699cabbb0SMika Westerberg  * Copyright (C) 2019, Intel Corporation
7d6cc51cdSAndreas Noever  */
8d6cc51cdSAndreas Noever 
9d6cc51cdSAndreas Noever #include <linux/slab.h>
10d6cc51cdSAndreas Noever #include <linux/errno.h>
11d6cc51cdSAndreas Noever #include <linux/delay.h>
12d6cc51cdSAndreas Noever 
13d6cc51cdSAndreas Noever #include "tb.h"
147adf6097SAndreas Noever #include "tb_regs.h"
151752b9f7SMika Westerberg #include "tunnel.h"
16d6cc51cdSAndreas Noever 
/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources (DP IN adapters, see
 *		  tb_add_dp_resources()) for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
};
319da672a4SAndreas Noever 
/**
 * struct tb_hotplug_event - Deferred hotplug event
 * @work: Work item executing tb_handle_hotplug()
 * @tb: Domain the event occurred in
 * @route: Route string of the switch the event originated from
 * @port: Adapter number the event concerns
 * @unplug: %true if this was an unplug event, %false for plug
 */
struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};
394f807e47SMika Westerberg 
404f807e47SMika Westerberg static void tb_handle_hotplug(struct work_struct *work);
414f807e47SMika Westerberg 
424f807e47SMika Westerberg static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
434f807e47SMika Westerberg {
444f807e47SMika Westerberg 	struct tb_hotplug_event *ev;
454f807e47SMika Westerberg 
464f807e47SMika Westerberg 	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
474f807e47SMika Westerberg 	if (!ev)
484f807e47SMika Westerberg 		return;
494f807e47SMika Westerberg 
504f807e47SMika Westerberg 	ev->tb = tb;
514f807e47SMika Westerberg 	ev->route = route;
524f807e47SMika Westerberg 	ev->port = port;
534f807e47SMika Westerberg 	ev->unplug = unplug;
544f807e47SMika Westerberg 	INIT_WORK(&ev->work, tb_handle_hotplug);
554f807e47SMika Westerberg 	queue_work(tb->wq, &ev->work);
564f807e47SMika Westerberg }
574f807e47SMika Westerberg 
589da672a4SAndreas Noever /* enumeration & hot plug handling */
599da672a4SAndreas Noever 
608afe909bSMika Westerberg static void tb_add_dp_resources(struct tb_switch *sw)
618afe909bSMika Westerberg {
628afe909bSMika Westerberg 	struct tb_cm *tcm = tb_priv(sw->tb);
638afe909bSMika Westerberg 	struct tb_port *port;
648afe909bSMika Westerberg 
658afe909bSMika Westerberg 	tb_switch_for_each_port(sw, port) {
668afe909bSMika Westerberg 		if (!tb_port_is_dpin(port))
678afe909bSMika Westerberg 			continue;
688afe909bSMika Westerberg 
698afe909bSMika Westerberg 		if (!tb_switch_query_dp_resource(sw, port))
708afe909bSMika Westerberg 			continue;
718afe909bSMika Westerberg 
728afe909bSMika Westerberg 		list_add_tail(&port->list, &tcm->dp_resources);
738afe909bSMika Westerberg 		tb_port_dbg(port, "DP IN resource available\n");
748afe909bSMika Westerberg 	}
758afe909bSMika Westerberg }
768afe909bSMika Westerberg 
778afe909bSMika Westerberg static void tb_remove_dp_resources(struct tb_switch *sw)
788afe909bSMika Westerberg {
798afe909bSMika Westerberg 	struct tb_cm *tcm = tb_priv(sw->tb);
808afe909bSMika Westerberg 	struct tb_port *port, *tmp;
818afe909bSMika Westerberg 
828afe909bSMika Westerberg 	/* Clear children resources first */
838afe909bSMika Westerberg 	tb_switch_for_each_port(sw, port) {
848afe909bSMika Westerberg 		if (tb_port_has_remote(port))
858afe909bSMika Westerberg 			tb_remove_dp_resources(port->remote->sw);
868afe909bSMika Westerberg 	}
878afe909bSMika Westerberg 
888afe909bSMika Westerberg 	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
898afe909bSMika Westerberg 		if (port->sw == sw) {
908afe909bSMika Westerberg 			tb_port_dbg(port, "DP OUT resource unavailable\n");
918afe909bSMika Westerberg 			list_del_init(&port->list);
928afe909bSMika Westerberg 		}
938afe909bSMika Westerberg 	}
948afe909bSMika Westerberg }
958afe909bSMika Westerberg 
960414bec5SMika Westerberg static void tb_discover_tunnels(struct tb_switch *sw)
970414bec5SMika Westerberg {
980414bec5SMika Westerberg 	struct tb *tb = sw->tb;
990414bec5SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
1000414bec5SMika Westerberg 	struct tb_port *port;
1010414bec5SMika Westerberg 
102b433d010SMika Westerberg 	tb_switch_for_each_port(sw, port) {
1030414bec5SMika Westerberg 		struct tb_tunnel *tunnel = NULL;
1040414bec5SMika Westerberg 
1050414bec5SMika Westerberg 		switch (port->config.type) {
1064f807e47SMika Westerberg 		case TB_TYPE_DP_HDMI_IN:
1074f807e47SMika Westerberg 			tunnel = tb_tunnel_discover_dp(tb, port);
1084f807e47SMika Westerberg 			break;
1094f807e47SMika Westerberg 
1100414bec5SMika Westerberg 		case TB_TYPE_PCIE_DOWN:
1110414bec5SMika Westerberg 			tunnel = tb_tunnel_discover_pci(tb, port);
1120414bec5SMika Westerberg 			break;
1130414bec5SMika Westerberg 
114e6f81858SRajmohan Mani 		case TB_TYPE_USB3_DOWN:
115e6f81858SRajmohan Mani 			tunnel = tb_tunnel_discover_usb3(tb, port);
116e6f81858SRajmohan Mani 			break;
117e6f81858SRajmohan Mani 
1180414bec5SMika Westerberg 		default:
1190414bec5SMika Westerberg 			break;
1200414bec5SMika Westerberg 		}
1210414bec5SMika Westerberg 
1224f807e47SMika Westerberg 		if (!tunnel)
1234f807e47SMika Westerberg 			continue;
1244f807e47SMika Westerberg 
1254f807e47SMika Westerberg 		if (tb_tunnel_is_pci(tunnel)) {
1260414bec5SMika Westerberg 			struct tb_switch *parent = tunnel->dst_port->sw;
1270414bec5SMika Westerberg 
1280414bec5SMika Westerberg 			while (parent != tunnel->src_port->sw) {
1290414bec5SMika Westerberg 				parent->boot = true;
1300414bec5SMika Westerberg 				parent = tb_switch_parent(parent);
1310414bec5SMika Westerberg 			}
1324f807e47SMika Westerberg 		}
1330414bec5SMika Westerberg 
1340414bec5SMika Westerberg 		list_add_tail(&tunnel->list, &tcm->tunnel_list);
1350414bec5SMika Westerberg 	}
1360414bec5SMika Westerberg 
137b433d010SMika Westerberg 	tb_switch_for_each_port(sw, port) {
138b433d010SMika Westerberg 		if (tb_port_has_remote(port))
139b433d010SMika Westerberg 			tb_discover_tunnels(port->remote->sw);
1400414bec5SMika Westerberg 	}
1410414bec5SMika Westerberg }
1429da672a4SAndreas Noever 
1437ea4cd6bSMika Westerberg static void tb_scan_xdomain(struct tb_port *port)
1447ea4cd6bSMika Westerberg {
1457ea4cd6bSMika Westerberg 	struct tb_switch *sw = port->sw;
1467ea4cd6bSMika Westerberg 	struct tb *tb = sw->tb;
1477ea4cd6bSMika Westerberg 	struct tb_xdomain *xd;
1487ea4cd6bSMika Westerberg 	u64 route;
1497ea4cd6bSMika Westerberg 
1507ea4cd6bSMika Westerberg 	route = tb_downstream_route(port);
1517ea4cd6bSMika Westerberg 	xd = tb_xdomain_find_by_route(tb, route);
1527ea4cd6bSMika Westerberg 	if (xd) {
1537ea4cd6bSMika Westerberg 		tb_xdomain_put(xd);
1547ea4cd6bSMika Westerberg 		return;
1557ea4cd6bSMika Westerberg 	}
1567ea4cd6bSMika Westerberg 
1577ea4cd6bSMika Westerberg 	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
1587ea4cd6bSMika Westerberg 			      NULL);
1597ea4cd6bSMika Westerberg 	if (xd) {
1607ea4cd6bSMika Westerberg 		tb_port_at(route, sw)->xdomain = xd;
1617ea4cd6bSMika Westerberg 		tb_xdomain_add(xd);
1627ea4cd6bSMika Westerberg 	}
1637ea4cd6bSMika Westerberg }
1647ea4cd6bSMika Westerberg 
/*
 * tb_enable_tmu() - Make sure the TMU of @sw runs in the expected mode
 *
 * Returns %0 on success (including when the TMU was already correctly
 * enabled), negative errno otherwise.
 */
static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/* Already running in the correct mode - leave it alone */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (!ret)
		ret = tb_switch_tmu_post_time(sw);
	if (!ret)
		ret = tb_switch_tmu_enable(sw);

	return ret;
}
183cf29b9afSRajmohan Mani 
184e6f81858SRajmohan Mani /**
185e6f81858SRajmohan Mani  * tb_find_unused_port() - return the first inactive port on @sw
186e6f81858SRajmohan Mani  * @sw: Switch to find the port on
187e6f81858SRajmohan Mani  * @type: Port type to look for
188e6f81858SRajmohan Mani  */
189e6f81858SRajmohan Mani static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
190e6f81858SRajmohan Mani 					   enum tb_port_type type)
191e6f81858SRajmohan Mani {
192e6f81858SRajmohan Mani 	struct tb_port *port;
193e6f81858SRajmohan Mani 
194e6f81858SRajmohan Mani 	tb_switch_for_each_port(sw, port) {
195e6f81858SRajmohan Mani 		if (tb_is_upstream_port(port))
196e6f81858SRajmohan Mani 			continue;
197e6f81858SRajmohan Mani 		if (port->config.type != type)
198e6f81858SRajmohan Mani 			continue;
199e6f81858SRajmohan Mani 		if (!port->cap_adap)
200e6f81858SRajmohan Mani 			continue;
201e6f81858SRajmohan Mani 		if (tb_port_is_enabled(port))
202e6f81858SRajmohan Mani 			continue;
203e6f81858SRajmohan Mani 		return port;
204e6f81858SRajmohan Mani 	}
205e6f81858SRajmohan Mani 	return NULL;
206e6f81858SRajmohan Mani }
207e6f81858SRajmohan Mani 
208e6f81858SRajmohan Mani static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
209e6f81858SRajmohan Mani 					 const struct tb_port *port)
210e6f81858SRajmohan Mani {
211e6f81858SRajmohan Mani 	struct tb_port *down;
212e6f81858SRajmohan Mani 
213e6f81858SRajmohan Mani 	down = usb4_switch_map_usb3_down(sw, port);
21477cfa40fSMika Westerberg 	if (down && !tb_usb3_port_is_enabled(down))
215e6f81858SRajmohan Mani 		return down;
21677cfa40fSMika Westerberg 	return NULL;
217e6f81858SRajmohan Mani }
218e6f81858SRajmohan Mani 
2190bd680cdSMika Westerberg static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
2200bd680cdSMika Westerberg 					struct tb_port *src_port,
2210bd680cdSMika Westerberg 					struct tb_port *dst_port)
2220bd680cdSMika Westerberg {
2230bd680cdSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
2240bd680cdSMika Westerberg 	struct tb_tunnel *tunnel;
2250bd680cdSMika Westerberg 
2260bd680cdSMika Westerberg 	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
2270bd680cdSMika Westerberg 		if (tunnel->type == type &&
2280bd680cdSMika Westerberg 		    ((src_port && src_port == tunnel->src_port) ||
2290bd680cdSMika Westerberg 		     (dst_port && dst_port == tunnel->dst_port))) {
2300bd680cdSMika Westerberg 			return tunnel;
2310bd680cdSMika Westerberg 		}
2320bd680cdSMika Westerberg 	}
2330bd680cdSMika Westerberg 
2340bd680cdSMika Westerberg 	return NULL;
2350bd680cdSMika Westerberg }
2360bd680cdSMika Westerberg 
2370bd680cdSMika Westerberg static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
2380bd680cdSMika Westerberg 						   struct tb_port *src_port,
2390bd680cdSMika Westerberg 						   struct tb_port *dst_port)
2400bd680cdSMika Westerberg {
2410bd680cdSMika Westerberg 	struct tb_port *port, *usb3_down;
2420bd680cdSMika Westerberg 	struct tb_switch *sw;
2430bd680cdSMika Westerberg 
2440bd680cdSMika Westerberg 	/* Pick the router that is deepest in the topology */
2450bd680cdSMika Westerberg 	if (dst_port->sw->config.depth > src_port->sw->config.depth)
2460bd680cdSMika Westerberg 		sw = dst_port->sw;
2470bd680cdSMika Westerberg 	else
2480bd680cdSMika Westerberg 		sw = src_port->sw;
2490bd680cdSMika Westerberg 
2500bd680cdSMika Westerberg 	/* Can't be the host router */
2510bd680cdSMika Westerberg 	if (sw == tb->root_switch)
2520bd680cdSMika Westerberg 		return NULL;
2530bd680cdSMika Westerberg 
2540bd680cdSMika Westerberg 	/* Find the downstream USB4 port that leads to this router */
2550bd680cdSMika Westerberg 	port = tb_port_at(tb_route(sw), tb->root_switch);
2560bd680cdSMika Westerberg 	/* Find the corresponding host router USB3 downstream port */
2570bd680cdSMika Westerberg 	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
2580bd680cdSMika Westerberg 	if (!usb3_down)
2590bd680cdSMika Westerberg 		return NULL;
2600bd680cdSMika Westerberg 
2610bd680cdSMika Westerberg 	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
2620bd680cdSMika Westerberg }
2630bd680cdSMika Westerberg 
/*
 * tb_available_bandwidth() - Available bandwidth along a path
 * @tb: Domain structure
 * @src_port: Start of the path
 * @dst_port: End of the path
 * @available_up: Minimum available upstream bandwidth (Mb/s) is placed here
 * @available_down: Minimum available downstream bandwidth (Mb/s) is placed here
 *
 * Walks every lane adapter between @src_port and @dst_port and computes
 * the minimum bandwidth left on any link after subtracting what existing
 * DP tunnels crossing that link consume, plus what the branch's first
 * hop USB3 tunnel consumes. Results are clamped to zero. Returns %0 on
 * success, negative errno otherwise.
 */
static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
	struct tb_port *dst_port, int *available_up, int *available_down)
{
	int usb3_consumed_up, usb3_consumed_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_port *port;

	tb_port_dbg(dst_port, "calculating available bandwidth\n");

	/*
	 * USB3 consumption of the branch is charged against every link
	 * below, so fetch it up front (zero when no USB3 tunnel exists).
	 */
	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel) {
		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
						   &usb3_consumed_down);
		if (ret)
			return ret;
	} else {
		usb3_consumed_up = 0;
		usb3_consumed_down = 0;
	}

	/* Start from 40000 Mb/s — presumably the 40 Gb/s link maximum */
	*available_up = *available_down = 40000;

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int link_speed, link_width, up_bw, down_bw;

		/* Only lane adapters carry link bandwidth */
		if (!tb_port_is_null(port))
			continue;

		if (tb_is_upstream_port(port)) {
			/* Upstream side: speed is cached on the switch */
			link_speed = port->sw->link_speed;
		} else {
			link_speed = tb_port_get_link_speed(port);
			if (link_speed < 0)
				return link_speed;
		}

		/* Bonded links aggregate two lanes */
		link_width = port->bonded ? 2 : 1;

		up_bw = link_speed * link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		up_bw -= up_bw / 10;
		down_bw = up_bw;

		tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);

		/*
		 * Find all DP tunnels that cross the port and reduce
		 * their consumed bandwidth from the available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int dp_consumed_up, dp_consumed_down;

			if (!tb_tunnel_is_dp(tunnel))
				continue;

			if (!tb_tunnel_port_on_path(tunnel, port))
				continue;

			ret = tb_tunnel_consumed_bandwidth(tunnel,
							   &dp_consumed_up,
							   &dp_consumed_down);
			if (ret)
				return ret;

			up_bw -= dp_consumed_up;
			down_bw -= dp_consumed_down;
		}

		/*
		 * If USB3 is tunneled from the host router down to the
		 * branch leading to port we need to take USB3 consumed
		 * bandwidth into account regardless whether it actually
		 * crosses the port.
		 */
		up_bw -= usb3_consumed_up;
		down_bw -= usb3_consumed_down;

		/* Keep the minimum over all links of the path */
		if (up_bw < *available_up)
			*available_up = up_bw;
		if (down_bw < *available_down)
			*available_down = down_bw;
	}

	/* Never report negative availability */
	if (*available_up < 0)
		*available_up = 0;
	if (*available_down < 0)
		*available_down = 0;

	return 0;
}
3560bd680cdSMika Westerberg 
/*
 * tb_release_unused_usb3_bandwidth() - Free spare USB3 bandwidth
 *
 * Asks the branch's first hop USB3 tunnel to give back the bandwidth it
 * is not actively using. Returns %0 when no such tunnel exists.
 */
static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return 0;

	return tb_tunnel_release_unused_bandwidth(tunnel);
}
3660bd680cdSMika Westerberg 
3670bd680cdSMika Westerberg static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
3680bd680cdSMika Westerberg 				      struct tb_port *dst_port)
3690bd680cdSMika Westerberg {
3700bd680cdSMika Westerberg 	int ret, available_up, available_down;
3710bd680cdSMika Westerberg 	struct tb_tunnel *tunnel;
3720bd680cdSMika Westerberg 
3730bd680cdSMika Westerberg 	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
3740bd680cdSMika Westerberg 	if (!tunnel)
3750bd680cdSMika Westerberg 		return;
3760bd680cdSMika Westerberg 
3770bd680cdSMika Westerberg 	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");
3780bd680cdSMika Westerberg 
3790bd680cdSMika Westerberg 	/*
3800bd680cdSMika Westerberg 	 * Calculate available bandwidth for the first hop USB3 tunnel.
3810bd680cdSMika Westerberg 	 * That determines the whole USB3 bandwidth for this branch.
3820bd680cdSMika Westerberg 	 */
3830bd680cdSMika Westerberg 	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
3840bd680cdSMika Westerberg 				     &available_up, &available_down);
3850bd680cdSMika Westerberg 	if (ret) {
3860bd680cdSMika Westerberg 		tb_warn(tb, "failed to calculate available bandwidth\n");
3870bd680cdSMika Westerberg 		return;
3880bd680cdSMika Westerberg 	}
3890bd680cdSMika Westerberg 
3900bd680cdSMika Westerberg 	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
3910bd680cdSMika Westerberg 	       available_up, available_down);
3920bd680cdSMika Westerberg 
3930bd680cdSMika Westerberg 	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
3940bd680cdSMika Westerberg }
3950bd680cdSMika Westerberg 
/*
 * tb_tunnel_usb3() - Tunnel USB3 from @sw up to its parent
 * @tb: Domain structure
 * @sw: Switch whose USB3 upstream adapter should be tunneled
 *
 * Sets up a USB3 tunnel between the parent's downstream USB3 adapter
 * and @sw's upstream USB3 adapter, sized to the currently available
 * bandwidth. Returns %0 also in the benign "nothing to do" cases (no
 * USB3 adapter, link is not USB4, no free downstream adapter, chain
 * incomplete). Negative errno on real failures.
 */
static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	/* Only tunnel over USB4 links */
	if (!sw->link_usb4)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_port_at(tb_route(sw), parent);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;

		/* Make all unused bandwidth available for the new tunnel */
		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
		if (ret)
			return ret;
	}

	ret = tb_available_bandwidth(tb, down, up, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	/* Hand any remaining released bandwidth back to USB3 */
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	/* Undo the earlier release on the error paths as well */
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return ret;
}
473e6f81858SRajmohan Mani 
474e6f81858SRajmohan Mani static int tb_create_usb3_tunnels(struct tb_switch *sw)
475e6f81858SRajmohan Mani {
476e6f81858SRajmohan Mani 	struct tb_port *port;
477e6f81858SRajmohan Mani 	int ret;
478e6f81858SRajmohan Mani 
479e6f81858SRajmohan Mani 	if (tb_route(sw)) {
480e6f81858SRajmohan Mani 		ret = tb_tunnel_usb3(sw->tb, sw);
481e6f81858SRajmohan Mani 		if (ret)
482e6f81858SRajmohan Mani 			return ret;
483e6f81858SRajmohan Mani 	}
484e6f81858SRajmohan Mani 
485e6f81858SRajmohan Mani 	tb_switch_for_each_port(sw, port) {
486e6f81858SRajmohan Mani 		if (!tb_port_has_remote(port))
487e6f81858SRajmohan Mani 			continue;
488e6f81858SRajmohan Mani 		ret = tb_create_usb3_tunnels(port->remote->sw);
489e6f81858SRajmohan Mani 		if (ret)
490e6f81858SRajmohan Mani 			return ret;
491e6f81858SRajmohan Mani 	}
492e6f81858SRajmohan Mani 
493e6f81858SRajmohan Mani 	return 0;
494e6f81858SRajmohan Mani }
495e6f81858SRajmohan Mani 
4969da672a4SAndreas Noever static void tb_scan_port(struct tb_port *port);
4979da672a4SAndreas Noever 
/**
 * tb_scan_switch() - scan for and initialize downstream switches
 * @sw: Switch whose ports are scanned
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);
}
5089da672a4SAndreas Noever 
/**
 * tb_scan_port() - check for and initialize switches below port
 * @port: Port to scan
 *
 * If a switch is found behind @port it is allocated, configured, added
 * to the bus and linked to @port, after which its own ports are scanned
 * recursively. If another domain is found instead, an XDomain
 * connection is created. DP OUT hotplug state is turned into a queued
 * hotplug event.
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	/* DP OUT adapter with HPD asserted but no tunnel yet */
	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		return;
	}

	tb_retimer_scan(port);

	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		return;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	tb_switch_lane_bonding_enable(sw);

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/* Scan upstream retimers */
	tb_retimer_scan(upstream_port);

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * any new.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);
	tb_scan_switch(sw);
}
6159da672a4SAndreas Noever 
6168afe909bSMika Westerberg static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
6178afe909bSMika Westerberg {
6180bd680cdSMika Westerberg 	struct tb_port *src_port, *dst_port;
6190bd680cdSMika Westerberg 	struct tb *tb;
6200bd680cdSMika Westerberg 
6218afe909bSMika Westerberg 	if (!tunnel)
6228afe909bSMika Westerberg 		return;
6238afe909bSMika Westerberg 
6248afe909bSMika Westerberg 	tb_tunnel_deactivate(tunnel);
6258afe909bSMika Westerberg 	list_del(&tunnel->list);
6268afe909bSMika Westerberg 
6270bd680cdSMika Westerberg 	tb = tunnel->tb;
6280bd680cdSMika Westerberg 	src_port = tunnel->src_port;
6290bd680cdSMika Westerberg 	dst_port = tunnel->dst_port;
6308afe909bSMika Westerberg 
6310bd680cdSMika Westerberg 	switch (tunnel->type) {
6320bd680cdSMika Westerberg 	case TB_TUNNEL_DP:
6330bd680cdSMika Westerberg 		/*
6340bd680cdSMika Westerberg 		 * In case of DP tunnel make sure the DP IN resource is
6350bd680cdSMika Westerberg 		 * deallocated properly.
6360bd680cdSMika Westerberg 		 */
6370bd680cdSMika Westerberg 		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
6380bd680cdSMika Westerberg 		fallthrough;
6390bd680cdSMika Westerberg 
6400bd680cdSMika Westerberg 	case TB_TUNNEL_USB3:
6410bd680cdSMika Westerberg 		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
6420bd680cdSMika Westerberg 		break;
6430bd680cdSMika Westerberg 
6440bd680cdSMika Westerberg 	default:
6450bd680cdSMika Westerberg 		/*
6460bd680cdSMika Westerberg 		 * PCIe and DMA tunnels do not consume guaranteed
6470bd680cdSMika Westerberg 		 * bandwidth.
6480bd680cdSMika Westerberg 		 */
6490bd680cdSMika Westerberg 		break;
6508afe909bSMika Westerberg 	}
6518afe909bSMika Westerberg 
6528afe909bSMika Westerberg 	tb_tunnel_free(tunnel);
6534f807e47SMika Westerberg }
6544f807e47SMika Westerberg 
6553364f0c1SAndreas Noever /**
6563364f0c1SAndreas Noever  * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
6573364f0c1SAndreas Noever  */
6583364f0c1SAndreas Noever static void tb_free_invalid_tunnels(struct tb *tb)
6593364f0c1SAndreas Noever {
6609d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
66193f36adeSMika Westerberg 	struct tb_tunnel *tunnel;
66293f36adeSMika Westerberg 	struct tb_tunnel *n;
6639d3cce0bSMika Westerberg 
6649d3cce0bSMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
6658afe909bSMika Westerberg 		if (tb_tunnel_is_invalid(tunnel))
6668afe909bSMika Westerberg 			tb_deactivate_and_free_tunnel(tunnel);
6673364f0c1SAndreas Noever 	}
6683364f0c1SAndreas Noever }
6693364f0c1SAndreas Noever 
6703364f0c1SAndreas Noever /**
67123dd5bb4SAndreas Noever  * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
67223dd5bb4SAndreas Noever  */
67323dd5bb4SAndreas Noever static void tb_free_unplugged_children(struct tb_switch *sw)
67423dd5bb4SAndreas Noever {
675b433d010SMika Westerberg 	struct tb_port *port;
676dfe40ca4SMika Westerberg 
677b433d010SMika Westerberg 	tb_switch_for_each_port(sw, port) {
678dfe40ca4SMika Westerberg 		if (!tb_port_has_remote(port))
67923dd5bb4SAndreas Noever 			continue;
680dfe40ca4SMika Westerberg 
68123dd5bb4SAndreas Noever 		if (port->remote->sw->is_unplugged) {
682dacb1287SKranthi Kuntala 			tb_retimer_remove_all(port);
6838afe909bSMika Westerberg 			tb_remove_dp_resources(port->remote->sw);
68491c0c120SMika Westerberg 			tb_switch_lane_bonding_disable(port->remote->sw);
685bfe778acSMika Westerberg 			tb_switch_remove(port->remote->sw);
68623dd5bb4SAndreas Noever 			port->remote = NULL;
687dfe40ca4SMika Westerberg 			if (port->dual_link_port)
688dfe40ca4SMika Westerberg 				port->dual_link_port->remote = NULL;
68923dd5bb4SAndreas Noever 		} else {
69023dd5bb4SAndreas Noever 			tb_free_unplugged_children(port->remote->sw);
69123dd5bb4SAndreas Noever 		}
69223dd5bb4SAndreas Noever 	}
69323dd5bb4SAndreas Noever }
69423dd5bb4SAndreas Noever 
/*
 * tb_find_pcie_down() - Find a usable PCIe downstream adapter under @sw
 * @sw: Switch to search
 * @port: Upstream-facing port the device was plugged to (used for mapping)
 *
 * Returns a PCIe downstream adapter that is not yet enabled, preferring a
 * fixed mapping from the physical port so the same plug location always
 * lands in the same PCIe hierarchy. Falls back to any unused PCIe down
 * adapter when no mapping applies or the mapped port is unusable.
 */
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		/* USB4 routers provide the mapping themselves */
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		/* Legacy host router: mapping depends on the controller */
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		/* Mapped port must really be a PCIe down adapter ... */
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		/* ... and not already carrying a tunnel */
		if (tb_pci_port_is_enabled(down))
			goto out;

		return down;
	}

out:
	/* No (usable) mapping — pick any unused PCIe down adapter */
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}
74399cabbb0SMika Westerberg 
744e876f34aSMika Westerberg static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
745e876f34aSMika Westerberg {
746e876f34aSMika Westerberg 	struct tb_port *host_port, *port;
747e876f34aSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
748e876f34aSMika Westerberg 
749e876f34aSMika Westerberg 	host_port = tb_route(in->sw) ?
750e876f34aSMika Westerberg 		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
751e876f34aSMika Westerberg 
752e876f34aSMika Westerberg 	list_for_each_entry(port, &tcm->dp_resources, list) {
753e876f34aSMika Westerberg 		if (!tb_port_is_dpout(port))
754e876f34aSMika Westerberg 			continue;
755e876f34aSMika Westerberg 
756e876f34aSMika Westerberg 		if (tb_port_is_enabled(port)) {
757e876f34aSMika Westerberg 			tb_port_dbg(port, "in use\n");
758e876f34aSMika Westerberg 			continue;
759e876f34aSMika Westerberg 		}
760e876f34aSMika Westerberg 
761e876f34aSMika Westerberg 		tb_port_dbg(port, "DP OUT available\n");
762e876f34aSMika Westerberg 
763e876f34aSMika Westerberg 		/*
764e876f34aSMika Westerberg 		 * Keep the DP tunnel under the topology starting from
765e876f34aSMika Westerberg 		 * the same host router downstream port.
766e876f34aSMika Westerberg 		 */
767e876f34aSMika Westerberg 		if (host_port && tb_route(port->sw)) {
768e876f34aSMika Westerberg 			struct tb_port *p;
769e876f34aSMika Westerberg 
770e876f34aSMika Westerberg 			p = tb_port_at(tb_route(port->sw), tb->root_switch);
771e876f34aSMika Westerberg 			if (p != host_port)
772e876f34aSMika Westerberg 				continue;
773e876f34aSMika Westerberg 		}
774e876f34aSMika Westerberg 
775e876f34aSMika Westerberg 		return port;
776e876f34aSMika Westerberg 	}
777e876f34aSMika Westerberg 
778e876f34aSMika Westerberg 	return NULL;
779e876f34aSMika Westerberg }
780e876f34aSMika Westerberg 
/*
 * tb_tunnel_dp() - Try to establish one new DP tunnel
 * @tb: Domain structure
 *
 * Walks the tracked DP resources for an inactive DP IN adapter, pairs it
 * with a suitable DP OUT via tb_find_dp_out(), and sets up a DP tunnel
 * between them sized by the currently available bandwidth. Silently does
 * nothing when no pair is available. On failure all acquired resources
 * (DP IN allocation, released USB3 bandwidth) are rolled back in reverse
 * order through the error labels below.
 */
static void tb_tunnel_dp(struct tb *tb)
{
	int available_up, available_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpin(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP IN available\n");

		/* Only commit to this DP IN if a matching DP OUT exists */
		out = tb_find_dp_out(tb, port);
		if (out) {
			in = port;
			break;
		}
	}

	if (!in) {
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
		return;
	}
	if (!out) {
		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
		return;
	}

	/* Claim the DP IN resource; undone via err_dealloc_dp on failure */
	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		return;
	}

	/* Make all unused USB3 bandwidth available for the new DP tunnel */
	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
	if (ret) {
		tb_warn(tb, "failed to release unused bandwidth\n");
		goto err_dealloc_dp;
	}

	ret = tb_available_bandwidth(tb, in, out, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
	       available_up, available_down);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	/* Let USB3 take back whatever the DP tunnel did not consume */
	tb_reclaim_usb3_bandwidth(tb, in, out);
	return;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	tb_reclaim_usb3_bandwidth(tb, in, out);
err_dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
}
8654f807e47SMika Westerberg 
8668afe909bSMika Westerberg static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
8674f807e47SMika Westerberg {
8688afe909bSMika Westerberg 	struct tb_port *in, *out;
8698afe909bSMika Westerberg 	struct tb_tunnel *tunnel;
8708afe909bSMika Westerberg 
8718afe909bSMika Westerberg 	if (tb_port_is_dpin(port)) {
8728afe909bSMika Westerberg 		tb_port_dbg(port, "DP IN resource unavailable\n");
8738afe909bSMika Westerberg 		in = port;
8748afe909bSMika Westerberg 		out = NULL;
8758afe909bSMika Westerberg 	} else {
8768afe909bSMika Westerberg 		tb_port_dbg(port, "DP OUT resource unavailable\n");
8778afe909bSMika Westerberg 		in = NULL;
8788afe909bSMika Westerberg 		out = port;
8798afe909bSMika Westerberg 	}
8808afe909bSMika Westerberg 
8818afe909bSMika Westerberg 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
8828afe909bSMika Westerberg 	tb_deactivate_and_free_tunnel(tunnel);
8838afe909bSMika Westerberg 	list_del_init(&port->list);
8848afe909bSMika Westerberg 
8858afe909bSMika Westerberg 	/*
8868afe909bSMika Westerberg 	 * See if there is another DP OUT port that can be used for
8878afe909bSMika Westerberg 	 * to create another tunnel.
8888afe909bSMika Westerberg 	 */
8898afe909bSMika Westerberg 	tb_tunnel_dp(tb);
8908afe909bSMika Westerberg }
8918afe909bSMika Westerberg 
8928afe909bSMika Westerberg static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
8938afe909bSMika Westerberg {
8948afe909bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
8958afe909bSMika Westerberg 	struct tb_port *p;
8968afe909bSMika Westerberg 
8978afe909bSMika Westerberg 	if (tb_port_is_enabled(port))
8988afe909bSMika Westerberg 		return;
8998afe909bSMika Westerberg 
9008afe909bSMika Westerberg 	list_for_each_entry(p, &tcm->dp_resources, list) {
9018afe909bSMika Westerberg 		if (p == port)
9028afe909bSMika Westerberg 			return;
9038afe909bSMika Westerberg 	}
9048afe909bSMika Westerberg 
9058afe909bSMika Westerberg 	tb_port_dbg(port, "DP %s resource available\n",
9068afe909bSMika Westerberg 		    tb_port_is_dpin(port) ? "IN" : "OUT");
9078afe909bSMika Westerberg 	list_add_tail(&port->list, &tcm->dp_resources);
9088afe909bSMika Westerberg 
9098afe909bSMika Westerberg 	/* Look for suitable DP IN <-> DP OUT pairs now */
9108afe909bSMika Westerberg 	tb_tunnel_dp(tb);
9114f807e47SMika Westerberg }
9124f807e47SMika Westerberg 
91381a2e3e4SMika Westerberg static void tb_disconnect_and_release_dp(struct tb *tb)
91481a2e3e4SMika Westerberg {
91581a2e3e4SMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
91681a2e3e4SMika Westerberg 	struct tb_tunnel *tunnel, *n;
91781a2e3e4SMika Westerberg 
91881a2e3e4SMika Westerberg 	/*
91981a2e3e4SMika Westerberg 	 * Tear down all DP tunnels and release their resources. They
92081a2e3e4SMika Westerberg 	 * will be re-established after resume based on plug events.
92181a2e3e4SMika Westerberg 	 */
92281a2e3e4SMika Westerberg 	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
92381a2e3e4SMika Westerberg 		if (tb_tunnel_is_dp(tunnel))
92481a2e3e4SMika Westerberg 			tb_deactivate_and_free_tunnel(tunnel);
92581a2e3e4SMika Westerberg 	}
92681a2e3e4SMika Westerberg 
92781a2e3e4SMika Westerberg 	while (!list_empty(&tcm->dp_resources)) {
92881a2e3e4SMika Westerberg 		struct tb_port *port;
92981a2e3e4SMika Westerberg 
93081a2e3e4SMika Westerberg 		port = list_first_entry(&tcm->dp_resources,
93181a2e3e4SMika Westerberg 					struct tb_port, list);
93281a2e3e4SMika Westerberg 		list_del_init(&port->list);
93381a2e3e4SMika Westerberg 	}
93481a2e3e4SMika Westerberg }
93581a2e3e4SMika Westerberg 
93699cabbb0SMika Westerberg static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
93799cabbb0SMika Westerberg {
93899cabbb0SMika Westerberg 	struct tb_port *up, *down, *port;
9399d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
94099cabbb0SMika Westerberg 	struct tb_switch *parent_sw;
94199cabbb0SMika Westerberg 	struct tb_tunnel *tunnel;
9429d3cce0bSMika Westerberg 
943386e5e29SMika Westerberg 	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
94499cabbb0SMika Westerberg 	if (!up)
94599cabbb0SMika Westerberg 		return 0;
9463364f0c1SAndreas Noever 
94799cabbb0SMika Westerberg 	/*
94899cabbb0SMika Westerberg 	 * Look up available down port. Since we are chaining it should
94999cabbb0SMika Westerberg 	 * be found right above this switch.
95099cabbb0SMika Westerberg 	 */
95199cabbb0SMika Westerberg 	parent_sw = tb_to_switch(sw->dev.parent);
95299cabbb0SMika Westerberg 	port = tb_port_at(tb_route(sw), parent_sw);
95399cabbb0SMika Westerberg 	down = tb_find_pcie_down(parent_sw, port);
95499cabbb0SMika Westerberg 	if (!down)
95599cabbb0SMika Westerberg 		return 0;
9563364f0c1SAndreas Noever 
95799cabbb0SMika Westerberg 	tunnel = tb_tunnel_alloc_pci(tb, up, down);
95899cabbb0SMika Westerberg 	if (!tunnel)
95999cabbb0SMika Westerberg 		return -ENOMEM;
9603364f0c1SAndreas Noever 
96193f36adeSMika Westerberg 	if (tb_tunnel_activate(tunnel)) {
96299cabbb0SMika Westerberg 		tb_port_info(up,
9633364f0c1SAndreas Noever 			     "PCIe tunnel activation failed, aborting\n");
96493f36adeSMika Westerberg 		tb_tunnel_free(tunnel);
96599cabbb0SMika Westerberg 		return -EIO;
9663364f0c1SAndreas Noever 	}
9673364f0c1SAndreas Noever 
96899cabbb0SMika Westerberg 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
96999cabbb0SMika Westerberg 	return 0;
9703364f0c1SAndreas Noever }
9719da672a4SAndreas Noever 
9727ea4cd6bSMika Westerberg static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
9737ea4cd6bSMika Westerberg {
9747ea4cd6bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
9757ea4cd6bSMika Westerberg 	struct tb_port *nhi_port, *dst_port;
9767ea4cd6bSMika Westerberg 	struct tb_tunnel *tunnel;
9777ea4cd6bSMika Westerberg 	struct tb_switch *sw;
9787ea4cd6bSMika Westerberg 
9797ea4cd6bSMika Westerberg 	sw = tb_to_switch(xd->dev.parent);
9807ea4cd6bSMika Westerberg 	dst_port = tb_port_at(xd->route, sw);
981386e5e29SMika Westerberg 	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
9827ea4cd6bSMika Westerberg 
9837ea4cd6bSMika Westerberg 	mutex_lock(&tb->lock);
9847ea4cd6bSMika Westerberg 	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
9857ea4cd6bSMika Westerberg 				     xd->transmit_path, xd->receive_ring,
9867ea4cd6bSMika Westerberg 				     xd->receive_path);
9877ea4cd6bSMika Westerberg 	if (!tunnel) {
9887ea4cd6bSMika Westerberg 		mutex_unlock(&tb->lock);
9897ea4cd6bSMika Westerberg 		return -ENOMEM;
9907ea4cd6bSMika Westerberg 	}
9917ea4cd6bSMika Westerberg 
9927ea4cd6bSMika Westerberg 	if (tb_tunnel_activate(tunnel)) {
9937ea4cd6bSMika Westerberg 		tb_port_info(nhi_port,
9947ea4cd6bSMika Westerberg 			     "DMA tunnel activation failed, aborting\n");
9957ea4cd6bSMika Westerberg 		tb_tunnel_free(tunnel);
9967ea4cd6bSMika Westerberg 		mutex_unlock(&tb->lock);
9977ea4cd6bSMika Westerberg 		return -EIO;
9987ea4cd6bSMika Westerberg 	}
9997ea4cd6bSMika Westerberg 
10007ea4cd6bSMika Westerberg 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
10017ea4cd6bSMika Westerberg 	mutex_unlock(&tb->lock);
10027ea4cd6bSMika Westerberg 	return 0;
10037ea4cd6bSMika Westerberg }
10047ea4cd6bSMika Westerberg 
10057ea4cd6bSMika Westerberg static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
10067ea4cd6bSMika Westerberg {
10077ea4cd6bSMika Westerberg 	struct tb_port *dst_port;
10088afe909bSMika Westerberg 	struct tb_tunnel *tunnel;
10097ea4cd6bSMika Westerberg 	struct tb_switch *sw;
10107ea4cd6bSMika Westerberg 
10117ea4cd6bSMika Westerberg 	sw = tb_to_switch(xd->dev.parent);
10127ea4cd6bSMika Westerberg 	dst_port = tb_port_at(xd->route, sw);
10137ea4cd6bSMika Westerberg 
10147ea4cd6bSMika Westerberg 	/*
10157ea4cd6bSMika Westerberg 	 * It is possible that the tunnel was already teared down (in
10167ea4cd6bSMika Westerberg 	 * case of cable disconnect) so it is fine if we cannot find it
10177ea4cd6bSMika Westerberg 	 * here anymore.
10187ea4cd6bSMika Westerberg 	 */
10198afe909bSMika Westerberg 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
10208afe909bSMika Westerberg 	tb_deactivate_and_free_tunnel(tunnel);
10217ea4cd6bSMika Westerberg }
10227ea4cd6bSMika Westerberg 
10237ea4cd6bSMika Westerberg static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
10247ea4cd6bSMika Westerberg {
10257ea4cd6bSMika Westerberg 	if (!xd->is_unplugged) {
10267ea4cd6bSMika Westerberg 		mutex_lock(&tb->lock);
10277ea4cd6bSMika Westerberg 		__tb_disconnect_xdomain_paths(tb, xd);
10287ea4cd6bSMika Westerberg 		mutex_unlock(&tb->lock);
10297ea4cd6bSMika Westerberg 	}
10307ea4cd6bSMika Westerberg 	return 0;
10317ea4cd6bSMika Westerberg }
10327ea4cd6bSMika Westerberg 
1033d6cc51cdSAndreas Noever /* hotplug handling */
1034d6cc51cdSAndreas Noever 
/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq. Resolves the event's route/port to a switch port,
 * then either tears down whatever was behind the port (switch, XDomain,
 * or DP resource) on unplug, or scans/registers the new arrival on plug.
 * All work is done under tb->lock; the event struct is freed at the end.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;
	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	/* Takes a reference on the switch; dropped at put_sw */
	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		/* Upstream events are handled when the parent port changes */
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	if (ev->unplug) {
		tb_retimer_remove_all(port);

		if (tb_port_has_remote(port)) {
			/* A whole switch (sub-tree) went away */
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_tmu_disable(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
			/* Maybe we can create another DP tunnel */
			tb_tunnel_dp(tb);
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * the path below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd);
			tb_xdomain_put(xd);
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			/* DP adapter lost its resource */
			tb_dp_resource_unavailable(tb, port);
		} else {
			tb_port_dbg(port,
				   "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else {
		if (tb_port_is_null(port)) {
			/* New device (or chain) plugged in — scan it */
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_available(tb, port);
		}
	}

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);
	kfree(ev);
}
1127d6cc51cdSAndreas Noever 
1128d6cc51cdSAndreas Noever /**
1129d6cc51cdSAndreas Noever  * tb_schedule_hotplug_handler() - callback function for the control channel
1130d6cc51cdSAndreas Noever  *
1131d6cc51cdSAndreas Noever  * Delegates to tb_handle_hotplug.
1132d6cc51cdSAndreas Noever  */
113381a54b5eSMika Westerberg static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
113481a54b5eSMika Westerberg 			    const void *buf, size_t size)
1135d6cc51cdSAndreas Noever {
113681a54b5eSMika Westerberg 	const struct cfg_event_pkg *pkg = buf;
113781a54b5eSMika Westerberg 	u64 route;
113881a54b5eSMika Westerberg 
113981a54b5eSMika Westerberg 	if (type != TB_CFG_PKG_EVENT) {
114081a54b5eSMika Westerberg 		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
114181a54b5eSMika Westerberg 		return;
114281a54b5eSMika Westerberg 	}
114381a54b5eSMika Westerberg 
114481a54b5eSMika Westerberg 	route = tb_cfg_get_route(&pkg->header);
114581a54b5eSMika Westerberg 
1146210e9f56SMika Westerberg 	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
114781a54b5eSMika Westerberg 		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
114881a54b5eSMika Westerberg 			pkg->port);
114981a54b5eSMika Westerberg 	}
115081a54b5eSMika Westerberg 
11514f807e47SMika Westerberg 	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
1152d6cc51cdSAndreas Noever }
1153d6cc51cdSAndreas Noever 
11549d3cce0bSMika Westerberg static void tb_stop(struct tb *tb)
1155d6cc51cdSAndreas Noever {
11569d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
115793f36adeSMika Westerberg 	struct tb_tunnel *tunnel;
115893f36adeSMika Westerberg 	struct tb_tunnel *n;
11593364f0c1SAndreas Noever 
11603364f0c1SAndreas Noever 	/* tunnels are only present after everything has been initialized */
11617ea4cd6bSMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
11627ea4cd6bSMika Westerberg 		/*
11637ea4cd6bSMika Westerberg 		 * DMA tunnels require the driver to be functional so we
11647ea4cd6bSMika Westerberg 		 * tear them down. Other protocol tunnels can be left
11657ea4cd6bSMika Westerberg 		 * intact.
11667ea4cd6bSMika Westerberg 		 */
11677ea4cd6bSMika Westerberg 		if (tb_tunnel_is_dma(tunnel))
11687ea4cd6bSMika Westerberg 			tb_tunnel_deactivate(tunnel);
116993f36adeSMika Westerberg 		tb_tunnel_free(tunnel);
11707ea4cd6bSMika Westerberg 	}
1171bfe778acSMika Westerberg 	tb_switch_remove(tb->root_switch);
11729d3cce0bSMika Westerberg 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
1173d6cc51cdSAndreas Noever }
1174d6cc51cdSAndreas Noever 
117599cabbb0SMika Westerberg static int tb_scan_finalize_switch(struct device *dev, void *data)
117699cabbb0SMika Westerberg {
117799cabbb0SMika Westerberg 	if (tb_is_switch(dev)) {
117899cabbb0SMika Westerberg 		struct tb_switch *sw = tb_to_switch(dev);
117999cabbb0SMika Westerberg 
118099cabbb0SMika Westerberg 		/*
118199cabbb0SMika Westerberg 		 * If we found that the switch was already setup by the
118299cabbb0SMika Westerberg 		 * boot firmware, mark it as authorized now before we
118399cabbb0SMika Westerberg 		 * send uevent to userspace.
118499cabbb0SMika Westerberg 		 */
118599cabbb0SMika Westerberg 		if (sw->boot)
118699cabbb0SMika Westerberg 			sw->authorized = 1;
118799cabbb0SMika Westerberg 
118899cabbb0SMika Westerberg 		dev_set_uevent_suppress(dev, false);
118999cabbb0SMika Westerberg 		kobject_uevent(&dev->kobj, KOBJ_ADD);
119099cabbb0SMika Westerberg 		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
119199cabbb0SMika Westerberg 	}
119299cabbb0SMika Westerberg 
119399cabbb0SMika Westerberg 	return 0;
119499cabbb0SMika Westerberg }
119599cabbb0SMika Westerberg 
/*
 * tb_start() - Start the software connection manager
 * @tb: Domain to start
 *
 * Allocates, configures and announces the root switch, then performs the
 * initial topology scan: discover pre-existing tunnels set up by boot
 * firmware, create missing USB3 tunnels, register DP resources and
 * finally enable hotplug event processing. The ordering below matters —
 * discovery must happen before new tunnels are created, and hotplug is
 * enabled only once the initial state is fully built.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available so disable firmware upgrade of the
	 * root switch.
	 */
	tb->root_switch->no_nvm_upgrade = true;

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		/* tb_switch_add() failure releases via the put below */
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Enable TMU if it is off */
	tb_switch_tmu_enable(tb->root_switch);
	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb->root_switch);
	/*
	 * If the boot firmware did not create USB 3.x tunnels create them
	 * now for the whole topology.
	 */
	tb_create_usb3_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}
1246d6cc51cdSAndreas Noever 
12479d3cce0bSMika Westerberg static int tb_suspend_noirq(struct tb *tb)
124823dd5bb4SAndreas Noever {
12499d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
12509d3cce0bSMika Westerberg 
1251daa5140fSMika Westerberg 	tb_dbg(tb, "suspending...\n");
125281a2e3e4SMika Westerberg 	tb_disconnect_and_release_dp(tb);
125323dd5bb4SAndreas Noever 	tb_switch_suspend(tb->root_switch);
12549d3cce0bSMika Westerberg 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
1255daa5140fSMika Westerberg 	tb_dbg(tb, "suspend finished\n");
12569d3cce0bSMika Westerberg 
12579d3cce0bSMika Westerberg 	return 0;
125823dd5bb4SAndreas Noever }
125923dd5bb4SAndreas Noever 
126091c0c120SMika Westerberg static void tb_restore_children(struct tb_switch *sw)
126191c0c120SMika Westerberg {
126291c0c120SMika Westerberg 	struct tb_port *port;
126391c0c120SMika Westerberg 
1264cf29b9afSRajmohan Mani 	if (tb_enable_tmu(sw))
1265cf29b9afSRajmohan Mani 		tb_sw_warn(sw, "failed to restore TMU configuration\n");
1266cf29b9afSRajmohan Mani 
126791c0c120SMika Westerberg 	tb_switch_for_each_port(sw, port) {
126891c0c120SMika Westerberg 		if (!tb_port_has_remote(port))
126991c0c120SMika Westerberg 			continue;
127091c0c120SMika Westerberg 
12712ca3263aSMika Westerberg 		tb_switch_lane_bonding_enable(port->remote->sw);
127291c0c120SMika Westerberg 
127391c0c120SMika Westerberg 		tb_restore_children(port->remote->sw);
127491c0c120SMika Westerberg 	}
127591c0c120SMika Westerberg }
127691c0c120SMika Westerberg 
12779d3cce0bSMika Westerberg static int tb_resume_noirq(struct tb *tb)
127823dd5bb4SAndreas Noever {
12799d3cce0bSMika Westerberg 	struct tb_cm *tcm = tb_priv(tb);
128093f36adeSMika Westerberg 	struct tb_tunnel *tunnel, *n;
12819d3cce0bSMika Westerberg 
1282daa5140fSMika Westerberg 	tb_dbg(tb, "resuming...\n");
128323dd5bb4SAndreas Noever 
128423dd5bb4SAndreas Noever 	/* remove any pci devices the firmware might have setup */
1285356b6c4eSMika Westerberg 	tb_switch_reset(tb->root_switch);
128623dd5bb4SAndreas Noever 
128723dd5bb4SAndreas Noever 	tb_switch_resume(tb->root_switch);
128823dd5bb4SAndreas Noever 	tb_free_invalid_tunnels(tb);
128923dd5bb4SAndreas Noever 	tb_free_unplugged_children(tb->root_switch);
129091c0c120SMika Westerberg 	tb_restore_children(tb->root_switch);
12919d3cce0bSMika Westerberg 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
129293f36adeSMika Westerberg 		tb_tunnel_restart(tunnel);
12939d3cce0bSMika Westerberg 	if (!list_empty(&tcm->tunnel_list)) {
129423dd5bb4SAndreas Noever 		/*
129523dd5bb4SAndreas Noever 		 * the pcie links need some time to get going.
129623dd5bb4SAndreas Noever 		 * 100ms works for me...
129723dd5bb4SAndreas Noever 		 */
1298daa5140fSMika Westerberg 		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
129923dd5bb4SAndreas Noever 		msleep(100);
130023dd5bb4SAndreas Noever 	}
130123dd5bb4SAndreas Noever 	 /* Allow tb_handle_hotplug to progress events */
13029d3cce0bSMika Westerberg 	tcm->hotplug_active = true;
1303daa5140fSMika Westerberg 	tb_dbg(tb, "resume finished\n");
13049d3cce0bSMika Westerberg 
13059d3cce0bSMika Westerberg 	return 0;
13069d3cce0bSMika Westerberg }
13079d3cce0bSMika Westerberg 
13087ea4cd6bSMika Westerberg static int tb_free_unplugged_xdomains(struct tb_switch *sw)
13097ea4cd6bSMika Westerberg {
1310b433d010SMika Westerberg 	struct tb_port *port;
1311b433d010SMika Westerberg 	int ret = 0;
13127ea4cd6bSMika Westerberg 
1313b433d010SMika Westerberg 	tb_switch_for_each_port(sw, port) {
13147ea4cd6bSMika Westerberg 		if (tb_is_upstream_port(port))
13157ea4cd6bSMika Westerberg 			continue;
13167ea4cd6bSMika Westerberg 		if (port->xdomain && port->xdomain->is_unplugged) {
1317dacb1287SKranthi Kuntala 			tb_retimer_remove_all(port);
13187ea4cd6bSMika Westerberg 			tb_xdomain_remove(port->xdomain);
13197ea4cd6bSMika Westerberg 			port->xdomain = NULL;
13207ea4cd6bSMika Westerberg 			ret++;
13217ea4cd6bSMika Westerberg 		} else if (port->remote) {
13227ea4cd6bSMika Westerberg 			ret += tb_free_unplugged_xdomains(port->remote->sw);
13237ea4cd6bSMika Westerberg 		}
13247ea4cd6bSMika Westerberg 	}
13257ea4cd6bSMika Westerberg 
13267ea4cd6bSMika Westerberg 	return ret;
13277ea4cd6bSMika Westerberg }
13287ea4cd6bSMika Westerberg 
13297ea4cd6bSMika Westerberg static void tb_complete(struct tb *tb)
13307ea4cd6bSMika Westerberg {
13317ea4cd6bSMika Westerberg 	/*
13327ea4cd6bSMika Westerberg 	 * Release any unplugged XDomains and if there is a case where
13337ea4cd6bSMika Westerberg 	 * another domain is swapped in place of unplugged XDomain we
13347ea4cd6bSMika Westerberg 	 * need to run another rescan.
13357ea4cd6bSMika Westerberg 	 */
13367ea4cd6bSMika Westerberg 	mutex_lock(&tb->lock);
13377ea4cd6bSMika Westerberg 	if (tb_free_unplugged_xdomains(tb->root_switch))
13387ea4cd6bSMika Westerberg 		tb_scan_switch(tb->root_switch);
13397ea4cd6bSMika Westerberg 	mutex_unlock(&tb->lock);
13407ea4cd6bSMika Westerberg }
13417ea4cd6bSMika Westerberg 
/* Callbacks for the software (native) connection manager */
static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.complete = tb_complete,
	.handle_event = tb_handle_event,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};
13539d3cce0bSMika Westerberg 
13549d3cce0bSMika Westerberg struct tb *tb_probe(struct tb_nhi *nhi)
13559d3cce0bSMika Westerberg {
13569d3cce0bSMika Westerberg 	struct tb_cm *tcm;
13579d3cce0bSMika Westerberg 	struct tb *tb;
13589d3cce0bSMika Westerberg 
13599d3cce0bSMika Westerberg 	tb = tb_domain_alloc(nhi, sizeof(*tcm));
13609d3cce0bSMika Westerberg 	if (!tb)
13619d3cce0bSMika Westerberg 		return NULL;
13629d3cce0bSMika Westerberg 
136399cabbb0SMika Westerberg 	tb->security_level = TB_SECURITY_USER;
13649d3cce0bSMika Westerberg 	tb->cm_ops = &tb_cm_ops;
13659d3cce0bSMika Westerberg 
13669d3cce0bSMika Westerberg 	tcm = tb_priv(tb);
13679d3cce0bSMika Westerberg 	INIT_LIST_HEAD(&tcm->tunnel_list);
13688afe909bSMika Westerberg 	INIT_LIST_HEAD(&tcm->dp_resources);
13699d3cce0bSMika Westerberg 
13709d3cce0bSMika Westerberg 	return tb;
137123dd5bb4SAndreas Noever }
1372