// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
};

struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

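/*
 * Allocate a hotplug event for the given router/port and queue it to
 * the domain workqueue where tb_handle_hotplug() processes it. If the
 * allocation fails the event is silently dropped.
 */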
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

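/*
 * Walk all ports of @sw and record every DP IN adapter that reports an
 * available DP resource in the connection manager dp_resources list.
 */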
static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}

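/*
 * Remove all DP resources that belong to @sw and its children from the
 * dp_resources list. Child routers are cleared first so resources are
 * released bottom-up.
 */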
static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP OUT resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}

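/*
 * Discover the DP, PCIe and USB3 tunnels that the boot firmware has
 * already set up, starting from @sw and recursing into all downstream
 * routers. Switches on a discovered PCIe tunnel path are marked as
 * boot-configured.
 */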
static void tb_discover_tunnels(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port);
			break;

		default:
			break;
		}

		if (!tunnel)
			continue;

		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		}

		list_add_tail(&tunnel->list, &tcm->tunnel_list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_discover_tunnels(port->remote->sw);
	}
}

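/*
 * If there is no XDomain registered yet for the route behind @port,
 * allocate one and add it to the bus. This is used when the link leads
 * to another Thunderbolt domain instead of a router we can enumerate.
 */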
static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_xdomain_add(xd);
	}
}

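/*
 * Make sure the TMU of @sw is enabled. If it is not already running in
 * the correct mode it is first disabled, its time posted and then
 * enabled again.
 */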
static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

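/*
 * Return the USB3 downstream adapter of @sw that maps to @port, or
 * NULL if there is none or it is already enabled.
 */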
static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down && !tb_usb3_port_is_enabled(down))
		return down;
	return NULL;
}

static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	if (!sw->link_usb4)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_port_at(tb_route(sw), parent);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;
	}

	tunnel = tb_tunnel_alloc_usb3(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 * @sw: Switch to scan
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);
}

/**
 * tb_scan_port() - check for and initialize switches below port
 * @port: Port to scan
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		return;
	}
	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. We also allow
		 * the other domain to be connected to a max depth
		 * switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		return;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	if (tb_switch_lane_bonding_enable(sw))
		tb_sw_warn(sw, "failed to enable lane bonding\n");

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * any new.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);
	tb_scan_switch(sw);
}

static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}

static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	/*
	 * In case of DP tunnel make sure the DP IN resource is deallocated
	 * properly.
	 */
	if (tb_tunnel_is_dp(tunnel)) {
		struct tb_port *in = tunnel->src_port;

		tb_switch_dealloc_dp_resource(in->sw, in);
	}

	tb_tunnel_free(tunnel);
}

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

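/*
 * Return the downstream PCIe adapter of @sw to use when tunneling to
 * the device behind @port. USB4 routers use the standard mapping,
 * legacy host routers use a hard-coded per-controller mapping, and as
 * a last resort the first unused PCIe downstream adapter is picked.
 */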
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		if (WARN_ON(tb_pci_port_is_enabled(down)))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

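/*
 * Estimate the bandwidth (in Mb/s) available for a new DP tunnel from
 * @in to @out: walk from @out towards @in and for each hop take the
 * link bandwidth minus a 10% guard band and minus what the tunnels
 * already crossing that switch consume. Return the minimum over all
 * hops, starting from a 40000 Mb/s cap.
 */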
static int tb_available_bw(struct tb_cm *tcm, struct tb_port *in,
			   struct tb_port *out)
{
	struct tb_switch *sw = out->sw;
	struct tb_tunnel *tunnel;
	int bw, available_bw = 40000;

	while (sw && sw != in->sw) {
		bw = sw->link_speed * sw->link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		bw -= bw / 10;

		/*
		 * Check for any active DP tunnels that go through this
		 * switch and reduce their consumed bandwidth from
		 * available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int consumed_bw;

			if (!tb_tunnel_switch_on_path(tunnel, sw))
				continue;

			consumed_bw = tb_tunnel_consumed_bandwidth(tunnel);
			if (consumed_bw < 0)
				return consumed_bw;

			bw -= consumed_bw;
		}

		if (bw < available_bw)
			available_bw = bw;

		sw = tb_switch_parent(sw);
	}

	return available_bw;
}

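/*
 * Pick an inactive DP OUT adapter from the dp_resources list to pair
 * with @in. If @in sits behind a host router downstream port, only DP
 * OUT adapters below the same downstream port are considered so the
 * tunnel stays within one topology branch.
 */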
static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
	struct tb_port *host_port, *port;
	struct tb_cm *tcm = tb_priv(tb);

	host_port = tb_route(in->sw) ?
		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;

	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpout(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP OUT available\n");

		/*
		 * Keep the DP tunnel under the topology starting from
		 * the same host router downstream port.
		 */
		if (host_port && tb_route(port->sw)) {
			struct tb_port *p;

			p = tb_port_at(tb_route(port->sw), tb->root_switch);
			if (p != host_port)
				continue;
		}

		return port;
	}

	return NULL;
}

static void tb_tunnel_dp(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	struct tb_tunnel *tunnel;
	int available_bw;

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpin(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP IN available\n");

		out = tb_find_dp_out(tb, port);
		if (out) {
			in = port;
			break;
		}
	}

	if (!in) {
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
		return;
	}
	if (!out) {
		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
		return;
	}

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		return;
	}

	/* Calculate available bandwidth between in and out */
	available_bw = tb_available_bw(tcm, in, out);
	if (available_bw < 0) {
		tb_warn(tb, "failed to determine available bandwidth\n");
		return;
	}

	tb_dbg(tb, "available bandwidth for new DP tunnel %u Mb/s\n",
	       available_bw);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, available_bw);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto dealloc_dp;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		goto dealloc_dp;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return;

dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
}

static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	if (tb_port_is_dpin(port)) {
		tb_port_dbg(port, "DP IN resource unavailable\n");
		in = port;
		out = NULL;
	} else {
		tb_port_dbg(port, "DP OUT resource unavailable\n");
		in = NULL;
		out = port;
	}

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
	tb_deactivate_and_free_tunnel(tunnel);
	list_del_init(&port->list);

	/*
	 * See if there is another DP OUT port that can be used to
	 * create another tunnel.
	 */
	tb_tunnel_dp(tb);
}

static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	if (tb_port_is_enabled(port))
		return;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);

	/* Look for suitable DP IN <-> DP OUT pairs now */
	tb_tunnel_dp(tb);
}

static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *parent_sw;
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	parent_sw = tb_to_switch(sw->dev.parent);
	port = tb_port_at(tb_route(sw), parent_sw);
	down = tb_find_pcie_down(parent_sw, port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

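/*
 * Set up a DMA tunnel between the host NHI adapter and the XDomain
 * connected at @xd->route, using the transmit/receive rings and paths
 * negotiated in @xd. This is the ->approve_xdomain_paths hook of this
 * connection manager.
 */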
static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	mutex_lock(&tb->lock);
	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
				     xd->transmit_path, xd->receive_ring,
				     xd->receive_path);
	if (!tunnel) {
		mutex_unlock(&tb->lock);
		return -ENOMEM;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(nhi_port,
			     "DMA tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		mutex_unlock(&tb->lock);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	mutex_unlock(&tb->lock);
	return 0;
}

static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_port *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);

	/*
	 * It is possible that the tunnel was already torn down (in
	 * case of cable disconnect) so it is fine if we cannot find it
	 * here anymore.
	 */
	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
	tb_deactivate_and_free_tunnel(tunnel);
}

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!xd->is_unplugged) {
		mutex_lock(&tb->lock);
		__tb_disconnect_xdomain_paths(tb, xd);
		mutex_unlock(&tb->lock);
	}
	return 0;
}

/* hotplug handling */

/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;
	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	if (ev->unplug) {
		if (tb_port_has_remote(port)) {
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_tmu_disable(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
			/* Maybe we can create another DP tunnel */
			tb_tunnel_dp(tb);
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * the path below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd);
			tb_xdomain_put(xd);
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_unavailable(tb, port);
		} else {
			tb_port_dbg(port,
				   "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else {
		if (tb_port_is_null(port)) {
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_available(tb, port);
		}
	}

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);
	kfree(ev);
}

/**
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route;

	if (type != TB_CFG_PKG_EVENT) {
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	route = tb_cfg_get_route(&pkg->header);

	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already setup by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}

static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available so disable firmware upgrade of the
	 * root switch.
	 */
	tb->root_switch->no_nvm_upgrade = true;

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Enable TMU if it is off */
	tb_switch_tmu_enable(tb->root_switch);
	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb->root_switch);
	/*
	 * If the boot firmware did not create USB 3.x tunnels create them
	 * now for the whole topology.
	 */
	tb_create_usb3_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_switch_suspend(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}

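/*
 * Re-enable TMU and lane bonding for @sw and all routers below it.
 * Used when resuming from system sleep.
 */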
static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (tb_switch_lane_bonding_enable(port->remote->sw))
			dev_warn(&sw->dev, "failed to restore lane bonding\n");

		tb_restore_children(port->remote->sw);
	}
}

static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have setup */
	tb_switch_reset(tb, 0);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * the pcie links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	 /* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}

static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}

static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains and if there is a case where
	 * another domain is swapped in place of unplugged XDomain we
	 * need to run another rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}

static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.complete = tb_complete,
	.handle_event = tb_handle_event,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, sizeof(*tcm));
	if (!tb)
		return NULL;

	tb->security_level = TB_SECURITY_USER;
	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);

	return tb;
}