xref: /openbmc/linux/drivers/thunderbolt/tb.c (revision 6f4eaea2)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt driver - bus logic (NHI independent)
4  *
5  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6  * Copyright (C) 2019, Intel Corporation
7  */
8 
9 #include <linux/slab.h>
10 #include <linux/errno.h>
11 #include <linux/delay.h>
12 #include <linux/pm_runtime.h>
13 
14 #include "tb.h"
15 #include "tb_regs.h"
16 #include "tunnel.h"
17 
18 /**
19  * struct tb_cm - Simple Thunderbolt connection manager
20  * @tunnel_list: List of active tunnels
21  * @dp_resources: List of available DP resources for DP tunneling
22  * @hotplug_active: tb_handle_hotplug will stop progressing plug
23  *		    events and exit if this is not set (it needs to
24  *		    acquire the lock one more time). Used to drain wq
25  *		    after cfg has been paused.
26  * @remove_work: Work used to remove any unplugged routers after
27  *		 runtime resume
28  */
29 struct tb_cm {
30 	struct list_head tunnel_list;
31 	struct list_head dp_resources;
32 	bool hotplug_active;
33 	struct delayed_work remove_work;
34 };
35 
36 static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
37 {
38 	return ((void *)tcm - sizeof(struct tb));
39 }
40 
41 struct tb_hotplug_event {
42 	struct work_struct work;
43 	struct tb *tb;
44 	u64 route;
45 	u8 port;
46 	bool unplug;
47 };
48 
49 static void tb_handle_hotplug(struct work_struct *work);
50 
51 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
52 {
53 	struct tb_hotplug_event *ev;
54 
55 	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
56 	if (!ev)
57 		return;
58 
59 	ev->tb = tb;
60 	ev->route = route;
61 	ev->port = port;
62 	ev->unplug = unplug;
63 	INIT_WORK(&ev->work, tb_handle_hotplug);
64 	queue_work(tb->wq, &ev->work);
65 }
66 
67 /* enumeration & hot plug handling */
68 
69 static void tb_add_dp_resources(struct tb_switch *sw)
70 {
71 	struct tb_cm *tcm = tb_priv(sw->tb);
72 	struct tb_port *port;
73 
74 	tb_switch_for_each_port(sw, port) {
75 		if (!tb_port_is_dpin(port))
76 			continue;
77 
78 		if (!tb_switch_query_dp_resource(sw, port))
79 			continue;
80 
81 		list_add_tail(&port->list, &tcm->dp_resources);
82 		tb_port_dbg(port, "DP IN resource available\n");
83 	}
84 }
85 
86 static void tb_remove_dp_resources(struct tb_switch *sw)
87 {
88 	struct tb_cm *tcm = tb_priv(sw->tb);
89 	struct tb_port *port, *tmp;
90 
91 	/* Clear children resources first */
92 	tb_switch_for_each_port(sw, port) {
93 		if (tb_port_has_remote(port))
94 			tb_remove_dp_resources(port->remote->sw);
95 	}
96 
97 	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
98 		if (port->sw == sw) {
99 			tb_port_dbg(port, "DP OUT resource unavailable\n");
100 			list_del_init(&port->list);
101 		}
102 	}
103 }
104 
105 static void tb_discover_tunnels(struct tb_switch *sw)
106 {
107 	struct tb *tb = sw->tb;
108 	struct tb_cm *tcm = tb_priv(tb);
109 	struct tb_port *port;
110 
111 	tb_switch_for_each_port(sw, port) {
112 		struct tb_tunnel *tunnel = NULL;
113 
114 		switch (port->config.type) {
115 		case TB_TYPE_DP_HDMI_IN:
116 			tunnel = tb_tunnel_discover_dp(tb, port);
117 			break;
118 
119 		case TB_TYPE_PCIE_DOWN:
120 			tunnel = tb_tunnel_discover_pci(tb, port);
121 			break;
122 
123 		case TB_TYPE_USB3_DOWN:
124 			tunnel = tb_tunnel_discover_usb3(tb, port);
125 			break;
126 
127 		default:
128 			break;
129 		}
130 
131 		if (!tunnel)
132 			continue;
133 
134 		if (tb_tunnel_is_pci(tunnel)) {
135 			struct tb_switch *parent = tunnel->dst_port->sw;
136 
137 			while (parent != tunnel->src_port->sw) {
138 				parent->boot = true;
139 				parent = tb_switch_parent(parent);
140 			}
141 		}
142 
143 		list_add_tail(&tunnel->list, &tcm->tunnel_list);
144 	}
145 
146 	tb_switch_for_each_port(sw, port) {
147 		if (tb_port_has_remote(port))
148 			tb_discover_tunnels(port->remote->sw);
149 	}
150 }
151 
152 static int tb_port_configure_xdomain(struct tb_port *port)
153 {
154 	/*
155 	 * XDomain paths currently only support single lane so we must
156 	 * disable the other lane according to USB4 spec.
157 	 */
158 	tb_port_disable(port->dual_link_port);
159 
160 	if (tb_switch_is_usb4(port->sw))
161 		return usb4_port_configure_xdomain(port);
162 	return tb_lc_configure_xdomain(port);
163 }
164 
165 static void tb_port_unconfigure_xdomain(struct tb_port *port)
166 {
167 	if (tb_switch_is_usb4(port->sw))
168 		usb4_port_unconfigure_xdomain(port);
169 	else
170 		tb_lc_unconfigure_xdomain(port);
171 
172 	tb_port_enable(port->dual_link_port);
173 }
174 
175 static void tb_scan_xdomain(struct tb_port *port)
176 {
177 	struct tb_switch *sw = port->sw;
178 	struct tb *tb = sw->tb;
179 	struct tb_xdomain *xd;
180 	u64 route;
181 
182 	if (!tb_is_xdomain_enabled())
183 		return;
184 
185 	route = tb_downstream_route(port);
186 	xd = tb_xdomain_find_by_route(tb, route);
187 	if (xd) {
188 		tb_xdomain_put(xd);
189 		return;
190 	}
191 
192 	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
193 			      NULL);
194 	if (xd) {
195 		tb_port_at(route, sw)->xdomain = xd;
196 		tb_port_configure_xdomain(port);
197 		tb_xdomain_add(xd);
198 	}
199 }
200 
201 static int tb_enable_tmu(struct tb_switch *sw)
202 {
203 	int ret;
204 
205 	/* If it is already enabled in correct mode, don't touch it */
206 	if (tb_switch_tmu_is_enabled(sw))
207 		return 0;
208 
209 	ret = tb_switch_tmu_disable(sw);
210 	if (ret)
211 		return ret;
212 
213 	ret = tb_switch_tmu_post_time(sw);
214 	if (ret)
215 		return ret;
216 
217 	return tb_switch_tmu_enable(sw);
218 }
219 
220 /**
221  * tb_find_unused_port() - return the first inactive port on @sw
222  * @sw: Switch to find the port on
223  * @type: Port type to look for
224  */
225 static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
226 					   enum tb_port_type type)
227 {
228 	struct tb_port *port;
229 
230 	tb_switch_for_each_port(sw, port) {
231 		if (tb_is_upstream_port(port))
232 			continue;
233 		if (port->config.type != type)
234 			continue;
235 		if (!port->cap_adap)
236 			continue;
237 		if (tb_port_is_enabled(port))
238 			continue;
239 		return port;
240 	}
241 	return NULL;
242 }
243 
244 static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
245 					 const struct tb_port *port)
246 {
247 	struct tb_port *down;
248 
249 	down = usb4_switch_map_usb3_down(sw, port);
250 	if (down && !tb_usb3_port_is_enabled(down))
251 		return down;
252 	return NULL;
253 }
254 
255 static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
256 					struct tb_port *src_port,
257 					struct tb_port *dst_port)
258 {
259 	struct tb_cm *tcm = tb_priv(tb);
260 	struct tb_tunnel *tunnel;
261 
262 	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
263 		if (tunnel->type == type &&
264 		    ((src_port && src_port == tunnel->src_port) ||
265 		     (dst_port && dst_port == tunnel->dst_port))) {
266 			return tunnel;
267 		}
268 	}
269 
270 	return NULL;
271 }
272 
273 static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
274 						   struct tb_port *src_port,
275 						   struct tb_port *dst_port)
276 {
277 	struct tb_port *port, *usb3_down;
278 	struct tb_switch *sw;
279 
280 	/* Pick the router that is deepest in the topology */
281 	if (dst_port->sw->config.depth > src_port->sw->config.depth)
282 		sw = dst_port->sw;
283 	else
284 		sw = src_port->sw;
285 
286 	/* Can't be the host router */
287 	if (sw == tb->root_switch)
288 		return NULL;
289 
290 	/* Find the downstream USB4 port that leads to this router */
291 	port = tb_port_at(tb_route(sw), tb->root_switch);
292 	/* Find the corresponding host router USB3 downstream port */
293 	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
294 	if (!usb3_down)
295 		return NULL;
296 
297 	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
298 }
299 
300 static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
301 	struct tb_port *dst_port, int *available_up, int *available_down)
302 {
303 	int usb3_consumed_up, usb3_consumed_down, ret;
304 	struct tb_cm *tcm = tb_priv(tb);
305 	struct tb_tunnel *tunnel;
306 	struct tb_port *port;
307 
308 	tb_port_dbg(dst_port, "calculating available bandwidth\n");
309 
310 	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
311 	if (tunnel) {
312 		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
313 						   &usb3_consumed_down);
314 		if (ret)
315 			return ret;
316 	} else {
317 		usb3_consumed_up = 0;
318 		usb3_consumed_down = 0;
319 	}
320 
321 	*available_up = *available_down = 40000;
322 
323 	/* Find the minimum available bandwidth over all links */
324 	tb_for_each_port_on_path(src_port, dst_port, port) {
325 		int link_speed, link_width, up_bw, down_bw;
326 
327 		if (!tb_port_is_null(port))
328 			continue;
329 
330 		if (tb_is_upstream_port(port)) {
331 			link_speed = port->sw->link_speed;
332 		} else {
333 			link_speed = tb_port_get_link_speed(port);
334 			if (link_speed < 0)
335 				return link_speed;
336 		}
337 
338 		link_width = port->bonded ? 2 : 1;
339 
340 		up_bw = link_speed * link_width * 1000; /* Mb/s */
341 		/* Leave 10% guard band */
342 		up_bw -= up_bw / 10;
343 		down_bw = up_bw;
344 
345 		tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);
346 
347 		/*
348 		 * Find all DP tunnels that cross the port and reduce
349 		 * their consumed bandwidth from the available.
350 		 */
351 		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
352 			int dp_consumed_up, dp_consumed_down;
353 
354 			if (!tb_tunnel_is_dp(tunnel))
355 				continue;
356 
357 			if (!tb_tunnel_port_on_path(tunnel, port))
358 				continue;
359 
360 			ret = tb_tunnel_consumed_bandwidth(tunnel,
361 							   &dp_consumed_up,
362 							   &dp_consumed_down);
363 			if (ret)
364 				return ret;
365 
366 			up_bw -= dp_consumed_up;
367 			down_bw -= dp_consumed_down;
368 		}
369 
370 		/*
371 		 * If USB3 is tunneled from the host router down to the
372 		 * branch leading to port we need to take USB3 consumed
373 		 * bandwidth into account regardless whether it actually
374 		 * crosses the port.
375 		 */
376 		up_bw -= usb3_consumed_up;
377 		down_bw -= usb3_consumed_down;
378 
379 		if (up_bw < *available_up)
380 			*available_up = up_bw;
381 		if (down_bw < *available_down)
382 			*available_down = down_bw;
383 	}
384 
385 	if (*available_up < 0)
386 		*available_up = 0;
387 	if (*available_down < 0)
388 		*available_down = 0;
389 
390 	return 0;
391 }
392 
393 static int tb_release_unused_usb3_bandwidth(struct tb *tb,
394 					    struct tb_port *src_port,
395 					    struct tb_port *dst_port)
396 {
397 	struct tb_tunnel *tunnel;
398 
399 	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
400 	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
401 }
402 
403 static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
404 				      struct tb_port *dst_port)
405 {
406 	int ret, available_up, available_down;
407 	struct tb_tunnel *tunnel;
408 
409 	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
410 	if (!tunnel)
411 		return;
412 
413 	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");
414 
415 	/*
416 	 * Calculate available bandwidth for the first hop USB3 tunnel.
417 	 * That determines the whole USB3 bandwidth for this branch.
418 	 */
419 	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
420 				     &available_up, &available_down);
421 	if (ret) {
422 		tb_warn(tb, "failed to calculate available bandwidth\n");
423 		return;
424 	}
425 
426 	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
427 	       available_up, available_down);
428 
429 	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
430 }
431 
432 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
433 {
434 	struct tb_switch *parent = tb_switch_parent(sw);
435 	int ret, available_up, available_down;
436 	struct tb_port *up, *down, *port;
437 	struct tb_cm *tcm = tb_priv(tb);
438 	struct tb_tunnel *tunnel;
439 
440 	if (!tb_acpi_may_tunnel_usb3()) {
441 		tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
442 		return 0;
443 	}
444 
445 	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
446 	if (!up)
447 		return 0;
448 
449 	if (!sw->link_usb4)
450 		return 0;
451 
452 	/*
453 	 * Look up available down port. Since we are chaining it should
454 	 * be found right above this switch.
455 	 */
456 	port = tb_port_at(tb_route(sw), parent);
457 	down = tb_find_usb3_down(parent, port);
458 	if (!down)
459 		return 0;
460 
461 	if (tb_route(parent)) {
462 		struct tb_port *parent_up;
463 		/*
464 		 * Check first that the parent switch has its upstream USB3
465 		 * port enabled. Otherwise the chain is not complete and
466 		 * there is no point setting up a new tunnel.
467 		 */
468 		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
469 		if (!parent_up || !tb_port_is_enabled(parent_up))
470 			return 0;
471 
472 		/* Make all unused bandwidth available for the new tunnel */
473 		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
474 		if (ret)
475 			return ret;
476 	}
477 
478 	ret = tb_available_bandwidth(tb, down, up, &available_up,
479 				     &available_down);
480 	if (ret)
481 		goto err_reclaim;
482 
483 	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
484 		    available_up, available_down);
485 
486 	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
487 				      available_down);
488 	if (!tunnel) {
489 		ret = -ENOMEM;
490 		goto err_reclaim;
491 	}
492 
493 	if (tb_tunnel_activate(tunnel)) {
494 		tb_port_info(up,
495 			     "USB3 tunnel activation failed, aborting\n");
496 		ret = -EIO;
497 		goto err_free;
498 	}
499 
500 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
501 	if (tb_route(parent))
502 		tb_reclaim_usb3_bandwidth(tb, down, up);
503 
504 	return 0;
505 
506 err_free:
507 	tb_tunnel_free(tunnel);
508 err_reclaim:
509 	if (tb_route(parent))
510 		tb_reclaim_usb3_bandwidth(tb, down, up);
511 
512 	return ret;
513 }
514 
515 static int tb_create_usb3_tunnels(struct tb_switch *sw)
516 {
517 	struct tb_port *port;
518 	int ret;
519 
520 	if (!tb_acpi_may_tunnel_usb3())
521 		return 0;
522 
523 	if (tb_route(sw)) {
524 		ret = tb_tunnel_usb3(sw->tb, sw);
525 		if (ret)
526 			return ret;
527 	}
528 
529 	tb_switch_for_each_port(sw, port) {
530 		if (!tb_port_has_remote(port))
531 			continue;
532 		ret = tb_create_usb3_tunnels(port->remote->sw);
533 		if (ret)
534 			return ret;
535 	}
536 
537 	return 0;
538 }
539 
540 static void tb_scan_port(struct tb_port *port);
541 
542 /*
543  * tb_scan_switch() - scan for and initialize downstream switches
544  */
545 static void tb_scan_switch(struct tb_switch *sw)
546 {
547 	struct tb_port *port;
548 
549 	pm_runtime_get_sync(&sw->dev);
550 
551 	tb_switch_for_each_port(sw, port)
552 		tb_scan_port(port);
553 
554 	pm_runtime_mark_last_busy(&sw->dev);
555 	pm_runtime_put_autosuspend(&sw->dev);
556 }
557 
558 /*
559  * tb_scan_port() - check for and initialize switches below port
560  */
561 static void tb_scan_port(struct tb_port *port)
562 {
563 	struct tb_cm *tcm = tb_priv(port->sw->tb);
564 	struct tb_port *upstream_port;
565 	struct tb_switch *sw;
566 
567 	if (tb_is_upstream_port(port))
568 		return;
569 
570 	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
571 	    !tb_dp_port_is_enabled(port)) {
572 		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
573 		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
574 				 false);
575 		return;
576 	}
577 
578 	if (port->config.type != TB_TYPE_PORT)
579 		return;
580 	if (port->dual_link_port && port->link_nr)
581 		return; /*
582 			 * Downstream switch is reachable through two ports.
583 			 * Only scan on the primary port (link_nr == 0).
584 			 */
585 	if (tb_wait_for_port(port, false) <= 0)
586 		return;
587 	if (port->remote) {
588 		tb_port_dbg(port, "port already has a remote\n");
589 		return;
590 	}
591 
592 	tb_retimer_scan(port);
593 
594 	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
595 			     tb_downstream_route(port));
596 	if (IS_ERR(sw)) {
597 		/*
598 		 * If there is an error accessing the connected switch
599 		 * it may be connected to another domain. Also we allow
600 		 * the other domain to be connected to a max depth switch.
601 		 */
602 		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
603 			tb_scan_xdomain(port);
604 		return;
605 	}
606 
607 	if (tb_switch_configure(sw)) {
608 		tb_switch_put(sw);
609 		return;
610 	}
611 
612 	/*
613 	 * If there was previously another domain connected remove it
614 	 * first.
615 	 */
616 	if (port->xdomain) {
617 		tb_xdomain_remove(port->xdomain);
618 		tb_port_unconfigure_xdomain(port);
619 		port->xdomain = NULL;
620 	}
621 
622 	/*
623 	 * Do not send uevents until we have discovered all existing
624 	 * tunnels and know which switches were authorized already by
625 	 * the boot firmware.
626 	 */
627 	if (!tcm->hotplug_active)
628 		dev_set_uevent_suppress(&sw->dev, true);
629 
630 	/*
631 	 * At the moment Thunderbolt 2 and beyond (devices with LC) we
632 	 * can support runtime PM.
633 	 */
634 	sw->rpm = sw->generation > 1;
635 
636 	if (tb_switch_add(sw)) {
637 		tb_switch_put(sw);
638 		return;
639 	}
640 
641 	/* Link the switches using both links if available */
642 	upstream_port = tb_upstream_port(sw);
643 	port->remote = upstream_port;
644 	upstream_port->remote = port;
645 	if (port->dual_link_port && upstream_port->dual_link_port) {
646 		port->dual_link_port->remote = upstream_port->dual_link_port;
647 		upstream_port->dual_link_port->remote = port->dual_link_port;
648 	}
649 
650 	/* Enable lane bonding if supported */
651 	tb_switch_lane_bonding_enable(sw);
652 	/* Set the link configured */
653 	tb_switch_configure_link(sw);
654 
655 	if (tb_enable_tmu(sw))
656 		tb_sw_warn(sw, "failed to enable TMU\n");
657 
658 	/* Scan upstream retimers */
659 	tb_retimer_scan(upstream_port);
660 
661 	/*
662 	 * Create USB 3.x tunnels only when the switch is plugged to the
663 	 * domain. This is because we scan the domain also during discovery
664 	 * and want to discover existing USB 3.x tunnels before we create
665 	 * any new.
666 	 */
667 	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
668 		tb_sw_warn(sw, "USB3 tunnel creation failed\n");
669 
670 	tb_add_dp_resources(sw);
671 	tb_scan_switch(sw);
672 }
673 
674 static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
675 {
676 	struct tb_port *src_port, *dst_port;
677 	struct tb *tb;
678 
679 	if (!tunnel)
680 		return;
681 
682 	tb_tunnel_deactivate(tunnel);
683 	list_del(&tunnel->list);
684 
685 	tb = tunnel->tb;
686 	src_port = tunnel->src_port;
687 	dst_port = tunnel->dst_port;
688 
689 	switch (tunnel->type) {
690 	case TB_TUNNEL_DP:
691 		/*
692 		 * In case of DP tunnel make sure the DP IN resource is
693 		 * deallocated properly.
694 		 */
695 		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
696 		/* Now we can allow the domain to runtime suspend again */
697 		pm_runtime_mark_last_busy(&dst_port->sw->dev);
698 		pm_runtime_put_autosuspend(&dst_port->sw->dev);
699 		pm_runtime_mark_last_busy(&src_port->sw->dev);
700 		pm_runtime_put_autosuspend(&src_port->sw->dev);
701 		fallthrough;
702 
703 	case TB_TUNNEL_USB3:
704 		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
705 		break;
706 
707 	default:
708 		/*
709 		 * PCIe and DMA tunnels do not consume guaranteed
710 		 * bandwidth.
711 		 */
712 		break;
713 	}
714 
715 	tb_tunnel_free(tunnel);
716 }
717 
718 /*
719  * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
720  */
721 static void tb_free_invalid_tunnels(struct tb *tb)
722 {
723 	struct tb_cm *tcm = tb_priv(tb);
724 	struct tb_tunnel *tunnel;
725 	struct tb_tunnel *n;
726 
727 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
728 		if (tb_tunnel_is_invalid(tunnel))
729 			tb_deactivate_and_free_tunnel(tunnel);
730 	}
731 }
732 
733 /*
734  * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
735  */
736 static void tb_free_unplugged_children(struct tb_switch *sw)
737 {
738 	struct tb_port *port;
739 
740 	tb_switch_for_each_port(sw, port) {
741 		if (!tb_port_has_remote(port))
742 			continue;
743 
744 		if (port->remote->sw->is_unplugged) {
745 			tb_retimer_remove_all(port);
746 			tb_remove_dp_resources(port->remote->sw);
747 			tb_switch_unconfigure_link(port->remote->sw);
748 			tb_switch_lane_bonding_disable(port->remote->sw);
749 			tb_switch_remove(port->remote->sw);
750 			port->remote = NULL;
751 			if (port->dual_link_port)
752 				port->dual_link_port->remote = NULL;
753 		} else {
754 			tb_free_unplugged_children(port->remote->sw);
755 		}
756 	}
757 }
758 
759 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
760 					 const struct tb_port *port)
761 {
762 	struct tb_port *down = NULL;
763 
764 	/*
765 	 * To keep plugging devices consistently in the same PCIe
766 	 * hierarchy, do mapping here for switch downstream PCIe ports.
767 	 */
768 	if (tb_switch_is_usb4(sw)) {
769 		down = usb4_switch_map_pcie_down(sw, port);
770 	} else if (!tb_route(sw)) {
771 		int phy_port = tb_phy_port_from_link(port->port);
772 		int index;
773 
774 		/*
775 		 * Hard-coded Thunderbolt port to PCIe down port mapping
776 		 * per controller.
777 		 */
778 		if (tb_switch_is_cactus_ridge(sw) ||
779 		    tb_switch_is_alpine_ridge(sw))
780 			index = !phy_port ? 6 : 7;
781 		else if (tb_switch_is_falcon_ridge(sw))
782 			index = !phy_port ? 6 : 8;
783 		else if (tb_switch_is_titan_ridge(sw))
784 			index = !phy_port ? 8 : 9;
785 		else
786 			goto out;
787 
788 		/* Validate the hard-coding */
789 		if (WARN_ON(index > sw->config.max_port_number))
790 			goto out;
791 
792 		down = &sw->ports[index];
793 	}
794 
795 	if (down) {
796 		if (WARN_ON(!tb_port_is_pcie_down(down)))
797 			goto out;
798 		if (tb_pci_port_is_enabled(down))
799 			goto out;
800 
801 		return down;
802 	}
803 
804 out:
805 	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
806 }
807 
808 static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
809 {
810 	struct tb_port *host_port, *port;
811 	struct tb_cm *tcm = tb_priv(tb);
812 
813 	host_port = tb_route(in->sw) ?
814 		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
815 
816 	list_for_each_entry(port, &tcm->dp_resources, list) {
817 		if (!tb_port_is_dpout(port))
818 			continue;
819 
820 		if (tb_port_is_enabled(port)) {
821 			tb_port_dbg(port, "in use\n");
822 			continue;
823 		}
824 
825 		tb_port_dbg(port, "DP OUT available\n");
826 
827 		/*
828 		 * Keep the DP tunnel under the topology starting from
829 		 * the same host router downstream port.
830 		 */
831 		if (host_port && tb_route(port->sw)) {
832 			struct tb_port *p;
833 
834 			p = tb_port_at(tb_route(port->sw), tb->root_switch);
835 			if (p != host_port)
836 				continue;
837 		}
838 
839 		return port;
840 	}
841 
842 	return NULL;
843 }
844 
845 static void tb_tunnel_dp(struct tb *tb)
846 {
847 	int available_up, available_down, ret;
848 	struct tb_cm *tcm = tb_priv(tb);
849 	struct tb_port *port, *in, *out;
850 	struct tb_tunnel *tunnel;
851 
852 	if (!tb_acpi_may_tunnel_dp()) {
853 		tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
854 		return;
855 	}
856 
857 	/*
858 	 * Find pair of inactive DP IN and DP OUT adapters and then
859 	 * establish a DP tunnel between them.
860 	 */
861 	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
862 
863 	in = NULL;
864 	out = NULL;
865 	list_for_each_entry(port, &tcm->dp_resources, list) {
866 		if (!tb_port_is_dpin(port))
867 			continue;
868 
869 		if (tb_port_is_enabled(port)) {
870 			tb_port_dbg(port, "in use\n");
871 			continue;
872 		}
873 
874 		tb_port_dbg(port, "DP IN available\n");
875 
876 		out = tb_find_dp_out(tb, port);
877 		if (out) {
878 			in = port;
879 			break;
880 		}
881 	}
882 
883 	if (!in) {
884 		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
885 		return;
886 	}
887 	if (!out) {
888 		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
889 		return;
890 	}
891 
892 	/*
893 	 * DP stream needs the domain to be active so runtime resume
894 	 * both ends of the tunnel.
895 	 *
896 	 * This should bring the routers in the middle active as well
897 	 * and keeps the domain from runtime suspending while the DP
898 	 * tunnel is active.
899 	 */
900 	pm_runtime_get_sync(&in->sw->dev);
901 	pm_runtime_get_sync(&out->sw->dev);
902 
903 	if (tb_switch_alloc_dp_resource(in->sw, in)) {
904 		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
905 		goto err_rpm_put;
906 	}
907 
908 	/* Make all unused USB3 bandwidth available for the new DP tunnel */
909 	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
910 	if (ret) {
911 		tb_warn(tb, "failed to release unused bandwidth\n");
912 		goto err_dealloc_dp;
913 	}
914 
915 	ret = tb_available_bandwidth(tb, in, out, &available_up,
916 				     &available_down);
917 	if (ret)
918 		goto err_reclaim;
919 
920 	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
921 	       available_up, available_down);
922 
923 	tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down);
924 	if (!tunnel) {
925 		tb_port_dbg(out, "could not allocate DP tunnel\n");
926 		goto err_reclaim;
927 	}
928 
929 	if (tb_tunnel_activate(tunnel)) {
930 		tb_port_info(out, "DP tunnel activation failed, aborting\n");
931 		goto err_free;
932 	}
933 
934 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
935 	tb_reclaim_usb3_bandwidth(tb, in, out);
936 	return;
937 
938 err_free:
939 	tb_tunnel_free(tunnel);
940 err_reclaim:
941 	tb_reclaim_usb3_bandwidth(tb, in, out);
942 err_dealloc_dp:
943 	tb_switch_dealloc_dp_resource(in->sw, in);
944 err_rpm_put:
945 	pm_runtime_mark_last_busy(&out->sw->dev);
946 	pm_runtime_put_autosuspend(&out->sw->dev);
947 	pm_runtime_mark_last_busy(&in->sw->dev);
948 	pm_runtime_put_autosuspend(&in->sw->dev);
949 }
950 
951 static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
952 {
953 	struct tb_port *in, *out;
954 	struct tb_tunnel *tunnel;
955 
956 	if (tb_port_is_dpin(port)) {
957 		tb_port_dbg(port, "DP IN resource unavailable\n");
958 		in = port;
959 		out = NULL;
960 	} else {
961 		tb_port_dbg(port, "DP OUT resource unavailable\n");
962 		in = NULL;
963 		out = port;
964 	}
965 
966 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
967 	tb_deactivate_and_free_tunnel(tunnel);
968 	list_del_init(&port->list);
969 
970 	/*
971 	 * See if there is another DP OUT port that can be used for
972 	 * to create another tunnel.
973 	 */
974 	tb_tunnel_dp(tb);
975 }
976 
977 static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
978 {
979 	struct tb_cm *tcm = tb_priv(tb);
980 	struct tb_port *p;
981 
982 	if (tb_port_is_enabled(port))
983 		return;
984 
985 	list_for_each_entry(p, &tcm->dp_resources, list) {
986 		if (p == port)
987 			return;
988 	}
989 
990 	tb_port_dbg(port, "DP %s resource available\n",
991 		    tb_port_is_dpin(port) ? "IN" : "OUT");
992 	list_add_tail(&port->list, &tcm->dp_resources);
993 
994 	/* Look for suitable DP IN <-> DP OUT pairs now */
995 	tb_tunnel_dp(tb);
996 }
997 
998 static void tb_disconnect_and_release_dp(struct tb *tb)
999 {
1000 	struct tb_cm *tcm = tb_priv(tb);
1001 	struct tb_tunnel *tunnel, *n;
1002 
1003 	/*
1004 	 * Tear down all DP tunnels and release their resources. They
1005 	 * will be re-established after resume based on plug events.
1006 	 */
1007 	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
1008 		if (tb_tunnel_is_dp(tunnel))
1009 			tb_deactivate_and_free_tunnel(tunnel);
1010 	}
1011 
1012 	while (!list_empty(&tcm->dp_resources)) {
1013 		struct tb_port *port;
1014 
1015 		port = list_first_entry(&tcm->dp_resources,
1016 					struct tb_port, list);
1017 		list_del_init(&port->list);
1018 	}
1019 }
1020 
1021 static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
1022 {
1023 	struct tb_tunnel *tunnel;
1024 	struct tb_port *up;
1025 
1026 	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1027 	if (WARN_ON(!up))
1028 		return -ENODEV;
1029 
1030 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
1031 	if (WARN_ON(!tunnel))
1032 		return -ENODEV;
1033 
1034 	tb_tunnel_deactivate(tunnel);
1035 	list_del(&tunnel->list);
1036 	tb_tunnel_free(tunnel);
1037 	return 0;
1038 }
1039 
1040 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
1041 {
1042 	struct tb_port *up, *down, *port;
1043 	struct tb_cm *tcm = tb_priv(tb);
1044 	struct tb_switch *parent_sw;
1045 	struct tb_tunnel *tunnel;
1046 
1047 	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1048 	if (!up)
1049 		return 0;
1050 
1051 	/*
1052 	 * Look up available down port. Since we are chaining it should
1053 	 * be found right above this switch.
1054 	 */
1055 	parent_sw = tb_to_switch(sw->dev.parent);
1056 	port = tb_port_at(tb_route(sw), parent_sw);
1057 	down = tb_find_pcie_down(parent_sw, port);
1058 	if (!down)
1059 		return 0;
1060 
1061 	tunnel = tb_tunnel_alloc_pci(tb, up, down);
1062 	if (!tunnel)
1063 		return -ENOMEM;
1064 
1065 	if (tb_tunnel_activate(tunnel)) {
1066 		tb_port_info(up,
1067 			     "PCIe tunnel activation failed, aborting\n");
1068 		tb_tunnel_free(tunnel);
1069 		return -EIO;
1070 	}
1071 
1072 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
1073 	return 0;
1074 }
1075 
1076 static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
1077 {
1078 	struct tb_cm *tcm = tb_priv(tb);
1079 	struct tb_port *nhi_port, *dst_port;
1080 	struct tb_tunnel *tunnel;
1081 	struct tb_switch *sw;
1082 
1083 	sw = tb_to_switch(xd->dev.parent);
1084 	dst_port = tb_port_at(xd->route, sw);
1085 	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
1086 
1087 	mutex_lock(&tb->lock);
1088 	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
1089 				     xd->transmit_path, xd->receive_ring,
1090 				     xd->receive_path);
1091 	if (!tunnel) {
1092 		mutex_unlock(&tb->lock);
1093 		return -ENOMEM;
1094 	}
1095 
1096 	if (tb_tunnel_activate(tunnel)) {
1097 		tb_port_info(nhi_port,
1098 			     "DMA tunnel activation failed, aborting\n");
1099 		tb_tunnel_free(tunnel);
1100 		mutex_unlock(&tb->lock);
1101 		return -EIO;
1102 	}
1103 
1104 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
1105 	mutex_unlock(&tb->lock);
1106 	return 0;
1107 }
1108 
1109 static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
1110 {
1111 	struct tb_port *dst_port;
1112 	struct tb_tunnel *tunnel;
1113 	struct tb_switch *sw;
1114 
1115 	sw = tb_to_switch(xd->dev.parent);
1116 	dst_port = tb_port_at(xd->route, sw);
1117 
1118 	/*
1119 	 * It is possible that the tunnel was already teared down (in
1120 	 * case of cable disconnect) so it is fine if we cannot find it
1121 	 * here anymore.
1122 	 */
1123 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
1124 	tb_deactivate_and_free_tunnel(tunnel);
1125 }
1126 
1127 static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
1128 {
1129 	if (!xd->is_unplugged) {
1130 		mutex_lock(&tb->lock);
1131 		__tb_disconnect_xdomain_paths(tb, xd);
1132 		mutex_unlock(&tb->lock);
1133 	}
1134 	return 0;
1135 }
1136 
1137 /* hotplug handling */
1138 
1139 /*
1140  * tb_handle_hotplug() - handle hotplug event
1141  *
1142  * Executes on tb->wq.
1143  */
1144 static void tb_handle_hotplug(struct work_struct *work)
1145 {
1146 	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
1147 	struct tb *tb = ev->tb;
1148 	struct tb_cm *tcm = tb_priv(tb);
1149 	struct tb_switch *sw;
1150 	struct tb_port *port;
1151 
1152 	/* Bring the domain back from sleep if it was suspended */
1153 	pm_runtime_get_sync(&tb->dev);
1154 
1155 	mutex_lock(&tb->lock);
1156 	if (!tcm->hotplug_active)
1157 		goto out; /* during init, suspend or shutdown */
1158 
1159 	sw = tb_switch_find_by_route(tb, ev->route);
1160 	if (!sw) {
1161 		tb_warn(tb,
1162 			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
1163 			ev->route, ev->port, ev->unplug);
1164 		goto out;
1165 	}
1166 	if (ev->port > sw->config.max_port_number) {
1167 		tb_warn(tb,
1168 			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
1169 			ev->route, ev->port, ev->unplug);
1170 		goto put_sw;
1171 	}
1172 	port = &sw->ports[ev->port];
1173 	if (tb_is_upstream_port(port)) {
1174 		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
1175 		       ev->route, ev->port, ev->unplug);
1176 		goto put_sw;
1177 	}
1178 
1179 	pm_runtime_get_sync(&sw->dev);
1180 
1181 	if (ev->unplug) {
1182 		tb_retimer_remove_all(port);
1183 
1184 		if (tb_port_has_remote(port)) {
1185 			tb_port_dbg(port, "switch unplugged\n");
1186 			tb_sw_set_unplugged(port->remote->sw);
1187 			tb_free_invalid_tunnels(tb);
1188 			tb_remove_dp_resources(port->remote->sw);
1189 			tb_switch_tmu_disable(port->remote->sw);
1190 			tb_switch_unconfigure_link(port->remote->sw);
1191 			tb_switch_lane_bonding_disable(port->remote->sw);
1192 			tb_switch_remove(port->remote->sw);
1193 			port->remote = NULL;
1194 			if (port->dual_link_port)
1195 				port->dual_link_port->remote = NULL;
1196 			/* Maybe we can create another DP tunnel */
1197 			tb_tunnel_dp(tb);
1198 		} else if (port->xdomain) {
1199 			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
1200 
1201 			tb_port_dbg(port, "xdomain unplugged\n");
1202 			/*
1203 			 * Service drivers are unbound during
1204 			 * tb_xdomain_remove() so setting XDomain as
1205 			 * unplugged here prevents deadlock if they call
1206 			 * tb_xdomain_disable_paths(). We will tear down
1207 			 * the path below.
1208 			 */
1209 			xd->is_unplugged = true;
1210 			tb_xdomain_remove(xd);
1211 			port->xdomain = NULL;
1212 			__tb_disconnect_xdomain_paths(tb, xd);
1213 			tb_xdomain_put(xd);
1214 			tb_port_unconfigure_xdomain(port);
1215 		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
1216 			tb_dp_resource_unavailable(tb, port);
1217 		} else {
1218 			tb_port_dbg(port,
1219 				   "got unplug event for disconnected port, ignoring\n");
1220 		}
1221 	} else if (port->remote) {
1222 		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
1223 	} else {
1224 		if (tb_port_is_null(port)) {
1225 			tb_port_dbg(port, "hotplug: scanning\n");
1226 			tb_scan_port(port);
1227 			if (!port->remote)
1228 				tb_port_dbg(port, "hotplug: no switch found\n");
1229 		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
1230 			tb_dp_resource_available(tb, port);
1231 		}
1232 	}
1233 
1234 	pm_runtime_mark_last_busy(&sw->dev);
1235 	pm_runtime_put_autosuspend(&sw->dev);
1236 
1237 put_sw:
1238 	tb_switch_put(sw);
1239 out:
1240 	mutex_unlock(&tb->lock);
1241 
1242 	pm_runtime_mark_last_busy(&tb->dev);
1243 	pm_runtime_put_autosuspend(&tb->dev);
1244 
1245 	kfree(ev);
1246 }
1247 
1248 /*
1249  * tb_schedule_hotplug_handler() - callback function for the control channel
1250  *
1251  * Delegates to tb_handle_hotplug.
1252  */
1253 static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
1254 			    const void *buf, size_t size)
1255 {
1256 	const struct cfg_event_pkg *pkg = buf;
1257 	u64 route;
1258 
1259 	if (type != TB_CFG_PKG_EVENT) {
1260 		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
1261 		return;
1262 	}
1263 
1264 	route = tb_cfg_get_route(&pkg->header);
1265 
1266 	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
1267 		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
1268 			pkg->port);
1269 	}
1270 
1271 	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
1272 }
1273 
1274 static void tb_stop(struct tb *tb)
1275 {
1276 	struct tb_cm *tcm = tb_priv(tb);
1277 	struct tb_tunnel *tunnel;
1278 	struct tb_tunnel *n;
1279 
1280 	cancel_delayed_work(&tcm->remove_work);
1281 	/* tunnels are only present after everything has been initialized */
1282 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1283 		/*
1284 		 * DMA tunnels require the driver to be functional so we
1285 		 * tear them down. Other protocol tunnels can be left
1286 		 * intact.
1287 		 */
1288 		if (tb_tunnel_is_dma(tunnel))
1289 			tb_tunnel_deactivate(tunnel);
1290 		tb_tunnel_free(tunnel);
1291 	}
1292 	tb_switch_remove(tb->root_switch);
1293 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
1294 }
1295 
1296 static int tb_scan_finalize_switch(struct device *dev, void *data)
1297 {
1298 	if (tb_is_switch(dev)) {
1299 		struct tb_switch *sw = tb_to_switch(dev);
1300 
1301 		/*
1302 		 * If we found that the switch was already setup by the
1303 		 * boot firmware, mark it as authorized now before we
1304 		 * send uevent to userspace.
1305 		 */
1306 		if (sw->boot)
1307 			sw->authorized = 1;
1308 
1309 		dev_set_uevent_suppress(dev, false);
1310 		kobject_uevent(&dev->kobj, KOBJ_ADD);
1311 		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
1312 	}
1313 
1314 	return 0;
1315 }
1316 
1317 static int tb_start(struct tb *tb)
1318 {
1319 	struct tb_cm *tcm = tb_priv(tb);
1320 	int ret;
1321 
1322 	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
1323 	if (IS_ERR(tb->root_switch))
1324 		return PTR_ERR(tb->root_switch);
1325 
1326 	/*
1327 	 * ICM firmware upgrade needs running firmware and in native
1328 	 * mode that is not available so disable firmware upgrade of the
1329 	 * root switch.
1330 	 */
1331 	tb->root_switch->no_nvm_upgrade = true;
1332 	/* All USB4 routers support runtime PM */
1333 	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
1334 
1335 	ret = tb_switch_configure(tb->root_switch);
1336 	if (ret) {
1337 		tb_switch_put(tb->root_switch);
1338 		return ret;
1339 	}
1340 
1341 	/* Announce the switch to the world */
1342 	ret = tb_switch_add(tb->root_switch);
1343 	if (ret) {
1344 		tb_switch_put(tb->root_switch);
1345 		return ret;
1346 	}
1347 
1348 	/* Enable TMU if it is off */
1349 	tb_switch_tmu_enable(tb->root_switch);
1350 	/* Full scan to discover devices added before the driver was loaded. */
1351 	tb_scan_switch(tb->root_switch);
1352 	/* Find out tunnels created by the boot firmware */
1353 	tb_discover_tunnels(tb->root_switch);
1354 	/*
1355 	 * If the boot firmware did not create USB 3.x tunnels create them
1356 	 * now for the whole topology.
1357 	 */
1358 	tb_create_usb3_tunnels(tb->root_switch);
1359 	/* Add DP IN resources for the root switch */
1360 	tb_add_dp_resources(tb->root_switch);
1361 	/* Make the discovered switches available to the userspace */
1362 	device_for_each_child(&tb->root_switch->dev, NULL,
1363 			      tb_scan_finalize_switch);
1364 
1365 	/* Allow tb_handle_hotplug to progress events */
1366 	tcm->hotplug_active = true;
1367 	return 0;
1368 }
1369 
1370 static int tb_suspend_noirq(struct tb *tb)
1371 {
1372 	struct tb_cm *tcm = tb_priv(tb);
1373 
1374 	tb_dbg(tb, "suspending...\n");
1375 	tb_disconnect_and_release_dp(tb);
1376 	tb_switch_suspend(tb->root_switch, false);
1377 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
1378 	tb_dbg(tb, "suspend finished\n");
1379 
1380 	return 0;
1381 }
1382 
1383 static void tb_restore_children(struct tb_switch *sw)
1384 {
1385 	struct tb_port *port;
1386 
1387 	/* No need to restore if the router is already unplugged */
1388 	if (sw->is_unplugged)
1389 		return;
1390 
1391 	if (tb_enable_tmu(sw))
1392 		tb_sw_warn(sw, "failed to restore TMU configuration\n");
1393 
1394 	tb_switch_for_each_port(sw, port) {
1395 		if (!tb_port_has_remote(port) && !port->xdomain)
1396 			continue;
1397 
1398 		if (port->remote) {
1399 			tb_switch_lane_bonding_enable(port->remote->sw);
1400 			tb_switch_configure_link(port->remote->sw);
1401 
1402 			tb_restore_children(port->remote->sw);
1403 		} else if (port->xdomain) {
1404 			tb_port_configure_xdomain(port);
1405 		}
1406 	}
1407 }
1408 
1409 static int tb_resume_noirq(struct tb *tb)
1410 {
1411 	struct tb_cm *tcm = tb_priv(tb);
1412 	struct tb_tunnel *tunnel, *n;
1413 
1414 	tb_dbg(tb, "resuming...\n");
1415 
1416 	/* remove any pci devices the firmware might have setup */
1417 	tb_switch_reset(tb->root_switch);
1418 
1419 	tb_switch_resume(tb->root_switch);
1420 	tb_free_invalid_tunnels(tb);
1421 	tb_free_unplugged_children(tb->root_switch);
1422 	tb_restore_children(tb->root_switch);
1423 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
1424 		tb_tunnel_restart(tunnel);
1425 	if (!list_empty(&tcm->tunnel_list)) {
1426 		/*
1427 		 * the pcie links need some time to get going.
1428 		 * 100ms works for me...
1429 		 */
1430 		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
1431 		msleep(100);
1432 	}
1433 	 /* Allow tb_handle_hotplug to progress events */
1434 	tcm->hotplug_active = true;
1435 	tb_dbg(tb, "resume finished\n");
1436 
1437 	return 0;
1438 }
1439 
1440 static int tb_free_unplugged_xdomains(struct tb_switch *sw)
1441 {
1442 	struct tb_port *port;
1443 	int ret = 0;
1444 
1445 	tb_switch_for_each_port(sw, port) {
1446 		if (tb_is_upstream_port(port))
1447 			continue;
1448 		if (port->xdomain && port->xdomain->is_unplugged) {
1449 			tb_retimer_remove_all(port);
1450 			tb_xdomain_remove(port->xdomain);
1451 			tb_port_unconfigure_xdomain(port);
1452 			port->xdomain = NULL;
1453 			ret++;
1454 		} else if (port->remote) {
1455 			ret += tb_free_unplugged_xdomains(port->remote->sw);
1456 		}
1457 	}
1458 
1459 	return ret;
1460 }
1461 
1462 static int tb_freeze_noirq(struct tb *tb)
1463 {
1464 	struct tb_cm *tcm = tb_priv(tb);
1465 
1466 	tcm->hotplug_active = false;
1467 	return 0;
1468 }
1469 
1470 static int tb_thaw_noirq(struct tb *tb)
1471 {
1472 	struct tb_cm *tcm = tb_priv(tb);
1473 
1474 	tcm->hotplug_active = true;
1475 	return 0;
1476 }
1477 
1478 static void tb_complete(struct tb *tb)
1479 {
1480 	/*
1481 	 * Release any unplugged XDomains and if there is a case where
1482 	 * another domain is swapped in place of unplugged XDomain we
1483 	 * need to run another rescan.
1484 	 */
1485 	mutex_lock(&tb->lock);
1486 	if (tb_free_unplugged_xdomains(tb->root_switch))
1487 		tb_scan_switch(tb->root_switch);
1488 	mutex_unlock(&tb->lock);
1489 }
1490 
1491 static int tb_runtime_suspend(struct tb *tb)
1492 {
1493 	struct tb_cm *tcm = tb_priv(tb);
1494 
1495 	mutex_lock(&tb->lock);
1496 	tb_switch_suspend(tb->root_switch, true);
1497 	tcm->hotplug_active = false;
1498 	mutex_unlock(&tb->lock);
1499 
1500 	return 0;
1501 }
1502 
1503 static void tb_remove_work(struct work_struct *work)
1504 {
1505 	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
1506 	struct tb *tb = tcm_to_tb(tcm);
1507 
1508 	mutex_lock(&tb->lock);
1509 	if (tb->root_switch) {
1510 		tb_free_unplugged_children(tb->root_switch);
1511 		tb_free_unplugged_xdomains(tb->root_switch);
1512 	}
1513 	mutex_unlock(&tb->lock);
1514 }
1515 
1516 static int tb_runtime_resume(struct tb *tb)
1517 {
1518 	struct tb_cm *tcm = tb_priv(tb);
1519 	struct tb_tunnel *tunnel, *n;
1520 
1521 	mutex_lock(&tb->lock);
1522 	tb_switch_resume(tb->root_switch);
1523 	tb_free_invalid_tunnels(tb);
1524 	tb_restore_children(tb->root_switch);
1525 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
1526 		tb_tunnel_restart(tunnel);
1527 	tcm->hotplug_active = true;
1528 	mutex_unlock(&tb->lock);
1529 
1530 	/*
1531 	 * Schedule cleanup of any unplugged devices. Run this in a
1532 	 * separate thread to avoid possible deadlock if the device
1533 	 * removal runtime resumes the unplugged device.
1534 	 */
1535 	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
1536 	return 0;
1537 }
1538 
1539 static const struct tb_cm_ops tb_cm_ops = {
1540 	.start = tb_start,
1541 	.stop = tb_stop,
1542 	.suspend_noirq = tb_suspend_noirq,
1543 	.resume_noirq = tb_resume_noirq,
1544 	.freeze_noirq = tb_freeze_noirq,
1545 	.thaw_noirq = tb_thaw_noirq,
1546 	.complete = tb_complete,
1547 	.runtime_suspend = tb_runtime_suspend,
1548 	.runtime_resume = tb_runtime_resume,
1549 	.handle_event = tb_handle_event,
1550 	.disapprove_switch = tb_disconnect_pci,
1551 	.approve_switch = tb_tunnel_pci,
1552 	.approve_xdomain_paths = tb_approve_xdomain_paths,
1553 	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
1554 };
1555 
1556 struct tb *tb_probe(struct tb_nhi *nhi)
1557 {
1558 	struct tb_cm *tcm;
1559 	struct tb *tb;
1560 
1561 	tb = tb_domain_alloc(nhi, sizeof(*tcm));
1562 	if (!tb)
1563 		return NULL;
1564 
1565 	if (tb_acpi_may_tunnel_pcie())
1566 		tb->security_level = TB_SECURITY_USER;
1567 	else
1568 		tb->security_level = TB_SECURITY_NOPCIE;
1569 
1570 	tb->cm_ops = &tb_cm_ops;
1571 
1572 	tcm = tb_priv(tb);
1573 	INIT_LIST_HEAD(&tcm->tunnel_list);
1574 	INIT_LIST_HEAD(&tcm->dp_resources);
1575 	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
1576 
1577 	tb_dbg(tb, "using software connection manager\n");
1578 
1579 	return tb;
1580 }
1581