// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

#define TB_TIMEOUT	100 /* ms */

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 * @remove_work: Work used to remove any unplugged routers after
 *		 runtime resume
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
	struct delayed_work remove_work;
};

static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
{
	return ((void *)tcm - sizeof(struct tb));
}
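
/*
 * A minimal sketch of the layout tcm_to_tb() relies on (assumption:
 * tb_domain_alloc(), used by tb_probe() at the end of this file, keeps
 * the connection manager private data in the same allocation, directly
 * after struct tb, so tb_priv() and tcm_to_tb() are inverses):
 *
 *	struct tb *tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(struct tb_cm));
 *	struct tb_cm *tcm = tb_priv(tb);
 *
 *	tcm == (void *)tb + sizeof(*tb), hence tcm_to_tb(tcm) == tb
 */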

struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}
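
/*
 * Illustrative sketch of the event flow: the control channel callback
 * tb_handle_event() below acks the event and queues it here; the work
 * item then runs tb_handle_hotplug() on tb->wq, which kfree()s @ev when
 * done. For example, a plug event for port 3 of the router at route 0x3
 * would be queued as:
 *
 *	tb_queue_hotplug(tb, 0x3, 3, false);
 */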

/* enumeration & hot plug handling */

static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}

static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP IN resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}

static void tb_discover_tunnels(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port);
			break;

		default:
			break;
		}

		if (!tunnel)
			continue;

		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		} else if (tb_tunnel_is_dp(tunnel)) {
			/* Keep the domain from powering down */
			pm_runtime_get_sync(&tunnel->src_port->sw->dev);
			pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
		}

		list_add_tail(&tunnel->list, &tcm->tunnel_list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_discover_tunnels(port->remote->sw);
	}
}

static int tb_port_configure_xdomain(struct tb_port *port)
{
	/*
	 * XDomain paths currently only support a single lane so we must
	 * disable the other lane according to the USB4 spec.
	 */
	tb_port_disable(port->dual_link_port);

	if (tb_switch_is_usb4(port->sw))
		return usb4_port_configure_xdomain(port);
	return tb_lc_configure_xdomain(port);
}

static void tb_port_unconfigure_xdomain(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		usb4_port_unconfigure_xdomain(port);
	else
		tb_lc_unconfigure_xdomain(port);

	tb_port_enable(port->dual_link_port);
}

static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	if (!tb_is_xdomain_enabled())
		return;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_port_configure_xdomain(port);
		tb_xdomain_add(xd);
	}
}

static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down && !tb_usb3_port_is_enabled(down))
		return down;
	return NULL;
}

static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}

static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
						   struct tb_port *src_port,
						   struct tb_port *dst_port)
{
	struct tb_port *port, *usb3_down;
	struct tb_switch *sw;

	/* Pick the router that is deepest in the topology */
	if (dst_port->sw->config.depth > src_port->sw->config.depth)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	/* Can't be the host router */
	if (sw == tb->root_switch)
		return NULL;

	/* Find the downstream USB4 port that leads to this router */
	port = tb_port_at(tb_route(sw), tb->root_switch);
	/* Find the corresponding host router USB3 downstream port */
	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
	if (!usb3_down)
		return NULL;

	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
}
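
/*
 * Illustrative example (hypothetical chain): for host -> A -> B with
 * dst_port on B, B is the deepest router, tb_port_at(tb_route(B),
 * tb->root_switch) is the host downstream port leading to the whole
 * branch, and the tunnel returned is the first-hop USB3 tunnel between
 * the host USB3 down adapter and the USB3 up adapter of A.
 */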

static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
	struct tb_port *dst_port, int *available_up, int *available_down)
{
	int usb3_consumed_up, usb3_consumed_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_port *port;

	tb_port_dbg(dst_port, "calculating available bandwidth\n");

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel) {
		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
						   &usb3_consumed_down);
		if (ret)
			return ret;
	} else {
		usb3_consumed_up = 0;
		usb3_consumed_down = 0;
	}

	*available_up = *available_down = 40000;

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int link_speed, link_width, up_bw, down_bw;

		if (!tb_port_is_null(port))
			continue;

		if (tb_is_upstream_port(port)) {
			link_speed = port->sw->link_speed;
		} else {
			link_speed = tb_port_get_link_speed(port);
			if (link_speed < 0)
				return link_speed;
		}

		link_width = port->bonded ? 2 : 1;

		up_bw = link_speed * link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		up_bw -= up_bw / 10;
		down_bw = up_bw;

		tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);

		/*
		 * Find all DP tunnels that cross the port and subtract
		 * their consumed bandwidth from the available bandwidth.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int dp_consumed_up, dp_consumed_down;

			if (!tb_tunnel_is_dp(tunnel))
				continue;

			if (!tb_tunnel_port_on_path(tunnel, port))
				continue;

			ret = tb_tunnel_consumed_bandwidth(tunnel,
							   &dp_consumed_up,
							   &dp_consumed_down);
			if (ret)
				return ret;

			up_bw -= dp_consumed_up;
			down_bw -= dp_consumed_down;
		}

		/*
		 * If USB3 is tunneled from the host router down to the
		 * branch leading to port we need to take the USB3
		 * consumed bandwidth into account regardless of whether
		 * it actually crosses the port.
		 */
		up_bw -= usb3_consumed_up;
		down_bw -= usb3_consumed_down;

		if (up_bw < *available_up)
			*available_up = up_bw;
		if (down_bw < *available_down)
			*available_down = down_bw;
	}

	if (*available_up < 0)
		*available_up = 0;
	if (*available_down < 0)
		*available_down = 0;

	return 0;
}
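
/*
 * Worked example with illustrative numbers only: a bonded Gen 3 link
 * has link_speed = 20 and link_width = 2, so it carries 20 * 2 * 1000 =
 * 40000 Mb/s and the 10% guard band leaves 36000 Mb/s. If a DP tunnel
 * consuming 17280 Mb/s one way and 1440 Mb/s the other crosses that
 * link, tb_available_bandwidth() reports 18720/34560 Mb/s for it.
 */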

static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
}

static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
				      struct tb_port *dst_port)
{
	int ret, available_up, available_down;
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return;

	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");

	/*
	 * Calculate available bandwidth for the first hop USB3 tunnel.
	 * That determines the whole USB3 bandwidth for this branch.
	 */
	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
				     &available_up, &available_down);
	if (ret) {
		tb_warn(tb, "failed to calculate available bandwidth\n");
		return;
	}

	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
	       available_up, available_down);

	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
}
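
/*
 * The two helpers above bracket tunnel creation (see tb_tunnel_usb3()
 * and tb_tunnel_dp() below): release all unused USB3 bandwidth on the
 * branch first, compute what is available, allocate and activate the
 * new tunnel, then let USB3 reclaim whatever is still free. Condensed
 * sketch of the pattern as used for DP:
 *
 *	tb_release_unused_usb3_bandwidth(tb, in, out);
 *	tb_available_bandwidth(tb, in, out, &available_up, &available_down);
 *	tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down);
 *	tb_tunnel_activate(tunnel);
 *	tb_reclaim_usb3_bandwidth(tb, in, out);
 */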

static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_usb3()) {
		tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
		return 0;
	}

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	if (!sw->link_usb4)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_port_at(tb_route(sw), parent);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;

		/* Make all unused bandwidth available for the new tunnel */
		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
		if (ret)
			return ret;
	}

	ret = tb_available_bandwidth(tb, down, up, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return ret;
}

static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (!tb_acpi_may_tunnel_usb3())
		return 0;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_scan_port(struct tb_port *port);

/*
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	pm_runtime_get_sync(&sw->dev);

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}

/*
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		return;
	}

	tb_retimer_scan(port, true);

	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		return;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		tb_port_unconfigure_xdomain(port);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	/*
	 * At the moment runtime PM is supported only on Thunderbolt 2
	 * and beyond (devices with LC).
	 */
	sw->rpm = sw->generation > 1;

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	tb_switch_lane_bonding_enable(sw);
	/* Set the link configured */
	tb_switch_configure_link(sw);

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/* Scan upstream retimers */
	tb_retimer_scan(upstream_port, true);

	/*
	 * Create USB 3.x tunnels only when the switch is plugged into the
	 * domain. This is because we also scan the domain during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * any new ones.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);
	tb_scan_switch(sw);
}

static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	struct tb_port *src_port, *dst_port;
	struct tb *tb;

	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	tb = tunnel->tb;
	src_port = tunnel->src_port;
	dst_port = tunnel->dst_port;

	switch (tunnel->type) {
	case TB_TUNNEL_DP:
		/*
		 * In case of DP tunnel make sure the DP IN resource is
		 * deallocated properly.
		 */
		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
		/* Now we can allow the domain to runtime suspend again */
		pm_runtime_mark_last_busy(&dst_port->sw->dev);
		pm_runtime_put_autosuspend(&dst_port->sw->dev);
		pm_runtime_mark_last_busy(&src_port->sw->dev);
		pm_runtime_put_autosuspend(&src_port->sw->dev);
		fallthrough;

	case TB_TUNNEL_USB3:
		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
		break;

	default:
		/*
		 * PCIe and DMA tunnels do not consume guaranteed
		 * bandwidth.
		 */
		break;
	}

	tb_tunnel_free(tunnel);
}

/*
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

/*
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		if (tb_pci_port_is_enabled(down))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
	struct tb_port *host_port, *port;
	struct tb_cm *tcm = tb_priv(tb);

	host_port = tb_route(in->sw) ?
		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;

	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpout(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP OUT available\n");

		/*
		 * Keep the DP tunnel under the topology starting from
		 * the same host router downstream port.
		 */
		if (host_port && tb_route(port->sw)) {
			struct tb_port *p;

			p = tb_port_at(tb_route(port->sw), tb->root_switch);
			if (p != host_port)
				continue;
		}

		return port;
	}

	return NULL;
}

static void tb_tunnel_dp(struct tb *tb)
{
	int available_up, available_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_dp()) {
		tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
		return;
	}

	/*
	 * Find a pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpin(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP IN available\n");

		out = tb_find_dp_out(tb, port);
		if (out) {
			in = port;
			break;
		}
	}

	if (!in) {
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
		return;
	}
	if (!out) {
		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
		return;
	}

	/*
	 * DP stream needs the domain to be active so runtime resume
	 * both ends of the tunnel.
	 *
	 * This should bring the routers in the middle active as well
	 * and keeps the domain from runtime suspending while the DP
	 * tunnel is active.
	 */
	pm_runtime_get_sync(&in->sw->dev);
	pm_runtime_get_sync(&out->sw->dev);

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		goto err_rpm_put;
	}

	/* Make all unused USB3 bandwidth available for the new DP tunnel */
	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
	if (ret) {
		tb_warn(tb, "failed to release unused bandwidth\n");
		goto err_dealloc_dp;
	}

	ret = tb_available_bandwidth(tb, in, out, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
	       available_up, available_down);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	tb_reclaim_usb3_bandwidth(tb, in, out);
	return;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	tb_reclaim_usb3_bandwidth(tb, in, out);
err_dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
err_rpm_put:
	pm_runtime_mark_last_busy(&out->sw->dev);
	pm_runtime_put_autosuspend(&out->sw->dev);
	pm_runtime_mark_last_busy(&in->sw->dev);
	pm_runtime_put_autosuspend(&in->sw->dev);
}

static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	if (tb_port_is_dpin(port)) {
		tb_port_dbg(port, "DP IN resource unavailable\n");
		in = port;
		out = NULL;
	} else {
		tb_port_dbg(port, "DP OUT resource unavailable\n");
		in = NULL;
		out = port;
	}

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
	tb_deactivate_and_free_tunnel(tunnel);
	list_del_init(&port->list);

	/*
	 * See if there is another DP OUT port that can be used to
	 * create another tunnel.
	 */
	tb_tunnel_dp(tb);
}

static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	if (tb_port_is_enabled(port))
		return;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);

	/* Look for suitable DP IN <-> DP OUT pairs now */
	tb_tunnel_dp(tb);
}

static void tb_disconnect_and_release_dp(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	/*
	 * Tear down all DP tunnels and release their resources. They
	 * will be re-established after resume based on plug events.
	 */
	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}

	while (!list_empty(&tcm->dp_resources)) {
		struct tb_port *port;

		port = list_first_entry(&tcm->dp_resources,
					struct tb_port, list);
		list_del_init(&port->list);
	}
}

static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_tunnel *tunnel;
	struct tb_port *up;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (WARN_ON(!up))
		return -ENODEV;

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
	if (WARN_ON(!tunnel))
		return -ENODEV;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);
	tb_tunnel_free(tunnel);
	return 0;
}

static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *parent_sw;
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	parent_sw = tb_to_switch(sw->dev.parent);
	port = tb_port_at(tb_route(sw), parent_sw);
	down = tb_find_pcie_down(parent_sw, port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				    int transmit_path, int transmit_ring,
				    int receive_path, int receive_ring)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	mutex_lock(&tb->lock);
	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
				     transmit_ring, receive_path, receive_ring);
	if (!tunnel) {
		mutex_unlock(&tb->lock);
		return -ENOMEM;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(nhi_port,
			     "DMA tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		mutex_unlock(&tb->lock);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	mutex_unlock(&tb->lock);
	return 0;
}

static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
					  int transmit_path, int transmit_ring,
					  int receive_path, int receive_ring)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel, *n;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (!tb_tunnel_is_dma(tunnel))
			continue;
		if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
			continue;

		if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
					receive_path, receive_ring))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				       int transmit_path, int transmit_ring,
				       int receive_path, int receive_ring)
{
	if (!xd->is_unplugged) {
		mutex_lock(&tb->lock);
		__tb_disconnect_xdomain_paths(tb, xd, transmit_path,
					      transmit_ring, receive_path,
					      receive_ring);
		mutex_unlock(&tb->lock);
	}
	return 0;
}

/* hotplug handling */

/*
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;

	/* Bring the domain back from sleep if it was suspended */
	pm_runtime_get_sync(&tb->dev);

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}

	pm_runtime_get_sync(&sw->dev);

	if (ev->unplug) {
		tb_retimer_remove_all(port);

		if (tb_port_has_remote(port)) {
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_tmu_disable(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
			/* Maybe we can create another DP tunnel */
			tb_tunnel_dp(tb);
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * all the tunnels below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
			tb_xdomain_put(xd);
			tb_port_unconfigure_xdomain(port);
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_unavailable(tb, port);
		} else {
			tb_port_dbg(port,
				   "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else {
		if (tb_port_is_null(port)) {
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_available(tb, port);
		}
	}

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);

	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);

	kfree(ev);
}

/*
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates hotplug events to tb_handle_hotplug() via tb_queue_hotplug().
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route;

	if (type != TB_CFG_PKG_EVENT) {
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	route = tb_cfg_get_route(&pkg->header);

	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	cancel_delayed_work(&tcm->remove_work);
	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already set up by the
		 * boot firmware, mark it as authorized now before we
		 * send the uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}

static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware, and that is not
	 * available in native mode, so disable firmware upgrade of the
	 * root switch.
	 */
	tb->root_switch->no_nvm_upgrade = true;
	/* All USB4 routers support runtime PM */
	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Enable TMU if it is off */
	tb_switch_tmu_enable(tb->root_switch);
	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb->root_switch);
	/*
	 * If the boot firmware did not create USB 3.x tunnels create them
	 * now for the whole topology.
	 */
	tb_create_usb3_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_disconnect_and_release_dp(tb);
	tb_switch_suspend(tb->root_switch, false);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}

static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;

	/* No need to restore if the router is already unplugged */
	if (sw->is_unplugged)
		return;

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (port->remote) {
			tb_switch_lane_bonding_enable(port->remote->sw);
			tb_switch_configure_link(port->remote->sw);

			tb_restore_children(port->remote->sw);
		} else if (port->xdomain) {
			tb_port_configure_xdomain(port);
		}
	}
}

static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	tb_dbg(tb, "resuming...\n");

	/* Remove any PCIe devices the firmware might have set up */
	tb_switch_reset(tb->root_switch);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * the pcie links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}

static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_xdomain_remove(port->xdomain);
			tb_port_unconfigure_xdomain(port);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}

static int tb_freeze_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = false;
	return 0;
}

static int tb_thaw_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = true;
	return 0;
}

static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains. If another domain has been
	 * swapped in place of an unplugged XDomain we need to run
	 * another rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}

static int tb_runtime_suspend(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	mutex_lock(&tb->lock);
	tb_switch_suspend(tb->root_switch, true);
	tcm->hotplug_active = false;
	mutex_unlock(&tb->lock);

	return 0;
}

static void tb_remove_work(struct work_struct *work)
{
	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
	struct tb *tb = tcm_to_tb(tcm);

	mutex_lock(&tb->lock);
	if (tb->root_switch) {
		tb_free_unplugged_children(tb->root_switch);
		tb_free_unplugged_xdomains(tb->root_switch);
	}
	mutex_unlock(&tb->lock);
}

static int tb_runtime_resume(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	mutex_lock(&tb->lock);
	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	tcm->hotplug_active = true;
	mutex_unlock(&tb->lock);

	/*
	 * Schedule cleanup of any unplugged devices. Run this in a
	 * separate thread to avoid possible deadlock if the device
	 * removal runtime resumes the unplugged device.
	 */
	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
	return 0;
}

static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.freeze_noirq = tb_freeze_noirq,
	.thaw_noirq = tb_thaw_noirq,
	.complete = tb_complete,
	.runtime_suspend = tb_runtime_suspend,
	.runtime_resume = tb_runtime_resume,
	.handle_event = tb_handle_event,
	.disapprove_switch = tb_disconnect_pci,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

/*
 * During suspend the Thunderbolt controller is reset and all PCIe
 * tunnels are lost. The NHI driver will try to reestablish all tunnels
 * during resume. This adds device links between the tunneled PCIe
 * downstream ports and the NHI so that the device core will make sure
 * NHI is resumed first before the rest.
 */
static void tb_apple_add_links(struct tb_nhi *nhi)
{
	struct pci_dev *upstream, *pdev;

	if (!x86_apple_machine)
		return;

	switch (nhi->pdev->device) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
		break;
	default:
		return;
	}

	upstream = pci_upstream_bridge(nhi->pdev);
	while (upstream) {
		if (!pci_is_pcie(upstream))
			return;
		if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
			break;
		upstream = pci_upstream_bridge(upstream);
	}

	if (!upstream)
		return;

	/*
	 * For each hotplug downstream port, add a device link back to
	 * the NHI so that PCIe tunnels can be re-established after
	 * sleep.
	 */
	for_each_pci_bridge(pdev, upstream->subordinate) {
		const struct device_link *link;

		if (!pci_is_pcie(pdev))
			continue;
		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
		    !pdev->is_hotplug_bridge)
			continue;

		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
				       DL_FLAG_AUTOREMOVE_SUPPLIER |
				       DL_FLAG_PM_RUNTIME);
		if (link) {
			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
				dev_name(&pdev->dev));
		} else {
			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
				 dev_name(&pdev->dev));
		}
	}
}

struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
	if (!tb)
		return NULL;

	if (tb_acpi_may_tunnel_pcie())
		tb->security_level = TB_SECURITY_USER;
	else
		tb->security_level = TB_SECURITY_NOPCIE;

	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);
	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);

	tb_dbg(tb, "using software connection manager\n");

	tb_apple_add_links(nhi);
	tb_acpi_add_links(nhi);

	return tb;
}
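
/*
 * Usage sketch (assumption: mirrors how the NHI driver falls back to
 * the software connection manager; error handling omitted).
 * tb_domain_add() ends up calling tb_start() above through tb_cm_ops:
 *
 *	struct tb *tb = tb_probe(nhi);
 *
 *	if (tb)
 *		err = tb_domain_add(tb);
 */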