// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
};

struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

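/*
 * Allocate a hotplug event and hand it to the domain workqueue;
 * tb_handle_hotplug() then processes it asynchronously under
 * tb->lock. If the allocation fails the event is silently dropped.
 */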
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}

static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP %s resource unavailable\n",
				    tb_port_is_dpin(port) ? "IN" : "OUT");
			list_del_init(&port->list);
		}
	}
}

static void tb_discover_tunnels(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port);
			break;

		default:
			break;
		}

		if (!tunnel)
			continue;

		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		}

		list_add_tail(&tunnel->list, &tcm->tunnel_list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_discover_tunnels(port->remote->sw);
	}
}

static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_xdomain_add(xd);
	}
}

static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/* If it is already enabled in the correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down) {
		if (WARN_ON(!tb_port_is_usb3_down(down)))
			goto out;
		if (WARN_ON(tb_usb3_port_is_enabled(down)))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_USB3_DOWN);
}

static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	if (!sw->link_usb4)
		return 0;

	/*
	 * Look up the available down port. Since we are chaining, it
	 * should be found right above this switch.
	 */
	port = tb_port_at(tb_route(sw), parent);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;
	}

	tunnel = tb_tunnel_alloc_usb3(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
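 * @sw: Switch to scan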
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);
}

/**
 * tb_scan_port() - check for and initialize switches below port
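 * @port: Port to scan for a downstream switch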
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		return;
	}
	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch,
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		return;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * If there was previously another domain connected, remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	if (tb_switch_lane_bonding_enable(sw))
		tb_sw_warn(sw, "failed to enable lane bonding\n");

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * any new ones.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);
	tb_scan_switch(sw);
}

static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}

static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	/*
	 * In case of DP tunnel make sure the DP IN resource is deallocated
	 * properly.
	 */
	if (tb_tunnel_is_dp(tunnel)) {
		struct tb_port *in = tunnel->src_port;

		tb_switch_dealloc_dp_resource(in->sw, in);
	}

	tb_tunnel_free(tunnel);
}

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
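 * @tb: Domain whose tunnels to check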
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
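 * @sw: Switch whose hierarchy is traversed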
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		if (WARN_ON(tb_pci_port_is_enabled(down)))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

static int tb_available_bw(struct tb_cm *tcm, struct tb_port *in,
			   struct tb_port *out)
{
	struct tb_switch *sw = out->sw;
	struct tb_tunnel *tunnel;
	int bw, available_bw = 40000;

	while (sw && sw != in->sw) {
		bw = sw->link_speed * sw->link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		bw -= bw / 10;
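		/*
		 * Illustration (hypothetical link): a bonded 20 Gb/s x2
		 * link reports link_speed = 20 and link_width = 2, i.e.
		 * 40000 Mb/s raw, so 36000 Mb/s remain here before the
		 * bandwidth consumed by DP tunnels on the path is
		 * subtracted below.
		 */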

		/*
		 * Check for any active DP tunnels that go through this
		 * switch and subtract their consumed bandwidth from
		 * what is available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int consumed_bw;

			if (!tb_tunnel_switch_on_path(tunnel, sw))
				continue;

			consumed_bw = tb_tunnel_consumed_bandwidth(tunnel);
			if (consumed_bw < 0)
				return consumed_bw;

			bw -= consumed_bw;
		}

		if (bw < available_bw)
			available_bw = bw;

		sw = tb_switch_parent(sw);
	}

	return available_bw;
}

static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
	struct tb_port *host_port, *port;
	struct tb_cm *tcm = tb_priv(tb);

	host_port = tb_route(in->sw) ?
		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;

	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpout(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP OUT available\n");

		/*
		 * Keep the DP tunnel under the topology starting from
		 * the same host router downstream port.
		 */
		if (host_port && tb_route(port->sw)) {
			struct tb_port *p;

			p = tb_port_at(tb_route(port->sw), tb->root_switch);
			if (p != host_port)
				continue;
		}

		return port;
	}

	return NULL;
}

static void tb_tunnel_dp(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	struct tb_tunnel *tunnel;
	int available_bw;

	/*
	 * Find a pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpin(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP IN available\n");

		out = tb_find_dp_out(tb, port);
		if (out) {
			in = port;
			break;
		}
	}

	if (!in) {
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
		return;
	}
	if (!out) {
		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
		return;
	}

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		return;
	}

	/* Calculate available bandwidth between in and out */
	available_bw = tb_available_bw(tcm, in, out);
	if (available_bw < 0) {
		tb_warn(tb, "failed to determine available bandwidth\n");
		return;
	}

	tb_dbg(tb, "available bandwidth for new DP tunnel %u Mb/s\n",
	       available_bw);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, available_bw);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto dealloc_dp;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		goto dealloc_dp;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return;

dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
}

static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	if (tb_port_is_dpin(port)) {
		tb_port_dbg(port, "DP IN resource unavailable\n");
		in = port;
		out = NULL;
	} else {
		tb_port_dbg(port, "DP OUT resource unavailable\n");
		in = NULL;
		out = port;
	}

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
	tb_deactivate_and_free_tunnel(tunnel);
	list_del_init(&port->list);

	/*
	 * See if there is another DP OUT port that can be used to
	 * create another tunnel.
	 */
	tb_tunnel_dp(tb);
}

static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	if (tb_port_is_enabled(port))
		return;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);

	/* Look for suitable DP IN <-> DP OUT pairs now */
	tb_tunnel_dp(tb);
}

static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *parent_sw;
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up the available down port. Since we are chaining, it
	 * should be found right above this switch.
	 */
	parent_sw = tb_to_switch(sw->dev.parent);
	port = tb_port_at(tb_route(sw), parent_sw);
	down = tb_find_pcie_down(parent_sw, port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	mutex_lock(&tb->lock);
	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
				     xd->transmit_path, xd->receive_ring,
				     xd->receive_path);
	if (!tunnel) {
		mutex_unlock(&tb->lock);
		return -ENOMEM;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(nhi_port,
			     "DMA tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		mutex_unlock(&tb->lock);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	mutex_unlock(&tb->lock);
	return 0;
}

static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_port *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);

	/*
	 * It is possible that the tunnel was already torn down (in
	 * case of cable disconnect), so it is fine if we cannot find it
	 * here anymore.
	 */
	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
	tb_deactivate_and_free_tunnel(tunnel);
}

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!xd->is_unplugged) {
		mutex_lock(&tb->lock);
		__tb_disconnect_xdomain_paths(tb, xd);
		mutex_unlock(&tb->lock);
	}
	return 0;
}

/* hotplug handling */

/**
 * tb_handle_hotplug() - handle hotplug event
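 * @work: Work item of the queued hotplug event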
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	if (ev->unplug) {
		if (tb_port_has_remote(port)) {
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_tmu_disable(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
			/* Maybe we can create another DP tunnel */
			tb_tunnel_dp(tb);
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * the path below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd);
			tb_xdomain_put(xd);
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_unavailable(tb, port);
		} else {
			tb_port_dbg(port,
				   "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else {
		if (tb_port_is_null(port)) {
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_available(tb, port);
		}
	}

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);
	kfree(ev);
}

/**
 * tb_handle_event() - callback function for the control channel
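 * @tb: Domain the event belongs to
 * @type: Type of the received configuration packet
 * @buf: Event packet
 * @size: Size of the packet in bytes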
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route;

	if (type != TB_CFG_PKG_EVENT) {
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	route = tb_cfg_get_route(&pkg->header);

	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already set up by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}

static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware, and in native
	 * mode that is not available, so disable firmware upgrade of the
	 * root switch.
	 */
	tb->root_switch->no_nvm_upgrade = true;

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Enable TMU if it is off */
	tb_switch_tmu_enable(tb->root_switch);
	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb->root_switch);
	/*
	 * If the boot firmware did not create USB 3.x tunnels, create them
	 * now for the whole topology.
	 */
	tb_create_usb3_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_switch_suspend(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}

static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (tb_switch_lane_bonding_enable(port->remote->sw))
			dev_warn(&sw->dev, "failed to restore lane bonding\n");

		tb_restore_children(port->remote->sw);
	}
}

static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have set up */
	tb_switch_reset(tb, 0);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * The PCIe links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}

static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}

static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains; if another domain has been
	 * swapped in place of an unplugged XDomain we need to run
	 * another rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}

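/*
 * Connection manager operations for the software connection manager;
 * the Thunderbolt domain core invokes these hooks for driver
 * lifecycle, hotplug events and tunnel approval.
 */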
static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.complete = tb_complete,
	.handle_event = tb_handle_event,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

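/*
 * tb_probe() - allocate a domain run by the software connection manager
 *
 * Returns NULL if the domain cannot be allocated. The security level
 * defaults to "user".
 */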
struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, sizeof(*tcm));
	if (!tb)
		return NULL;

	tb->security_level = TB_SECURITY_USER;
	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);

	return tb;
}
1173