1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Thunderbolt driver - bus logic (NHI independent)
4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 * Copyright (C) 2019, Intel Corporation
7 */
8
9 #include <linux/slab.h>
10 #include <linux/errno.h>
11 #include <linux/delay.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/platform_data/x86/apple.h>
14
15 #include "tb.h"
16 #include "tb_regs.h"
17 #include "tunnel.h"
18
19 #define TB_TIMEOUT 100 /* ms */
20
21 /*
22 * Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
23 * direction. This is 40G - 10% guard band bandwidth.
24 */
25 #define TB_ASYM_MIN (40000 * 90 / 100)
26
27 /*
28 * Threshold bandwidth (in Mb/s) that is used to switch the links to
29 * asymmetric and back. This is selected as 45G which means when the
30 * request is higher than this, we switch the link to asymmetric, and
31 * when it is less than this we switch it back. The 45G is selected so
32 * that we still have 27G (of the total 72G, i.e. 80 Gb/s minus the 10%
33 * guard band) for bulk PCIe traffic when switching back to symmetric.
34 */
35 #define TB_ASYM_THRESHOLD 45000
36
37 #define MAX_GROUPS 7 /* max Group_ID is 7 */
38
39 static unsigned int asym_threshold = TB_ASYM_THRESHOLD;
40 module_param_named(asym_threshold, asym_threshold, uint, 0444);
41 MODULE_PARM_DESC(asym_threshold,
42 "threshold (Mb/s) when to Gen 4 switch link symmetry. 0 disables. (default: "
43 __MODULE_STRING(TB_ASYM_THRESHOLD) ")");
44
45 /**
46 * struct tb_cm - Simple Thunderbolt connection manager
47 * @tunnel_list: List of active tunnels
48 * @dp_resources: List of available DP resources for DP tunneling
49 * @hotplug_active: tb_handle_hotplug will stop progressing plug
50 * events and exit if this is not set (it needs to
51 * acquire the lock one more time). Used to drain wq
52 * after cfg has been paused.
53 * @remove_work: Work used to remove any unplugged routers after
54 * runtime resume
55 * @groups: Bandwidth groups used in this domain.
56 */
57 struct tb_cm {
58 struct list_head tunnel_list;
59 struct list_head dp_resources;
60 bool hotplug_active;
61 struct delayed_work remove_work;
62 struct tb_bandwidth_group groups[MAX_GROUPS];
63 };
64
65 static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
66 {
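/*
 * tb_priv() hands out storage placed immediately after struct tb in the
 * same allocation, so stepping back sizeof(struct tb) from @tcm recovers
 * the domain structure.
 */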
67 return ((void *)tcm - sizeof(struct tb));
68 }
69
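/*
 * Deferred hotplug event. Queued to the domain workqueue by
 * tb_queue_hotplug() and handled in tb_handle_hotplug().
 */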
70 struct tb_hotplug_event {
71 struct work_struct work;
72 struct tb *tb;
73 u64 route;
74 u8 port;
75 bool unplug;
76 };
77
78 static void tb_init_bandwidth_groups(struct tb_cm *tcm)
79 {
80 int i;
81
82 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
83 struct tb_bandwidth_group *group = &tcm->groups[i];
84
85 group->tb = tcm_to_tb(tcm);
86 group->index = i + 1;
87 INIT_LIST_HEAD(&group->ports);
88 }
89 }
90
91 static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
92 struct tb_port *in)
93 {
94 if (!group || WARN_ON(in->group))
95 return;
96
97 in->group = group;
98 list_add_tail(&in->group_list, &group->ports);
99
100 tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
101 }
102
103 static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
104 {
105 int i;
106
107 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
108 struct tb_bandwidth_group *group = &tcm->groups[i];
109
110 if (list_empty(&group->ports))
111 return group;
112 }
113
114 return NULL;
115 }
116
117 static struct tb_bandwidth_group *
118 tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
119 struct tb_port *out)
120 {
121 struct tb_bandwidth_group *group;
122 struct tb_tunnel *tunnel;
123
124 /*
125 * Find all DP tunnels that go through all the same USB4 links
126 * as this one. Because we always setup tunnels the same way we
127 * can just check for the routers at both ends of the tunnels
128 * and if they are the same we have a match.
129 */
130 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
131 if (!tb_tunnel_is_dp(tunnel))
132 continue;
133
134 if (tunnel->src_port->sw == in->sw &&
135 tunnel->dst_port->sw == out->sw) {
136 group = tunnel->src_port->group;
137 if (group) {
138 tb_bandwidth_group_attach_port(group, in);
139 return group;
140 }
141 }
142 }
143
144 /* Otherwise pick up the next available group */
145 group = tb_find_free_bandwidth_group(tcm);
146 if (group)
147 tb_bandwidth_group_attach_port(group, in);
148 else
149 tb_port_warn(in, "no available bandwidth groups\n");
150
151 return group;
152 }
153
154 static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
155 struct tb_port *out)
156 {
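/*
 * If the DP IN adapter already has bandwidth allocation mode enabled
 * (tunnel set up by the boot firmware), reuse the Group_ID it was
 * assigned instead of picking a new group.
 */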
157 if (usb4_dp_port_bandwidth_mode_enabled(in)) {
158 int index, i;
159
160 index = usb4_dp_port_group_id(in);
161 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
162 if (tcm->groups[i].index == index) {
163 tb_bandwidth_group_attach_port(&tcm->groups[i], in);
164 return;
165 }
166 }
167 }
168
169 tb_attach_bandwidth_group(tcm, in, out);
170 }
171
172 static void tb_detach_bandwidth_group(struct tb_port *in)
173 {
174 struct tb_bandwidth_group *group = in->group;
175
176 if (group) {
177 in->group = NULL;
178 list_del_init(&in->group_list);
179
180 tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
181 }
182 }
183
184 static void tb_handle_hotplug(struct work_struct *work);
185
186 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
187 {
188 struct tb_hotplug_event *ev;
189
190 ev = kmalloc(sizeof(*ev), GFP_KERNEL);
191 if (!ev)
192 return;
193
194 ev->tb = tb;
195 ev->route = route;
196 ev->port = port;
197 ev->unplug = unplug;
198 INIT_WORK(&ev->work, tb_handle_hotplug);
199 queue_work(tb->wq, &ev->work);
200 }
201
202 /* enumeration & hot plug handling */
203
204 static void tb_add_dp_resources(struct tb_switch *sw)
205 {
206 struct tb_cm *tcm = tb_priv(sw->tb);
207 struct tb_port *port;
208
209 tb_switch_for_each_port(sw, port) {
210 if (!tb_port_is_dpin(port))
211 continue;
212
213 if (!tb_switch_query_dp_resource(sw, port))
214 continue;
215
216 list_add_tail(&port->list, &tcm->dp_resources);
217 tb_port_dbg(port, "DP IN resource available\n");
218 }
219 }
220
221 static void tb_remove_dp_resources(struct tb_switch *sw)
222 {
223 struct tb_cm *tcm = tb_priv(sw->tb);
224 struct tb_port *port, *tmp;
225
226 /* Clear children resources first */
227 tb_switch_for_each_port(sw, port) {
228 if (tb_port_has_remote(port))
229 tb_remove_dp_resources(port->remote->sw);
230 }
231
232 list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
233 if (port->sw == sw) {
234 tb_port_dbg(port, "DP OUT resource unavailable\n");
235 list_del_init(&port->list);
236 }
237 }
238 }
239
240 static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
241 {
242 struct tb_cm *tcm = tb_priv(tb);
243 struct tb_port *p;
244
245 list_for_each_entry(p, &tcm->dp_resources, list) {
246 if (p == port)
247 return;
248 }
249
250 tb_port_dbg(port, "DP %s resource available discovered\n",
251 tb_port_is_dpin(port) ? "IN" : "OUT");
252 list_add_tail(&port->list, &tcm->dp_resources);
253 }
254
255 static void tb_discover_dp_resources(struct tb *tb)
256 {
257 struct tb_cm *tcm = tb_priv(tb);
258 struct tb_tunnel *tunnel;
259
260 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
261 if (tb_tunnel_is_dp(tunnel))
262 tb_discover_dp_resource(tb, tunnel->dst_port);
263 }
264 }
265
266 /* Enables CL states up to host router */
267 static int tb_enable_clx(struct tb_switch *sw)
268 {
269 struct tb_cm *tcm = tb_priv(sw->tb);
270 unsigned int clx = TB_CL0S | TB_CL1;
271 const struct tb_tunnel *tunnel;
272 int ret;
273
274 /*
275 * Currently only enable CLx for the first link. This is enough
276 * to allow the CPU to save energy at least on Intel hardware
277 * and makes it slightly simpler to implement. We may change
278 * this in the future to cover the whole topology if it turns
279 * out to be beneficial.
280 */
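/* Walk up to the depth 1 router; nothing to do for the host router itself */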
281 while (sw && tb_switch_depth(sw) > 1)
282 sw = tb_switch_parent(sw);
283
284 if (!sw)
285 return 0;
286
287 if (tb_switch_depth(sw) != 1)
288 return 0;
289
290 /*
291 * If we are re-enabling then check if there is an active DMA
292 * tunnel and in that case bail out.
293 */
294 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
295 if (tb_tunnel_is_dma(tunnel)) {
296 if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw)))
297 return 0;
298 }
299 }
300
301 /*
302 * Initially try with CL2. If that's not supported by the
303 * topology try with CL0s and CL1 and then give up.
304 */
305 ret = tb_switch_clx_enable(sw, clx | TB_CL2);
306 if (ret == -EOPNOTSUPP)
307 ret = tb_switch_clx_enable(sw, clx);
308 return ret == -EOPNOTSUPP ? 0 : ret;
309 }
310
311 /**
312 * tb_disable_clx() - Disable CL states up to host router
313 * @sw: Router to start
314 *
315 * Disables CL states from @sw up to the host router. Returns true if
316 * any CL states were disabled. This can be used to figure out whether
317 * the link was setup by us or the boot firmware so we don't
318 * accidentally enable them if they were not enabled during discovery.
319 */
320 static bool tb_disable_clx(struct tb_switch *sw)
321 {
322 bool disabled = false;
323
324 do {
325 int ret;
326
327 ret = tb_switch_clx_disable(sw);
328 if (ret > 0)
329 disabled = true;
330 else if (ret < 0)
331 tb_sw_warn(sw, "failed to disable CL states\n");
332
333 sw = tb_switch_parent(sw);
334 } while (sw);
335
336 return disabled;
337 }
338
339 static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data)
340 {
341 struct tb_switch *sw;
342
343 sw = tb_to_switch(dev);
344 if (!sw)
345 return 0;
346
347 if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) {
348 enum tb_switch_tmu_mode mode;
349 int ret;
350
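/* CL states need unidirectional TMU traffic, so pick HiFi uni when CL1 is enabled */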
351 if (tb_switch_clx_is_enabled(sw, TB_CL1))
352 mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
353 else
354 mode = TB_SWITCH_TMU_MODE_HIFI_BI;
355
356 ret = tb_switch_tmu_configure(sw, mode);
357 if (ret)
358 return ret;
359
360 return tb_switch_tmu_enable(sw);
361 }
362
363 return 0;
364 }
365
366 static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel)
367 {
368 struct tb_switch *sw;
369
370 if (!tunnel)
371 return;
372
373 /*
374 * Once first DP tunnel is established we change the TMU
375 * accuracy of first depth child routers (and the host router)
376 * to the highest. This is needed for the DP tunneling to work
377 * but also allows CL0s.
378 *
379 * If both routers are v2 then we don't need to do anything as
380 * they are using enhanced TMU mode that allows all CLx.
381 */
382 sw = tunnel->tb->root_switch;
383 device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy);
384 }
385
386 static int tb_enable_tmu(struct tb_switch *sw)
387 {
388 int ret;
389
390 /*
391 * If both routers at the end of the link are v2 we simply
392 * enable the enhanced uni-directional mode. That covers all
393 * the CL states. For v1 and before we need to use the normal
394 * rate to allow CL1 (when supported). Otherwise we keep the TMU
395 * running at the highest accuracy.
396 */
397 ret = tb_switch_tmu_configure(sw,
398 TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI);
399 if (ret == -EOPNOTSUPP) {
400 if (tb_switch_clx_is_enabled(sw, TB_CL1))
401 ret = tb_switch_tmu_configure(sw,
402 TB_SWITCH_TMU_MODE_LOWRES);
403 else
404 ret = tb_switch_tmu_configure(sw,
405 TB_SWITCH_TMU_MODE_HIFI_BI);
406 }
407 if (ret)
408 return ret;
409
410 /* If it is already enabled in the correct mode, don't touch it */
411 if (tb_switch_tmu_is_enabled(sw))
412 return 0;
413
414 ret = tb_switch_tmu_disable(sw);
415 if (ret)
416 return ret;
417
418 ret = tb_switch_tmu_post_time(sw);
419 if (ret)
420 return ret;
421
422 return tb_switch_tmu_enable(sw);
423 }
424
425 static void tb_switch_discover_tunnels(struct tb_switch *sw,
426 struct list_head *list,
427 bool alloc_hopids)
428 {
429 struct tb *tb = sw->tb;
430 struct tb_port *port;
431
432 tb_switch_for_each_port(sw, port) {
433 struct tb_tunnel *tunnel = NULL;
434
435 switch (port->config.type) {
436 case TB_TYPE_DP_HDMI_IN:
437 tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
438 tb_increase_tmu_accuracy(tunnel);
439 break;
440
441 case TB_TYPE_PCIE_DOWN:
442 tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
443 break;
444
445 case TB_TYPE_USB3_DOWN:
446 tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
447 break;
448
449 default:
450 break;
451 }
452
453 if (tunnel)
454 list_add_tail(&tunnel->list, list);
455 }
456
457 tb_switch_for_each_port(sw, port) {
458 if (tb_port_has_remote(port)) {
459 tb_switch_discover_tunnels(port->remote->sw, list,
460 alloc_hopids);
461 }
462 }
463 }
464
465 static void tb_discover_tunnels(struct tb *tb)
466 {
467 struct tb_cm *tcm = tb_priv(tb);
468 struct tb_tunnel *tunnel;
469
470 tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
471
472 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
473 if (tb_tunnel_is_pci(tunnel)) {
474 struct tb_switch *parent = tunnel->dst_port->sw;
475
476 while (parent != tunnel->src_port->sw) {
477 parent->boot = true;
478 parent = tb_switch_parent(parent);
479 }
480 } else if (tb_tunnel_is_dp(tunnel)) {
481 struct tb_port *in = tunnel->src_port;
482 struct tb_port *out = tunnel->dst_port;
483
484 /* Keep the domain from powering down */
485 pm_runtime_get_sync(&in->sw->dev);
486 pm_runtime_get_sync(&out->sw->dev);
487
488 tb_discover_bandwidth_group(tcm, in, out);
489 }
490 }
491 }
492
493 static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
494 {
495 if (tb_switch_is_usb4(port->sw))
496 return usb4_port_configure_xdomain(port, xd);
497 return tb_lc_configure_xdomain(port);
498 }
499
500 static void tb_port_unconfigure_xdomain(struct tb_port *port)
501 {
502 if (tb_switch_is_usb4(port->sw))
503 usb4_port_unconfigure_xdomain(port);
504 else
505 tb_lc_unconfigure_xdomain(port);
506
507 tb_port_enable(port->dual_link_port);
508 }
509
510 static void tb_scan_xdomain(struct tb_port *port)
511 {
512 struct tb_switch *sw = port->sw;
513 struct tb *tb = sw->tb;
514 struct tb_xdomain *xd;
515 u64 route;
516
517 if (!tb_is_xdomain_enabled())
518 return;
519
520 route = tb_downstream_route(port);
521 xd = tb_xdomain_find_by_route(tb, route);
522 if (xd) {
523 tb_xdomain_put(xd);
524 return;
525 }
526
527 xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
528 NULL);
529 if (xd) {
530 tb_port_at(route, sw)->xdomain = xd;
531 tb_port_configure_xdomain(port, xd);
532 tb_xdomain_add(xd);
533 }
534 }
535
536 /**
537 * tb_find_unused_port() - return the first inactive port on @sw
538 * @sw: Switch to find the port on
539 * @type: Port type to look for
540 */
541 static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
542 enum tb_port_type type)
543 {
544 struct tb_port *port;
545
546 tb_switch_for_each_port(sw, port) {
547 if (tb_is_upstream_port(port))
548 continue;
549 if (port->config.type != type)
550 continue;
551 if (!port->cap_adap)
552 continue;
553 if (tb_port_is_enabled(port))
554 continue;
555 return port;
556 }
557 return NULL;
558 }
559
560 static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
561 const struct tb_port *port)
562 {
563 struct tb_port *down;
564
565 down = usb4_switch_map_usb3_down(sw, port);
566 if (down && !tb_usb3_port_is_enabled(down))
567 return down;
568 return NULL;
569 }
570
571 static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
572 struct tb_port *src_port,
573 struct tb_port *dst_port)
574 {
575 struct tb_cm *tcm = tb_priv(tb);
576 struct tb_tunnel *tunnel;
577
578 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
579 if (tunnel->type == type &&
580 ((src_port && src_port == tunnel->src_port) ||
581 (dst_port && dst_port == tunnel->dst_port))) {
582 return tunnel;
583 }
584 }
585
586 return NULL;
587 }
588
589 static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
590 struct tb_port *src_port,
591 struct tb_port *dst_port)
592 {
593 struct tb_port *port, *usb3_down;
594 struct tb_switch *sw;
595
596 /* Pick the router that is deepest in the topology */
597 if (tb_port_path_direction_downstream(src_port, dst_port))
598 sw = dst_port->sw;
599 else
600 sw = src_port->sw;
601
602 /* Can't be the host router */
603 if (sw == tb->root_switch)
604 return NULL;
605
606 /* Find the downstream USB4 port that leads to this router */
607 port = tb_port_at(tb_route(sw), tb->root_switch);
608 /* Find the corresponding host router USB3 downstream port */
609 usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
610 if (!usb3_down)
611 return NULL;
612
613 return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
614 }
615
616 /**
617 * tb_consumed_usb3_pcie_bandwidth() - Consumed USB3/PCIe bandwidth over a single link
618 * @tb: Domain structure
619 * @src_port: Source protocol adapter
620 * @dst_port: Destination protocol adapter
621 * @port: USB4 port for which the consumed bandwidth is calculated
622 * @consumed_up: Consumed upstream bandwidth (Mb/s)
623 * @consumed_down: Consumed downstream bandwidth (Mb/s)
624 *
625 * Calculates the consumed USB3 and PCIe bandwidth at @port on the path
626 * from @src_port to @dst_port. Does not take the tunnel starting from
627 * @src_port and ending at @dst_port into account.
628 */
629 static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
630 struct tb_port *src_port,
631 struct tb_port *dst_port,
632 struct tb_port *port,
633 int *consumed_up,
634 int *consumed_down)
635 {
636 int pci_consumed_up, pci_consumed_down;
637 struct tb_tunnel *tunnel;
638
639 *consumed_up = *consumed_down = 0;
640
641 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
642 if (tunnel && tunnel->src_port != src_port &&
643 tunnel->dst_port != dst_port) {
644 int ret;
645
646 ret = tb_tunnel_consumed_bandwidth(tunnel, consumed_up,
647 consumed_down);
648 if (ret)
649 return ret;
650 }
651
652 /*
653 * If there is anything reserved for PCIe bulk traffic take it
654 * into account here too.
655 */
656 if (tb_tunnel_reserved_pci(port, &pci_consumed_up, &pci_consumed_down)) {
657 *consumed_up += pci_consumed_up;
658 *consumed_down += pci_consumed_down;
659 }
660
661 return 0;
662 }
663
664 /**
665 * tb_consumed_dp_bandwidth() - Consumed DP bandwidth over a single link
666 * @tb: Domain structure
667 * @src_port: Source protocol adapter
668 * @dst_port: Destination protocol adapter
669 * @port: USB4 port for which the consumed bandwidth is calculated
670 * @consumed_up: Consumed upstream bandwidth (Mb/s)
671 * @consumed_down: Consumed downstream bandwidth (Mb/s)
672 *
673 * Calculates the consumed DP bandwidth at @port on the path from
674 * @src_port to @dst_port. Does not take the tunnel starting from
675 * @src_port and ending at @dst_port into account.
676 */
677 static int tb_consumed_dp_bandwidth(struct tb *tb,
678 struct tb_port *src_port,
679 struct tb_port *dst_port,
680 struct tb_port *port,
681 int *consumed_up,
682 int *consumed_down)
683 {
684 struct tb_cm *tcm = tb_priv(tb);
685 struct tb_tunnel *tunnel;
686 int ret;
687
688 *consumed_up = *consumed_down = 0;
689
690 /*
691 * Find all DP tunnels that cross the port and reduce
692 * their consumed bandwidth from the available.
693 */
694 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
695 int dp_consumed_up, dp_consumed_down;
696
697 if (tb_tunnel_is_invalid(tunnel))
698 continue;
699
700 if (!tb_tunnel_is_dp(tunnel))
701 continue;
702
703 if (!tb_tunnel_port_on_path(tunnel, port))
704 continue;
705
706 /*
707 * Ignore the DP tunnel between src_port and dst_port
708 * because it is the same tunnel and we may be
709 * re-calculating estimated bandwidth.
710 */
711 if (tunnel->src_port == src_port &&
712 tunnel->dst_port == dst_port)
713 continue;
714
715 ret = tb_tunnel_consumed_bandwidth(tunnel, &dp_consumed_up,
716 &dp_consumed_down);
717 if (ret)
718 return ret;
719
720 *consumed_up += dp_consumed_up;
721 *consumed_down += dp_consumed_down;
722 }
723
724 return 0;
725 }
726
727 static bool tb_asym_supported(struct tb_port *src_port, struct tb_port *dst_port,
728 struct tb_port *port)
729 {
730 bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
731 enum tb_link_width width;
732
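/*
 * Check the asymmetric width capability as seen from @port itself: an
 * upstream port receives the downstream traffic, so downstream asymmetry
 * maps to asymmetric RX there, and vice versa.
 */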
733 if (tb_is_upstream_port(port))
734 width = downstream ? TB_LINK_WIDTH_ASYM_RX : TB_LINK_WIDTH_ASYM_TX;
735 else
736 width = downstream ? TB_LINK_WIDTH_ASYM_TX : TB_LINK_WIDTH_ASYM_RX;
737
738 return tb_port_width_supported(port, width);
739 }
740
741 /**
742 * tb_maximum_bandwidth() - Maximum bandwidth over a single link
743 * @tb: Domain structure
744 * @src_port: Source protocol adapter
745 * @dst_port: Destination protocol adapter
746 * @port: USB4 port for which the total bandwidth is calculated
747 * @max_up: Maximum upstream bandwidth (Mb/s)
748 * @max_down: Maximum downstream bandwidth (Mb/s)
749 * @include_asym: Include bandwidth if the link is switched from
750 * symmetric to asymmetric
751 *
752 * Returns maximum possible bandwidth in @max_up and @max_down over a
753 * single link at @port. If @include_asym is set then includes the
754 * additional bandwidth available if the links are transitioned into
755 * asymmetric in the direction from @src_port to @dst_port.
756 */
757 static int tb_maximum_bandwidth(struct tb *tb, struct tb_port *src_port,
758 struct tb_port *dst_port, struct tb_port *port,
759 int *max_up, int *max_down, bool include_asym)
760 {
761 bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
762 int link_speed, link_width, up_bw, down_bw;
763
764 /*
765 * Asymmetric bandwidth can be included only if it is actually
766 * supported by the lane adapter.
767 */
768 if (!tb_asym_supported(src_port, dst_port, port))
769 include_asym = false;
770
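/*
 * Per-lane link speed is in Gb/s; an asymmetric Gen 4 link runs three
 * lanes in one direction and one in the other, hence the "* 3" and
 * "* 1" factors below. Multiplying by 1000 converts to Mb/s.
 */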
771 if (tb_is_upstream_port(port)) {
772 link_speed = port->sw->link_speed;
773 /*
774 * sw->link_width is from upstream perspective so we use
775 * the opposite for downstream of the host router.
776 */
777 if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
778 up_bw = link_speed * 3 * 1000;
779 down_bw = link_speed * 1 * 1000;
780 } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
781 up_bw = link_speed * 1 * 1000;
782 down_bw = link_speed * 3 * 1000;
783 } else if (include_asym) {
784 /*
785 * The link is symmetric at the moment but we
786 * can switch it to asymmetric as needed. Report
787 * this bandwidth as available (even though it
788 * is not yet enabled).
789 */
790 if (downstream) {
791 up_bw = link_speed * 1 * 1000;
792 down_bw = link_speed * 3 * 1000;
793 } else {
794 up_bw = link_speed * 3 * 1000;
795 down_bw = link_speed * 1 * 1000;
796 }
797 } else {
798 up_bw = link_speed * port->sw->link_width * 1000;
799 down_bw = up_bw;
800 }
801 } else {
802 link_speed = tb_port_get_link_speed(port);
803 if (link_speed < 0)
804 return link_speed;
805
806 link_width = tb_port_get_link_width(port);
807 if (link_width < 0)
808 return link_width;
809
810 if (link_width == TB_LINK_WIDTH_ASYM_TX) {
811 up_bw = link_speed * 1 * 1000;
812 down_bw = link_speed * 3 * 1000;
813 } else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
814 up_bw = link_speed * 3 * 1000;
815 down_bw = link_speed * 1 * 1000;
816 } else if (include_asym) {
817 /*
818 * The link is symmetric at the moment but we
819 * can switch it to asymmetric as needed. Report
820 * this bandwidth as available (even though it
821 * is not yet enabled).
822 */
823 if (downstream) {
824 up_bw = link_speed * 1 * 1000;
825 down_bw = link_speed * 3 * 1000;
826 } else {
827 up_bw = link_speed * 3 * 1000;
828 down_bw = link_speed * 1 * 1000;
829 }
830 } else {
831 up_bw = link_speed * link_width * 1000;
832 down_bw = up_bw;
833 }
834 }
835
836 /* Leave 10% guard band */
837 *max_up = up_bw - up_bw / 10;
838 *max_down = down_bw - down_bw / 10;
839
840 tb_port_dbg(port, "link maximum bandwidth %d/%d Mb/s\n", *max_up, *max_down);
841 return 0;
842 }
843
844 /**
845 * tb_available_bandwidth() - Available bandwidth for tunneling
846 * @tb: Domain structure
847 * @src_port: Source protocol adapter
848 * @dst_port: Destination protocol adapter
849 * @available_up: Available bandwidth upstream (Mb/s)
850 * @available_down: Available bandwidth downstream (Mb/s)
851 * @include_asym: Include bandwidth if the link is switched from
852 * symmetric to asymmetric
853 *
854 * Calculates maximum available bandwidth for protocol tunneling between
855 * @src_port and @dst_port at the moment. This is the minimum of the
856 * maximum link bandwidth across all links, each reduced by the
857 * bandwidth currently consumed on that link.
858 *
859 * If @include_asym is true then includes also bandwidth that can be
860 * added when the links are transitioned into asymmetric (but does not
861 * transition the links).
862 */
863 static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
864 struct tb_port *dst_port, int *available_up,
865 int *available_down, bool include_asym)
866 {
867 struct tb_port *port;
868 int ret;
869
870 /* Maximum possible bandwidth of an asymmetric Gen 4 link is 120 Gb/s */
871 *available_up = *available_down = 120000;
872
873 /* Find the minimum available bandwidth over all links */
874 tb_for_each_port_on_path(src_port, dst_port, port) {
875 int max_up, max_down, consumed_up, consumed_down;
876
877 if (!tb_port_is_null(port))
878 continue;
879
880 ret = tb_maximum_bandwidth(tb, src_port, dst_port, port,
881 &max_up, &max_down, include_asym);
882 if (ret)
883 return ret;
884
885 ret = tb_consumed_usb3_pcie_bandwidth(tb, src_port, dst_port,
886 port, &consumed_up,
887 &consumed_down);
888 if (ret)
889 return ret;
890 max_up -= consumed_up;
891 max_down -= consumed_down;
892
893 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, port,
894 &consumed_up, &consumed_down);
895 if (ret)
896 return ret;
897 max_up -= consumed_up;
898 max_down -= consumed_down;
899
900 if (max_up < *available_up)
901 *available_up = max_up;
902 if (max_down < *available_down)
903 *available_down = max_down;
904 }
905
906 if (*available_up < 0)
907 *available_up = 0;
908 if (*available_down < 0)
909 *available_down = 0;
910
911 return 0;
912 }
913
914 static int tb_release_unused_usb3_bandwidth(struct tb *tb,
915 struct tb_port *src_port,
916 struct tb_port *dst_port)
917 {
918 struct tb_tunnel *tunnel;
919
920 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
921 return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
922 }
923
924 static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
925 struct tb_port *dst_port)
926 {
927 int ret, available_up, available_down;
928 struct tb_tunnel *tunnel;
929
930 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
931 if (!tunnel)
932 return;
933
934 tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");
935
936 /*
937 * Calculate available bandwidth for the first hop USB3 tunnel.
938 * That determines the whole USB3 bandwidth for this branch.
939 */
940 ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
941 &available_up, &available_down, false);
942 if (ret) {
943 tb_warn(tb, "failed to calculate available bandwidth\n");
944 return;
945 }
946
947 tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
948 available_up, available_down);
949
950 tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
951 }
952
953 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
954 {
955 struct tb_switch *parent = tb_switch_parent(sw);
956 int ret, available_up, available_down;
957 struct tb_port *up, *down, *port;
958 struct tb_cm *tcm = tb_priv(tb);
959 struct tb_tunnel *tunnel;
960
961 if (!tb_acpi_may_tunnel_usb3()) {
962 tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
963 return 0;
964 }
965
966 up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
967 if (!up)
968 return 0;
969
970 if (!sw->link_usb4)
971 return 0;
972
973 /*
974 * Look up available down port. Since we are chaining it should
975 * be found right above this switch.
976 */
977 port = tb_switch_downstream_port(sw);
978 down = tb_find_usb3_down(parent, port);
979 if (!down)
980 return 0;
981
982 if (tb_route(parent)) {
983 struct tb_port *parent_up;
984 /*
985 * Check first that the parent switch has its upstream USB3
986 * port enabled. Otherwise the chain is not complete and
987 * there is no point setting up a new tunnel.
988 */
989 parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
990 if (!parent_up || !tb_port_is_enabled(parent_up))
991 return 0;
992
993 /* Make all unused bandwidth available for the new tunnel */
994 ret = tb_release_unused_usb3_bandwidth(tb, down, up);
995 if (ret)
996 return ret;
997 }
998
999 ret = tb_available_bandwidth(tb, down, up, &available_up, &available_down,
1000 false);
1001 if (ret)
1002 goto err_reclaim;
1003
1004 tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
1005 available_up, available_down);
1006
1007 tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
1008 available_down);
1009 if (!tunnel) {
1010 ret = -ENOMEM;
1011 goto err_reclaim;
1012 }
1013
1014 if (tb_tunnel_activate(tunnel)) {
1015 tb_port_info(up,
1016 "USB3 tunnel activation failed, aborting\n");
1017 ret = -EIO;
1018 goto err_free;
1019 }
1020
1021 list_add_tail(&tunnel->list, &tcm->tunnel_list);
1022 if (tb_route(parent))
1023 tb_reclaim_usb3_bandwidth(tb, down, up);
1024
1025 return 0;
1026
1027 err_free:
1028 tb_tunnel_free(tunnel);
1029 err_reclaim:
1030 if (tb_route(parent))
1031 tb_reclaim_usb3_bandwidth(tb, down, up);
1032
1033 return ret;
1034 }
1035
1036 static int tb_create_usb3_tunnels(struct tb_switch *sw)
1037 {
1038 struct tb_port *port;
1039 int ret;
1040
1041 if (!tb_acpi_may_tunnel_usb3())
1042 return 0;
1043
1044 if (tb_route(sw)) {
1045 ret = tb_tunnel_usb3(sw->tb, sw);
1046 if (ret)
1047 return ret;
1048 }
1049
1050 tb_switch_for_each_port(sw, port) {
1051 if (!tb_port_has_remote(port))
1052 continue;
1053 ret = tb_create_usb3_tunnels(port->remote->sw);
1054 if (ret)
1055 return ret;
1056 }
1057
1058 return 0;
1059 }
1060
1061 /**
1062 * tb_configure_asym() - Transition links to asymmetric if needed
1063 * @tb: Domain structure
1064 * @src_port: Source adapter to start the transition
1065 * @dst_port: Destination adapter
1066 * @requested_up: Additional bandwidth (Mb/s) required upstream
1067 * @requested_down: Additional bandwidth (Mb/s) required downstream
1068 *
1069 * Transition links between @src_port and @dst_port into asymmetric, with
1070 * three lanes in the direction from @src_port towards @dst_port and one lane
1071 * in the opposite direction, if the bandwidth requirements
1072 * (requested + currently consumed) on that link exceed @asym_threshold.
1073 *
1074 * Must be called with available >= requested over all links.
1075 */
1076 static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
1077 struct tb_port *dst_port, int requested_up,
1078 int requested_down)
1079 {
1080 struct tb_switch *sw;
1081 bool clx, downstream;
1082 struct tb_port *up;
1083 int ret = 0;
1084
1085 if (!asym_threshold)
1086 return 0;
1087
1088 /* Disable CL states before doing any transitions */
1089 downstream = tb_port_path_direction_downstream(src_port, dst_port);
1090 /* Pick up router deepest in the hierarchy */
1091 if (downstream)
1092 sw = dst_port->sw;
1093 else
1094 sw = src_port->sw;
1095
1096 clx = tb_disable_clx(sw);
1097
1098 tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
1099 int consumed_up, consumed_down;
1100 enum tb_link_width width;
1101
1102 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
1103 &consumed_up, &consumed_down);
1104 if (ret)
1105 break;
1106
1107 if (downstream) {
1108 /*
1109 * Going downstream, so make sure the upstream consumption
1110 * stays within 36G (40G minus the 10% guard band), and that
1111 * the request exceeds the threshold.
1112 */
1113 if (consumed_up + requested_up >= TB_ASYM_MIN) {
1114 ret = -ENOBUFS;
1115 break;
1116 }
1117 /* Does consumed + requested exceed the threshold */
1118 if (consumed_down + requested_down < asym_threshold)
1119 continue;
1120
1121 width = TB_LINK_WIDTH_ASYM_RX;
1122 } else {
1123 /* Upstream, the opposite of above */
1124 if (consumed_down + requested_down >= TB_ASYM_MIN) {
1125 ret = -ENOBUFS;
1126 break;
1127 }
1128 if (consumed_up + requested_up < asym_threshold)
1129 continue;
1130
1131 width = TB_LINK_WIDTH_ASYM_TX;
1132 }
1133
1134 if (up->sw->link_width == width)
1135 continue;
1136
1137 if (!tb_port_width_supported(up, width))
1138 continue;
1139
1140 tb_sw_dbg(up->sw, "configuring asymmetric link\n");
1141
1142 /*
1143 * Here requested + consumed > threshold so we need to
1144 * transition the link into asymmetric now.
1145 */
1146 ret = tb_switch_set_link_width(up->sw, width);
1147 if (ret) {
1148 tb_sw_warn(up->sw, "failed to set link width\n");
1149 break;
1150 }
1151 }
1152
1153 /* Re-enable CL states if they were previously enabled */
1154 if (clx)
1155 tb_enable_clx(sw);
1156
1157 return ret;
1158 }
1159
1160 /**
1161 * tb_configure_sym() - Transition links to symmetric if possible
1162 * @tb: Domain structure
1163 * @src_port: Source adapter to start the transition
1164 * @dst_port: Destination adapter
1165 * @requested_up: New lower bandwidth request upstream (Mb/s)
1166 * @requested_down: New lower bandwidth request downstream (Mb/s)
1167 *
1168 * Goes over each link from @src_port to @dst_port and tries to
1169 * transition the link to symmetric if the currently consumed bandwidth
1170 * allows.
1171 */
1172 static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
1173 struct tb_port *dst_port, int requested_up,
1174 int requested_down)
1175 {
1176 struct tb_switch *sw;
1177 bool clx, downstream;
1178 struct tb_port *up;
1179 int ret = 0;
1180
1181 if (!asym_threshold)
1182 return 0;
1183
1184 /* Disable CL states before doing any transitions */
1185 downstream = tb_port_path_direction_downstream(src_port, dst_port);
1186 /* Pick up router deepest in the hierarchy */
1187 if (downstream)
1188 sw = dst_port->sw;
1189 else
1190 sw = src_port->sw;
1191
1192 clx = tb_disable_clx(sw);
1193
1194 tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
1195 int consumed_up, consumed_down;
1196
1197 /* Already symmetric */
1198 if (up->sw->link_width <= TB_LINK_WIDTH_DUAL)
1199 continue;
1200 /* Unplugged, no need to switch */
1201 if (up->sw->is_unplugged)
1202 continue;
1203
1204 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
1205 &consumed_up, &consumed_down);
1206 if (ret)
1207 break;
1208
1209 if (downstream) {
1210 /*
1211 * Going downstream, so we want consumed_down < threshold.
1212 * Upstream traffic should be less than 36G (40G minus the
1213 * 10% guard band) as the link was already configured
1214 * asymmetric.
1215 */
1216 if (consumed_down + requested_down >= asym_threshold)
1217 continue;
1218 } else {
1219 if (consumed_up + requested_up >= asym_threshold)
1220 continue;
1221 }
1222
1223 if (up->sw->link_width == TB_LINK_WIDTH_DUAL)
1224 continue;
1225
1226 tb_sw_dbg(up->sw, "configuring symmetric link\n");
1227
1228 ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL);
1229 if (ret) {
1230 tb_sw_warn(up->sw, "failed to set link width\n");
1231 break;
1232 }
1233 }
1234
1235 /* Re-enable CL states if they were previously enabled */
1236 if (clx)
1237 tb_enable_clx(sw);
1238
1239 return ret;
1240 }
1241
1242 static void tb_configure_link(struct tb_port *down, struct tb_port *up,
1243 struct tb_switch *sw)
1244 {
1245 struct tb *tb = sw->tb;
1246
1247 /* Link the routers using both links if available */
1248 down->remote = up;
1249 up->remote = down;
1250 if (down->dual_link_port && up->dual_link_port) {
1251 down->dual_link_port->remote = up->dual_link_port;
1252 up->dual_link_port->remote = down->dual_link_port;
1253 }
1254
1255 /*
1256 * Enable lane bonding if the link is currently two single lane
1257 * links.
1258 */
1259 if (sw->link_width < TB_LINK_WIDTH_DUAL)
1260 tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL);
1261
1262 /*
1263 * If a device router that comes up with a symmetric link is
1264 * connected deeper in the hierarchy, transition the links
1265 * above it into symmetric if bandwidth allows.
1266 */
1267 if (tb_switch_depth(sw) > 1 &&
1268 tb_port_get_link_generation(up) >= 4 &&
1269 up->sw->link_width == TB_LINK_WIDTH_DUAL) {
1270 struct tb_port *host_port;
1271
1272 host_port = tb_port_at(tb_route(sw), tb->root_switch);
1273 tb_configure_sym(tb, host_port, up, 0, 0);
1274 }
1275
1276 /* Set the link configured */
1277 tb_switch_configure_link(sw);
1278 }
1279
1280 static void tb_scan_port(struct tb_port *port);
1281
1282 /*
1283 * tb_scan_switch() - scan for and initialize downstream switches
1284 */
1285 static void tb_scan_switch(struct tb_switch *sw)
1286 {
1287 struct tb_port *port;
1288
1289 pm_runtime_get_sync(&sw->dev);
1290
1291 tb_switch_for_each_port(sw, port)
1292 tb_scan_port(port);
1293
1294 pm_runtime_mark_last_busy(&sw->dev);
1295 pm_runtime_put_autosuspend(&sw->dev);
1296 }
1297
1298 /*
1299 * tb_scan_port() - check for and initialize switches below port
1300 */
1301 static void tb_scan_port(struct tb_port *port)
1302 {
1303 struct tb_cm *tcm = tb_priv(port->sw->tb);
1304 struct tb_port *upstream_port;
1305 bool discovery = false;
1306 struct tb_switch *sw;
1307
1308 if (tb_is_upstream_port(port))
1309 return;
1310
1311 if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
1312 !tb_dp_port_is_enabled(port)) {
1313 tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
1314 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
1315 false);
1316 return;
1317 }
1318
1319 if (port->config.type != TB_TYPE_PORT)
1320 return;
1321 if (port->dual_link_port && port->link_nr)
1322 return; /*
1323 * Downstream switch is reachable through two ports.
1324 * Only scan on the primary port (link_nr == 0).
1325 */
1326
1327 if (port->usb4)
1328 pm_runtime_get_sync(&port->usb4->dev);
1329
1330 if (tb_wait_for_port(port, false) <= 0)
1331 goto out_rpm_put;
1332 if (port->remote) {
1333 tb_port_dbg(port, "port already has a remote\n");
1334 goto out_rpm_put;
1335 }
1336
1337 tb_retimer_scan(port, true);
1338
1339 sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
1340 tb_downstream_route(port));
1341 if (IS_ERR(sw)) {
1342 /*
1343 * If there is an error accessing the connected switch
1344 * it may be connected to another domain. Also we allow
1345 * the other domain to be connected to a max depth switch.
1346 */
1347 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
1348 tb_scan_xdomain(port);
1349 goto out_rpm_put;
1350 }
1351
1352 if (tb_switch_configure(sw)) {
1353 tb_switch_put(sw);
1354 goto out_rpm_put;
1355 }
1356
1357 /*
1358 * If there was previously another domain connected remove it
1359 * first.
1360 */
1361 if (port->xdomain) {
1362 tb_xdomain_remove(port->xdomain);
1363 tb_port_unconfigure_xdomain(port);
1364 port->xdomain = NULL;
1365 }
1366
1367 /*
1368 * Do not send uevents until we have discovered all existing
1369 * tunnels and know which switches were authorized already by
1370 * the boot firmware.
1371 */
1372 if (!tcm->hotplug_active) {
1373 dev_set_uevent_suppress(&sw->dev, true);
1374 discovery = true;
1375 }
1376
1377 /*
1378 * At the moment we can support runtime PM only for Thunderbolt 2
1379 * and beyond (devices with LC).
1380 */
1381 sw->rpm = sw->generation > 1;
1382
1383 if (tb_switch_add(sw)) {
1384 tb_switch_put(sw);
1385 goto out_rpm_put;
1386 }
1387
1388 upstream_port = tb_upstream_port(sw);
1389 tb_configure_link(port, upstream_port, sw);
1390
1391 /*
1392 * CL0s and CL1 are enabled and supported together.
1393 * Silently ignore CLx enabling in case CLx is not supported.
1394 */
1395 if (discovery)
1396 tb_sw_dbg(sw, "discovery, not touching CL states\n");
1397 else if (tb_enable_clx(sw))
1398 tb_sw_warn(sw, "failed to enable CL states\n");
1399
1400 if (tb_enable_tmu(sw))
1401 tb_sw_warn(sw, "failed to enable TMU\n");
1402
1403 /*
1404 * Configuration valid needs to be set after the TMU has been
1405 * enabled for the upstream port of the router so we do it here.
1406 */
1407 tb_switch_configuration_valid(sw);
1408
1409 /* Scan upstream retimers */
1410 tb_retimer_scan(upstream_port, true);
1411
1412 /*
1413 * Create USB 3.x tunnels only when the switch is plugged to the
1414 * domain. This is because we scan the domain also during discovery
1415 * and want to discover existing USB 3.x tunnels before we create
1416 * any new.
1417 */
1418 if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
1419 tb_sw_warn(sw, "USB3 tunnel creation failed\n");
1420
1421 tb_add_dp_resources(sw);
1422 tb_scan_switch(sw);
1423
1424 out_rpm_put:
1425 if (port->usb4) {
1426 pm_runtime_mark_last_busy(&port->usb4->dev);
1427 pm_runtime_put_autosuspend(&port->usb4->dev);
1428 }
1429 }
1430
1431 static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
1432 {
1433 struct tb_port *src_port, *dst_port;
1434 struct tb *tb;
1435
1436 if (!tunnel)
1437 return;
1438
1439 tb_tunnel_deactivate(tunnel);
1440 list_del(&tunnel->list);
1441
1442 tb = tunnel->tb;
1443 src_port = tunnel->src_port;
1444 dst_port = tunnel->dst_port;
1445
1446 switch (tunnel->type) {
1447 case TB_TUNNEL_DP:
1448 tb_detach_bandwidth_group(src_port);
1449 /*
1450 * In case of DP tunnel make sure the DP IN resource is
1451 * deallocated properly.
1452 */
1453 tb_switch_dealloc_dp_resource(src_port->sw, src_port);
1454 /*
1455 * If bandwidth on a link is < asym_threshold
1456 * transition the link to symmetric.
1457 */
1458 tb_configure_sym(tb, src_port, dst_port, 0, 0);
1459 /* Now we can allow the domain to runtime suspend again */
1460 pm_runtime_mark_last_busy(&dst_port->sw->dev);
1461 pm_runtime_put_autosuspend(&dst_port->sw->dev);
1462 pm_runtime_mark_last_busy(&src_port->sw->dev);
1463 pm_runtime_put_autosuspend(&src_port->sw->dev);
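/*
 * Fall through so the USB3 tunnel sharing this path can reclaim
 * the bandwidth freed by the DP tunnel.
 */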
1464 fallthrough;
1465
1466 case TB_TUNNEL_USB3:
1467 tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
1468 break;
1469
1470 default:
1471 /*
1472 * PCIe and DMA tunnels do not consume guaranteed
1473 * bandwidth.
1474 */
1475 break;
1476 }
1477
1478 tb_tunnel_free(tunnel);
1479 }
1480
1481 /*
1482 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
1483 */
1484 static void tb_free_invalid_tunnels(struct tb *tb)
1485 {
1486 struct tb_cm *tcm = tb_priv(tb);
1487 struct tb_tunnel *tunnel;
1488 struct tb_tunnel *n;
1489
1490 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1491 if (tb_tunnel_is_invalid(tunnel))
1492 tb_deactivate_and_free_tunnel(tunnel);
1493 }
1494 }
1495
1496 /*
1497 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
1498 */
1499 static void tb_free_unplugged_children(struct tb_switch *sw)
1500 {
1501 struct tb_port *port;
1502
1503 tb_switch_for_each_port(sw, port) {
1504 if (!tb_port_has_remote(port))
1505 continue;
1506
1507 if (port->remote->sw->is_unplugged) {
1508 tb_retimer_remove_all(port);
1509 tb_remove_dp_resources(port->remote->sw);
1510 tb_switch_unconfigure_link(port->remote->sw);
1511 tb_switch_set_link_width(port->remote->sw,
1512 TB_LINK_WIDTH_SINGLE);
1513 tb_switch_remove(port->remote->sw);
1514 port->remote = NULL;
1515 if (port->dual_link_port)
1516 port->dual_link_port->remote = NULL;
1517 } else {
1518 tb_free_unplugged_children(port->remote->sw);
1519 }
1520 }
1521 }
1522
1523 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
1524 const struct tb_port *port)
1525 {
1526 struct tb_port *down = NULL;
1527
1528 /*
1529 * To keep plugging devices consistently in the same PCIe
1530 * hierarchy, do mapping here for switch downstream PCIe ports.
1531 */
1532 if (tb_switch_is_usb4(sw)) {
1533 down = usb4_switch_map_pcie_down(sw, port);
1534 } else if (!tb_route(sw)) {
1535 int phy_port = tb_phy_port_from_link(port->port);
1536 int index;
1537
1538 /*
1539 * Hard-coded Thunderbolt port to PCIe down port mapping
1540 * per controller.
1541 */
1542 if (tb_switch_is_cactus_ridge(sw) ||
1543 tb_switch_is_alpine_ridge(sw))
1544 index = !phy_port ? 6 : 7;
1545 else if (tb_switch_is_falcon_ridge(sw))
1546 index = !phy_port ? 6 : 8;
1547 else if (tb_switch_is_titan_ridge(sw))
1548 index = !phy_port ? 8 : 9;
1549 else
1550 goto out;
1551
1552 /* Validate the hard-coding */
1553 if (WARN_ON(index > sw->config.max_port_number))
1554 goto out;
1555
1556 down = &sw->ports[index];
1557 }
1558
1559 if (down) {
1560 if (WARN_ON(!tb_port_is_pcie_down(down)))
1561 goto out;
1562 if (tb_pci_port_is_enabled(down))
1563 goto out;
1564
1565 return down;
1566 }
1567
1568 out:
1569 return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
1570 }
1571
1572 static void
1573 tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
1574 {
1575 struct tb_tunnel *first_tunnel;
1576 struct tb *tb = group->tb;
1577 struct tb_port *in;
1578 int ret;
1579
1580 tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
1581 group->index);
1582
1583 first_tunnel = NULL;
1584 list_for_each_entry(in, &group->ports, group_list) {
1585 int estimated_bw, estimated_up, estimated_down;
1586 struct tb_tunnel *tunnel;
1587 struct tb_port *out;
1588
1589 if (!usb4_dp_port_bandwidth_mode_enabled(in))
1590 continue;
1591
1592 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
1593 if (WARN_ON(!tunnel))
1594 break;
1595
1596 if (!first_tunnel) {
1597 /*
1598 * Since USB3 bandwidth is shared by all DP
1599 * tunnels under the host router USB4 port, even
1600 * if they do not begin from the host router, we
1601 * can release USB3 bandwidth just once and not
1602 * for each tunnel separately.
1603 */
1604 first_tunnel = tunnel;
1605 ret = tb_release_unused_usb3_bandwidth(tb,
1606 first_tunnel->src_port, first_tunnel->dst_port);
1607 if (ret) {
1608 tb_port_warn(in,
1609 "failed to release unused bandwidth\n");
1610 break;
1611 }
1612 }
1613
1614 out = tunnel->dst_port;
1615 ret = tb_available_bandwidth(tb, in, out, &estimated_up,
1616 &estimated_down, true);
1617 if (ret) {
1618 tb_port_warn(in,
1619 "failed to re-calculate estimated bandwidth\n");
1620 break;
1621 }
1622
1623 /*
1624 * Estimated bandwidth includes:
1625 * - already allocated bandwidth for the DP tunnel
1626 * - available bandwidth along the path
1627 * - bandwidth allocated for USB 3.x but not used.
1628 */
1629 tb_port_dbg(in, "re-calculated estimated bandwidth %u/%u Mb/s\n",
1630 estimated_up, estimated_down);
1631
1632 if (tb_port_path_direction_downstream(in, out))
1633 estimated_bw = estimated_down;
1634 else
1635 estimated_bw = estimated_up;
1636
1637 if (usb4_dp_port_set_estimated_bandwidth(in, estimated_bw))
1638 tb_port_warn(in, "failed to update estimated bandwidth\n");
1639 }
1640
1641 if (first_tunnel)
1642 tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
1643 first_tunnel->dst_port);
1644
1645 tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
1646 }
1647
1648 static void tb_recalc_estimated_bandwidth(struct tb *tb)
1649 {
1650 struct tb_cm *tcm = tb_priv(tb);
1651 int i;
1652
1653 tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
1654
1655 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
1656 struct tb_bandwidth_group *group = &tcm->groups[i];
1657
1658 if (!list_empty(&group->ports))
1659 tb_recalc_estimated_bandwidth_for_group(group);
1660 }
1661
1662 tb_dbg(tb, "bandwidth re-calculation done\n");
1663 }
1664
1665 static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
1666 {
1667 struct tb_port *host_port, *port;
1668 struct tb_cm *tcm = tb_priv(tb);
1669
1670 host_port = tb_route(in->sw) ?
1671 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
1672
1673 list_for_each_entry(port, &tcm->dp_resources, list) {
1674 if (!tb_port_is_dpout(port))
1675 continue;
1676
1677 if (tb_port_is_enabled(port)) {
1678 tb_port_dbg(port, "DP OUT in use\n");
1679 continue;
1680 }
1681
1682 tb_port_dbg(port, "DP OUT available\n");
1683
1684 /*
1685 * Keep the DP tunnel under the topology starting from
1686 * the same host router downstream port.
1687 */
1688 if (host_port && tb_route(port->sw)) {
1689 struct tb_port *p;
1690
1691 p = tb_port_at(tb_route(port->sw), tb->root_switch);
1692 if (p != host_port)
1693 continue;
1694 }
1695
1696 return port;
1697 }
1698
1699 return NULL;
1700 }
1701
1702 static bool tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
1703 struct tb_port *out)
1704 {
1705 int available_up, available_down, ret, link_nr;
1706 struct tb_cm *tcm = tb_priv(tb);
1707 int consumed_up, consumed_down;
1708 struct tb_tunnel *tunnel;
1709
1710 /*
1711 * This is only applicable to links that are not bonded (so
1712 * when Thunderbolt 1 hardware is involved somewhere in the
1713 * topology). For these try to share the DP bandwidth between
1714 * the two lanes.
1715 */
1716 link_nr = 1;
1717 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
1718 if (tb_tunnel_is_dp(tunnel)) {
1719 link_nr = 0;
1720 break;
1721 }
1722 }
1723
1724 /*
1725 * DP stream needs the domain to be active so runtime resume
1726 * both ends of the tunnel.
1727 *
1728 * This should bring the routers in the middle active as well
1729 * and keeps the domain from runtime suspending while the DP
1730 * tunnel is active.
1731 */
1732 pm_runtime_get_sync(&in->sw->dev);
1733 pm_runtime_get_sync(&out->sw->dev);
1734
1735 if (tb_switch_alloc_dp_resource(in->sw, in)) {
1736 tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
1737 goto err_rpm_put;
1738 }
1739
1740 if (!tb_attach_bandwidth_group(tcm, in, out))
1741 goto err_dealloc_dp;
1742
1743 /* Make all unused USB3 bandwidth available for the new DP tunnel */
1744 ret = tb_release_unused_usb3_bandwidth(tb, in, out);
1745 if (ret) {
1746 tb_warn(tb, "failed to release unused bandwidth\n");
1747 goto err_detach_group;
1748 }
1749
1750 ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
1751 true);
1752 if (ret)
1753 goto err_reclaim_usb;
1754
1755 tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
1756 available_up, available_down);
1757
1758 tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
1759 available_down);
1760 if (!tunnel) {
1761 tb_port_dbg(out, "could not allocate DP tunnel\n");
1762 goto err_reclaim_usb;
1763 }
1764
1765 if (tb_tunnel_activate(tunnel)) {
1766 tb_port_info(out, "DP tunnel activation failed, aborting\n");
1767 goto err_free;
1768 }
1769
1770 /* If reading the tunnel's consumed bandwidth fails, tear it down */
1771 ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down);
1772 if (ret)
1773 goto err_deactivate;
1774
1775 list_add_tail(&tunnel->list, &tcm->tunnel_list);
1776
1777 tb_reclaim_usb3_bandwidth(tb, in, out);
1778 /*
1779 * Transition the links to asymmetric if the consumption exceeds
1780 * the threshold.
1781 */
1782 tb_configure_asym(tb, in, out, consumed_up, consumed_down);
1783
1784 /* Update the domain with the new bandwidth estimation */
1785 tb_recalc_estimated_bandwidth(tb);
1786
1787 /*
1788 * If a DP tunnel exists, change the TMU mode of the host router's
1789 * first-depth children to HiFi so that CL0s works.
1790 */
1791 tb_increase_tmu_accuracy(tunnel);
1792 return true;
1793
1794 err_deactivate:
1795 tb_tunnel_deactivate(tunnel);
1796 err_free:
1797 tb_tunnel_free(tunnel);
1798 err_reclaim_usb:
1799 tb_reclaim_usb3_bandwidth(tb, in, out);
1800 err_detach_group:
1801 tb_detach_bandwidth_group(in);
1802 err_dealloc_dp:
1803 tb_switch_dealloc_dp_resource(in->sw, in);
1804 err_rpm_put:
1805 pm_runtime_mark_last_busy(&out->sw->dev);
1806 pm_runtime_put_autosuspend(&out->sw->dev);
1807 pm_runtime_mark_last_busy(&in->sw->dev);
1808 pm_runtime_put_autosuspend(&in->sw->dev);
1809
1810 return false;
1811 }
1812
1813 static void tb_tunnel_dp(struct tb *tb)
1814 {
1815 struct tb_cm *tcm = tb_priv(tb);
1816 struct tb_port *port, *in, *out;
1817
1818 if (!tb_acpi_may_tunnel_dp()) {
1819 tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
1820 return;
1821 }
1822
1823 /*
1824 * Find pair of inactive DP IN and DP OUT adapters and then
1825 * establish a DP tunnel between them.
1826 */
1827 tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
1828
1829 in = NULL;
1830 out = NULL;
1831 list_for_each_entry(port, &tcm->dp_resources, list) {
1832 if (!tb_port_is_dpin(port))
1833 continue;
1834
1835 if (tb_port_is_enabled(port)) {
1836 tb_port_dbg(port, "DP IN in use\n");
1837 continue;
1838 }
1839
1840 in = port;
1841 tb_port_dbg(in, "DP IN available\n");
1842
1843 out = tb_find_dp_out(tb, port);
1844 if (out)
1845 tb_tunnel_one_dp(tb, in, out);
1846 else
1847 tb_port_dbg(in, "no suitable DP OUT adapter available, not tunneling\n");
1848 }
1849
1850 if (!in)
1851 tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
1852 }
1853
1854 static void tb_enter_redrive(struct tb_port *port)
1855 {
1856 struct tb_switch *sw = port->sw;
1857
1858 if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
1859 return;
1860
1861 /*
1862 * If we get hot-unplug for the DP IN port of the host router
1863 * and the DP resource is not available anymore it means there
1864 * is a monitor connected directly to the Type-C port and we are
1865 * in "redrive" mode. For this to work we cannot enter RTD3 so
1866 * we bump up the runtime PM reference count here.
1867 */
1868 if (!tb_port_is_dpin(port))
1869 return;
1870 if (tb_route(sw))
1871 return;
1872 if (!tb_switch_query_dp_resource(sw, port)) {
1873 port->redrive = true;
1874 pm_runtime_get(&sw->dev);
1875 tb_port_dbg(port, "enter redrive mode, keeping powered\n");
1876 }
1877 }
1878
1879 static void tb_exit_redrive(struct tb_port *port)
1880 {
1881 struct tb_switch *sw = port->sw;
1882
1883 if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
1884 return;
1885
1886 if (!tb_port_is_dpin(port))
1887 return;
1888 if (tb_route(sw))
1889 return;
1890 if (port->redrive && tb_switch_query_dp_resource(sw, port)) {
1891 port->redrive = false;
1892 pm_runtime_put(&sw->dev);
1893 tb_port_dbg(port, "exit redrive mode\n");
1894 }
1895 }
1896
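/*
 * A DP adapter reported its resource as no longer available. Tear down
 * the DP tunnel using it (or enter redrive mode if there was none),
 * drop the adapter from the DP resource list, and see if the freed
 * bandwidth allows another DP tunnel to be created.
 */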
1897 static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
1898 {
1899 struct tb_port *in, *out;
1900 struct tb_tunnel *tunnel;
1901
1902 if (tb_port_is_dpin(port)) {
1903 tb_port_dbg(port, "DP IN resource unavailable\n");
1904 in = port;
1905 out = NULL;
1906 } else {
1907 tb_port_dbg(port, "DP OUT resource unavailable\n");
1908 in = NULL;
1909 out = port;
1910 }
1911
1912 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
1913 if (tunnel)
1914 tb_deactivate_and_free_tunnel(tunnel);
1915 else
1916 tb_enter_redrive(port);
1917 list_del_init(&port->list);
1918
1919 /*
1920 * See if there is another DP OUT port that can be used to
1921 * create another tunnel.
1922 */
1923 tb_recalc_estimated_bandwidth(tb);
1924 tb_tunnel_dp(tb);
1925 }
1926
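/*
 * A DP adapter became available for tunneling. Add it to the DP
 * resource list (unless it is already there or currently enabled),
 * exit redrive mode if needed and try to establish new DP tunnels.
 */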
1927 static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
1928 {
1929 struct tb_cm *tcm = tb_priv(tb);
1930 struct tb_port *p;
1931
1932 if (tb_port_is_enabled(port))
1933 return;
1934
1935 list_for_each_entry(p, &tcm->dp_resources, list) {
1936 if (p == port)
1937 return;
1938 }
1939
1940 tb_port_dbg(port, "DP %s resource available\n",
1941 tb_port_is_dpin(port) ? "IN" : "OUT");
1942 list_add_tail(&port->list, &tcm->dp_resources);
1943 tb_exit_redrive(port);
1944
1945 /* Look for suitable DP IN <-> DP OUT pairs now */
1946 tb_tunnel_dp(tb);
1947 }
1948
1949 static void tb_disconnect_and_release_dp(struct tb *tb)
1950 {
1951 struct tb_cm *tcm = tb_priv(tb);
1952 struct tb_tunnel *tunnel, *n;
1953
1954 /*
1955 * Tear down all DP tunnels and release their resources. They
1956 * will be re-established after resume based on plug events.
1957 */
1958 list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
1959 if (tb_tunnel_is_dp(tunnel))
1960 tb_deactivate_and_free_tunnel(tunnel);
1961 }
1962
1963 while (!list_empty(&tcm->dp_resources)) {
1964 struct tb_port *port;
1965
1966 port = list_first_entry(&tcm->dp_resources,
1967 struct tb_port, list);
1968 list_del_init(&port->list);
1969 }
1970 }
1971
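/*
 * Tears down the PCIe tunnel going to @sw. Used as the
 * disapprove_switch callback (see tb_cm_ops below).
 */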
1972 static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
1973 {
1974 struct tb_tunnel *tunnel;
1975 struct tb_port *up;
1976
1977 up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1978 if (WARN_ON(!up))
1979 return -ENODEV;
1980
1981 tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
1982 if (WARN_ON(!tunnel))
1983 return -ENODEV;
1984
1985 tb_switch_xhci_disconnect(sw);
1986
1987 tb_tunnel_deactivate(tunnel);
1988 list_del(&tunnel->list);
1989 tb_tunnel_free(tunnel);
1990 return 0;
1991 }
1992
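/*
 * Establishes a PCIe tunnel from the PCIe downstream port right above
 * @sw to its PCIe upstream adapter. Used as the approve_switch
 * callback (see tb_cm_ops below). Returns 0 also when the router has
 * no PCIe adapters to tunnel.
 */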
1993 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
1994 {
1995 struct tb_port *up, *down, *port;
1996 struct tb_cm *tcm = tb_priv(tb);
1997 struct tb_tunnel *tunnel;
1998
1999 up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
2000 if (!up)
2001 return 0;
2002
2003 /*
2004 * Look up an available PCIe down port. Since we are chaining, it
2005 * should be found right above this switch.
2006 */
2007 port = tb_switch_downstream_port(sw);
2008 down = tb_find_pcie_down(tb_switch_parent(sw), port);
2009 if (!down)
2010 return 0;
2011
2012 tunnel = tb_tunnel_alloc_pci(tb, up, down);
2013 if (!tunnel)
2014 return -ENOMEM;
2015
2016 if (tb_tunnel_activate(tunnel)) {
2017 tb_port_info(up,
2018 "PCIe tunnel activation failed, aborting\n");
2019 tb_tunnel_free(tunnel);
2020 return -EIO;
2021 }
2022
2023 /*
2024 * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
2025 * here.
2026 */
2027 if (tb_switch_pcie_l1_enable(sw))
2028 tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
2029
2030 if (tb_switch_xhci_connect(sw))
2031 tb_sw_warn(sw, "failed to connect xHCI\n");
2032
2033 list_add_tail(&tunnel->list, &tcm->tunnel_list);
2034 return 0;
2035 }
2036
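/*
 * Sets up a DMA tunnel between the host NHI and the router behind the
 * given XDomain for the requested transmit/receive rings and paths.
 * CL states are kept disabled on the link while DMA paths are tunneled.
 */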
2037 static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
2038 int transmit_path, int transmit_ring,
2039 int receive_path, int receive_ring)
2040 {
2041 struct tb_cm *tcm = tb_priv(tb);
2042 struct tb_port *nhi_port, *dst_port;
2043 struct tb_tunnel *tunnel;
2044 struct tb_switch *sw;
2045 int ret;
2046
2047 sw = tb_to_switch(xd->dev.parent);
2048 dst_port = tb_port_at(xd->route, sw);
2049 nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
2050
2051 mutex_lock(&tb->lock);
2052
2053 /*
2054 * When tunneling DMA paths, the link should not enter CL states
2055 * so disable them now.
2056 */
2057 tb_disable_clx(sw);
2058
2059 tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
2060 transmit_ring, receive_path, receive_ring);
2061 if (!tunnel) {
2062 ret = -ENOMEM;
2063 goto err_clx;
2064 }
2065
2066 if (tb_tunnel_activate(tunnel)) {
2067 tb_port_info(nhi_port,
2068 "DMA tunnel activation failed, aborting\n");
2069 ret = -EIO;
2070 goto err_free;
2071 }
2072
2073 list_add_tail(&tunnel->list, &tcm->tunnel_list);
2074 mutex_unlock(&tb->lock);
2075 return 0;
2076
2077 err_free:
2078 tb_tunnel_free(tunnel);
2079 err_clx:
2080 tb_enable_clx(sw);
2081 mutex_unlock(&tb->lock);
2082
2083 return ret;
2084 }
2085
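/*
 * Tears down the DMA tunnels towards the given XDomain that match the
 * passed rings/paths (the hotplug path passes -1 for all of them to
 * tear down every DMA tunnel to that XDomain). Caller must hold
 * tb->lock.
 */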
2086 static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
2087 int transmit_path, int transmit_ring,
2088 int receive_path, int receive_ring)
2089 {
2090 struct tb_cm *tcm = tb_priv(tb);
2091 struct tb_port *nhi_port, *dst_port;
2092 struct tb_tunnel *tunnel, *n;
2093 struct tb_switch *sw;
2094
2095 sw = tb_to_switch(xd->dev.parent);
2096 dst_port = tb_port_at(xd->route, sw);
2097 nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
2098
2099 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
2100 if (!tb_tunnel_is_dma(tunnel))
2101 continue;
2102 if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
2103 continue;
2104
2105 if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
2106 receive_path, receive_ring))
2107 tb_deactivate_and_free_tunnel(tunnel);
2108 }
2109
2110 /*
2111 * Try to re-enable CL states now; it is OK if this fails
2112 * because we may still have another DMA tunnel active through
2113 * the same host router USB4 downstream port.
2114 */
2115 tb_enable_clx(sw);
2116 }
2117
2118 static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
2119 int transmit_path, int transmit_ring,
2120 int receive_path, int receive_ring)
2121 {
2122 if (!xd->is_unplugged) {
2123 mutex_lock(&tb->lock);
2124 __tb_disconnect_xdomain_paths(tb, xd, transmit_path,
2125 transmit_ring, receive_path,
2126 receive_ring);
2127 mutex_unlock(&tb->lock);
2128 }
2129 return 0;
2130 }
2131
2132 /* hotplug handling */
2133
2134 /*
2135 * tb_handle_hotplug() - handle hotplug event
2136 *
2137 * Executes on tb->wq.
2138 */
2139 static void tb_handle_hotplug(struct work_struct *work)
2140 {
2141 struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
2142 struct tb *tb = ev->tb;
2143 struct tb_cm *tcm = tb_priv(tb);
2144 struct tb_switch *sw;
2145 struct tb_port *port;
2146
2147 /* Bring the domain back from sleep if it was suspended */
2148 pm_runtime_get_sync(&tb->dev);
2149
2150 mutex_lock(&tb->lock);
2151 if (!tcm->hotplug_active)
2152 goto out; /* during init, suspend or shutdown */
2153
2154 sw = tb_switch_find_by_route(tb, ev->route);
2155 if (!sw) {
2156 tb_warn(tb,
2157 "hotplug event from non existent switch %llx:%x (unplug: %d)\n",
2158 ev->route, ev->port, ev->unplug);
2159 goto out;
2160 }
2161 if (ev->port > sw->config.max_port_number) {
2162 tb_warn(tb,
2163 "hotplug event from non existent port %llx:%x (unplug: %d)\n",
2164 ev->route, ev->port, ev->unplug);
2165 goto put_sw;
2166 }
2167 port = &sw->ports[ev->port];
2168 if (tb_is_upstream_port(port)) {
2169 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
2170 ev->route, ev->port, ev->unplug);
2171 goto put_sw;
2172 }
2173
2174 pm_runtime_get_sync(&sw->dev);
2175
2176 if (ev->unplug) {
2177 tb_retimer_remove_all(port);
2178
2179 if (tb_port_has_remote(port)) {
2180 tb_port_dbg(port, "switch unplugged\n");
2181 tb_sw_set_unplugged(port->remote->sw);
2182 tb_free_invalid_tunnels(tb);
2183 tb_remove_dp_resources(port->remote->sw);
2184 tb_switch_tmu_disable(port->remote->sw);
2185 tb_switch_unconfigure_link(port->remote->sw);
2186 tb_switch_set_link_width(port->remote->sw,
2187 TB_LINK_WIDTH_SINGLE);
2188 tb_switch_remove(port->remote->sw);
2189 port->remote = NULL;
2190 if (port->dual_link_port)
2191 port->dual_link_port->remote = NULL;
2192 /* Maybe we can create another DP tunnel */
2193 tb_recalc_estimated_bandwidth(tb);
2194 tb_tunnel_dp(tb);
2195 } else if (port->xdomain) {
2196 struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
2197
2198 tb_port_dbg(port, "xdomain unplugged\n");
2199 /*
2200 * Service drivers are unbound during
2201 * tb_xdomain_remove() so setting XDomain as
2202 * unplugged here prevents deadlock if they call
2203 * tb_xdomain_disable_paths(). We will tear down
2204 * all the tunnels below.
2205 */
2206 xd->is_unplugged = true;
2207 tb_xdomain_remove(xd);
2208 port->xdomain = NULL;
2209 __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
2210 tb_xdomain_put(xd);
2211 tb_port_unconfigure_xdomain(port);
2212 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
2213 tb_dp_resource_unavailable(tb, port);
2214 } else if (!port->port) {
2215 tb_sw_dbg(sw, "xHCI disconnect request\n");
2216 tb_switch_xhci_disconnect(sw);
2217 } else {
2218 tb_port_dbg(port,
2219 "got unplug event for disconnected port, ignoring\n");
2220 }
2221 } else if (port->remote) {
2222 tb_port_dbg(port, "got plug event for connected port, ignoring\n");
2223 } else if (!port->port && sw->authorized) {
2224 tb_sw_dbg(sw, "xHCI connect request\n");
2225 tb_switch_xhci_connect(sw);
2226 } else {
2227 if (tb_port_is_null(port)) {
2228 tb_port_dbg(port, "hotplug: scanning\n");
2229 tb_scan_port(port);
2230 if (!port->remote)
2231 tb_port_dbg(port, "hotplug: no switch found\n");
2232 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
2233 tb_dp_resource_available(tb, port);
2234 }
2235 }
2236
2237 pm_runtime_mark_last_busy(&sw->dev);
2238 pm_runtime_put_autosuspend(&sw->dev);
2239
2240 put_sw:
2241 tb_switch_put(sw);
2242 out:
2243 mutex_unlock(&tb->lock);
2244
2245 pm_runtime_mark_last_busy(&tb->dev);
2246 pm_runtime_put_autosuspend(&tb->dev);
2247
2248 kfree(ev);
2249 }
2250
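/*
 * Handles a bandwidth allocation request coming from a DP IN adapter.
 * Shrinking requests are granted directly (and the link may be
 * transitioned back to symmetric); growing requests first release
 * unused USB3 bandwidth and check what is available along the path
 * before committing, switching the link to asymmetric if needed.
 */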
2251 static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
2252 int *requested_down)
2253 {
2254 int allocated_up, allocated_down, available_up, available_down, ret;
2255 int requested_up_corrected, requested_down_corrected, granularity;
2256 int max_up, max_down, max_up_rounded, max_down_rounded;
2257 struct tb *tb = tunnel->tb;
2258 struct tb_port *in, *out;
2259
2260 ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
2261 if (ret)
2262 return ret;
2263
2264 in = tunnel->src_port;
2265 out = tunnel->dst_port;
2266
2267 tb_port_dbg(in, "bandwidth allocated currently %d/%d Mb/s\n",
2268 allocated_up, allocated_down);
2269
2270 /*
2271 * If we get a rounded-up request from the graphics side, say HBR2 x 4
2272 * that is 17500 instead of 17280 (this is because of the
2273 * granularity), we allow it too. Here the graphics driver has already
2274 * negotiated with the DPRX the maximum possible rates (which is
2275 * 17280 in this case).
2276 *
2277 * Since the link cannot go higher than 17280 we use that in our
2278 * calculations but the DP IN adapter Allocated BW write must be
2279 * the same value (17500) otherwise the adapter will mark it as
2280 * failed for graphics.
2281 */
2282 ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
2283 if (ret)
2284 return ret;
2285
2286 ret = usb4_dp_port_granularity(in);
2287 if (ret < 0)
2288 return ret;
2289 granularity = ret;
2290
2291 max_up_rounded = roundup(max_up, granularity);
2292 max_down_rounded = roundup(max_down, granularity);
2293
2294 /*
2295 * This will "fix" the request down to the maximum supported
2296 * rate * lanes if it is at the maximum rounded up level.
2297 */
2298 requested_up_corrected = *requested_up;
2299 if (requested_up_corrected == max_up_rounded)
2300 requested_up_corrected = max_up;
2301 else if (requested_up_corrected < 0)
2302 requested_up_corrected = 0;
2303 requested_down_corrected = *requested_down;
2304 if (requested_down_corrected == max_down_rounded)
2305 requested_down_corrected = max_down;
2306 else if (requested_down_corrected < 0)
2307 requested_down_corrected = 0;
2308
2309 tb_port_dbg(in, "corrected bandwidth request %d/%d Mb/s\n",
2310 requested_up_corrected, requested_down_corrected);
2311
2312 if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
2313 (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
2314 tb_port_dbg(in, "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
2315 requested_up_corrected, requested_down_corrected,
2316 max_up_rounded, max_down_rounded);
2317 return -ENOBUFS;
2318 }
2319
2320 if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
2321 (*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
2322 /*
2323 * If bandwidth on a link is < asym_threshold transition
2324 * the link to symmetric.
2325 */
2326 tb_configure_sym(tb, in, out, *requested_up, *requested_down);
2327 /*
2328 * If the requested bandwidth is less than or equal to what is
2329 * currently allocated to that tunnel we simply change
2330 * the reservation of the tunnel. Since all the tunnels
2331 * going out from the same USB4 port are in the same
2332 * group, the released bandwidth will be taken into
2333 * account for the other tunnels automatically below.
2334 */
2335 return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
2336 requested_down);
2337 }
2338
2339 /*
2340 * More bandwidth is requested. Release all the potential
2341 * bandwidth from USB3 first.
2342 */
2343 ret = tb_release_unused_usb3_bandwidth(tb, in, out);
2344 if (ret)
2345 return ret;
2346
2347 /*
2348 * Then go over all tunnels that cross the same USB4 ports (they
2349 * are also in the same group but we use the same function here
2350 * that we use with the normal bandwidth allocation).
2351 */
2352 ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
2353 true);
2354 if (ret)
2355 goto reclaim;
2356
2357 tb_port_dbg(in, "bandwidth available for allocation %d/%d Mb/s\n",
2358 available_up, available_down);
2359
2360 if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
2361 (*requested_down >= 0 && available_down >= requested_down_corrected)) {
2362 /*
2363 * If bandwidth on a link is >= asym_threshold
2364 * transition the link to asymmetric.
2365 */
2366 ret = tb_configure_asym(tb, in, out, *requested_up,
2367 *requested_down);
2368 if (ret) {
2369 tb_configure_sym(tb, in, out, 0, 0);
2370 return ret;
2371 }
2372
2373 ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
2374 requested_down);
2375 if (ret) {
2376 tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
2377 tb_configure_sym(tb, in, out, 0, 0);
2378 }
2379 } else {
2380 ret = -ENOBUFS;
2381 }
2382
2383 reclaim:
2384 tb_reclaim_usb3_bandwidth(tb, in, out);
2385 return ret;
2386 }
2387
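/*
 * Work function that services a DP bandwidth allocation mode request
 * queued from the notification handler. Reads the requested bandwidth
 * from the DP IN adapter, finds the tunnel using it and re-allocates
 * bandwidth through tb_alloc_dp_bandwidth().
 */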
2388 static void tb_handle_dp_bandwidth_request(struct work_struct *work)
2389 {
2390 struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
2391 int requested_bw, requested_up, requested_down, ret;
2392 struct tb_port *in, *out;
2393 struct tb_tunnel *tunnel;
2394 struct tb *tb = ev->tb;
2395 struct tb_cm *tcm = tb_priv(tb);
2396 struct tb_switch *sw;
2397
2398 pm_runtime_get_sync(&tb->dev);
2399
2400 mutex_lock(&tb->lock);
2401 if (!tcm->hotplug_active)
2402 goto unlock;
2403
2404 sw = tb_switch_find_by_route(tb, ev->route);
2405 if (!sw) {
2406 tb_warn(tb, "bandwidth request from non-existent router %llx\n",
2407 ev->route);
2408 goto unlock;
2409 }
2410
2411 in = &sw->ports[ev->port];
2412 if (!tb_port_is_dpin(in)) {
2413 tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
2414 goto put_sw;
2415 }
2416
2417 tb_port_dbg(in, "handling bandwidth allocation request\n");
2418
2419 if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
2420 tb_port_warn(in, "bandwidth allocation mode not enabled\n");
2421 goto put_sw;
2422 }
2423
2424 ret = usb4_dp_port_requested_bandwidth(in);
2425 if (ret < 0) {
2426 if (ret == -ENODATA)
2427 tb_port_dbg(in, "no bandwidth request active\n");
2428 else
2429 tb_port_warn(in, "failed to read requested bandwidth\n");
2430 goto put_sw;
2431 }
2432 requested_bw = ret;
2433
2434 tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
2435
2436 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
2437 if (!tunnel) {
2438 tb_port_warn(in, "failed to find tunnel\n");
2439 goto put_sw;
2440 }
2441
2442 out = tunnel->dst_port;
2443
2444 if (tb_port_path_direction_downstream(in, out)) {
2445 requested_up = -1;
2446 requested_down = requested_bw;
2447 } else {
2448 requested_up = requested_bw;
2449 requested_down = -1;
2450 }
2451
2452 ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
2453 if (ret) {
2454 if (ret == -ENOBUFS)
2455 tb_port_warn(in, "not enough bandwidth available\n");
2456 else
2457 tb_port_warn(in, "failed to change bandwidth allocation\n");
2458 } else {
2459 tb_port_dbg(in, "bandwidth allocation changed to %d/%d Mb/s\n",
2460 requested_up, requested_down);
2461
2462 /* Update other clients about the allocation change */
2463 tb_recalc_estimated_bandwidth(tb);
2464 }
2465
2466 put_sw:
2467 tb_switch_put(sw);
2468 unlock:
2469 mutex_unlock(&tb->lock);
2470
2471 pm_runtime_mark_last_busy(&tb->dev);
2472 pm_runtime_put_autosuspend(&tb->dev);
2473
2474 kfree(ev);
2475 }
2476
2477 static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
2478 {
2479 struct tb_hotplug_event *ev;
2480
2481 ev = kmalloc(sizeof(*ev), GFP_KERNEL);
2482 if (!ev)
2483 return;
2484
2485 ev->tb = tb;
2486 ev->route = route;
2487 ev->port = port;
2488 INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
2489 queue_work(tb->wq, &ev->work);
2490 }
2491
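/*
 * Handles router notifications delivered as error packets on the
 * control channel. All known notifications are acked; DP bandwidth
 * allocation requests are additionally queued to the domain workqueue
 * (tb->wq) for handling.
 */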
2492 static void tb_handle_notification(struct tb *tb, u64 route,
2493 const struct cfg_error_pkg *error)
2494 {
2495
2496 switch (error->error) {
2497 case TB_CFG_ERROR_PCIE_WAKE:
2498 case TB_CFG_ERROR_DP_CON_CHANGE:
2499 case TB_CFG_ERROR_DPTX_DISCOVERY:
2500 if (tb_cfg_ack_notification(tb->ctl, route, error))
2501 tb_warn(tb, "could not ack notification on %llx\n",
2502 route);
2503 break;
2504
2505 case TB_CFG_ERROR_DP_BW:
2506 if (tb_cfg_ack_notification(tb->ctl, route, error))
2507 tb_warn(tb, "could not ack notification on %llx\n",
2508 route);
2509 tb_queue_dp_bandwidth_request(tb, route, error->port);
2510 break;
2511
2512 default:
2513 /* Ignore for now */
2514 break;
2515 }
2516 }
2517
2518 /*
2519 * tb_schedule_hotplug_handler() - callback function for the control channel
2520 *
2521 * Delegates to tb_handle_hotplug.
2522 */
2523 static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
2524 const void *buf, size_t size)
2525 {
2526 const struct cfg_event_pkg *pkg = buf;
2527 u64 route = tb_cfg_get_route(&pkg->header);
2528
2529 switch (type) {
2530 case TB_CFG_PKG_ERROR:
2531 tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
2532 return;
2533 case TB_CFG_PKG_EVENT:
2534 break;
2535 default:
2536 tb_warn(tb, "unexpected event %#x, ignoring\n", type);
2537 return;
2538 }
2539
2540 if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
2541 tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
2542 pkg->port);
2543 }
2544
2545 tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
2546 }
2547
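/*
 * Called when the domain is stopped. Frees all tunnels, deactivating
 * only the DMA ones, then removes the root switch and stops accepting
 * hotplug events.
 */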
2548 static void tb_stop(struct tb *tb)
2549 {
2550 struct tb_cm *tcm = tb_priv(tb);
2551 struct tb_tunnel *tunnel;
2552 struct tb_tunnel *n;
2553
2554 cancel_delayed_work(&tcm->remove_work);
2555 /* tunnels are only present after everything has been initialized */
2556 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
2557 /*
2558 * DMA tunnels require the driver to be functional so we
2559 * tear them down. Other protocol tunnels can be left
2560 * intact.
2561 */
2562 if (tb_tunnel_is_dma(tunnel))
2563 tb_tunnel_deactivate(tunnel);
2564 tb_tunnel_free(tunnel);
2565 }
2566 tb_switch_remove(tb->root_switch);
2567 tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
2568 }
2569
2570 static int tb_scan_finalize_switch(struct device *dev, void *data)
2571 {
2572 if (tb_is_switch(dev)) {
2573 struct tb_switch *sw = tb_to_switch(dev);
2574
2575 /*
2576 * If we found that the switch was already set up by the
2577 * boot firmware, mark it as authorized now before we
2578 * send the uevent to userspace.
2579 */
2580 if (sw->boot)
2581 sw->authorized = 1;
2582
2583 dev_set_uevent_suppress(dev, false);
2584 kobject_uevent(&dev->kobj, KOBJ_ADD);
2585 device_for_each_child(dev, NULL, tb_scan_finalize_switch);
2586 }
2587
2588 return 0;
2589 }
2590
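/*
 * Brings up the domain: allocates and configures the root switch,
 * enables its TMU, discovers (or resets) what the boot firmware left
 * behind, creates USB3 tunnels and exposes the discovered routers to
 * userspace before hotplug handling is enabled.
 */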
2591 static int tb_start(struct tb *tb, bool reset)
2592 {
2593 struct tb_cm *tcm = tb_priv(tb);
2594 int ret;
2595
2596 tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
2597 if (IS_ERR(tb->root_switch))
2598 return PTR_ERR(tb->root_switch);
2599
2600 /*
2601 * ICM firmware upgrade needs running firmware, and in native
2602 * mode that is not available, so disable firmware upgrade of the
2603 * root switch.
2604 *
2605 * However, USB4 routers support NVM firmware upgrade if they
2606 * implement the necessary router operations.
2607 */
2608 tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
2609 /* All USB4 routers support runtime PM */
2610 tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
2611
2612 ret = tb_switch_configure(tb->root_switch);
2613 if (ret) {
2614 tb_switch_put(tb->root_switch);
2615 return ret;
2616 }
2617
2618 /* Announce the switch to the world */
2619 ret = tb_switch_add(tb->root_switch);
2620 if (ret) {
2621 tb_switch_put(tb->root_switch);
2622 return ret;
2623 }
2624
2625 /*
2626 * To support highest CLx state, we set host router's TMU to
2627 * Normal mode.
2628 */
2629 tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
2630 /* Enable TMU if it is off */
2631 tb_switch_tmu_enable(tb->root_switch);
2632
2633 /*
2634 * Boot firmware might have created tunnels of its own. Since we
2635 * cannot be sure they are usable for us, tear them down and
2636 * reset the ports to handle it as new hotplug for USB4 v1
2637 * routers (for USB4 v2 and beyond we already do host reset).
2638 */
2639 if (reset && usb4_switch_version(tb->root_switch) == 1) {
2640 tb_switch_reset(tb->root_switch);
2641 } else {
2642 /* Full scan to discover devices added before the driver was loaded. */
2643 tb_scan_switch(tb->root_switch);
2644 /* Find out tunnels created by the boot firmware */
2645 tb_discover_tunnels(tb);
2646 /* Add DP resources from the DP tunnels created by the boot firmware */
2647 tb_discover_dp_resources(tb);
2648 }
2649
2650 /*
2651 * If the boot firmware did not create USB 3.x tunnels, create them
2652 * now for the whole topology.
2653 */
2654 tb_create_usb3_tunnels(tb->root_switch);
2655 /* Add DP IN resources for the root switch */
2656 tb_add_dp_resources(tb->root_switch);
2657 /* Make the discovered switches available to the userspace */
2658 device_for_each_child(&tb->root_switch->dev, NULL,
2659 tb_scan_finalize_switch);
2660
2661 /* Allow tb_handle_hotplug to progress events */
2662 tcm->hotplug_active = true;
2663 return 0;
2664 }
2665
2666 static int tb_suspend_noirq(struct tb *tb)
2667 {
2668 struct tb_cm *tcm = tb_priv(tb);
2669
2670 tb_dbg(tb, "suspending...\n");
2671 tb_disconnect_and_release_dp(tb);
2672 tb_switch_suspend(tb->root_switch, false);
2673 tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
2674 tb_dbg(tb, "suspend finished\n");
2675
2676 return 0;
2677 }
2678
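/*
 * Re-establishes CL states, TMU configuration and link configuration
 * for @sw and, recursively, for all routers and XDomains below it.
 * Used when resuming from system or runtime sleep.
 */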
2679 static void tb_restore_children(struct tb_switch *sw)
2680 {
2681 struct tb_port *port;
2682
2683 /* No need to restore if the router is already unplugged */
2684 if (sw->is_unplugged)
2685 return;
2686
2687 if (tb_enable_clx(sw))
2688 tb_sw_warn(sw, "failed to re-enable CL states\n");
2689
2690 if (tb_enable_tmu(sw))
2691 tb_sw_warn(sw, "failed to restore TMU configuration\n");
2692
2693 tb_switch_configuration_valid(sw);
2694
2695 tb_switch_for_each_port(sw, port) {
2696 if (!tb_port_has_remote(port) && !port->xdomain)
2697 continue;
2698
2699 if (port->remote) {
2700 tb_switch_set_link_width(port->remote->sw,
2701 port->remote->sw->link_width);
2702 tb_switch_configure_link(port->remote->sw);
2703
2704 tb_restore_children(port->remote->sw);
2705 } else if (port->xdomain) {
2706 tb_port_configure_xdomain(port, port->xdomain);
2707 }
2708 }
2709 }
2710
2711 static int tb_resume_noirq(struct tb *tb)
2712 {
2713 struct tb_cm *tcm = tb_priv(tb);
2714 struct tb_tunnel *tunnel, *n;
2715 unsigned int usb3_delay = 0;
2716 LIST_HEAD(tunnels);
2717
2718 tb_dbg(tb, "resuming...\n");
2719
2720 /*
2721 * For non-USB4 hosts (Apple systems) remove any PCIe devices
2722 * the firmware might have setup.
2723 */
2724 if (!tb_switch_is_usb4(tb->root_switch))
2725 tb_switch_reset(tb->root_switch);
2726
2727 tb_switch_resume(tb->root_switch, false);
2728 tb_free_invalid_tunnels(tb);
2729 tb_free_unplugged_children(tb->root_switch);
2730 tb_restore_children(tb->root_switch);
2731
2732 /*
2733 * If we get here from suspend to disk, the boot firmware or the
2734 * restore kernel might have created tunnels of their own. Since
2735 * we cannot be sure they are usable for us we find and tear
2736 * them down.
2737 */
2738 tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
2739 list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
2740 if (tb_tunnel_is_usb3(tunnel))
2741 usb3_delay = 500;
2742 tb_tunnel_deactivate(tunnel);
2743 tb_tunnel_free(tunnel);
2744 }
2745
2746 /* Re-create our tunnels now */
2747 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
2748 /* USB3 requires delay before it can be re-activated */
2749 if (tb_tunnel_is_usb3(tunnel)) {
2750 msleep(usb3_delay);
2751 /* Only need to do it once */
2752 usb3_delay = 0;
2753 }
2754 tb_tunnel_restart(tunnel);
2755 }
2756 if (!list_empty(&tcm->tunnel_list)) {
2757 /*
2758 * The PCIe links need some time to get going.
2759 * 100 ms has been enough in practice.
2760 */
2761 tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
2762 msleep(100);
2763 }
2764 /* Allow tb_handle_hotplug to progress events */
2765 tcm->hotplug_active = true;
2766 tb_dbg(tb, "resume finished\n");
2767
2768 return 0;
2769 }
2770
2771 static int tb_free_unplugged_xdomains(struct tb_switch *sw)
2772 {
2773 struct tb_port *port;
2774 int ret = 0;
2775
2776 tb_switch_for_each_port(sw, port) {
2777 if (tb_is_upstream_port(port))
2778 continue;
2779 if (port->xdomain && port->xdomain->is_unplugged) {
2780 tb_retimer_remove_all(port);
2781 tb_xdomain_remove(port->xdomain);
2782 tb_port_unconfigure_xdomain(port);
2783 port->xdomain = NULL;
2784 ret++;
2785 } else if (port->remote) {
2786 ret += tb_free_unplugged_xdomains(port->remote->sw);
2787 }
2788 }
2789
2790 return ret;
2791 }
2792
2793 static int tb_freeze_noirq(struct tb *tb)
2794 {
2795 struct tb_cm *tcm = tb_priv(tb);
2796
2797 tcm->hotplug_active = false;
2798 return 0;
2799 }
2800
2801 static int tb_thaw_noirq(struct tb *tb)
2802 {
2803 struct tb_cm *tcm = tb_priv(tb);
2804
2805 tcm->hotplug_active = true;
2806 return 0;
2807 }
2808
2809 static void tb_complete(struct tb *tb)
2810 {
2811 /*
2812 * Release any unplugged XDomains. If another domain has been
2813 * swapped in place of an unplugged XDomain, run another rescan
2814 * to pick it up.
2815 */
2816 mutex_lock(&tb->lock);
2817 if (tb_free_unplugged_xdomains(tb->root_switch))
2818 tb_scan_switch(tb->root_switch);
2819 mutex_unlock(&tb->lock);
2820 }
2821
2822 static int tb_runtime_suspend(struct tb *tb)
2823 {
2824 struct tb_cm *tcm = tb_priv(tb);
2825
2826 mutex_lock(&tb->lock);
2827 tb_switch_suspend(tb->root_switch, true);
2828 tcm->hotplug_active = false;
2829 mutex_unlock(&tb->lock);
2830
2831 return 0;
2832 }
2833
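/*
 * Delayed work scheduled from tb_runtime_resume() to remove any
 * routers and XDomains that were unplugged while the domain was
 * runtime suspended.
 */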
2834 static void tb_remove_work(struct work_struct *work)
2835 {
2836 struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
2837 struct tb *tb = tcm_to_tb(tcm);
2838
2839 mutex_lock(&tb->lock);
2840 if (tb->root_switch) {
2841 tb_free_unplugged_children(tb->root_switch);
2842 tb_free_unplugged_xdomains(tb->root_switch);
2843 }
2844 mutex_unlock(&tb->lock);
2845 }
2846
2847 static int tb_runtime_resume(struct tb *tb)
2848 {
2849 struct tb_cm *tcm = tb_priv(tb);
2850 struct tb_tunnel *tunnel, *n;
2851
2852 mutex_lock(&tb->lock);
2853 tb_switch_resume(tb->root_switch, true);
2854 tb_free_invalid_tunnels(tb);
2855 tb_restore_children(tb->root_switch);
2856 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
2857 tb_tunnel_restart(tunnel);
2858 tcm->hotplug_active = true;
2859 mutex_unlock(&tb->lock);
2860
2861 /*
2862 * Schedule cleanup of any unplugged devices. Run this in a
2863 * separate thread to avoid possible deadlock if the device
2864 * removal runtime resumes the unplugged device.
2865 */
2866 queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
2867 return 0;
2868 }
2869
2870 static const struct tb_cm_ops tb_cm_ops = {
2871 .start = tb_start,
2872 .stop = tb_stop,
2873 .suspend_noirq = tb_suspend_noirq,
2874 .resume_noirq = tb_resume_noirq,
2875 .freeze_noirq = tb_freeze_noirq,
2876 .thaw_noirq = tb_thaw_noirq,
2877 .complete = tb_complete,
2878 .runtime_suspend = tb_runtime_suspend,
2879 .runtime_resume = tb_runtime_resume,
2880 .handle_event = tb_handle_event,
2881 .disapprove_switch = tb_disconnect_pci,
2882 .approve_switch = tb_tunnel_pci,
2883 .approve_xdomain_paths = tb_approve_xdomain_paths,
2884 .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
2885 };
2886
2887 /*
2888 * During suspend the Thunderbolt controller is reset and all PCIe
2889 * tunnels are lost. The NHI driver will try to reestablish all tunnels
2890 * during resume. This adds device links between the tunneled PCIe
2891 * downstream ports and the NHI so that the device core will make sure
2892 * NHI is resumed first before the rest.
2893 */
2894 static bool tb_apple_add_links(struct tb_nhi *nhi)
2895 {
2896 struct pci_dev *upstream, *pdev;
2897 bool ret;
2898
2899 if (!x86_apple_machine)
2900 return false;
2901
2902 switch (nhi->pdev->device) {
2903 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
2904 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
2905 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
2906 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
2907 break;
2908 default:
2909 return false;
2910 }
2911
2912 upstream = pci_upstream_bridge(nhi->pdev);
2913 while (upstream) {
2914 if (!pci_is_pcie(upstream))
2915 return false;
2916 if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
2917 break;
2918 upstream = pci_upstream_bridge(upstream);
2919 }
2920
2921 if (!upstream)
2922 return false;
2923
2924 /*
2925 * For each hotplug downstream port, create a device link
2926 * back to the NHI so that PCIe tunnels can be re-established after
2927 * sleep.
2928 */
2929 ret = false;
2930 for_each_pci_bridge(pdev, upstream->subordinate) {
2931 const struct device_link *link;
2932
2933 if (!pci_is_pcie(pdev))
2934 continue;
2935 if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
2936 !pdev->is_hotplug_bridge)
2937 continue;
2938
2939 link = device_link_add(&pdev->dev, &nhi->pdev->dev,
2940 DL_FLAG_AUTOREMOVE_SUPPLIER |
2941 DL_FLAG_PM_RUNTIME);
2942 if (link) {
2943 dev_dbg(&nhi->pdev->dev, "created link from %s\n",
2944 dev_name(&pdev->dev));
2945 ret = true;
2946 } else {
2947 dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
2948 dev_name(&pdev->dev));
2949 }
2950 }
2951
2952 return ret;
2953 }
2954
2955 struct tb *tb_probe(struct tb_nhi *nhi)
2956 {
2957 struct tb_cm *tcm;
2958 struct tb *tb;
2959
2960 tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
2961 if (!tb)
2962 return NULL;
2963
2964 if (tb_acpi_may_tunnel_pcie())
2965 tb->security_level = TB_SECURITY_USER;
2966 else
2967 tb->security_level = TB_SECURITY_NOPCIE;
2968
2969 tb->cm_ops = &tb_cm_ops;
2970
2971 tcm = tb_priv(tb);
2972 INIT_LIST_HEAD(&tcm->tunnel_list);
2973 INIT_LIST_HEAD(&tcm->dp_resources);
2974 INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
2975 tb_init_bandwidth_groups(tcm);
2976
2977 tb_dbg(tb, "using software connection manager\n");
2978
2979 /*
2980 * Device links are needed to make sure we establish tunnels
2981 * before the PCIe/USB stack is resumed, so complain here if we
2982 * find them missing.
2983 */
2984 if (!tb_apple_add_links(nhi) && !tb_acpi_add_links(nhi))
2985 tb_warn(tb, "device links to tunneled native ports are missing!\n");
2986
2987 return tb;
2988 }
2989