1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt driver - Tunneling support
4  *
5  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6  * Copyright (C) 2019, Intel Corporation
7  */
8 
9 #include <linux/delay.h>
10 #include <linux/slab.h>
11 #include <linux/list.h>
12 #include <linux/ktime.h>
13 #include <linux/string_helpers.h>
14 
15 #include "tunnel.h"
16 #include "tb.h"
17 
18 /* PCIe adapters always use HopID 8 for both directions */
19 #define TB_PCI_HOPID			8
20 
21 #define TB_PCI_PATH_DOWN		0
22 #define TB_PCI_PATH_UP			1
23 
24 #define TB_PCI_PRIORITY			3
25 #define TB_PCI_WEIGHT			1
26 
27 /* USB3 adapters always use HopID 8 for both directions */
28 #define TB_USB3_HOPID			8
29 
30 #define TB_USB3_PATH_DOWN		0
31 #define TB_USB3_PATH_UP			1
32 
33 #define TB_USB3_PRIORITY		3
34 #define TB_USB3_WEIGHT			2
35 
36 /* DP adapters use HopID 8 for AUX and 9 for Video */
37 #define TB_DP_AUX_TX_HOPID		8
38 #define TB_DP_AUX_RX_HOPID		8
39 #define TB_DP_VIDEO_HOPID		9
40 
41 #define TB_DP_VIDEO_PATH_OUT		0
42 #define TB_DP_AUX_PATH_OUT		1
43 #define TB_DP_AUX_PATH_IN		2
44 
45 #define TB_DP_VIDEO_PRIORITY		1
46 #define TB_DP_VIDEO_WEIGHT		1
47 
48 #define TB_DP_AUX_PRIORITY		2
49 #define TB_DP_AUX_WEIGHT		1
50 
51 /* Minimum number of credits needed for PCIe path */
52 #define TB_MIN_PCIE_CREDITS		6U
53 /*
54  * Number of credits we try to allocate for each DMA path if not limited
55  * by the host router baMaxHI.
56  */
57 #define TB_DMA_CREDITS			14
58 /* Minimum number of credits for DMA path */
59 #define TB_MIN_DMA_CREDITS		1
60 
61 #define TB_DMA_PRIORITY			5
62 #define TB_DMA_WEIGHT			1
63 
64 /*
65  * Reserve additional bandwidth for USB 3.x and PCIe bulk traffic
66  * according to the USB4 v2 Connection Manager guide. This ends up reserving
67  * 1500 Mb/s for PCIe and 3000 Mb/s for USB 3.x taking weights into
68  * account.
69  */
70 #define USB4_V2_PCI_MIN_BANDWIDTH	(1500 * TB_PCI_WEIGHT)
71 #define USB4_V2_USB3_MIN_BANDWIDTH	(1500 * TB_USB3_WEIGHT)
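/*
 * Worked example: with TB_PCI_WEIGHT = 1 and TB_USB3_WEIGHT = 2 the
 * values above evaluate to 1500 * 1 = 1500 Mb/s for PCIe and
 * 1500 * 2 = 3000 Mb/s for USB 3.x, matching the figures quoted in the
 * comment above.
 */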
72 
73 static unsigned int dma_credits = TB_DMA_CREDITS;
74 module_param(dma_credits, uint, 0444);
75 MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
76                 __MODULE_STRING(TB_DMA_CREDITS) ")");
77 
78 static bool bw_alloc_mode = true;
79 module_param(bw_alloc_mode, bool, 0444);
80 MODULE_PARM_DESC(bw_alloc_mode,
81 		 "enable bandwidth allocation mode if supported (default: true)");
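/*
 * Both module parameters above are read-only at runtime (permissions
 * 0444). As a hypothetical example they could be set at load time,
 * e.g. on the kernel command line: thunderbolt.dma_credits=20
 * thunderbolt.bw_alloc_mode=0 (assuming the driver is built as the
 * "thunderbolt" module).
 */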
82 
83 static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
84 
85 static inline unsigned int tb_usable_credits(const struct tb_port *port)
86 {
87 	return port->total_credits - port->ctl_credits;
88 }
89 
90 /**
91  * tb_available_credits() - Available credits for PCIe and DMA
92  * @port: Lane adapter to check
93  * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP
94  *		    streams possible through this lane adapter
95  */
96 static unsigned int tb_available_credits(const struct tb_port *port,
97 					 size_t *max_dp_streams)
98 {
99 	const struct tb_switch *sw = port->sw;
100 	int credits, usb3, pcie, spare;
101 	size_t ndp;
102 
103 	usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0;
104 	pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;
105 
106 	if (tb_acpi_is_xdomain_allowed()) {
107 		spare = min_not_zero(sw->max_dma_credits, dma_credits);
108 		/* Add some credits for potential second DMA tunnel */
109 		spare += TB_MIN_DMA_CREDITS;
110 	} else {
111 		spare = 0;
112 	}
113 
114 	credits = tb_usable_credits(port);
115 	if (tb_acpi_may_tunnel_dp()) {
116 		/*
117 		 * Maximum number of DP streams possible through the
118 		 * lane adapter.
119 		 */
120 		if (sw->min_dp_aux_credits + sw->min_dp_main_credits)
121 			ndp = (credits - (usb3 + pcie + spare)) /
122 			      (sw->min_dp_aux_credits + sw->min_dp_main_credits);
123 		else
124 			ndp = 0;
125 	} else {
126 		ndp = 0;
127 	}
128 	credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
129 	credits -= usb3;
130 
131 	if (max_dp_streams)
132 		*max_dp_streams = ndp;
133 
134 	return credits > 0 ? credits : 0;
135 }
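/*
 * Illustrative example with purely hypothetical numbers: if a lane
 * adapter has 120 total credits and 2 control credits, usable credits
 * are 118. With usb3 = 20, pcie = 30 and spare = 14 + 1 = 15 reserved,
 * and min_dp_aux_credits + min_dp_main_credits = 1 + 18 = 19, then
 * ndp = (118 - 65) / 19 = 2 DP streams, and the function returns
 * 118 - 2 * 19 - 20 = 60 credits available for PCIe/DMA.
 */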
136 
137 static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
138 					 enum tb_tunnel_type type)
139 {
140 	struct tb_tunnel *tunnel;
141 
142 	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
143 	if (!tunnel)
144 		return NULL;
145 
146 	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
147 	if (!tunnel->paths) {
148 		tb_tunnel_free(tunnel);
149 		return NULL;
150 	}
151 
152 	INIT_LIST_HEAD(&tunnel->list);
153 	tunnel->tb = tb;
154 	tunnel->npaths = npaths;
155 	tunnel->type = type;
156 
157 	return tunnel;
158 }
159 
160 static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
161 {
162 	struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
163 	int ret;
164 
165 	/* Only supported if both routers are at least USB4 v2 */
166 	if (tb_port_get_link_generation(port) < 4)
167 		return 0;
168 
169 	ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable);
170 	if (ret)
171 		return ret;
172 
173 	ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable);
174 	if (ret)
175 		return ret;
176 
177 	tb_tunnel_dbg(tunnel, "extended encapsulation %s\n",
178 		      str_enabled_disabled(enable));
179 	return 0;
180 }
181 
182 static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
183 {
184 	int res;
185 
186 	if (activate) {
187 		res = tb_pci_set_ext_encapsulation(tunnel, activate);
188 		if (res)
189 			return res;
190 	}
191 
192 	res = tb_pci_port_enable(tunnel->src_port, activate);
193 	if (res)
194 		return res;
195 
196 	if (tb_port_is_pcie_up(tunnel->dst_port)) {
197 		res = tb_pci_port_enable(tunnel->dst_port, activate);
198 		if (res)
199 			return res;
200 	}
201 
202 	return activate ? 0 : tb_pci_set_ext_encapsulation(tunnel, activate);
203 }
204 
205 static int tb_pci_init_credits(struct tb_path_hop *hop)
206 {
207 	struct tb_port *port = hop->in_port;
208 	struct tb_switch *sw = port->sw;
209 	unsigned int credits;
210 
211 	if (tb_port_use_credit_allocation(port)) {
212 		unsigned int available;
213 
214 		available = tb_available_credits(port, NULL);
215 		credits = min(sw->max_pcie_credits, available);
216 
217 		if (credits < TB_MIN_PCIE_CREDITS)
218 			return -ENOSPC;
219 
220 		credits = max(TB_MIN_PCIE_CREDITS, credits);
221 	} else {
222 		if (tb_port_is_null(port))
223 			credits = port->bonded ? 32 : 16;
224 		else
225 			credits = 7;
226 	}
227 
228 	hop->initial_credits = credits;
229 	return 0;
230 }
231 
232 static int tb_pci_init_path(struct tb_path *path)
233 {
234 	struct tb_path_hop *hop;
235 
236 	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
237 	path->egress_shared_buffer = TB_PATH_NONE;
238 	path->ingress_fc_enable = TB_PATH_ALL;
239 	path->ingress_shared_buffer = TB_PATH_NONE;
240 	path->priority = TB_PCI_PRIORITY;
241 	path->weight = TB_PCI_WEIGHT;
242 	path->drop_packages = 0;
243 
244 	tb_path_for_each_hop(path, hop) {
245 		int ret;
246 
247 		ret = tb_pci_init_credits(hop);
248 		if (ret)
249 			return ret;
250 	}
251 
252 	return 0;
253 }
254 
255 /**
256  * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
257  * @tb: Pointer to the domain structure
258  * @down: PCIe downstream adapter
259  * @alloc_hopid: Allocate HopIDs from visited ports
260  *
261  * If @down adapter is active, follows the tunnel to the PCIe upstream
262  * adapter and back. Returns the discovered tunnel or %NULL if there was
263  * no tunnel.
264  */
265 struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
266 					 bool alloc_hopid)
267 {
268 	struct tb_tunnel *tunnel;
269 	struct tb_path *path;
270 
271 	if (!tb_pci_port_is_enabled(down))
272 		return NULL;
273 
274 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
275 	if (!tunnel)
276 		return NULL;
277 
278 	tunnel->activate = tb_pci_activate;
279 	tunnel->src_port = down;
280 
281 	/*
282 	 * Discover both paths even if they are not complete. We will
283 	 * clean them up by calling tb_tunnel_deactivate() below in that
284 	 * case.
285 	 */
286 	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
287 				&tunnel->dst_port, "PCIe Up", alloc_hopid);
288 	if (!path) {
289 		/* Just disable the downstream port */
290 		tb_pci_port_enable(down, false);
291 		goto err_free;
292 	}
293 	tunnel->paths[TB_PCI_PATH_UP] = path;
294 	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
295 		goto err_free;
296 
297 	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
298 				"PCIe Down", alloc_hopid);
299 	if (!path)
300 		goto err_deactivate;
301 	tunnel->paths[TB_PCI_PATH_DOWN] = path;
302 	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
303 		goto err_deactivate;
304 
305 	/* Validate that the tunnel is complete */
306 	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
307 		tb_port_warn(tunnel->dst_port,
308 			     "path does not end on a PCIe adapter, cleaning up\n");
309 		goto err_deactivate;
310 	}
311 
312 	if (down != tunnel->src_port) {
313 		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
314 		goto err_deactivate;
315 	}
316 
317 	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
318 		tb_tunnel_warn(tunnel,
319 			       "tunnel is not fully activated, cleaning up\n");
320 		goto err_deactivate;
321 	}
322 
323 	tb_tunnel_dbg(tunnel, "discovered\n");
324 	return tunnel;
325 
326 err_deactivate:
327 	tb_tunnel_deactivate(tunnel);
328 err_free:
329 	tb_tunnel_free(tunnel);
330 
331 	return NULL;
332 }
333 
334 /**
335  * tb_tunnel_alloc_pci() - allocate a pci tunnel
336  * @tb: Pointer to the domain structure
337  * @up: PCIe upstream adapter port
338  * @down: PCIe downstream adapter port
339  *
340  * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
341  * TB_TYPE_PCIE_DOWN.
342  *
343  * Return: Returns a tb_tunnel on success or NULL on failure.
344  */
345 struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
346 				      struct tb_port *down)
347 {
348 	struct tb_tunnel *tunnel;
349 	struct tb_path *path;
350 
351 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
352 	if (!tunnel)
353 		return NULL;
354 
355 	tunnel->activate = tb_pci_activate;
356 	tunnel->src_port = down;
357 	tunnel->dst_port = up;
358 
359 	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
360 			     "PCIe Down");
361 	if (!path)
362 		goto err_free;
363 	tunnel->paths[TB_PCI_PATH_DOWN] = path;
364 	if (tb_pci_init_path(path))
365 		goto err_free;
366 
367 	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
368 			     "PCIe Up");
369 	if (!path)
370 		goto err_free;
371 	tunnel->paths[TB_PCI_PATH_UP] = path;
372 	if (tb_pci_init_path(path))
373 		goto err_free;
374 
375 	return tunnel;
376 
377 err_free:
378 	tb_tunnel_free(tunnel);
379 	return NULL;
380 }
381 
382 /**
383  * tb_tunnel_reserved_pci() - Amount of bandwidth to reserve for PCIe
384  * @port: Lane 0 adapter
385  * @reserved_up: Upstream bandwidth in Mb/s to reserve
386  * @reserved_down: Downstream bandwidth in Mb/s to reserve
387  *
388  * Can be called on any connected lane 0 adapter to find out how much
389  * bandwidth needs to be left in reserve for possible PCIe bulk traffic.
390  * Returns true if there is something to be reserved and writes the
391  * amount to @reserved_down/@reserved_up. Otherwise returns false and
392  * does not touch the parameters.
393  */
394 bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
395 			    int *reserved_down)
396 {
397 	if (WARN_ON_ONCE(!port->remote))
398 		return false;
399 
400 	if (!tb_acpi_may_tunnel_pcie())
401 		return false;
402 
403 	if (tb_port_get_link_generation(port) < 4)
404 		return false;
405 
406 	/* Must have PCIe adapters */
407 	if (tb_is_upstream_port(port)) {
408 		if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_UP))
409 			return false;
410 		if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_DOWN))
411 			return false;
412 	} else {
413 		if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_DOWN))
414 			return false;
415 		if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_UP))
416 			return false;
417 	}
418 
419 	*reserved_up = USB4_V2_PCI_MIN_BANDWIDTH;
420 	*reserved_down = USB4_V2_PCI_MIN_BANDWIDTH;
421 
422 	tb_port_dbg(port, "reserving %u/%u Mb/s for PCIe\n", *reserved_up,
423 		    *reserved_down);
424 	return true;
425 }
426 
427 static bool tb_dp_is_usb4(const struct tb_switch *sw)
428 {
429 	/* Titan Ridge DP adapters need the same treatment as USB4 */
430 	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
431 }
432 
433 static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out,
434 			      int timeout_msec)
435 {
436 	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
437 	u32 val;
438 	int ret;
439 
440 	/* Both ends need to support this */
441 	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
442 		return 0;
443 
444 	ret = tb_port_read(out, &val, TB_CFG_PORT,
445 			   out->cap_adap + DP_STATUS_CTRL, 1);
446 	if (ret)
447 		return ret;
448 
449 	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;
450 
451 	ret = tb_port_write(out, &val, TB_CFG_PORT,
452 			    out->cap_adap + DP_STATUS_CTRL, 1);
453 	if (ret)
454 		return ret;
455 
456 	do {
457 		ret = tb_port_read(out, &val, TB_CFG_PORT,
458 				   out->cap_adap + DP_STATUS_CTRL, 1);
459 		if (ret)
460 			return ret;
461 		if (!(val & DP_STATUS_CTRL_CMHS))
462 			return 0;
463 		usleep_range(100, 150);
464 	} while (ktime_before(ktime_get(), timeout));
465 
466 	return -ETIMEDOUT;
467 }
468 
469 /*
470  * Returns maximum possible rate from capability supporting only DP 2.0
471  * and below. Used when DP BW allocation mode is not enabled.
472  */
473 static inline u32 tb_dp_cap_get_rate(u32 val)
474 {
475 	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;
476 
477 	switch (rate) {
478 	case DP_COMMON_CAP_RATE_RBR:
479 		return 1620;
480 	case DP_COMMON_CAP_RATE_HBR:
481 		return 2700;
482 	case DP_COMMON_CAP_RATE_HBR2:
483 		return 5400;
484 	case DP_COMMON_CAP_RATE_HBR3:
485 		return 8100;
486 	default:
487 		return 0;
488 	}
489 }
490 
491 /*
492  * Returns maximum possible rate from capability supporting DP 2.1
493  * UHBR20, 13.5 and 10 rates as well. Use only when DP BW allocation
494  * mode is enabled.
495  */
496 static inline u32 tb_dp_cap_get_rate_ext(u32 val)
497 {
498 	if (val & DP_COMMON_CAP_UHBR20)
499 		return 20000;
500 	else if (val & DP_COMMON_CAP_UHBR13_5)
501 		return 13500;
502 	else if (val & DP_COMMON_CAP_UHBR10)
503 		return 10000;
504 
505 	return tb_dp_cap_get_rate(val);
506 }
507 
508 static inline bool tb_dp_is_uhbr_rate(unsigned int rate)
509 {
510 	return rate >= 10000;
511 }
512 
513 static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
514 {
515 	val &= ~DP_COMMON_CAP_RATE_MASK;
516 	switch (rate) {
517 	default:
518 		WARN(1, "invalid rate %u passed, defaulting to 1620 Mb/s\n", rate);
519 		fallthrough;
520 	case 1620:
521 		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
522 		break;
523 	case 2700:
524 		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
525 		break;
526 	case 5400:
527 		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
528 		break;
529 	case 8100:
530 		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
531 		break;
532 	}
533 	return val;
534 }
535 
536 static inline u32 tb_dp_cap_get_lanes(u32 val)
537 {
538 	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;
539 
540 	switch (lanes) {
541 	case DP_COMMON_CAP_1_LANE:
542 		return 1;
543 	case DP_COMMON_CAP_2_LANES:
544 		return 2;
545 	case DP_COMMON_CAP_4_LANES:
546 		return 4;
547 	default:
548 		return 0;
549 	}
550 }
551 
552 static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
553 {
554 	val &= ~DP_COMMON_CAP_LANES_MASK;
555 	switch (lanes) {
556 	default:
557 		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
558 		     lanes);
559 		fallthrough;
560 	case 1:
561 		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
562 		break;
563 	case 2:
564 		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
565 		break;
566 	case 4:
567 		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
568 		break;
569 	}
570 	return val;
571 }
572 
573 static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
574 {
575 	/* Tunneling removes the DP 8b/10b or 128b/132b encoding overhead */
576 	if (tb_dp_is_uhbr_rate(rate))
577 		return rate * lanes * 128 / 132;
578 	return rate * lanes * 8 / 10;
579 }
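/*
 * For example HBR2 x4 (8b/10b): 5400 * 4 * 8 / 10 = 17280 Mb/s, and
 * UHBR10 x4 (128b/132b): 10000 * 4 * 128 / 132 = 38787 Mb/s.
 */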
580 
581 static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
582 				  u32 out_rate, u32 out_lanes, u32 *new_rate,
583 				  u32 *new_lanes)
584 {
585 	static const u32 dp_bw[][2] = {
586 		/* Mb/s, lanes */
587 		{ 8100, 4 }, /* 25920 Mb/s */
588 		{ 5400, 4 }, /* 17280 Mb/s */
589 		{ 8100, 2 }, /* 12960 Mb/s */
590 		{ 2700, 4 }, /* 8640 Mb/s */
591 		{ 5400, 2 }, /* 8640 Mb/s */
592 		{ 8100, 1 }, /* 6480 Mb/s */
593 		{ 1620, 4 }, /* 5184 Mb/s */
594 		{ 5400, 1 }, /* 4320 Mb/s */
595 		{ 2700, 2 }, /* 4320 Mb/s */
596 		{ 1620, 2 }, /* 2592 Mb/s */
597 		{ 2700, 1 }, /* 2160 Mb/s */
598 		{ 1620, 1 }, /* 1296 Mb/s */
599 	};
600 	unsigned int i;
601 
602 	/*
603 	 * Find a combination that can fit into max_bw and does not
604 	 * exceed the maximum rate and lanes supported by the DP OUT and
605 	 * DP IN adapters.
606 	 */
607 	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
608 		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
609 			continue;
610 
611 		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
612 			continue;
613 
614 		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
615 			*new_rate = dp_bw[i][0];
616 			*new_lanes = dp_bw[i][1];
617 			return 0;
618 		}
619 	}
620 
621 	return -ENOSR;
622 }
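/*
 * For example, if both adapters support HBR3 x4 (25920 Mb/s) but
 * max_bw is 16000 Mb/s, the first table entry that fits is
 * 8100 Mb/s x2 = 12960 Mb/s.
 */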
623 
624 static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
625 {
626 	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
627 	struct tb_port *out = tunnel->dst_port;
628 	struct tb_port *in = tunnel->src_port;
629 	int ret, max_bw;
630 
631 	/*
632 	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
633 	 * newer generation hardware.
634 	 */
635 	if (in->sw->generation < 2 || out->sw->generation < 2)
636 		return 0;
637 
638 	/*
639 	 * Perform connection manager handshake between IN and OUT ports
640 	 * before capabilities exchange can take place.
641 	 */
642 	ret = tb_dp_cm_handshake(in, out, 3000);
643 	if (ret)
644 		return ret;
645 
646 	/* Read both DP_LOCAL_CAP registers */
647 	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
648 			   in->cap_adap + DP_LOCAL_CAP, 1);
649 	if (ret)
650 		return ret;
651 
652 	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
653 			   out->cap_adap + DP_LOCAL_CAP, 1);
654 	if (ret)
655 		return ret;
656 
657 	/* Write IN local caps to OUT remote caps */
658 	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
659 			    out->cap_adap + DP_REMOTE_CAP, 1);
660 	if (ret)
661 		return ret;
662 
663 	in_rate = tb_dp_cap_get_rate(in_dp_cap);
664 	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
665 	tb_tunnel_dbg(tunnel,
666 		      "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
667 		      in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
668 
669 	/*
670 	 * If the tunnel bandwidth is limited (max_bw is set) then see
671 	 * if we need to reduce bandwidth to fit there.
672 	 */
673 	out_rate = tb_dp_cap_get_rate(out_dp_cap);
674 	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
675 	bw = tb_dp_bandwidth(out_rate, out_lanes);
676 	tb_tunnel_dbg(tunnel,
677 		      "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
678 		      out_rate, out_lanes, bw);
679 
680 	if (tb_port_path_direction_downstream(in, out))
681 		max_bw = tunnel->max_down;
682 	else
683 		max_bw = tunnel->max_up;
684 
685 	if (max_bw && bw > max_bw) {
686 		u32 new_rate, new_lanes, new_bw;
687 
688 		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
689 					     out_rate, out_lanes, &new_rate,
690 					     &new_lanes);
691 		if (ret) {
692 			tb_tunnel_info(tunnel, "not enough bandwidth\n");
693 			return ret;
694 		}
695 
696 		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
697 		tb_tunnel_dbg(tunnel,
698 			      "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
699 			      new_rate, new_lanes, new_bw);
700 
701 		/*
702 		 * Set new rate and number of lanes before writing it to
703 		 * the IN port remote caps.
704 		 */
705 		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
706 		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
707 	}
708 
709 	/*
710 	 * Titan Ridge does not disable AUX timers when it gets
711 	 * SET_CONFIG with SET_LTTPR_MODE set. This causes problems with
712 	 * DP tunneling.
713 	 */
714 	if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) {
715 		out_dp_cap |= DP_COMMON_CAP_LTTPR_NS;
716 		tb_tunnel_dbg(tunnel, "disabling LTTPR\n");
717 	}
718 
719 	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
720 			     in->cap_adap + DP_REMOTE_CAP, 1);
721 }
722 
723 static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
724 {
725 	int ret, estimated_bw, granularity, tmp;
726 	struct tb_port *out = tunnel->dst_port;
727 	struct tb_port *in = tunnel->src_port;
728 	u32 out_dp_cap, out_rate, out_lanes;
729 	u32 in_dp_cap, in_rate, in_lanes;
730 	u32 rate, lanes;
731 
732 	if (!bw_alloc_mode)
733 		return 0;
734 
735 	ret = usb4_dp_port_set_cm_bandwidth_mode_supported(in, true);
736 	if (ret)
737 		return ret;
738 
739 	ret = usb4_dp_port_set_group_id(in, in->group->index);
740 	if (ret)
741 		return ret;
742 
743 	/*
744 	 * Get the non-reduced rate and lanes based on the lowest
745 	 * capability of both adapters.
746 	 */
747 	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
748 			   in->cap_adap + DP_LOCAL_CAP, 1);
749 	if (ret)
750 		return ret;
751 
752 	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
753 			   out->cap_adap + DP_LOCAL_CAP, 1);
754 	if (ret)
755 		return ret;
756 
757 	in_rate = tb_dp_cap_get_rate(in_dp_cap);
758 	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
759 	out_rate = tb_dp_cap_get_rate(out_dp_cap);
760 	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
761 
762 	rate = min(in_rate, out_rate);
763 	lanes = min(in_lanes, out_lanes);
764 	tmp = tb_dp_bandwidth(rate, lanes);
765 
766 	tb_tunnel_dbg(tunnel, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
767 		      rate, lanes, tmp);
768 
769 	ret = usb4_dp_port_set_nrd(in, rate, lanes);
770 	if (ret)
771 		return ret;
772 
773 	/*
774 	 * Pick a granularity that supports the maximum possible bandwidth.
775 	 * For that we use the UHBR rates too.
776 	 */
777 	in_rate = tb_dp_cap_get_rate_ext(in_dp_cap);
778 	out_rate = tb_dp_cap_get_rate_ext(out_dp_cap);
779 	rate = min(in_rate, out_rate);
780 	tmp = tb_dp_bandwidth(rate, lanes);
781 
782 	tb_tunnel_dbg(tunnel,
783 		      "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n",
784 		      rate, lanes, tmp);
785 
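	/*
	 * Valid granularity values are 250, 500 and 1000 Mb/s; keep
	 * doubling until the maximum bandwidth fits into the 8-bit
	 * (255 unit) bandwidth fields.
	 */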
786 	for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
787 	     granularity *= 2)
788 		;
789 
790 	tb_tunnel_dbg(tunnel, "granularity %d Mb/s\n", granularity);
791 
792 	/*
793 	 * Returns -EINVAL if granularity above is outside of the
794 	 * accepted ranges.
795 	 */
796 	ret = usb4_dp_port_set_granularity(in, granularity);
797 	if (ret)
798 		return ret;
799 
800 	/*
801 	 * Bandwidth estimation is pretty much what we have in
802 	 * max_up/down fields. For discovery we just read what the
803 	 * estimation was set to.
804 	 */
805 	if (tb_port_path_direction_downstream(in, out))
806 		estimated_bw = tunnel->max_down;
807 	else
808 		estimated_bw = tunnel->max_up;
809 
810 	tb_tunnel_dbg(tunnel, "estimated bandwidth %d Mb/s\n", estimated_bw);
811 
812 	ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw);
813 	if (ret)
814 		return ret;
815 
816 	/* Initial allocation should be 0 according to the spec */
817 	ret = usb4_dp_port_allocate_bandwidth(in, 0);
818 	if (ret)
819 		return ret;
820 
821 	tb_tunnel_dbg(tunnel, "bandwidth allocation mode enabled\n");
822 	return 0;
823 }
824 
825 static int tb_dp_init(struct tb_tunnel *tunnel)
826 {
827 	struct tb_port *in = tunnel->src_port;
828 	struct tb_switch *sw = in->sw;
829 	struct tb *tb = in->sw->tb;
830 	int ret;
831 
832 	ret = tb_dp_xchg_caps(tunnel);
833 	if (ret)
834 		return ret;
835 
836 	if (!tb_switch_is_usb4(sw))
837 		return 0;
838 
839 	if (!usb4_dp_port_bandwidth_mode_supported(in))
840 		return 0;
841 
842 	tb_tunnel_dbg(tunnel, "bandwidth allocation mode supported\n");
843 
844 	ret = usb4_dp_port_set_cm_id(in, tb->index);
845 	if (ret)
846 		return ret;
847 
848 	return tb_dp_bandwidth_alloc_mode_enable(tunnel);
849 }
850 
851 static void tb_dp_deinit(struct tb_tunnel *tunnel)
852 {
853 	struct tb_port *in = tunnel->src_port;
854 
855 	if (!usb4_dp_port_bandwidth_mode_supported(in))
856 		return;
857 	if (usb4_dp_port_bandwidth_mode_enabled(in)) {
858 		usb4_dp_port_set_cm_bandwidth_mode_supported(in, false);
859 		tb_tunnel_dbg(tunnel, "bandwidth allocation mode disabled\n");
860 	}
861 }
862 
863 static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
864 {
865 	int ret;
866 
867 	if (active) {
868 		struct tb_path **paths;
869 		int last;
870 
871 		paths = tunnel->paths;
872 		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;
873 
874 		tb_dp_port_set_hops(tunnel->src_port,
875 			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
876 			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
877 			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);
878 
879 		tb_dp_port_set_hops(tunnel->dst_port,
880 			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
881 			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
882 			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
883 	} else {
884 		tb_dp_port_hpd_clear(tunnel->src_port);
885 		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
886 		if (tb_port_is_dpout(tunnel->dst_port))
887 			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
888 	}
889 
890 	ret = tb_dp_port_enable(tunnel->src_port, active);
891 	if (ret)
892 		return ret;
893 
894 	if (tb_port_is_dpout(tunnel->dst_port))
895 		return tb_dp_port_enable(tunnel->dst_port, active);
896 
897 	return 0;
898 }
899 
900 /* max_bw is rounded up to the next granularity */
901 static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
902 						  int *max_bw)
903 {
904 	struct tb_port *in = tunnel->src_port;
905 	int ret, rate, lanes, nrd_bw;
906 	u32 cap;
907 
908 	/*
909 	 * The DP IN adapter DP_LOCAL_CAP register gets updated to the
910 	 * lowest AUX read parameter values, so we can use it to determine
911 	 * the maximum possible bandwidth over this link.
912 	 *
913 	 * See USB4 v2 spec 1.0 10.4.4.5.
914 	 */
915 	ret = tb_port_read(in, &cap, TB_CFG_PORT,
916 			   in->cap_adap + DP_LOCAL_CAP, 1);
917 	if (ret)
918 		return ret;
919 
920 	rate = tb_dp_cap_get_rate_ext(cap);
921 	if (tb_dp_is_uhbr_rate(rate)) {
922 		/*
923 		 * When UHBR is used there is no reduction in lanes so
924 		 * we can use this directly.
925 		 */
926 		lanes = tb_dp_cap_get_lanes(cap);
927 	} else {
928 		/*
929 		 * If there is no UHBR supported then check the
930 		 * If UHBR is not supported then check the
931 		 */
932 		ret = usb4_dp_port_nrd(in, &rate, &lanes);
933 		if (ret)
934 			return ret;
935 	}
936 
937 	nrd_bw = tb_dp_bandwidth(rate, lanes);
938 
939 	if (max_bw) {
940 		ret = usb4_dp_port_granularity(in);
941 		if (ret < 0)
942 			return ret;
943 		*max_bw = roundup(nrd_bw, ret);
944 	}
945 
946 	return nrd_bw;
947 }
948 
949 static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
950 						   int *consumed_up,
951 						   int *consumed_down)
952 {
953 	struct tb_port *out = tunnel->dst_port;
954 	struct tb_port *in = tunnel->src_port;
955 	int ret, allocated_bw, max_bw;
956 
957 	if (!usb4_dp_port_bandwidth_mode_enabled(in))
958 		return -EOPNOTSUPP;
959 
960 	if (!tunnel->bw_mode)
961 		return -EOPNOTSUPP;
962 
963 	/* Read what was allocated previously if any */
964 	ret = usb4_dp_port_allocated_bandwidth(in);
965 	if (ret < 0)
966 		return ret;
967 	allocated_bw = ret;
968 
969 	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
970 	if (ret < 0)
971 		return ret;
972 	if (allocated_bw == max_bw)
973 		allocated_bw = ret;
974 
975 	if (tb_port_path_direction_downstream(in, out)) {
976 		*consumed_up = 0;
977 		*consumed_down = allocated_bw;
978 	} else {
979 		*consumed_up = allocated_bw;
980 		*consumed_down = 0;
981 	}
982 
983 	return 0;
984 }
985 
986 static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
987 				     int *allocated_down)
988 {
989 	struct tb_port *out = tunnel->dst_port;
990 	struct tb_port *in = tunnel->src_port;
991 
992 	/*
993 	 * If we have already set the allocated bandwidth then use that.
994 	 * Otherwise we read it from the DPRX.
995 	 */
996 	if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) {
997 		int ret, allocated_bw, max_bw;
998 
999 		ret = usb4_dp_port_allocated_bandwidth(in);
1000 		if (ret < 0)
1001 			return ret;
1002 		allocated_bw = ret;
1003 
1004 		ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
1005 		if (ret < 0)
1006 			return ret;
1007 		if (allocated_bw == max_bw)
1008 			allocated_bw = ret;
1009 
1010 		if (tb_port_path_direction_downstream(in, out)) {
1011 			*allocated_up = 0;
1012 			*allocated_down = allocated_bw;
1013 		} else {
1014 			*allocated_up = allocated_bw;
1015 			*allocated_down = 0;
1016 		}
1017 		return 0;
1018 	}
1019 
1020 	return tunnel->consumed_bandwidth(tunnel, allocated_up,
1021 					  allocated_down);
1022 }
1023 
1024 static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
1025 				 int *alloc_down)
1026 {
1027 	struct tb_port *out = tunnel->dst_port;
1028 	struct tb_port *in = tunnel->src_port;
1029 	int max_bw, ret, tmp;
1030 
1031 	if (!usb4_dp_port_bandwidth_mode_enabled(in))
1032 		return -EOPNOTSUPP;
1033 
1034 	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
1035 	if (ret < 0)
1036 		return ret;
1037 
1038 	if (tb_port_path_direction_downstream(in, out)) {
1039 		tmp = min(*alloc_down, max_bw);
1040 		ret = usb4_dp_port_allocate_bandwidth(in, tmp);
1041 		if (ret)
1042 			return ret;
1043 		*alloc_down = tmp;
1044 		*alloc_up = 0;
1045 	} else {
1046 		tmp = min(*alloc_up, max_bw);
1047 		ret = usb4_dp_port_allocate_bandwidth(in, tmp);
1048 		if (ret)
1049 			return ret;
1050 		*alloc_down = 0;
1051 		*alloc_up = tmp;
1052 	}
1053 
1054 	/* Now we can use BW mode registers to figure out the bandwidth */
1055 	/* TODO: need to handle discovery too */
1056 	tunnel->bw_mode = true;
1057 	return 0;
1058 }
1059 
1060 static int tb_dp_read_dprx(struct tb_tunnel *tunnel, u32 *rate, u32 *lanes,
1061 			   int timeout_msec)
1062 {
1063 	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
1064 	struct tb_port *in = tunnel->src_port;
1065 
1066 	/*
1067 	 * Wait for DPRX done. Normally it should already be set for an
1068 	 * active tunnel.
1069 	 */
1070 	do {
1071 		u32 val;
1072 		int ret;
1073 
1074 		ret = tb_port_read(in, &val, TB_CFG_PORT,
1075 				   in->cap_adap + DP_COMMON_CAP, 1);
1076 		if (ret)
1077 			return ret;
1078 
1079 		if (val & DP_COMMON_CAP_DPRX_DONE) {
1080 			*rate = tb_dp_cap_get_rate(val);
1081 			*lanes = tb_dp_cap_get_lanes(val);
1082 
1083 			tb_tunnel_dbg(tunnel, "DPRX read done\n");
1084 			return 0;
1085 		}
1086 		usleep_range(100, 150);
1087 	} while (ktime_before(ktime_get(), timeout));
1088 
1089 	return -ETIMEDOUT;
1090 }
1091 
1092 /* Read cap from tunnel DP IN */
1093 static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
1094 			  u32 *lanes)
1095 {
1096 	struct tb_port *in = tunnel->src_port;
1097 	u32 val;
1098 	int ret;
1099 
1100 	switch (cap) {
1101 	case DP_LOCAL_CAP:
1102 	case DP_REMOTE_CAP:
1103 		break;
1104 
1105 	default:
1106 		tb_tunnel_WARN(tunnel, "invalid capability index %#x\n", cap);
1107 		return -EINVAL;
1108 	}
1109 
1110 	/*
1111 	 * Read from the copied remote cap so that we take into account
1112 	 * if capabilities were reduced during exchange.
1113 	 */
1114 	ret = tb_port_read(in, &val, TB_CFG_PORT, in->cap_adap + cap, 1);
1115 	if (ret)
1116 		return ret;
1117 
1118 	*rate = tb_dp_cap_get_rate(val);
1119 	*lanes = tb_dp_cap_get_lanes(val);
1120 	return 0;
1121 }
1122 
1123 static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
1124 				   int *max_down)
1125 {
1126 	struct tb_port *in = tunnel->src_port;
1127 	int ret;
1128 
1129 	if (!usb4_dp_port_bandwidth_mode_enabled(in))
1130 		return -EOPNOTSUPP;
1131 
1132 	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL);
1133 	if (ret < 0)
1134 		return ret;
1135 
1136 	if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
1137 		*max_up = 0;
1138 		*max_down = ret;
1139 	} else {
1140 		*max_up = ret;
1141 		*max_down = 0;
1142 	}
1143 
1144 	return 0;
1145 }
1146 
1147 static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
1148 				    int *consumed_down)
1149 {
1150 	struct tb_port *in = tunnel->src_port;
1151 	const struct tb_switch *sw = in->sw;
1152 	u32 rate = 0, lanes = 0;
1153 	int ret;
1154 
1155 	if (tb_dp_is_usb4(sw)) {
1156 		/*
1157 		 * On USB4 routers check if the bandwidth allocation
1158 		 * mode is enabled first and then read the bandwidth
1159 		 * through those registers.
1160 		 */
1161 		ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
1162 							      consumed_down);
1163 		if (ret < 0) {
1164 			if (ret != -EOPNOTSUPP)
1165 				return ret;
1166 		} else if (!ret) {
1167 			return 0;
1168 		}
1169 		/*
1170 		 * Then see if the DPRX negotiation is ready and if yes
1171 		 * return that bandwidth (it may be smaller than the
1172 		 * reduced one). Otherwise return the remote (possibly
1173 		 * reduced) caps.
1174 		 */
1175 		ret = tb_dp_read_dprx(tunnel, &rate, &lanes, 150);
1176 		if (ret) {
1177 			if (ret == -ETIMEDOUT)
1178 				ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
1179 						     &rate, &lanes);
1180 			if (ret)
1181 				return ret;
1182 		}
1183 	} else if (sw->generation >= 2) {
1184 		ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
1185 		if (ret)
1186 			return ret;
1187 	} else {
1188 		/* No bandwidth management for legacy devices  */
1189 		*consumed_up = 0;
1190 		*consumed_down = 0;
1191 		return 0;
1192 	}
1193 
1194 	if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
1195 		*consumed_up = 0;
1196 		*consumed_down = tb_dp_bandwidth(rate, lanes);
1197 	} else {
1198 		*consumed_up = tb_dp_bandwidth(rate, lanes);
1199 		*consumed_down = 0;
1200 	}
1201 
1202 	return 0;
1203 }
1204 
1205 static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
1206 {
1207 	struct tb_port *port = hop->in_port;
1208 	struct tb_switch *sw = port->sw;
1209 
1210 	if (tb_port_use_credit_allocation(port))
1211 		hop->initial_credits = sw->min_dp_aux_credits;
1212 	else
1213 		hop->initial_credits = 1;
1214 }
1215 
1216 static void tb_dp_init_aux_path(struct tb_path *path)
1217 {
1218 	struct tb_path_hop *hop;
1219 
1220 	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
1221 	path->egress_shared_buffer = TB_PATH_NONE;
1222 	path->ingress_fc_enable = TB_PATH_ALL;
1223 	path->ingress_shared_buffer = TB_PATH_NONE;
1224 	path->priority = TB_DP_AUX_PRIORITY;
1225 	path->weight = TB_DP_AUX_WEIGHT;
1226 
1227 	tb_path_for_each_hop(path, hop)
1228 		tb_dp_init_aux_credits(hop);
1229 }
1230 
1231 static int tb_dp_init_video_credits(struct tb_path_hop *hop)
1232 {
1233 	struct tb_port *port = hop->in_port;
1234 	struct tb_switch *sw = port->sw;
1235 
1236 	if (tb_port_use_credit_allocation(port)) {
1237 		unsigned int nfc_credits;
1238 		size_t max_dp_streams;
1239 
1240 		tb_available_credits(port, &max_dp_streams);
1241 		/*
1242 		 * Read the number of currently allocated NFC credits
1243 		 * from the lane adapter. Since we only use them for DP
1244 		 * tunneling we can use that to figure out how many DP
1245 		 * tunnels already go through the lane adapter.
1246 		 */
1247 		nfc_credits = port->config.nfc_credits &
1248 				ADP_CS_4_NFC_BUFFERS_MASK;
1249 		if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
1250 			return -ENOSPC;
1251 
1252 		hop->nfc_credits = sw->min_dp_main_credits;
1253 	} else {
1254 		hop->nfc_credits = min(port->total_credits - 2, 12U);
1255 	}
1256 
1257 	return 0;
1258 }
1259 
1260 static int tb_dp_init_video_path(struct tb_path *path)
1261 {
1262 	struct tb_path_hop *hop;
1263 
1264 	path->egress_fc_enable = TB_PATH_NONE;
1265 	path->egress_shared_buffer = TB_PATH_NONE;
1266 	path->ingress_fc_enable = TB_PATH_NONE;
1267 	path->ingress_shared_buffer = TB_PATH_NONE;
1268 	path->priority = TB_DP_VIDEO_PRIORITY;
1269 	path->weight = TB_DP_VIDEO_WEIGHT;
1270 
1271 	tb_path_for_each_hop(path, hop) {
1272 		int ret;
1273 
1274 		ret = tb_dp_init_video_credits(hop);
1275 		if (ret)
1276 			return ret;
1277 	}
1278 
1279 	return 0;
1280 }
1281 
1282 static void tb_dp_dump(struct tb_tunnel *tunnel)
1283 {
1284 	struct tb_port *in, *out;
1285 	u32 dp_cap, rate, lanes;
1286 
1287 	in = tunnel->src_port;
1288 	out = tunnel->dst_port;
1289 
1290 	if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
1291 			 in->cap_adap + DP_LOCAL_CAP, 1))
1292 		return;
1293 
1294 	rate = tb_dp_cap_get_rate(dp_cap);
1295 	lanes = tb_dp_cap_get_lanes(dp_cap);
1296 
1297 	tb_tunnel_dbg(tunnel,
1298 		      "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
1299 		      rate, lanes, tb_dp_bandwidth(rate, lanes));
1300 
1301 	out = tunnel->dst_port;
1302 
1303 	if (tb_port_read(out, &dp_cap, TB_CFG_PORT,
1304 			 out->cap_adap + DP_LOCAL_CAP, 1))
1305 		return;
1306 
1307 	rate = tb_dp_cap_get_rate(dp_cap);
1308 	lanes = tb_dp_cap_get_lanes(dp_cap);
1309 
1310 	tb_tunnel_dbg(tunnel,
1311 		      "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
1312 		      rate, lanes, tb_dp_bandwidth(rate, lanes));
1313 
1314 	if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
1315 			 in->cap_adap + DP_REMOTE_CAP, 1))
1316 		return;
1317 
1318 	rate = tb_dp_cap_get_rate(dp_cap);
1319 	lanes = tb_dp_cap_get_lanes(dp_cap);
1320 
1321 	tb_tunnel_dbg(tunnel, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
1322 		      rate, lanes, tb_dp_bandwidth(rate, lanes));
1323 }
1324 
1325 /**
1326  * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
1327  * @tb: Pointer to the domain structure
1328  * @in: DP in adapter
1329  * @alloc_hopid: Allocate HopIDs from visited ports
1330  *
1331  * If @in adapter is active, follows the tunnel to the DP out adapter
1332  * and back. Returns the discovered tunnel or %NULL if there was no
1333  * tunnel.
1334  *
1335  * Return: DP tunnel or %NULL if no tunnel found.
1336  */
1337 struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
1338 					bool alloc_hopid)
1339 {
1340 	struct tb_tunnel *tunnel;
1341 	struct tb_port *port;
1342 	struct tb_path *path;
1343 
1344 	if (!tb_dp_port_is_enabled(in))
1345 		return NULL;
1346 
1347 	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
1348 	if (!tunnel)
1349 		return NULL;
1350 
1351 	tunnel->init = tb_dp_init;
1352 	tunnel->deinit = tb_dp_deinit;
1353 	tunnel->activate = tb_dp_activate;
1354 	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
1355 	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
1356 	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
1357 	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
1358 	tunnel->src_port = in;
1359 
1360 	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
1361 				&tunnel->dst_port, "Video", alloc_hopid);
1362 	if (!path) {
1363 		/* Just disable the DP IN port */
1364 		tb_dp_port_enable(in, false);
1365 		goto err_free;
1366 	}
1367 	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
1368 	if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT]))
1369 		goto err_free;
1370 
1371 	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
1372 				alloc_hopid);
1373 	if (!path)
1374 		goto err_deactivate;
1375 	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
1376 	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);
1377 
1378 	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
1379 				&port, "AUX RX", alloc_hopid);
1380 	if (!path)
1381 		goto err_deactivate;
1382 	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
1383 	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);
1384 
1385 	/* Validate that the tunnel is complete */
1386 	if (!tb_port_is_dpout(tunnel->dst_port)) {
1387 		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
1388 		goto err_deactivate;
1389 	}
1390 
1391 	if (!tb_dp_port_is_enabled(tunnel->dst_port))
1392 		goto err_deactivate;
1393 
1394 	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
1395 		goto err_deactivate;
1396 
1397 	if (port != tunnel->src_port) {
1398 		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
1399 		goto err_deactivate;
1400 	}
1401 
1402 	tb_dp_dump(tunnel);
1403 
1404 	tb_tunnel_dbg(tunnel, "discovered\n");
1405 	return tunnel;
1406 
1407 err_deactivate:
1408 	tb_tunnel_deactivate(tunnel);
1409 err_free:
1410 	tb_tunnel_free(tunnel);
1411 
1412 	return NULL;
1413 }
1414 
1415 /**
1416  * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
1417  * @tb: Pointer to the domain structure
1418  * @in: DP in adapter port
1419  * @out: DP out adapter port
1420  * @link_nr: Preferred lane adapter when the link is not bonded
1421  * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
1422  *	    if not limited)
1423  * @max_down: Maximum available downstream bandwidth for the DP tunnel
1424  *	      (%0 if not limited)
1425  *
1426  * Allocates a tunnel between @in and @out that is capable of tunneling
1427  * Display Port traffic.
1428  *
1429  * Return: Returns a tb_tunnel on success or NULL on failure.
1430  */
1431 struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
1432 				     struct tb_port *out, int link_nr,
1433 				     int max_up, int max_down)
1434 {
1435 	struct tb_tunnel *tunnel;
1436 	struct tb_path **paths;
1437 	struct tb_path *path;
1438 
1439 	if (WARN_ON(!in->cap_adap || !out->cap_adap))
1440 		return NULL;
1441 
1442 	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
1443 	if (!tunnel)
1444 		return NULL;
1445 
1446 	tunnel->init = tb_dp_init;
1447 	tunnel->deinit = tb_dp_deinit;
1448 	tunnel->activate = tb_dp_activate;
1449 	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
1450 	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
1451 	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
1452 	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
1453 	tunnel->src_port = in;
1454 	tunnel->dst_port = out;
1455 	tunnel->max_up = max_up;
1456 	tunnel->max_down = max_down;
1457 
1458 	paths = tunnel->paths;
1459 
1460 	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
1461 			     link_nr, "Video");
1462 	if (!path)
1463 		goto err_free;
1464 	tb_dp_init_video_path(path);
1465 	paths[TB_DP_VIDEO_PATH_OUT] = path;
1466 
1467 	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
1468 			     TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
1469 	if (!path)
1470 		goto err_free;
1471 	tb_dp_init_aux_path(path);
1472 	paths[TB_DP_AUX_PATH_OUT] = path;
1473 
1474 	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
1475 			     TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
1476 	if (!path)
1477 		goto err_free;
1478 	tb_dp_init_aux_path(path);
1479 	paths[TB_DP_AUX_PATH_IN] = path;
1480 
1481 	return tunnel;
1482 
1483 err_free:
1484 	tb_tunnel_free(tunnel);
1485 	return NULL;
1486 }
1487 
1488 static unsigned int tb_dma_available_credits(const struct tb_port *port)
1489 {
1490 	const struct tb_switch *sw = port->sw;
1491 	int credits;
1492 
1493 	credits = tb_available_credits(port, NULL);
1494 	if (tb_acpi_may_tunnel_pcie())
1495 		credits -= sw->max_pcie_credits;
1496 	credits -= port->dma_credits;
1497 
1498 	return credits > 0 ? credits : 0;
1499 }
1500 
1501 static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
1502 {
1503 	struct tb_port *port = hop->in_port;
1504 
1505 	if (tb_port_use_credit_allocation(port)) {
1506 		unsigned int available = tb_dma_available_credits(port);
1507 
1508 		/*
1509 		 * Need to have at least TB_MIN_DMA_CREDITS, otherwise
1510 		 * DMA path cannot be established.
1511 		 */
1512 		if (available < TB_MIN_DMA_CREDITS)
1513 			return -ENOSPC;
1514 
1515 		while (credits > available)
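		/* Clamp the request to what is actually available */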
1516 			credits--;
1517 
1518 		tb_port_dbg(port, "reserving %u credits for DMA path\n",
1519 			    credits);
1520 
1521 		port->dma_credits += credits;
1522 	} else {
1523 		if (tb_port_is_null(port))
1524 			credits = port->bonded ? 14 : 6;
1525 		else
1526 			credits = min(port->total_credits, credits);
1527 	}
1528 
1529 	hop->initial_credits = credits;
1530 	return 0;
1531 }
1532 
1533 /* Path from lane adapter to NHI */
1534 static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
1535 {
1536 	struct tb_path_hop *hop;
1537 	unsigned int i, tmp;
1538 
1539 	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
1540 	path->ingress_fc_enable = TB_PATH_ALL;
1541 	path->egress_shared_buffer = TB_PATH_NONE;
1542 	path->ingress_shared_buffer = TB_PATH_NONE;
1543 	path->priority = TB_DMA_PRIORITY;
1544 	path->weight = TB_DMA_WEIGHT;
1545 	path->clear_fc = true;
1546 
1547 	/*
1548 	 * First lane adapter is the one connected to the remote host.
1549 	 * We don't tunnel other traffic over this link so we can use all
1550 	 * the credits (except the ones reserved for control traffic).
1551 	 */
1552 	hop = &path->hops[0];
1553 	tmp = min(tb_usable_credits(hop->in_port), credits);
1554 	hop->initial_credits = tmp;
1555 	hop->in_port->dma_credits += tmp;
1556 
1557 	for (i = 1; i < path->path_length; i++) {
1558 		int ret;
1559 
1560 		ret = tb_dma_reserve_credits(&path->hops[i], credits);
1561 		if (ret)
1562 			return ret;
1563 	}
1564 
1565 	return 0;
1566 }
1567 
1568 /* Path from NHI to lane adapter */
1569 static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
1570 {
1571 	struct tb_path_hop *hop;
1572 
1573 	path->egress_fc_enable = TB_PATH_ALL;
1574 	path->ingress_fc_enable = TB_PATH_ALL;
1575 	path->egress_shared_buffer = TB_PATH_NONE;
1576 	path->ingress_shared_buffer = TB_PATH_NONE;
1577 	path->priority = TB_DMA_PRIORITY;
1578 	path->weight = TB_DMA_WEIGHT;
1579 	path->clear_fc = true;
1580 
1581 	tb_path_for_each_hop(path, hop) {
1582 		int ret;
1583 
1584 		ret = tb_dma_reserve_credits(hop, credits);
1585 		if (ret)
1586 			return ret;
1587 	}
1588 
1589 	return 0;
1590 }
1591 
1592 static void tb_dma_release_credits(struct tb_path_hop *hop)
1593 {
1594 	struct tb_port *port = hop->in_port;
1595 
1596 	if (tb_port_use_credit_allocation(port)) {
1597 		port->dma_credits -= hop->initial_credits;
1598 
1599 		tb_port_dbg(port, "released %u DMA path credits\n",
1600 			    hop->initial_credits);
1601 	}
1602 }
1603 
1604 static void tb_dma_deinit_path(struct tb_path *path)
1605 {
1606 	struct tb_path_hop *hop;
1607 
1608 	tb_path_for_each_hop(path, hop)
1609 		tb_dma_release_credits(hop);
1610 }
1611 
1612 static void tb_dma_deinit(struct tb_tunnel *tunnel)
1613 {
1614 	int i;
1615 
1616 	for (i = 0; i < tunnel->npaths; i++) {
1617 		if (!tunnel->paths[i])
1618 			continue;
1619 		tb_dma_deinit_path(tunnel->paths[i]);
1620 	}
1621 }
1622 
1623 /**
1624  * tb_tunnel_alloc_dma() - allocate a DMA tunnel
1625  * @tb: Pointer to the domain structure
1626  * @nhi: Host controller port
1627  * @dst: Destination null port which the other domain is connected to
1628  * @transmit_path: HopID used for transmitting packets
1629  * @transmit_ring: NHI ring number used to send packets towards the
1630  *		   other domain. Set to %-1 if TX path is not needed.
1631  * @receive_path: HopID used for receiving packets
1632  * @receive_ring: NHI ring number used to receive packets from the
1633  *		  other domain. Set to %-1 if RX path is not needed.
1634  *
1635  * Return: Returns a tb_tunnel on success or NULL on failure.
1636  */
1637 struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
1638 				      struct tb_port *dst, int transmit_path,
1639 				      int transmit_ring, int receive_path,
1640 				      int receive_ring)
1641 {
1642 	struct tb_tunnel *tunnel;
1643 	size_t npaths = 0, i = 0;
1644 	struct tb_path *path;
1645 	int credits;
1646 
1647 	/* Ring 0 is reserved for control channel */
1648 	if (WARN_ON(!receive_ring || !transmit_ring))
1649 		return NULL;
1650 
1651 	if (receive_ring > 0)
1652 		npaths++;
1653 	if (transmit_ring > 0)
1654 		npaths++;
1655 
1656 	if (WARN_ON(!npaths))
1657 		return NULL;
1658 
1659 	tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
1660 	if (!tunnel)
1661 		return NULL;
1662 
1663 	tunnel->src_port = nhi;
1664 	tunnel->dst_port = dst;
1665 	tunnel->deinit = tb_dma_deinit;
1666 
1667 	credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits);
1668 
1669 	if (receive_ring > 0) {
1670 		path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
1671 				     "DMA RX");
1672 		if (!path)
1673 			goto err_free;
1674 		tunnel->paths[i++] = path;
1675 		if (tb_dma_init_rx_path(path, credits)) {
1676 			tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
1677 			goto err_free;
1678 		}
1679 	}
1680 
1681 	if (transmit_ring > 0) {
1682 		path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
1683 				     "DMA TX");
1684 		if (!path)
1685 			goto err_free;
1686 		tunnel->paths[i++] = path;
1687 		if (tb_dma_init_tx_path(path, credits)) {
1688 			tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
1689 			goto err_free;
1690 		}
1691 	}
1692 
1693 	return tunnel;
1694 
1695 err_free:
1696 	tb_tunnel_free(tunnel);
1697 	return NULL;
1698 }
1699 
1700 /**
1701  * tb_tunnel_match_dma() - Match DMA tunnel
1702  * @tunnel: Tunnel to match
1703  * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
1704  * @transmit_ring: NHI ring number used to send packets towards the
1705  *		   other domain. Pass %-1 to ignore.
1706  * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
1707  * @receive_ring: NHI ring number used to receive packets from the
1708  *		  other domain. Pass %-1 to ignore.
1709  *
1710  * This function can be used to match specific DMA tunnel, if there are
1711  * multiple DMA tunnels going through the same XDomain connection.
1712  * Returns true if there is match and false otherwise.
1713  */
1714 bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
1715 			 int transmit_ring, int receive_path, int receive_ring)
1716 {
1717 	const struct tb_path *tx_path = NULL, *rx_path = NULL;
1718 	int i;
1719 
1720 	if (!receive_ring || !transmit_ring)
1721 		return false;
1722 
1723 	for (i = 0; i < tunnel->npaths; i++) {
1724 		const struct tb_path *path = tunnel->paths[i];
1725 
1726 		if (!path)
1727 			continue;
1728 
1729 		if (tb_port_is_nhi(path->hops[0].in_port))
1730 			tx_path = path;
1731 		else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
1732 			rx_path = path;
1733 	}
1734 
1735 	if (transmit_ring > 0 || transmit_path > 0) {
1736 		if (!tx_path)
1737 			return false;
1738 		if (transmit_ring > 0 &&
1739 		    (tx_path->hops[0].in_hop_index != transmit_ring))
1740 			return false;
1741 		if (transmit_path > 0 &&
1742 		    (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
1743 			return false;
1744 	}
1745 
1746 	if (receive_ring > 0 || receive_path > 0) {
1747 		if (!rx_path)
1748 			return false;
1749 		if (receive_path > 0 &&
1750 		    (rx_path->hops[0].in_hop_index != receive_path))
1751 			return false;
1752 		if (receive_ring > 0 &&
1753 		    (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
1754 			return false;
1755 	}
1756 
1757 	return true;
1758 }
1759 
1760 static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
1761 {
1762 	int ret, up_max_rate, down_max_rate;
1763 
1764 	ret = usb4_usb3_port_max_link_rate(up);
1765 	if (ret < 0)
1766 		return ret;
1767 	up_max_rate = ret;
1768 
1769 	ret = usb4_usb3_port_max_link_rate(down);
1770 	if (ret < 0)
1771 		return ret;
1772 	down_max_rate = ret;
1773 
1774 	return min(up_max_rate, down_max_rate);
1775 }
1776 
1777 static int tb_usb3_init(struct tb_tunnel *tunnel)
1778 {
1779 	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
1780 		      tunnel->allocated_up, tunnel->allocated_down);
1781 
1782 	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
1783 						 &tunnel->allocated_up,
1784 						 &tunnel->allocated_down);
1785 }
1786 
1787 static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
1788 {
1789 	int res;
1790 
1791 	res = tb_usb3_port_enable(tunnel->src_port, activate);
1792 	if (res)
1793 		return res;
1794 
1795 	if (tb_port_is_usb3_up(tunnel->dst_port))
1796 		return tb_usb3_port_enable(tunnel->dst_port, activate);
1797 
1798 	return 0;
1799 }
1800 
1801 static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
1802 		int *consumed_up, int *consumed_down)
1803 {
1804 	struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
1805 	int pcie_weight = tb_acpi_may_tunnel_pcie() ? TB_PCI_WEIGHT : 0;
1806 
1807 	/*
1808 	 * PCIe tunneling, if enabled, affects the USB3 bandwidth so
1809 	 * take that into account here.
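	 *
	 * For illustration: with TB_USB3_WEIGHT of 2 and TB_PCI_WEIGHT of 1,
	 * an allocation of 2000 Mb/s is reported as 2000 * (2 + 1) / 2 =
	 * 3000 Mb/s consumed when PCIe tunneling is enabled.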
1810 	 */
1811 	*consumed_up = tunnel->allocated_up *
1812 		(TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
1813 	*consumed_down = tunnel->allocated_down *
1814 		(TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
1815 
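	/*
	 * USB4 v2 (Gen 4) links reserve a minimum amount of bandwidth for
	 * USB3 regardless of what has actually been allocated.
	 */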
1816 	if (tb_port_get_link_generation(port) >= 4) {
1817 		*consumed_up = max(*consumed_up, USB4_V2_USB3_MIN_BANDWIDTH);
1818 		*consumed_down = max(*consumed_down, USB4_V2_USB3_MIN_BANDWIDTH);
1819 	}
1820 
1821 	return 0;
1822 }
1823 
1824 static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
1825 {
1826 	int ret;
1827 
1828 	ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
1829 					       &tunnel->allocated_up,
1830 					       &tunnel->allocated_down);
1831 	if (ret)
1832 		return ret;
1833 
1834 	tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
1835 		      tunnel->allocated_up, tunnel->allocated_down);
1836 	return 0;
1837 }
1838 
1839 static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
1840 						int *available_up,
1841 						int *available_down)
1842 {
1843 	int ret, max_rate, allocate_up, allocate_down;
1844 
1845 	ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
1846 	if (ret < 0) {
1847 		tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
1848 		return;
1849 	} else if (!ret) {
1850 		/* Use maximum link rate if the link valid bit is not set */
1851 		ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
1852 		if (ret < 0) {
1853 			tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
1854 			return;
1855 		}
1856 	}
1857 
1858 	/*
1859 	 * 90% of the max rate can be allocated for isochronous
1860 	 * transfers.
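	 * As a rough example, a 10 Gb/s USB3 link leaves about 9000 Mb/s
	 * for the tunnel after this cap.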
1861 	 */
1862 	max_rate = ret * 90 / 100;
1863 
1864 	/* No need to reclaim if already at maximum */
1865 	if (tunnel->allocated_up >= max_rate &&
1866 	    tunnel->allocated_down >= max_rate)
1867 		return;
1868 
1869 	/* Don't go lower than what is already allocated */
1870 	allocate_up = min(max_rate, *available_up);
1871 	if (allocate_up < tunnel->allocated_up)
1872 		allocate_up = tunnel->allocated_up;
1873 
1874 	allocate_down = min(max_rate, *available_down);
1875 	if (allocate_down < tunnel->allocated_down)
1876 		allocate_down = tunnel->allocated_down;
1877 
1878 	/* If no changes no need to do more */
1879 	if (allocate_up == tunnel->allocated_up &&
1880 	    allocate_down == tunnel->allocated_down)
1881 		return;
1882 
1883 	ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
1884 						&allocate_down);
1885 	if (ret) {
1886 		tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
1887 		return;
1888 	}
1889 
1890 	tunnel->allocated_up = allocate_up;
1891 	*available_up -= tunnel->allocated_up;
1892 
1893 	tunnel->allocated_down = allocate_down;
1894 	*available_down -= tunnel->allocated_down;
1895 
1896 	tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
1897 		      tunnel->allocated_up, tunnel->allocated_down);
1898 }
1899 
1900 static void tb_usb3_init_credits(struct tb_path_hop *hop)
1901 {
1902 	struct tb_port *port = hop->in_port;
1903 	struct tb_switch *sw = port->sw;
1904 	unsigned int credits;
1905 
1906 	if (tb_port_use_credit_allocation(port)) {
1907 		credits = sw->max_usb3_credits;
1908 	} else {
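		/*
		 * Ports that do not use dynamic credit allocation fall back
		 * to fixed defaults; bonded lane adapters get twice the
		 * credits of a single lane.
		 */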
1909 		if (tb_port_is_null(port))
1910 			credits = port->bonded ? 32 : 16;
1911 		else
1912 			credits = 7;
1913 	}
1914 
1915 	hop->initial_credits = credits;
1916 }
1917 
1918 static void tb_usb3_init_path(struct tb_path *path)
1919 {
1920 	struct tb_path_hop *hop;
1921 
1922 	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
1923 	path->egress_shared_buffer = TB_PATH_NONE;
1924 	path->ingress_fc_enable = TB_PATH_ALL;
1925 	path->ingress_shared_buffer = TB_PATH_NONE;
1926 	path->priority = TB_USB3_PRIORITY;
1927 	path->weight = TB_USB3_WEIGHT;
1928 	path->drop_packages = 0;
1929 
1930 	tb_path_for_each_hop(path, hop)
1931 		tb_usb3_init_credits(hop);
1932 }
1933 
1934 /**
1935  * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
1936  * @tb: Pointer to the domain structure
1937  * @down: USB3 downstream adapter
1938  * @alloc_hopid: Allocate HopIDs from visited ports
1939  *
1940  * If @down adapter is active, follows the tunnel to the USB3 upstream
1941  * adapter and back. Returns the discovered tunnel or %NULL if there was
1942  * no tunnel.
1943  */
1944 struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
1945 					  bool alloc_hopid)
1946 {
1947 	struct tb_tunnel *tunnel;
1948 	struct tb_path *path;
1949 
1950 	if (!tb_usb3_port_is_enabled(down))
1951 		return NULL;
1952 
1953 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
1954 	if (!tunnel)
1955 		return NULL;
1956 
1957 	tunnel->activate = tb_usb3_activate;
1958 	tunnel->src_port = down;
1959 
1960 	/*
1961 	 * Discover both paths even if they are not complete. We will
1962 	 * clean them up by calling tb_tunnel_deactivate() below in that
1963 	 * case.
1964 	 */
1965 	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
1966 				&tunnel->dst_port, "USB3 Down", alloc_hopid);
1967 	if (!path) {
1968 		/* Just disable the downstream port */
1969 		tb_usb3_port_enable(down, false);
1970 		goto err_free;
1971 	}
1972 	tunnel->paths[TB_USB3_PATH_DOWN] = path;
1973 	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
1974 
1975 	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
1976 				"USB3 Up", alloc_hopid);
1977 	if (!path)
1978 		goto err_deactivate;
1979 	tunnel->paths[TB_USB3_PATH_UP] = path;
1980 	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
1981 
1982 	/* Validate that the tunnel is complete */
1983 	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
1984 		tb_port_warn(tunnel->dst_port,
1985 			     "path does not end on a USB3 adapter, cleaning up\n");
1986 		goto err_deactivate;
1987 	}
1988 
1989 	if (down != tunnel->src_port) {
1990 		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
1991 		goto err_deactivate;
1992 	}
1993 
1994 	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
1995 		tb_tunnel_warn(tunnel,
1996 			       "tunnel is not fully activated, cleaning up\n");
1997 		goto err_deactivate;
1998 	}
1999 
2000 	if (!tb_route(down->sw)) {
2001 		int ret;
2002 
2003 		/*
2004 		 * Read the initial bandwidth allocation for the first
2005 		 * hop tunnel.
2006 		 */
2007 		ret = usb4_usb3_port_allocated_bandwidth(down,
2008 			&tunnel->allocated_up, &tunnel->allocated_down);
2009 		if (ret)
2010 			goto err_deactivate;
2011 
2012 		tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
2013 			      tunnel->allocated_up, tunnel->allocated_down);
2014 
2015 		tunnel->init = tb_usb3_init;
2016 		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
2017 		tunnel->release_unused_bandwidth =
2018 			tb_usb3_release_unused_bandwidth;
2019 		tunnel->reclaim_available_bandwidth =
2020 			tb_usb3_reclaim_available_bandwidth;
2021 	}
2022 
2023 	tb_tunnel_dbg(tunnel, "discovered\n");
2024 	return tunnel;
2025 
2026 err_deactivate:
2027 	tb_tunnel_deactivate(tunnel);
2028 err_free:
2029 	tb_tunnel_free(tunnel);
2030 
2031 	return NULL;
2032 }
2033 
2034 /**
2035  * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
2036  * @tb: Pointer to the domain structure
2037  * @up: USB3 upstream adapter port
2038  * @down: USB3 downstream adapter port
2039  * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
2040  *	    if not limited).
2041  * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
2042  *	      (%0 if not limited).
2043  *
2044  * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
2045  * @TB_TYPE_USB3_DOWN.
2046  *
2047  * Return: A tb_tunnel on success or %NULL on failure.
2048  */
2049 struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
2050 				       struct tb_port *down, int max_up,
2051 				       int max_down)
2052 {
2053 	struct tb_tunnel *tunnel;
2054 	struct tb_path *path;
2055 	int max_rate = 0;
2056 
2057 	/*
2058 	 * Check that we have enough bandwidth available for the new
2059 	 * USB3 tunnel.
2060 	 */
2061 	if (max_up > 0 || max_down > 0) {
2062 		max_rate = tb_usb3_max_link_rate(down, up);
2063 		if (max_rate < 0)
2064 			return NULL;
2065 
2066 		/* Only 90% can be allocated for USB3 isochronous transfers */
2067 		max_rate = max_rate * 90 / 100;
2068 		tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
2069 			    max_rate);
2070 
2071 		if (max_rate > max_up || max_rate > max_down) {
2072 			tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
2073 			return NULL;
2074 		}
2075 	}
2076 
2077 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
2078 	if (!tunnel)
2079 		return NULL;
2080 
2081 	tunnel->activate = tb_usb3_activate;
2082 	tunnel->src_port = down;
2083 	tunnel->dst_port = up;
2084 	tunnel->max_up = max_up;
2085 	tunnel->max_down = max_down;
2086 
2087 	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
2088 			     "USB3 Down");
2089 	if (!path) {
2090 		tb_tunnel_free(tunnel);
2091 		return NULL;
2092 	}
2093 	tb_usb3_init_path(path);
2094 	tunnel->paths[TB_USB3_PATH_DOWN] = path;
2095 
2096 	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
2097 			     "USB3 Up");
2098 	if (!path) {
2099 		tb_tunnel_free(tunnel);
2100 		return NULL;
2101 	}
2102 	tb_usb3_init_path(path);
2103 	tunnel->paths[TB_USB3_PATH_UP] = path;
2104 
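	/*
	 * Bandwidth management callbacks are only set for the first hop
	 * tunnel, i.e. when @down is on the host router.
	 */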
2105 	if (!tb_route(down->sw)) {
2106 		tunnel->allocated_up = max_rate;
2107 		tunnel->allocated_down = max_rate;
2108 
2109 		tunnel->init = tb_usb3_init;
2110 		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
2111 		tunnel->release_unused_bandwidth =
2112 			tb_usb3_release_unused_bandwidth;
2113 		tunnel->reclaim_available_bandwidth =
2114 			tb_usb3_reclaim_available_bandwidth;
2115 	}
2116 
2117 	return tunnel;
2118 }
2119 
2120 /**
2121  * tb_tunnel_free() - free a tunnel
2122  * @tunnel: Tunnel to be freed
2123  *
2124  * Frees a tunnel. The tunnel does not need to be deactivated.
2125  */
2126 void tb_tunnel_free(struct tb_tunnel *tunnel)
2127 {
2128 	int i;
2129 
2130 	if (!tunnel)
2131 		return;
2132 
2133 	if (tunnel->deinit)
2134 		tunnel->deinit(tunnel);
2135 
2136 	for (i = 0; i < tunnel->npaths; i++) {
2137 		if (tunnel->paths[i])
2138 			tb_path_free(tunnel->paths[i]);
2139 	}
2140 
2141 	kfree(tunnel->paths);
2142 	kfree(tunnel);
2143 }
2144 
2145 /**
2146  * tb_tunnel_is_invalid() - check whether an activated path is still valid
2147  * @tunnel: Tunnel to check
2148  */
2149 bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
2150 {
2151 	int i;
2152 
2153 	for (i = 0; i < tunnel->npaths; i++) {
2154 		WARN_ON(!tunnel->paths[i]->activated);
2155 		if (tb_path_is_invalid(tunnel->paths[i]))
2156 			return true;
2157 	}
2158 
2159 	return false;
2160 }
2161 
2162 /**
2163  * tb_tunnel_restart() - activate a tunnel after a hardware reset
2164  * @tunnel: Tunnel to restart
2165  *
2166  * Return: 0 on success and negative errno in case of failure
2167  */
2168 int tb_tunnel_restart(struct tb_tunnel *tunnel)
2169 {
2170 	int res, i;
2171 
2172 	tb_tunnel_dbg(tunnel, "activating\n");
2173 
2174 	/*
2175 	 * Make sure all paths are properly disabled before enabling
2176 	 * them again.
2177 	 */
2178 	for (i = 0; i < tunnel->npaths; i++) {
2179 		if (tunnel->paths[i]->activated) {
2180 			tb_path_deactivate(tunnel->paths[i]);
2181 			tunnel->paths[i]->activated = false;
2182 		}
2183 	}
2184 
2185 	if (tunnel->init) {
2186 		res = tunnel->init(tunnel);
2187 		if (res)
2188 			return res;
2189 	}
2190 
2191 	for (i = 0; i < tunnel->npaths; i++) {
2192 		res = tb_path_activate(tunnel->paths[i]);
2193 		if (res)
2194 			goto err;
2195 	}
2196 
2197 	if (tunnel->activate) {
2198 		res = tunnel->activate(tunnel, true);
2199 		if (res)
2200 			goto err;
2201 	}
2202 
2203 	return 0;
2204 
2205 err:
2206 	tb_tunnel_warn(tunnel, "activation failed\n");
2207 	tb_tunnel_deactivate(tunnel);
2208 	return res;
2209 }
2210 
2211 /**
2212  * tb_tunnel_activate() - activate a tunnel
2213  * @tunnel: Tunnel to activate
2214  *
2215  * Return: 0 on success or an error code on failure.
2216  */
2217 int tb_tunnel_activate(struct tb_tunnel *tunnel)
2218 {
2219 	int i;
2220 
2221 	for (i = 0; i < tunnel->npaths; i++) {
2222 		if (tunnel->paths[i]->activated) {
2223 			tb_tunnel_WARN(tunnel,
2224 				       "trying to activate an already activated tunnel\n");
2225 			return -EINVAL;
2226 		}
2227 	}
2228 
2229 	return tb_tunnel_restart(tunnel);
2230 }
2231 
2232 /**
2233  * tb_tunnel_deactivate() - deactivate a tunnel
2234  * @tunnel: Tunnel to deactivate
2235  */
2236 void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
2237 {
2238 	int i;
2239 
2240 	tb_tunnel_dbg(tunnel, "deactivating\n");
2241 
2242 	if (tunnel->activate)
2243 		tunnel->activate(tunnel, false);
2244 
2245 	for (i = 0; i < tunnel->npaths; i++) {
2246 		if (tunnel->paths[i] && tunnel->paths[i]->activated)
2247 			tb_path_deactivate(tunnel->paths[i]);
2248 	}
2249 }
2250 
2251 /**
2252  * tb_tunnel_port_on_path() - Does the tunnel go through port
2253  * @tunnel: Tunnel to check
2254  * @port: Port to check
2255  *
2256  * Returns true if @tunnel goes through @port (direction does not matter),
2257  * false otherwise.
2258  */
2259 bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
2260 			    const struct tb_port *port)
2261 {
2262 	int i;
2263 
2264 	for (i = 0; i < tunnel->npaths; i++) {
2265 		if (!tunnel->paths[i])
2266 			continue;
2267 
2268 		if (tb_path_port_on_path(tunnel->paths[i], port))
2269 			return true;
2270 	}
2271 
2272 	return false;
2273 }
2274 
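/* Returns true only if all paths of the tunnel exist and are activated */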
2275 static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
2276 {
2277 	int i;
2278 
2279 	for (i = 0; i < tunnel->npaths; i++) {
2280 		if (!tunnel->paths[i])
2281 			return false;
2282 		if (!tunnel->paths[i]->activated)
2283 			return false;
2284 	}
2285 
2286 	return true;
2287 }
2288 
2289 /**
2290  * tb_tunnel_maximum_bandwidth() - Return maximum possible bandwidth
2291  * @tunnel: Tunnel to check
2292  * @max_up: Maximum upstream bandwidth in Mb/s
2293  * @max_down: Maximum downstream bandwidth in Mb/s
2294  *
2295  * Returns the maximum possible bandwidth this tunnel can consume if not
2296  * limited by other bandwidth clients. If the tunnel does not support this,
2297  * returns %-EOPNOTSUPP.
2298  */
2299 int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
2300 				int *max_down)
2301 {
2302 	if (!tb_tunnel_is_active(tunnel))
2303 		return -EINVAL;
2304 
2305 	if (tunnel->maximum_bandwidth)
2306 		return tunnel->maximum_bandwidth(tunnel, max_up, max_down);
2307 	return -EOPNOTSUPP;
2308 }
2309 
2310 /**
2311  * tb_tunnel_allocated_bandwidth() - Return bandwidth allocated for the tunnel
2312  * @tunnel: Tunnel to check
2313  * @allocated_up: Currently allocated upstream bandwidth in Mb/s is stored here
2314  * @allocated_down: Currently allocated downstream bandwidth in Mb/s is
2315  *		    stored here
2316  *
2317  * Returns the bandwidth allocated for the tunnel. This may be higher
2318  * than what the tunnel actually consumes.
2319  */
2320 int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
2321 				  int *allocated_down)
2322 {
2323 	if (!tb_tunnel_is_active(tunnel))
2324 		return -EINVAL;
2325 
2326 	if (tunnel->allocated_bandwidth)
2327 		return tunnel->allocated_bandwidth(tunnel, allocated_up,
2328 						   allocated_down);
2329 	return -EOPNOTSUPP;
2330 }
2331 
2332 /**
2333  * tb_tunnel_alloc_bandwidth() - Change tunnel bandwidth allocation
2334  * @tunnel: Tunnel whose bandwidth allocation to change
2335  * @alloc_up: New upstream bandwidth in Mb/s
2336  * @alloc_down: New downstream bandwidth in Mb/s
2337  *
2338  * Tries to change the tunnel bandwidth allocation. If it succeeds, returns %0
2339  * and updates @alloc_up and @alloc_down to what was actually allocated
2340  * (it may not be the same as passed originally). Returns negative errno
2341  * in case of failure.
2342  */
2343 int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
2344 			      int *alloc_down)
2345 {
2346 	if (!tb_tunnel_is_active(tunnel))
2347 		return -EINVAL;
2348 
2349 	if (tunnel->alloc_bandwidth)
2350 		return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);
2351 
2352 	return -EOPNOTSUPP;
2353 }
2354 
2355 /**
2356  * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
2357  * @tunnel: Tunnel to check
2358  * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
2359  *		 Can be %NULL.
2360  * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
2361  *		   Can be %NULL.
2362  *
2363  * Stores the amount of isochronous bandwidth @tunnel consumes in
2364  * @consumed_up and @consumed_down. In case of success returns %0,
2365  * negative errno otherwise.
2366  */
2367 int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
2368 				 int *consumed_down)
2369 {
2370 	int up_bw = 0, down_bw = 0;
2371 
2372 	if (!tb_tunnel_is_active(tunnel))
2373 		goto out;
2374 
2375 	if (tunnel->consumed_bandwidth) {
2376 		int ret;
2377 
2378 		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
2379 		if (ret)
2380 			return ret;
2381 
2382 		tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
2383 			      down_bw);
2384 	}
2385 
2386 out:
2387 	if (consumed_up)
2388 		*consumed_up = up_bw;
2389 	if (consumed_down)
2390 		*consumed_down = down_bw;
2391 
2392 	return 0;
2393 }
2394 
2395 /**
2396  * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
2397  * @tunnel: Tunnel whose unused bandwidth to release
2398  *
2399  * If the tunnel supports dynamic bandwidth management (USB3 tunnels at the
2400  * moment) this function makes it release all the unused bandwidth.
2401  *
2402  * Returns %0 in case of success and negative errno otherwise.
2403  */
2404 int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
2405 {
2406 	if (!tb_tunnel_is_active(tunnel))
2407 		return 0;
2408 
2409 	if (tunnel->release_unused_bandwidth) {
2410 		int ret;
2411 
2412 		ret = tunnel->release_unused_bandwidth(tunnel);
2413 		if (ret)
2414 			return ret;
2415 	}
2416 
2417 	return 0;
2418 }
2419 
2420 /**
2421  * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
2422  * @tunnel: Tunnel reclaiming available bandwidth
2423  * @available_up: Available upstream bandwidth (in Mb/s)
2424  * @available_down: Available downstream bandwidth (in Mb/s)
2425  *
2426  * Reclaims bandwidth from @available_up and @available_down and updates
2427  * the variables accordingly (e.g. decreases both according to what was
2428  * reclaimed by the tunnel). If nothing was reclaimed, the values are
2429  * kept as is.
2430  */
2431 void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
2432 					   int *available_up,
2433 					   int *available_down)
2434 {
2435 	if (!tb_tunnel_is_active(tunnel))
2436 		return;
2437 
2438 	if (tunnel->reclaim_available_bandwidth)
2439 		tunnel->reclaim_available_bandwidth(tunnel, available_up,
2440 						    available_down);
2441 }
2442 
2443 const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel)
2444 {
2445 	return tb_tunnel_names[tunnel->type];
2446 }
2447