xref: /openbmc/linux/drivers/thunderbolt/tunnel.c (revision 3a83e4e6)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt driver - Tunneling support
4  *
5  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6  * Copyright (C) 2019, Intel Corporation
7  */
8 
9 #include <linux/delay.h>
10 #include <linux/slab.h>
11 #include <linux/list.h>
12 
13 #include "tunnel.h"
14 #include "tb.h"
15 
16 /* PCIe adapters always use HopID 8 for both directions */
17 #define TB_PCI_HOPID			8
18 
19 #define TB_PCI_PATH_DOWN		0
20 #define TB_PCI_PATH_UP			1
21 
22 /* USB3 adapters always use HopID 8 for both directions */
23 #define TB_USB3_HOPID			8
24 
25 #define TB_USB3_PATH_DOWN		0
26 #define TB_USB3_PATH_UP			1
27 
28 /* DP adapters use HopID 8 for AUX and 9 for Video */
29 #define TB_DP_AUX_TX_HOPID		8
30 #define TB_DP_AUX_RX_HOPID		8
31 #define TB_DP_VIDEO_HOPID		9
32 
33 #define TB_DP_VIDEO_PATH_OUT		0
34 #define TB_DP_AUX_PATH_OUT		1
35 #define TB_DP_AUX_PATH_IN		2
36 
37 #define TB_DMA_PATH_OUT			0
38 #define TB_DMA_PATH_IN			1
39 
40 static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
41 
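/*
 * Logging helpers that prefix every message with the tunnel endpoints
 * (route and port of both adapters) and the tunnel type.
 */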
42 #define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
43 	do {                                                            \
44 		struct tb_tunnel *__tunnel = (tunnel);                  \
45 		level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt,   \
46 		      tb_route(__tunnel->src_port->sw),                 \
47 		      __tunnel->src_port->port,                         \
48 		      tb_route(__tunnel->dst_port->sw),                 \
49 		      __tunnel->dst_port->port,                         \
50 		      tb_tunnel_names[__tunnel->type],			\
51 		      ## arg);                                          \
52 	} while (0)
53 
54 #define tb_tunnel_WARN(tunnel, fmt, arg...) \
55 	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
56 #define tb_tunnel_warn(tunnel, fmt, arg...) \
57 	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
58 #define tb_tunnel_info(tunnel, fmt, arg...) \
59 	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
60 #define tb_tunnel_dbg(tunnel, fmt, arg...) \
61 	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
62 
63 static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
64 					 enum tb_tunnel_type type)
65 {
66 	struct tb_tunnel *tunnel;
67 
68 	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
69 	if (!tunnel)
70 		return NULL;
71 
72 	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
73 	if (!tunnel->paths) {
74 		tb_tunnel_free(tunnel);
75 		return NULL;
76 	}
77 
78 	INIT_LIST_HEAD(&tunnel->list);
79 	tunnel->tb = tb;
80 	tunnel->npaths = npaths;
81 	tunnel->type = type;
82 
83 	return tunnel;
84 }
85 
86 static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
87 {
88 	int res;
89 
90 	res = tb_pci_port_enable(tunnel->src_port, activate);
91 	if (res)
92 		return res;
93 
94 	if (tb_port_is_pcie_up(tunnel->dst_port))
95 		return tb_pci_port_enable(tunnel->dst_port, activate);
96 
97 	return 0;
98 }
99 
100 static int tb_initial_credits(const struct tb_switch *sw)
101 {
102 	/* If the path is complete, sw is not NULL */
103 	if (sw) {
104 		/* More credits for faster link */
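		/*
		 * link_speed is in Gb/s and link_width is the lane count,
		 * so 40 means a 20 Gb/s dual-lane link and 20 means either
		 * a 20 Gb/s single-lane or a 10 Gb/s dual-lane link.
		 */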
105 		switch (sw->link_speed * sw->link_width) {
106 		case 40:
107 			return 32;
108 		case 20:
109 			return 24;
110 		}
111 	}
112 
113 	return 16;
114 }
115 
116 static void tb_pci_init_path(struct tb_path *path)
117 {
118 	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
119 	path->egress_shared_buffer = TB_PATH_NONE;
120 	path->ingress_fc_enable = TB_PATH_ALL;
121 	path->ingress_shared_buffer = TB_PATH_NONE;
122 	path->priority = 3;
123 	path->weight = 1;
124 	path->drop_packages = 0;
125 	path->nfc_credits = 0;
126 	path->hops[0].initial_credits = 7;
127 	if (path->path_length > 1)
128 		path->hops[1].initial_credits =
129 			tb_initial_credits(path->hops[1].in_port->sw);
130 }
131 
132 /**
133  * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
134  * @tb: Pointer to the domain structure
135  * @down: PCIe downstream adapter
136  *
137  * If @down adapter is active, follows the tunnel to the PCIe upstream
138  * adapter and back. Returns the discovered tunnel or %NULL if there was
139  * no tunnel.
140  */
141 struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
142 {
143 	struct tb_tunnel *tunnel;
144 	struct tb_path *path;
145 
146 	if (!tb_pci_port_is_enabled(down))
147 		return NULL;
148 
149 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
150 	if (!tunnel)
151 		return NULL;
152 
153 	tunnel->activate = tb_pci_activate;
154 	tunnel->src_port = down;
155 
156 	/*
157 	 * Discover both paths even if they are not complete. We will
158 	 * clean them up by calling tb_tunnel_deactivate() below in that
159 	 * case.
160 	 */
161 	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
162 				&tunnel->dst_port, "PCIe Up");
163 	if (!path) {
164 		/* Just disable the downstream port */
165 		tb_pci_port_enable(down, false);
166 		goto err_free;
167 	}
168 	tunnel->paths[TB_PCI_PATH_UP] = path;
169 	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]);
170 
171 	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
172 				"PCIe Down");
173 	if (!path)
174 		goto err_deactivate;
175 	tunnel->paths[TB_PCI_PATH_DOWN] = path;
176 	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]);
177 
178 	/* Validate that the tunnel is complete */
179 	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
180 		tb_port_warn(tunnel->dst_port,
181 			     "path does not end on a PCIe adapter, cleaning up\n");
182 		goto err_deactivate;
183 	}
184 
185 	if (down != tunnel->src_port) {
186 		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
187 		goto err_deactivate;
188 	}
189 
190 	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
191 		tb_tunnel_warn(tunnel,
192 			       "tunnel is not fully activated, cleaning up\n");
193 		goto err_deactivate;
194 	}
195 
196 	tb_tunnel_dbg(tunnel, "discovered\n");
197 	return tunnel;
198 
199 err_deactivate:
200 	tb_tunnel_deactivate(tunnel);
201 err_free:
202 	tb_tunnel_free(tunnel);
203 
204 	return NULL;
205 }
206 
207 /**
208  * tb_tunnel_alloc_pci() - allocate a PCI tunnel
209  * @tb: Pointer to the domain structure
210  * @up: PCIe upstream adapter port
211  * @down: PCIe downstream adapter port
212  *
213  * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
214  * TB_TYPE_PCIE_DOWN.
215  *
216  * Return: A tb_tunnel on success or %NULL on failure.
217  */
218 struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
219 				      struct tb_port *down)
220 {
221 	struct tb_tunnel *tunnel;
222 	struct tb_path *path;
223 
224 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
225 	if (!tunnel)
226 		return NULL;
227 
228 	tunnel->activate = tb_pci_activate;
229 	tunnel->src_port = down;
230 	tunnel->dst_port = up;
231 
232 	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
233 			     "PCIe Down");
234 	if (!path) {
235 		tb_tunnel_free(tunnel);
236 		return NULL;
237 	}
238 	tb_pci_init_path(path);
239 	tunnel->paths[TB_PCI_PATH_DOWN] = path;
240 
241 	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
242 			     "PCIe Up");
243 	if (!path) {
244 		tb_tunnel_free(tunnel);
245 		return NULL;
246 	}
247 	tb_pci_init_path(path);
248 	tunnel->paths[TB_PCI_PATH_UP] = path;
249 
250 	return tunnel;
251 }
252 
253 static bool tb_dp_is_usb4(const struct tb_switch *sw)
254 {
255 	/* Titan Ridge DP adapters need the same treatment as USB4 */
256 	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
257 }
258 
259 static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
260 {
261 	int timeout = 10;
262 	u32 val;
263 	int ret;
264 
265 	/* Both ends need to support this */
266 	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
267 		return 0;
268 
269 	ret = tb_port_read(out, &val, TB_CFG_PORT,
270 			   out->cap_adap + DP_STATUS_CTRL, 1);
271 	if (ret)
272 		return ret;
273 
274 	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;
275 
276 	ret = tb_port_write(out, &val, TB_CFG_PORT,
277 			    out->cap_adap + DP_STATUS_CTRL, 1);
278 	if (ret)
279 		return ret;
280 
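	/* Poll until the DP OUT adapter clears the CM handshake bit */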
281 	do {
282 		ret = tb_port_read(out, &val, TB_CFG_PORT,
283 				   out->cap_adap + DP_STATUS_CTRL, 1);
284 		if (ret)
285 			return ret;
286 		if (!(val & DP_STATUS_CTRL_CMHS))
287 			return 0;
288 		usleep_range(10, 100);
289 	} while (timeout--);
290 
291 	return -ETIMEDOUT;
292 }
293 
294 static inline u32 tb_dp_cap_get_rate(u32 val)
295 {
296 	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;
297 
298 	switch (rate) {
299 	case DP_COMMON_CAP_RATE_RBR:
300 		return 1620;
301 	case DP_COMMON_CAP_RATE_HBR:
302 		return 2700;
303 	case DP_COMMON_CAP_RATE_HBR2:
304 		return 5400;
305 	case DP_COMMON_CAP_RATE_HBR3:
306 		return 8100;
307 	default:
308 		return 0;
309 	}
310 }
311 
312 static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
313 {
314 	val &= ~DP_COMMON_CAP_RATE_MASK;
315 	switch (rate) {
316 	default:
317 		WARN(1, "invalid rate %u passed, defaulting to 1620 Mb/s\n", rate);
318 		fallthrough;
319 	case 1620:
320 		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
321 		break;
322 	case 2700:
323 		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
324 		break;
325 	case 5400:
326 		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
327 		break;
328 	case 8100:
329 		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
330 		break;
331 	}
332 	return val;
333 }
334 
335 static inline u32 tb_dp_cap_get_lanes(u32 val)
336 {
337 	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;
338 
339 	switch (lanes) {
340 	case DP_COMMON_CAP_1_LANE:
341 		return 1;
342 	case DP_COMMON_CAP_2_LANES:
343 		return 2;
344 	case DP_COMMON_CAP_4_LANES:
345 		return 4;
346 	default:
347 		return 0;
348 	}
349 }
350 
351 static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
352 {
353 	val &= ~DP_COMMON_CAP_LANES_MASK;
354 	switch (lanes) {
355 	default:
356 		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
357 		     lanes);
358 		fallthrough;
359 	case 1:
360 		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
361 		break;
362 	case 2:
363 		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
364 		break;
365 	case 4:
366 		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
367 		break;
368 	}
369 	return val;
370 }
371 
372 static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
373 {
374 	/* Tunneling removes the DP 8b/10b encoding */
375 	return rate * lanes * 8 / 10;
376 }
377 
378 static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
379 				  u32 out_rate, u32 out_lanes, u32 *new_rate,
380 				  u32 *new_lanes)
381 {
382 	static const u32 dp_bw[][2] = {
383 		/* Mb/s, lanes */
384 		{ 8100, 4 }, /* 25920 Mb/s */
385 		{ 5400, 4 }, /* 17280 Mb/s */
386 		{ 8100, 2 }, /* 12960 Mb/s */
387 		{ 2700, 4 }, /* 8640 Mb/s */
388 		{ 5400, 2 }, /* 8640 Mb/s */
389 		{ 8100, 1 }, /* 6480 Mb/s */
390 		{ 1620, 4 }, /* 5184 Mb/s */
391 		{ 5400, 1 }, /* 4320 Mb/s */
392 		{ 2700, 2 }, /* 4320 Mb/s */
393 		{ 1620, 2 }, /* 2592 Mb/s */
394 		{ 2700, 1 }, /* 2160 Mb/s */
395 		{ 1620, 1 }, /* 1296 Mb/s */
396 	};
397 	unsigned int i;
398 
399 	/*
400 	 * Find a combination that can fit into max_bw and does not
401 	 * exceed the maximum rate and lanes supported by the DP OUT and
402 	 * DP IN adapters.
403 	 */
404 	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
405 		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
406 			continue;
407 
408 		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
409 			continue;
410 
411 		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
412 			*new_rate = dp_bw[i][0];
413 			*new_lanes = dp_bw[i][1];
414 			return 0;
415 		}
416 	}
417 
418 	return -ENOSR;
419 }
420 
421 static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
422 {
423 	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
424 	struct tb_port *out = tunnel->dst_port;
425 	struct tb_port *in = tunnel->src_port;
426 	int ret, max_bw;
427 
428 	/*
429 	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
430 	 * newer generation hardware.
431 	 */
432 	if (in->sw->generation < 2 || out->sw->generation < 2)
433 		return 0;
434 
435 	/*
436 	 * Perform connection manager handshake between IN and OUT ports
437 	 * before capabilities exchange can take place.
438 	 */
439 	ret = tb_dp_cm_handshake(in, out);
440 	if (ret)
441 		return ret;
442 
443 	/* Read both DP_LOCAL_CAP registers */
444 	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
445 			   in->cap_adap + DP_LOCAL_CAP, 1);
446 	if (ret)
447 		return ret;
448 
449 	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
450 			   out->cap_adap + DP_LOCAL_CAP, 1);
451 	if (ret)
452 		return ret;
453 
454 	/* Write IN local caps to OUT remote caps */
455 	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
456 			    out->cap_adap + DP_REMOTE_CAP, 1);
457 	if (ret)
458 		return ret;
459 
460 	in_rate = tb_dp_cap_get_rate(in_dp_cap);
461 	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
462 	tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
463 		    in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
464 
465 	/*
466 	 * If the tunnel bandwidth is limited (max_bw is set) then see
467 	 * if we need to reduce bandwidth to fit there.
468 	 */
469 	out_rate = tb_dp_cap_get_rate(out_dp_cap);
470 	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
471 	bw = tb_dp_bandwidth(out_rate, out_lanes);
472 	tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
473 		    out_rate, out_lanes, bw);
474 
475 	if (in->sw->config.depth < out->sw->config.depth)
476 		max_bw = tunnel->max_down;
477 	else
478 		max_bw = tunnel->max_up;
479 
480 	if (max_bw && bw > max_bw) {
481 		u32 new_rate, new_lanes, new_bw;
482 
483 		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
484 					     out_rate, out_lanes, &new_rate,
485 					     &new_lanes);
486 		if (ret) {
487 			tb_port_info(out, "not enough bandwidth for DP tunnel\n");
488 			return ret;
489 		}
490 
491 		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
492 		tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
493 			    new_rate, new_lanes, new_bw);
494 
495 		/*
496 		 * Set new rate and number of lanes before writing it to
497 		 * the IN port remote caps.
498 		 */
499 		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
500 		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
501 	}
502 
503 	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
504 			     in->cap_adap + DP_REMOTE_CAP, 1);
505 }
506 
507 static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
508 {
509 	int ret;
510 
511 	if (active) {
512 		struct tb_path **paths;
513 		int last;
514 
515 		paths = tunnel->paths;
516 		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;
517 
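		/*
		 * Program the Video and AUX HopIDs into both the DP IN
		 * (src_port) and DP OUT (dst_port) adapters.
		 */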
518 		tb_dp_port_set_hops(tunnel->src_port,
519 			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
520 			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
521 			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);
522 
523 		tb_dp_port_set_hops(tunnel->dst_port,
524 			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
525 			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
526 			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
527 	} else {
528 		tb_dp_port_hpd_clear(tunnel->src_port);
529 		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
530 		if (tb_port_is_dpout(tunnel->dst_port))
531 			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
532 	}
533 
534 	ret = tb_dp_port_enable(tunnel->src_port, active);
535 	if (ret)
536 		return ret;
537 
538 	if (tb_port_is_dpout(tunnel->dst_port))
539 		return tb_dp_port_enable(tunnel->dst_port, active);
540 
541 	return 0;
542 }
543 
544 static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
545 				    int *consumed_down)
546 {
547 	struct tb_port *in = tunnel->src_port;
548 	const struct tb_switch *sw = in->sw;
549 	u32 val, rate = 0, lanes = 0;
550 	int ret;
551 
552 	if (tb_dp_is_usb4(sw)) {
553 		int timeout = 20;
554 
555 		/*
556 		 * Wait for DPRX done. Normally it should already be set
557 		 * for an active tunnel.
558 		 */
559 		do {
560 			ret = tb_port_read(in, &val, TB_CFG_PORT,
561 					   in->cap_adap + DP_COMMON_CAP, 1);
562 			if (ret)
563 				return ret;
564 
565 			if (val & DP_COMMON_CAP_DPRX_DONE) {
566 				rate = tb_dp_cap_get_rate(val);
567 				lanes = tb_dp_cap_get_lanes(val);
568 				break;
569 			}
570 			msleep(250);
571 		} while (timeout--);
572 
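		/* timeout goes negative only if DPRX done was never set */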
573 		if (timeout < 0)
574 			return -ETIMEDOUT;
575 	} else if (sw->generation >= 2) {
576 		/*
577 		 * Read from the copied remote cap so that we take into
578 		 * account if capabilities were reduced during exchange.
579 		 */
580 		ret = tb_port_read(in, &val, TB_CFG_PORT,
581 				   in->cap_adap + DP_REMOTE_CAP, 1);
582 		if (ret)
583 			return ret;
584 
585 		rate = tb_dp_cap_get_rate(val);
586 		lanes = tb_dp_cap_get_lanes(val);
587 	} else {
588 		/* No bandwidth management for legacy devices */
589 		*consumed_up = 0;
590 		*consumed_down = 0;
591 		return 0;
592 	}
593 
594 	if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
595 		*consumed_up = 0;
596 		*consumed_down = tb_dp_bandwidth(rate, lanes);
597 	} else {
598 		*consumed_up = tb_dp_bandwidth(rate, lanes);
599 		*consumed_down = 0;
600 	}
601 
602 	return 0;
603 }
604 
605 static void tb_dp_init_aux_path(struct tb_path *path)
606 {
607 	int i;
608 
609 	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
610 	path->egress_shared_buffer = TB_PATH_NONE;
611 	path->ingress_fc_enable = TB_PATH_ALL;
612 	path->ingress_shared_buffer = TB_PATH_NONE;
613 	path->priority = 2;
614 	path->weight = 1;
615 
616 	for (i = 0; i < path->path_length; i++)
617 		path->hops[i].initial_credits = 1;
618 }
619 
620 static void tb_dp_init_video_path(struct tb_path *path, bool discover)
621 {
622 	u32 nfc_credits = path->hops[0].in_port->config.nfc_credits;
623 
624 	path->egress_fc_enable = TB_PATH_NONE;
625 	path->egress_shared_buffer = TB_PATH_NONE;
626 	path->ingress_fc_enable = TB_PATH_NONE;
627 	path->ingress_shared_buffer = TB_PATH_NONE;
628 	path->priority = 1;
629 	path->weight = 1;
630 
631 	if (discover) {
632 		path->nfc_credits = nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
633 	} else {
634 		u32 max_credits;
635 
636 		max_credits = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
637 			ADP_CS_4_TOTAL_BUFFERS_SHIFT;
638 		/* Leave some credits for AUX path */
639 		path->nfc_credits = min(max_credits - 2, 12U);
640 	}
641 }
642 
643 /**
644  * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
645  * @tb: Pointer to the domain structure
646  * @in: DP in adapter
647  *
648  * If @in adapter is active, follows the tunnel to the DP out adapter
649  * and back. Returns the discovered tunnel or %NULL if there was no
650  * tunnel.
651  *
652  * Return: DP tunnel or %NULL if no tunnel found.
653  */
654 struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
655 {
656 	struct tb_tunnel *tunnel;
657 	struct tb_port *port;
658 	struct tb_path *path;
659 
660 	if (!tb_dp_port_is_enabled(in))
661 		return NULL;
662 
663 	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
664 	if (!tunnel)
665 		return NULL;
666 
667 	tunnel->init = tb_dp_xchg_caps;
668 	tunnel->activate = tb_dp_activate;
669 	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
670 	tunnel->src_port = in;
671 
672 	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
673 				&tunnel->dst_port, "Video");
674 	if (!path) {
675 		/* Just disable the DP IN port */
676 		tb_dp_port_enable(in, false);
677 		goto err_free;
678 	}
679 	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
680 	tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], true);
681 
682 	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
683 	if (!path)
684 		goto err_deactivate;
685 	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
686 	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);
687 
688 	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
689 				&port, "AUX RX");
690 	if (!path)
691 		goto err_deactivate;
692 	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
693 	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);
694 
695 	/* Validate that the tunnel is complete */
696 	if (!tb_port_is_dpout(tunnel->dst_port)) {
697 		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
698 		goto err_deactivate;
699 	}
700 
701 	if (!tb_dp_port_is_enabled(tunnel->dst_port))
702 		goto err_deactivate;
703 
704 	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
705 		goto err_deactivate;
706 
707 	if (port != tunnel->src_port) {
708 		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
709 		goto err_deactivate;
710 	}
711 
712 	tb_tunnel_dbg(tunnel, "discovered\n");
713 	return tunnel;
714 
715 err_deactivate:
716 	tb_tunnel_deactivate(tunnel);
717 err_free:
718 	tb_tunnel_free(tunnel);
719 
720 	return NULL;
721 }
722 
723 /**
724  * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
725  * @tb: Pointer to the domain structure
726  * @in: DP in adapter port
727  * @out: DP out adapter port
728  * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
729  *	    if not limited)
730  * @max_down: Maximum available downstream bandwidth for the DP tunnel
731  *	      (%0 if not limited)
732  *
733  * Allocates a tunnel between @in and @out that is capable of tunneling
734  * Display Port traffic.
735  *
736  * Return: A tb_tunnel on success or %NULL on failure.
737  */
738 struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
739 				     struct tb_port *out, int max_up,
740 				     int max_down)
741 {
742 	struct tb_tunnel *tunnel;
743 	struct tb_path **paths;
744 	struct tb_path *path;
745 
746 	if (WARN_ON(!in->cap_adap || !out->cap_adap))
747 		return NULL;
748 
749 	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
750 	if (!tunnel)
751 		return NULL;
752 
753 	tunnel->init = tb_dp_xchg_caps;
754 	tunnel->activate = tb_dp_activate;
755 	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
756 	tunnel->src_port = in;
757 	tunnel->dst_port = out;
758 	tunnel->max_up = max_up;
759 	tunnel->max_down = max_down;
760 
761 	paths = tunnel->paths;
762 
763 	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
764 			     1, "Video");
765 	if (!path)
766 		goto err_free;
767 	tb_dp_init_video_path(path, false);
768 	paths[TB_DP_VIDEO_PATH_OUT] = path;
769 
770 	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
771 			     TB_DP_AUX_TX_HOPID, 1, "AUX TX");
772 	if (!path)
773 		goto err_free;
774 	tb_dp_init_aux_path(path);
775 	paths[TB_DP_AUX_PATH_OUT] = path;
776 
777 	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
778 			     TB_DP_AUX_RX_HOPID, 1, "AUX RX");
779 	if (!path)
780 		goto err_free;
781 	tb_dp_init_aux_path(path);
782 	paths[TB_DP_AUX_PATH_IN] = path;
783 
784 	return tunnel;
785 
786 err_free:
787 	tb_tunnel_free(tunnel);
788 	return NULL;
789 }
790 
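/* Credits programmed for each hop of a DMA path, limited by the NHI buffers */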
791 static u32 tb_dma_credits(struct tb_port *nhi)
792 {
793 	u32 max_credits;
794 
795 	max_credits = (nhi->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
796 		ADP_CS_4_TOTAL_BUFFERS_SHIFT;
797 	return min(max_credits, 13U);
798 }
799 
800 static int tb_dma_activate(struct tb_tunnel *tunnel, bool active)
801 {
802 	struct tb_port *nhi = tunnel->src_port;
803 	u32 credits;
804 
805 	credits = active ? tb_dma_credits(nhi) : 0;
806 	return tb_port_set_initial_credits(nhi, credits);
807 }
808 
809 static void tb_dma_init_path(struct tb_path *path, unsigned int isb,
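/*
 * @isb and @efc select which hops of the path get ingress shared
 * buffering and egress flow control, respectively.
 */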
810 			     unsigned int efc, u32 credits)
811 {
812 	int i;
813 
814 	path->egress_fc_enable = efc;
815 	path->ingress_fc_enable = TB_PATH_ALL;
816 	path->egress_shared_buffer = TB_PATH_NONE;
817 	path->ingress_shared_buffer = isb;
818 	path->priority = 5;
819 	path->weight = 1;
820 	path->clear_fc = true;
821 
822 	for (i = 0; i < path->path_length; i++)
823 		path->hops[i].initial_credits = credits;
824 }
825 
826 /**
827  * tb_tunnel_alloc_dma() - allocate a DMA tunnel
828  * @tb: Pointer to the domain structure
829  * @nhi: Host controller port
830  * @dst: Destination null port which the other domain is connected to
831  * @transmit_ring: NHI ring number used to send packets towards the
832  *		   other domain
833  * @transmit_path: HopID used for transmitting packets
834  * @receive_ring: NHI ring number used to receive packets from the
835  *		  other domain
836  * @receive_path: HopID used for receiving packets
837  *
838  * Return: A tb_tunnel on success or %NULL on failure.
839  */
840 struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
841 				      struct tb_port *dst, int transmit_ring,
842 				      int transmit_path, int receive_ring,
843 				      int receive_path)
844 {
845 	struct tb_tunnel *tunnel;
846 	struct tb_path *path;
847 	u32 credits;
848 
849 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_DMA);
850 	if (!tunnel)
851 		return NULL;
852 
853 	tunnel->activate = tb_dma_activate;
854 	tunnel->src_port = nhi;
855 	tunnel->dst_port = dst;
856 
857 	credits = tb_dma_credits(nhi);
858 
859 	path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, "DMA RX");
860 	if (!path) {
861 		tb_tunnel_free(tunnel);
862 		return NULL;
863 	}
864 	tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL,
865 			 credits);
866 	tunnel->paths[TB_DMA_PATH_IN] = path;
867 
868 	path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, "DMA TX");
869 	if (!path) {
870 		tb_tunnel_free(tunnel);
871 		return NULL;
872 	}
873 	tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits);
874 	tunnel->paths[TB_DMA_PATH_OUT] = path;
875 
876 	return tunnel;
877 }
878 
879 static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
880 {
881 	int ret, up_max_rate, down_max_rate;
882 
883 	ret = usb4_usb3_port_max_link_rate(up);
884 	if (ret < 0)
885 		return ret;
886 	up_max_rate = ret;
887 
888 	ret = usb4_usb3_port_max_link_rate(down);
889 	if (ret < 0)
890 		return ret;
891 	down_max_rate = ret;
892 
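	/* The tunnel cannot run faster than the slower of the two adapters */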
893 	return min(up_max_rate, down_max_rate);
894 }
895 
896 static int tb_usb3_init(struct tb_tunnel *tunnel)
897 {
898 	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
899 		      tunnel->allocated_up, tunnel->allocated_down);
900 
901 	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
902 						 &tunnel->allocated_up,
903 						 &tunnel->allocated_down);
904 }
905 
906 static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
907 {
908 	int res;
909 
910 	res = tb_usb3_port_enable(tunnel->src_port, activate);
911 	if (res)
912 		return res;
913 
914 	if (tb_port_is_usb3_up(tunnel->dst_port))
915 		return tb_usb3_port_enable(tunnel->dst_port, activate);
916 
917 	return 0;
918 }
919 
920 static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
921 		int *consumed_up, int *consumed_down)
922 {
923 	/*
924 	 * PCIe tunneling affects the USB3 bandwidth so take that
925 	 * into account here.
926 	 */
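	/* Report 4/3 of the allocated bandwidth as consumed */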
927 	*consumed_up = tunnel->allocated_up * (3 + 1) / 3;
928 	*consumed_down = tunnel->allocated_down * (3 + 1) / 3;
929 	return 0;
930 }
931 
932 static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
933 {
934 	int ret;
935 
936 	ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
937 					       &tunnel->allocated_up,
938 					       &tunnel->allocated_down);
939 	if (ret)
940 		return ret;
941 
942 	tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
943 		      tunnel->allocated_up, tunnel->allocated_down);
944 	return 0;
945 }
946 
947 static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
948 						int *available_up,
949 						int *available_down)
950 {
951 	int ret, max_rate, allocate_up, allocate_down;
952 
953 	ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
954 	if (ret < 0) {
955 		tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
956 		return;
957 	} else if (!ret) {
958 		/* Use the maximum link rate if the link valid bit is not set */
959 		ret = usb4_usb3_port_max_link_rate(tunnel->src_port);
960 		if (ret < 0) {
961 			tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
962 			return;
963 		}
964 	}
965 
966 	/*
967 	 * 90% of the max rate can be allocated for isochronous
968 	 * transfers.
969 	 */
970 	max_rate = ret * 90 / 100;
971 
972 	/* No need to reclaim if already at maximum */
973 	if (tunnel->allocated_up >= max_rate &&
974 	    tunnel->allocated_down >= max_rate)
975 		return;
976 
977 	/* Don't go lower than what is already allocated */
978 	allocate_up = min(max_rate, *available_up);
979 	if (allocate_up < tunnel->allocated_up)
980 		allocate_up = tunnel->allocated_up;
981 
982 	allocate_down = min(max_rate, *available_down);
983 	if (allocate_down < tunnel->allocated_down)
984 		allocate_down = tunnel->allocated_down;
985 
986 	/* If no changes no need to do more */
987 	if (allocate_up == tunnel->allocated_up &&
988 	    allocate_down == tunnel->allocated_down)
989 		return;
990 
991 	ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
992 						&allocate_down);
993 	if (ret) {
994 		tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
995 		return;
996 	}
997 
998 	tunnel->allocated_up = allocate_up;
999 	*available_up -= tunnel->allocated_up;
1000 
1001 	tunnel->allocated_down = allocate_down;
1002 	*available_down -= tunnel->allocated_down;
1003 
1004 	tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
1005 		      tunnel->allocated_up, tunnel->allocated_down);
1006 }
1007 
1008 static void tb_usb3_init_path(struct tb_path *path)
1009 {
1010 	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
1011 	path->egress_shared_buffer = TB_PATH_NONE;
1012 	path->ingress_fc_enable = TB_PATH_ALL;
1013 	path->ingress_shared_buffer = TB_PATH_NONE;
1014 	path->priority = 3;
1015 	path->weight = 3;
1016 	path->drop_packages = 0;
1017 	path->nfc_credits = 0;
1018 	path->hops[0].initial_credits = 7;
1019 	if (path->path_length > 1)
1020 		path->hops[1].initial_credits =
1021 			tb_initial_credits(path->hops[1].in_port->sw);
1022 }
1023 
1024 /**
1025  * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
1026  * @tb: Pointer to the domain structure
1027  * @down: USB3 downstream adapter
1028  *
1029  * If @down adapter is active, follows the tunnel to the USB3 upstream
1030  * adapter and back. Returns the discovered tunnel or %NULL if there was
1031  * no tunnel.
1032  */
1033 struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
1034 {
1035 	struct tb_tunnel *tunnel;
1036 	struct tb_path *path;
1037 
1038 	if (!tb_usb3_port_is_enabled(down))
1039 		return NULL;
1040 
1041 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
1042 	if (!tunnel)
1043 		return NULL;
1044 
1045 	tunnel->activate = tb_usb3_activate;
1046 	tunnel->src_port = down;
1047 
1048 	/*
1049 	 * Discover both paths even if they are not complete. We will
1050 	 * clean them up by calling tb_tunnel_deactivate() below in that
1051 	 * case.
1052 	 */
1053 	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
1054 				&tunnel->dst_port, "USB3 Down");
1055 	if (!path) {
1056 		/* Just disable the downstream port */
1057 		tb_usb3_port_enable(down, false);
1058 		goto err_free;
1059 	}
1060 	tunnel->paths[TB_USB3_PATH_DOWN] = path;
1061 	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
1062 
1063 	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
1064 				"USB3 Up");
1065 	if (!path)
1066 		goto err_deactivate;
1067 	tunnel->paths[TB_USB3_PATH_UP] = path;
1068 	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
1069 
1070 	/* Validate that the tunnel is complete */
1071 	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
1072 		tb_port_warn(tunnel->dst_port,
1073 			     "path does not end on a USB3 adapter, cleaning up\n");
1074 		goto err_deactivate;
1075 	}
1076 
1077 	if (down != tunnel->src_port) {
1078 		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
1079 		goto err_deactivate;
1080 	}
1081 
1082 	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
1083 		tb_tunnel_warn(tunnel,
1084 			       "tunnel is not fully activated, cleaning up\n");
1085 		goto err_deactivate;
1086 	}
1087 
1088 	if (!tb_route(down->sw)) {
1089 		int ret;
1090 
1091 		/*
1092 		 * Read the initial bandwidth allocation for the first
1093 		 * hop tunnel.
1094 		 */
1095 		ret = usb4_usb3_port_allocated_bandwidth(down,
1096 			&tunnel->allocated_up, &tunnel->allocated_down);
1097 		if (ret)
1098 			goto err_deactivate;
1099 
1100 		tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
1101 			      tunnel->allocated_up, tunnel->allocated_down);
1102 
1103 		tunnel->init = tb_usb3_init;
1104 		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
1105 		tunnel->release_unused_bandwidth =
1106 			tb_usb3_release_unused_bandwidth;
1107 		tunnel->reclaim_available_bandwidth =
1108 			tb_usb3_reclaim_available_bandwidth;
1109 	}
1110 
1111 	tb_tunnel_dbg(tunnel, "discovered\n");
1112 	return tunnel;
1113 
1114 err_deactivate:
1115 	tb_tunnel_deactivate(tunnel);
1116 err_free:
1117 	tb_tunnel_free(tunnel);
1118 
1119 	return NULL;
1120 }
1121 
1122 /**
1123  * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
1124  * @tb: Pointer to the domain structure
1125  * @up: USB3 upstream adapter port
1126  * @down: USB3 downstream adapter port
1127  * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
1128  *	    if not limited).
1129  * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
1130  *	      (%0 if not limited).
1131  *
1132  * Allocate a USB3 tunnel. The ports must be of type %TB_TYPE_USB3_UP and
1133  * %TB_TYPE_USB3_DOWN.
1134  *
1135  * Return: A tb_tunnel on success or %NULL on failure.
1136  */
1137 struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
1138 				       struct tb_port *down, int max_up,
1139 				       int max_down)
1140 {
1141 	struct tb_tunnel *tunnel;
1142 	struct tb_path *path;
1143 	int max_rate = 0;
1144 
1145 	/*
1146 	 * Check that we have enough bandwidth available for the new
1147 	 * USB3 tunnel.
1148 	 */
1149 	if (max_up > 0 || max_down > 0) {
1150 		max_rate = tb_usb3_max_link_rate(down, up);
1151 		if (max_rate < 0)
1152 			return NULL;
1153 
1154 		/* Only 90% can be allocated for USB3 isochronous transfers */
1155 		max_rate = max_rate * 90 / 100;
1156 		tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
1157 			    max_rate);
1158 
1159 		if (max_rate > max_up || max_rate > max_down) {
1160 			tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
1161 			return NULL;
1162 		}
1163 	}
1164 
1165 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
1166 	if (!tunnel)
1167 		return NULL;
1168 
1169 	tunnel->activate = tb_usb3_activate;
1170 	tunnel->src_port = down;
1171 	tunnel->dst_port = up;
1172 	tunnel->max_up = max_up;
1173 	tunnel->max_down = max_down;
1174 
1175 	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
1176 			     "USB3 Down");
1177 	if (!path) {
1178 		tb_tunnel_free(tunnel);
1179 		return NULL;
1180 	}
1181 	tb_usb3_init_path(path);
1182 	tunnel->paths[TB_USB3_PATH_DOWN] = path;
1183 
1184 	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
1185 			     "USB3 Up");
1186 	if (!path) {
1187 		tb_tunnel_free(tunnel);
1188 		return NULL;
1189 	}
1190 	tb_usb3_init_path(path);
1191 	tunnel->paths[TB_USB3_PATH_UP] = path;
1192 
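	/*
	 * Bandwidth management callbacks are only used for the first hop
	 * tunnel, i.e. when the downstream adapter is on the host router.
	 */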
1193 	if (!tb_route(down->sw)) {
1194 		tunnel->allocated_up = max_rate;
1195 		tunnel->allocated_down = max_rate;
1196 
1197 		tunnel->init = tb_usb3_init;
1198 		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
1199 		tunnel->release_unused_bandwidth =
1200 			tb_usb3_release_unused_bandwidth;
1201 		tunnel->reclaim_available_bandwidth =
1202 			tb_usb3_reclaim_available_bandwidth;
1203 	}
1204 
1205 	return tunnel;
1206 }
1207 
1208 /**
1209  * tb_tunnel_free() - free a tunnel
1210  * @tunnel: Tunnel to be freed
1211  *
1212  * Frees a tunnel. The tunnel does not need to be deactivated.
1213  */
1214 void tb_tunnel_free(struct tb_tunnel *tunnel)
1215 {
1216 	int i;
1217 
1218 	if (!tunnel)
1219 		return;
1220 
1221 	for (i = 0; i < tunnel->npaths; i++) {
1222 		if (tunnel->paths[i])
1223 			tb_path_free(tunnel->paths[i]);
1224 	}
1225 
1226 	kfree(tunnel->paths);
1227 	kfree(tunnel);
1228 }
1229 
1230 /**
1231  * tb_tunnel_is_invalid() - check whether an activated path is still valid
1232  * @tunnel: Tunnel to check
1233  */
1234 bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
1235 {
1236 	int i;
1237 
1238 	for (i = 0; i < tunnel->npaths; i++) {
1239 		WARN_ON(!tunnel->paths[i]->activated);
1240 		if (tb_path_is_invalid(tunnel->paths[i]))
1241 			return true;
1242 	}
1243 
1244 	return false;
1245 }
1246 
1247 /**
1248  * tb_tunnel_restart() - activate a tunnel after a hardware reset
1249  * @tunnel: Tunnel to restart
1250  *
1251  * Return: 0 on success and negative errno in case of failure
1252  */
1253 int tb_tunnel_restart(struct tb_tunnel *tunnel)
1254 {
1255 	int res, i;
1256 
1257 	tb_tunnel_dbg(tunnel, "activating\n");
1258 
1259 	/*
1260 	 * Make sure all paths are properly disabled before enabling
1261 	 * them again.
1262 	 */
1263 	for (i = 0; i < tunnel->npaths; i++) {
1264 		if (tunnel->paths[i]->activated) {
1265 			tb_path_deactivate(tunnel->paths[i]);
1266 			tunnel->paths[i]->activated = false;
1267 		}
1268 	}
1269 
1270 	if (tunnel->init) {
1271 		res = tunnel->init(tunnel);
1272 		if (res)
1273 			return res;
1274 	}
1275 
1276 	for (i = 0; i < tunnel->npaths; i++) {
1277 		res = tb_path_activate(tunnel->paths[i]);
1278 		if (res)
1279 			goto err;
1280 	}
1281 
1282 	if (tunnel->activate) {
1283 		res = tunnel->activate(tunnel, true);
1284 		if (res)
1285 			goto err;
1286 	}
1287 
1288 	return 0;
1289 
1290 err:
1291 	tb_tunnel_warn(tunnel, "activation failed\n");
1292 	tb_tunnel_deactivate(tunnel);
1293 	return res;
1294 }
1295 
1296 /**
1297  * tb_tunnel_activate() - activate a tunnel
1298  * @tunnel: Tunnel to activate
1299  *
1300  * Return: 0 on success or an error code on failure.
1301  */
1302 int tb_tunnel_activate(struct tb_tunnel *tunnel)
1303 {
1304 	int i;
1305 
1306 	for (i = 0; i < tunnel->npaths; i++) {
1307 		if (tunnel->paths[i]->activated) {
1308 			tb_tunnel_WARN(tunnel,
1309 				       "trying to activate an already activated tunnel\n");
1310 			return -EINVAL;
1311 		}
1312 	}
1313 
1314 	return tb_tunnel_restart(tunnel);
1315 }
1316 
1317 /**
1318  * tb_tunnel_deactivate() - deactivate a tunnel
1319  * @tunnel: Tunnel to deactivate
1320  */
1321 void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
1322 {
1323 	int i;
1324 
1325 	tb_tunnel_dbg(tunnel, "deactivating\n");
1326 
1327 	if (tunnel->activate)
1328 		tunnel->activate(tunnel, false);
1329 
1330 	for (i = 0; i < tunnel->npaths; i++) {
1331 		if (tunnel->paths[i] && tunnel->paths[i]->activated)
1332 			tb_path_deactivate(tunnel->paths[i]);
1333 	}
1334 }
1335 
1336 /**
1337  * tb_tunnel_port_on_path() - Does the tunnel go through port
1338  * @tunnel: Tunnel to check
1339  * @port: Port to check
1340  *
1341  * Returns true if @tunnel goes through @port (direction does not matter),
1342  * false otherwise.
1343  */
1344 bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
1345 			    const struct tb_port *port)
1346 {
1347 	int i;
1348 
1349 	for (i = 0; i < tunnel->npaths; i++) {
1350 		if (!tunnel->paths[i])
1351 			continue;
1352 
1353 		if (tb_path_port_on_path(tunnel->paths[i], port))
1354 			return true;
1355 	}
1356 
1357 	return false;
1358 }
1359 
1360 static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
1361 {
1362 	int i;
1363 
1364 	for (i = 0; i < tunnel->npaths; i++) {
1365 		if (!tunnel->paths[i])
1366 			return false;
1367 		if (!tunnel->paths[i]->activated)
1368 			return false;
1369 	}
1370 
1371 	return true;
1372 }
1373 
1374 /**
1375  * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
1376  * @tunnel: Tunnel to check
1377  * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
1378  *		 Can be %NULL.
1379  * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
1380  *		   Can be %NULL.
1381  *
1382  * Stores the amount of isochronous bandwidth @tunnel consumes in
1383  * @consumed_up and @consumed_down. In case of success returns %0,
1384  * negative errno otherwise.
1385  */
1386 int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
1387 				 int *consumed_down)
1388 {
1389 	int up_bw = 0, down_bw = 0;
1390 
1391 	if (!tb_tunnel_is_active(tunnel))
1392 		goto out;
1393 
1394 	if (tunnel->consumed_bandwidth) {
1395 		int ret;
1396 
1397 		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
1398 		if (ret)
1399 			return ret;
1400 
1401 		tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
1402 			      down_bw);
1403 	}
1404 
1405 out:
1406 	if (consumed_up)
1407 		*consumed_up = up_bw;
1408 	if (consumed_down)
1409 		*consumed_down = down_bw;
1410 
1411 	return 0;
1412 }
1413 
1414 /**
1415  * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
1416  * @tunnel: Tunnel whose unused bandwidth to release
1417  *
1418  * If the tunnel supports dynamic bandwidth management (USB3 tunnels at
1419  * the moment), this function makes it release all the unused bandwidth.
1420  *
1421  * Returns %0 in case of success and negative errno otherwise.
1422  */
1423 int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
1424 {
1425 	if (!tb_tunnel_is_active(tunnel))
1426 		return 0;
1427 
1428 	if (tunnel->release_unused_bandwidth) {
1429 		int ret;
1430 
1431 		ret = tunnel->release_unused_bandwidth(tunnel);
1432 		if (ret)
1433 			return ret;
1434 	}
1435 
1436 	return 0;
1437 }
1438 
1439 /**
1440  * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
1441  * @tunnel: Tunnel reclaiming available bandwidth
1442  * @available_up: Available upstream bandwidth (in Mb/s)
1443  * @available_down: Available downstream bandwidth (in Mb/s)
1444  *
1445  * Reclaims bandwidth from @available_up and @available_down and updates
1446  * the variables accordingly (e.g. decreases both according to what was
1447  * reclaimed by the tunnel). If nothing was reclaimed the values are
1448  * kept as is.
1449  */
1450 void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
1451 					   int *available_up,
1452 					   int *available_down)
1453 {
1454 	if (!tb_tunnel_is_active(tunnel))
1455 		return;
1456 
1457 	if (tunnel->reclaim_available_bandwidth)
1458 		tunnel->reclaim_available_bandwidth(tunnel, available_up,
1459 						    available_down);
1460 }
1461