// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>

#include "tunnel.h"
#include "tb.h"

/* PCIe adapters always use HopID 8 for both directions */
#define TB_PCI_HOPID			8

#define TB_PCI_PATH_DOWN		0
#define TB_PCI_PATH_UP			1

/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID		8
#define TB_DP_AUX_RX_HOPID		8
#define TB_DP_VIDEO_HOPID		9

#define TB_DP_VIDEO_PATH_OUT		0
#define TB_DP_AUX_PATH_OUT		1
#define TB_DP_AUX_PATH_IN		2

#define TB_DMA_PATH_OUT			0
#define TB_DMA_PATH_IN			1

static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA" };

#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
	do {                                                            \
		struct tb_tunnel *__tunnel = (tunnel);                  \
		level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt,   \
		      tb_route(__tunnel->src_port->sw),                 \
		      __tunnel->src_port->port,                         \
		      tb_route(__tunnel->dst_port->sw),                 \
		      __tunnel->dst_port->port,                         \
		      tb_tunnel_names[__tunnel->type],			\
		      ## arg);                                          \
	} while (0)

#define tb_tunnel_WARN(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)

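/*
 * Allocate an empty tunnel of the given type with room for @npaths
 * paths. The paths themselves are allocated and initialized by the
 * callers below.
 */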
static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
					 enum tb_tunnel_type type)
{
	struct tb_tunnel *tunnel;

	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
	if (!tunnel)
		return NULL;

	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
	if (!tunnel->paths) {
		tb_tunnel_free(tunnel);
		return NULL;
	}

	INIT_LIST_HEAD(&tunnel->list);
	tunnel->tb = tb;
	tunnel->npaths = npaths;
	tunnel->type = type;

	return tunnel;
}

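/*
 * Enable or disable the PCIe adapters on both ends of the tunnel. The
 * destination end is touched only when it really is a PCIe upstream
 * adapter (a discovered path may be incomplete).
 */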
static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_pci_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_pcie_up(tunnel->dst_port))
		return tb_pci_port_enable(tunnel->dst_port, activate);

	return 0;
}

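/*
 * The product of link speed (in Gb/s) and link width encodes the
 * aggregated link bandwidth: for example a 20 Gb/s link with two bonded
 * lanes yields 40 and gets the largest credit allocation.
 */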
static int tb_initial_credits(const struct tb_switch *sw)
{
	/* If the path is complete, sw is not NULL */
	if (sw) {
		/* More credits for faster link */
		switch (sw->link_speed * sw->link_width) {
		case 40:
			return 32;
		case 20:
			return 24;
		}
	}

	return 16;
}

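/*
 * Common setup for both PCIe paths: the first hop gets a fixed seven
 * initial credits and the second hop is sized according to the link it
 * enters (see tb_initial_credits() above).
 */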
static void tb_pci_init_path(struct tb_path *path)
{
	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 1;
	path->drop_packages = 0;
	path->nfc_credits = 0;
	path->hops[0].initial_credits = 7;
	path->hops[1].initial_credits =
		tb_initial_credits(path->hops[1].in_port->sw);
}

/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_pci_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
				&tunnel->dst_port, "PCIe Up");
	if (!path) {
		/* Just disable the downstream port */
		tb_pci_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_PCI_PATH_UP] = path;
	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]);

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
				"PCIe Down");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a PCIe adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_pci() - allocate a pci tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 * TB_TYPE_PCIE_DOWN.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;

	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
			     "PCIe Down");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_pci_init_path(path);
	tunnel->paths[TB_PCI_PATH_DOWN] = path;

	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
			     "PCIe Up");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_pci_init_path(path);
	tunnel->paths[TB_PCI_PATH_UP] = path;

	return tunnel;
}

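/*
 * Titan Ridge specific connection manager handshake: request the
 * handshake through the DP OUT adapter and poll until the hardware
 * clears the CMHS bit or the wait times out.
 */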
static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
{
	int timeout = 10;
	u32 val;
	int ret;

	/* Both ends need to support this */
	if (!tb_switch_is_titan_ridge(in->sw) ||
	    !tb_switch_is_titan_ridge(out->sw))
		return 0;

	ret = tb_port_read(out, &val, TB_CFG_PORT,
			   out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;

	ret = tb_port_write(out, &val, TB_CFG_PORT,
			    out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	do {
		ret = tb_port_read(out, &val, TB_CFG_PORT,
				   out->cap_adap + DP_STATUS_CTRL, 1);
		if (ret)
			return ret;
		if (!(val & DP_STATUS_CTRL_CMHS))
			return 0;
		usleep_range(10, 100);
	} while (timeout--);

	return -ETIMEDOUT;
}

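/*
 * The helpers below convert between the encoded rate and lane fields of
 * the DP_*_CAP registers and plain Mb/s rates and lane counts.
 */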
static inline u32 tb_dp_cap_get_rate(u32 val)
{
	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;

	switch (rate) {
	case DP_COMMON_CAP_RATE_RBR:
		return 1620;
	case DP_COMMON_CAP_RATE_HBR:
		return 2700;
	case DP_COMMON_CAP_RATE_HBR2:
		return 5400;
	case DP_COMMON_CAP_RATE_HBR3:
		return 8100;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
{
	val &= ~DP_COMMON_CAP_RATE_MASK;
	switch (rate) {
	default:
		WARN(1, "invalid rate %u passed, defaulting to 1620 Mb/s\n", rate);
		/* Fallthrough */
	case 1620:
		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 2700:
		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 5400:
		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 8100:
		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	}
	return val;
}

static inline u32 tb_dp_cap_get_lanes(u32 val)
{
	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;

	switch (lanes) {
	case DP_COMMON_CAP_1_LANE:
		return 1;
	case DP_COMMON_CAP_2_LANES:
		return 2;
	case DP_COMMON_CAP_4_LANES:
		return 4;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
{
	val &= ~DP_COMMON_CAP_LANES_MASK;
	switch (lanes) {
	default:
		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
		     lanes);
		/* Fallthrough */
	case 1:
		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 2:
		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 4:
		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	}
	return val;
}

static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
	/* Tunneling removes the DP 8b/10b encoding */
	return rate * lanes * 8 / 10;
}

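/*
 * Walk the table below from the highest bandwidth combination downwards
 * and pick the first one that both adapters support and that fits into
 * max_bw. For example, with HBR3 x4 capable ends and max_bw of
 * 16000 Mb/s the first entry that fits is HBR3 x2 (8100 * 2 * 8 / 10 =
 * 12960 Mb/s).
 */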
static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
				  u32 out_rate, u32 out_lanes, u32 *new_rate,
				  u32 *new_lanes)
{
	static const u32 dp_bw[][2] = {
		/* Mb/s, lanes */
		{ 8100, 4 }, /* 25920 Mb/s */
		{ 5400, 4 }, /* 17280 Mb/s */
		{ 8100, 2 }, /* 12960 Mb/s */
		{ 2700, 4 }, /* 8640 Mb/s */
		{ 5400, 2 }, /* 8640 Mb/s */
		{ 8100, 1 }, /* 6480 Mb/s */
		{ 1620, 4 }, /* 5184 Mb/s */
		{ 5400, 1 }, /* 4320 Mb/s */
		{ 2700, 2 }, /* 4320 Mb/s */
		{ 1620, 2 }, /* 2592 Mb/s */
		{ 2700, 1 }, /* 2160 Mb/s */
		{ 1620, 1 }, /* 1296 Mb/s */
	};
	unsigned int i;

	/*
	 * Find a combination that can fit into max_bw and does not
	 * exceed the maximum rate and lanes supported by the DP OUT and
	 * DP IN adapters.
	 */
	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
			continue;

		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
			continue;

		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
			*new_rate = dp_bw[i][0];
			*new_lanes = dp_bw[i][1];
			return 0;
		}
	}

	return -ENOSR;
}

static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int ret;

	/*
	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
	 * newer generation hardware.
	 */
	if (in->sw->generation < 2 || out->sw->generation < 2)
		return 0;

	/*
	 * Perform connection manager handshake between IN and OUT ports
	 * before capabilities exchange can take place.
	 */
	ret = tb_dp_cm_handshake(in, out);
	if (ret)
		return ret;

	/* Read both DP_LOCAL_CAP registers */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	/* Write IN local caps to OUT remote caps */
	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
			    out->cap_adap + DP_REMOTE_CAP, 1);
	if (ret)
		return ret;

	in_rate = tb_dp_cap_get_rate(in_dp_cap);
	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
	tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));

	/*
	 * If the tunnel bandwidth is limited (max_bw is set) then see
	 * if we need to reduce bandwidth to fit there.
	 */
	out_rate = tb_dp_cap_get_rate(out_dp_cap);
	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
	bw = tb_dp_bandwidth(out_rate, out_lanes);
	tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    out_rate, out_lanes, bw);

	if (tunnel->max_bw && bw > tunnel->max_bw) {
		u32 new_rate, new_lanes, new_bw;

		ret = tb_dp_reduce_bandwidth(tunnel->max_bw, in_rate, in_lanes,
					     out_rate, out_lanes, &new_rate,
					     &new_lanes);
		if (ret) {
			tb_port_info(out, "not enough bandwidth for DP tunnel\n");
			return ret;
		}

		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
		tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
			    new_rate, new_lanes, new_bw);

		/*
		 * Set new rate and number of lanes before writing it to
		 * the IN port remote caps.
		 */
		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
	}

	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
			     in->cap_adap + DP_REMOTE_CAP, 1);
}

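/*
 * Program (or clear) the Video and AUX HopIDs in the DP IN and DP OUT
 * adapters and then enable or disable the adapters themselves. On
 * deactivation the hot plug status of the DP IN adapter is cleared as
 * well.
 */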
static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
	int ret;

	if (active) {
		struct tb_path **paths;
		int last;

		paths = tunnel->paths;
		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

		tb_dp_port_set_hops(tunnel->src_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

		tb_dp_port_set_hops(tunnel->dst_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
	} else {
		tb_dp_port_hpd_clear(tunnel->src_port);
		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
		if (tb_port_is_dpout(tunnel->dst_port))
			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
	}

	ret = tb_dp_port_enable(tunnel->src_port, active);
	if (ret)
		return ret;

	if (tb_port_is_dpout(tunnel->dst_port))
		return tb_dp_port_enable(tunnel->dst_port, active);

	return 0;
}

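/*
 * Estimate the bandwidth the tunnel consumes from the negotiated rate
 * and lane count. Titan Ridge exposes the DPRX negotiated values in
 * DP_COMMON_CAP whereas other generation 2+ hardware only has the
 * (possibly reduced) copy in DP_REMOTE_CAP.
 */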
static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel)
{
	struct tb_port *in = tunnel->src_port;
	const struct tb_switch *sw = in->sw;
	u32 val, rate = 0, lanes = 0;
	int ret;

	if (tb_switch_is_titan_ridge(sw)) {
		int timeout = 10;

		/*
		 * Wait for DPRX done. Normally it should be already set
		 * for active tunnel.
		 */
		do {
			ret = tb_port_read(in, &val, TB_CFG_PORT,
					   in->cap_adap + DP_COMMON_CAP, 1);
			if (ret)
				return ret;

			if (val & DP_COMMON_CAP_DPRX_DONE) {
				rate = tb_dp_cap_get_rate(val);
				lanes = tb_dp_cap_get_lanes(val);
				break;
			}
			msleep(250);
		} while (timeout--);

		/* The loop above leaves timeout negative if DPRX never completed */
		if (timeout < 0)
			return -ETIMEDOUT;
	} else if (sw->generation >= 2) {
		/*
		 * Read from the copied remote cap so that we take into
		 * account if capabilities were reduced during exchange.
		 */
		ret = tb_port_read(in, &val, TB_CFG_PORT,
				   in->cap_adap + DP_REMOTE_CAP, 1);
		if (ret)
			return ret;

		rate = tb_dp_cap_get_rate(val);
		lanes = tb_dp_cap_get_lanes(val);
	} else {
		/* No bandwidth management for legacy devices */
		return 0;
	}

	return tb_dp_bandwidth(rate, lanes);
}

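/* The AUX channel is low bandwidth so one initial credit per hop is enough */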
static void tb_dp_init_aux_path(struct tb_path *path)
{
	int i;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 2;
	path->weight = 1;

	for (i = 0; i < path->path_length; i++)
		path->hops[i].initial_credits = 1;
}

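/*
 * The video path runs without flow control. When discovering an
 * existing tunnel the non-flow-controlled credits are taken from the
 * adapter as-is, otherwise most of the buffers are reserved for video
 * and a couple are left over for the AUX paths.
 */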
static void tb_dp_init_video_path(struct tb_path *path, bool discover)
{
	u32 nfc_credits = path->hops[0].in_port->config.nfc_credits;

	path->egress_fc_enable = TB_PATH_NONE;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 1;
	path->weight = 1;

	if (discover) {
		path->nfc_credits = nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	} else {
		u32 max_credits;

		max_credits = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
			ADP_CS_4_TOTAL_BUFFERS_SHIFT;
		/* Leave some credits for AUX path */
		path->nfc_credits = min(max_credits - 2, 12U);
	}
}

/**
 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
 *
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back. Returns the discovered tunnel or %NULL if there was no
 * tunnel.
 *
 * Return: DP tunnel or %NULL if no tunnel found.
 */
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
{
	struct tb_tunnel *tunnel;
	struct tb_port *port;
	struct tb_path *path;

	if (!tb_dp_port_is_enabled(in))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_xchg_caps;
	tunnel->activate = tb_dp_activate;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;

	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
				&tunnel->dst_port, "Video");
	if (!path) {
		/* Just disable the DP IN port */
		tb_dp_port_enable(in, false);
		goto err_free;
	}
	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
	tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], true);

	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);

	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
				&port, "AUX RX");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_dpout(tunnel->dst_port)) {
		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_dp_port_is_enabled(tunnel->dst_port))
		goto err_deactivate;

	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
		goto err_deactivate;

	if (port != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
 * @tb: Pointer to the domain structure
 * @in: DP in adapter port
 * @out: DP out adapter port
 * @max_bw: Maximum available bandwidth for the DP tunnel (%0 if not limited)
 *
 * Allocates a tunnel between @in and @out that is capable of tunneling
 * Display Port traffic.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
				     struct tb_port *out, int max_bw)
{
	struct tb_tunnel *tunnel;
	struct tb_path **paths;
	struct tb_path *path;

	if (WARN_ON(!in->cap_adap || !out->cap_adap))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_xchg_caps;
	tunnel->activate = tb_dp_activate;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;
	tunnel->dst_port = out;
	tunnel->max_bw = max_bw;

	paths = tunnel->paths;

	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
			     1, "Video");
	if (!path)
		goto err_free;
	tb_dp_init_video_path(path, false);
	paths[TB_DP_VIDEO_PATH_OUT] = path;

	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
			     TB_DP_AUX_TX_HOPID, 1, "AUX TX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_OUT] = path;

	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
			     TB_DP_AUX_RX_HOPID, 1, "AUX RX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_IN] = path;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}

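/* Use as many buffers for the DMA paths as the NHI adapter has, capped to 13 */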
static u32 tb_dma_credits(struct tb_port *nhi)
{
	u32 max_credits;

	max_credits = (nhi->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
		ADP_CS_4_TOTAL_BUFFERS_SHIFT;
	return min(max_credits, 13U);
}

static int tb_dma_activate(struct tb_tunnel *tunnel, bool active)
{
	struct tb_port *nhi = tunnel->src_port;
	u32 credits;

	credits = active ? tb_dma_credits(nhi) : 0;
	return tb_port_set_initial_credits(nhi, credits);
}

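/*
 * Common DMA path setup: both directions run at priority 5 with one
 * weight unit, flow control counters are cleared and every hop gets the
 * same number of initial credits.
 */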
static void tb_dma_init_path(struct tb_path *path, unsigned int isb,
			     unsigned int efc, u32 credits)
{
	int i;

	path->egress_fc_enable = efc;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = isb;
	path->priority = 5;
	path->weight = 1;
	path->clear_fc = true;

	for (i = 0; i < path->path_length; i++)
		path->hops[i].initial_credits = credits;
}

/**
 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
 * @tb: Pointer to the domain structure
 * @nhi: Host controller port
 * @dst: Destination null port which the other domain is connected to
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain
 * @transmit_path: HopID used for transmitting packets
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain
 * @receive_path: HopID used for receiving packets
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
				      struct tb_port *dst, int transmit_ring,
				      int transmit_path, int receive_ring,
				      int receive_path)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;
	u32 credits;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_DMA);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_dma_activate;
	tunnel->src_port = nhi;
	tunnel->dst_port = dst;

	credits = tb_dma_credits(nhi);

	path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, "DMA RX");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL,
			 credits);
	tunnel->paths[TB_DMA_PATH_IN] = path;

	path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, "DMA TX");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits);
	tunnel->paths[TB_DMA_PATH_OUT] = path;

	return tunnel;
}

/**
 * tb_tunnel_free() - free a tunnel
 * @tunnel: Tunnel to be freed
 *
 * Frees a tunnel. The tunnel does not need to be deactivated.
 */
void tb_tunnel_free(struct tb_tunnel *tunnel)
{
	int i;

	if (!tunnel)
		return;

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i])
			tb_path_free(tunnel->paths[i]);
	}

	kfree(tunnel->paths);
	kfree(tunnel);
}

/**
 * tb_tunnel_is_invalid() - check whether an activated path is still valid
 * @tunnel: Tunnel to check
 */
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		WARN_ON(!tunnel->paths[i]->activated);
		if (tb_path_is_invalid(tunnel->paths[i]))
			return true;
	}

	return false;
}

/**
 * tb_tunnel_restart() - activate a tunnel after a hardware reset
 * @tunnel: Tunnel to restart
 *
 * Return: 0 on success and negative errno in case of failure
 */
int tb_tunnel_restart(struct tb_tunnel *tunnel)
{
	int res, i;

	tb_tunnel_dbg(tunnel, "activating\n");

	/*
	 * Make sure all paths are properly disabled before enabling
	 * them again.
	 */
	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_path_deactivate(tunnel->paths[i]);
			tunnel->paths[i]->activated = false;
		}
	}

	if (tunnel->init) {
		res = tunnel->init(tunnel);
		if (res)
			return res;
	}

	for (i = 0; i < tunnel->npaths; i++) {
		res = tb_path_activate(tunnel->paths[i]);
		if (res)
			goto err;
	}

	if (tunnel->activate) {
		res = tunnel->activate(tunnel, true);
		if (res)
			goto err;
	}

	return 0;

err:
	tb_tunnel_warn(tunnel, "activation failed\n");
	tb_tunnel_deactivate(tunnel);
	return res;
}

/**
 * tb_tunnel_activate() - activate a tunnel
 * @tunnel: Tunnel to activate
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_tunnel_WARN(tunnel,
				       "trying to activate an already activated tunnel\n");
			return -EINVAL;
		}
	}

	return tb_tunnel_restart(tunnel);
}

/**
 * tb_tunnel_deactivate() - deactivate a tunnel
 * @tunnel: Tunnel to deactivate
 */
void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
{
	int i;

	tb_tunnel_dbg(tunnel, "deactivating\n");

	if (tunnel->activate)
		tunnel->activate(tunnel, false);

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i] && tunnel->paths[i]->activated)
			tb_path_deactivate(tunnel->paths[i]);
	}
}

/**
 * tb_tunnel_switch_on_path() - Does the tunnel go through the switch
 * @tunnel: Tunnel to check
 * @sw: Switch to check
 *
 * Returns true if @tunnel goes through @sw (direction does not matter),
 * false otherwise.
 */
bool tb_tunnel_switch_on_path(const struct tb_tunnel *tunnel,
			      const struct tb_switch *sw)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			continue;
		if (tb_path_switch_on_path(tunnel->paths[i], sw))
			return true;
	}

	return false;
}

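/* A tunnel is considered active only when every path in it is activated */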
static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			return false;
		if (!tunnel->paths[i]->activated)
			return false;
	}

	return true;
}

/**
 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
 * @tunnel: Tunnel to check
 *
 * Returns bandwidth currently consumed by @tunnel and %0 if the @tunnel
 * is not active or does not consume bandwidth.
 */
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel)
{
	if (!tb_tunnel_is_active(tunnel))
		return 0;

	if (tunnel->consumed_bandwidth) {
		int ret = tunnel->consumed_bandwidth(tunnel);

		tb_tunnel_dbg(tunnel, "consumed bandwidth %d Mb/s\n", ret);
		return ret;
	}

	return 0;
}
1032