// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit tests
 *
 * Copyright (C) 2020, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <kunit/test.h>
#include <linux/idr.h>

#include "tb.h"
#include "tunnel.h"

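/*
 * The helpers below register an IDA as a KUnit managed resource:
 * __ida_init() runs when the resource is added and __ida_destroy() runs
 * automatically when the test case exits, so the tests never need to
 * call ida_destroy() themselves.
 */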
static int __ida_init(struct kunit_resource *res, void *context)
{
	struct ida *ida = context;

	ida_init(ida);
	res->data = ida;
	return 0;
}

static void __ida_destroy(struct kunit_resource *res)
{
	struct ida *ida = res->data;

	ida_destroy(ida);
}

static void kunit_ida_init(struct kunit *test, struct ida *ida)
{
	kunit_alloc_resource(test, __ida_init, __ida_destroy, GFP_KERNEL, ida);
}

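/*
 * Allocates a KUnit managed switch with the given route, upstream port
 * number and highest port number. Port 0 is the control port so only
 * ports 1 onwards get their HopID IDAs initialized. All memory is
 * released automatically when the test case exits.
 */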
static struct tb_switch *alloc_switch(struct kunit *test, u64 route,
				      u8 upstream_port, u8 max_port_number)
{
	struct tb_switch *sw;
	size_t size;
	int i;

	sw = kunit_kzalloc(test, sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return NULL;

	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = tb_route_length(route);
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->config.enabled = 0;
	sw->config.max_port_number = max_port_number;

	size = (sw->config.max_port_number + 1) * sizeof(*sw->ports);
	sw->ports = kunit_kzalloc(test, size, GFP_KERNEL);
	if (!sw->ports)
		return NULL;

	for (i = 0; i <= sw->config.max_port_number; i++) {
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;
		sw->ports[i].config.port_number = i;
		if (i) {
			kunit_ida_init(test, &sw->ports[i].in_hopids);
			kunit_ida_init(test, &sw->ports[i].out_hopids);
		}
	}

	return sw;
}

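/*
 * Returns a host router with two dual-link lane adapter pairs (ports
 * 1/2 and 3/4), two DP IN adapters (5, 6), an NHI (7), two PCIe
 * downstream adapters (8, 9) and two USB3 downstream adapters (12, 13).
 * Ports 10 and 11 are disabled.
 */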
static struct tb_switch *alloc_host(struct kunit *test)
{
	struct tb_switch *sw;

	sw = alloc_switch(test, 0, 7, 13);
	if (!sw)
		return NULL;

	sw->config.vendor_id = 0x8086;
	sw->config.device_id = 0x9a1b;

	sw->ports[0].config.type = TB_TYPE_PORT;
	sw->ports[0].config.max_in_hop_id = 7;
	sw->ports[0].config.max_out_hop_id = 7;

	sw->ports[1].config.type = TB_TYPE_PORT;
	sw->ports[1].config.max_in_hop_id = 19;
	sw->ports[1].config.max_out_hop_id = 19;
	sw->ports[1].total_credits = 60;
	sw->ports[1].ctl_credits = 2;
	sw->ports[1].dual_link_port = &sw->ports[2];

	sw->ports[2].config.type = TB_TYPE_PORT;
	sw->ports[2].config.max_in_hop_id = 19;
	sw->ports[2].config.max_out_hop_id = 19;
	sw->ports[2].total_credits = 60;
	sw->ports[2].ctl_credits = 2;
	sw->ports[2].dual_link_port = &sw->ports[1];
	sw->ports[2].link_nr = 1;

	sw->ports[3].config.type = TB_TYPE_PORT;
	sw->ports[3].config.max_in_hop_id = 19;
	sw->ports[3].config.max_out_hop_id = 19;
	sw->ports[3].total_credits = 60;
	sw->ports[3].ctl_credits = 2;
	sw->ports[3].dual_link_port = &sw->ports[4];

	sw->ports[4].config.type = TB_TYPE_PORT;
	sw->ports[4].config.max_in_hop_id = 19;
	sw->ports[4].config.max_out_hop_id = 19;
	sw->ports[4].total_credits = 60;
	sw->ports[4].ctl_credits = 2;
	sw->ports[4].dual_link_port = &sw->ports[3];
	sw->ports[4].link_nr = 1;

	sw->ports[5].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[5].config.max_in_hop_id = 9;
	sw->ports[5].config.max_out_hop_id = 9;
	sw->ports[5].cap_adap = -1;

	sw->ports[6].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[6].config.max_in_hop_id = 9;
	sw->ports[6].config.max_out_hop_id = 9;
	sw->ports[6].cap_adap = -1;

	sw->ports[7].config.type = TB_TYPE_NHI;
	sw->ports[7].config.max_in_hop_id = 11;
	sw->ports[7].config.max_out_hop_id = 11;
	sw->ports[7].config.nfc_credits = 0x41800000;

	sw->ports[8].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[8].config.max_in_hop_id = 8;
	sw->ports[8].config.max_out_hop_id = 8;

	sw->ports[9].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[9].config.max_in_hop_id = 8;
	sw->ports[9].config.max_out_hop_id = 8;

	sw->ports[10].disabled = true;
	sw->ports[11].disabled = true;

	sw->ports[12].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[12].config.max_in_hop_id = 8;
	sw->ports[12].config.max_out_hop_id = 8;

	sw->ports[13].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[13].config.max_in_hop_id = 8;
	sw->ports[13].config.max_out_hop_id = 8;

	return sw;
}

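/*
 * Same topology as alloc_host() but the router is USB4 (generation 4)
 * and advertises the per-protocol credit limits used by the credit
 * allocation tests.
 */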
static struct tb_switch *alloc_host_usb4(struct kunit *test)
{
	struct tb_switch *sw;

	sw = alloc_host(test);
	if (!sw)
		return NULL;

	sw->generation = 4;
	sw->credit_allocation = true;
	sw->max_usb3_credits = 32;
	sw->min_dp_aux_credits = 1;
	sw->min_dp_main_credits = 0;
	sw->max_pcie_credits = 64;
	sw->max_dma_credits = 14;

	return sw;
}

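/*
 * Returns a device router with four dual-link lane adapter pairs (ports
 * 1-8), one PCIe upstream (9) and three PCIe downstream (10-12)
 * adapters, two DP OUT adapters (13, 14), and one USB3 upstream (16)
 * and three USB3 downstream (17-19) adapters. If @parent is given the
 * new router is also linked to it at @route, and with @bonded set both
 * ends of the link are marked bonded with the primary lane credits
 * doubled.
 */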
static struct tb_switch *alloc_dev_default(struct kunit *test,
					   struct tb_switch *parent,
					   u64 route, bool bonded)
{
	struct tb_port *port, *upstream_port;
	struct tb_switch *sw;

	sw = alloc_switch(test, route, 1, 19);
	if (!sw)
		return NULL;

	sw->config.vendor_id = 0x8086;
	sw->config.device_id = 0x15ef;

	sw->ports[0].config.type = TB_TYPE_PORT;
	sw->ports[0].config.max_in_hop_id = 8;
	sw->ports[0].config.max_out_hop_id = 8;

	sw->ports[1].config.type = TB_TYPE_PORT;
	sw->ports[1].config.max_in_hop_id = 19;
	sw->ports[1].config.max_out_hop_id = 19;
	sw->ports[1].total_credits = 60;
	sw->ports[1].ctl_credits = 2;
	sw->ports[1].dual_link_port = &sw->ports[2];

	sw->ports[2].config.type = TB_TYPE_PORT;
	sw->ports[2].config.max_in_hop_id = 19;
	sw->ports[2].config.max_out_hop_id = 19;
	sw->ports[2].total_credits = 60;
	sw->ports[2].ctl_credits = 2;
	sw->ports[2].dual_link_port = &sw->ports[1];
	sw->ports[2].link_nr = 1;

	sw->ports[3].config.type = TB_TYPE_PORT;
	sw->ports[3].config.max_in_hop_id = 19;
	sw->ports[3].config.max_out_hop_id = 19;
	sw->ports[3].total_credits = 60;
	sw->ports[3].ctl_credits = 2;
	sw->ports[3].dual_link_port = &sw->ports[4];

	sw->ports[4].config.type = TB_TYPE_PORT;
	sw->ports[4].config.max_in_hop_id = 19;
	sw->ports[4].config.max_out_hop_id = 19;
	sw->ports[4].total_credits = 60;
	sw->ports[4].ctl_credits = 2;
	sw->ports[4].dual_link_port = &sw->ports[3];
	sw->ports[4].link_nr = 1;

	sw->ports[5].config.type = TB_TYPE_PORT;
	sw->ports[5].config.max_in_hop_id = 19;
	sw->ports[5].config.max_out_hop_id = 19;
	sw->ports[5].total_credits = 60;
	sw->ports[5].ctl_credits = 2;
	sw->ports[5].dual_link_port = &sw->ports[6];

	sw->ports[6].config.type = TB_TYPE_PORT;
	sw->ports[6].config.max_in_hop_id = 19;
	sw->ports[6].config.max_out_hop_id = 19;
	sw->ports[6].total_credits = 60;
	sw->ports[6].ctl_credits = 2;
	sw->ports[6].dual_link_port = &sw->ports[5];
	sw->ports[6].link_nr = 1;

	sw->ports[7].config.type = TB_TYPE_PORT;
	sw->ports[7].config.max_in_hop_id = 19;
	sw->ports[7].config.max_out_hop_id = 19;
	sw->ports[7].total_credits = 60;
	sw->ports[7].ctl_credits = 2;
	sw->ports[7].dual_link_port = &sw->ports[8];

	sw->ports[8].config.type = TB_TYPE_PORT;
	sw->ports[8].config.max_in_hop_id = 19;
	sw->ports[8].config.max_out_hop_id = 19;
	sw->ports[8].total_credits = 60;
	sw->ports[8].ctl_credits = 2;
	sw->ports[8].dual_link_port = &sw->ports[7];
	sw->ports[8].link_nr = 1;

	sw->ports[9].config.type = TB_TYPE_PCIE_UP;
	sw->ports[9].config.max_in_hop_id = 8;
	sw->ports[9].config.max_out_hop_id = 8;

	sw->ports[10].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[10].config.max_in_hop_id = 8;
	sw->ports[10].config.max_out_hop_id = 8;

	sw->ports[11].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[11].config.max_in_hop_id = 8;
	sw->ports[11].config.max_out_hop_id = 8;

	sw->ports[12].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[12].config.max_in_hop_id = 8;
	sw->ports[12].config.max_out_hop_id = 8;

	sw->ports[13].config.type = TB_TYPE_DP_HDMI_OUT;
	sw->ports[13].config.max_in_hop_id = 9;
	sw->ports[13].config.max_out_hop_id = 9;
	sw->ports[13].cap_adap = -1;

	sw->ports[14].config.type = TB_TYPE_DP_HDMI_OUT;
	sw->ports[14].config.max_in_hop_id = 9;
	sw->ports[14].config.max_out_hop_id = 9;
	sw->ports[14].cap_adap = -1;

	sw->ports[15].disabled = true;

	sw->ports[16].config.type = TB_TYPE_USB3_UP;
	sw->ports[16].config.max_in_hop_id = 8;
	sw->ports[16].config.max_out_hop_id = 8;

	sw->ports[17].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[17].config.max_in_hop_id = 8;
	sw->ports[17].config.max_out_hop_id = 8;

	sw->ports[18].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[18].config.max_in_hop_id = 8;
	sw->ports[18].config.max_out_hop_id = 8;

	sw->ports[19].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[19].config.max_in_hop_id = 8;
	sw->ports[19].config.max_out_hop_id = 8;

	if (!parent)
		return sw;

	/* Link them */
	upstream_port = tb_upstream_port(sw);
	port = tb_port_at(route, parent);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;

		if (bonded) {
			/* Bonding is used */
			port->bonded = true;
			port->total_credits *= 2;
			port->dual_link_port->bonded = true;
			port->dual_link_port->total_credits = 0;
			upstream_port->bonded = true;
			upstream_port->total_credits *= 2;
			upstream_port->dual_link_port->bonded = true;
			upstream_port->dual_link_port->total_credits = 0;
		}
	}

	return sw;
}

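/* Like alloc_dev_default() but adapters 13 and 14 are DP IN instead of DP OUT */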
static struct tb_switch *alloc_dev_with_dpin(struct kunit *test,
					     struct tb_switch *parent,
					     u64 route, bool bonded)
{
	struct tb_switch *sw;

	sw = alloc_dev_default(test, parent, route, bonded);
	if (!sw)
		return NULL;

	sw->ports[13].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[13].config.max_in_hop_id = 9;
	sw->ports[13].config.max_out_hop_id = 9;

	sw->ports[14].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[14].config.max_in_hop_id = 9;
	sw->ports[14].config.max_out_hop_id = 9;

	return sw;
}

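/*
 * USB4 device router without DP adapters: compared to the default
 * device, ports 5-8 (extra lanes), 11-14 (extra PCIe and both DP) and
 * 18-19 (extra USB3) are disabled.
 */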
static struct tb_switch *alloc_dev_without_dp(struct kunit *test,
					      struct tb_switch *parent,
					      u64 route, bool bonded)
{
	struct tb_switch *sw;
	int i;

	sw = alloc_dev_default(test, parent, route, bonded);
	if (!sw)
		return NULL;
	/*
	 * Device with:
	 * 2x USB4 Adapters (adapters 1,2 and 3,4),
	 * 1x PCIe Upstream (adapter 9),
	 * 1x PCIe Downstream (adapter 10),
	 * 1x USB3 Upstream (adapter 16),
	 * 1x USB3 Downstream (adapter 17)
	 */
	for (i = 5; i <= 8; i++)
		sw->ports[i].disabled = true;

	for (i = 11; i <= 14; i++)
		sw->ports[i].disabled = true;

	sw->ports[13].cap_adap = 0;
	sw->ports[14].cap_adap = 0;

	for (i = 18; i <= 19; i++)
		sw->ports[i].disabled = true;

	sw->generation = 4;
	sw->credit_allocation = true;
	sw->max_usb3_credits = 109;
	sw->min_dp_aux_credits = 0;
	sw->min_dp_main_credits = 0;
	sw->max_pcie_credits = 30;
	sw->max_dma_credits = 1;

	return sw;
}

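/*
 * Same topology as alloc_dev_default() but the router is USB4
 * (generation 4) with credit allocation parameters set.
 */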
static struct tb_switch *alloc_dev_usb4(struct kunit *test,
					struct tb_switch *parent,
					u64 route, bool bonded)
{
	struct tb_switch *sw;

	sw = alloc_dev_default(test, parent, route, bonded);
	if (!sw)
		return NULL;

	sw->generation = 4;
	sw->credit_allocation = true;
	sw->max_usb3_credits = 14;
	sw->min_dp_aux_credits = 1;
	sw->min_dp_main_credits = 18;
	sw->max_pcie_credits = 32;
	sw->max_dma_credits = 14;

	return sw;
}

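/*
 * Degenerate path where source and destination are the same port: the
 * walk must return that single port once and then terminate.
 */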
static void tb_test_path_basic(struct kunit *test)
{
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host;

	host = alloc_host(test);

	src_port = &host->ports[5];
	dst_port = src_port;

	p = tb_next_port_on_path(src_port, dst_port, NULL);
	KUNIT_EXPECT_PTR_EQ(test, p, dst_port);

	p = tb_next_port_on_path(src_port, dst_port, p);
	KUNIT_EXPECT_TRUE(test, !p);
}

static void tb_test_path_not_connected_walk(struct kunit *test)
{
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev;

	host = alloc_host(test);
	/* No connection between host and dev */
	dev = alloc_dev_default(test, NULL, 3, true);

	src_port = &host->ports[12];
	dst_port = &dev->ports[16];

	p = tb_next_port_on_path(src_port, dst_port, NULL);
	KUNIT_EXPECT_PTR_EQ(test, p, src_port);

	p = tb_next_port_on_path(src_port, dst_port, p);
	KUNIT_EXPECT_PTR_EQ(test, p, &host->ports[3]);

	p = tb_next_port_on_path(src_port, dst_port, p);
	KUNIT_EXPECT_TRUE(test, !p);

	/* Other direction */

	p = tb_next_port_on_path(dst_port, src_port, NULL);
	KUNIT_EXPECT_PTR_EQ(test, p, dst_port);

	p = tb_next_port_on_path(dst_port, src_port, p);
	KUNIT_EXPECT_PTR_EQ(test, p, &dev->ports[1]);

	p = tb_next_port_on_path(dst_port, src_port, p);
	KUNIT_EXPECT_TRUE(test, !p);
}

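/* Expected router route, port number and adapter type at each step of a walk */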
struct port_expectation {
	u64 route;
	u8 port;
	enum tb_port_type type;
};

static void tb_test_path_single_hop_walk(struct kunit *test)
{
	/*
	 * Walks from Host PCIe downstream port to Device #1 PCIe
	 * upstream port.
	 *
	 *   [Host]
	 *   1 |
	 *   1 |
	 *  [Device]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x0, .port = 8, .type = TB_TYPE_PCIE_DOWN },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 9, .type = TB_TYPE_PCIE_UP },
	};
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev;
	int i;

	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 1, true);

	src_port = &host->ports[8];
	dst_port = &dev->ports[9];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_daisy_chain_walk(struct kunit *test)
{
	/*
	 * Walks from Host DP IN to Device #2 DP OUT.
	 *
	 *           [Host]
	 *            1 |
	 *            1 |
	 *         [Device #1]
	 *       3 /
	 *      1 /
	 * [Device #2]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
	};
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev1, *dev2;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);

	src_port = &host->ports[5];
	dst_port = &dev2->ports[13];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_simple_tree_walk(struct kunit *test)
{
	/*
	 * Walks from Host DP IN to Device #3 DP OUT.
	 *
	 *           [Host]
	 *            1 |
	 *            1 |
	 *         [Device #1]
	 *       3 /   | 5  \ 7
	 *      1 /    |     \ 1
	 * [Device #2] |    [Device #4]
	 *             | 1
	 *         [Device #3]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 5, .type = TB_TYPE_PORT },
		{ .route = 0x501, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x501, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
	};
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev1, *dev3;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_default(test, dev1, 0x501, true);
	alloc_dev_default(test, dev1, 0x701, true);

	src_port = &host->ports[5];
	dst_port = &dev3->ports[13];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_complex_tree_walk(struct kunit *test)
{
	/*
	 * Walks from Device #3 DP IN to Device #9 DP OUT.
	 *
	 *           [Host]
	 *            1 |
	 *            1 |
	 *         [Device #1]
	 *       3 /   | 5  \ 7
	 *      1 /    |     \ 1
	 * [Device #2] |    [Device #5]
	 *    5 |      | 1         \ 7
	 *    1 |  [Device #4]      \ 1
	 * [Device #3]             [Device #6]
	 *                       3 /
	 *                      1 /
	 *                    [Device #7]
	 *                  3 /      | 5
	 *                 1 /       |
	 *               [Device #8] | 1
	 *                       [Device #9]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x50301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x50301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 5, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 7, .type = TB_TYPE_PORT },
		{ .route = 0x701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x701, .port = 7, .type = TB_TYPE_PORT },
		{ .route = 0x70701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x70701, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3070701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3070701, .port = 5, .type = TB_TYPE_PORT },
		{ .route = 0x503070701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x503070701, .port = 14, .type = TB_TYPE_DP_HDMI_OUT },
	};
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev5, *dev6, *dev7, *dev9;
	struct tb_port *src_port, *dst_port, *p;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_with_dpin(test, dev2, 0x50301, true);
	alloc_dev_default(test, dev1, 0x501, true);
	dev5 = alloc_dev_default(test, dev1, 0x701, true);
	dev6 = alloc_dev_default(test, dev5, 0x70701, true);
	dev7 = alloc_dev_default(test, dev6, 0x3070701, true);
	alloc_dev_default(test, dev7, 0x303070701, true);
	dev9 = alloc_dev_default(test, dev7, 0x503070701, true);

	src_port = &dev3->ports[13];
	dst_port = &dev9->ports[14];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_max_length_walk(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
	struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
	struct tb_port *src_port, *dst_port, *p;
	int i;

	/*
	 * Walks from Device #6 DP IN to Device #12 DP OUT.
	 *
	 *          [Host]
	 *         1 /  \ 3
	 *        1 /    \ 1
	 * [Device #1]   [Device #7]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #2]   [Device #8]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #3]   [Device #9]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #4]   [Device #10]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #5]   [Device #11]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #6]   [Device #12]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x30303030301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x30303030301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x303030301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x303030301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3030301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3030301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x30301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x30301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x0, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x30303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x30303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3030303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3030303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x303030303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x303030303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x30303030303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x30303030303, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
	};

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_default(test, dev2, 0x30301, true);
	dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
	dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
	dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
	dev7 = alloc_dev_default(test, host, 0x3, true);
	dev8 = alloc_dev_default(test, dev7, 0x303, true);
	dev9 = alloc_dev_default(test, dev8, 0x30303, true);
	dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
	dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
	dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);

	src_port = &dev6->ports[13];
	dst_port = &dev12->ports[13];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

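/*
 * Path allocation must fail when there is no physical connection
 * between the two ports, regardless of which lane is requested.
 */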
static void tb_test_path_not_connected(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2;
	struct tb_port *down, *up;
	struct tb_path *path;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x3, false);
	/* Not connected to anything */
	dev2 = alloc_dev_default(test, NULL, 0x303, false);

	down = &dev1->ports[10];
	up = &dev2->ports[9];

	path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
	KUNIT_ASSERT_NULL(test, path);
	path = tb_path_alloc(NULL, down, 8, up, 8, 1, "PCIe Down");
	KUNIT_ASSERT_NULL(test, path);
}

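/* Expected ingress and egress adapter for each hop of an allocated path */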
struct hop_expectation {
	u64 route;
	u8 in_port;
	enum tb_port_type in_type;
	u8 out_port;
	enum tb_port_type out_type;
};

static void tb_test_path_not_bonded_lane0(struct kunit *test)
{
	/*
	 * PCIe path from host to device using lane 0.
	 *
	 *   [Host]
	 *   3 |: 4
	 *   1 |: 2
	 *  [Device]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 9,
			.in_type = TB_TYPE_PCIE_DOWN,
			.out_port = 3,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x3,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 9,
			.out_type = TB_TYPE_PCIE_UP,
		},
	};
	struct tb_switch *host, *dev;
	struct tb_port *down, *up;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x3, false);

	down = &host->ports[9];
	up = &dev->ports[9];

	path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_not_bonded_lane1(struct kunit *test)
{
	/*
	 * DP Video path from host to device using lane 1. Paths like
	 * these are only used with Thunderbolt 1 devices where lane
	 * bonding is not possible. USB4 specifically does not allow
	 * paths like this (you either use lane 0 where lane 1 is
	 * disabled or both lanes are bonded).
	 *
	 *   [Host]
	 *   1 :| 2
	 *   1 :| 2
	 *  [Device]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 5,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 13,
			.out_type = TB_TYPE_DP_HDMI_OUT,
		},
	};
	struct tb_switch *host, *dev;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x1, false);

	in = &host->ports[5];
	out = &dev->ports[13];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_not_bonded_lane1_chain(struct kunit *test)
{
	/*
	 * DP Video path from host to device 3 using lane 1.
	 *
	 *    [Host]
	 *    1 :| 2
	 *    1 :| 2
	 *  [Device #1]
	 *    7 :| 8
	 *    1 :| 2
	 *  [Device #2]
	 *    5 :| 6
	 *    1 :| 2
	 *  [Device #3]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 5,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 8,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 6,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x50701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 13,
			.out_type = TB_TYPE_DP_HDMI_OUT,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, false);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_default(test, dev2, 0x50701, false);

	in = &host->ports[5];
	out = &dev3->ports[13];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_not_bonded_lane1_chain_reverse(struct kunit *test)
{
	/*
	 * DP Video path from device 3 to host using lane 1.
	 *
	 *    [Host]
	 *    1 :| 2
	 *    1 :| 2
	 *  [Device #1]
	 *    7 :| 8
	 *    1 :| 2
	 *  [Device #2]
	 *    5 :| 6
	 *    1 :| 2
	 *  [Device #3]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x50701,
			.in_port = 13,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 6,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 8,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x0,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 5,
			.out_type = TB_TYPE_DP_HDMI_IN,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, false);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_with_dpin(test, dev2, 0x50701, false);

	in = &dev3->ports[13];
	out = &host->ports[5];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_mixed_chain(struct kunit *test)
{
	/*
	 * DP Video path from host to device 4 where the first and last
	 * links are bonded.
	 *
	 *    [Host]
	 *    1 |
	 *    1 |
	 *  [Device #1]
	 *    7 :| 8
	 *    1 :| 2
	 *  [Device #2]
	 *    5 :| 6
	 *    1 :| 2
	 *  [Device #3]
	 *    3 |
	 *    1 |
	 *  [Device #4]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 5,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 1,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 8,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 6,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x50701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 3,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x3050701,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 13,
			.out_type = TB_TYPE_DP_HDMI_OUT,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
	dev4 = alloc_dev_default(test, dev3, 0x3050701, true);

	in = &host->ports[5];
	out = &dev4->ports[13];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_mixed_chain_reverse(struct kunit *test)
{
	/*
	 * DP Video path from device 4 to host where the first and last
	 * links are bonded.
	 *
	 *    [Host]
	 *    1 |
	 *    1 |
	 *  [Device #1]
	 *    7 :| 8
	 *    1 :| 2
	 *  [Device #2]
	 *    5 :| 6
	 *    1 :| 2
	 *  [Device #3]
	 *    3 |
	 *    1 |
	 *  [Device #4]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x3050701,
			.in_port = 13,
			.in_type = TB_TYPE_DP_HDMI_OUT,
			.out_port = 1,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x50701,
			.in_port = 3,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 6,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 8,
			.in_type = TB_TYPE_PORT,
			.out_port = 1,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x0,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 5,
			.out_type = TB_TYPE_DP_HDMI_IN,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
	dev4 = alloc_dev_default(test, dev3, 0x3050701, true);

	in = &dev4->ports[13];
	out = &host->ports[5];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_tunnel_pcie(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2;
	struct tb_tunnel *tunnel1, *tunnel2;
	struct tb_port *down, *up;

	/*
	 * Create PCIe tunnel between host and two devices.
	 *
	 *   [Host]
	 *    1 |
	 *    1 |
	 *  [Device #1]
	 *    5 |
	 *    1 |
	 *  [Device #2]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x501, true);

	down = &host->ports[8];
	up = &dev1->ports[9];
	tunnel1 = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_NOT_NULL(test, tunnel1);
	KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_PCI);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
	KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);

	down = &dev1->ports[10];
	up = &dev2->ports[9];
	tunnel2 = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_NOT_NULL(test, tunnel2);
	KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_PCI);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
	KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);

	tb_tunnel_free(tunnel2);
	tb_tunnel_free(tunnel1);
}

static void tb_test_tunnel_dp(struct kunit *test)
{
	struct tb_switch *host, *dev;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Create DP tunnel between Host and Device
	 *
	 *   [Host]
	 *   3 |
	 *   1 |
	 *  [Device]
	 */
	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x3, true);

	in = &host->ports[5];
	out = &dev->ports[13];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[1].out_port, in);
	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dp_chain(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev4;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Create DP tunnel from Host DP IN to Device #4 DP OUT.
	 *
	 *           [Host]
	 *            1 |
	 *            1 |
	 *         [Device #1]
	 *       3 /   | 5  \ 7
	 *      1 /    |     \ 1
	 * [Device #2] |    [Device #4]
	 *             | 1
	 *         [Device #3]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	alloc_dev_default(test, dev1, 0x301, true);
	alloc_dev_default(test, dev1, 0x501, true);
	dev4 = alloc_dev_default(test, dev1, 0x701, true);

	in = &host->ports[5];
	out = &dev4->ports[14];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[2].out_port, in);
	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dp_tree(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev5;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Create DP tunnel from Device #2 DP IN to Device #5 DP OUT.
	 *
	 *          [Host]
	 *           3 |
	 *           1 |
	 *         [Device #1]
	 *       3 /   | 5  \ 7
	 *      1 /    |     \ 1
	 * [Device #2] |    [Device #4]
	 *             | 1
	 *         [Device #3]
	 *             | 5
	 *             | 1
	 *         [Device #5]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x3, true);
	dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
	dev3 = alloc_dev_default(test, dev1, 0x503, true);
	alloc_dev_default(test, dev1, 0x703, true);
	dev5 = alloc_dev_default(test, dev3, 0x50503, true);

	in = &dev2->ports[13];
	out = &dev5->ports[13];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 4);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[3].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 4);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[3].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 4);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[3].out_port, in);
	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dp_max_length(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
	struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Creates DP tunnel from Device #6 to Device #12.
	 *
	 *          [Host]
	 *         1 /  \ 3
	 *        1 /    \ 1
	 * [Device #1]   [Device #7]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #2]   [Device #8]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #3]   [Device #9]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #4]   [Device #10]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #5]   [Device #11]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #6]   [Device #12]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_default(test, dev2, 0x30301, true);
	dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
	dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
	dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
	dev7 = alloc_dev_default(test, host, 0x3, true);
	dev8 = alloc_dev_default(test, dev7, 0x303, true);
	dev9 = alloc_dev_default(test, dev8, 0x30303, true);
	dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
	dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
	dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);

	in = &dev6->ports[13];
	out = &dev12->ports[13];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 13);
	/* First hop */
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	/* Middle */
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].in_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].out_port,
			    &host->ports[3]);
	/* Last */
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[12].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 13);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].in_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].out_port,
			    &host->ports[3]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[12].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 13);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].in_port,
			    &host->ports[3]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].out_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[12].out_port, in);
	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_usb3(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2;
	struct tb_tunnel *tunnel1, *tunnel2;
	struct tb_port *down, *up;

	/*
	 * Create USB3 tunnel between host and two devices.
	 *
	 *   [Host]
	 *    1 |
	 *    1 |
	 *  [Device #1]
	 *          \ 7
	 *           \ 1
	 *         [Device #2]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x701, true);

	down = &host->ports[12];
	up = &dev1->ports[16];
	tunnel1 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel1);
	KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_USB3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
	KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);

	down = &dev1->ports[17];
	up = &dev2->ports[16];
	tunnel2 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel2);
	KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_USB3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
	KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);

	tb_tunnel_free(tunnel2);
	tb_tunnel_free(tunnel1);
}

static void tb_test_tunnel_port_on_path(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
	struct tb_port *in, *out, *port;
	struct tb_tunnel *dp_tunnel;

	/*
	 *          [Host]
	 *           3 |
	 *           1 |
	 *         [Device #1]
	 *       3 /   | 5  \ 7
	 *      1 /    |     \ 1
	 * [Device #2] |    [Device #4]
	 *             | 1
	 *         [Device #3]
	 *             | 5
	 *             | 1
	 *         [Device #5]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x3, true);
	dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
	dev3 = alloc_dev_default(test, dev1, 0x503, true);
	dev4 = alloc_dev_default(test, dev1, 0x703, true);
	dev5 = alloc_dev_default(test, dev3, 0x50503, true);

	in = &dev2->ports[13];
	out = &dev5->ports[13];

	dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, dp_tunnel);

	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, out));

	port = &host->ports[8];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &host->ports[3];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev1->ports[1];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev1->ports[3];
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev1->ports[5];
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev1->ports[7];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev3->ports[1];
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev5->ports[1];
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev4->ports[1];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	tb_tunnel_free(dp_tunnel);
}

1707 static void tb_test_tunnel_dma(struct kunit *test)
1708 {
1709 	struct tb_port *nhi, *port;
1710 	struct tb_tunnel *tunnel;
1711 	struct tb_switch *host;
1712 
1713 	/*
1714 	 * Create DMA tunnel from NHI to port 1 and back.
1715 	 *
1716 	 *   [Host 1]
1717 	 *    1 ^ In HopID 1 -> Out HopID 8
1718 	 *      |
1719 	 *      v In HopID 8 -> Out HopID 1
1720 	 * ............ Domain border
1721 	 *      |
1722 	 *   [Host 2]
1723 	 */
1724 	host = alloc_host(test);
1725 	nhi = &host->ports[7];
1726 	port = &host->ports[1];
1727 
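	/*
	 * The four trailing arguments are transmit_path, transmit_ring,
	 * receive_path and receive_ring: HopID 8 is used across the
	 * domain border and HopID 1 is the NHI ring, matching the
	 * diagram above.
	 */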
1728 	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
1729 	KUNIT_ASSERT_NOT_NULL(test, tunnel);
1730 	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
1731 	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1732 	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1733 	KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);
1734 	/* RX path */
1735 	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
1736 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
1737 	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
1738 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
1739 	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 1);
1740 	/* TX path */
1741 	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 1);
1742 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
1743 	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
1744 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].out_port, port);
1745 	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].next_hop_index, 8);
1746 
1747 	tb_tunnel_free(tunnel);
1748 }
1749 
1750 static void tb_test_tunnel_dma_rx(struct kunit *test)
1751 {
1752 	struct tb_port *nhi, *port;
1753 	struct tb_tunnel *tunnel;
1754 	struct tb_switch *host;
1755 
1756 	/*
1757 	 * Create DMA RX tunnel from port 1 to NHI.
1758 	 *
1759 	 *   [Host 1]
1760 	 *    1 ^
1761 	 *      |
1762 	 *      | In HopID 15 -> Out HopID 2
1763 	 * ............ Domain border
1764 	 *      |
1765 	 *   [Host 2]
1766 	 */
1767 	host = alloc_host(test);
1768 	nhi = &host->ports[7];
1769 	port = &host->ports[1];
1770 
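	/*
	 * transmit_path/transmit_ring of -1 mean that no TX path is
	 * created, so the tunnel is expected to consist of a single RX
	 * path.
	 */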
1771 	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 2);
1772 	KUNIT_ASSERT_NOT_NULL(test, tunnel);
1773 	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
1774 	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1775 	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1776 	KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);
1777 	/* RX path */
1778 	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
1779 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
1780 	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 15);
1781 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
1782 	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 2);
1783 
1784 	tb_tunnel_free(tunnel);
1785 }
1786 
1787 static void tb_test_tunnel_dma_tx(struct kunit *test)
1788 {
1789 	struct tb_port *nhi, *port;
1790 	struct tb_tunnel *tunnel;
1791 	struct tb_switch *host;
1792 
1793 	/*
1794 	 * Create DMA TX tunnel from NHI to port 1.
1795 	 *
1796 	 *   [Host 1]
1797 	 *    1 | In HopID 2 -> Out HopID 15
1798 	 *      |
1799 	 *      v
1800 	 * ............ Domain border
1801 	 *      |
1802 	 *   [Host 2]
1803 	 */
1804 	host = alloc_host(test);
1805 	nhi = &host->ports[7];
1806 	port = &host->ports[1];
1807 
1808 	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 2, -1, -1);
1809 	KUNIT_ASSERT_NOT_NULL(test, tunnel);
1810 	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
1811 	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1812 	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1813 	KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);
1814 	/* TX path */
1815 	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
1816 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, nhi);
1817 	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 2);
1818 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, port);
1819 	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 15);
1820 
1821 	tb_tunnel_free(tunnel);
1822 }
1823 
1824 static void tb_test_tunnel_dma_chain(struct kunit *test)
1825 {
1826 	struct tb_switch *host, *dev1, *dev2;
1827 	struct tb_port *nhi, *port;
1828 	struct tb_tunnel *tunnel;
1829 
1830 	/*
1831 	 * Create DMA tunnel from NHI to Device #2 port 3 and back.
1832 	 *
1833 	 *   [Host 1]
1834 	 *    1 ^ In HopID 1 -> Out HopID x
1835 	 *      |
1836 	 *    1 | In HopID x -> Out HopID 1
1837 	 *  [Device #1]
1838 	 *         7 \
1839 	 *          1 \
1840 	 *         [Device #2]
1841 	 *           3 | In HopID x -> Out HopID 8
1842 	 *             |
1843 	 *             v In HopID 8 -> Out HopID x
1844 	 * ............ Domain border
1845 	 *             |
1846 	 *          [Host 2]
1847 	 */
1848 	host = alloc_host(test);
1849 	dev1 = alloc_dev_default(test, host, 0x1, true);
1850 	dev2 = alloc_dev_default(test, dev1, 0x701, true);
1851 
1852 	nhi = &host->ports[7];
1853 	port = &dev2->ports[3];
1854 	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
1855 	KUNIT_ASSERT_NOT_NULL(test, tunnel);
1856 	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
1857 	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1858 	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1859 	KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);
1860 	/* RX path */
1861 	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
1862 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
1863 	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
1864 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port,
1865 			    &dev2->ports[1]);
1866 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].in_port,
1867 			    &dev1->ports[7]);
1868 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port,
1869 			    &dev1->ports[1]);
1870 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].in_port,
1871 			    &host->ports[1]);
1872 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, nhi);
1873 	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[2].next_hop_index, 1);
1874 	/* TX path */
1875 	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
1876 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
1877 	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
1878 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].in_port,
1879 			    &dev1->ports[1]);
1880 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port,
1881 			    &dev1->ports[7]);
1882 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].in_port,
1883 			    &dev2->ports[1]);
1884 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, port);
1885 	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[2].next_hop_index, 8);
1886 
1887 	tb_tunnel_free(tunnel);
1888 }
1889 
1890 static void tb_test_tunnel_dma_match(struct kunit *test)
1891 {
1892 	struct tb_port *nhi, *port;
1893 	struct tb_tunnel *tunnel;
1894 	struct tb_switch *host;
1895 
1896 	host = alloc_host(test);
1897 	nhi = &host->ports[7];
1898 	port = &host->ports[1];
1899 
1900 	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, 15, 1);
1901 	KUNIT_ASSERT_NOT_NULL(test, tunnel);
1902 
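	/*
	 * In tb_tunnel_match_dma() a value of -1 acts as a wildcard
	 * matching any HopID/ring, whereas explicit values must match
	 * exactly.
	 */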
1903 	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
1904 	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, 1, 15, 1));
1905 	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
1906 	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
1907 	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
1908 	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
1909 	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
1910 	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 1));
1911 	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
1912 	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, -1, 8, -1));
1913 
1914 	tb_tunnel_free(tunnel);
1915 
1916 	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, -1, -1);
1917 	KUNIT_ASSERT_NOT_NULL(test, tunnel);
1918 	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
1919 	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
1920 	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
1921 	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
1922 	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
1923 	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
1924 	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
1925 
1926 	tb_tunnel_free(tunnel);
1927 
1928 	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 11);
1929 	KUNIT_ASSERT_NOT_NULL(test, tunnel);
1930 	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 11));
1931 	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
1932 	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 11));
1933 	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
1934 	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
1935 	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 10, 11));
1936 	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
1937 
1938 	tb_tunnel_free(tunnel);
1939 }
1940 
1941 static void tb_test_credit_alloc_legacy_not_bonded(struct kunit *test)
1942 {
1943 	struct tb_switch *host, *dev;
1944 	struct tb_port *up, *down;
1945 	struct tb_tunnel *tunnel;
1946 	struct tb_path *path;
1947 
1948 	host = alloc_host(test);
1949 	dev = alloc_dev_default(test, host, 0x1, false);
1950 
1951 	down = &host->ports[8];
1952 	up = &dev->ports[9];
1953 	tunnel = tb_tunnel_alloc_pci(NULL, up, down);
1954 	KUNIT_ASSERT_NOT_NULL(test, tunnel);
1955 	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
1956 
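	/* PCIe downstream path */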
1957 	path = tunnel->paths[0];
1958 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
1959 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
1960 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
1961 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
1962 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);
1963 
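	/* PCIe upstream path */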
1964 	path = tunnel->paths[1];
1965 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
1966 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
1967 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
1968 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
1969 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);
1970 
1971 	tb_tunnel_free(tunnel);
1972 }
1973 
1974 static void tb_test_credit_alloc_legacy_bonded(struct kunit *test)
1975 {
1976 	struct tb_switch *host, *dev;
1977 	struct tb_port *up, *down;
1978 	struct tb_tunnel *tunnel;
1979 	struct tb_path *path;
1980 
1981 	host = alloc_host(test);
1982 	dev = alloc_dev_default(test, host, 0x1, true);
1983 
1984 	down = &host->ports[8];
1985 	up = &dev->ports[9];
1986 	tunnel = tb_tunnel_alloc_pci(NULL, up, down);
1987 	KUNIT_ASSERT_NOT_NULL(test, tunnel);
1988 	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
1989 
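	/* PCIe downstream path */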
1990 	path = tunnel->paths[0];
1991 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
1992 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
1993 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
1994 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
1995 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
1996 
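	/* PCIe upstream path */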
1997 	path = tunnel->paths[1];
1998 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
1999 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2000 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2001 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2002 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2003 
2004 	tb_tunnel_free(tunnel);
2005 }
2006 
2007 static void tb_test_credit_alloc_pcie(struct kunit *test)
2008 {
2009 	struct tb_switch *host, *dev;
2010 	struct tb_port *up, *down;
2011 	struct tb_tunnel *tunnel;
2012 	struct tb_path *path;
2013 
2014 	host = alloc_host_usb4(test);
2015 	dev = alloc_dev_usb4(test, host, 0x1, true);
2016 
2017 	down = &host->ports[8];
2018 	up = &dev->ports[9];
2019 	tunnel = tb_tunnel_alloc_pci(NULL, up, down);
2020 	KUNIT_ASSERT_NOT_NULL(test, tunnel);
2021 	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2022 
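	/* PCIe downstream path */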
2023 	path = tunnel->paths[0];
2024 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2025 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2026 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2027 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2028 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2029 
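	/* PCIe upstream path */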
2030 	path = tunnel->paths[1];
2031 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2032 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2033 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2034 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2035 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
2036 
2037 	tb_tunnel_free(tunnel);
2038 }
2039 
2040 static void tb_test_credit_alloc_without_dp(struct kunit *test)
2041 {
2042 	struct tb_switch *host, *dev;
2043 	struct tb_port *up, *down;
2044 	struct tb_tunnel *tunnel;
2045 	struct tb_path *path;
2046 
2047 	host = alloc_host_usb4(test);
2048 	dev = alloc_dev_without_dp(test, host, 0x1, true);
2049 
2050 	/*
2051 	 * The device has no DP, therefore baMinDPmain = baMinDPaux = 0
2052 	 *
2053 	 * Create a PCIe path with fewer buffers than baMaxPCIe.
2054 	 *
2055 	 * For a device with the following buffer configuration:
2056 	 * baMaxUSB3 = 109
2057 	 * baMinDPaux = 0
2058 	 * baMinDPmain = 0
2059 	 * baMaxPCIe = 30
2060 	 * baMaxHI = 1
2061 	 * Remaining Buffers = Total - (CP + DP) = 120 - (2 + 0) = 118
2062 	 * PCIe Credits = Max(6, Min(baMaxPCIe, Remaining Buffers - baMaxUSB3))
2063 	 *		= Max(6, Min(30, 118 - 109)) = Max(6, 9) = 9
2064 	 */
2065 	down = &host->ports[8];
2066 	up = &dev->ports[9];
2067 	tunnel = tb_tunnel_alloc_pci(NULL, up, down);
2068 	KUNIT_ASSERT_NOT_NULL(test, tunnel);
2069 	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2070 
2071 	/* PCIe downstream path */
2072 	path = tunnel->paths[0];
2073 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2074 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2075 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2076 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2077 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 9U);
2078 
2079 	/* PCIe upstream path */
2080 	path = tunnel->paths[1];
2081 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2082 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2083 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2084 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2085 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
2086 
2087 	tb_tunnel_free(tunnel);
2088 }
2089 
2090 static void tb_test_credit_alloc_dp(struct kunit *test)
2091 {
2092 	struct tb_switch *host, *dev;
2093 	struct tb_port *in, *out;
2094 	struct tb_tunnel *tunnel;
2095 	struct tb_path *path;
2096 
2097 	host = alloc_host_usb4(test);
2098 	dev = alloc_dev_usb4(test, host, 0x1, true);
2099 
2100 	in = &host->ports[5];
2101 	out = &dev->ports[14];
2102 
2103 	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
2104 	KUNIT_ASSERT_NOT_NULL(test, tunnel);
2105 	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
2106 
2107 	/* Video (main) path */
2108 	path = tunnel->paths[0];
2109 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2110 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
2111 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2112 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
2113 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
2114 
2115 	/* AUX TX */
2116 	path = tunnel->paths[1];
2117 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2118 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2119 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2120 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2121 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2122 
2123 	/* AUX RX */
2124 	path = tunnel->paths[2];
2125 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2126 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2127 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2128 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2129 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2130 
2131 	tb_tunnel_free(tunnel);
2132 }
2133 
2134 static void tb_test_credit_alloc_usb3(struct kunit *test)
2135 {
2136 	struct tb_switch *host, *dev;
2137 	struct tb_port *up, *down;
2138 	struct tb_tunnel *tunnel;
2139 	struct tb_path *path;
2140 
2141 	host = alloc_host_usb4(test);
2142 	dev = alloc_dev_usb4(test, host, 0x1, true);
2143 
2144 	down = &host->ports[12];
2145 	up = &dev->ports[16];
2146 	tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
2147 	KUNIT_ASSERT_NOT_NULL(test, tunnel);
2148 	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2149 
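	/* USB3 downstream path */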
2150 	path = tunnel->paths[0];
2151 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2152 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2153 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2154 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2155 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2156 
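	/* USB3 upstream path */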
2157 	path = tunnel->paths[1];
2158 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2159 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2160 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2161 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2162 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2163 
2164 	tb_tunnel_free(tunnel);
2165 }
2166 
2167 static void tb_test_credit_alloc_dma(struct kunit *test)
2168 {
2169 	struct tb_switch *host, *dev;
2170 	struct tb_port *nhi, *port;
2171 	struct tb_tunnel *tunnel;
2172 	struct tb_path *path;
2173 
2174 	host = alloc_host_usb4(test);
2175 	dev = alloc_dev_usb4(test, host, 0x1, true);
2176 
2177 	nhi = &host->ports[7];
2178 	port = &dev->ports[3];
2179 
2180 	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
2181 	KUNIT_ASSERT_NOT_NULL(test, tunnel);
2182 	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2183 
2184 	/* DMA RX */
2185 	path = tunnel->paths[0];
2186 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2187 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2188 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2189 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2190 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2191 
2192 	/* DMA TX */
2193 	path = tunnel->paths[1];
2194 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2195 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2196 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2197 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2198 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2199 
2200 	tb_tunnel_free(tunnel);
2201 }
2202 
2203 static void tb_test_credit_alloc_dma_multiple(struct kunit *test)
2204 {
2205 	struct tb_tunnel *tunnel1, *tunnel2, *tunnel3;
2206 	struct tb_switch *host, *dev;
2207 	struct tb_port *nhi, *port;
2208 	struct tb_path *path;
2209 
2210 	host = alloc_host_usb4(test);
2211 	dev = alloc_dev_usb4(test, host, 0x1, true);
2212 
2213 	nhi = &host->ports[7];
2214 	port = &dev->ports[3];
2215 
2216 	/*
2217 	 * Create three DMA tunnels through the same ports. With the
2218 	 * default buffers we should be able to create two, and the
2219 	 * third one should fail.
2220 	 *
2221 	 * For the default host we have the following buffers for DMA:
2222 	 *
2223 	 *   120 - (2 + 2 * (1 + 0) + 32 + 64 + spare) = 20
2224 	 *
2225 	 * For the device we have the following:
2226 	 *
2227 	 *  120 - (2 + 2 * (1 + 18) + 14 + 32 + spare) = 34
2228 	 *
2229 	 * spare = 14 + 1 = 15
2230 	 *
2231 	 * So on the host the first tunnel gets 14, the second gets the
2232 	 * remaining 1, and then we run out of buffers.
2233 	 */
2234 	tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
2235 	KUNIT_ASSERT_NOT_NULL(test, tunnel1);
2236 	KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);
2237 
2238 	path = tunnel1->paths[0];
2239 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2240 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2241 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2242 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2243 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2244 
2245 	path = tunnel1->paths[1];
2246 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2247 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2248 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2249 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2250 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2251 
2252 	tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
2253 	KUNIT_ASSERT_NOT_NULL(test, tunnel2);
2254 	KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);
2255 
2256 	path = tunnel2->paths[0];
2257 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2258 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2259 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2260 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2261 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2262 
2263 	path = tunnel2->paths[1];
2264 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2265 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2266 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2267 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2268 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2269 
2270 	tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
2271 	KUNIT_ASSERT_NULL(test, tunnel3);
2272 
2273 	/*
2274 	 * Release the first DMA tunnel. That should make 14 buffers
2275 	 * available for the next tunnel.
2276 	 */
2277 	tb_tunnel_free(tunnel1);
2278 
2279 	tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
2280 	KUNIT_ASSERT_NOT_NULL(test, tunnel3);
2281 
2282 	path = tunnel3->paths[0];
2283 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2284 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2285 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2286 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2287 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2288 
2289 	path = tunnel3->paths[1];
2290 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2291 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2292 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2293 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2294 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2295 
2296 	tb_tunnel_free(tunnel3);
2297 	tb_tunnel_free(tunnel2);
2298 }
2299 
2300 static struct tb_tunnel *TB_TEST_PCIE_TUNNEL(struct kunit *test,
2301 			struct tb_switch *host, struct tb_switch *dev)
2302 {
2303 	struct tb_port *up, *down;
2304 	struct tb_tunnel *pcie_tunnel;
2305 	struct tb_path *path;
2306 
2307 	down = &host->ports[8];
2308 	up = &dev->ports[9];
2309 	pcie_tunnel = tb_tunnel_alloc_pci(NULL, up, down);
2310 	KUNIT_ASSERT_NOT_NULL(test, pcie_tunnel);
2311 	KUNIT_ASSERT_EQ(test, pcie_tunnel->npaths, (size_t)2);
2312 
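	/* PCIe downstream path */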
2313 	path = pcie_tunnel->paths[0];
2314 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2315 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2316 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2317 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2318 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2319 
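	/* PCIe upstream path */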
2320 	path = pcie_tunnel->paths[1];
2321 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2322 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2323 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2324 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2325 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
2326 
2327 	return pcie_tunnel;
2328 }
2329 
2330 static struct tb_tunnel *TB_TEST_DP_TUNNEL1(struct kunit *test,
2331 			struct tb_switch *host, struct tb_switch *dev)
2332 {
2333 	struct tb_port *in, *out;
2334 	struct tb_tunnel *dp_tunnel1;
2335 	struct tb_path *path;
2336 
2337 	in = &host->ports[5];
2338 	out = &dev->ports[13];
2339 	dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
2340 	KUNIT_ASSERT_NOT_NULL(test, dp_tunnel1);
2341 	KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3);
2342 
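	/* Video (main) path */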
2343 	path = dp_tunnel1->paths[0];
2344 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2345 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
2346 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2347 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
2348 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
2349 
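	/* AUX TX */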
2350 	path = dp_tunnel1->paths[1];
2351 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2352 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2353 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2354 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2355 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2356 
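	/* AUX RX */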
2357 	path = dp_tunnel1->paths[2];
2358 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2359 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2360 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2361 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2362 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2363 
2364 	return dp_tunnel1;
2365 }
2366 
2367 static struct tb_tunnel *TB_TEST_DP_TUNNEL2(struct kunit *test,
2368 			struct tb_switch *host, struct tb_switch *dev)
2369 {
2370 	struct tb_port *in, *out;
2371 	struct tb_tunnel *dp_tunnel2;
2372 	struct tb_path *path;
2373 
2374 	in = &host->ports[6];
2375 	out = &dev->ports[14];
2376 	dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
2377 	KUNIT_ASSERT_NOT_NULL(test, dp_tunnel2);
2378 	KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3);
2379 
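	/* Video (main) path */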
2380 	path = dp_tunnel2->paths[0];
2381 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2382 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
2383 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2384 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
2385 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
2386 
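	/* AUX TX */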
2387 	path = dp_tunnel2->paths[1];
2388 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2389 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2390 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2391 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2392 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2393 
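	/* AUX RX */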
2394 	path = dp_tunnel2->paths[2];
2395 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2396 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2397 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2398 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2399 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2400 
2401 	return dp_tunnel2;
2402 }
2403 
2404 static struct tb_tunnel *TB_TEST_USB3_TUNNEL(struct kunit *test,
2405 			struct tb_switch *host, struct tb_switch *dev)
2406 {
2407 	struct tb_port *up, *down;
2408 	struct tb_tunnel *usb3_tunnel;
2409 	struct tb_path *path;
2410 
2411 	down = &host->ports[12];
2412 	up = &dev->ports[16];
2413 	usb3_tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
2414 	KUNIT_ASSERT_NOT_NULL(test, usb3_tunnel);
2415 	KUNIT_ASSERT_EQ(test, usb3_tunnel->npaths, (size_t)2);
2416 
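	/* USB3 downstream path */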
2417 	path = usb3_tunnel->paths[0];
2418 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2419 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2420 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2421 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2422 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2423 
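	/* USB3 upstream path */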
2424 	path = usb3_tunnel->paths[1];
2425 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2426 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2427 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2428 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2429 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2430 
2431 	return usb3_tunnel;
2432 }
2433 
2434 static struct tb_tunnel *TB_TEST_DMA_TUNNEL1(struct kunit *test,
2435 			struct tb_switch *host, struct tb_switch *dev)
2436 {
2437 	struct tb_port *nhi, *port;
2438 	struct tb_tunnel *dma_tunnel1;
2439 	struct tb_path *path;
2440 
2441 	nhi = &host->ports[7];
2442 	port = &dev->ports[3];
2443 	dma_tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
2444 	KUNIT_ASSERT_NOT_NULL(test, dma_tunnel1);
2445 	KUNIT_ASSERT_EQ(test, dma_tunnel1->npaths, (size_t)2);
2446 
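	/* DMA RX */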
2447 	path = dma_tunnel1->paths[0];
2448 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2449 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2450 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2451 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2452 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2453 
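	/* DMA TX */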
2454 	path = dma_tunnel1->paths[1];
2455 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2456 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2457 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2458 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2459 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2460 
2461 	return dma_tunnel1;
2462 }
2463 
2464 static struct tb_tunnel *TB_TEST_DMA_TUNNEL2(struct kunit *test,
2465 			struct tb_switch *host, struct tb_switch *dev)
2466 {
2467 	struct tb_port *nhi, *port;
2468 	struct tb_tunnel *dma_tunnel2;
2469 	struct tb_path *path;
2470 
2471 	nhi = &host->ports[7];
2472 	port = &dev->ports[3];
2473 	dma_tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
2474 	KUNIT_ASSERT_NOT_NULL(test, dma_tunnel2);
2475 	KUNIT_ASSERT_EQ(test, dma_tunnel2->npaths, (size_t)2);
2476 
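	/* DMA RX */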
2477 	path = dma_tunnel2->paths[0];
2478 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2479 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2480 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2481 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2482 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2483 
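	/* DMA TX */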
2484 	path = dma_tunnel2->paths[1];
2485 	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2486 	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2487 	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2488 	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2489 	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2490 
2491 	return dma_tunnel2;
2492 }
2493 
2494 static void tb_test_credit_alloc_all(struct kunit *test)
2495 {
2496 	struct tb_tunnel *pcie_tunnel, *dp_tunnel1, *dp_tunnel2, *usb3_tunnel;
2497 	struct tb_tunnel *dma_tunnel1, *dma_tunnel2;
2498 	struct tb_switch *host, *dev;
2499 
2500 	/*
2501 	 * Create PCIe, 2 x DP, USB 3.x and two DMA tunnels from the host to
2502 	 * the device. The expectation is that all of these can be established
2503 	 * with the default credit allocation found in Intel hardware.
2504 	 */
2505 
2506 	host = alloc_host_usb4(test);
2507 	dev = alloc_dev_usb4(test, host, 0x1, true);
2508 
2509 	pcie_tunnel = TB_TEST_PCIE_TUNNEL(test, host, dev);
2510 	dp_tunnel1 = TB_TEST_DP_TUNNEL1(test, host, dev);
2511 	dp_tunnel2 = TB_TEST_DP_TUNNEL2(test, host, dev);
2512 	usb3_tunnel = TB_TEST_USB3_TUNNEL(test, host, dev);
2513 	dma_tunnel1 = TB_TEST_DMA_TUNNEL1(test, host, dev);
2514 	dma_tunnel2 = TB_TEST_DMA_TUNNEL2(test, host, dev);
2515 
2516 	tb_tunnel_free(dma_tunnel2);
2517 	tb_tunnel_free(dma_tunnel1);
2518 	tb_tunnel_free(usb3_tunnel);
2519 	tb_tunnel_free(dp_tunnel2);
2520 	tb_tunnel_free(dp_tunnel1);
2521 	tb_tunnel_free(pcie_tunnel);
2522 }
2523 
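/*
 * Judging from the entries below, each property is encoded as four
 * dwords: two dwords holding the 8-character key, one dword with the
 * type character ('v' immediate value, 't' text, 'D' directory) and
 * the length, and one dword carrying the immediate value or the dword
 * offset of the property data.
 */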
2524 static const u32 root_directory[] = {
2525 	0x55584401,	/* "UXD" v1 */
2526 	0x00000018,	/* Root directory length */
2527 	0x76656e64,	/* "vend" */
2528 	0x6f726964,	/* "orid" */
2529 	0x76000001,	/* "v" R 1 */
2530 	0x00000a27,	/* Immediate value, ! Vendor ID */
2531 	0x76656e64,	/* "vend" */
2532 	0x6f726964,	/* "orid" */
2533 	0x74000003,	/* "t" R 3 */
2534 	0x0000001a,	/* Text leaf offset, ("Apple Inc.") */
2535 	0x64657669,	/* "devi" */
2536 	0x63656964,	/* "ceid" */
2537 	0x76000001,	/* "v" R 1 */
2538 	0x0000000a,	/* Immediate value, ! Device ID */
2539 	0x64657669,	/* "devi" */
2540 	0x63656964,	/* "ceid" */
2541 	0x74000003,	/* "t" R 3 */
2542 	0x0000001d,	/* Text leaf offset, ("Macintosh") */
2543 	0x64657669,	/* "devi" */
2544 	0x63657276,	/* "cerv" */
2545 	0x76000001,	/* "v" R 1 */
2546 	0x80000100,	/* Immediate value, Device Revision */
2547 	0x6e657477,	/* "netw" */
2548 	0x6f726b00,	/* "ork" */
2549 	0x44000014,	/* "D" R 20 */
2550 	0x00000021,	/* Directory data offset, (Network Directory) */
2551 	0x4170706c,	/* "Appl" */
2552 	0x6520496e,	/* "e In" */
2553 	0x632e0000,	/* "c." ! */
2554 	0x4d616369,	/* "Maci" */
2555 	0x6e746f73,	/* "ntos" */
2556 	0x68000000,	/* "h" */
2557 	0x00000000,	/* padding */
2558 	0xca8961c6,	/* Directory UUID, Network Directory */
2559 	0x9541ce1c,	/* Directory UUID, Network Directory */
2560 	0x5949b8bd,	/* Directory UUID, Network Directory */
2561 	0x4f5a5f2e,	/* Directory UUID, Network Directory */
2562 	0x70727463,	/* "prtc" */
2563 	0x69640000,	/* "id" */
2564 	0x76000001,	/* "v" R 1 */
2565 	0x00000001,	/* Immediate value, Network Protocol ID */
2566 	0x70727463,	/* "prtc" */
2567 	0x76657273,	/* "vers" */
2568 	0x76000001,	/* "v" R 1 */
2569 	0x00000001,	/* Immediate value, Network Protocol Version */
2570 	0x70727463,	/* "prtc" */
2571 	0x72657673,	/* "revs" */
2572 	0x76000001,	/* "v" R 1 */
2573 	0x00000001,	/* Immediate value, Network Protocol Revision */
2574 	0x70727463,	/* "prtc" */
2575 	0x73746e73,	/* "stns" */
2576 	0x76000001,	/* "v" R 1 */
2577 	0x00000000,	/* Immediate value, Network Protocol Settings */
2578 };
2579 
2580 static const uuid_t network_dir_uuid =
2581 	UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
2582 		  0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);
2583 
2584 static void tb_test_property_parse(struct kunit *test)
2585 {
2586 	struct tb_property_dir *dir, *network_dir;
2587 	struct tb_property *p;
2588 
2589 	dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
2590 	KUNIT_ASSERT_NOT_NULL(test, dir);
2591 
2592 	p = tb_property_find(dir, "foo", TB_PROPERTY_TYPE_TEXT);
2593 	KUNIT_ASSERT_NULL(test, p);
2594 
2595 	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
2596 	KUNIT_ASSERT_NOT_NULL(test, p);
2597 	KUNIT_EXPECT_STREQ(test, p->value.text, "Apple Inc.");
2598 
2599 	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
2600 	KUNIT_ASSERT_NOT_NULL(test, p);
2601 	KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa27);
2602 
2603 	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
2604 	KUNIT_ASSERT_NOT_NULL(test, p);
2605 	KUNIT_EXPECT_STREQ(test, p->value.text, "Macintosh");
2606 
2607 	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
2608 	KUNIT_ASSERT_NOT_NULL(test, p);
2609 	KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa);
2610 
2611 	p = tb_property_find(dir, "missing", TB_PROPERTY_TYPE_DIRECTORY);
2612 	KUNIT_ASSERT_NULL(test, p);
2613 
2614 	p = tb_property_find(dir, "network", TB_PROPERTY_TYPE_DIRECTORY);
2615 	KUNIT_ASSERT_NOT_NULL(test, p);
2616 
2617 	network_dir = p->value.dir;
2618 	KUNIT_EXPECT_TRUE(test, uuid_equal(network_dir->uuid, &network_dir_uuid));
2619 
2620 	p = tb_property_find(network_dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
2621 	KUNIT_ASSERT_NOT_NULL(test, p);
2622 	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);
2623 
2624 	p = tb_property_find(network_dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
2625 	KUNIT_ASSERT_NOT_NULL(test, p);
2626 	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);
2627 
2628 	p = tb_property_find(network_dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
2629 	KUNIT_ASSERT_NOT_NULL(test, p);
2630 	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);
2631 
2632 	p = tb_property_find(network_dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
2633 	KUNIT_ASSERT_NOT_NULL(test, p);
2634 	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x0);
2635 
2636 	p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
2637 	KUNIT_EXPECT_NULL(test, p);
2638 	p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
2639 	KUNIT_EXPECT_NULL(test, p);
2640 
2641 	tb_property_free_dir(dir);
2642 }
2643 
2644 static void tb_test_property_format(struct kunit *test)
2645 {
2646 	struct tb_property_dir *dir;
2647 	ssize_t block_len;
2648 	u32 *block;
2649 	int ret, i;
2650 
2651 	dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
2652 	KUNIT_ASSERT_NOT_NULL(test, dir);
2653 
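	/*
	 * Passing a NULL block asks tb_property_format_dir() for the
	 * required block size in dwords.
	 */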
2654 	ret = tb_property_format_dir(dir, NULL, 0);
2655 	KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));
2656 
2657 	block_len = ret;
2658 
2659 	block = kunit_kzalloc(test, block_len * sizeof(u32), GFP_KERNEL);
2660 	KUNIT_ASSERT_NOT_NULL(test, block);
2661 
2662 	ret = tb_property_format_dir(dir, block, block_len);
2663 	KUNIT_EXPECT_EQ(test, ret, 0);
2664 
2665 	for (i = 0; i < ARRAY_SIZE(root_directory); i++)
2666 		KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);
2667 
2668 	tb_property_free_dir(dir);
2669 }
2670 
2671 static void compare_dirs(struct kunit *test, struct tb_property_dir *d1,
2672 			 struct tb_property_dir *d2)
2673 {
2674 	struct tb_property *p1, *p2, *tmp;
2675 	int n1, n2, i;
2676 
2677 	if (d1->uuid) {
2678 		KUNIT_ASSERT_NOT_NULL(test, d2->uuid);
2679 		KUNIT_ASSERT_TRUE(test, uuid_equal(d1->uuid, d2->uuid));
2680 	} else {
2681 		KUNIT_ASSERT_NULL(test, d2->uuid);
2682 	}
2683 
2684 	n1 = 0;
2685 	tb_property_for_each(d1, tmp)
2686 		n1++;
2687 	KUNIT_ASSERT_NE(test, n1, 0);
2688 
2689 	n2 = 0;
2690 	tb_property_for_each(d2, tmp)
2691 		n2++;
2692 	KUNIT_ASSERT_NE(test, n2, 0);
2693 
2694 	KUNIT_ASSERT_EQ(test, n1, n2);
2695 
2696 	p1 = NULL;
2697 	p2 = NULL;
2698 	for (i = 0; i < n1; i++) {
2699 		p1 = tb_property_get_next(d1, p1);
2700 		KUNIT_ASSERT_NOT_NULL(test, p1);
2701 		p2 = tb_property_get_next(d2, p2);
2702 		KUNIT_ASSERT_NOT_NULL(test, p2);
2703 
2704 		KUNIT_ASSERT_STREQ(test, &p1->key[0], &p2->key[0]);
2705 		KUNIT_ASSERT_EQ(test, p1->type, p2->type);
2706 		KUNIT_ASSERT_EQ(test, p1->length, p2->length);
2707 
2708 		switch (p1->type) {
2709 		case TB_PROPERTY_TYPE_DIRECTORY:
2710 			KUNIT_ASSERT_NOT_NULL(test, p1->value.dir);
2711 			KUNIT_ASSERT_NOT_NULL(test, p2->value.dir);
2712 			compare_dirs(test, p1->value.dir, p2->value.dir);
2713 			break;
2714 
2715 		case TB_PROPERTY_TYPE_DATA:
2716 			KUNIT_ASSERT_NOT_NULL(test, p1->value.data);
2717 			KUNIT_ASSERT_NOT_NULL(test, p2->value.data);
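			/* Data property length is in dwords, hence length * 4 bytes */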
2718 			KUNIT_ASSERT_TRUE(test,
2719 				!memcmp(p1->value.data, p2->value.data,
2720 					p1->length * 4)
2721 			);
2722 			break;
2723 
2724 		case TB_PROPERTY_TYPE_TEXT:
2725 			KUNIT_ASSERT_NOT_NULL(test, p1->value.text);
2726 			KUNIT_ASSERT_NOT_NULL(test, p2->value.text);
2727 			KUNIT_ASSERT_STREQ(test, p1->value.text, p2->value.text);
2728 			break;
2729 
2730 		case TB_PROPERTY_TYPE_VALUE:
2731 			KUNIT_ASSERT_EQ(test, p1->value.immediate,
2732 					p2->value.immediate);
2733 			break;
2734 		default:
2735 			KUNIT_FAIL(test, "unexpected property type");
2736 			break;
2737 		}
2738 	}
2739 }
2740 
2741 static void tb_test_property_copy(struct kunit *test)
2742 {
2743 	struct tb_property_dir *src, *dst;
2744 	u32 *block;
2745 	int ret, i;
2746 
2747 	src = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
2748 	KUNIT_ASSERT_NOT_NULL(test, src);
2749 
2750 	dst = tb_property_copy_dir(src);
2751 	KUNIT_ASSERT_NOT_NULL(test, dst);
2752 
2753 	/* Compare the structures */
2754 	compare_dirs(test, src, dst);
2755 
2756 	/* Compare the resulting property block */
2757 	ret = tb_property_format_dir(dst, NULL, 0);
2758 	KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));
2759 
2760 	block = kunit_kzalloc(test, sizeof(root_directory), GFP_KERNEL);
2761 	KUNIT_ASSERT_NOT_NULL(test, block);
2762 
2763 	ret = tb_property_format_dir(dst, block, ARRAY_SIZE(root_directory));
2764 	KUNIT_EXPECT_EQ(test, ret, 0);
2765 
2766 	for (i = 0; i < ARRAY_SIZE(root_directory); i++)
2767 		KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);
2768 
2769 	tb_property_free_dir(dst);
2770 	tb_property_free_dir(src);
2771 }
2772 
2773 static struct kunit_case tb_test_cases[] = {
2774 	KUNIT_CASE(tb_test_path_basic),
2775 	KUNIT_CASE(tb_test_path_not_connected_walk),
2776 	KUNIT_CASE(tb_test_path_single_hop_walk),
2777 	KUNIT_CASE(tb_test_path_daisy_chain_walk),
2778 	KUNIT_CASE(tb_test_path_simple_tree_walk),
2779 	KUNIT_CASE(tb_test_path_complex_tree_walk),
2780 	KUNIT_CASE(tb_test_path_max_length_walk),
2781 	KUNIT_CASE(tb_test_path_not_connected),
2782 	KUNIT_CASE(tb_test_path_not_bonded_lane0),
2783 	KUNIT_CASE(tb_test_path_not_bonded_lane1),
2784 	KUNIT_CASE(tb_test_path_not_bonded_lane1_chain),
2785 	KUNIT_CASE(tb_test_path_not_bonded_lane1_chain_reverse),
2786 	KUNIT_CASE(tb_test_path_mixed_chain),
2787 	KUNIT_CASE(tb_test_path_mixed_chain_reverse),
2788 	KUNIT_CASE(tb_test_tunnel_pcie),
2789 	KUNIT_CASE(tb_test_tunnel_dp),
2790 	KUNIT_CASE(tb_test_tunnel_dp_chain),
2791 	KUNIT_CASE(tb_test_tunnel_dp_tree),
2792 	KUNIT_CASE(tb_test_tunnel_dp_max_length),
2793 	KUNIT_CASE(tb_test_tunnel_port_on_path),
2794 	KUNIT_CASE(tb_test_tunnel_usb3),
2795 	KUNIT_CASE(tb_test_tunnel_dma),
2796 	KUNIT_CASE(tb_test_tunnel_dma_rx),
2797 	KUNIT_CASE(tb_test_tunnel_dma_tx),
2798 	KUNIT_CASE(tb_test_tunnel_dma_chain),
2799 	KUNIT_CASE(tb_test_tunnel_dma_match),
2800 	KUNIT_CASE(tb_test_credit_alloc_legacy_not_bonded),
2801 	KUNIT_CASE(tb_test_credit_alloc_legacy_bonded),
2802 	KUNIT_CASE(tb_test_credit_alloc_pcie),
2803 	KUNIT_CASE(tb_test_credit_alloc_without_dp),
2804 	KUNIT_CASE(tb_test_credit_alloc_dp),
2805 	KUNIT_CASE(tb_test_credit_alloc_usb3),
2806 	KUNIT_CASE(tb_test_credit_alloc_dma),
2807 	KUNIT_CASE(tb_test_credit_alloc_dma_multiple),
2808 	KUNIT_CASE(tb_test_credit_alloc_all),
2809 	KUNIT_CASE(tb_test_property_parse),
2810 	KUNIT_CASE(tb_test_property_format),
2811 	KUNIT_CASE(tb_test_property_copy),
2812 	{ }
2813 };
2814 
2815 static struct kunit_suite tb_test_suite = {
2816 	.name = "thunderbolt",
2817 	.test_cases = tb_test_cases,
2818 };
2819 
2820 static struct kunit_suite *tb_test_suites[] = { &tb_test_suite, NULL };
2821 
2822 int tb_test_init(void)
2823 {
2824 	return __kunit_test_suites_init(tb_test_suites);
2825 }
2826 
2827 void tb_test_exit(void)
2828 {
2829 	return __kunit_test_suites_exit(tb_test_suites);
2830 }
2831