1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * USB4 specific functionality
4  *
5  * Copyright (C) 2019, Intel Corporation
6  * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
7  *	    Rajmohan Mani <rajmohan.mani@intel.com>
8  */
9 
10 #include <linux/delay.h>
11 #include <linux/ktime.h>
12 
13 #include "sb_regs.h"
14 #include "tb.h"
15 
16 #define USB4_DATA_DWORDS		16
17 #define USB4_DATA_RETRIES		3
18 
19 enum usb4_switch_op {
20 	USB4_SWITCH_OP_QUERY_DP_RESOURCE = 0x10,
21 	USB4_SWITCH_OP_ALLOC_DP_RESOURCE = 0x11,
22 	USB4_SWITCH_OP_DEALLOC_DP_RESOURCE = 0x12,
23 	USB4_SWITCH_OP_NVM_WRITE = 0x20,
24 	USB4_SWITCH_OP_NVM_AUTH = 0x21,
25 	USB4_SWITCH_OP_NVM_READ = 0x22,
26 	USB4_SWITCH_OP_NVM_SET_OFFSET = 0x23,
27 	USB4_SWITCH_OP_DROM_READ = 0x24,
28 	USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25,
29 };
30 
31 enum usb4_sb_target {
32 	USB4_SB_TARGET_ROUTER,
33 	USB4_SB_TARGET_PARTNER,
34 	USB4_SB_TARGET_RETIMER,
35 };
36 
37 #define USB4_NVM_READ_OFFSET_MASK	GENMASK(23, 2)
38 #define USB4_NVM_READ_OFFSET_SHIFT	2
39 #define USB4_NVM_READ_LENGTH_MASK	GENMASK(27, 24)
40 #define USB4_NVM_READ_LENGTH_SHIFT	24
41 
42 #define USB4_NVM_SET_OFFSET_MASK	USB4_NVM_READ_OFFSET_MASK
43 #define USB4_NVM_SET_OFFSET_SHIFT	USB4_NVM_READ_OFFSET_SHIFT
44 
45 #define USB4_DROM_ADDRESS_MASK		GENMASK(14, 2)
46 #define USB4_DROM_ADDRESS_SHIFT		2
47 #define USB4_DROM_SIZE_MASK		GENMASK(19, 15)
48 #define USB4_DROM_SIZE_SHIFT		15
49 
50 #define USB4_NVM_SECTOR_SIZE_MASK	GENMASK(23, 0)
51 
52 typedef int (*read_block_fn)(void *, unsigned int, void *, size_t);
53 typedef int (*write_block_fn)(void *, const void *, size_t);
54 
55 static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
56 				    u32 value, int timeout_msec)
57 {
58 	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
59 
60 	do {
61 		u32 val;
62 		int ret;
63 
64 		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
65 		if (ret)
66 			return ret;
67 
68 		if ((val & bit) == value)
69 			return 0;
70 
71 		usleep_range(50, 100);
72 	} while (ktime_before(ktime_get(), timeout));
73 
74 	return -ETIMEDOUT;
75 }
76 
77 static int usb4_switch_op_read_data(struct tb_switch *sw, void *data,
78 				    size_t dwords)
79 {
80 	if (dwords > USB4_DATA_DWORDS)
81 		return -EINVAL;
82 
83 	return tb_sw_read(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
84 }
85 
86 static int usb4_switch_op_write_data(struct tb_switch *sw, const void *data,
87 				     size_t dwords)
88 {
89 	if (dwords > USB4_DATA_DWORDS)
90 		return -EINVAL;
91 
92 	return tb_sw_write(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
93 }
94 
95 static int usb4_switch_op_read_metadata(struct tb_switch *sw, u32 *metadata)
96 {
97 	return tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
98 }
99 
100 static int usb4_switch_op_write_metadata(struct tb_switch *sw, u32 metadata)
101 {
102 	return tb_sw_write(sw, &metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
103 }
104 
105 static int usb4_do_read_data(u16 address, void *buf, size_t size,
106 			     read_block_fn read_block, void *read_block_data)
107 {
108 	unsigned int retries = USB4_DATA_RETRIES;
109 	unsigned int offset;
110 
111 	offset = address & 3;
112 	address = address & ~3;
113 
114 	do {
115 		size_t nbytes = min_t(size_t, size, USB4_DATA_DWORDS * 4);
116 		unsigned int dwaddress, dwords;
117 		u8 data[USB4_DATA_DWORDS * 4];
118 		int ret;
119 
120 		dwaddress = address / 4;
121 		dwords = ALIGN(nbytes, 4) / 4;
122 
123 		ret = read_block(read_block_data, dwaddress, data, dwords);
124 		if (ret) {
125 			if (ret != -ENODEV && retries--)
126 				continue;
127 			return ret;
128 		}
129 
130 		memcpy(buf, data + offset, nbytes);
131 
132 		size -= nbytes;
133 		address += nbytes;
134 		buf += nbytes;
135 	} while (size > 0);
136 
137 	return 0;
138 }
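
/*
 * Worked example of the chunking above (illustrative sketch, not part
 * of the original driver): reading 6 bytes starting at byte address
 * 0x1006 is handled in a single iteration as
 *
 *	offset    = 0x1006 & 3      = 2
 *	address   = 0x1006 & ~3     = 0x1004
 *	dwaddress = 0x1004 / 4      = 0x401
 *	dwords    = ALIGN(6, 4) / 4 = 2
 *
 * so the block callback reads two dwords starting at dword address
 * 0x401 and the 6 requested bytes are copied to the caller buffer from
 * data + 2.
 */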
139 
140 static int usb4_do_write_data(unsigned int address, const void *buf, size_t size,
141 	write_block_fn write_next_block, void *write_block_data)
142 {
143 	unsigned int retries = USB4_DATA_RETRIES;
144 	unsigned int offset;
145 
146 	offset = address & 3;
147 	address = address & ~3;
148 
149 	do {
150 		u32 nbytes = min_t(u32, size, USB4_DATA_DWORDS * 4);
151 		u8 data[USB4_DATA_DWORDS * 4];
152 		int ret;
153 
154 		memcpy(data + offset, buf, nbytes);
155 
156 		ret = write_next_block(write_block_data, data, nbytes / 4);
157 		if (ret) {
158 			if (ret == -ETIMEDOUT) {
159 				if (retries--)
160 					continue;
161 				ret = -EIO;
162 			}
163 			return ret;
164 		}
165 
166 		size -= nbytes;
167 		address += nbytes;
168 		buf += nbytes;
169 	} while (size > 0);
170 
171 	return 0;
172 }
173 
174 static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status)
175 {
176 	u32 val;
177 	int ret;
178 
179 	val = opcode | ROUTER_CS_26_OV;
180 	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
181 	if (ret)
182 		return ret;
183 
184 	ret = usb4_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
185 	if (ret)
186 		return ret;
187 
188 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
189 	if (ret)
190 		return ret;
191 
192 	if (val & ROUTER_CS_26_ONS)
193 		return -EOPNOTSUPP;
194 
195 	*status = (val & ROUTER_CS_26_STATUS_MASK) >> ROUTER_CS_26_STATUS_SHIFT;
196 	return 0;
197 }
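
/*
 * The router operations below generally follow the same pattern
 * (sketch using the helpers in this file, USB4_SWITCH_OP_xxx standing
 * for one of the opcodes above):
 *
 *	usb4_switch_op_write_metadata(sw, metadata);
 *	usb4_switch_op(sw, USB4_SWITCH_OP_xxx, &status);
 *	usb4_switch_op_read_data(sw, buf, dwords);
 *
 * where a non-zero @status means the router completed the operation
 * with an error.
 */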
198 
199 static void usb4_switch_check_wakes(struct tb_switch *sw)
200 {
201 	struct tb_port *port;
202 	bool wakeup = false;
203 	u32 val;
204 
205 	if (!device_may_wakeup(&sw->dev))
206 		return;
207 
208 	if (tb_route(sw)) {
209 		if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
210 			return;
211 
212 		tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n",
213 			  (val & ROUTER_CS_6_WOPS) ? "yes" : "no",
214 			  (val & ROUTER_CS_6_WOUS) ? "yes" : "no");
215 
216 		wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS);
217 	}
218 
219 	/* Check for any connected downstream ports for USB4 wake */
220 	tb_switch_for_each_port(sw, port) {
221 		if (!tb_port_has_remote(port))
222 			continue;
223 
224 		if (tb_port_read(port, &val, TB_CFG_PORT,
225 				 port->cap_usb4 + PORT_CS_18, 1))
226 			break;
227 
228 		tb_port_dbg(port, "USB4 wake: %s\n",
229 			    (val & PORT_CS_18_WOU4S) ? "yes" : "no");
230 
231 		if (val & PORT_CS_18_WOU4S)
232 			wakeup = true;
233 	}
234 
235 	if (wakeup)
236 		pm_wakeup_event(&sw->dev, 0);
237 }
238 
239 static bool link_is_usb4(struct tb_port *port)
240 {
241 	u32 val;
242 
243 	if (!port->cap_usb4)
244 		return false;
245 
246 	if (tb_port_read(port, &val, TB_CFG_PORT,
247 			 port->cap_usb4 + PORT_CS_18, 1))
248 		return false;
249 
250 	return !(val & PORT_CS_18_TCM);
251 }
252 
253 /**
254  * usb4_switch_setup() - Additional setup for USB4 device
255  * @sw: USB4 router to setup
256  *
257  * USB4 routers need additional settings in order to enable all the
258  * tunneling. This function enables USB and PCIe tunneling when they can
259  * be enabled (e.g. the parent switch also supports them). If USB
260  * tunneling is not available for some reason (for example when there is
261  * a Thunderbolt 3 switch upstream) then the internal xHCI controller is
262  * enabled instead.
263  */
264 int usb4_switch_setup(struct tb_switch *sw)
265 {
266 	struct tb_port *downstream_port;
267 	struct tb_switch *parent;
268 	bool tbt3, xhci;
269 	u32 val = 0;
270 	int ret;
271 
272 	usb4_switch_check_wakes(sw);
273 
274 	if (!tb_route(sw))
275 		return 0;
276 
277 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
278 	if (ret)
279 		return ret;
280 
281 	parent = tb_switch_parent(sw);
282 	downstream_port = tb_port_at(tb_route(sw), parent);
283 	sw->link_usb4 = link_is_usb4(downstream_port);
284 	tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT3");
285 
286 	xhci = val & ROUTER_CS_6_HCI;
287 	tbt3 = !(val & ROUTER_CS_6_TNS);
288 
289 	tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
290 		  tbt3 ? "yes" : "no", xhci ? "yes" : "no");
291 
292 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
293 	if (ret)
294 		return ret;
295 
296 	if (sw->link_usb4 && tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
297 		val |= ROUTER_CS_5_UTO;
298 		xhci = false;
299 	}
300 
301 	/* Only enable PCIe tunneling if the parent router supports it */
302 	if (tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
303 		val |= ROUTER_CS_5_PTO;
304 		/*
305 		 * xHCI can be enabled if PCIe tunneling is supported
306 		 * and the parent does not have any USB3 downstream
307 		 * adapters (so we cannot do USB 3.x tunneling).
308 		 */
309 		if (xhci)
310 			val |= ROUTER_CS_5_HCO;
311 	}
312 
313 	/* TBT3 supported by the CM */
314 	val |= ROUTER_CS_5_C3S;
315 	/* Tunneling configuration is ready now */
316 	val |= ROUTER_CS_5_CV;
317 
318 	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
319 	if (ret)
320 		return ret;
321 
322 	return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
323 					ROUTER_CS_6_CR, 50);
324 }
325 
326 /**
327  * usb4_switch_read_uid() - Read UID from USB4 router
328  * @sw: USB4 router
329  * @uid: UID is stored here
330  *
331  * Reads 64-bit UID from USB4 router config space.
332  */
333 int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
334 {
335 	return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
336 }
337 
338 static int usb4_switch_drom_read_block(void *data,
339 				       unsigned int dwaddress, void *buf,
340 				       size_t dwords)
341 {
342 	struct tb_switch *sw = data;
343 	u8 status = 0;
344 	u32 metadata;
345 	int ret;
346 
347 	metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK;
348 	metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
349 		USB4_DROM_ADDRESS_MASK;
350 
351 	ret = usb4_switch_op_write_metadata(sw, metadata);
352 	if (ret)
353 		return ret;
354 
355 	ret = usb4_switch_op(sw, USB4_SWITCH_OP_DROM_READ, &status);
356 	if (ret)
357 		return ret;
358 
359 	if (status)
360 		return -EIO;
361 
362 	return usb4_switch_op_read_data(sw, buf, dwords);
363 }
364 
365 /**
366  * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
367  * @sw: USB4 router
368  * @address: Byte address inside DROM to start reading
369  * @buf: Buffer where the DROM content is stored
370  * @size: Number of bytes to read from DROM
371  *
372  * Uses USB4 router operations to read router DROM. For devices this
373  * should always work but for hosts it may return %-EOPNOTSUPP in which
374  * case the host router does not have DROM.
375  */
376 int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
377 			  size_t size)
378 {
379 	return usb4_do_read_data(address, buf, size,
380 				 usb4_switch_drom_read_block, sw);
381 }
382 
383 /**
384  * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
385  * @sw: USB4 router
386  *
387  * Checks whether conditions are met so that lane bonding can be
388  * established with the upstream router. Call only for device routers.
389  */
390 bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
391 {
392 	struct tb_port *up;
393 	int ret;
394 	u32 val;
395 
396 	up = tb_upstream_port(sw);
397 	ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
398 	if (ret)
399 		return false;
400 
401 	return !!(val & PORT_CS_18_BE);
402 }
403 
404 /**
405  * usb4_switch_set_wake() - Enable/disable wake
406  * @sw: USB4 router
407  * @flags: Wakeup flags (%0 to disable)
408  *
409  * Enables/disables the router to wake up from sleep.
410  */
411 int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
412 {
413 	struct tb_port *port;
414 	u64 route = tb_route(sw);
415 	u32 val;
416 	int ret;
417 
418 	/*
419 	 * Enable wakes coming from all USB4 downstream ports (from
420 	 * child routers). For device routers do this also for the
421 	 * upstream USB4 port.
422 	 */
423 	tb_switch_for_each_port(sw, port) {
424 		if (!route && tb_is_upstream_port(port))
425 			continue;
426 
427 		ret = tb_port_read(port, &val, TB_CFG_PORT,
428 				   port->cap_usb4 + PORT_CS_19, 1);
429 		if (ret)
430 			return ret;
431 
432 		val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4);
433 
434 		if (flags & TB_WAKE_ON_CONNECT)
435 			val |= PORT_CS_19_WOC;
436 		if (flags & TB_WAKE_ON_DISCONNECT)
437 			val |= PORT_CS_19_WOD;
438 		if (flags & TB_WAKE_ON_USB4)
439 			val |= PORT_CS_19_WOU4;
440 
441 		ret = tb_port_write(port, &val, TB_CFG_PORT,
442 				    port->cap_usb4 + PORT_CS_19, 1);
443 		if (ret)
444 			return ret;
445 	}
446 
447 	/*
448 	 * Enable wakes from PCIe and USB 3.x on this router. Only
449 	 * needed for device routers.
450 	 */
451 	if (route) {
452 		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
453 		if (ret)
454 			return ret;
455 
456 		val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU);
457 		if (flags & TB_WAKE_ON_USB3)
458 			val |= ROUTER_CS_5_WOU;
459 		if (flags & TB_WAKE_ON_PCIE)
460 			val |= ROUTER_CS_5_WOP;
461 
462 		ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
463 		if (ret)
464 			return ret;
465 	}
466 
467 	return 0;
468 }
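
/*
 * Example (sketch only): a connection manager wanting a device router
 * to wake the domain on plug, unplug and USB4 wake events could call
 *
 *	usb4_switch_set_wake(sw, TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT |
 *			     TB_WAKE_ON_USB4);
 *
 * and usb4_switch_set_wake(sw, 0) to disable all wakes again.
 */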
469 
470 /**
471  * usb4_switch_set_sleep() - Prepare the router to enter sleep
472  * @sw: USB4 router
473  *
474  * Sets sleep bit for the router. Returns when the router sleep ready
475  * bit has been asserted.
476  */
477 int usb4_switch_set_sleep(struct tb_switch *sw)
478 {
479 	int ret;
480 	u32 val;
481 
482 	/* Set sleep bit and wait for sleep ready to be asserted */
483 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
484 	if (ret)
485 		return ret;
486 
487 	val |= ROUTER_CS_5_SLP;
488 
489 	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
490 	if (ret)
491 		return ret;
492 
493 	return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
494 					ROUTER_CS_6_SLPR, 500);
495 }
496 
497 /**
498  * usb4_switch_nvm_sector_size() - Return router NVM sector size
499  * @sw: USB4 router
500  *
501  * If the router supports NVM operations this function returns the NVM
502  * sector size in bytes. If NVM operations are not supported returns
503  * %-EOPNOTSUPP.
504  */
505 int usb4_switch_nvm_sector_size(struct tb_switch *sw)
506 {
507 	u32 metadata;
508 	u8 status;
509 	int ret;
510 
511 	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &status);
512 	if (ret)
513 		return ret;
514 
515 	if (status)
516 		return status == 0x2 ? -EOPNOTSUPP : -EIO;
517 
518 	ret = usb4_switch_op_read_metadata(sw, &metadata);
519 	if (ret)
520 		return ret;
521 
522 	return metadata & USB4_NVM_SECTOR_SIZE_MASK;
523 }
524 
525 static int usb4_switch_nvm_read_block(void *data,
526 	unsigned int dwaddress, void *buf, size_t dwords)
527 {
528 	struct tb_switch *sw = data;
529 	u8 status = 0;
530 	u32 metadata;
531 	int ret;
532 
533 	metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) &
534 		   USB4_NVM_READ_LENGTH_MASK;
535 	metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
536 		   USB4_NVM_READ_OFFSET_MASK;
537 
538 	ret = usb4_switch_op_write_metadata(sw, metadata);
539 	if (ret)
540 		return ret;
541 
542 	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_READ, &status);
543 	if (ret)
544 		return ret;
545 
546 	if (status)
547 		return -EIO;
548 
549 	return usb4_switch_op_read_data(sw, buf, dwords);
550 }
551 
552 /**
553  * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
554  * @sw: USB4 router
555  * @address: Starting address in bytes
556  * @buf: Read data is placed here
557  * @size: How many bytes to read
558  *
559  * Reads NVM contents of the router. If NVM is not supported returns
560  * %-EOPNOTSUPP.
561  */
562 int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
563 			 size_t size)
564 {
565 	return usb4_do_read_data(address, buf, size,
566 				 usb4_switch_nvm_read_block, sw);
567 }
568 
569 static int usb4_switch_nvm_set_offset(struct tb_switch *sw,
570 				      unsigned int address)
571 {
572 	u32 metadata, dwaddress;
573 	u8 status = 0;
574 	int ret;
575 
576 	dwaddress = address / 4;
577 	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
578 		   USB4_NVM_SET_OFFSET_MASK;
579 
580 	ret = usb4_switch_op_write_metadata(sw, metadata);
581 	if (ret)
582 		return ret;
583 
584 	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &status);
585 	if (ret)
586 		return ret;
587 
588 	return status ? -EIO : 0;
589 }
590 
591 static int usb4_switch_nvm_write_next_block(void *data, const void *buf,
592 					    size_t dwords)
593 {
594 	struct tb_switch *sw = data;
595 	u8 status;
596 	int ret;
597 
598 	ret = usb4_switch_op_write_data(sw, buf, dwords);
599 	if (ret)
600 		return ret;
601 
602 	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_WRITE, &status);
603 	if (ret)
604 		return ret;
605 
606 	return status ? -EIO : 0;
607 }
608 
609 /**
610  * usb4_switch_nvm_write() - Write to the router NVM
611  * @sw: USB4 router
612  * @address: Start address where to write in bytes
613  * @buf: Pointer to the data to write
614  * @size: Size of @buf in bytes
615  *
616  * Writes @buf to the router NVM using USB4 router operations. If NVM
617  * write is not supported returns %-EOPNOTSUPP.
618  */
619 int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
620 			  const void *buf, size_t size)
621 {
622 	int ret;
623 
624 	ret = usb4_switch_nvm_set_offset(sw, address);
625 	if (ret)
626 		return ret;
627 
628 	return usb4_do_write_data(address, buf, size,
629 				  usb4_switch_nvm_write_next_block, sw);
630 }
631 
632 /**
633  * usb4_switch_nvm_authenticate() - Authenticate new NVM
634  * @sw: USB4 router
635  *
636  * After the new NVM has been written via usb4_switch_nvm_write(), this
637  * function triggers NVM authentication process. If the authentication
638  * is successful the router is power cycled and the new NVM starts
639  * running. In case of failure returns negative errno.
640  */
641 int usb4_switch_nvm_authenticate(struct tb_switch *sw)
642 {
643 	u8 status = 0;
644 	int ret;
645 
646 	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, &status);
647 	if (ret)
648 		return ret;
649 
650 	switch (status) {
651 	case 0x0:
652 		tb_sw_dbg(sw, "NVM authentication successful\n");
653 		return 0;
654 	case 0x1:
655 		return -EINVAL;
656 	case 0x2:
657 		return -EAGAIN;
658 	case 0x3:
659 		return -EOPNOTSUPP;
660 	default:
661 		return -EIO;
662 	}
663 }
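
/*
 * A full router NVM upgrade using the helpers above could look roughly
 * like this (sketch; nvm_addr, image and image_size are caller supplied
 * placeholders and error handling is omitted):
 *
 *	ret = usb4_switch_nvm_write(sw, nvm_addr, image, image_size);
 *	if (!ret)
 *		ret = usb4_switch_nvm_authenticate(sw);
 */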
664 
665 /**
666  * usb4_switch_query_dp_resource() - Query availability of DP IN resource
667  * @sw: USB4 router
668  * @in: DP IN adapter
669  *
670  * For DP tunneling this function can be used to query availability of
671  * DP IN resource. Returns true if the resource is available for DP
672  * tunneling, false otherwise.
673  */
674 bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
675 {
676 	u8 status;
677 	int ret;
678 
679 	ret = usb4_switch_op_write_metadata(sw, in->port);
680 	if (ret)
681 		return false;
682 
683 	ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &status);
684 	/*
685 	 * If DP resource allocation is not supported assume it is
686 	 * always available.
687 	 */
688 	if (ret == -EOPNOTSUPP)
689 		return true;
690 	else if (ret)
691 		return false;
692 
693 	return !status;
694 }
695 
696 /**
697  * usb4_switch_alloc_dp_resource() - Allocate DP IN resource
698  * @sw: USB4 router
699  * @in: DP IN adapter
700  *
701  * Allocates DP IN resource for DP tunneling using USB4 router
702  * operations. If the resource was allocated returns %0. Otherwise
703  * returns negative errno, in particular %-EBUSY if the resource is
704  * already allocated.
705  */
706 int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
707 {
708 	u8 status;
709 	int ret;
710 
711 	ret = usb4_switch_op_write_metadata(sw, in->port);
712 	if (ret)
713 		return ret;
714 
715 	ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &status);
716 	if (ret == -EOPNOTSUPP)
717 		return 0;
718 	else if (ret)
719 		return ret;
720 
721 	return status ? -EBUSY : 0;
722 }
723 
724 /**
725  * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource
726  * @sw: USB4 router
727  * @in: DP IN adapter
728  *
729  * Releases the previously allocated DP IN resource.
730  */
731 int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
732 {
733 	u8 status;
734 	int ret;
735 
736 	ret = usb4_switch_op_write_metadata(sw, in->port);
737 	if (ret)
738 		return ret;
739 
740 	ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &status);
741 	if (ret == -EOPNOTSUPP)
742 		return 0;
743 	else if (ret)
744 		return ret;
745 
746 	return status ? -EIO : 0;
747 }
748 
749 static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
750 {
751 	struct tb_port *p;
752 	int usb4_idx = 0;
753 
754 	/* Assume port is primary */
755 	tb_switch_for_each_port(sw, p) {
756 		if (!tb_port_is_null(p))
757 			continue;
758 		if (tb_is_upstream_port(p))
759 			continue;
760 		if (!p->link_nr) {
761 			if (p == port)
762 				break;
763 			usb4_idx++;
764 		}
765 	}
766 
767 	return usb4_idx;
768 }
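
/*
 * Illustration derived from the loop above (not from the USB4 spec
 * text): on a router whose lane adapters are 1, 2, 3 and 4, with 1/2
 * and 3/4 forming the two dual-lane USB4 ports and neither being the
 * upstream port, the lane 0 adapters 1 and 3 map to usb4_idx 0 and 1
 * respectively. The mapping functions below match this index against
 * the n:th PCIe or USB3 downstream adapter of the router.
 */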
769 
770 /**
771  * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
772  * @sw: USB4 router
773  * @port: USB4 port
774  *
775  * USB4 routers have a direct mapping between USB4 ports and PCIe
776  * downstream adapters where the PCIe topology is extended. This
777  * function returns the corresponding downstream PCIe adapter or %NULL
778  * if no such mapping was possible.
779  */
780 struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
781 					  const struct tb_port *port)
782 {
783 	int usb4_idx = usb4_port_idx(sw, port);
784 	struct tb_port *p;
785 	int pcie_idx = 0;
786 
787 	/* Find PCIe down port matching usb4_port */
788 	tb_switch_for_each_port(sw, p) {
789 		if (!tb_port_is_pcie_down(p))
790 			continue;
791 
792 		if (pcie_idx == usb4_idx)
793 			return p;
794 
795 		pcie_idx++;
796 	}
797 
798 	return NULL;
799 }
800 
801 /**
802  * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
803  * @sw: USB4 router
804  * @port: USB4 port
805  *
806  * USB4 routers have a direct mapping between USB4 ports and USB 3.x
807  * downstream adapters where the USB 3.x topology is extended. This
808  * function returns the corresponding downstream USB 3.x adapter or
809  * %NULL if no such mapping was possible.
810  */
811 struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
812 					  const struct tb_port *port)
813 {
814 	int usb4_idx = usb4_port_idx(sw, port);
815 	struct tb_port *p;
816 	int usb_idx = 0;
817 
818 	/* Find USB3 down port matching usb4_port */
819 	tb_switch_for_each_port(sw, p) {
820 		if (!tb_port_is_usb3_down(p))
821 			continue;
822 
823 		if (usb_idx == usb4_idx)
824 			return p;
825 
826 		usb_idx++;
827 	}
828 
829 	return NULL;
830 }
831 
832 /**
833  * usb4_port_unlock() - Unlock USB4 downstream port
834  * @port: USB4 port to unlock
835  *
836  * Unlocks USB4 downstream port so that the connection manager can
837  * access the router below this port.
838  */
839 int usb4_port_unlock(struct tb_port *port)
840 {
841 	int ret;
842 	u32 val;
843 
844 	ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
845 	if (ret)
846 		return ret;
847 
848 	val &= ~ADP_CS_4_LCK;
849 	return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
850 }
851 
852 static int usb4_port_set_configured(struct tb_port *port, bool configured)
853 {
854 	int ret;
855 	u32 val;
856 
857 	if (!port->cap_usb4)
858 		return -EINVAL;
859 
860 	ret = tb_port_read(port, &val, TB_CFG_PORT,
861 			   port->cap_usb4 + PORT_CS_19, 1);
862 	if (ret)
863 		return ret;
864 
865 	if (configured)
866 		val |= PORT_CS_19_PC;
867 	else
868 		val &= ~PORT_CS_19_PC;
869 
870 	return tb_port_write(port, &val, TB_CFG_PORT,
871 			     port->cap_usb4 + PORT_CS_19, 1);
872 }
873 
874 /**
875  * usb4_port_configure() - Set USB4 port configured
876  * @port: USB4 port
877  *
878  * Sets the USB4 link to be configured for power management purposes.
879  */
880 int usb4_port_configure(struct tb_port *port)
881 {
882 	return usb4_port_set_configured(port, true);
883 }
884 
885 /**
886  * usb4_port_unconfigure() - Set USB4 port unconfigured
887  * @port: USB4 port
888  *
889  * Sets the USB4 link to be unconfigured for power management purposes.
890  */
891 void usb4_port_unconfigure(struct tb_port *port)
892 {
893 	usb4_port_set_configured(port, false);
894 }
895 
896 static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
897 {
898 	int ret;
899 	u32 val;
900 
901 	if (!port->cap_usb4)
902 		return -EINVAL;
903 
904 	ret = tb_port_read(port, &val, TB_CFG_PORT,
905 			   port->cap_usb4 + PORT_CS_19, 1);
906 	if (ret)
907 		return ret;
908 
909 	if (configured)
910 		val |= PORT_CS_19_PID;
911 	else
912 		val &= ~PORT_CS_19_PID;
913 
914 	return tb_port_write(port, &val, TB_CFG_PORT,
915 			     port->cap_usb4 + PORT_CS_19, 1);
916 }
917 
918 /**
919  * usb4_port_configure_xdomain() - Configure port for XDomain
920  * @port: USB4 port connected to another host
921  *
922  * Marks the USB4 port as being connected to another host. Returns %0 in
923  * case of success and negative errno in case of failure.
924  */
925 int usb4_port_configure_xdomain(struct tb_port *port)
926 {
927 	return usb4_set_xdomain_configured(port, true);
928 }
929 
930 /**
931  * usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain
932  * @port: USB4 port that was connected to another host
933  *
934  * Clears USB4 port from being marked as XDomain.
935  */
936 void usb4_port_unconfigure_xdomain(struct tb_port *port)
937 {
938 	usb4_set_xdomain_configured(port, false);
939 }
940 
941 static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
942 				  u32 value, int timeout_msec)
943 {
944 	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
945 
946 	do {
947 		u32 val;
948 		int ret;
949 
950 		ret = tb_port_read(port, &val, TB_CFG_PORT, offset, 1);
951 		if (ret)
952 			return ret;
953 
954 		if ((val & bit) == value)
955 			return 0;
956 
957 		usleep_range(50, 100);
958 	} while (ktime_before(ktime_get(), timeout));
959 
960 	return -ETIMEDOUT;
961 }
962 
963 static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
964 {
965 	if (dwords > USB4_DATA_DWORDS)
966 		return -EINVAL;
967 
968 	return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
969 			    dwords);
970 }
971 
972 static int usb4_port_write_data(struct tb_port *port, const void *data,
973 				size_t dwords)
974 {
975 	if (dwords > USB4_DATA_DWORDS)
976 		return -EINVAL;
977 
978 	return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
979 			     dwords);
980 }
981 
982 static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target,
983 			     u8 index, u8 reg, void *buf, u8 size)
984 {
985 	size_t dwords = DIV_ROUND_UP(size, 4);
986 	int ret;
987 	u32 val;
988 
989 	if (!port->cap_usb4)
990 		return -EINVAL;
991 
992 	val = reg;
993 	val |= size << PORT_CS_1_LENGTH_SHIFT;
994 	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
995 	if (target == USB4_SB_TARGET_RETIMER)
996 		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
997 	val |= PORT_CS_1_PND;
998 
999 	ret = tb_port_write(port, &val, TB_CFG_PORT,
1000 			    port->cap_usb4 + PORT_CS_1, 1);
1001 	if (ret)
1002 		return ret;
1003 
1004 	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
1005 				     PORT_CS_1_PND, 0, 500);
1006 	if (ret)
1007 		return ret;
1008 
1009 	ret = tb_port_read(port, &val, TB_CFG_PORT,
1010 			    port->cap_usb4 + PORT_CS_1, 1);
1011 	if (ret)
1012 		return ret;
1013 
1014 	if (val & PORT_CS_1_NR)
1015 		return -ENODEV;
1016 	if (val & PORT_CS_1_RC)
1017 		return -EIO;
1018 
1019 	return buf ? usb4_port_read_data(port, buf, dwords) : 0;
1020 }
1021 
1022 static int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
1023 			      u8 index, u8 reg, const void *buf, u8 size)
1024 {
1025 	size_t dwords = DIV_ROUND_UP(size, 4);
1026 	int ret;
1027 	u32 val;
1028 
1029 	if (!port->cap_usb4)
1030 		return -EINVAL;
1031 
1032 	if (buf) {
1033 		ret = usb4_port_write_data(port, buf, dwords);
1034 		if (ret)
1035 			return ret;
1036 	}
1037 
1038 	val = reg;
1039 	val |= size << PORT_CS_1_LENGTH_SHIFT;
1040 	val |= PORT_CS_1_WNR_WRITE;
1041 	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
1042 	if (target == USB4_SB_TARGET_RETIMER)
1043 		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
1044 	val |= PORT_CS_1_PND;
1045 
1046 	ret = tb_port_write(port, &val, TB_CFG_PORT,
1047 			    port->cap_usb4 + PORT_CS_1, 1);
1048 	if (ret)
1049 		return ret;
1050 
1051 	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
1052 				     PORT_CS_1_PND, 0, 500);
1053 	if (ret)
1054 		return ret;
1055 
1056 	ret = tb_port_read(port, &val, TB_CFG_PORT,
1057 			    port->cap_usb4 + PORT_CS_1, 1);
1058 	if (ret)
1059 		return ret;
1060 
1061 	if (val & PORT_CS_1_NR)
1062 		return -ENODEV;
1063 	if (val & PORT_CS_1_RC)
1064 		return -EIO;
1065 
1066 	return 0;
1067 }
1068 
1069 static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target,
1070 			   u8 index, enum usb4_sb_opcode opcode, int timeout_msec)
1071 {
1072 	ktime_t timeout;
1073 	u32 val;
1074 	int ret;
1075 
1076 	val = opcode;
1077 	ret = usb4_port_sb_write(port, target, index, USB4_SB_OPCODE, &val,
1078 				 sizeof(val));
1079 	if (ret)
1080 		return ret;
1081 
1082 	timeout = ktime_add_ms(ktime_get(), timeout_msec);
1083 
1084 	do {
1085 		/* Check results */
1086 		ret = usb4_port_sb_read(port, target, index, USB4_SB_OPCODE,
1087 					&val, sizeof(val));
1088 		if (ret)
1089 			return ret;
1090 
1091 		switch (val) {
1092 		case 0:
1093 			return 0;
1094 
1095 		case USB4_SB_OPCODE_ERR:
1096 			return -EAGAIN;
1097 
1098 		case USB4_SB_OPCODE_ONS:
1099 			return -EOPNOTSUPP;
1100 
1101 		default:
1102 			if (val != opcode)
1103 				return -EIO;
1104 			break;
1105 		}
1106 	} while (ktime_before(ktime_get(), timeout));
1107 
1108 	return -ETIMEDOUT;
1109 }
1110 
1111 /**
1112  * usb4_port_enumerate_retimers() - Send RT broadcast transaction
1113  * @port: USB4 port
1114  *
1115  * This forces the USB4 port to send a broadcast RT transaction which
1116  * makes the retimers on the link assign an index to themselves. Returns
1117  * %0 in case of success and negative errno if there was an error.
1118  */
1119 int usb4_port_enumerate_retimers(struct tb_port *port)
1120 {
1121 	u32 val;
1122 
1123 	val = USB4_SB_OPCODE_ENUMERATE_RETIMERS;
1124 	return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
1125 				  USB4_SB_OPCODE, &val, sizeof(val));
1126 }
1127 
1128 static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
1129 				       enum usb4_sb_opcode opcode,
1130 				       int timeout_msec)
1131 {
1132 	return usb4_port_sb_op(port, USB4_SB_TARGET_RETIMER, index, opcode,
1133 			       timeout_msec);
1134 }
1135 
1136 /**
1137  * usb4_port_retimer_read() - Read from retimer sideband registers
1138  * @port: USB4 port
1139  * @index: Retimer index
1140  * @reg: Sideband register to read
1141  * @buf: Data from @reg is stored here
1142  * @size: Number of bytes to read
1143  *
1144  * Function reads retimer sideband registers starting from @reg. The
1145  * retimer is connected to @port at @index. Returns %0 in case of
1146  * success, and read data is copied to @buf. If there is no retimer
1147  * present at given @index returns %-ENODEV. In any other failure
1148  * returns negative errno.
1149  */
1150 int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
1151 			   u8 size)
1152 {
1153 	return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
1154 				 size);
1155 }
1156 
1157 /**
1158  * usb4_port_retimer_write() - Write to retimer sideband registers
1159  * @port: USB4 port
1160  * @index: Retimer index
1161  * @reg: Sideband register to write
1162  * @buf: Data that is written starting from @reg
1163  * @size: Number of bytes to write
1164  *
1165  * Writes retimer sideband registers starting from @reg. The retimer is
1166  * connected to @port at @index. Returns %0 in case of success. If there
1167  * is no retimer present at given @index returns %-ENODEV. In any other
1168  * failure returns negative errno.
1169  */
1170 int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
1171 			    const void *buf, u8 size)
1172 {
1173 	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
1174 				  size);
1175 }
1176 
1177 /**
1178  * usb4_port_retimer_is_last() - Is the retimer last on-board retimer
1179  * @port: USB4 port
1180  * @index: Retimer index
1181  *
1182  * If the retimer at @index is the last one (connected directly to the
1183  * Type-C port) this function returns %1. If it is not, returns %0. If
1184  * the retimer is not present returns %-ENODEV. Otherwise returns
1185  * negative errno.
1186  */
1187 int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
1188 {
1189 	u32 metadata;
1190 	int ret;
1191 
1192 	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
1193 				   500);
1194 	if (ret)
1195 		return ret;
1196 
1197 	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
1198 				     sizeof(metadata));
1199 	return ret ? ret : metadata & 1;
1200 }
1201 
1202 /**
1203  * usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size
1204  * @port: USB4 port
1205  * @index: Retimer index
1206  *
1207  * Reads NVM sector size (in bytes) of a retimer at @index. This
1208  * operation can be used to determine whether the retimer supports NVM
1209  * upgrade for example. Returns sector size in bytes or negative errno
1210  * in case of error. Specifically returns %-ENODEV if there is no
1211  * retimer at @index.
1212  */
1213 int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
1214 {
1215 	u32 metadata;
1216 	int ret;
1217 
1218 	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE,
1219 				   500);
1220 	if (ret)
1221 		return ret;
1222 
1223 	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
1224 				     sizeof(metadata));
1225 	return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK;
1226 }
1227 
1228 static int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
1229 					    unsigned int address)
1230 {
1231 	u32 metadata, dwaddress;
1232 	int ret;
1233 
1234 	dwaddress = address / 4;
1235 	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
1236 		  USB4_NVM_SET_OFFSET_MASK;
1237 
1238 	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
1239 				      sizeof(metadata));
1240 	if (ret)
1241 		return ret;
1242 
1243 	return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_SET_OFFSET,
1244 				    500);
1245 }
1246 
1247 struct retimer_info {
1248 	struct tb_port *port;
1249 	u8 index;
1250 };
1251 
1252 static int usb4_port_retimer_nvm_write_next_block(void *data, const void *buf,
1253 						  size_t dwords)
1254 
1255 {
1256 	const struct retimer_info *info = data;
1257 	struct tb_port *port = info->port;
1258 	u8 index = info->index;
1259 	int ret;
1260 
1261 	ret = usb4_port_retimer_write(port, index, USB4_SB_DATA,
1262 				      buf, dwords * 4);
1263 	if (ret)
1264 		return ret;
1265 
1266 	return usb4_port_retimer_op(port, index,
1267 			USB4_SB_OPCODE_NVM_BLOCK_WRITE, 1000);
1268 }
1269 
1270 /**
1271  * usb4_port_retimer_nvm_write() - Write to retimer NVM
1272  * @port: USB4 port
1273  * @index: Retimer index
1274  * @address: Byte address where to start the write
1275  * @buf: Data to write
1276  * @size: Number of bytes to write
1277  *
1278  * Writes @size bytes from @buf to the retimer NVM. Used for NVM
1279  * upgrade. Returns %0 if the data was written successfully and negative
1280  * errno in case of failure. Specifically returns %-ENODEV if there is
1281  * no retimer at @index.
1282  */
1283 int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address,
1284 				const void *buf, size_t size)
1285 {
1286 	struct retimer_info info = { .port = port, .index = index };
1287 	int ret;
1288 
1289 	ret = usb4_port_retimer_nvm_set_offset(port, index, address);
1290 	if (ret)
1291 		return ret;
1292 
1293 	return usb4_do_write_data(address, buf, size,
1294 			usb4_port_retimer_nvm_write_next_block, &info);
1295 }
1296 
1297 /**
1298  * usb4_port_retimer_nvm_authenticate() - Start retimer NVM upgrade
1299  * @port: USB4 port
1300  * @index: Retimer index
1301  *
1302  * After the new NVM image has been written via usb4_port_retimer_nvm_write()
1303  * this function can be used to trigger the NVM upgrade process. If
1304  * successful, the retimer restarts with the new NVM and may not have the
1305  * index set, so one needs to call usb4_port_enumerate_retimers() to
1306  * force an index to be assigned.
1307  */
1308 int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
1309 {
1310 	u32 val;
1311 
1312 	/*
1313 	 * We need to use the raw operation here because once the
1314 	 * authentication completes the retimer index is not set anymore
1315 	 * so we do not get back the status now.
1316 	 */
1317 	val = USB4_SB_OPCODE_NVM_AUTH_WRITE;
1318 	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
1319 				  USB4_SB_OPCODE, &val, sizeof(val));
1320 }
1321 
1322 /**
1323  * usb4_port_retimer_nvm_authenticate_status() - Read status of NVM upgrade
1324  * @port: USB4 port
1325  * @index: Retimer index
1326  * @status: Raw status code read from metadata
1327  *
1328  * This can be called after usb4_port_retimer_nvm_authenticate() and
1329  * usb4_port_enumerate_retimers() to fetch status of the NVM upgrade.
1330  *
1331  * Returns %0 if the authentication status was successfully read. The
1332  * completion metadata (the result) is then stored into @status. If
1333  * reading the status fails, returns negative errno.
1334  */
1335 int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
1336 					      u32 *status)
1337 {
1338 	u32 metadata, val;
1339 	int ret;
1340 
1341 	ret = usb4_port_retimer_read(port, index, USB4_SB_OPCODE, &val,
1342 				     sizeof(val));
1343 	if (ret)
1344 		return ret;
1345 
1346 	switch (val) {
1347 	case 0:
1348 		*status = 0;
1349 		return 0;
1350 
1351 	case USB4_SB_OPCODE_ERR:
1352 		ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA,
1353 					     &metadata, sizeof(metadata));
1354 		if (ret)
1355 			return ret;
1356 
1357 		*status = metadata & USB4_SB_METADATA_NVM_AUTH_WRITE_MASK;
1358 		return 0;
1359 
1360 	case USB4_SB_OPCODE_ONS:
1361 		return -EOPNOTSUPP;
1362 
1363 	default:
1364 		return -EIO;
1365 	}
1366 }
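
/*
 * Putting the retimer NVM helpers together, an upgrade flow could look
 * roughly like this (sketch; image_addr, image and image_size are
 * caller supplied placeholders and error handling is omitted):
 *
 *	usb4_port_retimer_nvm_write(port, index, image_addr, image, image_size);
 *	usb4_port_retimer_nvm_authenticate(port, index);
 *	usb4_port_enumerate_retimers(port);
 *	usb4_port_retimer_nvm_authenticate_status(port, index, &status);
 */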
1367 
1368 static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
1369 					    void *buf, size_t dwords)
1370 {
1371 	const struct retimer_info *info = data;
1372 	struct tb_port *port = info->port;
1373 	u8 index = info->index;
1374 	u32 metadata;
1375 	int ret;
1376 
1377 	metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
1378 	if (dwords < USB4_DATA_DWORDS)
1379 		metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;
1380 
1381 	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
1382 				      sizeof(metadata));
1383 	if (ret)
1384 		return ret;
1385 
1386 	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_READ, 500);
1387 	if (ret)
1388 		return ret;
1389 
1390 	return usb4_port_retimer_read(port, index, USB4_SB_DATA, buf,
1391 				      dwords * 4);
1392 }
1393 
1394 /**
1395  * usb4_port_retimer_nvm_read() - Read contents of retimer NVM
1396  * @port: USB4 port
1397  * @index: Retimer index
1398  * @address: NVM address (in bytes) to start reading
1399  * @buf: Data read from NVM is stored here
1400  * @size: Number of bytes to read
1401  *
1402  * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
1403  * read was successful and negative errno in case of failure.
1404  * Specifically returns %-ENODEV if there is no retimer at @index.
1405  */
1406 int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
1407 			       unsigned int address, void *buf, size_t size)
1408 {
1409 	struct retimer_info info = { .port = port, .index = index };
1410 
1411 	return usb4_do_read_data(address, buf, size,
1412 			usb4_port_retimer_nvm_read_block, &info);
1413 }
1414 
1415 /**
1416  * usb4_usb3_port_max_link_rate() - Maximum supported USB3 link rate
1417  * @port: USB3 adapter port
1418  *
1419  * Returns the maximum supported link rate of a USB3 adapter in Mb/s,
1420  * or negative errno in case of error.
1421  */
1422 int usb4_usb3_port_max_link_rate(struct tb_port *port)
1423 {
1424 	int ret, lr;
1425 	u32 val;
1426 
1427 	if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
1428 		return -EINVAL;
1429 
1430 	ret = tb_port_read(port, &val, TB_CFG_PORT,
1431 			   port->cap_adap + ADP_USB3_CS_4, 1);
1432 	if (ret)
1433 		return ret;
1434 
1435 	lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
1436 	return lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
1437 }
1438 
1439 /**
1440  * usb4_usb3_port_actual_link_rate() - Established USB3 link rate
1441  * @port: USB3 adapter port
1442  *
1443  * Returns the actual established link rate of a USB3 adapter in Mb/s.
1444  * If the link is not up returns %0. Negative errno in case of failure.
1445  */
1446 int usb4_usb3_port_actual_link_rate(struct tb_port *port)
1447 {
1448 	int ret, lr;
1449 	u32 val;
1450 
1451 	if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
1452 		return -EINVAL;
1453 
1454 	ret = tb_port_read(port, &val, TB_CFG_PORT,
1455 			   port->cap_adap + ADP_USB3_CS_4, 1);
1456 	if (ret)
1457 		return ret;
1458 
1459 	if (!(val & ADP_USB3_CS_4_ULV))
1460 		return 0;
1461 
1462 	lr = val & ADP_USB3_CS_4_ALR_MASK;
1463 	return lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
1464 }
1465 
1466 static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
1467 {
1468 	int ret;
1469 	u32 val;
1470 
1471 	if (!tb_port_is_usb3_down(port))
1472 		return -EINVAL;
1473 	if (tb_route(port->sw))
1474 		return -EINVAL;
1475 
1476 	ret = tb_port_read(port, &val, TB_CFG_PORT,
1477 			   port->cap_adap + ADP_USB3_CS_2, 1);
1478 	if (ret)
1479 		return ret;
1480 
1481 	if (request)
1482 		val |= ADP_USB3_CS_2_CMR;
1483 	else
1484 		val &= ~ADP_USB3_CS_2_CMR;
1485 
1486 	ret = tb_port_write(port, &val, TB_CFG_PORT,
1487 			    port->cap_adap + ADP_USB3_CS_2, 1);
1488 	if (ret)
1489 		return ret;
1490 
1491 	/*
1492 	 * We can use val here directly as the CMR bit is in the same place
1493 	 * as HCA. Just mask out others.
1494 	 */
1495 	val &= ADP_USB3_CS_2_CMR;
1496 	return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1,
1497 				      ADP_USB3_CS_1_HCA, val, 1500);
1498 }
1499 
1500 static inline int usb4_usb3_port_set_cm_request(struct tb_port *port)
1501 {
1502 	return usb4_usb3_port_cm_request(port, true);
1503 }
1504 
1505 static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port)
1506 {
1507 	return usb4_usb3_port_cm_request(port, false);
1508 }
1509 
1510 static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale)
1511 {
1512 	unsigned long uframes;
1513 
1514 	uframes = bw * 512UL << scale;
1515 	return DIV_ROUND_CLOSEST(uframes * 8000, 1000 * 1000);
1516 }
1517 
1518 static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale)
1519 {
1520 	unsigned long uframes;
1521 
1522 	/* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */
1523 	uframes = ((unsigned long)mbps * 1000 *  1000) / 8000;
1524 	return DIV_ROUND_UP(uframes, 512UL << scale);
1525 }
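
/*
 * Rough round-trip check of the two conversions above with scale 0
 * (plain arithmetic, for illustration only):
 *
 *	mbps_to_usb3_bw(1000, 0): 1000 * 1000 * 1000 / 8000 = 125000,
 *	DIV_ROUND_UP(125000, 512) = 245
 *
 *	usb3_bw_to_mbps(245, 0): 245 * 512 = 125440,
 *	DIV_ROUND_CLOSEST(125440 * 8000, 1000000) = 1004
 *
 * so a request for 1000 Mb/s rounds up to 245 bandwidth units which
 * read back as roughly 1004 Mb/s.
 */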
1526 
1527 static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port,
1528 						   int *upstream_bw,
1529 						   int *downstream_bw)
1530 {
1531 	u32 val, bw, scale;
1532 	int ret;
1533 
1534 	ret = tb_port_read(port, &val, TB_CFG_PORT,
1535 			   port->cap_adap + ADP_USB3_CS_2, 1);
1536 	if (ret)
1537 		return ret;
1538 
1539 	ret = tb_port_read(port, &scale, TB_CFG_PORT,
1540 			   port->cap_adap + ADP_USB3_CS_3, 1);
1541 	if (ret)
1542 		return ret;
1543 
1544 	scale &= ADP_USB3_CS_3_SCALE_MASK;
1545 
1546 	bw = val & ADP_USB3_CS_2_AUBW_MASK;
1547 	*upstream_bw = usb3_bw_to_mbps(bw, scale);
1548 
1549 	bw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT;
1550 	*downstream_bw = usb3_bw_to_mbps(bw, scale);
1551 
1552 	return 0;
1553 }
1554 
1555 /**
1556  * usb4_usb3_port_allocated_bandwidth() - Bandwidth allocated for USB3
1557  * @port: USB3 adapter port
1558  * @upstream_bw: Allocated upstream bandwidth is stored here
1559  * @downstream_bw: Allocated downstream bandwidth is stored here
1560  *
1561  * Stores currently allocated USB3 bandwidth into @upstream_bw and
1562  * @downstream_bw in Mb/s. Returns %0 in case of success and negative
1563  * errno in failure.
1564  */
1565 int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
1566 				       int *downstream_bw)
1567 {
1568 	int ret;
1569 
1570 	ret = usb4_usb3_port_set_cm_request(port);
1571 	if (ret)
1572 		return ret;
1573 
1574 	ret = usb4_usb3_port_read_allocated_bandwidth(port, upstream_bw,
1575 						      downstream_bw);
1576 	usb4_usb3_port_clear_cm_request(port);
1577 
1578 	return ret;
1579 }
1580 
1581 static int usb4_usb3_port_read_consumed_bandwidth(struct tb_port *port,
1582 						  int *upstream_bw,
1583 						  int *downstream_bw)
1584 {
1585 	u32 val, bw, scale;
1586 	int ret;
1587 
1588 	ret = tb_port_read(port, &val, TB_CFG_PORT,
1589 			   port->cap_adap + ADP_USB3_CS_1, 1);
1590 	if (ret)
1591 		return ret;
1592 
1593 	ret = tb_port_read(port, &scale, TB_CFG_PORT,
1594 			   port->cap_adap + ADP_USB3_CS_3, 1);
1595 	if (ret)
1596 		return ret;
1597 
1598 	scale &= ADP_USB3_CS_3_SCALE_MASK;
1599 
1600 	bw = val & ADP_USB3_CS_1_CUBW_MASK;
1601 	*upstream_bw = usb3_bw_to_mbps(bw, scale);
1602 
1603 	bw = (val & ADP_USB3_CS_1_CDBW_MASK) >> ADP_USB3_CS_1_CDBW_SHIFT;
1604 	*downstream_bw = usb3_bw_to_mbps(bw, scale);
1605 
1606 	return 0;
1607 }
1608 
1609 static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
1610 						    int upstream_bw,
1611 						    int downstream_bw)
1612 {
1613 	u32 val, ubw, dbw, scale;
1614 	int ret;
1615 
1616 	/* Read the used scale, hardware default is 0 */
1617 	ret = tb_port_read(port, &scale, TB_CFG_PORT,
1618 			   port->cap_adap + ADP_USB3_CS_3, 1);
1619 	if (ret)
1620 		return ret;
1621 
1622 	scale &= ADP_USB3_CS_3_SCALE_MASK;
1623 	ubw = mbps_to_usb3_bw(upstream_bw, scale);
1624 	dbw = mbps_to_usb3_bw(downstream_bw, scale);
1625 
1626 	ret = tb_port_read(port, &val, TB_CFG_PORT,
1627 			   port->cap_adap + ADP_USB3_CS_2, 1);
1628 	if (ret)
1629 		return ret;
1630 
1631 	val &= ~(ADP_USB3_CS_2_AUBW_MASK | ADP_USB3_CS_2_ADBW_MASK);
1632 	val |= dbw << ADP_USB3_CS_2_ADBW_SHIFT;
1633 	val |= ubw;
1634 
1635 	return tb_port_write(port, &val, TB_CFG_PORT,
1636 			     port->cap_adap + ADP_USB3_CS_2, 1);
1637 }
1638 
1639 /**
1640  * usb4_usb3_port_allocate_bandwidth() - Allocate bandwidth for USB3
1641  * @port: USB3 adapter port
1642  * @upstream_bw: New upstream bandwidth
1643  * @downstream_bw: New downstream bandwidth
1644  *
1645  * This can be used to set how much bandwidth is allocated for the USB3
1646  * tunneled isochronous traffic. @upstream_bw and @downstream_bw are the
1647  * new values programmed to the USB3 adapter allocation registers. If
1648  * the values are lower than what is currently consumed the allocation
1649  * is set to what is currently consumed instead (consumed bandwidth
1650  * cannot be taken away by CM). The actual new values are returned in
1651  * @upstream_bw and @downstream_bw.
1652  *
1653  * Returns %0 in case of success and negative errno if there was a
1654  * failure.
1655  */
1656 int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
1657 				      int *downstream_bw)
1658 {
1659 	int ret, consumed_up, consumed_down, allocate_up, allocate_down;
1660 
1661 	ret = usb4_usb3_port_set_cm_request(port);
1662 	if (ret)
1663 		return ret;
1664 
1665 	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
1666 						     &consumed_down);
1667 	if (ret)
1668 		goto err_request;
1669 
1670 	/* Don't allow it to go lower than what is consumed */
1671 	allocate_up = max(*upstream_bw, consumed_up);
1672 	allocate_down = max(*downstream_bw, consumed_down);
1673 
1674 	ret = usb4_usb3_port_write_allocated_bandwidth(port, allocate_up,
1675 						       allocate_down);
1676 	if (ret)
1677 		goto err_request;
1678 
1679 	*upstream_bw = allocate_up;
1680 	*downstream_bw = allocate_down;
1681 
1682 err_request:
1683 	usb4_usb3_port_clear_cm_request(port);
1684 	return ret;
1685 }
1686 
1687 /**
1688  * usb4_usb3_port_release_bandwidth() - Release allocated USB3 bandwidth
1689  * @port: USB3 adapter port
1690  * @upstream_bw: New allocated upstream bandwidth
1691  * @downstream_bw: New allocated downstream bandwidth
1692  *
1693  * Releases USB3 allocated bandwidth down to what is actually consumed.
1694  * The new bandwidth is returned in @upstream_bw and @downstream_bw.
1695  *
1696  * Returns %0 in case of success and negative errno in case of failure.
1697  */
1698 int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
1699 				     int *downstream_bw)
1700 {
1701 	int ret, consumed_up, consumed_down;
1702 
1703 	ret = usb4_usb3_port_set_cm_request(port);
1704 	if (ret)
1705 		return ret;
1706 
1707 	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
1708 						     &consumed_down);
1709 	if (ret)
1710 		goto err_request;
1711 
1712 	/*
1713 	 * Always keep 1000 Mb/s to make sure xHCI has at least some
1714 	 * bandwidth available for isochronous traffic.
1715 	 */
1716 	if (consumed_up < 1000)
1717 		consumed_up = 1000;
1718 	if (consumed_down < 1000)
1719 		consumed_down = 1000;
1720 
1721 	ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up,
1722 						       consumed_down);
1723 	if (ret)
1724 		goto err_request;
1725 
1726 	*upstream_bw = consumed_up;
1727 	*downstream_bw = consumed_down;
1728 
1729 err_request:
1730 	usb4_usb3_port_clear_cm_request(port);
1731 	return ret;
1732 }
1733