// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>

#include "tb.h"

/* Switch NVM support */

struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

/*
 * Hold the NVM authentication failure status per switch. This
 * information needs to stay around even when the switch gets power
 * cycled so we keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);

static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}

static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	if (!st) {
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}

static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}

static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size;
	const u8 *buf;
	int ret;

	ret = tb_nvm_validate(sw->nvm);
	if (ret)
		return ret;

	ret = tb_nvm_write_headers(sw->nvm);
	if (ret)
		return ret;

	buf = sw->nvm->buf_data_start;
	image_size = sw->nvm->buf_data_size;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
	else
		ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
	if (ret)
		return ret;

	sw->nvm->flushed = true;
	return 0;
}

static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
{
	int ret = 0;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		u32 status;

		ret = tb_domain_disconnect_all_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this if
		 * everything goes well so getting a timeout is expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		if (!ret || ret == -ETIMEDOUT)
			return 0;

		/*
		 * Any error from the update auth operation requires power
		 * cycling of the host router.
		 */
		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
			nvm_set_auth_status(sw, status);
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return ret;
}

static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	switch (ret) {
	case 0:
	case -ETIMEDOUT:
	case -EACCES:
	case -EINVAL:
		/* Power cycle is required */
		break;
	default:
		return ret;
	}

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get timeout for a while). Once
	 * we get a response the device needs to be power cycled in
	 * order for the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;
		if (ret > 0) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}

static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	/*
	 * During host router NVM upgrade we should not allow the root
	 * port to go into D3cold because some root ports cannot trigger
	 * PME themselves. To be on the safe side keep the root port in
	 * D0 during the whole upgrade process.
	 */
	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_get_noresume(&root_port->dev);
}

static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_put(&root_port->dev);
}

static inline bool nvm_readable(struct tb_switch *sw)
{
	if (tb_switch_is_usb4(sw)) {
		/*
		 * USB4 devices must support NVM operations but they are
		 * optional for hosts. Therefore we query the NVM sector
		 * size here and if it is supported assume NVM
		 * operations are implemented.
		 */
		return usb4_switch_nvm_sector_size(sw) > 0;
	}

	/* Thunderbolt 2 and 3 devices support NVM through DMA port */
	return !!sw->dma_port;
}

static inline bool nvm_upgradeable(struct tb_switch *sw)
{
	if (sw->no_nvm_upgrade)
		return false;
	return nvm_readable(sw);
}

static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
{
	int ret;

	if (tb_switch_is_usb4(sw)) {
		if (auth_only) {
			ret = usb4_switch_nvm_set_offset(sw, 0);
			if (ret)
				return ret;
		}
		sw->nvm->authenticating = true;
		return usb4_switch_nvm_authenticate(sw);
	}
	if (auth_only)
		return -EOPNOTSUPP;

	sw->nvm->authenticating = true;
	if (!tb_route(sw)) {
		nvm_authenticate_start_dma_port(sw);
		ret = nvm_authenticate_host_dma_port(sw);
	} else {
		ret = nvm_authenticate_device_dma_port(sw);
	}

	return ret;
}

/**
 * tb_switch_nvm_read() - Read router NVM
 * @sw: Router whose NVM to read
 * @address: Start address on the NVM
 * @buf: Buffer where the read data is copied
 * @size: Size of the buffer in bytes
 *
 * Reads from router NVM and returns the requested data in @buf. Locking
 * is up to the caller. Returns %0 on success and negative errno in case
 * of failure.
 */
int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
		       size_t size)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_nvm_read(sw, address, buf, size);
	return dma_port_flash_read(sw->dma_port, address, buf, size);
}
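
/*
 * Example usage (a minimal sketch; the start address and 16-byte length
 * are arbitrary, for illustration only): read the beginning of the
 * router NVM into a local buffer. The required locking is up to the
 * caller, as noted above.
 *
 *	u8 data[16];
 *	int ret;
 *
 *	ret = tb_switch_nvm_read(sw, 0, data, sizeof(data));
 *	if (ret)
 *		return ret;
 */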

static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = tb_switch_nvm_read(sw, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	/*
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	 */
	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

	return ret;
}

static int tb_switch_nvm_add(struct tb_switch *sw)
{
	struct tb_nvm *nvm;
	int ret;

	if (!nvm_readable(sw))
		return 0;

	nvm = tb_nvm_alloc(&sw->dev);
	if (IS_ERR(nvm)) {
		ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
		goto err_nvm;
	}

	ret = tb_nvm_read_version(nvm);
	if (ret)
		goto err_nvm;

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (!sw->safe_mode) {
		ret = tb_nvm_add_active(nvm, nvm_read);
		if (ret)
			goto err_nvm;
	}

	if (!sw->no_nvm_upgrade) {
		ret = tb_nvm_add_non_active(nvm, nvm_write);
		if (ret)
			goto err_nvm;
	}

	sw->nvm = nvm;
	return 0;

err_nvm:
	tb_sw_dbg(sw, "NVM upgrade disabled\n");
	sw->no_nvm_upgrade = true;
	if (!IS_ERR(nvm))
		tb_nvm_free(nvm);

	return ret;
}

static void tb_switch_nvm_remove(struct tb_switch *sw)
{
	struct tb_nvm *nvm;

	nvm = sw->nvm;
	sw->nvm = NULL;

	if (!nvm)
		return;

	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);

	tb_nvm_free(nvm);
}

/* port utility functions */

static const char *tb_port_type(const struct tb_regs_port_header *port)
{
	switch (port->type >> 16) {
	case 0:
		switch ((u8) port->type) {
		case 0:
			return "Inactive";
		case 1:
			return "Port";
		case 2:
			return "NHI";
		default:
			return "unknown";
		}
	case 0x2:
		return "Ethernet";
	case 0x8:
		return "SATA";
	case 0xe:
		return "DP/HDMI";
	case 0x10:
		return "PCIe";
	case 0x20:
		return "USB";
	default:
		return "unknown";
	}
}

static void tb_dump_port(struct tb *tb, const struct tb_port *port)
{
	const struct tb_regs_port_header *regs = &port->config;

	tb_dbg(tb,
	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
	       regs->port_number, regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version, tb_port_type(regs),
	       regs->type);
	tb_dbg(tb, "  Max hop id (in/out): %d/%d\n",
	       regs->max_in_hop_id, regs->max_out_hop_id);
	tb_dbg(tb, "  Max counters: %d\n", regs->max_counters);
	tb_dbg(tb, "  NFC Credits: %#x\n", regs->nfc_credits);
	tb_dbg(tb, "  Credits (total/control): %u/%u\n", port->total_credits,
	       port->ctl_credits);
}

/**
 * tb_port_state() - get connectedness state of a port
 * @port: the port to check
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
int tb_port_state(struct tb_port *port)
{
	struct tb_cap_phy phy;
	int res;

	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}
	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
	if (res)
		return res;
	return phy.state;
}

/**
 * tb_wait_for_port() - wait for a port to become ready
 * @port: Port to wait
 * @wait_if_unplugged: Wait also when port is unplugged
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the link
 * has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
	int retries = 10;
	int state;

	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

	while (retries--) {
		state = tb_port_state(port);
		switch (state) {
		case TB_PORT_DISABLED:
			tb_port_dbg(port, "is disabled (state: 0)\n");
			return 0;

		case TB_PORT_UNPLUGGED:
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_dbg(port,
					    "is unplugged (state: 7), retrying...\n");
				msleep(100);
				break;
			}
			tb_port_dbg(port, "is unplugged (state: 7)\n");
			return 0;

		case TB_PORT_UP:
		case TB_PORT_TX_CL0S:
		case TB_PORT_RX_CL0S:
		case TB_PORT_CL1:
		case TB_PORT_CL2:
			tb_port_dbg(port, "is connected, link is up (state: %d)\n", state);
			return 1;

		default:
			if (state < 0)
				return state;

			/*
			 * After plug-in the state is TB_PORT_CONNECTING. Give it some
			 * time.
			 */
			tb_port_dbg(port,
				    "is connected, link is not up (state: %d), retrying...\n",
				    state);
			msleep(100);
		}
	}
	tb_port_warn(port,
		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
}
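
/*
 * Example usage (a minimal sketch; the loop context is illustrative):
 * during switch resume a caller typically waits even for unplugged
 * ports, using the three-way return value documented above.
 *
 *	int up = tb_wait_for_port(port, true);
 *	if (up < 0)
 *		return up;	// config space access failed
 *	if (!up)
 *		continue;	// nothing connected, skip this port
 */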

/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 * @port: Port to add/remove NFC credits
 * @credits: Credits to add/remove
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
	u32 nfc_credits;

	if (credits == 0 || port->sw->is_unplugged)
		return 0;

	/*
	 * USB4 restricts programming NFC buffers to lane adapters only
	 * so skip other ports.
	 */
	if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
		return 0;

	nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	if (credits < 0)
		credits = max_t(int, -nfc_credits, credits);

	nfc_credits += credits;

	tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
		    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);

	port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
	port->config.nfc_credits |= nfc_credits;

	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, ADP_CS_4, 1);
}
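
/*
 * Example usage (a minimal sketch; the credit count is illustrative):
 * grant a path three extra NFC credits and release them again with a
 * negative amount when the path is torn down.
 *
 *	ret = tb_port_add_nfc_credits(port, 3);
 *	...
 *	ret = tb_port_add_nfc_credits(port, -3);
 */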

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 * @port: Port whose counters to clear
 * @counter: Counter index to clear
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
	u32 zero[3] = { 0, 0, 0 };

	tb_port_dbg(port, "clearing counter %d\n", counter);
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}

/**
 * tb_port_unlock() - Unlock downstream port
 * @port: Port to unlock
 *
 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
 * downstream router accessible for CM.
 */
int tb_port_unlock(struct tb_port *port)
{
	if (tb_switch_is_icm(port->sw))
		return 0;
	if (!tb_port_is_null(port))
		return -EINVAL;
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_unlock(port);
	return 0;
}

static int __tb_port_enable(struct tb_port *port, bool enable)
{
	int ret;
	u32 phy;

	if (!tb_port_is_null(port))
		return -EINVAL;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (enable)
		phy &= ~LANE_ADP_CS_1_LD;
	else
		phy |= LANE_ADP_CS_1_LD;

	ret = tb_port_write(port, &phy, TB_CFG_PORT,
			    port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	tb_port_dbg(port, "lane %s\n", str_enabled_disabled(enable));
	return 0;
}

/**
 * tb_port_enable() - Enable lane adapter
 * @port: Port to enable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to enable them.
 */
int tb_port_enable(struct tb_port *port)
{
	return __tb_port_enable(port, true);
}

/**
 * tb_port_disable() - Disable lane adapter
 * @port: Port to disable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to disable them.
 */
int tb_port_disable(struct tb_port *port)
{
	return __tb_port_enable(port, false);
}

static int tb_port_reset(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		return port->cap_usb4 ? usb4_port_reset(port) : 0;
	return tb_lc_reset_port(port);
}

/*
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
	int res;
	int cap;

	INIT_LIST_HEAD(&port->list);

	/* Control adapter does not have configuration space */
	if (!port->port)
		return 0;

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res) {
		if (res == -ENODEV) {
			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
			       port->port);
			port->disabled = true;
			return 0;
		}
		return res;
	}

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");

		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
		if (cap > 0)
			port->cap_usb4 = cap;

		/*
		 * For USB4 ports the buffers allocated for the control
		 * path can be read from the path config space. For
		 * legacy devices we use a hard-coded value.
		 */
		if (port->cap_usb4) {
			struct tb_regs_hop hop;

			if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
				port->ctl_credits = hop.initial_credits;
		}
		if (!port->ctl_credits)
			port->ctl_credits = 2;

	} else {
		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
		if (cap > 0)
			port->cap_adap = cap;
	}

	port->total_credits =
		(port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
		ADP_CS_4_TOTAL_BUFFERS_SHIFT;

	tb_dump_port(port->sw->tb, port);
	return 0;
}

static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
			       int max_hopid)
{
	int port_max_hopid;
	struct ida *ida;

	if (in) {
		port_max_hopid = port->config.max_in_hop_id;
		ida = &port->in_hopids;
	} else {
		port_max_hopid = port->config.max_out_hop_id;
		ida = &port->out_hopids;
	}

	/*
	 * NHI can use HopIDs 1-max. For other adapters HopIDs 0-7 are
	 * reserved.
	 */
	if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
		min_hopid = TB_PATH_MIN_HOPID;

	if (max_hopid < 0 || max_hopid > port_max_hopid)
		max_hopid = port_max_hopid;

	return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
}

/**
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
}

/**
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
}

/**
 * tb_port_release_in_hopid() - Release allocated input HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->in_hopids, hopid);
}

/**
 * tb_port_release_out_hopid() - Release allocated output HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->out_hopids, hopid);
}
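
/*
 * Example usage (a minimal sketch): reserve an input HopID for a new
 * path and return it to the pool if the rest of the setup fails.
 * Passing -1 as @max_hopid means "up to the port maximum", as handled
 * in tb_port_alloc_hopid() above.
 *
 *	hopid = tb_port_alloc_in_hopid(port, TB_PATH_MIN_HOPID, -1);
 *	if (hopid < 0)
 *		return hopid;
 *	...
 *	tb_port_release_in_hopid(port, hopid);
 */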

static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
					  const struct tb_switch *sw)
{
	u64 mask = (1ULL << parent->config.depth * 8) - 1;

	return (tb_route(parent) & mask) == (tb_route(sw) & mask);
}

/**
 * tb_next_port_on_path() - Return next port for given port on a path
 * @start: Start port of the walk
 * @end: End port of the walk
 * @prev: Previous port (%NULL if this is the first)
 *
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If @prev is a dual
 * link port, the function follows that link and returns another end on
 * that same link.
 *
 * If the @end port has been reached, return %NULL.
 *
 * Domain tb->lock must be held when this function is called.
 */
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
				     struct tb_port *prev)
{
	struct tb_port *next;

	if (!prev)
		return start;

	if (prev->sw == end->sw) {
		if (prev == end)
			return NULL;
		return end;
	}

	if (tb_switch_is_reachable(prev->sw, end->sw)) {
		next = tb_port_at(tb_route(end->sw), prev->sw);
		/* Walk down the topology if next == prev */
		if (prev->remote &&
		    (next == prev || next->dual_link_port == prev))
			next = prev->remote;
	} else {
		if (tb_is_upstream_port(prev)) {
			next = prev->remote;
		} else {
			next = tb_upstream_port(prev->sw);
			/*
			 * Keep the same link if prev and next are both
			 * dual link ports.
			 */
			if (next->dual_link_port &&
			    next->link_nr != prev->link_nr) {
				next = next->dual_link_port;
			}
		}
	}

	return next != prev ? next : NULL;
}
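
/*
 * Example usage (a minimal sketch; @src and @dst are illustrative):
 * visit every port between the two ends of a path. Starting with the
 * iterator set to %NULL yields @src first and the walk stops when the
 * function returns %NULL after @dst. The domain tb->lock must be held.
 *
 *	struct tb_port *p = NULL;
 *
 *	while ((p = tb_next_port_on_path(src, dst, p)) != NULL)
 *		tb_port_dbg(p, "on path\n");
 */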

/**
 * tb_port_get_link_speed() - Get current link speed
 * @port: Port to check (USB4 or CIO)
 *
 * Returns link speed in Gb/s or negative errno in case of failure.
 */
int tb_port_get_link_speed(struct tb_port *port)
{
	u32 val, speed;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
		LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;

	switch (speed) {
	case LANE_ADP_CS_1_CURRENT_SPEED_GEN4:
		return 40;
	case LANE_ADP_CS_1_CURRENT_SPEED_GEN3:
		return 20;
	default:
		return 10;
	}
}

/**
 * tb_port_get_link_generation() - Returns link generation
 * @port: Lane adapter
 *
 * Returns link generation as a number or negative errno in case of
 * failure. Does not distinguish between Thunderbolt 1 and Thunderbolt 2
 * links so for those always returns 2.
 */
int tb_port_get_link_generation(struct tb_port *port)
{
	int ret;

	ret = tb_port_get_link_speed(port);
	if (ret < 0)
		return ret;

	switch (ret) {
	case 40:
		return 4;
	case 20:
		return 3;
	default:
		return 2;
	}
}

static const char *width_name(enum tb_link_width width)
{
	switch (width) {
	case TB_LINK_WIDTH_SINGLE:
		return "symmetric, single lane";
	case TB_LINK_WIDTH_DUAL:
		return "symmetric, dual lanes";
	case TB_LINK_WIDTH_ASYM_TX:
		return "asymmetric, 3 transmitters, 1 receiver";
	case TB_LINK_WIDTH_ASYM_RX:
		return "asymmetric, 3 receivers, 1 transmitter";
	default:
		return "unknown";
	}
}

/**
 * tb_port_get_link_width() - Get current link width
 * @port: Port to check (USB4 or CIO)
 *
 * Returns the link width as encoded in &enum tb_link_width or negative
 * errno in case of failure.
 */
int tb_port_get_link_width(struct tb_port *port)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	/* Matches the values in enum tb_link_width */
	return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
		LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
}

/**
 * tb_port_width_supported() - Is the given link width supported
 * @port: Port to check
 * @width: Widths to check (bitmask)
 *
 * Can be called for any lane adapter. Checks if the given @width is
 * supported by the hardware and returns %true if it is.
 */
bool tb_port_width_supported(struct tb_port *port, unsigned int width)
{
	u32 phy, widths;
	int ret;

	if (!port->cap_phy)
		return false;

	if (width & (TB_LINK_WIDTH_ASYM_TX | TB_LINK_WIDTH_ASYM_RX)) {
		if (tb_port_get_link_generation(port) < 4 ||
		    !usb4_port_asym_supported(port))
			return false;
	}

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);
	if (ret)
		return false;

	/*
	 * The field encoding is the same as &enum tb_link_width (which is
	 * passed to @width).
	 */
	widths = FIELD_GET(LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK, phy);
	return widths & width;
}
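
/*
 * Example usage (a minimal sketch; the fallback policy is illustrative):
 * probe for asymmetric width support before requesting it and fall back
 * to a symmetric dual-lane link otherwise.
 *
 *	if (tb_port_width_supported(port, TB_LINK_WIDTH_ASYM_TX))
 *		ret = tb_port_set_link_width(port, TB_LINK_WIDTH_ASYM_TX);
 *	else
 *		ret = tb_port_set_link_width(port, TB_LINK_WIDTH_DUAL);
 */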
1025e111fb92SGil Fine 
10260e14dd5eSMika Westerberg /**
10270e14dd5eSMika Westerberg  * tb_port_set_link_width() - Set target link width of the lane adapter
10280e14dd5eSMika Westerberg  * @port: Lane adapter
1029e111fb92SGil Fine  * @width: Target link width
10300e14dd5eSMika Westerberg  *
10310e14dd5eSMika Westerberg  * Sets the target link width of the lane adapter to @width. Does not
10320e14dd5eSMika Westerberg  * enable/disable lane bonding. For that call tb_port_set_lane_bonding().
10330e14dd5eSMika Westerberg  *
10340e14dd5eSMika Westerberg  * Return: %0 in case of success and negative errno in case of error
10350e14dd5eSMika Westerberg  */
tb_port_set_link_width(struct tb_port * port,enum tb_link_width width)1036e111fb92SGil Fine int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width)
103791c0c120SMika Westerberg {
103891c0c120SMika Westerberg 	u32 val;
103991c0c120SMika Westerberg 	int ret;
104091c0c120SMika Westerberg 
104191c0c120SMika Westerberg 	if (!port->cap_phy)
104291c0c120SMika Westerberg 		return -EINVAL;
104391c0c120SMika Westerberg 
104491c0c120SMika Westerberg 	ret = tb_port_read(port, &val, TB_CFG_PORT,
104591c0c120SMika Westerberg 			   port->cap_phy + LANE_ADP_CS_1, 1);
104691c0c120SMika Westerberg 	if (ret)
104791c0c120SMika Westerberg 		return ret;
104891c0c120SMika Westerberg 
104991c0c120SMika Westerberg 	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
105091c0c120SMika Westerberg 	switch (width) {
1051e111fb92SGil Fine 	case TB_LINK_WIDTH_SINGLE:
1052e111fb92SGil Fine 		/* Gen 4 link cannot be single */
10537b85d751SGil Fine 		if (tb_port_get_link_generation(port) >= 4)
1054e111fb92SGil Fine 			return -EOPNOTSUPP;
105591c0c120SMika Westerberg 		val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
105691c0c120SMika Westerberg 			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
105791c0c120SMika Westerberg 		break;
10589b6933e9SGil Fine 
1059e111fb92SGil Fine 	case TB_LINK_WIDTH_DUAL:
10609b6933e9SGil Fine 		if (tb_port_get_link_generation(port) >= 4)
10619b6933e9SGil Fine 			return usb4_port_asym_set_link_width(port, width);
106291c0c120SMika Westerberg 		val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
106391c0c120SMika Westerberg 			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
106491c0c120SMika Westerberg 		break;
10659b6933e9SGil Fine 
10669b6933e9SGil Fine 	case TB_LINK_WIDTH_ASYM_TX:
10679b6933e9SGil Fine 	case TB_LINK_WIDTH_ASYM_RX:
10689b6933e9SGil Fine 		return usb4_port_asym_set_link_width(port, width);
10699b6933e9SGil Fine 
107091c0c120SMika Westerberg 	default:
107191c0c120SMika Westerberg 		return -EINVAL;
107291c0c120SMika Westerberg 	}
107391c0c120SMika Westerberg 
107491c0c120SMika Westerberg 	return tb_port_write(port, &val, TB_CFG_PORT,
107591c0c120SMika Westerberg 			     port->cap_phy + LANE_ADP_CS_1, 1);
107691c0c120SMika Westerberg }
107791c0c120SMika Westerberg 
10785cc0df9cSIsaac Hazan /**
10790e14dd5eSMika Westerberg  * tb_port_set_lane_bonding() - Enable/disable lane bonding
10800e14dd5eSMika Westerberg  * @port: Lane adapter
10810e14dd5eSMika Westerberg  * @bonding: enable/disable bonding
10820e14dd5eSMika Westerberg  *
10830e14dd5eSMika Westerberg  * Enables or disables lane bonding. This should be called after target
10840e14dd5eSMika Westerberg  * link width has been set (tb_port_set_link_width()). Note in most
10850e14dd5eSMika Westerberg  * cases one should use tb_port_lane_bonding_enable() instead to enable
10860e14dd5eSMika Westerberg  * lane bonding.
10870e14dd5eSMika Westerberg  *
10880e14dd5eSMika Westerberg  * Return: %0 in case of success and negative errno in case of error
10890e14dd5eSMika Westerberg  */
1090e111fb92SGil Fine static int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
10910e14dd5eSMika Westerberg {
10920e14dd5eSMika Westerberg 	u32 val;
10930e14dd5eSMika Westerberg 	int ret;
10940e14dd5eSMika Westerberg 
10950e14dd5eSMika Westerberg 	if (!port->cap_phy)
10960e14dd5eSMika Westerberg 		return -EINVAL;
10970e14dd5eSMika Westerberg 
10980e14dd5eSMika Westerberg 	ret = tb_port_read(port, &val, TB_CFG_PORT,
10990e14dd5eSMika Westerberg 			   port->cap_phy + LANE_ADP_CS_1, 1);
11000e14dd5eSMika Westerberg 	if (ret)
11010e14dd5eSMika Westerberg 		return ret;
11020e14dd5eSMika Westerberg 
11030e14dd5eSMika Westerberg 	if (bonding)
11040e14dd5eSMika Westerberg 		val |= LANE_ADP_CS_1_LB;
11050e14dd5eSMika Westerberg 	else
11060e14dd5eSMika Westerberg 		val &= ~LANE_ADP_CS_1_LB;
11070e14dd5eSMika Westerberg 
1108e111fb92SGil Fine 	return tb_port_write(port, &val, TB_CFG_PORT,
11090e14dd5eSMika Westerberg 			     port->cap_phy + LANE_ADP_CS_1, 1);
11100e14dd5eSMika Westerberg }
11110e14dd5eSMika Westerberg 
11120e14dd5eSMika Westerberg /**
11135cc0df9cSIsaac Hazan  * tb_port_lane_bonding_enable() - Enable bonding on port
11145cc0df9cSIsaac Hazan  * @port: port to enable
11155cc0df9cSIsaac Hazan  *
1116e7051beaSMika Westerberg  * Enable bonding by setting the link width of the port and the other
1117e7051beaSMika Westerberg  * port in case of a dual link port. Does not wait for the link to
1118e7051beaSMika Westerberg  * actually reach the bonded state, so the caller needs to call
1119e7051beaSMika Westerberg  * tb_port_wait_for_link_width() before enabling any paths through the
1120e7051beaSMika Westerberg  * link to make sure the link is in the expected state.
11215cc0df9cSIsaac Hazan  *
11225cc0df9cSIsaac Hazan  * Return: %0 in case of success and negative errno in case of error
11235cc0df9cSIsaac Hazan  */
11245cc0df9cSIsaac Hazan int tb_port_lane_bonding_enable(struct tb_port *port)
112591c0c120SMika Westerberg {
1126e111fb92SGil Fine 	enum tb_link_width width;
112791c0c120SMika Westerberg 	int ret;
112891c0c120SMika Westerberg 
112991c0c120SMika Westerberg 	/*
113091c0c120SMika Westerberg 	 * Enable lane bonding for both links if not already enabled by,
113191c0c120SMika Westerberg 	 * for example, the boot firmware.
113291c0c120SMika Westerberg 	 */
1133e111fb92SGil Fine 	width = tb_port_get_link_width(port);
1134e111fb92SGil Fine 	if (width == TB_LINK_WIDTH_SINGLE) {
1135e111fb92SGil Fine 		ret = tb_port_set_link_width(port, TB_LINK_WIDTH_DUAL);
113691c0c120SMika Westerberg 		if (ret)
11370e14dd5eSMika Westerberg 			goto err_lane0;
113891c0c120SMika Westerberg 	}
113991c0c120SMika Westerberg 
1140e111fb92SGil Fine 	width = tb_port_get_link_width(port->dual_link_port);
1141e111fb92SGil Fine 	if (width == TB_LINK_WIDTH_SINGLE) {
1142e111fb92SGil Fine 		ret = tb_port_set_link_width(port->dual_link_port,
1143e111fb92SGil Fine 					     TB_LINK_WIDTH_DUAL);
11440e14dd5eSMika Westerberg 		if (ret)
11450e14dd5eSMika Westerberg 			goto err_lane0;
114691c0c120SMika Westerberg 	}
114791c0c120SMika Westerberg 
1148e111fb92SGil Fine 	/*
1149e111fb92SGil Fine 	 * Only set bonding if the link was not already bonded. This
1150e111fb92SGil Fine 	 * avoids having the lane adapter re-enter the bonding state.
1151e111fb92SGil Fine 	 */
1152e9c62671SGil Fine 	if (width == TB_LINK_WIDTH_SINGLE && !tb_is_upstream_port(port)) {
11530e14dd5eSMika Westerberg 		ret = tb_port_set_lane_bonding(port, true);
11540e14dd5eSMika Westerberg 		if (ret)
11550e14dd5eSMika Westerberg 			goto err_lane1;
1156e111fb92SGil Fine 	}
1157e111fb92SGil Fine 
1158e111fb92SGil Fine 	/*
1159e111fb92SGil Fine 	 * When lane 0 bonding is set it will affect lane 1 too so
1160e111fb92SGil Fine 	 * update both.
1161e111fb92SGil Fine 	 */
1162e111fb92SGil Fine 	port->bonded = true;
1163e111fb92SGil Fine 	port->dual_link_port->bonded = true;
116491c0c120SMika Westerberg 
116591c0c120SMika Westerberg 	return 0;
11660e14dd5eSMika Westerberg 
11670e14dd5eSMika Westerberg err_lane1:
1168e111fb92SGil Fine 	tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE);
11690e14dd5eSMika Westerberg err_lane0:
1170e111fb92SGil Fine 	tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE);
1171e111fb92SGil Fine 
11720e14dd5eSMika Westerberg 	return ret;
117391c0c120SMika Westerberg }
117491c0c120SMika Westerberg 
11755cc0df9cSIsaac Hazan /**
11765cc0df9cSIsaac Hazan  * tb_port_lane_bonding_disable() - Disable bonding on port
11775cc0df9cSIsaac Hazan  * @port: port to disable
11785cc0df9cSIsaac Hazan  *
11795cc0df9cSIsaac Hazan  * Disable bonding by setting the link width of the port and the
11805cc0df9cSIsaac Hazan  * other port in case of a dual link port.
11815cc0df9cSIsaac Hazan  */
11825cc0df9cSIsaac Hazan void tb_port_lane_bonding_disable(struct tb_port *port)
118391c0c120SMika Westerberg {
11840e14dd5eSMika Westerberg 	tb_port_set_lane_bonding(port, false);
1185e111fb92SGil Fine 	tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE);
1186e111fb92SGil Fine 	tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE);
1187e111fb92SGil Fine 	port->dual_link_port->bonded = false;
1188e111fb92SGil Fine 	port->bonded = false;
118991c0c120SMika Westerberg }
119091c0c120SMika Westerberg 
1191e7051beaSMika Westerberg /**
1192e7051beaSMika Westerberg  * tb_port_wait_for_link_width() - Wait until link reaches specific width
1193e7051beaSMika Westerberg  * @port: Port to wait for
11949b6933e9SGil Fine  * @width: Expected link width (bitmask)
1195e7051beaSMika Westerberg  * @timeout_msec: Timeout in ms how long to wait
1196e7051beaSMika Westerberg  *
1197e7051beaSMika Westerberg  * Should be used after both ends of the link have been bonded (or
1198e7051beaSMika Westerberg  * bonding has been disabled) to wait until the link actually reaches
1199e111fb92SGil Fine  * the expected state. Returns %-ETIMEDOUT if the width was not reached
1200e111fb92SGil Fine  * within the given timeout, %0 if it was. Can be passed a mask of
1201e111fb92SGil Fine  * expected widths and succeeds if any of the widths is reached.
1202e7051beaSMika Westerberg  */
12039b6933e9SGil Fine int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width,
1204e7051beaSMika Westerberg 				int timeout_msec)
1205e7051beaSMika Westerberg {
1206e7051beaSMika Westerberg 	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
1207e7051beaSMika Westerberg 	int ret;
1208e7051beaSMika Westerberg 
1209e111fb92SGil Fine 	/* Gen 4 link does not support single lane */
12109b6933e9SGil Fine 	if ((width & TB_LINK_WIDTH_SINGLE) &&
12117b85d751SGil Fine 	    tb_port_get_link_generation(port) >= 4)
1212e111fb92SGil Fine 		return -EOPNOTSUPP;
1213e111fb92SGil Fine 
1214e7051beaSMika Westerberg 	do {
1215e7051beaSMika Westerberg 		ret = tb_port_get_link_width(port);
12160a2e1667SMika Westerberg 		if (ret < 0) {
12170a2e1667SMika Westerberg 			/*
12180a2e1667SMika Westerberg 			 * Sometimes we get port locked error when
12190a2e1667SMika Westerberg 			 * polling the lanes so we can ignore it and
12200a2e1667SMika Westerberg 			 * retry.
12210a2e1667SMika Westerberg 			 */
12220a2e1667SMika Westerberg 			if (ret != -EACCES)
1223e7051beaSMika Westerberg 				return ret;
12249b6933e9SGil Fine 		} else if (ret & width) {
1225e7051beaSMika Westerberg 			return 0;
12260a2e1667SMika Westerberg 		}
1227e7051beaSMika Westerberg 
1228e7051beaSMika Westerberg 		usleep_range(1000, 2000);
1229e7051beaSMika Westerberg 	} while (ktime_before(ktime_get(), timeout));
1230e7051beaSMika Westerberg 
1231e7051beaSMika Westerberg 	return -ETIMEDOUT;
1232e7051beaSMika Westerberg }
1233e7051beaSMika Westerberg 
123469fea377SMika Westerberg static int tb_port_do_update_credits(struct tb_port *port)
123569fea377SMika Westerberg {
123669fea377SMika Westerberg 	u32 nfc_credits;
123769fea377SMika Westerberg 	int ret;
123869fea377SMika Westerberg 
123969fea377SMika Westerberg 	ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
124069fea377SMika Westerberg 	if (ret)
124169fea377SMika Westerberg 		return ret;
124269fea377SMika Westerberg 
124369fea377SMika Westerberg 	if (nfc_credits != port->config.nfc_credits) {
124469fea377SMika Westerberg 		u32 total;
124569fea377SMika Westerberg 
124669fea377SMika Westerberg 		total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
124769fea377SMika Westerberg 			ADP_CS_4_TOTAL_BUFFERS_SHIFT;
124869fea377SMika Westerberg 
124969fea377SMika Westerberg 		tb_port_dbg(port, "total credits changed %u -> %u\n",
125069fea377SMika Westerberg 			    port->total_credits, total);
125169fea377SMika Westerberg 
125269fea377SMika Westerberg 		port->config.nfc_credits = nfc_credits;
125369fea377SMika Westerberg 		port->total_credits = total;
125469fea377SMika Westerberg 	}
125569fea377SMika Westerberg 
125669fea377SMika Westerberg 	return 0;
125769fea377SMika Westerberg }
125869fea377SMika Westerberg 
125969fea377SMika Westerberg /**
126069fea377SMika Westerberg  * tb_port_update_credits() - Re-read port total credits
126169fea377SMika Westerberg  * @port: Port to update
126269fea377SMika Westerberg  *
126369fea377SMika Westerberg  * After the link is bonded (or bonding was disabled) the port total
126469fea377SMika Westerberg  * credits may change, so this function needs to be called to re-read
126569fea377SMika Westerberg  * the credits. Also updates the second lane adapter.
126669fea377SMika Westerberg  */
126769fea377SMika Westerberg int tb_port_update_credits(struct tb_port *port)
126869fea377SMika Westerberg {
126969fea377SMika Westerberg 	int ret;
127069fea377SMika Westerberg 
127169fea377SMika Westerberg 	ret = tb_port_do_update_credits(port);
127269fea377SMika Westerberg 	if (ret)
127369fea377SMika Westerberg 		return ret;
1274*440fba89SMika Westerberg 
1275*440fba89SMika Westerberg 	if (!port->dual_link_port)
1276*440fba89SMika Westerberg 		return 0;
127769fea377SMika Westerberg 	return tb_port_do_update_credits(port->dual_link_port);
127869fea377SMika Westerberg }
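
/*
 * Illustrative sketch only: how the helpers above fit together when
 * bonding a link. Error handling is simplified and the helper name and
 * timeout are hypothetical; this loosely mirrors what the driver does
 * when it brings a link to the bonded state.
 */
static int __maybe_unused example_bond_and_wait(struct tb_port *port)
{
	int ret;

	ret = tb_port_lane_bonding_enable(port);
	if (ret)
		return ret;

	/* Wait up to 100 ms for both lanes to reach the bonded state */
	ret = tb_port_wait_for_link_width(port, TB_LINK_WIDTH_DUAL, 100);
	if (ret) {
		tb_port_lane_bonding_disable(port);
		return ret;
	}

	/* Total buffer allocation may change once the link is bonded */
	return tb_port_update_credits(port);
}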
127969fea377SMika Westerberg 
1280fdb0887cSMika Westerberg static int tb_port_start_lane_initialization(struct tb_port *port)
1281fdb0887cSMika Westerberg {
1282fdb0887cSMika Westerberg 	int ret;
1283fdb0887cSMika Westerberg 
1284fdb0887cSMika Westerberg 	if (tb_switch_is_usb4(port->sw))
1285fdb0887cSMika Westerberg 		return 0;
1286fdb0887cSMika Westerberg 
1287fdb0887cSMika Westerberg 	ret = tb_lc_start_lane_initialization(port);
1288fdb0887cSMika Westerberg 	return ret == -EINVAL ? 0 : ret;
1289fdb0887cSMika Westerberg }
1290fdb0887cSMika Westerberg 
12913fb10ea4SRajmohan Mani /*
12923fb10ea4SRajmohan Mani  * Returns true if the port had something (router, XDomain) connected
12933fb10ea4SRajmohan Mani  * before suspend.
12943fb10ea4SRajmohan Mani  */
12953fb10ea4SRajmohan Mani static bool tb_port_resume(struct tb_port *port)
12963fb10ea4SRajmohan Mani {
12973fb10ea4SRajmohan Mani 	bool has_remote = tb_port_has_remote(port);
12983fb10ea4SRajmohan Mani 
12993fb10ea4SRajmohan Mani 	if (port->usb4) {
13003fb10ea4SRajmohan Mani 		usb4_port_device_resume(port->usb4);
13013fb10ea4SRajmohan Mani 	} else if (!has_remote) {
13023fb10ea4SRajmohan Mani 		/*
13033fb10ea4SRajmohan Mani 		 * For disconnected downstream lane adapters start lane
13043fb10ea4SRajmohan Mani 		 * initialization now so we detect future connects.
13053fb10ea4SRajmohan Mani 		 *
13063fb10ea4SRajmohan Mani 		 * For XDomain start the lane initialization now so the
13073fb10ea4SRajmohan Mani 		 * link gets re-established.
13083fb10ea4SRajmohan Mani 		 *
13093fb10ea4SRajmohan Mani 		 * This is only needed for non-USB4 ports.
13103fb10ea4SRajmohan Mani 		 */
13113fb10ea4SRajmohan Mani 		if (!tb_is_upstream_port(port) || port->xdomain)
13123fb10ea4SRajmohan Mani 			tb_port_start_lane_initialization(port);
13133fb10ea4SRajmohan Mani 	}
13143fb10ea4SRajmohan Mani 
13153fb10ea4SRajmohan Mani 	return has_remote || port->xdomain;
13163fb10ea4SRajmohan Mani }
13173fb10ea4SRajmohan Mani 
1318fb19fac1SMika Westerberg /**
1319e78db6f0SMika Westerberg  * tb_port_is_enabled() - Is the adapter port enabled
1320e78db6f0SMika Westerberg  * @port: Port to check
1321e78db6f0SMika Westerberg  */
1322e78db6f0SMika Westerberg bool tb_port_is_enabled(struct tb_port *port)
1323e78db6f0SMika Westerberg {
1324e78db6f0SMika Westerberg 	switch (port->config.type) {
1325e78db6f0SMika Westerberg 	case TB_TYPE_PCIE_UP:
1326e78db6f0SMika Westerberg 	case TB_TYPE_PCIE_DOWN:
1327e78db6f0SMika Westerberg 		return tb_pci_port_is_enabled(port);
1328e78db6f0SMika Westerberg 
13294f807e47SMika Westerberg 	case TB_TYPE_DP_HDMI_IN:
13304f807e47SMika Westerberg 	case TB_TYPE_DP_HDMI_OUT:
13314f807e47SMika Westerberg 		return tb_dp_port_is_enabled(port);
13324f807e47SMika Westerberg 
1333e6f81858SRajmohan Mani 	case TB_TYPE_USB3_UP:
1334e6f81858SRajmohan Mani 	case TB_TYPE_USB3_DOWN:
1335e6f81858SRajmohan Mani 		return tb_usb3_port_is_enabled(port);
1336e6f81858SRajmohan Mani 
1337e78db6f0SMika Westerberg 	default:
1338e78db6f0SMika Westerberg 		return false;
1339e78db6f0SMika Westerberg 	}
1340e78db6f0SMika Westerberg }
1341e78db6f0SMika Westerberg 
1342e78db6f0SMika Westerberg /**
1343e6f81858SRajmohan Mani  * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
1344e6f81858SRajmohan Mani  * @port: USB3 adapter port to check
1345e6f81858SRajmohan Mani  */
1346e6f81858SRajmohan Mani bool tb_usb3_port_is_enabled(struct tb_port *port)
1347e6f81858SRajmohan Mani {
1348e6f81858SRajmohan Mani 	u32 data;
1349e6f81858SRajmohan Mani 
1350e6f81858SRajmohan Mani 	if (tb_port_read(port, &data, TB_CFG_PORT,
1351e6f81858SRajmohan Mani 			 port->cap_adap + ADP_USB3_CS_0, 1))
1352e6f81858SRajmohan Mani 		return false;
1353e6f81858SRajmohan Mani 
1354e6f81858SRajmohan Mani 	return !!(data & ADP_USB3_CS_0_PE);
1355e6f81858SRajmohan Mani }
1356e6f81858SRajmohan Mani 
1357e6f81858SRajmohan Mani /**
1358e6f81858SRajmohan Mani  * tb_usb3_port_enable() - Enable USB3 adapter port
1359e6f81858SRajmohan Mani  * @port: USB3 adapter port to enable
1360e6f81858SRajmohan Mani  * @enable: Enable/disable the USB3 adapter
1361e6f81858SRajmohan Mani  */
1362e6f81858SRajmohan Mani int tb_usb3_port_enable(struct tb_port *port, bool enable)
1363e6f81858SRajmohan Mani {
1364e6f81858SRajmohan Mani 	u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
1365e6f81858SRajmohan Mani 			  : ADP_USB3_CS_0_V;
1366e6f81858SRajmohan Mani 
1367e6f81858SRajmohan Mani 	if (!port->cap_adap)
1368e6f81858SRajmohan Mani 		return -ENXIO;
1369e6f81858SRajmohan Mani 	return tb_port_write(port, &word, TB_CFG_PORT,
1370e6f81858SRajmohan Mani 			     port->cap_adap + ADP_USB3_CS_0, 1);
1371e6f81858SRajmohan Mani }
1372e6f81858SRajmohan Mani 
1373e6f81858SRajmohan Mani /**
13740414bec5SMika Westerberg  * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
13750414bec5SMika Westerberg  * @port: PCIe port to check
13760414bec5SMika Westerberg  */
13770414bec5SMika Westerberg bool tb_pci_port_is_enabled(struct tb_port *port)
13780414bec5SMika Westerberg {
13790414bec5SMika Westerberg 	u32 data;
13800414bec5SMika Westerberg 
1381778bfca3SMika Westerberg 	if (tb_port_read(port, &data, TB_CFG_PORT,
1382778bfca3SMika Westerberg 			 port->cap_adap + ADP_PCIE_CS_0, 1))
13830414bec5SMika Westerberg 		return false;
13840414bec5SMika Westerberg 
1385778bfca3SMika Westerberg 	return !!(data & ADP_PCIE_CS_0_PE);
13860414bec5SMika Westerberg }
13870414bec5SMika Westerberg 
13880414bec5SMika Westerberg /**
138993f36adeSMika Westerberg  * tb_pci_port_enable() - Enable PCIe adapter port
139093f36adeSMika Westerberg  * @port: PCIe port to enable
139193f36adeSMika Westerberg  * @enable: Enable/disable the PCIe adapter
139293f36adeSMika Westerberg  */
139393f36adeSMika Westerberg int tb_pci_port_enable(struct tb_port *port, bool enable)
139493f36adeSMika Westerberg {
1395778bfca3SMika Westerberg 	u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
139693f36adeSMika Westerberg 	if (!port->cap_adap)
139793f36adeSMika Westerberg 		return -ENXIO;
1398778bfca3SMika Westerberg 	return tb_port_write(port, &word, TB_CFG_PORT,
1399778bfca3SMika Westerberg 			     port->cap_adap + ADP_PCIE_CS_0, 1);
140093f36adeSMika Westerberg }
140193f36adeSMika Westerberg 
14024f807e47SMika Westerberg /**
14034f807e47SMika Westerberg  * tb_dp_port_hpd_is_active() - Is HPD already active
14044f807e47SMika Westerberg  * @port: DP OUT port to check
14054f807e47SMika Westerberg  *
14064f807e47SMika Westerberg  * Checks if the DP OUT adapter port has the HDP bit already set.
14074f807e47SMika Westerberg  */
14084f807e47SMika Westerberg int tb_dp_port_hpd_is_active(struct tb_port *port)
14094f807e47SMika Westerberg {
14104f807e47SMika Westerberg 	u32 data;
14114f807e47SMika Westerberg 	int ret;
14124f807e47SMika Westerberg 
141398176380SMika Westerberg 	ret = tb_port_read(port, &data, TB_CFG_PORT,
141498176380SMika Westerberg 			   port->cap_adap + ADP_DP_CS_2, 1);
14154f807e47SMika Westerberg 	if (ret)
14164f807e47SMika Westerberg 		return ret;
14174f807e47SMika Westerberg 
141898176380SMika Westerberg 	return !!(data & ADP_DP_CS_2_HDP);
14194f807e47SMika Westerberg }
14204f807e47SMika Westerberg 
14214f807e47SMika Westerberg /**
14224f807e47SMika Westerberg  * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
14234f807e47SMika Westerberg  * @port: Port to clear HPD
14244f807e47SMika Westerberg  *
14254f807e47SMika Westerberg  * If the DP IN port has HDP set, this function can be used to clear it.
14264f807e47SMika Westerberg  */
14274f807e47SMika Westerberg int tb_dp_port_hpd_clear(struct tb_port *port)
14284f807e47SMika Westerberg {
14294f807e47SMika Westerberg 	u32 data;
14304f807e47SMika Westerberg 	int ret;
14314f807e47SMika Westerberg 
143298176380SMika Westerberg 	ret = tb_port_read(port, &data, TB_CFG_PORT,
143398176380SMika Westerberg 			   port->cap_adap + ADP_DP_CS_3, 1);
14344f807e47SMika Westerberg 	if (ret)
14354f807e47SMika Westerberg 		return ret;
14364f807e47SMika Westerberg 
143798176380SMika Westerberg 	data |= ADP_DP_CS_3_HDPC;
143898176380SMika Westerberg 	return tb_port_write(port, &data, TB_CFG_PORT,
143998176380SMika Westerberg 			     port->cap_adap + ADP_DP_CS_3, 1);
14404f807e47SMika Westerberg }
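
/*
 * Illustrative sketch only: acknowledge a pending hotplug event by
 * clearing the HDP bit once it is observed set. The helper name is
 * hypothetical.
 */
static int __maybe_unused example_ack_hpd(struct tb_port *port)
{
	int ret = tb_dp_port_hpd_is_active(port);

	if (ret < 0)
		return ret;

	return ret ? tb_dp_port_hpd_clear(port) : 0;
}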
14414f807e47SMika Westerberg 
14424f807e47SMika Westerberg /**
14434f807e47SMika Westerberg  * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
14444f807e47SMika Westerberg  * @port: DP IN/OUT port to set hops
14454f807e47SMika Westerberg  * @video: Video Hop ID
14464f807e47SMika Westerberg  * @aux_tx: AUX TX Hop ID
14474f807e47SMika Westerberg  * @aux_rx: AUX RX Hop ID
14484f807e47SMika Westerberg  *
1449e5bb88e9SMika Westerberg  * Programs the specified Hop IDs for the DP IN/OUT port. Can be called
1450e5bb88e9SMika Westerberg  * for USB4 router DP adapters too but does not program the values as
1451e5bb88e9SMika Westerberg  * the fields are read-only.
14524f807e47SMika Westerberg  */
14534f807e47SMika Westerberg int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
14544f807e47SMika Westerberg 			unsigned int aux_tx, unsigned int aux_rx)
14554f807e47SMika Westerberg {
14564f807e47SMika Westerberg 	u32 data[2];
14574f807e47SMika Westerberg 	int ret;
14584f807e47SMika Westerberg 
1459e5bb88e9SMika Westerberg 	if (tb_switch_is_usb4(port->sw))
1460e5bb88e9SMika Westerberg 		return 0;
1461e5bb88e9SMika Westerberg 
146298176380SMika Westerberg 	ret = tb_port_read(port, data, TB_CFG_PORT,
146398176380SMika Westerberg 			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
14644f807e47SMika Westerberg 	if (ret)
14654f807e47SMika Westerberg 		return ret;
14664f807e47SMika Westerberg 
146798176380SMika Westerberg 	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
146898176380SMika Westerberg 	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
146998176380SMika Westerberg 	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
14704f807e47SMika Westerberg 
147198176380SMika Westerberg 	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
147298176380SMika Westerberg 		ADP_DP_CS_0_VIDEO_HOPID_MASK;
147398176380SMika Westerberg 	data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
147498176380SMika Westerberg 	data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
147598176380SMika Westerberg 		ADP_DP_CS_1_AUX_RX_HOPID_MASK;
14764f807e47SMika Westerberg 
147798176380SMika Westerberg 	return tb_port_write(port, data, TB_CFG_PORT,
147898176380SMika Westerberg 			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
14794f807e47SMika Westerberg }
14804f807e47SMika Westerberg 
14814f807e47SMika Westerberg /**
14824f807e47SMika Westerberg  * tb_dp_port_is_enabled() - Is DP adapter port enabled
14834f807e47SMika Westerberg  * @port: DP adapter port to check
14844f807e47SMika Westerberg  */
14854f807e47SMika Westerberg bool tb_dp_port_is_enabled(struct tb_port *port)
14864f807e47SMika Westerberg {
1487fd5c46b7SMika Westerberg 	u32 data[2];
14884f807e47SMika Westerberg 
148998176380SMika Westerberg 	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
1490fd5c46b7SMika Westerberg 			 ARRAY_SIZE(data)))
14914f807e47SMika Westerberg 		return false;
14924f807e47SMika Westerberg 
149398176380SMika Westerberg 	return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
14944f807e47SMika Westerberg }
14954f807e47SMika Westerberg 
14964f807e47SMika Westerberg /**
14974f807e47SMika Westerberg  * tb_dp_port_enable() - Enables/disables DP paths of a port
14984f807e47SMika Westerberg  * @port: DP IN/OUT port
14994f807e47SMika Westerberg  * @enable: Enable/disable DP path
15004f807e47SMika Westerberg  *
15014f807e47SMika Westerberg  * Once the Hop IDs are programmed, DP paths can be enabled or disabled by
15024f807e47SMika Westerberg  * calling this function.
15034f807e47SMika Westerberg  */
15044f807e47SMika Westerberg int tb_dp_port_enable(struct tb_port *port, bool enable)
15054f807e47SMika Westerberg {
1506fd5c46b7SMika Westerberg 	u32 data[2];
15074f807e47SMika Westerberg 	int ret;
15084f807e47SMika Westerberg 
150998176380SMika Westerberg 	ret = tb_port_read(port, data, TB_CFG_PORT,
151098176380SMika Westerberg 			  port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
15114f807e47SMika Westerberg 	if (ret)
15124f807e47SMika Westerberg 		return ret;
15134f807e47SMika Westerberg 
15144f807e47SMika Westerberg 	if (enable)
151598176380SMika Westerberg 		data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
15164f807e47SMika Westerberg 	else
151798176380SMika Westerberg 		data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);
15184f807e47SMika Westerberg 
151998176380SMika Westerberg 	return tb_port_write(port, data, TB_CFG_PORT,
152098176380SMika Westerberg 			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
15214f807e47SMika Westerberg }
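
/*
 * Illustrative sketch only: program the Hop IDs of a (non-USB4) DP
 * adapter and then enable the video and AUX paths. The Hop ID values
 * below are hypothetical; real ones come from the allocated paths.
 */
static int __maybe_unused example_enable_dp_paths(struct tb_port *port)
{
	int ret;

	/* Video Hop ID 8, AUX TX Hop ID 9, AUX RX Hop ID 10 */
	ret = tb_dp_port_set_hops(port, 8, 9, 10);
	if (ret)
		return ret;

	return tb_dp_port_enable(port, true);
}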
15224f807e47SMika Westerberg 
1523a25c8b2fSAndreas Noever /* switch utility functions */
1524a25c8b2fSAndreas Noever 
1525b0407983SMika Westerberg static const char *tb_switch_generation_name(const struct tb_switch *sw)
1526a25c8b2fSAndreas Noever {
1527b0407983SMika Westerberg 	switch (sw->generation) {
1528b0407983SMika Westerberg 	case 1:
1529b0407983SMika Westerberg 		return "Thunderbolt 1";
1530b0407983SMika Westerberg 	case 2:
1531b0407983SMika Westerberg 		return "Thunderbolt 2";
1532b0407983SMika Westerberg 	case 3:
1533b0407983SMika Westerberg 		return "Thunderbolt 3";
1534b0407983SMika Westerberg 	case 4:
1535b0407983SMika Westerberg 		return "USB4";
1536b0407983SMika Westerberg 	default:
1537b0407983SMika Westerberg 		return "Unknown";
1538b0407983SMika Westerberg 	}
1539b0407983SMika Westerberg }
1540b0407983SMika Westerberg 
1541b0407983SMika Westerberg static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
1542b0407983SMika Westerberg {
1543b0407983SMika Westerberg 	const struct tb_regs_switch_header *regs = &sw->config;
1544b0407983SMika Westerberg 
1545b0407983SMika Westerberg 	tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
1546b0407983SMika Westerberg 	       tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
1547b0407983SMika Westerberg 	       regs->revision, regs->thunderbolt_version);
1548b0407983SMika Westerberg 	tb_dbg(tb, "  Max Port Number: %d\n", regs->max_port_number);
1549daa5140fSMika Westerberg 	tb_dbg(tb, "  Config:\n");
1550daa5140fSMika Westerberg 	tb_dbg(tb,
1551a25c8b2fSAndreas Noever 		"   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
1552b0407983SMika Westerberg 	       regs->upstream_port_number, regs->depth,
1553b0407983SMika Westerberg 	       (((u64) regs->route_hi) << 32) | regs->route_lo,
1554b0407983SMika Westerberg 	       regs->enabled, regs->plug_events_delay);
1555daa5140fSMika Westerberg 	tb_dbg(tb, "   unknown1: %#x unknown4: %#x\n",
1556b0407983SMika Westerberg 	       regs->__unknown1, regs->__unknown4);
1557a25c8b2fSAndreas Noever }
1558a25c8b2fSAndreas Noever 
155990cdb982SSanath S static int tb_switch_reset_host(struct tb_switch *sw)
156023dd5bb4SAndreas Noever {
156190cdb982SSanath S 	if (sw->generation > 1) {
156290cdb982SSanath S 		struct tb_port *port;
156390cdb982SSanath S 
156490cdb982SSanath S 		tb_switch_for_each_port(sw, port) {
156590cdb982SSanath S 			int i, ret;
156690cdb982SSanath S 
156790cdb982SSanath S 			/*
156890cdb982SSanath S 			 * For lane adapters we issue downstream port
156990cdb982SSanath S 			 * reset and clear up path config spaces.
157090cdb982SSanath S 			 *
157190cdb982SSanath S 			 * For protocol adapters we disable the path and
157290cdb982SSanath S 			 * clear path config space one by one (from 8 to
157390cdb982SSanath S 			 * Max Input HopID of the adapter).
157490cdb982SSanath S 			 */
157590cdb982SSanath S 			if (tb_port_is_null(port) && !tb_is_upstream_port(port)) {
157690cdb982SSanath S 				ret = tb_port_reset(port);
157790cdb982SSanath S 				if (ret)
157890cdb982SSanath S 					return ret;
157990cdb982SSanath S 			} else if (tb_port_is_usb3_down(port) ||
158090cdb982SSanath S 				   tb_port_is_usb3_up(port)) {
158190cdb982SSanath S 				tb_usb3_port_enable(port, false);
158290cdb982SSanath S 			} else if (tb_port_is_dpin(port) ||
158390cdb982SSanath S 				   tb_port_is_dpout(port)) {
158490cdb982SSanath S 				tb_dp_port_enable(port, false);
158590cdb982SSanath S 			} else if (tb_port_is_pcie_down(port) ||
158690cdb982SSanath S 				   tb_port_is_pcie_up(port)) {
158790cdb982SSanath S 				tb_pci_port_enable(port, false);
158890cdb982SSanath S 			} else {
158990cdb982SSanath S 				continue;
159090cdb982SSanath S 			}
159190cdb982SSanath S 
159290cdb982SSanath S 			/* Cleanup path config space of protocol adapter */
159390cdb982SSanath S 			for (i = TB_PATH_MIN_HOPID;
159490cdb982SSanath S 			     i <= port->config.max_in_hop_id; i++) {
159590cdb982SSanath S 				ret = tb_path_deactivate_hop(port, i);
159690cdb982SSanath S 				if (ret)
159790cdb982SSanath S 					return ret;
159890cdb982SSanath S 			}
159990cdb982SSanath S 		}
160090cdb982SSanath S 	} else {
160123dd5bb4SAndreas Noever 		struct tb_cfg_result res;
1602356b6c4eSMika Westerberg 
160390cdb982SSanath S 		/* Thunderbolt 1 uses the "reset" config space packet */
1604356b6c4eSMika Westerberg 		res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
1605356b6c4eSMika Westerberg 				      TB_CFG_SWITCH, 2, 2);
160623dd5bb4SAndreas Noever 		if (res.err)
160723dd5bb4SAndreas Noever 			return res.err;
1608bda83aecSMika Westerberg 		res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
160923dd5bb4SAndreas Noever 		if (res.err > 0)
161023dd5bb4SAndreas Noever 			return -EIO;
161190cdb982SSanath S 		else if (res.err < 0)
161223dd5bb4SAndreas Noever 			return res.err;
161323dd5bb4SAndreas Noever 	}
161423dd5bb4SAndreas Noever 
161590cdb982SSanath S 	return 0;
161690cdb982SSanath S }
161790cdb982SSanath S 
161890cdb982SSanath S static int tb_switch_reset_device(struct tb_switch *sw)
161990cdb982SSanath S {
162090cdb982SSanath S 	return tb_port_reset(tb_switch_downstream_port(sw));
162190cdb982SSanath S }
162290cdb982SSanath S 
162390cdb982SSanath S static bool tb_switch_enumerated(struct tb_switch *sw)
162490cdb982SSanath S {
162590cdb982SSanath S 	u32 val;
162690cdb982SSanath S 	int ret;
162790cdb982SSanath S 
162890cdb982SSanath S 	/*
162990cdb982SSanath S 	 * Read directly from the hardware because we use this also
162990cdb982SSanath S 	 * Read directly from the hardware because we also use this
163190cdb982SSanath S 	 * by us.
163290cdb982SSanath S 	 */
163390cdb982SSanath S 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_3, 1);
163490cdb982SSanath S 	if (ret)
163590cdb982SSanath S 		return false;
163690cdb982SSanath S 
163790cdb982SSanath S 	return !!(val & ROUTER_CS_3_V);
163890cdb982SSanath S }
163990cdb982SSanath S 
164090cdb982SSanath S /**
164190cdb982SSanath S  * tb_switch_reset() - Perform reset to the router
164290cdb982SSanath S  * @sw: Router to reset
164390cdb982SSanath S  *
164490cdb982SSanath S  * Issues reset to the router @sw. Can be used for any router. For host
164590cdb982SSanath S  * routers, resets all the downstream ports and cleans up path config
164690cdb982SSanath S  * spaces accordingly. For device routers issues downstream port reset
164790cdb982SSanath S  * through the parent router, so as a side effect there will be an
164890cdb982SSanath S  * unplug soon after this is finished.
164990cdb982SSanath S  *
165090cdb982SSanath S  * If the router is not enumerated, this function does nothing.
165190cdb982SSanath S  *
165290cdb982SSanath S  * Returns %0 on success or negative errno in case of failure.
165390cdb982SSanath S  */
165490cdb982SSanath S int tb_switch_reset(struct tb_switch *sw)
165590cdb982SSanath S {
165690cdb982SSanath S 	int ret;
165790cdb982SSanath S 
165890cdb982SSanath S 	/*
165990cdb982SSanath S 	 * We cannot access the port config spaces unless the router is
166090cdb982SSanath S 	 * already enumerated. If the router is not enumerated it is
166190cdb982SSanath S 	 * equivalent to being reset, so we can skip that here.
166290cdb982SSanath S 	 */
166390cdb982SSanath S 	if (!tb_switch_enumerated(sw))
166490cdb982SSanath S 		return 0;
166590cdb982SSanath S 
166690cdb982SSanath S 	tb_sw_dbg(sw, "resetting\n");
166790cdb982SSanath S 
166890cdb982SSanath S 	if (tb_route(sw))
166990cdb982SSanath S 		ret = tb_switch_reset_device(sw);
167090cdb982SSanath S 	else
167190cdb982SSanath S 		ret = tb_switch_reset_host(sw);
167290cdb982SSanath S 
167390cdb982SSanath S 	if (ret)
167490cdb982SSanath S 		tb_sw_warn(sw, "failed to reset\n");
167590cdb982SSanath S 
167690cdb982SSanath S 	return ret;
167790cdb982SSanath S }
167890cdb982SSanath S 
16791639664fSGil Fine /**
16801639664fSGil Fine  * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
16811639664fSGil Fine  * @sw: Router to read the offset value from
16821639664fSGil Fine  * @offset: Offset in the router config space to read from
16831639664fSGil Fine  * @bit: Bit mask in the offset to wait for
16841639664fSGil Fine  * @value: Value of the bits to wait for
16851639664fSGil Fine  * @timeout_msec: Timeout in ms how long to wait
16861639664fSGil Fine  *
16871639664fSGil Fine  * Waits until the specified bits at @offset reach the specified value.
16881639664fSGil Fine  * Returns %0 in case of success, %-ETIMEDOUT if the @value was not reached
16891639664fSGil Fine  * within the given timeout or a negative errno in case of failure.
16901639664fSGil Fine  */
16911639664fSGil Fine int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
16921639664fSGil Fine 			   u32 value, int timeout_msec)
16931639664fSGil Fine {
16941639664fSGil Fine 	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
16951639664fSGil Fine 
16961639664fSGil Fine 	do {
16971639664fSGil Fine 		u32 val;
16981639664fSGil Fine 		int ret;
16991639664fSGil Fine 
17001639664fSGil Fine 		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
17011639664fSGil Fine 		if (ret)
17021639664fSGil Fine 			return ret;
17031639664fSGil Fine 
17041639664fSGil Fine 		if ((val & bit) == value)
17051639664fSGil Fine 			return 0;
17061639664fSGil Fine 
17071639664fSGil Fine 		usleep_range(50, 100);
17081639664fSGil Fine 	} while (ktime_before(ktime_get(), timeout));
17091639664fSGil Fine 
17101639664fSGil Fine 	return -ETIMEDOUT;
17111639664fSGil Fine }
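
/*
 * Illustrative sketch only: wait up to 500 ms for the router to report
 * itself enabled, using the same ROUTER_CS_3_V bit that
 * tb_switch_enumerated() above reads. Helper name and timeout are
 * hypothetical.
 */
static int __maybe_unused example_wait_router_enabled(struct tb_switch *sw)
{
	return tb_switch_wait_for_bit(sw, ROUTER_CS_3, ROUTER_CS_3_V,
				      ROUTER_CS_3_V, 500);
}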
17121639664fSGil Fine 
171347ba5ae4SLee Jones /*
1714ca389f71SAndreas Noever  * tb_plug_events_active() - enable/disable plug events on a switch
1715ca389f71SAndreas Noever  *
1716ca389f71SAndreas Noever  * Also configures a sane plug_events_delay of 255ms.
1717ca389f71SAndreas Noever  *
1718ca389f71SAndreas Noever  * Return: %0 on success or an error code on failure.
1719ca389f71SAndreas Noever  */
1720ca389f71SAndreas Noever static int tb_plug_events_active(struct tb_switch *sw, bool active)
1721ca389f71SAndreas Noever {
1722ca389f71SAndreas Noever 	u32 data;
1723ca389f71SAndreas Noever 	int res;
1724ca389f71SAndreas Noever 
17255cb6ed31SMika Westerberg 	if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
1726bfe778acSMika Westerberg 		return 0;
1727bfe778acSMika Westerberg 
1728ca389f71SAndreas Noever 	sw->config.plug_events_delay = 0xff;
1729ca389f71SAndreas Noever 	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
1730ca389f71SAndreas Noever 	if (res)
1731ca389f71SAndreas Noever 		return res;
1732ca389f71SAndreas Noever 
1733ca389f71SAndreas Noever 	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
1734ca389f71SAndreas Noever 	if (res)
1735ca389f71SAndreas Noever 		return res;
1736ca389f71SAndreas Noever 
1737ca389f71SAndreas Noever 	if (active) {
1738ca389f71SAndreas Noever 		data = data & 0xFFFFFF83;
1739ca389f71SAndreas Noever 		switch (sw->config.device_id) {
17401d111406SLukas Wunner 		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
17411d111406SLukas Wunner 		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
17421d111406SLukas Wunner 		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
1743ca389f71SAndreas Noever 			break;
1744ca389f71SAndreas Noever 		default:
174530a4eca6SMika Westerberg 			/*
174630a4eca6SMika Westerberg 			 * Skip Alpine Ridge, it needs to have vendor
174730a4eca6SMika Westerberg 			 * specific USB hotplug event enabled for the
174830a4eca6SMika Westerberg 			 * internal xHCI to work.
174930a4eca6SMika Westerberg 			 */
175030a4eca6SMika Westerberg 			if (!tb_switch_is_alpine_ridge(sw))
175130a4eca6SMika Westerberg 				data |= TB_PLUG_EVENTS_USB_DISABLE;
1752ca389f71SAndreas Noever 		}
1753ca389f71SAndreas Noever 	} else {
1754ca389f71SAndreas Noever 		data = data | 0x7c;
1755ca389f71SAndreas Noever 	}
1756ca389f71SAndreas Noever 	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
1757ca389f71SAndreas Noever 			   sw->cap_plug_events + 1, 1);
1758ca389f71SAndreas Noever }
1759ca389f71SAndreas Noever 
1760f67cf491SMika Westerberg static ssize_t authorized_show(struct device *dev,
1761f67cf491SMika Westerberg 			       struct device_attribute *attr,
1762f67cf491SMika Westerberg 			       char *buf)
1763f67cf491SMika Westerberg {
1764f67cf491SMika Westerberg 	struct tb_switch *sw = tb_to_switch(dev);
1765f67cf491SMika Westerberg 
17668283fb57SAndy Shevchenko 	return sysfs_emit(buf, "%u\n", sw->authorized);
1767f67cf491SMika Westerberg }
1768f67cf491SMika Westerberg 
17693da88be2SMika Westerberg static int disapprove_switch(struct device *dev, void *not_used)
17703da88be2SMika Westerberg {
17711651d9e7SRajat Jain 	char *envp[] = { "AUTHORIZED=0", NULL };
17723da88be2SMika Westerberg 	struct tb_switch *sw;
17733da88be2SMika Westerberg 
17743da88be2SMika Westerberg 	sw = tb_to_switch(dev);
17753da88be2SMika Westerberg 	if (sw && sw->authorized) {
17763da88be2SMika Westerberg 		int ret;
17773da88be2SMika Westerberg 
17783da88be2SMika Westerberg 		/* First children */
17793da88be2SMika Westerberg 		ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
17803da88be2SMika Westerberg 		if (ret)
17813da88be2SMika Westerberg 			return ret;
17823da88be2SMika Westerberg 
17833da88be2SMika Westerberg 		ret = tb_domain_disapprove_switch(sw->tb, sw);
17843da88be2SMika Westerberg 		if (ret)
17853da88be2SMika Westerberg 			return ret;
17863da88be2SMika Westerberg 
17873da88be2SMika Westerberg 		sw->authorized = 0;
17881651d9e7SRajat Jain 		kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
17893da88be2SMika Westerberg 	}
17903da88be2SMika Westerberg 
17913da88be2SMika Westerberg 	return 0;
17923da88be2SMika Westerberg }
17933da88be2SMika Westerberg 
1794f67cf491SMika Westerberg static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
1795f67cf491SMika Westerberg {
17961651d9e7SRajat Jain 	char envp_string[13];
1797f67cf491SMika Westerberg 	int ret = -EINVAL;
17981651d9e7SRajat Jain 	char *envp[] = { envp_string, NULL };
1799f67cf491SMika Westerberg 
180009f11b6cSMika Westerberg 	if (!mutex_trylock(&sw->tb->lock))
180109f11b6cSMika Westerberg 		return restart_syscall();
1802f67cf491SMika Westerberg 
18033da88be2SMika Westerberg 	if (!!sw->authorized == !!val)
1804f67cf491SMika Westerberg 		goto unlock;
1805f67cf491SMika Westerberg 
1806f67cf491SMika Westerberg 	switch (val) {
18073da88be2SMika Westerberg 	/* Disapprove switch */
18083da88be2SMika Westerberg 	case 0:
18093da88be2SMika Westerberg 		if (tb_route(sw)) {
18103da88be2SMika Westerberg 			ret = disapprove_switch(&sw->dev, NULL);
18113da88be2SMika Westerberg 			goto unlock;
18123da88be2SMika Westerberg 		}
18133da88be2SMika Westerberg 		break;
18143da88be2SMika Westerberg 
1815f67cf491SMika Westerberg 	/* Approve switch */
1816f67cf491SMika Westerberg 	case 1:
1817f67cf491SMika Westerberg 		if (sw->key)
1818f67cf491SMika Westerberg 			ret = tb_domain_approve_switch_key(sw->tb, sw);
1819f67cf491SMika Westerberg 		else
1820f67cf491SMika Westerberg 			ret = tb_domain_approve_switch(sw->tb, sw);
1821f67cf491SMika Westerberg 		break;
1822f67cf491SMika Westerberg 
1823f67cf491SMika Westerberg 	/* Challenge switch */
1824f67cf491SMika Westerberg 	case 2:
1825f67cf491SMika Westerberg 		if (sw->key)
1826f67cf491SMika Westerberg 			ret = tb_domain_challenge_switch_key(sw->tb, sw);
1827f67cf491SMika Westerberg 		break;
1828f67cf491SMika Westerberg 
1829f67cf491SMika Westerberg 	default:
1830f67cf491SMika Westerberg 		break;
1831f67cf491SMika Westerberg 	}
1832f67cf491SMika Westerberg 
1833f67cf491SMika Westerberg 	if (!ret) {
1834f67cf491SMika Westerberg 		sw->authorized = val;
18351651d9e7SRajat Jain 		/*
18361651d9e7SRajat Jain 		 * Notify userspace of the status change so it can read the new
18371651d9e7SRajat Jain 		 * value of /sys/bus/thunderbolt/devices/.../authorized.
18381651d9e7SRajat Jain 		 */
18391651d9e7SRajat Jain 		sprintf(envp_string, "AUTHORIZED=%u", sw->authorized);
18401651d9e7SRajat Jain 		kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
1841f67cf491SMika Westerberg 	}
1842f67cf491SMika Westerberg 
1843f67cf491SMika Westerberg unlock:
184409f11b6cSMika Westerberg 	mutex_unlock(&sw->tb->lock);
1845f67cf491SMika Westerberg 	return ret;
1846f67cf491SMika Westerberg }
1847f67cf491SMika Westerberg 
1848f67cf491SMika Westerberg static ssize_t authorized_store(struct device *dev,
1849f67cf491SMika Westerberg 				struct device_attribute *attr,
1850f67cf491SMika Westerberg 				const char *buf, size_t count)
1851f67cf491SMika Westerberg {
1852f67cf491SMika Westerberg 	struct tb_switch *sw = tb_to_switch(dev);
1853f67cf491SMika Westerberg 	unsigned int val;
1854f67cf491SMika Westerberg 	ssize_t ret;
1855f67cf491SMika Westerberg 
1856f67cf491SMika Westerberg 	ret = kstrtouint(buf, 0, &val);
1857f67cf491SMika Westerberg 	if (ret)
1858f67cf491SMika Westerberg 		return ret;
1859f67cf491SMika Westerberg 	if (val > 2)
1860f67cf491SMika Westerberg 		return -EINVAL;
1861f67cf491SMika Westerberg 
18624f7c2e0dSMika Westerberg 	pm_runtime_get_sync(&sw->dev);
1863f67cf491SMika Westerberg 	ret = tb_switch_set_authorized(sw, val);
18644f7c2e0dSMika Westerberg 	pm_runtime_mark_last_busy(&sw->dev);
18654f7c2e0dSMika Westerberg 	pm_runtime_put_autosuspend(&sw->dev);
1866f67cf491SMika Westerberg 
1867f67cf491SMika Westerberg 	return ret ? ret : count;
1868f67cf491SMika Westerberg }
1869f67cf491SMika Westerberg static DEVICE_ATTR_RW(authorized);
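
/*
 * Illustrative userspace sketch (not kernel code): approve a device by
 * writing "1" to its authorized attribute, matching the values handled
 * by tb_switch_set_authorized() above. The device name "0-1" is a
 * hypothetical example.
 */
#if 0	/* userspace example, not compiled into the driver */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/thunderbolt/devices/0-1/authorized", "w");

	if (!f)
		return 1;
	fputs("1", f);	/* 0 = deauthorize, 1 = approve, 2 = challenge */
	return fclose(f) ? 1 : 0;
}
#endif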
1870f67cf491SMika Westerberg 
187114862ee3SYehezkel Bernat static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
187214862ee3SYehezkel Bernat 			 char *buf)
187314862ee3SYehezkel Bernat {
187414862ee3SYehezkel Bernat 	struct tb_switch *sw = tb_to_switch(dev);
187514862ee3SYehezkel Bernat 
18768283fb57SAndy Shevchenko 	return sysfs_emit(buf, "%u\n", sw->boot);
187714862ee3SYehezkel Bernat }
187814862ee3SYehezkel Bernat static DEVICE_ATTR_RO(boot);
187914862ee3SYehezkel Bernat 
1880bfe778acSMika Westerberg static ssize_t device_show(struct device *dev, struct device_attribute *attr,
1881bfe778acSMika Westerberg 			   char *buf)
1882a25c8b2fSAndreas Noever {
1883bfe778acSMika Westerberg 	struct tb_switch *sw = tb_to_switch(dev);
1884bfe778acSMika Westerberg 
18858283fb57SAndy Shevchenko 	return sysfs_emit(buf, "%#x\n", sw->device);
1886a25c8b2fSAndreas Noever }
1887bfe778acSMika Westerberg static DEVICE_ATTR_RO(device);
1888a25c8b2fSAndreas Noever 
188972ee3390SMika Westerberg static ssize_t
189072ee3390SMika Westerberg device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
189172ee3390SMika Westerberg {
189272ee3390SMika Westerberg 	struct tb_switch *sw = tb_to_switch(dev);
189372ee3390SMika Westerberg 
18948283fb57SAndy Shevchenko 	return sysfs_emit(buf, "%s\n", sw->device_name ?: "");
189572ee3390SMika Westerberg }
189672ee3390SMika Westerberg static DEVICE_ATTR_RO(device_name);
189772ee3390SMika Westerberg 
1898b406357cSChristian Kellner static ssize_t
1899b406357cSChristian Kellner generation_show(struct device *dev, struct device_attribute *attr, char *buf)
1900b406357cSChristian Kellner {
1901b406357cSChristian Kellner 	struct tb_switch *sw = tb_to_switch(dev);
1902b406357cSChristian Kellner 
19038283fb57SAndy Shevchenko 	return sysfs_emit(buf, "%u\n", sw->generation);
1904b406357cSChristian Kellner }
1905b406357cSChristian Kellner static DEVICE_ATTR_RO(generation);
1906b406357cSChristian Kellner 
1907f67cf491SMika Westerberg static ssize_t key_show(struct device *dev, struct device_attribute *attr,
1908f67cf491SMika Westerberg 			char *buf)
1909f67cf491SMika Westerberg {
1910f67cf491SMika Westerberg 	struct tb_switch *sw = tb_to_switch(dev);
1911f67cf491SMika Westerberg 	ssize_t ret;
1912f67cf491SMika Westerberg 
191309f11b6cSMika Westerberg 	if (!mutex_trylock(&sw->tb->lock))
191409f11b6cSMika Westerberg 		return restart_syscall();
1915f67cf491SMika Westerberg 
1916f67cf491SMika Westerberg 	if (sw->key)
19178283fb57SAndy Shevchenko 		ret = sysfs_emit(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
1918f67cf491SMika Westerberg 	else
19198283fb57SAndy Shevchenko 		ret = sysfs_emit(buf, "\n");
1920f67cf491SMika Westerberg 
192109f11b6cSMika Westerberg 	mutex_unlock(&sw->tb->lock);
1922f67cf491SMika Westerberg 	return ret;
1923f67cf491SMika Westerberg }
1924f67cf491SMika Westerberg 
1925f67cf491SMika Westerberg static ssize_t key_store(struct device *dev, struct device_attribute *attr,
1926f67cf491SMika Westerberg 			 const char *buf, size_t count)
1927f67cf491SMika Westerberg {
1928f67cf491SMika Westerberg 	struct tb_switch *sw = tb_to_switch(dev);
1929f67cf491SMika Westerberg 	u8 key[TB_SWITCH_KEY_SIZE];
1930f67cf491SMika Westerberg 	ssize_t ret = count;
1931e545f0d8SBernat, Yehezkel 	bool clear = false;
1932f67cf491SMika Westerberg 
1933e545f0d8SBernat, Yehezkel 	if (!strcmp(buf, "\n"))
1934e545f0d8SBernat, Yehezkel 		clear = true;
1935e545f0d8SBernat, Yehezkel 	else if (hex2bin(key, buf, sizeof(key)))
1936f67cf491SMika Westerberg 		return -EINVAL;
1937f67cf491SMika Westerberg 
193809f11b6cSMika Westerberg 	if (!mutex_trylock(&sw->tb->lock))
193909f11b6cSMika Westerberg 		return restart_syscall();
1940f67cf491SMika Westerberg 
1941f67cf491SMika Westerberg 	if (sw->authorized) {
1942f67cf491SMika Westerberg 		ret = -EBUSY;
1943f67cf491SMika Westerberg 	} else {
1944f67cf491SMika Westerberg 		kfree(sw->key);
1945e545f0d8SBernat, Yehezkel 		if (clear) {
1946e545f0d8SBernat, Yehezkel 			sw->key = NULL;
1947e545f0d8SBernat, Yehezkel 		} else {
1948f67cf491SMika Westerberg 			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
1949f67cf491SMika Westerberg 			if (!sw->key)
1950f67cf491SMika Westerberg 				ret = -ENOMEM;
1951f67cf491SMika Westerberg 		}
1952e545f0d8SBernat, Yehezkel 	}
1953f67cf491SMika Westerberg 
195409f11b6cSMika Westerberg 	mutex_unlock(&sw->tb->lock);
1955f67cf491SMika Westerberg 	return ret;
1956f67cf491SMika Westerberg }
19570956e411SBernat, Yehezkel static DEVICE_ATTR(key, 0600, key_show, key_store);
1958f67cf491SMika Westerberg 
195991c0c120SMika Westerberg static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
196091c0c120SMika Westerberg 			  char *buf)
196191c0c120SMika Westerberg {
196291c0c120SMika Westerberg 	struct tb_switch *sw = tb_to_switch(dev);
196391c0c120SMika Westerberg 
19648283fb57SAndy Shevchenko 	return sysfs_emit(buf, "%u.0 Gb/s\n", sw->link_speed);
196591c0c120SMika Westerberg }
196691c0c120SMika Westerberg 
196791c0c120SMika Westerberg /*
196891c0c120SMika Westerberg  * Currently all lanes must run at the same speed but we expose both
196991c0c120SMika Westerberg  * directions here to allow possible asymmetric links in the future.
197091c0c120SMika Westerberg  */
197191c0c120SMika Westerberg static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
197291c0c120SMika Westerberg static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
197391c0c120SMika Westerberg 
1974e111fb92SGil Fine static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
197591c0c120SMika Westerberg 			     char *buf)
197691c0c120SMika Westerberg {
197791c0c120SMika Westerberg 	struct tb_switch *sw = tb_to_switch(dev);
1978e111fb92SGil Fine 	unsigned int width;
197991c0c120SMika Westerberg 
1980e111fb92SGil Fine 	switch (sw->link_width) {
1981e111fb92SGil Fine 	case TB_LINK_WIDTH_SINGLE:
1982e111fb92SGil Fine 	case TB_LINK_WIDTH_ASYM_TX:
1983e111fb92SGil Fine 		width = 1;
1984e111fb92SGil Fine 		break;
1985e111fb92SGil Fine 	case TB_LINK_WIDTH_DUAL:
1986e111fb92SGil Fine 		width = 2;
1987e111fb92SGil Fine 		break;
1988e111fb92SGil Fine 	case TB_LINK_WIDTH_ASYM_RX:
1989e111fb92SGil Fine 		width = 3;
1990e111fb92SGil Fine 		break;
1991e111fb92SGil Fine 	default:
1992e111fb92SGil Fine 		WARN_ON_ONCE(1);
1993e111fb92SGil Fine 		return -EINVAL;
199491c0c120SMika Westerberg 	}
199591c0c120SMika Westerberg 
1996e111fb92SGil Fine 	return sysfs_emit(buf, "%u\n", width);
1997e111fb92SGil Fine }
1998e111fb92SGil Fine static DEVICE_ATTR(rx_lanes, 0444, rx_lanes_show, NULL);
1999e111fb92SGil Fine 
2000e111fb92SGil Fine static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
2001e111fb92SGil Fine 			     char *buf)
2002e111fb92SGil Fine {
2003e111fb92SGil Fine 	struct tb_switch *sw = tb_to_switch(dev);
2004e111fb92SGil Fine 	unsigned int width;
2005e111fb92SGil Fine 
2006e111fb92SGil Fine 	switch (sw->link_width) {
2007e111fb92SGil Fine 	case TB_LINK_WIDTH_SINGLE:
2008e111fb92SGil Fine 	case TB_LINK_WIDTH_ASYM_RX:
2009e111fb92SGil Fine 		width = 1;
2010e111fb92SGil Fine 		break;
2011e111fb92SGil Fine 	case TB_LINK_WIDTH_DUAL:
2012e111fb92SGil Fine 		width = 2;
2013e111fb92SGil Fine 		break;
2014e111fb92SGil Fine 	case TB_LINK_WIDTH_ASYM_TX:
2015e111fb92SGil Fine 		width = 3;
2016e111fb92SGil Fine 		break;
2017e111fb92SGil Fine 	default:
2018e111fb92SGil Fine 		WARN_ON_ONCE(1);
2019e111fb92SGil Fine 		return -EINVAL;
2020e111fb92SGil Fine 	}
2021e111fb92SGil Fine 
2022e111fb92SGil Fine 	return sysfs_emit(buf, "%u\n", width);
2023e111fb92SGil Fine }
2024e111fb92SGil Fine static DEVICE_ATTR(tx_lanes, 0444, tx_lanes_show, NULL);
202591c0c120SMika Westerberg 
2026e6b245ccSMika Westerberg static ssize_t nvm_authenticate_show(struct device *dev,
2027e6b245ccSMika Westerberg 	struct device_attribute *attr, char *buf)
2028e6b245ccSMika Westerberg {
2029e6b245ccSMika Westerberg 	struct tb_switch *sw = tb_to_switch(dev);
2030e6b245ccSMika Westerberg 	u32 status;
2031e6b245ccSMika Westerberg 
2032e6b245ccSMika Westerberg 	nvm_get_auth_status(sw, &status);
20338283fb57SAndy Shevchenko 	return sysfs_emit(buf, "%#x\n", status);
2034e6b245ccSMika Westerberg }
2035e6b245ccSMika Westerberg 
20361cb36293SMario Limonciello static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
20371cb36293SMario Limonciello 				      bool disconnect)
2038e6b245ccSMika Westerberg {
2039e6b245ccSMika Westerberg 	struct tb_switch *sw = tb_to_switch(dev);
20401cbf680fSMika Westerberg 	int val, ret;
2041e6b245ccSMika Westerberg 
20424f7c2e0dSMika Westerberg 	pm_runtime_get_sync(&sw->dev);
20434f7c2e0dSMika Westerberg 
20444f7c2e0dSMika Westerberg 	if (!mutex_trylock(&sw->tb->lock)) {
20454f7c2e0dSMika Westerberg 		ret = restart_syscall();
20464f7c2e0dSMika Westerberg 		goto exit_rpm;
20474f7c2e0dSMika Westerberg 	}
2048e6b245ccSMika Westerberg 
2049aef9c693SSzuying Chen 	if (sw->no_nvm_upgrade) {
2050aef9c693SSzuying Chen 		ret = -EOPNOTSUPP;
2051aef9c693SSzuying Chen 		goto exit_unlock;
2052aef9c693SSzuying Chen 	}
2053aef9c693SSzuying Chen 
2054e6b245ccSMika Westerberg 	/* If NVMem devices are not yet added */
2055e6b245ccSMika Westerberg 	if (!sw->nvm) {
2056e6b245ccSMika Westerberg 		ret = -EAGAIN;
2057e6b245ccSMika Westerberg 		goto exit_unlock;
2058e6b245ccSMika Westerberg 	}
2059e6b245ccSMika Westerberg 
20604b794f80SMario Limonciello 	ret = kstrtoint(buf, 10, &val);
2061e6b245ccSMika Westerberg 	if (ret)
2062e6b245ccSMika Westerberg 		goto exit_unlock;
2063e6b245ccSMika Westerberg 
2064e6b245ccSMika Westerberg 	/* Always clear the authentication status */
2065e6b245ccSMika Westerberg 	nvm_clear_auth_status(sw);
2066e6b245ccSMika Westerberg 
20674b794f80SMario Limonciello 	if (val > 0) {
20681cbf680fSMika Westerberg 		if (val == AUTHENTICATE_ONLY) {
20691cbf680fSMika Westerberg 			if (disconnect)
20701cbf680fSMika Westerberg 				ret = -EINVAL;
20711cbf680fSMika Westerberg 			else
20721cbf680fSMika Westerberg 				ret = nvm_authenticate(sw, true);
20731cbf680fSMika Westerberg 		} else {
20744b794f80SMario Limonciello 			if (!sw->nvm->flushed) {
20752d8ff0b5SMika Westerberg 				if (!sw->nvm->buf) {
20762d8ff0b5SMika Westerberg 					ret = -EINVAL;
2077e6b245ccSMika Westerberg 					goto exit_unlock;
20782d8ff0b5SMika Westerberg 				}
20792d8ff0b5SMika Westerberg 
20802d8ff0b5SMika Westerberg 				ret = nvm_validate_and_write(sw);
20814b794f80SMario Limonciello 				if (ret || val == WRITE_ONLY)
20822d8ff0b5SMika Westerberg 					goto exit_unlock;
20834b794f80SMario Limonciello 			}
20844b794f80SMario Limonciello 			if (val == WRITE_AND_AUTHENTICATE) {
20851cbf680fSMika Westerberg 				if (disconnect)
20861cb36293SMario Limonciello 					ret = tb_lc_force_power(sw);
20871cbf680fSMika Westerberg 				else
20881cbf680fSMika Westerberg 					ret = nvm_authenticate(sw, false);
2089e6b245ccSMika Westerberg 			}
20904b794f80SMario Limonciello 		}
20911cb36293SMario Limonciello 	}
2092e6b245ccSMika Westerberg 
2093e6b245ccSMika Westerberg exit_unlock:
209409f11b6cSMika Westerberg 	mutex_unlock(&sw->tb->lock);
20954f7c2e0dSMika Westerberg exit_rpm:
20964f7c2e0dSMika Westerberg 	pm_runtime_mark_last_busy(&sw->dev);
20974f7c2e0dSMika Westerberg 	pm_runtime_put_autosuspend(&sw->dev);
2098e6b245ccSMika Westerberg 
20991cb36293SMario Limonciello 	return ret;
21001cb36293SMario Limonciello }
21011cb36293SMario Limonciello 
21021cb36293SMario Limonciello static ssize_t nvm_authenticate_store(struct device *dev,
21031cb36293SMario Limonciello 	struct device_attribute *attr, const char *buf, size_t count)
21041cb36293SMario Limonciello {
21051cb36293SMario Limonciello 	int ret = nvm_authenticate_sysfs(dev, buf, false);
2106e6b245ccSMika Westerberg 	if (ret)
2107e6b245ccSMika Westerberg 		return ret;
2108e6b245ccSMika Westerberg 	return count;
2109e6b245ccSMika Westerberg }
2110e6b245ccSMika Westerberg static DEVICE_ATTR_RW(nvm_authenticate);
2111e6b245ccSMika Westerberg 
21121cb36293SMario Limonciello static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
21131cb36293SMario Limonciello 	struct device_attribute *attr, char *buf)
21141cb36293SMario Limonciello {
21151cb36293SMario Limonciello 	return nvm_authenticate_show(dev, attr, buf);
21161cb36293SMario Limonciello }
21171cb36293SMario Limonciello 
21181cb36293SMario Limonciello static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
21191cb36293SMario Limonciello 	struct device_attribute *attr, const char *buf, size_t count)
21201cb36293SMario Limonciello {
21211cb36293SMario Limonciello 	int ret;
21221cb36293SMario Limonciello 
21231cb36293SMario Limonciello 	ret = nvm_authenticate_sysfs(dev, buf, true);
21241cb36293SMario Limonciello 	return ret ? ret : count;
21251cb36293SMario Limonciello }
21261cb36293SMario Limonciello static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);
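
/*
 * Illustrative userspace sketch (not kernel code): after the new image
 * has been written to the non-active NVMem device, writing "1" here
 * flushes and authenticates it, as handled by nvm_authenticate_sysfs()
 * above. The device name "0-1" is a hypothetical example.
 */
#if 0	/* userspace example, not compiled into the driver */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/thunderbolt/devices/0-1/nvm_authenticate", "w");

	if (!f)
		return 1;
	fputs("1", f);	/* 1 = write and authenticate, 2 = write only */
	return fclose(f) ? 1 : 0;
}
#endif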
21271cb36293SMario Limonciello 
2128e6b245ccSMika Westerberg static ssize_t nvm_version_show(struct device *dev,
2129e6b245ccSMika Westerberg 				struct device_attribute *attr, char *buf)
2130e6b245ccSMika Westerberg {
2131e6b245ccSMika Westerberg 	struct tb_switch *sw = tb_to_switch(dev);
2132e6b245ccSMika Westerberg 	int ret;
2133e6b245ccSMika Westerberg 
213409f11b6cSMika Westerberg 	if (!mutex_trylock(&sw->tb->lock))
213509f11b6cSMika Westerberg 		return restart_syscall();
2136e6b245ccSMika Westerberg 
2137e6b245ccSMika Westerberg 	if (sw->safe_mode)
2138e6b245ccSMika Westerberg 		ret = -ENODATA;
2139e6b245ccSMika Westerberg 	else if (!sw->nvm)
2140e6b245ccSMika Westerberg 		ret = -EAGAIN;
2141e6b245ccSMika Westerberg 	else
21428283fb57SAndy Shevchenko 		ret = sysfs_emit(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
2143e6b245ccSMika Westerberg 
214409f11b6cSMika Westerberg 	mutex_unlock(&sw->tb->lock);
2145e6b245ccSMika Westerberg 
2146e6b245ccSMika Westerberg 	return ret;
2147e6b245ccSMika Westerberg }
2148e6b245ccSMika Westerberg static DEVICE_ATTR_RO(nvm_version);
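
/*
 * Reading the attribute returns the active NVM version in the "%x.%x"
 * format used above, for example (value illustrative):
 *
 *	$ cat /sys/bus/thunderbolt/devices/0-1/nvm_version
 *	36.1
 */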
2149e6b245ccSMika Westerberg 
2150bfe778acSMika Westerberg static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
2151bfe778acSMika Westerberg 			   char *buf)
2152bfe778acSMika Westerberg {
2153bfe778acSMika Westerberg 	struct tb_switch *sw = tb_to_switch(dev);
2154ca389f71SAndreas Noever 
21558283fb57SAndy Shevchenko 	return sysfs_emit(buf, "%#x\n", sw->vendor);
2156bfe778acSMika Westerberg }
2157bfe778acSMika Westerberg static DEVICE_ATTR_RO(vendor);
2158bfe778acSMika Westerberg 
215972ee3390SMika Westerberg static ssize_t
216072ee3390SMika Westerberg vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
216172ee3390SMika Westerberg {
216272ee3390SMika Westerberg 	struct tb_switch *sw = tb_to_switch(dev);
216372ee3390SMika Westerberg 
21648283fb57SAndy Shevchenko 	return sysfs_emit(buf, "%s\n", sw->vendor_name ?: "");
216572ee3390SMika Westerberg }
216672ee3390SMika Westerberg static DEVICE_ATTR_RO(vendor_name);
216772ee3390SMika Westerberg 
2168bfe778acSMika Westerberg static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
2169bfe778acSMika Westerberg 			      char *buf)
2170bfe778acSMika Westerberg {
2171bfe778acSMika Westerberg 	struct tb_switch *sw = tb_to_switch(dev);
2172bfe778acSMika Westerberg 
21738283fb57SAndy Shevchenko 	return sysfs_emit(buf, "%pUb\n", sw->uuid);
2174bfe778acSMika Westerberg }
2175bfe778acSMika Westerberg static DEVICE_ATTR_RO(unique_id);
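
/*
 * The identification attributes above surface in sysfs roughly like
 * this (vendor name and UUID are illustrative values):
 *
 *	$ cat /sys/bus/thunderbolt/devices/0-1/vendor_name
 *	Acme Inc.
 *	$ cat /sys/bus/thunderbolt/devices/0-1/unique_id
 *	d0e23c80-23c9-4e66-9c36-3a8fc4305a6c
 */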
2176bfe778acSMika Westerberg 
2177bfe778acSMika Westerberg static struct attribute *switch_attrs[] = {
2178f67cf491SMika Westerberg 	&dev_attr_authorized.attr,
217914862ee3SYehezkel Bernat 	&dev_attr_boot.attr,
2180bfe778acSMika Westerberg 	&dev_attr_device.attr,
218172ee3390SMika Westerberg 	&dev_attr_device_name.attr,
2182b406357cSChristian Kellner 	&dev_attr_generation.attr,
2183f67cf491SMika Westerberg 	&dev_attr_key.attr,
2184e6b245ccSMika Westerberg 	&dev_attr_nvm_authenticate.attr,
21851cb36293SMario Limonciello 	&dev_attr_nvm_authenticate_on_disconnect.attr,
2186e6b245ccSMika Westerberg 	&dev_attr_nvm_version.attr,
218791c0c120SMika Westerberg 	&dev_attr_rx_speed.attr,
218891c0c120SMika Westerberg 	&dev_attr_rx_lanes.attr,
218991c0c120SMika Westerberg 	&dev_attr_tx_speed.attr,
219091c0c120SMika Westerberg 	&dev_attr_tx_lanes.attr,
2191bfe778acSMika Westerberg 	&dev_attr_vendor.attr,
219272ee3390SMika Westerberg 	&dev_attr_vendor_name.attr,
2193bfe778acSMika Westerberg 	&dev_attr_unique_id.attr,
2194bfe778acSMika Westerberg 	NULL,
2195bfe778acSMika Westerberg };
2196bfe778acSMika Westerberg 
2197f67cf491SMika Westerberg static umode_t switch_attr_is_visible(struct kobject *kobj,
2198f67cf491SMika Westerberg 				      struct attribute *attr, int n)
2199f67cf491SMika Westerberg {
2200fff15f23STian Tao 	struct device *dev = kobj_to_dev(kobj);
2201f67cf491SMika Westerberg 	struct tb_switch *sw = tb_to_switch(dev);
2202f67cf491SMika Westerberg 
22033cd542e6SMika Westerberg 	if (attr == &dev_attr_authorized.attr) {
22043cd542e6SMika Westerberg 		if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
22058e334125SMika Westerberg 		    sw->tb->security_level == TB_SECURITY_DPONLY)
22063cd542e6SMika Westerberg 			return 0;
22073cd542e6SMika Westerberg 	} else if (attr == &dev_attr_device.attr) {
220858f414faSMika Westerberg 		if (!sw->device)
220958f414faSMika Westerberg 			return 0;
221058f414faSMika Westerberg 	} else if (attr == &dev_attr_device_name.attr) {
221158f414faSMika Westerberg 		if (!sw->device_name)
221258f414faSMika Westerberg 			return 0;
221358f414faSMika Westerberg 	} else if (attr == &dev_attr_vendor.attr)  {
221458f414faSMika Westerberg 		if (!sw->vendor)
221558f414faSMika Westerberg 			return 0;
221658f414faSMika Westerberg 	} else if (attr == &dev_attr_vendor_name.attr)  {
221758f414faSMika Westerberg 		if (!sw->vendor_name)
221858f414faSMika Westerberg 			return 0;
221958f414faSMika Westerberg 	} else if (attr == &dev_attr_key.attr) {
2220f67cf491SMika Westerberg 		if (tb_route(sw) &&
2221f67cf491SMika Westerberg 		    sw->tb->security_level == TB_SECURITY_SECURE &&
2222f67cf491SMika Westerberg 		    sw->security_level == TB_SECURITY_SECURE)
2223f67cf491SMika Westerberg 			return attr->mode;
2224f67cf491SMika Westerberg 		return 0;
222591c0c120SMika Westerberg 	} else if (attr == &dev_attr_rx_speed.attr ||
222691c0c120SMika Westerberg 		   attr == &dev_attr_rx_lanes.attr ||
222791c0c120SMika Westerberg 		   attr == &dev_attr_tx_speed.attr ||
222891c0c120SMika Westerberg 		   attr == &dev_attr_tx_lanes.attr) {
222991c0c120SMika Westerberg 		if (tb_route(sw))
223091c0c120SMika Westerberg 			return attr->mode;
223191c0c120SMika Westerberg 		return 0;
22323f415e5eSMika Westerberg 	} else if (attr == &dev_attr_nvm_authenticate.attr) {
2233b0407983SMika Westerberg 		if (nvm_upgradeable(sw))
22343f415e5eSMika Westerberg 			return attr->mode;
22353f415e5eSMika Westerberg 		return 0;
22363f415e5eSMika Westerberg 	} else if (attr == &dev_attr_nvm_version.attr) {
2237b0407983SMika Westerberg 		if (nvm_readable(sw))
2238e6b245ccSMika Westerberg 			return attr->mode;
2239e6b245ccSMika Westerberg 		return 0;
224014862ee3SYehezkel Bernat 	} else if (attr == &dev_attr_boot.attr) {
224114862ee3SYehezkel Bernat 		if (tb_route(sw))
224214862ee3SYehezkel Bernat 			return attr->mode;
224314862ee3SYehezkel Bernat 		return 0;
22441cb36293SMario Limonciello 	} else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
22451cb36293SMario Limonciello 		if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
22461cb36293SMario Limonciello 			return attr->mode;
22471cb36293SMario Limonciello 		return 0;
2248f67cf491SMika Westerberg 	}
2249f67cf491SMika Westerberg 
2250e6b245ccSMika Westerberg 	return sw->safe_mode ? 0 : attr->mode;
2251f67cf491SMika Westerberg }
2252f67cf491SMika Westerberg 
22536889e00fSRikard Falkeborn static const struct attribute_group switch_group = {
2254f67cf491SMika Westerberg 	.is_visible = switch_attr_is_visible,
2255bfe778acSMika Westerberg 	.attrs = switch_attrs,
2256bfe778acSMika Westerberg };
2257bfe778acSMika Westerberg 
2258bfe778acSMika Westerberg static const struct attribute_group *switch_groups[] = {
2259bfe778acSMika Westerberg 	&switch_group,
2260bfe778acSMika Westerberg 	NULL,
2261bfe778acSMika Westerberg };
2262bfe778acSMika Westerberg 
2263bfe778acSMika Westerberg static void tb_switch_release(struct device *dev)
2264bfe778acSMika Westerberg {
2265bfe778acSMika Westerberg 	struct tb_switch *sw = tb_to_switch(dev);
2266b433d010SMika Westerberg 	struct tb_port *port;
2267bfe778acSMika Westerberg 
22683e136768SMika Westerberg 	dma_port_free(sw->dma_port);
22693e136768SMika Westerberg 
2270b433d010SMika Westerberg 	tb_switch_for_each_port(sw, port) {
2271b433d010SMika Westerberg 		ida_destroy(&port->in_hopids);
2272b433d010SMika Westerberg 		ida_destroy(&port->out_hopids);
22730b2863acSMika Westerberg 	}
22740b2863acSMika Westerberg 
2275bfe778acSMika Westerberg 	kfree(sw->uuid);
227672ee3390SMika Westerberg 	kfree(sw->device_name);
227772ee3390SMika Westerberg 	kfree(sw->vendor_name);
2278a25c8b2fSAndreas Noever 	kfree(sw->ports);
2279343fcb8cSAndreas Noever 	kfree(sw->drom);
2280f67cf491SMika Westerberg 	kfree(sw->key);
2281a25c8b2fSAndreas Noever 	kfree(sw);
2282a25c8b2fSAndreas Noever }
2283a25c8b2fSAndreas Noever 
2284162736b0SGreg Kroah-Hartman static int tb_switch_uevent(const struct device *dev, struct kobj_uevent_env *env)
22852f608ba1SMika Westerberg {
2286162736b0SGreg Kroah-Hartman 	const struct tb_switch *sw = tb_to_switch(dev);
22872f608ba1SMika Westerberg 	const char *type;
22882f608ba1SMika Westerberg 
22896e21007dSGil Fine 	if (tb_switch_is_usb4(sw)) {
22906e21007dSGil Fine 		if (add_uevent_var(env, "USB4_VERSION=%u.0",
22916e21007dSGil Fine 				   usb4_switch_version(sw)))
22922f608ba1SMika Westerberg 			return -ENOMEM;
22932f608ba1SMika Westerberg 	}
22942f608ba1SMika Westerberg 
22952f608ba1SMika Westerberg 	if (!tb_route(sw)) {
22962f608ba1SMika Westerberg 		type = "host";
22972f608ba1SMika Westerberg 	} else {
22982f608ba1SMika Westerberg 		const struct tb_port *port;
22992f608ba1SMika Westerberg 		bool hub = false;
23002f608ba1SMika Westerberg 
23012f608ba1SMika Westerberg 		/* Device is a hub if it has any downstream ports */
23022f608ba1SMika Westerberg 		tb_switch_for_each_port(sw, port) {
23032f608ba1SMika Westerberg 			if (!port->disabled && !tb_is_upstream_port(port) &&
23042f608ba1SMika Westerberg 			     tb_port_is_null(port)) {
23052f608ba1SMika Westerberg 				hub = true;
23062f608ba1SMika Westerberg 				break;
23072f608ba1SMika Westerberg 			}
23082f608ba1SMika Westerberg 		}
23092f608ba1SMika Westerberg 
23102f608ba1SMika Westerberg 		type = hub ? "hub" : "device";
23112f608ba1SMika Westerberg 	}
23122f608ba1SMika Westerberg 
23132f608ba1SMika Westerberg 	if (add_uevent_var(env, "USB4_TYPE=%s", type))
23142f608ba1SMika Westerberg 		return -ENOMEM;
23152f608ba1SMika Westerberg 	return 0;
23162f608ba1SMika Westerberg }
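
/*
 * For example, a USB4 hub enumerated by this driver would get a uevent
 * environment along these lines (values illustrative):
 *
 *	USB4_VERSION=1.0
 *	USB4_TYPE=hub
 */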
23172f608ba1SMika Westerberg 
23182d8ff0b5SMika Westerberg /*
23192d8ff0b5SMika Westerberg  * Currently we only need to provide the callbacks. Everything else is
23202d8ff0b5SMika Westerberg  * handled in the connection manager.
23212d8ff0b5SMika Westerberg  */
23222d8ff0b5SMika Westerberg static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
23232d8ff0b5SMika Westerberg {
23244f7c2e0dSMika Westerberg 	struct tb_switch *sw = tb_to_switch(dev);
23254f7c2e0dSMika Westerberg 	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
23264f7c2e0dSMika Westerberg 
23274f7c2e0dSMika Westerberg 	if (cm_ops->runtime_suspend_switch)
23284f7c2e0dSMika Westerberg 		return cm_ops->runtime_suspend_switch(sw);
23294f7c2e0dSMika Westerberg 
23302d8ff0b5SMika Westerberg 	return 0;
23312d8ff0b5SMika Westerberg }
23322d8ff0b5SMika Westerberg 
23332d8ff0b5SMika Westerberg static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
23342d8ff0b5SMika Westerberg {
23354f7c2e0dSMika Westerberg 	struct tb_switch *sw = tb_to_switch(dev);
23364f7c2e0dSMika Westerberg 	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
23374f7c2e0dSMika Westerberg 
23384f7c2e0dSMika Westerberg 	if (cm_ops->runtime_resume_switch)
23394f7c2e0dSMika Westerberg 		return cm_ops->runtime_resume_switch(sw);
23402d8ff0b5SMika Westerberg 	return 0;
23412d8ff0b5SMika Westerberg }
23422d8ff0b5SMika Westerberg 
23432d8ff0b5SMika Westerberg static const struct dev_pm_ops tb_switch_pm_ops = {
23442d8ff0b5SMika Westerberg 	SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
23452d8ff0b5SMika Westerberg 			   NULL)
23462d8ff0b5SMika Westerberg };
23472d8ff0b5SMika Westerberg 
2348bfe778acSMika Westerberg struct device_type tb_switch_type = {
2349bfe778acSMika Westerberg 	.name = "thunderbolt_device",
2350bfe778acSMika Westerberg 	.release = tb_switch_release,
23512f608ba1SMika Westerberg 	.uevent = tb_switch_uevent,
23522d8ff0b5SMika Westerberg 	.pm = &tb_switch_pm_ops,
2353bfe778acSMika Westerberg };
2354bfe778acSMika Westerberg 
23552c3c4197SMika Westerberg static int tb_switch_get_generation(struct tb_switch *sw)
23562c3c4197SMika Westerberg {
2357d589fd42SMika Westerberg 	if (tb_switch_is_usb4(sw))
2358d589fd42SMika Westerberg 		return 4;
2359d589fd42SMika Westerberg 
2360d589fd42SMika Westerberg 	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
23612c3c4197SMika Westerberg 		switch (sw->config.device_id) {
23622c3c4197SMika Westerberg 		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
23632c3c4197SMika Westerberg 		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
23642c3c4197SMika Westerberg 		case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
23652c3c4197SMika Westerberg 		case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
23662c3c4197SMika Westerberg 		case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
23672c3c4197SMika Westerberg 		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
23682c3c4197SMika Westerberg 		case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
23692c3c4197SMika Westerberg 		case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
23702c3c4197SMika Westerberg 			return 1;
23712c3c4197SMika Westerberg 
23722c3c4197SMika Westerberg 		case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
23732c3c4197SMika Westerberg 		case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
23742c3c4197SMika Westerberg 		case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
23752c3c4197SMika Westerberg 			return 2;
23762c3c4197SMika Westerberg 
23772c3c4197SMika Westerberg 		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
23782c3c4197SMika Westerberg 		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
23792c3c4197SMika Westerberg 		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
23802c3c4197SMika Westerberg 		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
23812c3c4197SMika Westerberg 		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
23824bac471dSRadion Mirchevsky 		case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
23834bac471dSRadion Mirchevsky 		case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
23844bac471dSRadion Mirchevsky 		case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
23853cdb9446SMika Westerberg 		case PCI_DEVICE_ID_INTEL_ICL_NHI0:
23863cdb9446SMika Westerberg 		case PCI_DEVICE_ID_INTEL_ICL_NHI1:
23872c3c4197SMika Westerberg 			return 3;
2388d589fd42SMika Westerberg 		}
2389d589fd42SMika Westerberg 	}
2390b0407983SMika Westerberg 
23912c3c4197SMika Westerberg 	/*
2392d589fd42SMika Westerberg 	 * For unknown switches, assume generation 1 to be on the
2393d589fd42SMika Westerberg 	 * safe side.
23942c3c4197SMika Westerberg 	 */
23952c3c4197SMika Westerberg 	tb_sw_warn(sw, "unsupported switch device id %#x\n",
23962c3c4197SMika Westerberg 		   sw->config.device_id);
23972c3c4197SMika Westerberg 	return 1;
23982c3c4197SMika Westerberg }
23992c3c4197SMika Westerberg 
2400b0407983SMika Westerberg static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
2401b0407983SMika Westerberg {
2402b0407983SMika Westerberg 	int max_depth;
2403b0407983SMika Westerberg 
2404b0407983SMika Westerberg 	if (tb_switch_is_usb4(sw) ||
2405b0407983SMika Westerberg 	    (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
2406b0407983SMika Westerberg 		max_depth = USB4_SWITCH_MAX_DEPTH;
2407b0407983SMika Westerberg 	else
2408b0407983SMika Westerberg 		max_depth = TB_SWITCH_MAX_DEPTH;
2409b0407983SMika Westerberg 
2410b0407983SMika Westerberg 	return depth > max_depth;
2411b0407983SMika Westerberg }
2412b0407983SMika Westerberg 
2413a25c8b2fSAndreas Noever /**
2414bfe778acSMika Westerberg  * tb_switch_alloc() - allocate a switch
2415bfe778acSMika Westerberg  * @tb: Pointer to the owning domain
2416bfe778acSMika Westerberg  * @parent: Parent device for this switch
2417bfe778acSMika Westerberg  * @route: Route string for this switch
2418a25c8b2fSAndreas Noever  *
2419bfe778acSMika Westerberg  * Allocates and initializes a switch. Will not upload configuration to
2420bfe778acSMika Westerberg  * the switch. For that you need to call tb_switch_configure()
2421bfe778acSMika Westerberg  * separately. The returned switch should be released by calling
2422bfe778acSMika Westerberg  * tb_switch_put().
2423bfe778acSMika Westerberg  *
2424444ac384SMika Westerberg  * Return: Pointer to the allocated switch or ERR_PTR() in case of
2425444ac384SMika Westerberg  * failure.
2426a25c8b2fSAndreas Noever  */
2427bfe778acSMika Westerberg struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
2428bfe778acSMika Westerberg 				  u64 route)
2429a25c8b2fSAndreas Noever {
2430a25c8b2fSAndreas Noever 	struct tb_switch *sw;
2431f0342e75SMika Westerberg 	int upstream_port;
2432444ac384SMika Westerberg 	int i, ret, depth;
2433f0342e75SMika Westerberg 
2434b0407983SMika Westerberg 	/* Unlock the downstream port so we can access the switch below */
2435b0407983SMika Westerberg 	if (route) {
2436b0407983SMika Westerberg 		struct tb_switch *parent_sw = tb_to_switch(parent);
2437b0407983SMika Westerberg 		struct tb_port *down;
2438b0407983SMika Westerberg 
2439b0407983SMika Westerberg 		down = tb_port_at(route, parent_sw);
2440b0407983SMika Westerberg 		tb_port_unlock(down);
2441b0407983SMika Westerberg 	}
2442b0407983SMika Westerberg 
2443f0342e75SMika Westerberg 	depth = tb_route_length(route);
2444f0342e75SMika Westerberg 
2445f0342e75SMika Westerberg 	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
2446a25c8b2fSAndreas Noever 	if (upstream_port < 0)
2447444ac384SMika Westerberg 		return ERR_PTR(upstream_port);
2448a25c8b2fSAndreas Noever 
2449a25c8b2fSAndreas Noever 	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2450a25c8b2fSAndreas Noever 	if (!sw)
2451444ac384SMika Westerberg 		return ERR_PTR(-ENOMEM);
2452a25c8b2fSAndreas Noever 
2453a25c8b2fSAndreas Noever 	sw->tb = tb;
2454444ac384SMika Westerberg 	ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
2455444ac384SMika Westerberg 	if (ret)
2456bfe778acSMika Westerberg 		goto err_free_sw_ports;
2457bfe778acSMika Westerberg 
2458b0407983SMika Westerberg 	sw->generation = tb_switch_get_generation(sw);
2459b0407983SMika Westerberg 
2460daa5140fSMika Westerberg 	tb_dbg(tb, "current switch config:\n");
2461b0407983SMika Westerberg 	tb_dump_switch(tb, sw);
2462a25c8b2fSAndreas Noever 
2463a25c8b2fSAndreas Noever 	/* configure switch */
2464a25c8b2fSAndreas Noever 	sw->config.upstream_port_number = upstream_port;
2465f0342e75SMika Westerberg 	sw->config.depth = depth;
2466f0342e75SMika Westerberg 	sw->config.route_hi = upper_32_bits(route);
2467f0342e75SMika Westerberg 	sw->config.route_lo = lower_32_bits(route);
2468bfe778acSMika Westerberg 	sw->config.enabled = 0;
2469a25c8b2fSAndreas Noever 
2470b0407983SMika Westerberg 	/* Make sure we do not exceed maximum topology limit */
2471704a940dSColin Ian King 	if (tb_switch_exceeds_max_depth(sw, depth)) {
2472704a940dSColin Ian King 		ret = -EADDRNOTAVAIL;
2473704a940dSColin Ian King 		goto err_free_sw_ports;
2474704a940dSColin Ian King 	}
2475b0407983SMika Westerberg 
2476bfe778acSMika Westerberg 	/* initialize ports */
2477bfe778acSMika Westerberg 	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
2478bfe778acSMika Westerberg 				GFP_KERNEL);
2479444ac384SMika Westerberg 	if (!sw->ports) {
2480444ac384SMika Westerberg 		ret = -ENOMEM;
2481bfe778acSMika Westerberg 		goto err_free_sw_ports;
2482444ac384SMika Westerberg 	}
2483bfe778acSMika Westerberg 
2484bfe778acSMika Westerberg 	for (i = 0; i <= sw->config.max_port_number; i++) {
2485bfe778acSMika Westerberg 		/* minimum setup for tb_find_cap and tb_drom_read to work */
2486bfe778acSMika Westerberg 		sw->ports[i].sw = sw;
2487bfe778acSMika Westerberg 		sw->ports[i].port = i;
2488781e14eaSMika Westerberg 
2489781e14eaSMika Westerberg 		/* Control port does not need HopID allocation */
2490781e14eaSMika Westerberg 		if (i) {
2491781e14eaSMika Westerberg 			ida_init(&sw->ports[i].in_hopids);
2492781e14eaSMika Westerberg 			ida_init(&sw->ports[i].out_hopids);
2493781e14eaSMika Westerberg 		}
2494bfe778acSMika Westerberg 	}
2495bfe778acSMika Westerberg 
2496444ac384SMika Westerberg 	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
2497b0407983SMika Westerberg 	if (ret > 0)
2498444ac384SMika Westerberg 		sw->cap_plug_events = ret;
2499bfe778acSMika Westerberg 
250023ccd21cSGil Fine 	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2);
250123ccd21cSGil Fine 	if (ret > 0)
250223ccd21cSGil Fine 		sw->cap_vsec_tmu = ret;
250323ccd21cSGil Fine 
2504444ac384SMika Westerberg 	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
2505444ac384SMika Westerberg 	if (ret > 0)
2506444ac384SMika Westerberg 		sw->cap_lc = ret;
2507a9be5582SMika Westerberg 
250843f977bcSGil Fine 	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP);
250943f977bcSGil Fine 	if (ret > 0)
251043f977bcSGil Fine 		sw->cap_lp = ret;
251143f977bcSGil Fine 
2512f67cf491SMika Westerberg 	/* Root switch is always authorized */
2513f67cf491SMika Westerberg 	if (!route)
2514f67cf491SMika Westerberg 		sw->authorized = true;
2515f67cf491SMika Westerberg 
2516bfe778acSMika Westerberg 	device_initialize(&sw->dev);
2517bfe778acSMika Westerberg 	sw->dev.parent = parent;
2518bfe778acSMika Westerberg 	sw->dev.bus = &tb_bus_type;
2519bfe778acSMika Westerberg 	sw->dev.type = &tb_switch_type;
2520bfe778acSMika Westerberg 	sw->dev.groups = switch_groups;
2521bfe778acSMika Westerberg 	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2522bfe778acSMika Westerberg 
2523bfe778acSMika Westerberg 	return sw;
2524bfe778acSMika Westerberg 
2525bfe778acSMika Westerberg err_free_sw_ports:
2526bfe778acSMika Westerberg 	kfree(sw->ports);
2527bfe778acSMika Westerberg 	kfree(sw);
2528bfe778acSMika Westerberg 
2529444ac384SMika Westerberg 	return ERR_PTR(ret);
2530bfe778acSMika Westerberg }
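
/*
 * A typical caller pairs this with tb_switch_configure() and
 * tb_switch_add() as described in the kernel-doc above (sketch only;
 * error handling trimmed):
 *
 *	sw = tb_switch_alloc(tb, &parent_sw->dev, route);
 *	if (IS_ERR(sw))
 *		return PTR_ERR(sw);
 *	ret = tb_switch_configure(sw);
 *	if (!ret)
 *		ret = tb_switch_add(sw);
 *	if (ret)
 *		tb_switch_put(sw);
 */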
2531bfe778acSMika Westerberg 
2532bfe778acSMika Westerberg /**
2533e6b245ccSMika Westerberg  * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
2534e6b245ccSMika Westerberg  * @tb: Pointer to the owning domain
2535e6b245ccSMika Westerberg  * @parent: Parent device for this switch
2536e6b245ccSMika Westerberg  * @route: Route string for this switch
2537e6b245ccSMika Westerberg  *
2538e6b245ccSMika Westerberg  * This creates a switch in safe mode. This means the switch pretty much
2539e6b245ccSMika Westerberg  * lacks all capabilities except the DMA configuration port until it is
2540e6b245ccSMika Westerberg  * flashed with valid NVM firmware.
2541e6b245ccSMika Westerberg  *
2542e6b245ccSMika Westerberg  * The returned switch must be released by calling tb_switch_put().
2543e6b245ccSMika Westerberg  *
2544444ac384SMika Westerberg  * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
2545e6b245ccSMika Westerberg  */
2546e6b245ccSMika Westerberg struct tb_switch *
2547e6b245ccSMika Westerberg tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
2548e6b245ccSMika Westerberg {
2549e6b245ccSMika Westerberg 	struct tb_switch *sw;
2550e6b245ccSMika Westerberg 
2551e6b245ccSMika Westerberg 	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2552e6b245ccSMika Westerberg 	if (!sw)
2553444ac384SMika Westerberg 		return ERR_PTR(-ENOMEM);
2554e6b245ccSMika Westerberg 
2555e6b245ccSMika Westerberg 	sw->tb = tb;
2556e6b245ccSMika Westerberg 	sw->config.depth = tb_route_length(route);
2557e6b245ccSMika Westerberg 	sw->config.route_hi = upper_32_bits(route);
2558e6b245ccSMika Westerberg 	sw->config.route_lo = lower_32_bits(route);
2559e6b245ccSMika Westerberg 	sw->safe_mode = true;
2560e6b245ccSMika Westerberg 
2561e6b245ccSMika Westerberg 	device_initialize(&sw->dev);
2562e6b245ccSMika Westerberg 	sw->dev.parent = parent;
2563e6b245ccSMika Westerberg 	sw->dev.bus = &tb_bus_type;
2564e6b245ccSMika Westerberg 	sw->dev.type = &tb_switch_type;
2565e6b245ccSMika Westerberg 	sw->dev.groups = switch_groups;
2566e6b245ccSMika Westerberg 	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2567e6b245ccSMika Westerberg 
2568e6b245ccSMika Westerberg 	return sw;
2569e6b245ccSMika Westerberg }
2570e6b245ccSMika Westerberg 
2571e6b245ccSMika Westerberg /**
2572bfe778acSMika Westerberg  * tb_switch_configure() - Uploads configuration to the switch
2573bfe778acSMika Westerberg  * @sw: Switch to configure
2574bfe778acSMika Westerberg  *
2575bfe778acSMika Westerberg  * Call this function before the switch is added to the system. It will
2576bfe778acSMika Westerberg  * upload the configuration to the switch and make it available for the
2577b0407983SMika Westerberg  * connection manager to use. Can be called for the switch again after
2578b0407983SMika Westerberg  * resume from low power states to re-initialize it.
2579bfe778acSMika Westerberg  *
2580bfe778acSMika Westerberg  * Return: %0 in case of success and negative errno in case of failure
2581bfe778acSMika Westerberg  */
2582bfe778acSMika Westerberg int tb_switch_configure(struct tb_switch *sw)
2583bfe778acSMika Westerberg {
2584bfe778acSMika Westerberg 	struct tb *tb = sw->tb;
2585bfe778acSMika Westerberg 	u64 route;
2586bfe778acSMika Westerberg 	int ret;
2587bfe778acSMika Westerberg 
2588bfe778acSMika Westerberg 	route = tb_route(sw);
2589bfe778acSMika Westerberg 
2590b0407983SMika Westerberg 	tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
2591b0407983SMika Westerberg 	       sw->config.enabled ? "restoring" : "initializing", route,
2592b0407983SMika Westerberg 	       tb_route_length(route), sw->config.upstream_port_number);
2593b0407983SMika Westerberg 
2594b0407983SMika Westerberg 	sw->config.enabled = 1;
2595b0407983SMika Westerberg 
2596b0407983SMika Westerberg 	if (tb_switch_is_usb4(sw)) {
2597b0407983SMika Westerberg 		/*
2598b0407983SMika Westerberg 		 * For USB4 devices, we need to program the CM version
2599b0407983SMika Westerberg 		 * accordingly so that it knows to expose all the
260014200a26SGil Fine 		 * additional capabilities. Program it according to the USB4
260114200a26SGil Fine 		 * version to avoid changing existing (v1) routers' behaviour.
2602b0407983SMika Westerberg 		 */
260314200a26SGil Fine 		if (usb4_switch_version(sw) < 2)
260414200a26SGil Fine 			sw->config.cmuv = ROUTER_CS_4_CMUV_V1;
260514200a26SGil Fine 		else
260614200a26SGil Fine 			sw->config.cmuv = ROUTER_CS_4_CMUV_V2;
260731f87f70SMario Limonciello 		sw->config.plug_events_delay = 0xa;
2608b0407983SMika Westerberg 
2609b0407983SMika Westerberg 		/* Enumerate the switch */
2610b0407983SMika Westerberg 		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2611b0407983SMika Westerberg 				  ROUTER_CS_1, 4);
2612b0407983SMika Westerberg 		if (ret)
2613b0407983SMika Westerberg 			return ret;
2614b0407983SMika Westerberg 
2615b0407983SMika Westerberg 		ret = usb4_switch_setup(sw);
2616b0407983SMika Westerberg 	} else {
2617bfe778acSMika Westerberg 		if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
2618a25c8b2fSAndreas Noever 			tb_sw_warn(sw, "unknown switch vendor id %#x\n",
2619a25c8b2fSAndreas Noever 				   sw->config.vendor_id);
2620a25c8b2fSAndreas Noever 
2621b0407983SMika Westerberg 		if (!sw->cap_plug_events) {
2622b0407983SMika Westerberg 			tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
2623b0407983SMika Westerberg 			return -ENODEV;
2624b0407983SMika Westerberg 		}
2625bfe778acSMika Westerberg 
2626b0407983SMika Westerberg 		/* Enumerate the switch */
2627b0407983SMika Westerberg 		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2628b0407983SMika Westerberg 				  ROUTER_CS_1, 3);
2629b0407983SMika Westerberg 	}
2630e879a709SMika Westerberg 	if (ret)
2631e879a709SMika Westerberg 		return ret;
2632e879a709SMika Westerberg 
2633bfe778acSMika Westerberg 	return tb_plug_events_active(sw, true);
2634a25c8b2fSAndreas Noever }
2635a25c8b2fSAndreas Noever 
2636d49b4f04SMika Westerberg /**
2637d49b4f04SMika Westerberg  * tb_switch_configuration_valid() - Set the tunneling configuration to be valid
2638d49b4f04SMika Westerberg  * @sw: Router to configure
2639d49b4f04SMika Westerberg  *
2640d49b4f04SMika Westerberg  * Needs to be called before any tunnels can be set up through the
2641d49b4f04SMika Westerberg  * router. Can be called for any router.
2642d49b4f04SMika Westerberg  *
2643d49b4f04SMika Westerberg  * Returns %0 in case of success and negative errno otherwise.
2644d49b4f04SMika Westerberg  */
2645d49b4f04SMika Westerberg int tb_switch_configuration_valid(struct tb_switch *sw)
2646d49b4f04SMika Westerberg {
2647d49b4f04SMika Westerberg 	if (tb_switch_is_usb4(sw))
2648d49b4f04SMika Westerberg 		return usb4_switch_configuration_valid(sw);
2649d49b4f04SMika Westerberg 	return 0;
2650d49b4f04SMika Westerberg }
2651d49b4f04SMika Westerberg 
26522cc12751SAditya Pakki static int tb_switch_set_uuid(struct tb_switch *sw)
2653bfe778acSMika Westerberg {
2654b0407983SMika Westerberg 	bool uid = false;
2655bfe778acSMika Westerberg 	u32 uuid[4];
2656a9be5582SMika Westerberg 	int ret;
2657bfe778acSMika Westerberg 
2658bfe778acSMika Westerberg 	if (sw->uuid)
2659a9be5582SMika Westerberg 		return 0;
2660bfe778acSMika Westerberg 
2661b0407983SMika Westerberg 	if (tb_switch_is_usb4(sw)) {
2662b0407983SMika Westerberg 		ret = usb4_switch_read_uid(sw, &sw->uid);
2663b0407983SMika Westerberg 		if (ret)
2664b0407983SMika Westerberg 			return ret;
2665b0407983SMika Westerberg 		uid = true;
2666b0407983SMika Westerberg 	} else {
2667bfe778acSMika Westerberg 		/*
2668b0407983SMika Westerberg 		 * The newer controllers include a fused UUID as part of
2669b0407983SMika Westerberg 		 * the link controller specific registers.
2670bfe778acSMika Westerberg 		 */
2671a9be5582SMika Westerberg 		ret = tb_lc_read_uuid(sw, uuid);
2672a9be5582SMika Westerberg 		if (ret) {
2673b0407983SMika Westerberg 			if (ret != -EINVAL)
2674b0407983SMika Westerberg 				return ret;
2675b0407983SMika Westerberg 			uid = true;
2676b0407983SMika Westerberg 		}
2677b0407983SMika Westerberg 	}
2678b0407983SMika Westerberg 
2679b0407983SMika Westerberg 	if (uid) {
2680bfe778acSMika Westerberg 		/*
2681bfe778acSMika Westerberg 		 * ICM generates UUID based on UID and fills the upper
2682bfe778acSMika Westerberg 		 * two words with ones. This is not strictly following
2683bfe778acSMika Westerberg 		 * UUID format but we want to be compatible with it so
2684bfe778acSMika Westerberg 		 * we do the same here.
2685bfe778acSMika Westerberg 		 */
2686bfe778acSMika Westerberg 		uuid[0] = sw->uid & 0xffffffff;
2687bfe778acSMika Westerberg 		uuid[1] = (sw->uid >> 32) & 0xffffffff;
2688bfe778acSMika Westerberg 		uuid[2] = 0xffffffff;
2689bfe778acSMika Westerberg 		uuid[3] = 0xffffffff;
2690ca389f71SAndreas Noever 	}
2691bfe778acSMika Westerberg 
2692bfe778acSMika Westerberg 	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
26932cc12751SAditya Pakki 	if (!sw->uuid)
2694a9be5582SMika Westerberg 		return -ENOMEM;
2695a9be5582SMika Westerberg 	return 0;
2696bfe778acSMika Westerberg }
2697bfe778acSMika Westerberg 
2698e6b245ccSMika Westerberg static int tb_switch_add_dma_port(struct tb_switch *sw)
26993e136768SMika Westerberg {
2700e6b245ccSMika Westerberg 	u32 status;
2701e6b245ccSMika Westerberg 	int ret;
2702e6b245ccSMika Westerberg 
27033e136768SMika Westerberg 	switch (sw->generation) {
27043e136768SMika Westerberg 	case 2:
27053e136768SMika Westerberg 		/* Only root switch can be upgraded */
27063e136768SMika Westerberg 		if (tb_route(sw))
2707e6b245ccSMika Westerberg 			return 0;
27087a7ebfa8SMika Westerberg 
2709df561f66SGustavo A. R. Silva 		fallthrough;
27107a7ebfa8SMika Westerberg 	case 3:
2711661b1947SMika Westerberg 	case 4:
27127a7ebfa8SMika Westerberg 		ret = tb_switch_set_uuid(sw);
27137a7ebfa8SMika Westerberg 		if (ret)
27147a7ebfa8SMika Westerberg 			return ret;
27153e136768SMika Westerberg 		break;
27163e136768SMika Westerberg 
27173e136768SMika Westerberg 	default:
2718e6b245ccSMika Westerberg 		/*
2719e6b245ccSMika Westerberg 		 * DMA port is the only thing available when the switch
2720e6b245ccSMika Westerberg 		 * is in safe mode.
2721e6b245ccSMika Westerberg 		 */
2722e6b245ccSMika Westerberg 		if (!sw->safe_mode)
2723e6b245ccSMika Westerberg 			return 0;
2724e6b245ccSMika Westerberg 		break;
27253e136768SMika Westerberg 	}
27263e136768SMika Westerberg 
2727661b1947SMika Westerberg 	if (sw->no_nvm_upgrade)
2728661b1947SMika Westerberg 		return 0;
2729661b1947SMika Westerberg 
2730661b1947SMika Westerberg 	if (tb_switch_is_usb4(sw)) {
2731661b1947SMika Westerberg 		ret = usb4_switch_nvm_authenticate_status(sw, &status);
2732661b1947SMika Westerberg 		if (ret)
2733661b1947SMika Westerberg 			return ret;
2734661b1947SMika Westerberg 
2735661b1947SMika Westerberg 		if (status) {
2736661b1947SMika Westerberg 			tb_sw_info(sw, "switch flash authentication failed\n");
2737661b1947SMika Westerberg 			nvm_set_auth_status(sw, status);
2738661b1947SMika Westerberg 		}
2739661b1947SMika Westerberg 
2740661b1947SMika Westerberg 		return 0;
2741661b1947SMika Westerberg 	}
2742661b1947SMika Westerberg 
27433f415e5eSMika Westerberg 	/* Root switch DMA port requires running firmware */
2744f07a3608SMika Westerberg 	if (!tb_route(sw) && !tb_switch_is_icm(sw))
2745e6b245ccSMika Westerberg 		return 0;
2746e6b245ccSMika Westerberg 
27473e136768SMika Westerberg 	sw->dma_port = dma_port_alloc(sw);
2748e6b245ccSMika Westerberg 	if (!sw->dma_port)
2749e6b245ccSMika Westerberg 		return 0;
2750e6b245ccSMika Westerberg 
2751e6b245ccSMika Westerberg 	/*
27527a7ebfa8SMika Westerberg 	 * If there is a status already set then authentication failed
27537a7ebfa8SMika Westerberg 	 * when dma_port_flash_update_auth() returned. Power cycling is
27547a7ebfa8SMika Westerberg 	 * not needed (it was done already) so the only thing we do here
27557a7ebfa8SMika Westerberg 	 * is unblock runtime PM of the root port.
27567a7ebfa8SMika Westerberg 	 */
27577a7ebfa8SMika Westerberg 	nvm_get_auth_status(sw, &status);
27587a7ebfa8SMika Westerberg 	if (status) {
27597a7ebfa8SMika Westerberg 		if (!tb_route(sw))
2760b0407983SMika Westerberg 			nvm_authenticate_complete_dma_port(sw);
27617a7ebfa8SMika Westerberg 		return 0;
27627a7ebfa8SMika Westerberg 	}
27637a7ebfa8SMika Westerberg 
27647a7ebfa8SMika Westerberg 	/*
2765e6b245ccSMika Westerberg 	 * Check status of the previous flash authentication. If there
2766e6b245ccSMika Westerberg 	 * is one we need to power cycle the switch in any case to make
2767e6b245ccSMika Westerberg 	 * it functional again.
2768e6b245ccSMika Westerberg 	 */
2769e6b245ccSMika Westerberg 	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
2770e6b245ccSMika Westerberg 	if (ret <= 0)
2771e6b245ccSMika Westerberg 		return ret;
2772e6b245ccSMika Westerberg 
27731830b6eeSMika Westerberg 	/* Now we can allow root port to suspend again */
27741830b6eeSMika Westerberg 	if (!tb_route(sw))
2775b0407983SMika Westerberg 		nvm_authenticate_complete_dma_port(sw);
27761830b6eeSMika Westerberg 
2777e6b245ccSMika Westerberg 	if (status) {
2778e6b245ccSMika Westerberg 		tb_sw_info(sw, "switch flash authentication failed\n");
2779e6b245ccSMika Westerberg 		nvm_set_auth_status(sw, status);
2780e6b245ccSMika Westerberg 	}
2781e6b245ccSMika Westerberg 
2782e6b245ccSMika Westerberg 	tb_sw_info(sw, "power cycling the switch now\n");
2783e6b245ccSMika Westerberg 	dma_port_power_cycle(sw->dma_port);
2784e6b245ccSMika Westerberg 
2785e6b245ccSMika Westerberg 	/*
2786e6b245ccSMika Westerberg 	 * We return an error here which causes the switch add to fail.
2787e6b245ccSMika Westerberg 	 * It should appear back after the power cycle is complete.
2788e6b245ccSMika Westerberg 	 */
2789e6b245ccSMika Westerberg 	return -ESHUTDOWN;
27903e136768SMika Westerberg }
27913e136768SMika Westerberg 
27920d46c08dSMika Westerberg static void tb_switch_default_link_ports(struct tb_switch *sw)
27930d46c08dSMika Westerberg {
27940d46c08dSMika Westerberg 	int i;
27950d46c08dSMika Westerberg 
279642716425SSanjay R Mehta 	for (i = 1; i <= sw->config.max_port_number; i++) {
27970d46c08dSMika Westerberg 		struct tb_port *port = &sw->ports[i];
27980d46c08dSMika Westerberg 		struct tb_port *subordinate;
27990d46c08dSMika Westerberg 
28000d46c08dSMika Westerberg 		if (!tb_port_is_null(port))
28010d46c08dSMika Westerberg 			continue;
28020d46c08dSMika Westerberg 
28030d46c08dSMika Westerberg 		/* Check for the subordinate port */
28040d46c08dSMika Westerberg 		if (i == sw->config.max_port_number ||
28050d46c08dSMika Westerberg 		    !tb_port_is_null(&sw->ports[i + 1]))
28060d46c08dSMika Westerberg 			continue;
28070d46c08dSMika Westerberg 
28080d46c08dSMika Westerberg 		/* Link them if not already done so (by DROM) */
28090d46c08dSMika Westerberg 		/* Link them if not already done (by the DROM) */
28100d46c08dSMika Westerberg 		if (!port->dual_link_port && !subordinate->dual_link_port) {
28110d46c08dSMika Westerberg 			port->link_nr = 0;
28120d46c08dSMika Westerberg 			port->dual_link_port = subordinate;
28130d46c08dSMika Westerberg 			subordinate->link_nr = 1;
28140d46c08dSMika Westerberg 			subordinate->dual_link_port = port;
28150d46c08dSMika Westerberg 
28160d46c08dSMika Westerberg 			tb_sw_dbg(sw, "linked ports %d <-> %d\n",
28170d46c08dSMika Westerberg 				  port->port, subordinate->port);
28180d46c08dSMika Westerberg 		}
28190d46c08dSMika Westerberg 	}
28200d46c08dSMika Westerberg }
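
/*
 * For example, on a router whose ports 1..4 are all lane adapters and
 * whose DROM did not pair them, the loop above links 1 <-> 2 and
 * 3 <-> 4 as the two dual-link pairs.
 */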
28210d46c08dSMika Westerberg 
282291c0c120SMika Westerberg static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
282391c0c120SMika Westerberg {
282491c0c120SMika Westerberg 	const struct tb_port *up = tb_upstream_port(sw);
282591c0c120SMika Westerberg 
282691c0c120SMika Westerberg 	if (!up->dual_link_port || !up->dual_link_port->remote)
282791c0c120SMika Westerberg 		return false;
282891c0c120SMika Westerberg 
2829b0407983SMika Westerberg 	if (tb_switch_is_usb4(sw))
2830b0407983SMika Westerberg 		return usb4_switch_lane_bonding_possible(sw);
283191c0c120SMika Westerberg 	return tb_lc_lane_bonding_possible(sw);
283291c0c120SMika Westerberg }
283391c0c120SMika Westerberg 
283491c0c120SMika Westerberg static int tb_switch_update_link_attributes(struct tb_switch *sw)
283591c0c120SMika Westerberg {
283691c0c120SMika Westerberg 	struct tb_port *up;
283791c0c120SMika Westerberg 	bool change = false;
283891c0c120SMika Westerberg 	int ret;
283991c0c120SMika Westerberg 
284091c0c120SMika Westerberg 	if (!tb_route(sw) || tb_switch_is_icm(sw))
284191c0c120SMika Westerberg 		return 0;
284291c0c120SMika Westerberg 
284391c0c120SMika Westerberg 	up = tb_upstream_port(sw);
284491c0c120SMika Westerberg 
284591c0c120SMika Westerberg 	ret = tb_port_get_link_speed(up);
284691c0c120SMika Westerberg 	if (ret < 0)
284791c0c120SMika Westerberg 		return ret;
284891c0c120SMika Westerberg 	if (sw->link_speed != ret)
284991c0c120SMika Westerberg 		change = true;
285091c0c120SMika Westerberg 	sw->link_speed = ret;
285191c0c120SMika Westerberg 
285291c0c120SMika Westerberg 	ret = tb_port_get_link_width(up);
285391c0c120SMika Westerberg 	if (ret < 0)
285491c0c120SMika Westerberg 		return ret;
285591c0c120SMika Westerberg 	if (sw->link_width != ret)
285691c0c120SMika Westerberg 		change = true;
285791c0c120SMika Westerberg 	sw->link_width = ret;
285891c0c120SMika Westerberg 
285991c0c120SMika Westerberg 	/* Notify userspace that there is a possible link attribute change */
286091c0c120SMika Westerberg 	if (device_is_registered(&sw->dev) && change)
286191c0c120SMika Westerberg 		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
286291c0c120SMika Westerberg 
286391c0c120SMika Westerberg 	return 0;
286491c0c120SMika Westerberg }
286591c0c120SMika Westerberg 
28669b6933e9SGil Fine /* Must be called after tb_switch_update_link_attributes() */
28679b6933e9SGil Fine static void tb_switch_link_init(struct tb_switch *sw)
28689b6933e9SGil Fine {
28699b6933e9SGil Fine 	struct tb_port *up, *down;
28709b6933e9SGil Fine 	bool bonded;
28719b6933e9SGil Fine 
28729b6933e9SGil Fine 	if (!tb_route(sw) || tb_switch_is_icm(sw))
28739b6933e9SGil Fine 		return;
28749b6933e9SGil Fine 
28759b6933e9SGil Fine 	tb_sw_dbg(sw, "current link speed %u.0 Gb/s\n", sw->link_speed);
28769b6933e9SGil Fine 	tb_sw_dbg(sw, "current link width %s\n", width_name(sw->link_width));
28779b6933e9SGil Fine 
28789b6933e9SGil Fine 	bonded = sw->link_width >= TB_LINK_WIDTH_DUAL;
28799b6933e9SGil Fine 
28809b6933e9SGil Fine 	/*
28819b6933e9SGil Fine 	 * Gen 4 links come up as bonded so update the port structures
28829b6933e9SGil Fine 	 * accordingly.
28839b6933e9SGil Fine 	 */
28849b6933e9SGil Fine 	up = tb_upstream_port(sw);
28859b6933e9SGil Fine 	down = tb_switch_downstream_port(sw);
28869b6933e9SGil Fine 
28879b6933e9SGil Fine 	up->bonded = bonded;
28889b6933e9SGil Fine 	if (up->dual_link_port)
28899b6933e9SGil Fine 		up->dual_link_port->bonded = bonded;
28909b6933e9SGil Fine 	tb_port_update_credits(up);
28919b6933e9SGil Fine 
28929b6933e9SGil Fine 	down->bonded = bonded;
28939b6933e9SGil Fine 	if (down->dual_link_port)
28949b6933e9SGil Fine 		down->dual_link_port->bonded = bonded;
28959b6933e9SGil Fine 	tb_port_update_credits(down);
28969b6933e9SGil Fine }
28979b6933e9SGil Fine 
289891c0c120SMika Westerberg /**
289991c0c120SMika Westerberg  * tb_switch_lane_bonding_enable() - Enable lane bonding
290091c0c120SMika Westerberg  * @sw: Switch to enable lane bonding
290191c0c120SMika Westerberg  *
290291c0c120SMika Westerberg  * The connection manager can call this function to enable lane bonding of a
290391c0c120SMika Westerberg  * switch. If conditions are correct and both switches support the feature,
290491c0c120SMika Westerberg  * lanes are bonded. It is safe to call this for any switch.
290591c0c120SMika Westerberg  */
29069b6933e9SGil Fine static int tb_switch_lane_bonding_enable(struct tb_switch *sw)
290791c0c120SMika Westerberg {
290891c0c120SMika Westerberg 	struct tb_port *up, *down;
29099b6933e9SGil Fine 	unsigned int width;
291091c0c120SMika Westerberg 	int ret;
291191c0c120SMika Westerberg 
291291c0c120SMika Westerberg 	if (!tb_switch_lane_bonding_possible(sw))
291391c0c120SMika Westerberg 		return 0;
291491c0c120SMika Westerberg 
291591c0c120SMika Westerberg 	up = tb_upstream_port(sw);
29167ce54221SGil Fine 	down = tb_switch_downstream_port(sw);
291791c0c120SMika Westerberg 
29189b6933e9SGil Fine 	if (!tb_port_width_supported(up, TB_LINK_WIDTH_DUAL) ||
29199b6933e9SGil Fine 	    !tb_port_width_supported(down, TB_LINK_WIDTH_DUAL))
292091c0c120SMika Westerberg 		return 0;
292191c0c120SMika Westerberg 
2922a9fdf5f9SMika Westerberg 	/*
2923a9fdf5f9SMika Westerberg 	 * Both lanes need to be in CL0. Here we assume lane 0 already be in
2924a9fdf5f9SMika Westerberg 	 * CL0 and check just for lane 1.
2925a9fdf5f9SMika Westerberg 	 */
2926a9fdf5f9SMika Westerberg 	if (tb_wait_for_port(down->dual_link_port, false) <= 0)
2927a9fdf5f9SMika Westerberg 		return -ENOTCONN;
2928a9fdf5f9SMika Westerberg 
292991c0c120SMika Westerberg 	ret = tb_port_lane_bonding_enable(up);
293091c0c120SMika Westerberg 	if (ret) {
293191c0c120SMika Westerberg 		tb_port_warn(up, "failed to enable lane bonding\n");
293291c0c120SMika Westerberg 		return ret;
293391c0c120SMika Westerberg 	}
293491c0c120SMika Westerberg 
293591c0c120SMika Westerberg 	ret = tb_port_lane_bonding_enable(down);
293691c0c120SMika Westerberg 	if (ret) {
293791c0c120SMika Westerberg 		tb_port_warn(down, "failed to enable lane bonding\n");
293891c0c120SMika Westerberg 		tb_port_lane_bonding_disable(up);
293991c0c120SMika Westerberg 		return ret;
294091c0c120SMika Westerberg 	}
294191c0c120SMika Westerberg 
2942e111fb92SGil Fine 	/* Any of these widths means the lanes are bonded */
29439b6933e9SGil Fine 	width = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
2944e111fb92SGil Fine 		TB_LINK_WIDTH_ASYM_RX;
2945e111fb92SGil Fine 
29469b6933e9SGil Fine 	return tb_port_wait_for_link_width(down, width, 100);
294791c0c120SMika Westerberg }
294891c0c120SMika Westerberg 
294991c0c120SMika Westerberg /**
295091c0c120SMika Westerberg  * tb_switch_lane_bonding_disable() - Disable lane bonding
295191c0c120SMika Westerberg  * @sw: Switch whose lane bonding to disable
295291c0c120SMika Westerberg  *
295391c0c120SMika Westerberg  * Disables lane bonding between @sw and parent. This can be called even
295491c0c120SMika Westerberg  * if lanes were not bonded originally.
295591c0c120SMika Westerberg  */
29569b6933e9SGil Fine static int tb_switch_lane_bonding_disable(struct tb_switch *sw)
295791c0c120SMika Westerberg {
295891c0c120SMika Westerberg 	struct tb_port *up, *down;
2959e111fb92SGil Fine 	int ret;
296091c0c120SMika Westerberg 
296191c0c120SMika Westerberg 	up = tb_upstream_port(sw);
296291c0c120SMika Westerberg 	if (!up->bonded)
29639b6933e9SGil Fine 		return 0;
29649b6933e9SGil Fine 
29659b6933e9SGil Fine 	/*
29669b6933e9SGil Fine 	 * If the link is Gen 4 there is no way to switch the link to
29679b6933e9SGil Fine 	 * two single lane links so avoid that here. Also don't bother
29689b6933e9SGil Fine 	 * if the link is not up anymore (sw is unplugged).
29699b6933e9SGil Fine 	 */
29709b6933e9SGil Fine 	ret = tb_port_get_link_generation(up);
29719b6933e9SGil Fine 	if (ret < 0)
29729b6933e9SGil Fine 		return ret;
29739b6933e9SGil Fine 	if (ret >= 4)
29749b6933e9SGil Fine 		return -EOPNOTSUPP;
297591c0c120SMika Westerberg 
29767ce54221SGil Fine 	down = tb_switch_downstream_port(sw);
297791c0c120SMika Westerberg 	tb_port_lane_bonding_disable(up);
297891c0c120SMika Westerberg 	tb_port_lane_bonding_disable(down);
297991c0c120SMika Westerberg 
2980e7051beaSMika Westerberg 	/*
2981e7051beaSMika Westerberg 	 * It is fine if we get other errors as the router might have
2982e7051beaSMika Westerberg 	 * been unplugged.
2983e7051beaSMika Westerberg 	 */
29849b6933e9SGil Fine 	return tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100);
29859b6933e9SGil Fine }
29869b6933e9SGil Fine 
29873dc5525dSMika Westerberg /* Note updating sw->link_width done in tb_switch_update_link_attributes() */
29889b6933e9SGil Fine static int tb_switch_asym_enable(struct tb_switch *sw, enum tb_link_width width)
29899b6933e9SGil Fine {
29909b6933e9SGil Fine 	struct tb_port *up, *down, *port;
29919b6933e9SGil Fine 	enum tb_link_width down_width;
29929b6933e9SGil Fine 	int ret;
29939b6933e9SGil Fine 
29949b6933e9SGil Fine 	up = tb_upstream_port(sw);
29959b6933e9SGil Fine 	down = tb_switch_downstream_port(sw);
29969b6933e9SGil Fine 
29979b6933e9SGil Fine 	if (width == TB_LINK_WIDTH_ASYM_TX) {
29989b6933e9SGil Fine 		down_width = TB_LINK_WIDTH_ASYM_RX;
29999b6933e9SGil Fine 		port = down;
30009b6933e9SGil Fine 	} else {
30019b6933e9SGil Fine 		down_width = TB_LINK_WIDTH_ASYM_TX;
30029b6933e9SGil Fine 		port = up;
30039b6933e9SGil Fine 	}
30049b6933e9SGil Fine 
30059b6933e9SGil Fine 	ret = tb_port_set_link_width(up, width);
30069b6933e9SGil Fine 	if (ret)
30079b6933e9SGil Fine 		return ret;
30089b6933e9SGil Fine 
30099b6933e9SGil Fine 	ret = tb_port_set_link_width(down, down_width);
30109b6933e9SGil Fine 	if (ret)
30119b6933e9SGil Fine 		return ret;
30129b6933e9SGil Fine 
30139b6933e9SGil Fine 	/*
30149b6933e9SGil Fine 	 * Initiate the change in the router side where one of the TX lanes
30159b6933e9SGil Fine 	 * is changing to RX, but do so only if there is an actual change.
30169b6933e9SGil Fine 	 */
30179b6933e9SGil Fine 	if (sw->link_width != width) {
30189b6933e9SGil Fine 		ret = usb4_port_asym_start(port);
30199b6933e9SGil Fine 		if (ret)
30209b6933e9SGil Fine 			return ret;
30219b6933e9SGil Fine 
30229b6933e9SGil Fine 		ret = tb_port_wait_for_link_width(up, width, 100);
30239b6933e9SGil Fine 		if (ret)
30249b6933e9SGil Fine 			return ret;
30259b6933e9SGil Fine 	}
30269b6933e9SGil Fine 
30279b6933e9SGil Fine 	return 0;
30289b6933e9SGil Fine }
30299b6933e9SGil Fine 
30303dc5525dSMika Westerberg /* Note updating sw->link_width done in tb_switch_update_link_attributes() */
30319b6933e9SGil Fine static int tb_switch_asym_disable(struct tb_switch *sw)
30329b6933e9SGil Fine {
30339b6933e9SGil Fine 	struct tb_port *up, *down;
30349b6933e9SGil Fine 	int ret;
30359b6933e9SGil Fine 
30369b6933e9SGil Fine 	up = tb_upstream_port(sw);
30379b6933e9SGil Fine 	down = tb_switch_downstream_port(sw);
30389b6933e9SGil Fine 
30399b6933e9SGil Fine 	ret = tb_port_set_link_width(up, TB_LINK_WIDTH_DUAL);
30409b6933e9SGil Fine 	if (ret)
30419b6933e9SGil Fine 		return ret;
30429b6933e9SGil Fine 
30439b6933e9SGil Fine 	ret = tb_port_set_link_width(down, TB_LINK_WIDTH_DUAL);
30449b6933e9SGil Fine 	if (ret)
30459b6933e9SGil Fine 		return ret;
30469b6933e9SGil Fine 
30479b6933e9SGil Fine 	/*
30489b6933e9SGil Fine 	 * Initiate the change in the router side that has three TX lanes
30499b6933e9SGil Fine 	 * and is changing one of them back to RX, but only if there is an
30509b6933e9SGil Fine 	 * actual change in the link width.
30519b6933e9SGil Fine 	 */
30529b6933e9SGil Fine 	if (sw->link_width > TB_LINK_WIDTH_DUAL) {
30539b6933e9SGil Fine 		if (sw->link_width == TB_LINK_WIDTH_ASYM_TX)
30549b6933e9SGil Fine 			ret = usb4_port_asym_start(up);
30559b6933e9SGil Fine 		else
30569b6933e9SGil Fine 			ret = usb4_port_asym_start(down);
30579b6933e9SGil Fine 		if (ret)
30589b6933e9SGil Fine 			return ret;
30599b6933e9SGil Fine 
30609b6933e9SGil Fine 		ret = tb_port_wait_for_link_width(up, TB_LINK_WIDTH_DUAL, 100);
30619b6933e9SGil Fine 		if (ret)
30629b6933e9SGil Fine 			return ret;
30639b6933e9SGil Fine 	}
30649b6933e9SGil Fine 
30659b6933e9SGil Fine 	return 0;
30669b6933e9SGil Fine }
30679b6933e9SGil Fine 
30689b6933e9SGil Fine /**
30699b6933e9SGil Fine  * tb_switch_set_link_width() - Configure router link width
30709b6933e9SGil Fine  * @sw: Router to configure
30719b6933e9SGil Fine  * @width: The new link width
30729b6933e9SGil Fine  *
30739b6933e9SGil Fine  * Set the device router link width to @width from the router upstream
30749b6933e9SGil Fine  * port perspective. Also supports asymmetric links if the routers on
30759b6933e9SGil Fine  * both sides of the link support it.
30769b6933e9SGil Fine  *
30779b6933e9SGil Fine  * Does nothing for host router.
30789b6933e9SGil Fine  *
30799b6933e9SGil Fine  * Returns %0 in case of success, negative errno otherwise.
30809b6933e9SGil Fine  */
30819b6933e9SGil Fine int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width)
30829b6933e9SGil Fine {
30839b6933e9SGil Fine 	struct tb_port *up, *down;
30849b6933e9SGil Fine 	int ret = 0;
30859b6933e9SGil Fine 
30869b6933e9SGil Fine 	if (!tb_route(sw))
30879b6933e9SGil Fine 		return 0;
30889b6933e9SGil Fine 
30899b6933e9SGil Fine 	up = tb_upstream_port(sw);
30909b6933e9SGil Fine 	down = tb_switch_downstream_port(sw);
30919b6933e9SGil Fine 
30929b6933e9SGil Fine 	switch (width) {
30939b6933e9SGil Fine 	case TB_LINK_WIDTH_SINGLE:
30949b6933e9SGil Fine 		ret = tb_switch_lane_bonding_disable(sw);
30959b6933e9SGil Fine 		break;
30969b6933e9SGil Fine 
30979b6933e9SGil Fine 	case TB_LINK_WIDTH_DUAL:
30989b6933e9SGil Fine 		if (sw->link_width == TB_LINK_WIDTH_ASYM_TX ||
30999b6933e9SGil Fine 		    sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
31009b6933e9SGil Fine 			ret = tb_switch_asym_disable(sw);
31019b6933e9SGil Fine 			if (ret)
31029b6933e9SGil Fine 				break;
31039b6933e9SGil Fine 		}
31049b6933e9SGil Fine 		ret = tb_switch_lane_bonding_enable(sw);
31059b6933e9SGil Fine 		break;
31069b6933e9SGil Fine 
31079b6933e9SGil Fine 	case TB_LINK_WIDTH_ASYM_TX:
31089b6933e9SGil Fine 	case TB_LINK_WIDTH_ASYM_RX:
31099b6933e9SGil Fine 		ret = tb_switch_asym_enable(sw, width);
31109b6933e9SGil Fine 		break;
31119b6933e9SGil Fine 	}
31129b6933e9SGil Fine 
31139b6933e9SGil Fine 	switch (ret) {
31149b6933e9SGil Fine 	case 0:
31159b6933e9SGil Fine 		break;
31169b6933e9SGil Fine 
31179b6933e9SGil Fine 	case -ETIMEDOUT:
31189b6933e9SGil Fine 		tb_sw_warn(sw, "timeout changing link width\n");
31199b6933e9SGil Fine 		return ret;
31209b6933e9SGil Fine 
31219b6933e9SGil Fine 	case -ENOTCONN:
31229b6933e9SGil Fine 	case -EOPNOTSUPP:
31239b6933e9SGil Fine 	case -ENODEV:
31249b6933e9SGil Fine 		return ret;
31259b6933e9SGil Fine 
31269b6933e9SGil Fine 	default:
31279b6933e9SGil Fine 		tb_sw_dbg(sw, "failed to change link width: %d\n", ret);
31289b6933e9SGil Fine 		return ret;
31299b6933e9SGil Fine 	}
3130e7051beaSMika Westerberg 
313169fea377SMika Westerberg 	tb_port_update_credits(down);
313269fea377SMika Westerberg 	tb_port_update_credits(up);
31339b6933e9SGil Fine 
313491c0c120SMika Westerberg 	tb_switch_update_link_attributes(sw);
313569fea377SMika Westerberg 
31369b6933e9SGil Fine 	tb_sw_dbg(sw, "link width set to %s\n", width_name(width));
31379b6933e9SGil Fine 	return ret;
313891c0c120SMika Westerberg }
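
/*
 * For example, a connection manager bonding both lanes of a freshly
 * enumerated device router would do (sketch; error handling left to
 * the caller):
 *
 *	ret = tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL);
 */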
313991c0c120SMika Westerberg 
3140bfe778acSMika Westerberg /**
3141de462039SMika Westerberg  * tb_switch_configure_link() - Set link configured
3142de462039SMika Westerberg  * @sw: Switch whose link is configured
3143de462039SMika Westerberg  *
3144de462039SMika Westerberg  * Sets the link upstream from @sw configured (from both ends) so that
3145de462039SMika Westerberg  * it will not be disconnected when the domain exits sleep. Can be
3146de462039SMika Westerberg  * called for any switch.
3147de462039SMika Westerberg  *
3148de462039SMika Westerberg  * It is recommended that this is called after lane bonding is enabled.
3149de462039SMika Westerberg  *
3150de462039SMika Westerberg  * Returns %0 on success and negative errno in case of error.
3151de462039SMika Westerberg  */
3152de462039SMika Westerberg int tb_switch_configure_link(struct tb_switch *sw)
3153de462039SMika Westerberg {
3154e28178bfSMika Westerberg 	struct tb_port *up, *down;
3155e28178bfSMika Westerberg 	int ret;
3156e28178bfSMika Westerberg 
3157de462039SMika Westerberg 	if (!tb_route(sw) || tb_switch_is_icm(sw))
3158de462039SMika Westerberg 		return 0;
3159de462039SMika Westerberg 
3160e28178bfSMika Westerberg 	up = tb_upstream_port(sw);
3161e28178bfSMika Westerberg 	if (tb_switch_is_usb4(up->sw))
3162e28178bfSMika Westerberg 		ret = usb4_port_configure(up);
3163e28178bfSMika Westerberg 	else
3164e28178bfSMika Westerberg 		ret = tb_lc_configure_port(up);
3165e28178bfSMika Westerberg 	if (ret)
3166e28178bfSMika Westerberg 		return ret;
3167e28178bfSMika Westerberg 
3168e28178bfSMika Westerberg 	down = up->remote;
3169e28178bfSMika Westerberg 	if (tb_switch_is_usb4(down->sw))
3170e28178bfSMika Westerberg 		return usb4_port_configure(down);
3171e28178bfSMika Westerberg 	return tb_lc_configure_port(down);
3172de462039SMika Westerberg }
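
/*
 * Per the recommendation above, a connection manager would typically
 * enable bonding first and then mark the link configured (sketch):
 *
 *	tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL);
 *	tb_switch_configure_link(sw);
 */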
3173de462039SMika Westerberg 
3174de462039SMika Westerberg /**
3175de462039SMika Westerberg  * tb_switch_unconfigure_link() - Unconfigure link
3176de462039SMika Westerberg  * @sw: Switch whose link is unconfigured
3177de462039SMika Westerberg  *
3178de462039SMika Westerberg  * Sets the link unconfigured so that @sw will be disconnected if the
3179de462039SMika Westerberg  * domain exits sleep.
3180de462039SMika Westerberg  */
3181de462039SMika Westerberg void tb_switch_unconfigure_link(struct tb_switch *sw)
3182de462039SMika Westerberg {
3183e28178bfSMika Westerberg 	struct tb_port *up, *down;
3184e28178bfSMika Westerberg 
3185de462039SMika Westerberg 	if (!tb_route(sw) || tb_switch_is_icm(sw))
3186de462039SMika Westerberg 		return;
3187de462039SMika Westerberg 
31889954c514SGil Fine 	/*
31899954c514SGil Fine 	 * Unconfigure downstream port so that wake-on-connect can be
31909954c514SGil Fine 	 * configured after router unplug. No need to unconfigure upstream port
31919954c514SGil Fine 	 * since its router is unplugged.
31929954c514SGil Fine 	 */
31939954c514SGil Fine 	up = tb_upstream_port(sw);
31949954c514SGil Fine 	down = up->remote;
31959954c514SGil Fine 	if (tb_switch_is_usb4(down->sw))
31969954c514SGil Fine 		usb4_port_unconfigure(down);
31979954c514SGil Fine 	else
31989954c514SGil Fine 		tb_lc_unconfigure_port(down);
31999954c514SGil Fine 
32009954c514SGil Fine 	if (sw->is_unplugged)
32019954c514SGil Fine 		return;
32029954c514SGil Fine 
3204e28178bfSMika Westerberg 	if (tb_switch_is_usb4(up->sw))
3205e28178bfSMika Westerberg 		usb4_port_unconfigure(up);
3206de462039SMika Westerberg 	else
3207e28178bfSMika Westerberg 		tb_lc_unconfigure_port(up);
3208de462039SMika Westerberg }
3209de462039SMika Westerberg 
tb_switch_credits_init(struct tb_switch * sw)321056ad3aefSMika Westerberg static void tb_switch_credits_init(struct tb_switch *sw)
321156ad3aefSMika Westerberg {
321256ad3aefSMika Westerberg 	if (tb_switch_is_icm(sw))
321356ad3aefSMika Westerberg 		return;
321456ad3aefSMika Westerberg 	if (!tb_switch_is_usb4(sw))
321556ad3aefSMika Westerberg 		return;
321656ad3aefSMika Westerberg 	if (usb4_switch_credits_init(sw))
321756ad3aefSMika Westerberg 		tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n");
321856ad3aefSMika Westerberg }
321956ad3aefSMika Westerberg 
tb_switch_port_hotplug_enable(struct tb_switch * sw)32205d2569cbSMario Limonciello static int tb_switch_port_hotplug_enable(struct tb_switch *sw)
32215d2569cbSMario Limonciello {
32225d2569cbSMario Limonciello 	struct tb_port *port;
32235d2569cbSMario Limonciello 
32245d2569cbSMario Limonciello 	if (tb_switch_is_icm(sw))
32255d2569cbSMario Limonciello 		return 0;
32265d2569cbSMario Limonciello 
32275d2569cbSMario Limonciello 	tb_switch_for_each_port(sw, port) {
32285d2569cbSMario Limonciello 		int res;
32295d2569cbSMario Limonciello 
32305d2569cbSMario Limonciello 		if (!port->cap_usb4)
32315d2569cbSMario Limonciello 			continue;
32325d2569cbSMario Limonciello 
32335d2569cbSMario Limonciello 		res = usb4_port_hotplug_enable(port);
32345d2569cbSMario Limonciello 		if (res)
32355d2569cbSMario Limonciello 			return res;
32365d2569cbSMario Limonciello 	}
32375d2569cbSMario Limonciello 	return 0;
32385d2569cbSMario Limonciello }
32395d2569cbSMario Limonciello 
3240de462039SMika Westerberg /**
3241bfe778acSMika Westerberg  * tb_switch_add() - Add a switch to the domain
3242bfe778acSMika Westerberg  * @sw: Switch to add
3243bfe778acSMika Westerberg  *
3244bfe778acSMika Westerberg  * This is the last step in adding a switch to the domain. It reads
3245bfe778acSMika Westerberg  * identification information from the DROM and initializes ports so
3246bfe778acSMika Westerberg  * that they can be used to connect other switches. The switch will be
3247bfe778acSMika Westerberg  * exposed to userspace when this function successfully returns. To
3248bfe778acSMika Westerberg  * remove and release the switch, call tb_switch_remove().
3249bfe778acSMika Westerberg  *
3250bfe778acSMika Westerberg  * Return: %0 in case of success and negative errno in case of failure
3251bfe778acSMika Westerberg  */
tb_switch_add(struct tb_switch * sw)3252bfe778acSMika Westerberg int tb_switch_add(struct tb_switch *sw)
3253bfe778acSMika Westerberg {
3254bfe778acSMika Westerberg 	int i, ret;
3255ca389f71SAndreas Noever 
32563e136768SMika Westerberg 	/*
32573e136768SMika Westerberg 	 * Initialize the DMA control port now, before we read the DROM.
32583e136768SMika Westerberg 	 * Recent host controllers have a more complete DROM in NVM that
32593e136768SMika Westerberg 	 * includes vendor and model identification strings which we then
32603e136768SMika Westerberg 	 * expose to userspace. The NVM can be accessed through the DMA
32613e136768SMika Westerberg 	 * configuration based mailbox.
32623e136768SMika Westerberg 	 */
3263e6b245ccSMika Westerberg 	ret = tb_switch_add_dma_port(sw);
3264af99f696SMika Westerberg 	if (ret) {
3265af99f696SMika Westerberg 		dev_err(&sw->dev, "failed to add DMA port\n");
3266e6b245ccSMika Westerberg 		return ret;
3267af99f696SMika Westerberg 	}
32683e136768SMika Westerberg 
3269e6b245ccSMika Westerberg 	if (!sw->safe_mode) {
327056ad3aefSMika Westerberg 		tb_switch_credits_init(sw);
327156ad3aefSMika Westerberg 
3272343fcb8cSAndreas Noever 		/* read drom */
3273f53e7676SMika Westerberg 		ret = tb_drom_read(sw);
32746915812bSMario Limonciello 		if (ret)
32756915812bSMario Limonciello 			dev_warn(&sw->dev, "reading DROM failed: %d\n", ret);
3276daa5140fSMika Westerberg 		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
3277c90553b3SAndreas Noever 
32782cc12751SAditya Pakki 		ret = tb_switch_set_uuid(sw);
3279af99f696SMika Westerberg 		if (ret) {
3280af99f696SMika Westerberg 			dev_err(&sw->dev, "failed to set UUID\n");
32812cc12751SAditya Pakki 			return ret;
3282af99f696SMika Westerberg 		}
3283bfe778acSMika Westerberg 
3284343fcb8cSAndreas Noever 		for (i = 0; i <= sw->config.max_port_number; i++) {
3285343fcb8cSAndreas Noever 			if (sw->ports[i].disabled) {
3286daa5140fSMika Westerberg 				tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
3287343fcb8cSAndreas Noever 				continue;
3288343fcb8cSAndreas Noever 			}
3289bfe778acSMika Westerberg 			ret = tb_init_port(&sw->ports[i]);
3290af99f696SMika Westerberg 			if (ret) {
3291af99f696SMika Westerberg 				dev_err(&sw->dev, "failed to initialize port %d\n", i);
3292bfe778acSMika Westerberg 				return ret;
3293343fcb8cSAndreas Noever 			}
3294e6b245ccSMika Westerberg 		}
329591c0c120SMika Westerberg 
3296d2d6ddf1SMika Westerberg 		tb_check_quirks(sw);
3297d2d6ddf1SMika Westerberg 
32980d46c08dSMika Westerberg 		tb_switch_default_link_ports(sw);
32990d46c08dSMika Westerberg 
330091c0c120SMika Westerberg 		ret = tb_switch_update_link_attributes(sw);
330191c0c120SMika Westerberg 		if (ret)
330291c0c120SMika Westerberg 			return ret;
3303cf29b9afSRajmohan Mani 
33049b6933e9SGil Fine 		tb_switch_link_init(sw);
33059b6933e9SGil Fine 
3306768e6fe6SMika Westerberg 		ret = tb_switch_clx_init(sw);
3307768e6fe6SMika Westerberg 		if (ret)
3308768e6fe6SMika Westerberg 			return ret;
3309768e6fe6SMika Westerberg 
3310cf29b9afSRajmohan Mani 		ret = tb_switch_tmu_init(sw);
3311cf29b9afSRajmohan Mani 		if (ret)
3312cf29b9afSRajmohan Mani 			return ret;
3313af99f696SMika Westerberg 	}
3314343fcb8cSAndreas Noever 
33155d2569cbSMario Limonciello 	ret = tb_switch_port_hotplug_enable(sw);
33165d2569cbSMario Limonciello 	if (ret)
33175d2569cbSMario Limonciello 		return ret;
33185d2569cbSMario Limonciello 
3319e6b245ccSMika Westerberg 	ret = device_add(&sw->dev);
3320af99f696SMika Westerberg 	if (ret) {
3321af99f696SMika Westerberg 		dev_err(&sw->dev, "failed to add device: %d\n", ret);
3322e6b245ccSMika Westerberg 		return ret;
3323af99f696SMika Westerberg 	}
3324e6b245ccSMika Westerberg 
3325a83bc4a5SMika Westerberg 	if (tb_route(sw)) {
3326a83bc4a5SMika Westerberg 		dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
3327a83bc4a5SMika Westerberg 			 sw->vendor, sw->device);
3328a83bc4a5SMika Westerberg 		if (sw->vendor_name && sw->device_name)
3329a83bc4a5SMika Westerberg 			dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
3330a83bc4a5SMika Westerberg 				 sw->device_name);
3331a83bc4a5SMika Westerberg 	}
3332a83bc4a5SMika Westerberg 
3333cae5f515SMika Westerberg 	ret = usb4_switch_add_ports(sw);
3334cae5f515SMika Westerberg 	if (ret) {
3335cae5f515SMika Westerberg 		dev_err(&sw->dev, "failed to add USB4 ports\n");
3336cae5f515SMika Westerberg 		goto err_del;
3337cae5f515SMika Westerberg 	}
3338cae5f515SMika Westerberg 
3339e6b245ccSMika Westerberg 	ret = tb_switch_nvm_add(sw);
33402d8ff0b5SMika Westerberg 	if (ret) {
3341af99f696SMika Westerberg 		dev_err(&sw->dev, "failed to add NVM devices\n");
3342cae5f515SMika Westerberg 		goto err_ports;
3343bfe778acSMika Westerberg 	}
3344343fcb8cSAndreas Noever 
3345b2911a59SMika Westerberg 	/*
3346b2911a59SMika Westerberg 	 * Thunderbolt routers do not generate wakeups themselves but
3347b2911a59SMika Westerberg 	 * they forward wakeups from tunneled protocols, so enable wakeup
3348b2911a59SMika Westerberg 	 * here.
3349b2911a59SMika Westerberg 	 */
3350b2911a59SMika Westerberg 	device_init_wakeup(&sw->dev, true);
3351b2911a59SMika Westerberg 
33522d8ff0b5SMika Westerberg 	pm_runtime_set_active(&sw->dev);
33532d8ff0b5SMika Westerberg 	if (sw->rpm) {
33542d8ff0b5SMika Westerberg 		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
33552d8ff0b5SMika Westerberg 		pm_runtime_use_autosuspend(&sw->dev);
33562d8ff0b5SMika Westerberg 		pm_runtime_mark_last_busy(&sw->dev);
33572d8ff0b5SMika Westerberg 		pm_runtime_enable(&sw->dev);
33582d8ff0b5SMika Westerberg 		pm_request_autosuspend(&sw->dev);
33592d8ff0b5SMika Westerberg 	}
33602d8ff0b5SMika Westerberg 
336154e41810SGil Fine 	tb_switch_debugfs_init(sw);
33622d8ff0b5SMika Westerberg 	return 0;
3363cae5f515SMika Westerberg 
3364cae5f515SMika Westerberg err_ports:
3365cae5f515SMika Westerberg 	usb4_switch_remove_ports(sw);
3366cae5f515SMika Westerberg err_del:
3367cae5f515SMika Westerberg 	device_del(&sw->dev);
3368cae5f515SMika Westerberg 
3369cae5f515SMika Westerberg 	return ret;
33702d8ff0b5SMika Westerberg }
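
/*
 * Illustrative sketch only, not part of the driver: the usual lifecycle
 * a connection manager follows around tb_switch_add(). tb_switch_alloc()
 * and tb_switch_configure() are defined elsewhere in this driver; the
 * error handling here is trimmed to the minimum.
 */
static void __maybe_unused example_switch_lifecycle(struct tb *tb, u64 route)
{
	struct tb_switch *sw;

	sw = tb_switch_alloc(tb, &tb->dev, route);
	if (IS_ERR(sw))
		return;

	if (tb_switch_configure(sw))	/* basic config space setup */
		goto err_put;

	if (tb_switch_add(sw))		/* exposes the device to userspace */
		goto err_put;

	return;				/* later removed with tb_switch_remove() */

err_put:
	tb_switch_put(sw);
}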
33712d8ff0b5SMika Westerberg 
3372bfe778acSMika Westerberg /**
3373bfe778acSMika Westerberg  * tb_switch_remove() - Remove and release a switch
3374bfe778acSMika Westerberg  * @sw: Switch to remove
3375bfe778acSMika Westerberg  *
3376bfe778acSMika Westerberg  * This removes the switch from the domain and releases it once the
3377bfe778acSMika Westerberg  * last reference to it is dropped. If there are switches connected
3378bfe778acSMika Westerberg  * below this switch, they are removed as well.
3379bfe778acSMika Westerberg  */
tb_switch_remove(struct tb_switch * sw)3380bfe778acSMika Westerberg void tb_switch_remove(struct tb_switch *sw)
3381bfe778acSMika Westerberg {
3382b433d010SMika Westerberg 	struct tb_port *port;
3383ca389f71SAndreas Noever 
338454e41810SGil Fine 	tb_switch_debugfs_remove(sw);
338554e41810SGil Fine 
33862d8ff0b5SMika Westerberg 	if (sw->rpm) {
33872d8ff0b5SMika Westerberg 		pm_runtime_get_sync(&sw->dev);
33882d8ff0b5SMika Westerberg 		pm_runtime_disable(&sw->dev);
33892d8ff0b5SMika Westerberg 	}
33902d8ff0b5SMika Westerberg 
3391bfe778acSMika Westerberg 	/* port 0 is the switch itself and never has a remote */
3392b433d010SMika Westerberg 	tb_switch_for_each_port(sw, port) {
3393b433d010SMika Westerberg 		if (tb_port_has_remote(port)) {
3394b433d010SMika Westerberg 			tb_switch_remove(port->remote->sw);
3395b433d010SMika Westerberg 			port->remote = NULL;
3396b433d010SMika Westerberg 		} else if (port->xdomain) {
339718b3ad2aSMika Westerberg 			port->xdomain->is_unplugged = true;
3398b433d010SMika Westerberg 			tb_xdomain_remove(port->xdomain);
3399b433d010SMika Westerberg 			port->xdomain = NULL;
3400bfe778acSMika Westerberg 		}
3401dacb1287SKranthi Kuntala 
3402dacb1287SKranthi Kuntala 		/* Remove any downstream retimers */
3403dacb1287SKranthi Kuntala 		tb_retimer_remove_all(port);
3404dfe40ca4SMika Westerberg 	}
3405bfe778acSMika Westerberg 
3406bfe778acSMika Westerberg 	if (!sw->is_unplugged)
3407bfe778acSMika Westerberg 		tb_plug_events_active(sw, false);
3408b0407983SMika Westerberg 
3409e6b245ccSMika Westerberg 	tb_switch_nvm_remove(sw);
3410cae5f515SMika Westerberg 	usb4_switch_remove_ports(sw);
3411a83bc4a5SMika Westerberg 
3412a83bc4a5SMika Westerberg 	if (tb_route(sw))
3413a83bc4a5SMika Westerberg 		dev_info(&sw->dev, "device disconnected\n");
3414bfe778acSMika Westerberg 	device_unregister(&sw->dev);
3415a25c8b2fSAndreas Noever }
3416a25c8b2fSAndreas Noever 
3417053596d9SAndreas Noever /**
3418aae20bb6SLukas Wunner  * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
34195c6b471bSMika Westerberg  * @sw: Router to mark unplugged
3420053596d9SAndreas Noever  */
tb_sw_set_unplugged(struct tb_switch * sw)3421aae20bb6SLukas Wunner void tb_sw_set_unplugged(struct tb_switch *sw)
3422053596d9SAndreas Noever {
3423b433d010SMika Westerberg 	struct tb_port *port;
3424b433d010SMika Westerberg 
3425053596d9SAndreas Noever 	if (sw == sw->tb->root_switch) {
3426053596d9SAndreas Noever 		tb_sw_WARN(sw, "cannot unplug root switch\n");
3427053596d9SAndreas Noever 		return;
3428053596d9SAndreas Noever 	}
3429053596d9SAndreas Noever 	if (sw->is_unplugged) {
3430053596d9SAndreas Noever 		tb_sw_WARN(sw, "is_unplugged already set\n");
3431053596d9SAndreas Noever 		return;
3432053596d9SAndreas Noever 	}
3433053596d9SAndreas Noever 	sw->is_unplugged = true;
3434b433d010SMika Westerberg 	tb_switch_for_each_port(sw, port) {
3435b433d010SMika Westerberg 		if (tb_port_has_remote(port))
3436b433d010SMika Westerberg 			tb_sw_set_unplugged(port->remote->sw);
3437b433d010SMika Westerberg 		else if (port->xdomain)
3438b433d010SMika Westerberg 			port->xdomain->is_unplugged = true;
3439053596d9SAndreas Noever 	}
3440053596d9SAndreas Noever }
3441053596d9SAndreas Noever 
tb_switch_set_wake(struct tb_switch * sw,unsigned int flags)3442b2911a59SMika Westerberg static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
3443b2911a59SMika Westerberg {
3444b2911a59SMika Westerberg 	if (flags)
3445b2911a59SMika Westerberg 		tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
3446b2911a59SMika Westerberg 	else
3447b2911a59SMika Westerberg 		tb_sw_dbg(sw, "disabling wakeup\n");
3448b2911a59SMika Westerberg 
3449b2911a59SMika Westerberg 	if (tb_switch_is_usb4(sw))
3450b2911a59SMika Westerberg 		return usb4_switch_set_wake(sw, flags);
3451b2911a59SMika Westerberg 	return tb_lc_set_wake(sw, flags);
3452b2911a59SMika Westerberg }
3453b2911a59SMika Westerberg 
tb_switch_check_wakes(struct tb_switch * sw)34543238b23eSGil Fine static void tb_switch_check_wakes(struct tb_switch *sw)
34553238b23eSGil Fine {
34563238b23eSGil Fine 	if (device_may_wakeup(&sw->dev)) {
34573238b23eSGil Fine 		if (tb_switch_is_usb4(sw))
34583238b23eSGil Fine 			usb4_switch_check_wakes(sw);
34593238b23eSGil Fine 	}
34603238b23eSGil Fine }
34613238b23eSGil Fine 
34623238b23eSGil Fine /**
34633238b23eSGil Fine  * tb_switch_resume() - Resume a switch after sleep
34643238b23eSGil Fine  * @sw: Switch to resume
34653238b23eSGil Fine  * @runtime: Is this resume from runtime suspend or system sleep
34663238b23eSGil Fine  *
34673238b23eSGil Fine  * Resumes and re-enumerates the router (and all its children) if it is
34683238b23eSGil Fine  * still plugged after suspend. Does not enumerate a device router whose
34693238b23eSGil Fine  * UID changed during suspend. On resume from system sleep, notifies the
34703238b23eSGil Fine  * PM core about wakes that occurred during suspend. Disables all wakes,
34713238b23eSGil Fine  * except the USB4 upstream port wake which stays enabled for USB4 routers.
34723238b23eSGil Fine  */
tb_switch_resume(struct tb_switch * sw,bool runtime)34733238b23eSGil Fine int tb_switch_resume(struct tb_switch *sw, bool runtime)
347423dd5bb4SAndreas Noever {
3475b433d010SMika Westerberg 	struct tb_port *port;
3476b433d010SMika Westerberg 	int err;
3477b433d010SMika Westerberg 
3478daa5140fSMika Westerberg 	tb_sw_dbg(sw, "resuming switch\n");
347923dd5bb4SAndreas Noever 
348008a5e4ceSMika Westerberg 	/*
348108a5e4ceSMika Westerberg 	 * Check the UID of the connected switches, except for the root
348208a5e4ceSMika Westerberg 	 * switch which we assume cannot be removed.
348308a5e4ceSMika Westerberg 	 */
348408a5e4ceSMika Westerberg 	if (tb_route(sw)) {
348508a5e4ceSMika Westerberg 		u64 uid;
348608a5e4ceSMika Westerberg 
34877ea4cd6bSMika Westerberg 		/*
34887ea4cd6bSMika Westerberg 		 * Check first that we can still read the switch config
34897ea4cd6bSMika Westerberg 		 * space. It may be that there is now another domain
34907ea4cd6bSMika Westerberg 		 * connected.
34917ea4cd6bSMika Westerberg 		 */
34927ea4cd6bSMika Westerberg 		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
34937ea4cd6bSMika Westerberg 		if (err < 0) {
34947ea4cd6bSMika Westerberg 			tb_sw_info(sw, "switch not present anymore\n");
34957ea4cd6bSMika Westerberg 			return err;
34967ea4cd6bSMika Westerberg 		}
34977ea4cd6bSMika Westerberg 
3498a283de3eSMario Limonciello 		/* We don't have any way to confirm this was the same device */
3499a283de3eSMario Limonciello 		if (!sw->uid)
3500a283de3eSMario Limonciello 			return -ENODEV;
3501a283de3eSMario Limonciello 
3502b0407983SMika Westerberg 		if (tb_switch_is_usb4(sw))
3503b0407983SMika Westerberg 			err = usb4_switch_read_uid(sw, &uid);
3504b0407983SMika Westerberg 		else
3505cd22e73bSAndreas Noever 			err = tb_drom_read_uid_only(sw, &uid);
350623dd5bb4SAndreas Noever 		if (err) {
350723dd5bb4SAndreas Noever 			tb_sw_warn(sw, "uid read failed\n");
350823dd5bb4SAndreas Noever 			return err;
350923dd5bb4SAndreas Noever 		}
351008a5e4ceSMika Westerberg 		if (sw->uid != uid) {
351123dd5bb4SAndreas Noever 			tb_sw_info(sw,
351223dd5bb4SAndreas Noever 				"changed while suspended (uid %#llx -> %#llx)\n",
351323dd5bb4SAndreas Noever 				sw->uid, uid);
351423dd5bb4SAndreas Noever 			return -ENODEV;
351523dd5bb4SAndreas Noever 		}
351608a5e4ceSMika Westerberg 	}
351723dd5bb4SAndreas Noever 
3518b0407983SMika Westerberg 	err = tb_switch_configure(sw);
351923dd5bb4SAndreas Noever 	if (err)
352023dd5bb4SAndreas Noever 		return err;
352123dd5bb4SAndreas Noever 
35223238b23eSGil Fine 	if (!runtime)
35233238b23eSGil Fine 		tb_switch_check_wakes(sw);
35243238b23eSGil Fine 
3525b2911a59SMika Westerberg 	/* Disable wakes */
3526b2911a59SMika Westerberg 	tb_switch_set_wake(sw, 0);
3527b2911a59SMika Westerberg 
35288145c435SMika Westerberg 	err = tb_switch_tmu_init(sw);
35298145c435SMika Westerberg 	if (err)
35308145c435SMika Westerberg 		return err;
35318145c435SMika Westerberg 
353223dd5bb4SAndreas Noever 	/* check for surviving downstream switches */
3533b433d010SMika Westerberg 	tb_switch_for_each_port(sw, port) {
35343fb10ea4SRajmohan Mani 		if (!tb_port_is_null(port))
353523dd5bb4SAndreas Noever 			continue;
35363fb10ea4SRajmohan Mani 
35373fb10ea4SRajmohan Mani 		if (!tb_port_resume(port))
35383fb10ea4SRajmohan Mani 			continue;
3539dfe40ca4SMika Westerberg 
35407ea4cd6bSMika Westerberg 		if (tb_wait_for_port(port, true) <= 0) {
35417ea4cd6bSMika Westerberg 			tb_port_warn(port,
35427ea4cd6bSMika Westerberg 				     "lost during suspend, disconnecting\n");
35437ea4cd6bSMika Westerberg 			if (tb_port_has_remote(port))
35447ea4cd6bSMika Westerberg 				tb_sw_set_unplugged(port->remote->sw);
35457ea4cd6bSMika Westerberg 			else if (port->xdomain)
35467ea4cd6bSMika Westerberg 				port->xdomain->is_unplugged = true;
35473fb10ea4SRajmohan Mani 		} else {
3548b0407983SMika Westerberg 			/*
3549b0407983SMika Westerberg 			 * Always unlock the port so the downstream
3550b0407983SMika Westerberg 			 * switch/domain is accessible.
3551b0407983SMika Westerberg 			 */
3552b0407983SMika Westerberg 			if (tb_port_unlock(port))
3553b0407983SMika Westerberg 				tb_port_warn(port, "failed to unlock port\n");
35543238b23eSGil Fine 			if (port->remote &&
35553238b23eSGil Fine 			    tb_switch_resume(port->remote->sw, runtime)) {
355623dd5bb4SAndreas Noever 				tb_port_warn(port,
355723dd5bb4SAndreas Noever 					     "lost during suspend, disconnecting\n");
3558aae20bb6SLukas Wunner 				tb_sw_set_unplugged(port->remote->sw);
355923dd5bb4SAndreas Noever 			}
356023dd5bb4SAndreas Noever 		}
35617ea4cd6bSMika Westerberg 	}
356223dd5bb4SAndreas Noever 	return 0;
356323dd5bb4SAndreas Noever }
356423dd5bb4SAndreas Noever 
35656ac6faeeSMika Westerberg /**
35666ac6faeeSMika Westerberg  * tb_switch_suspend() - Put a switch to sleep
35676ac6faeeSMika Westerberg  * @sw: Switch to suspend
35686ac6faeeSMika Westerberg  * @runtime: Is this runtime suspend or system sleep
35696ac6faeeSMika Westerberg  *
35706ac6faeeSMika Westerberg  * Suspends the router and all its children. Enables wakes according to
35716ac6faeeSMika Westerberg  * the value of @runtime and then sets the sleep bit for the router. If
35726ac6faeeSMika Westerberg  * @sw is the host router, the domain is ready to go to sleep once this
35736ac6faeeSMika Westerberg  * function returns.
35746ac6faeeSMika Westerberg  */
tb_switch_suspend(struct tb_switch * sw,bool runtime)35756ac6faeeSMika Westerberg void tb_switch_suspend(struct tb_switch *sw, bool runtime)
357623dd5bb4SAndreas Noever {
3577b2911a59SMika Westerberg 	unsigned int flags = 0;
3578b433d010SMika Westerberg 	struct tb_port *port;
3579b433d010SMika Westerberg 	int err;
3580b433d010SMika Westerberg 
35816ac6faeeSMika Westerberg 	tb_sw_dbg(sw, "suspending switch\n");
35826ac6faeeSMika Westerberg 
358343f977bcSGil Fine 	/*
358443f977bcSGil Fine 	 * Actually only needed for Titan Ridge but for simplicity it can
358543f977bcSGil Fine 	 * be done for USB4 devices too, as CLx is re-enabled at resume.
358643f977bcSGil Fine 	 */
358735627353SMika Westerberg 	tb_switch_clx_disable(sw);
358843f977bcSGil Fine 
358923dd5bb4SAndreas Noever 	err = tb_plug_events_active(sw, false);
359023dd5bb4SAndreas Noever 	if (err)
359123dd5bb4SAndreas Noever 		return;
359223dd5bb4SAndreas Noever 
3593b433d010SMika Westerberg 	tb_switch_for_each_port(sw, port) {
3594b433d010SMika Westerberg 		if (tb_port_has_remote(port))
35956ac6faeeSMika Westerberg 			tb_switch_suspend(port->remote->sw, runtime);
359623dd5bb4SAndreas Noever 	}
35975480dfc2SMika Westerberg 
35986ac6faeeSMika Westerberg 	if (runtime) {
35996ac6faeeSMika Westerberg 		/* Trigger wake when something is plugged in/out */
36006ac6faeeSMika Westerberg 		flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
36016026b703SMika Westerberg 		flags |= TB_WAKE_ON_USB4;
36026026b703SMika Westerberg 		flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP;
36036ac6faeeSMika Westerberg 	} else if (device_may_wakeup(&sw->dev)) {
36046ac6faeeSMika Westerberg 		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
36056ac6faeeSMika Westerberg 	}
3606b2911a59SMika Westerberg 
3607b2911a59SMika Westerberg 	tb_switch_set_wake(sw, flags);
3608b2911a59SMika Westerberg 
3609b0407983SMika Westerberg 	if (tb_switch_is_usb4(sw))
3610b0407983SMika Westerberg 		usb4_switch_set_sleep(sw);
3611b0407983SMika Westerberg 	else
36125480dfc2SMika Westerberg 		tb_lc_set_sleep(sw);
361323dd5bb4SAndreas Noever }
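
/*
 * Illustrative sketch only, not part of the driver: how a domain-level
 * sleep hook might pair tb_switch_suspend() with tb_switch_resume() on
 * the root switch.
 */
static int __maybe_unused example_domain_sleep(struct tb *tb)
{
	/* Enables wakes and sets the sleep bit for the whole tree */
	tb_switch_suspend(tb->root_switch, false);
	return 0;
}

static int __maybe_unused example_domain_wake(struct tb *tb)
{
	/* Re-enumerates everything that survived the sleep */
	return tb_switch_resume(tb->root_switch, false);
}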
3614f67cf491SMika Westerberg 
36158afe909bSMika Westerberg /**
36168afe909bSMika Westerberg  * tb_switch_query_dp_resource() - Query availability of DP resource
36178afe909bSMika Westerberg  * @sw: Switch whose DP resource is queried
36188afe909bSMika Westerberg  * @in: DP IN port
36198afe909bSMika Westerberg  *
36208afe909bSMika Westerberg  * Queries availability of the DP resource for DP tunneling using
36218afe909bSMika Westerberg  * switch-specific means. Returns %true if the resource is available.
36228afe909bSMika Westerberg  */
tb_switch_query_dp_resource(struct tb_switch * sw,struct tb_port * in)36238afe909bSMika Westerberg bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
36248afe909bSMika Westerberg {
3625b0407983SMika Westerberg 	if (tb_switch_is_usb4(sw))
3626b0407983SMika Westerberg 		return usb4_switch_query_dp_resource(sw, in);
36278afe909bSMika Westerberg 	return tb_lc_dp_sink_query(sw, in);
36288afe909bSMika Westerberg }
36298afe909bSMika Westerberg 
36308afe909bSMika Westerberg /**
36318afe909bSMika Westerberg  * tb_switch_alloc_dp_resource() - Allocate available DP resource
36328afe909bSMika Westerberg  * @sw: Switch whose DP resource is allocated
36338afe909bSMika Westerberg  * @in: DP IN port
36348afe909bSMika Westerberg  *
36358afe909bSMika Westerberg  * Allocates DP resource for DP tunneling. The resource must be
36368afe909bSMika Westerberg  * available for this to succeed (see tb_switch_query_dp_resource()).
36378afe909bSMika Westerberg  * Returns %0 in success and negative errno otherwise.
36388afe909bSMika Westerberg  * Returns %0 on success and negative errno otherwise.
tb_switch_alloc_dp_resource(struct tb_switch * sw,struct tb_port * in)36398afe909bSMika Westerberg int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
36408afe909bSMika Westerberg {
3641ce05b997SMika Westerberg 	int ret;
3642ce05b997SMika Westerberg 
3643b0407983SMika Westerberg 	if (tb_switch_is_usb4(sw))
3644ce05b997SMika Westerberg 		ret = usb4_switch_alloc_dp_resource(sw, in);
3645ce05b997SMika Westerberg 	else
3646ce05b997SMika Westerberg 		ret = tb_lc_dp_sink_alloc(sw, in);
3647ce05b997SMika Westerberg 
3648ce05b997SMika Westerberg 	if (ret)
3649ce05b997SMika Westerberg 		tb_sw_warn(sw, "failed to allocate DP resource for port %d\n",
3650ce05b997SMika Westerberg 			   in->port);
3651ce05b997SMika Westerberg 	else
3652ce05b997SMika Westerberg 		tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port);
3653ce05b997SMika Westerberg 
3654ce05b997SMika Westerberg 	return ret;
36558afe909bSMika Westerberg }
36568afe909bSMika Westerberg 
36578afe909bSMika Westerberg /**
36588afe909bSMika Westerberg  * tb_switch_dealloc_dp_resource() - De-allocate DP resource
36598afe909bSMika Westerberg  * @sw: Switch whose DP resource is de-allocated
36608afe909bSMika Westerberg  * @in: DP IN port
36618afe909bSMika Westerberg  *
36628afe909bSMika Westerberg  * De-allocates DP resource that was previously allocated for DP
36638afe909bSMika Westerberg  * tunneling.
36648afe909bSMika Westerberg  */
tb_switch_dealloc_dp_resource(struct tb_switch * sw,struct tb_port * in)36658afe909bSMika Westerberg void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
36668afe909bSMika Westerberg {
3667b0407983SMika Westerberg 	int ret;
3668b0407983SMika Westerberg 
3669b0407983SMika Westerberg 	if (tb_switch_is_usb4(sw))
3670b0407983SMika Westerberg 		ret = usb4_switch_dealloc_dp_resource(sw, in);
3671b0407983SMika Westerberg 	else
3672b0407983SMika Westerberg 		ret = tb_lc_dp_sink_dealloc(sw, in);
3673b0407983SMika Westerberg 
3674b0407983SMika Westerberg 	if (ret)
36758afe909bSMika Westerberg 		tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
36768afe909bSMika Westerberg 			   in->port);
3677ce05b997SMika Westerberg 	else
3678ce05b997SMika Westerberg 		tb_sw_dbg(sw, "released DP resource for port %d\n", in->port);
36798afe909bSMika Westerberg }
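
/*
 * Illustrative sketch only, not part of the driver: the query/alloc
 * pattern a DP tunnel setup path would follow. @in is assumed to be a
 * DP IN adapter on @sw; the matching tb_switch_dealloc_dp_resource()
 * happens when the tunnel is torn down.
 */
static int __maybe_unused example_claim_dp_resource(struct tb_switch *sw,
						    struct tb_port *in)
{
	if (!tb_switch_query_dp_resource(sw, in))
		return -EBUSY;	/* sink is already in use */

	return tb_switch_alloc_dp_resource(sw, in);
}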
36808afe909bSMika Westerberg 
3681f67cf491SMika Westerberg struct tb_sw_lookup {
3682f67cf491SMika Westerberg 	struct tb *tb;
3683f67cf491SMika Westerberg 	u8 link;
3684f67cf491SMika Westerberg 	u8 depth;
36857c39ffe7SChristoph Hellwig 	const uuid_t *uuid;
36868e9267bbSRadion Mirchevsky 	u64 route;
3687f67cf491SMika Westerberg };
3688f67cf491SMika Westerberg 
tb_switch_match(struct device * dev,const void * data)3689418e3ea1SSuzuki K Poulose static int tb_switch_match(struct device *dev, const void *data)
3690f67cf491SMika Westerberg {
3691f67cf491SMika Westerberg 	struct tb_switch *sw = tb_to_switch(dev);
3692418e3ea1SSuzuki K Poulose 	const struct tb_sw_lookup *lookup = data;
3693f67cf491SMika Westerberg 
3694f67cf491SMika Westerberg 	if (!sw)
3695f67cf491SMika Westerberg 		return 0;
3696f67cf491SMika Westerberg 	if (sw->tb != lookup->tb)
3697f67cf491SMika Westerberg 		return 0;
3698f67cf491SMika Westerberg 
3699f67cf491SMika Westerberg 	if (lookup->uuid)
3700f67cf491SMika Westerberg 		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
3701f67cf491SMika Westerberg 
37028e9267bbSRadion Mirchevsky 	if (lookup->route) {
37038e9267bbSRadion Mirchevsky 		return sw->config.route_lo == lower_32_bits(lookup->route) &&
37048e9267bbSRadion Mirchevsky 		       sw->config.route_hi == upper_32_bits(lookup->route);
37058e9267bbSRadion Mirchevsky 	}
37068e9267bbSRadion Mirchevsky 
3707f67cf491SMika Westerberg 	/* Root switch is matched only by depth */
3708f67cf491SMika Westerberg 	if (!lookup->depth)
3709f67cf491SMika Westerberg 		return !sw->depth;
3710f67cf491SMika Westerberg 
3711f67cf491SMika Westerberg 	return sw->link == lookup->link && sw->depth == lookup->depth;
3712f67cf491SMika Westerberg }
3713f67cf491SMika Westerberg 
3714f67cf491SMika Westerberg /**
3715f67cf491SMika Westerberg  * tb_switch_find_by_link_depth() - Find switch by link and depth
3716f67cf491SMika Westerberg  * @tb: Domain the switch belongs
3717f67cf491SMika Westerberg  * @link: Link number the switch is connected to
3718f67cf491SMika Westerberg  * @depth: Depth of the switch in the link
3719f67cf491SMika Westerberg  *
3720f67cf491SMika Westerberg  * Returned switch has reference count increased so the caller needs to
3721f67cf491SMika Westerberg  * call tb_switch_put() when done with the switch.
3722f67cf491SMika Westerberg  */
tb_switch_find_by_link_depth(struct tb * tb,u8 link,u8 depth)3723f67cf491SMika Westerberg struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
3724f67cf491SMika Westerberg {
3725f67cf491SMika Westerberg 	struct tb_sw_lookup lookup;
3726f67cf491SMika Westerberg 	struct device *dev;
3727f67cf491SMika Westerberg 
3728f67cf491SMika Westerberg 	memset(&lookup, 0, sizeof(lookup));
3729f67cf491SMika Westerberg 	lookup.tb = tb;
3730f67cf491SMika Westerberg 	lookup.link = link;
3731f67cf491SMika Westerberg 	lookup.depth = depth;
3732f67cf491SMika Westerberg 
3733f67cf491SMika Westerberg 	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3734f67cf491SMika Westerberg 	if (dev)
3735f67cf491SMika Westerberg 		return tb_to_switch(dev);
3736f67cf491SMika Westerberg 
3737f67cf491SMika Westerberg 	return NULL;
3738f67cf491SMika Westerberg }
3739f67cf491SMika Westerberg 
3740f67cf491SMika Westerberg /**
3741432019d6SRadion Mirchevsky  * tb_switch_find_by_uuid() - Find switch by UUID
3742f67cf491SMika Westerberg  * @tb: Domain the switch belongs
3743f67cf491SMika Westerberg  * @uuid: UUID to look for
3744f67cf491SMika Westerberg  *
3745f67cf491SMika Westerberg  * Returned switch has reference count increased so the caller needs to
3746f67cf491SMika Westerberg  * call tb_switch_put() when done with the switch.
3747f67cf491SMika Westerberg  */
tb_switch_find_by_uuid(struct tb * tb,const uuid_t * uuid)37487c39ffe7SChristoph Hellwig struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
3749f67cf491SMika Westerberg {
3750f67cf491SMika Westerberg 	struct tb_sw_lookup lookup;
3751f67cf491SMika Westerberg 	struct device *dev;
3752f67cf491SMika Westerberg 
3753f67cf491SMika Westerberg 	memset(&lookup, 0, sizeof(lookup));
3754f67cf491SMika Westerberg 	lookup.tb = tb;
3755f67cf491SMika Westerberg 	lookup.uuid = uuid;
3756f67cf491SMika Westerberg 
3757f67cf491SMika Westerberg 	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3758f67cf491SMika Westerberg 	if (dev)
3759f67cf491SMika Westerberg 		return tb_to_switch(dev);
3760f67cf491SMika Westerberg 
3761f67cf491SMika Westerberg 	return NULL;
3762f67cf491SMika Westerberg }
3763e6b245ccSMika Westerberg 
37648e9267bbSRadion Mirchevsky /**
37658e9267bbSRadion Mirchevsky  * tb_switch_find_by_route() - Find switch by route string
37668e9267bbSRadion Mirchevsky  * @tb: Domain the switch belongs
37678e9267bbSRadion Mirchevsky  * @route: Route string to look for
37688e9267bbSRadion Mirchevsky  *
37698e9267bbSRadion Mirchevsky  * Returned switch has reference count increased so the caller needs to
37708e9267bbSRadion Mirchevsky  * call tb_switch_put() when done with the switch.
37718e9267bbSRadion Mirchevsky  */
tb_switch_find_by_route(struct tb * tb,u64 route)37728e9267bbSRadion Mirchevsky struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
37738e9267bbSRadion Mirchevsky {
37748e9267bbSRadion Mirchevsky 	struct tb_sw_lookup lookup;
37758e9267bbSRadion Mirchevsky 	struct device *dev;
37768e9267bbSRadion Mirchevsky 
37778e9267bbSRadion Mirchevsky 	if (!route)
37788e9267bbSRadion Mirchevsky 		return tb_switch_get(tb->root_switch);
37798e9267bbSRadion Mirchevsky 
37808e9267bbSRadion Mirchevsky 	memset(&lookup, 0, sizeof(lookup));
37818e9267bbSRadion Mirchevsky 	lookup.tb = tb;
37828e9267bbSRadion Mirchevsky 	lookup.route = route;
37838e9267bbSRadion Mirchevsky 
37848e9267bbSRadion Mirchevsky 	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
37858e9267bbSRadion Mirchevsky 	if (dev)
37868e9267bbSRadion Mirchevsky 		return tb_to_switch(dev);
37878e9267bbSRadion Mirchevsky 
37888e9267bbSRadion Mirchevsky 	return NULL;
37898e9267bbSRadion Mirchevsky }
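
/*
 * Illustrative sketch only, not part of the driver: every
 * tb_switch_find_*() helper returns the switch with its reference
 * count raised, so the caller must balance it with tb_switch_put().
 */
static void __maybe_unused example_lookup_by_route(struct tb *tb, u64 route)
{
	struct tb_switch *sw;

	sw = tb_switch_find_by_route(tb, route);
	if (!sw)
		return;

	tb_sw_dbg(sw, "found router at route %llx\n", route);
	tb_switch_put(sw);	/* drop the reference taken by the lookup */
}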
37908e9267bbSRadion Mirchevsky 
3791386e5e29SMika Westerberg /**
3792386e5e29SMika Westerberg  * tb_switch_find_port() - return the first port of @type on @sw or NULL
3793386e5e29SMika Westerberg  * @sw: Switch to find the port from
3794386e5e29SMika Westerberg  * @type: Port type to look for
3795386e5e29SMika Westerberg  */
tb_switch_find_port(struct tb_switch * sw,enum tb_port_type type)3796386e5e29SMika Westerberg struct tb_port *tb_switch_find_port(struct tb_switch *sw,
3797386e5e29SMika Westerberg 				    enum tb_port_type type)
3798386e5e29SMika Westerberg {
3799386e5e29SMika Westerberg 	struct tb_port *port;
3800386e5e29SMika Westerberg 
3801386e5e29SMika Westerberg 	tb_switch_for_each_port(sw, port) {
3802386e5e29SMika Westerberg 		if (port->config.type == type)
3803386e5e29SMika Westerberg 			return port;
3804386e5e29SMika Westerberg 	}
3805386e5e29SMika Westerberg 
3806386e5e29SMika Westerberg 	return NULL;
3807386e5e29SMika Westerberg }
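
/*
 * Illustrative sketch only, not part of the driver: finding the first
 * DP IN adapter of a router (TB_TYPE_DP_HDMI_IN is assumed here).
 * Unlike the switch lookups above, port pointers are owned by the
 * switch, so no reference counting is involved.
 */
static struct tb_port *__maybe_unused example_first_dp_in(struct tb_switch *sw)
{
	return tb_switch_find_port(sw, TB_TYPE_DP_HDMI_IN);
}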
38088a90e4faSGil Fine 
380943f977bcSGil Fine /*
381043f977bcSGil Fine  * Can be used to read/write a specified PCIe bridge of any Thunderbolt 3
381143f977bcSGil Fine  * device. For now used only for Titan Ridge.
381243f977bcSGil Fine  */
tb_switch_pcie_bridge_write(struct tb_switch * sw,unsigned int bridge,unsigned int pcie_offset,u32 value)381343f977bcSGil Fine static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge,
381443f977bcSGil Fine 				       unsigned int pcie_offset, u32 value)
381543f977bcSGil Fine {
381643f977bcSGil Fine 	u32 offset, command, val;
381743f977bcSGil Fine 	int ret;
381843f977bcSGil Fine 
381943f977bcSGil Fine 	if (sw->generation != 3)
382043f977bcSGil Fine 		return -EOPNOTSUPP;
382143f977bcSGil Fine 
382243f977bcSGil Fine 	offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA;
382343f977bcSGil Fine 	ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
382443f977bcSGil Fine 	if (ret)
382543f977bcSGil Fine 		return ret;
382643f977bcSGil Fine 
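	/* Build the command word: DWORD offset, bridge select, write, execute */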
382743f977bcSGil Fine 	command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK;
382843f977bcSGil Fine 	command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT);
382943f977bcSGil Fine 	command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK;
383043f977bcSGil Fine 	command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL
383143f977bcSGil Fine 			<< TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT;
383243f977bcSGil Fine 	command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK;
383343f977bcSGil Fine 
383443f977bcSGil Fine 	offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD;
383543f977bcSGil Fine 
383643f977bcSGil Fine 	ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1);
383743f977bcSGil Fine 	if (ret)
383843f977bcSGil Fine 		return ret;
383943f977bcSGil Fine 
384043f977bcSGil Fine 	ret = tb_switch_wait_for_bit(sw, offset,
384143f977bcSGil Fine 				     TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100);
384243f977bcSGil Fine 	if (ret)
384343f977bcSGil Fine 		return ret;
384443f977bcSGil Fine 
384543f977bcSGil Fine 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
384643f977bcSGil Fine 	if (ret)
384743f977bcSGil Fine 		return ret;
384843f977bcSGil Fine 
384943f977bcSGil Fine 	if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK)
385043f977bcSGil Fine 		return -ETIMEDOUT;
385143f977bcSGil Fine 
385243f977bcSGil Fine 	return 0;
385343f977bcSGil Fine }
385443f977bcSGil Fine 
385543f977bcSGil Fine /**
385643f977bcSGil Fine  * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state
385743f977bcSGil Fine  * @sw: Router to enable PCIe L1
385843f977bcSGil Fine  *
385943f977bcSGil Fine  * For a Titan Ridge switch to enter a CLx state, its PCIe bridges must
386043f977bcSGil Fine  * enable entry to the PCIe L1 state. Must be called after the upstream
386143f977bcSGil Fine  * PCIe tunnel has been configured. Due to an Intel platform limitation,
386243f977bcSGil Fine  * this must be called only for the first hop switch.
386343f977bcSGil Fine  */
tb_switch_pcie_l1_enable(struct tb_switch * sw)386443f977bcSGil Fine int tb_switch_pcie_l1_enable(struct tb_switch *sw)
386543f977bcSGil Fine {
386643f977bcSGil Fine 	struct tb_switch *parent = tb_switch_parent(sw);
386743f977bcSGil Fine 	int ret;
386843f977bcSGil Fine 
386943f977bcSGil Fine 	if (!tb_route(sw))
387043f977bcSGil Fine 		return 0;
387143f977bcSGil Fine 
387243f977bcSGil Fine 	if (!tb_switch_is_titan_ridge(sw))
387343f977bcSGil Fine 		return 0;
387443f977bcSGil Fine 
387543f977bcSGil Fine 	/* Enable PCIe L1 only for the first hop router (depth = 1) */
387643f977bcSGil Fine 	if (tb_route(parent))
387743f977bcSGil Fine 		return 0;
387843f977bcSGil Fine 
387943f977bcSGil Fine 	/* Write to downstream PCIe bridge #5 aka Dn4 */
388043f977bcSGil Fine 	ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1);
388143f977bcSGil Fine 	if (ret)
388243f977bcSGil Fine 		return ret;
388343f977bcSGil Fine 
388443f977bcSGil Fine 	/* Write to Upstream PCIe bridge #0 aka Up0 */
388543f977bcSGil Fine 	return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1);
388643f977bcSGil Fine }
388730a4eca6SMika Westerberg 
388830a4eca6SMika Westerberg /**
388930a4eca6SMika Westerberg  * tb_switch_xhci_connect() - Connect internal xHCI
389030a4eca6SMika Westerberg  * @sw: Router whose xHCI to connect
389130a4eca6SMika Westerberg  *
389230a4eca6SMika Westerberg  * Can be called for any router. For Alpine Ridge and Titan Ridge it
389330a4eca6SMika Westerberg  * performs the special flows that make the xHCI functional for any
389430a4eca6SMika Westerberg  * device connected to the Type-C port. Call this only after the PCIe
389530a4eca6SMika Westerberg  * tunnel has been established. The connect is done only if not done
389630a4eca6SMika Westerberg  * already, so the function can be called several times for the same router.
389730a4eca6SMika Westerberg  */
tb_switch_xhci_connect(struct tb_switch * sw)389830a4eca6SMika Westerberg int tb_switch_xhci_connect(struct tb_switch *sw)
389930a4eca6SMika Westerberg {
390030a4eca6SMika Westerberg 	struct tb_port *port1, *port3;
390130a4eca6SMika Westerberg 	int ret;
390230a4eca6SMika Westerberg 
390393a3c0d4SMika Westerberg 	if (sw->generation != 3)
390493a3c0d4SMika Westerberg 		return 0;
390593a3c0d4SMika Westerberg 
390630a4eca6SMika Westerberg 	port1 = &sw->ports[1];
390730a4eca6SMika Westerberg 	port3 = &sw->ports[3];
390830a4eca6SMika Westerberg 
390930a4eca6SMika Westerberg 	if (tb_switch_is_alpine_ridge(sw)) {
391093a3c0d4SMika Westerberg 		bool usb_port1, usb_port3, xhci_port1, xhci_port3;
391193a3c0d4SMika Westerberg 
391230a4eca6SMika Westerberg 		usb_port1 = tb_lc_is_usb_plugged(port1);
391330a4eca6SMika Westerberg 		usb_port3 = tb_lc_is_usb_plugged(port3);
391430a4eca6SMika Westerberg 		xhci_port1 = tb_lc_is_xhci_connected(port1);
391530a4eca6SMika Westerberg 		xhci_port3 = tb_lc_is_xhci_connected(port3);
391630a4eca6SMika Westerberg 
391730a4eca6SMika Westerberg 		/* Figure out correct USB port to connect */
391830a4eca6SMika Westerberg 		if (usb_port1 && !xhci_port1) {
391930a4eca6SMika Westerberg 			ret = tb_lc_xhci_connect(port1);
392030a4eca6SMika Westerberg 			if (ret)
392130a4eca6SMika Westerberg 				return ret;
392230a4eca6SMika Westerberg 		}
392330a4eca6SMika Westerberg 		if (usb_port3 && !xhci_port3)
392430a4eca6SMika Westerberg 			return tb_lc_xhci_connect(port3);
392530a4eca6SMika Westerberg 	} else if (tb_switch_is_titan_ridge(sw)) {
392630a4eca6SMika Westerberg 		ret = tb_lc_xhci_connect(port1);
392730a4eca6SMika Westerberg 		if (ret)
392830a4eca6SMika Westerberg 			return ret;
392930a4eca6SMika Westerberg 		return tb_lc_xhci_connect(port3);
393030a4eca6SMika Westerberg 	}
393130a4eca6SMika Westerberg 
393230a4eca6SMika Westerberg 	return 0;
393330a4eca6SMika Westerberg }
393430a4eca6SMika Westerberg 
393530a4eca6SMika Westerberg /**
393630a4eca6SMika Westerberg  * tb_switch_xhci_disconnect() - Disconnect internal xHCI
393730a4eca6SMika Westerberg  * @sw: Router whose xHCI to disconnect
393830a4eca6SMika Westerberg  *
393930a4eca6SMika Westerberg  * The opposite of tb_switch_xhci_connect(). Disconnects xHCI on both
394030a4eca6SMika Westerberg  * ports.
394130a4eca6SMika Westerberg  */
tb_switch_xhci_disconnect(struct tb_switch * sw)394230a4eca6SMika Westerberg void tb_switch_xhci_disconnect(struct tb_switch *sw)
394330a4eca6SMika Westerberg {
394430a4eca6SMika Westerberg 	if (sw->generation == 3) {
394530a4eca6SMika Westerberg 		struct tb_port *port1 = &sw->ports[1];
394630a4eca6SMika Westerberg 		struct tb_port *port3 = &sw->ports[3];
394730a4eca6SMika Westerberg 
394830a4eca6SMika Westerberg 		tb_lc_xhci_disconnect(port1);
394930a4eca6SMika Westerberg 		tb_port_dbg(port1, "disconnected xHCI\n");
395030a4eca6SMika Westerberg 		tb_lc_xhci_disconnect(port3);
395130a4eca6SMika Westerberg 		tb_port_dbg(port3, "disconnected xHCI\n");
395230a4eca6SMika Westerberg 	}
395330a4eca6SMika Westerberg }
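
/*
 * Illustrative sketch only, not part of the driver: a hypothetical
 * caller connects the internal xHCI once the PCIe tunnel is up and
 * tears it down again when the device is unplugged.
 */
static void __maybe_unused example_xhci_flow(struct tb_switch *sw)
{
	if (tb_switch_xhci_connect(sw))
		tb_sw_warn(sw, "failed to connect xHCI\n");

	/* ... later, on unplug ... */
	tb_switch_xhci_disconnect(sw);
}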
3954