xref: /openbmc/linux/drivers/thunderbolt/switch.c (revision 31e67366)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt driver - switch/port utility functions
4  *
5  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6  * Copyright (C) 2018, Intel Corporation
7  */
8 
9 #include <linux/delay.h>
10 #include <linux/idr.h>
11 #include <linux/nvmem-provider.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/sched/signal.h>
14 #include <linux/sizes.h>
15 #include <linux/slab.h>
16 
17 #include "tb.h"
18 
19 /* Switch NVM support */
20 
21 #define NVM_CSS			0x10
22 
23 struct nvm_auth_status {
24 	struct list_head list;
25 	uuid_t uuid;
26 	u32 status;
27 };
28 
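/*
 * Values user space writes to the nvm_authenticate attributes below:
 * WRITE_AND_AUTHENTICATE flashes the cached image and then starts
 * authentication, WRITE_ONLY just flashes the image.
 */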
29 enum nvm_write_ops {
30 	WRITE_AND_AUTHENTICATE = 1,
31 	WRITE_ONLY = 2,
32 };
33 
34 /*
35  * Hold NVM authentication failure status per switch. This information
36  * needs to stay around even when the switch gets power cycled so we
37  * keep it separately.
38  */
39 static LIST_HEAD(nvm_auth_status_cache);
40 static DEFINE_MUTEX(nvm_auth_status_lock);
41 
42 static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
43 {
44 	struct nvm_auth_status *st;
45 
46 	list_for_each_entry(st, &nvm_auth_status_cache, list) {
47 		if (uuid_equal(&st->uuid, sw->uuid))
48 			return st;
49 	}
50 
51 	return NULL;
52 }
53 
54 static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
55 {
56 	struct nvm_auth_status *st;
57 
58 	mutex_lock(&nvm_auth_status_lock);
59 	st = __nvm_get_auth_status(sw);
60 	mutex_unlock(&nvm_auth_status_lock);
61 
62 	*status = st ? st->status : 0;
63 }
64 
65 static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
66 {
67 	struct nvm_auth_status *st;
68 
69 	if (WARN_ON(!sw->uuid))
70 		return;
71 
72 	mutex_lock(&nvm_auth_status_lock);
73 	st = __nvm_get_auth_status(sw);
74 
75 	if (!st) {
76 		st = kzalloc(sizeof(*st), GFP_KERNEL);
77 		if (!st)
78 			goto unlock;
79 
80 		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
81 		INIT_LIST_HEAD(&st->list);
82 		list_add_tail(&st->list, &nvm_auth_status_cache);
83 	}
84 
85 	st->status = status;
86 unlock:
87 	mutex_unlock(&nvm_auth_status_lock);
88 }
89 
90 static void nvm_clear_auth_status(const struct tb_switch *sw)
91 {
92 	struct nvm_auth_status *st;
93 
94 	mutex_lock(&nvm_auth_status_lock);
95 	st = __nvm_get_auth_status(sw);
96 	if (st) {
97 		list_del(&st->list);
98 		kfree(st);
99 	}
100 	mutex_unlock(&nvm_auth_status_lock);
101 }
102 
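/*
 * Rough layout of the NVM image as assumed by the checks below (a sketch
 * derived from this code, not a formal description of the image format):
 *
 *	0x0		FARB pointer, low 24 bits = offset of the digital section
 *	NVM_CSS		CSS headers, written separately for generation < 3 routers
 *	hdr_size	digital section, starts at a 4k aligned offset
 *	  +0x0		digital section size (u16)
 *	  +NVM_DEVID	device ID that must match the router
 */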
103 static int nvm_validate_and_write(struct tb_switch *sw)
104 {
105 	unsigned int image_size, hdr_size;
106 	const u8 *buf = sw->nvm->buf;
107 	u16 ds_size;
108 	int ret;
109 
110 	if (!buf)
111 		return -EINVAL;
112 
113 	image_size = sw->nvm->buf_data_size;
114 	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
115 		return -EINVAL;
116 
117 	/*
118 	 * FARB pointer must point inside the image and must at least
119 	 * contain parts of the digital section we will be reading here.
120 	 */
121 	hdr_size = (*(u32 *)buf) & 0xffffff;
122 	if (hdr_size + NVM_DEVID + 2 >= image_size)
123 		return -EINVAL;
124 
125 	/* Digital section start should be aligned to 4k page */
126 	if (!IS_ALIGNED(hdr_size, SZ_4K))
127 		return -EINVAL;
128 
129 	/*
130 	 * Read digital section size and check that it also fits inside
131 	 * the image.
132 	 */
133 	ds_size = *(u16 *)(buf + hdr_size);
134 	if (ds_size >= image_size)
135 		return -EINVAL;
136 
137 	if (!sw->safe_mode) {
138 		u16 device_id;
139 
140 		/*
141 		 * Make sure the device ID in the image matches the one
142 		 * we read from the switch config space.
143 		 */
144 		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
145 		if (device_id != sw->config.device_id)
146 			return -EINVAL;
147 
148 		if (sw->generation < 3) {
149 			/* Write CSS headers first */
150 			ret = dma_port_flash_write(sw->dma_port,
151 				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
152 				DMA_PORT_CSS_MAX_SIZE);
153 			if (ret)
154 				return ret;
155 		}
156 
157 		/* Skip headers in the image */
158 		buf += hdr_size;
159 		image_size -= hdr_size;
160 	}
161 
162 	if (tb_switch_is_usb4(sw))
163 		ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
164 	else
165 		ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
166 	if (!ret)
167 		sw->nvm->flushed = true;
168 	return ret;
169 }
170 
171 static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
172 {
173 	int ret = 0;
174 
175 	/*
176 	 * Root switch NVM upgrade requires that we disconnect the
177 	 * existing paths first (in case it is not in safe mode
178 	 * already).
179 	 */
180 	if (!sw->safe_mode) {
181 		u32 status;
182 
183 		ret = tb_domain_disconnect_all_paths(sw->tb);
184 		if (ret)
185 			return ret;
186 		/*
187 		 * The host controller goes away pretty soon after this if
188 		 * everything goes well so getting a timeout is expected.
189 		 */
190 		ret = dma_port_flash_update_auth(sw->dma_port);
191 		if (!ret || ret == -ETIMEDOUT)
192 			return 0;
193 
194 		/*
195 		 * Any error from update auth operation requires power
196 		 * cycling of the host router.
197 		 */
198 		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
199 		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
200 			nvm_set_auth_status(sw, status);
201 	}
202 
203 	/*
204 	 * From safe mode we can get out by just power cycling the
205 	 * switch.
206 	 */
207 	dma_port_power_cycle(sw->dma_port);
208 	return ret;
209 }
210 
211 static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
212 {
213 	int ret, retries = 10;
214 
215 	ret = dma_port_flash_update_auth(sw->dma_port);
216 	switch (ret) {
217 	case 0:
218 	case -ETIMEDOUT:
219 	case -EACCES:
220 	case -EINVAL:
221 		/* Power cycle is required */
222 		break;
223 	default:
224 		return ret;
225 	}
226 
227 	/*
228 	 * Poll here for the authentication status. It takes some time
229 	 * for the device to respond (we get timeouts for a while). Once
230 	 * we get a response the device needs to be power cycled in order
231 	 * for the new NVM to be taken into use.
232 	 */
233 	do {
234 		u32 status;
235 
236 		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
237 		if (ret < 0 && ret != -ETIMEDOUT)
238 			return ret;
239 		if (ret > 0) {
240 			if (status) {
241 				tb_sw_warn(sw, "failed to authenticate NVM\n");
242 				nvm_set_auth_status(sw, status);
243 			}
244 
245 			tb_sw_info(sw, "power cycling the switch now\n");
246 			dma_port_power_cycle(sw->dma_port);
247 			return 0;
248 		}
249 
250 		msleep(500);
251 	} while (--retries);
252 
253 	return -ETIMEDOUT;
254 }
255 
256 static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
257 {
258 	struct pci_dev *root_port;
259 
260 	/*
261 	 * During host router NVM upgrade we should not allow root port to
262 	 * go into D3cold because some root ports cannot trigger PME
263 	 * itself. To be on the safe side keep the root port in D0 during
264 	 * the whole upgrade process.
265 	 */
266 	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
267 	if (root_port)
268 		pm_runtime_get_noresume(&root_port->dev);
269 }
270 
271 static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
272 {
273 	struct pci_dev *root_port;
274 
275 	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
276 	if (root_port)
277 		pm_runtime_put(&root_port->dev);
278 }
279 
280 static inline bool nvm_readable(struct tb_switch *sw)
281 {
282 	if (tb_switch_is_usb4(sw)) {
283 		/*
284 		 * USB4 devices must support NVM operations but they are
285 		 * optional for hosts. Therefore we query the NVM sector
286 		 * size here and if it is supported we assume NVM
287 		 * operations are implemented.
288 		 */
289 		return usb4_switch_nvm_sector_size(sw) > 0;
290 	}
291 
292 	/* Thunderbolt 2 and 3 devices support NVM through DMA port */
293 	return !!sw->dma_port;
294 }
295 
296 static inline bool nvm_upgradeable(struct tb_switch *sw)
297 {
298 	if (sw->no_nvm_upgrade)
299 		return false;
300 	return nvm_readable(sw);
301 }
302 
303 static inline int nvm_read(struct tb_switch *sw, unsigned int address,
304 			   void *buf, size_t size)
305 {
306 	if (tb_switch_is_usb4(sw))
307 		return usb4_switch_nvm_read(sw, address, buf, size);
308 	return dma_port_flash_read(sw->dma_port, address, buf, size);
309 }
310 
311 static int nvm_authenticate(struct tb_switch *sw)
312 {
313 	int ret;
314 
315 	if (tb_switch_is_usb4(sw))
316 		return usb4_switch_nvm_authenticate(sw);
317 
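	/* A zero route string means this is the host router */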
318 	if (!tb_route(sw)) {
319 		nvm_authenticate_start_dma_port(sw);
320 		ret = nvm_authenticate_host_dma_port(sw);
321 	} else {
322 		ret = nvm_authenticate_device_dma_port(sw);
323 	}
324 
325 	return ret;
326 }
327 
328 static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
329 			      size_t bytes)
330 {
331 	struct tb_nvm *nvm = priv;
332 	struct tb_switch *sw = tb_to_switch(nvm->dev);
333 	int ret;
334 
335 	pm_runtime_get_sync(&sw->dev);
336 
337 	if (!mutex_trylock(&sw->tb->lock)) {
338 		ret = restart_syscall();
339 		goto out;
340 	}
341 
342 	ret = nvm_read(sw, offset, val, bytes);
343 	mutex_unlock(&sw->tb->lock);
344 
345 out:
346 	pm_runtime_mark_last_busy(&sw->dev);
347 	pm_runtime_put_autosuspend(&sw->dev);
348 
349 	return ret;
350 }
351 
352 static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
353 			       size_t bytes)
354 {
355 	struct tb_nvm *nvm = priv;
356 	struct tb_switch *sw = tb_to_switch(nvm->dev);
357 	int ret;
358 
359 	if (!mutex_trylock(&sw->tb->lock))
360 		return restart_syscall();
361 
362 	/*
363 	 * Since writing the NVM image might require some special steps,
364 	 * for example when CSS headers are written, we cache the image
365 	 * locally here and handle the special cases when the user asks
366 	 * us to authenticate the image.
367 	 */
368 	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
369 	mutex_unlock(&sw->tb->lock);
370 
371 	return ret;
372 }
373 
374 static int tb_switch_nvm_add(struct tb_switch *sw)
375 {
376 	struct tb_nvm *nvm;
377 	u32 val;
378 	int ret;
379 
380 	if (!nvm_readable(sw))
381 		return 0;
382 
383 	/*
384 	 * The NVM format of non-Intel hardware is not known so we
385 	 * currently restrict NVM upgrade to Intel hardware only. We may
386 	 * relax this in the future once we learn other NVM formats.
387 	 */
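	/*
	 * 0x8087 is Intel's USB-IF vendor ID, presumably reported by
	 * some routers instead of the PCI vendor ID.
	 */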
388 	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
389 	    sw->config.vendor_id != 0x8087) {
390 		dev_info(&sw->dev,
391 			 "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
392 			 sw->config.vendor_id);
393 		return 0;
394 	}
395 
396 	nvm = tb_nvm_alloc(&sw->dev);
397 	if (IS_ERR(nvm))
398 		return PTR_ERR(nvm);
399 
400 	/*
401 	 * If the switch is in safe-mode the only accessible portion of
402 	 * the NVM is the non-active one where userspace is expected to
403 	 * write new functional NVM.
404 	 */
405 	if (!sw->safe_mode) {
406 		u32 nvm_size, hdr_size;
407 
408 		ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
409 		if (ret)
410 			goto err_nvm;
411 
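		/*
		 * The flash size field presumably encodes the total size
		 * as a power of two in Mbit; convert it to bytes and
		 * split what is left after the header evenly between the
		 * active and inactive NVM halves.
		 */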
412 		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
413 		nvm_size = (SZ_1M << (val & 7)) / 8;
414 		nvm_size = (nvm_size - hdr_size) / 2;
415 
416 		ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
417 		if (ret)
418 			goto err_nvm;
419 
420 		nvm->major = val >> 16;
421 		nvm->minor = val >> 8;
422 
423 		ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read);
424 		if (ret)
425 			goto err_nvm;
426 	}
427 
428 	if (!sw->no_nvm_upgrade) {
429 		ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE,
430 					    tb_switch_nvm_write);
431 		if (ret)
432 			goto err_nvm;
433 	}
434 
435 	sw->nvm = nvm;
436 	return 0;
437 
438 err_nvm:
439 	tb_nvm_free(nvm);
440 	return ret;
441 }
442 
443 static void tb_switch_nvm_remove(struct tb_switch *sw)
444 {
445 	struct tb_nvm *nvm;
446 
447 	nvm = sw->nvm;
448 	sw->nvm = NULL;
449 
450 	if (!nvm)
451 		return;
452 
453 	/* Remove authentication status in case the switch is unplugged */
454 	if (!nvm->authenticating)
455 		nvm_clear_auth_status(sw);
456 
457 	tb_nvm_free(nvm);
458 }
459 
460 /* port utility functions */
461 
462 static const char *tb_port_type(struct tb_regs_port_header *port)
463 {
464 	switch (port->type >> 16) {
465 	case 0:
466 		switch ((u8) port->type) {
467 		case 0:
468 			return "Inactive";
469 		case 1:
470 			return "Port";
471 		case 2:
472 			return "NHI";
473 		default:
474 			return "unknown";
475 		}
476 	case 0x2:
477 		return "Ethernet";
478 	case 0x8:
479 		return "SATA";
480 	case 0xe:
481 		return "DP/HDMI";
482 	case 0x10:
483 		return "PCIe";
484 	case 0x20:
485 		return "USB";
486 	default:
487 		return "unknown";
488 	}
489 }
490 
491 static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
492 {
493 	tb_dbg(tb,
494 	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
495 	       port->port_number, port->vendor_id, port->device_id,
496 	       port->revision, port->thunderbolt_version, tb_port_type(port),
497 	       port->type);
498 	tb_dbg(tb, "  Max hop id (in/out): %d/%d\n",
499 	       port->max_in_hop_id, port->max_out_hop_id);
500 	tb_dbg(tb, "  Max counters: %d\n", port->max_counters);
501 	tb_dbg(tb, "  NFC Credits: %#x\n", port->nfc_credits);
502 }
503 
504 /**
505  * tb_port_state() - get connectedness state of a port
506  * @port: the port to check
507  *
508  * The port must have a TB_CAP_PHY (i.e. it should be a real port).
509  *
510  * Return: Returns an enum tb_port_state on success or an error code on failure.
511  */
512 int tb_port_state(struct tb_port *port)
513 {
514 	struct tb_cap_phy phy;
515 	int res;
516 	if (port->cap_phy == 0) {
517 		tb_port_WARN(port, "does not have a PHY\n");
518 		return -EINVAL;
519 	}
520 	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
521 	if (res)
522 		return res;
523 	return phy.state;
524 }
525 
526 /**
527  * tb_wait_for_port() - wait for a port to become ready
528  * @port: Port to wait
529  * @wait_if_unplugged: Wait also when port is unplugged
530  *
531  * Wait up to 1 second for a port to reach state TB_PORT_UP. If
532  * wait_if_unplugged is set then we also wait if the port is in state
533  * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
534  * switch resume). Otherwise we only wait if a device is connected but the
535  * link has not yet been established.
536  *
537  * Return: Returns an error code on failure. Returns 0 if the port is not
538  * connected or failed to reach state TB_PORT_UP within one second. Returns 1
539  * if the port is connected and in state TB_PORT_UP.
540  */
541 int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
542 {
543 	int retries = 10;
544 	int state;
545 	if (!port->cap_phy) {
546 		tb_port_WARN(port, "does not have PHY\n");
547 		return -EINVAL;
548 	}
549 	if (tb_is_upstream_port(port)) {
550 		tb_port_WARN(port, "is the upstream port\n");
551 		return -EINVAL;
552 	}
553 
554 	while (retries--) {
555 		state = tb_port_state(port);
556 		if (state < 0)
557 			return state;
558 		if (state == TB_PORT_DISABLED) {
559 			tb_port_dbg(port, "is disabled (state: 0)\n");
560 			return 0;
561 		}
562 		if (state == TB_PORT_UNPLUGGED) {
563 			if (wait_if_unplugged) {
564 				/* used during resume */
565 				tb_port_dbg(port,
566 					    "is unplugged (state: 7), retrying...\n");
567 				msleep(100);
568 				continue;
569 			}
570 			tb_port_dbg(port, "is unplugged (state: 7)\n");
571 			return 0;
572 		}
573 		if (state == TB_PORT_UP) {
574 			tb_port_dbg(port, "is connected, link is up (state: 2)\n");
575 			return 1;
576 		}
577 
578 		/*
579 		 * After plug-in the state is TB_PORT_CONNECTING. Give it some
580 		 * time.
581 		 */
582 		tb_port_dbg(port,
583 			    "is connected, link is not up (state: %d), retrying...\n",
584 			    state);
585 		msleep(100);
586 	}
587 	tb_port_warn(port,
588 		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
589 	return 0;
590 }
591 
592 /**
593  * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
594  * @port: Port to add/remove NFC credits
595  * @credits: Credits to add/remove
596  *
597  * Change the number of NFC credits allocated to @port by @credits. To remove
598  * NFC credits pass a negative amount of credits.
599  *
600  * Return: Returns 0 on success or an error code on failure.
601  */
602 int tb_port_add_nfc_credits(struct tb_port *port, int credits)
603 {
604 	u32 nfc_credits;
605 
606 	if (credits == 0 || port->sw->is_unplugged)
607 		return 0;
608 
609 	/*
610 	 * USB4 restricts programming NFC buffers to lane adapters only
611 	 * so skip other ports.
612 	 */
613 	if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
614 		return 0;
615 
616 	nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
617 	nfc_credits += credits;
618 
619 	tb_port_dbg(port, "adding %d NFC credits to %lu\n", credits,
620 		    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);
621 
622 	port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
623 	port->config.nfc_credits |= nfc_credits;
624 
625 	return tb_port_write(port, &port->config.nfc_credits,
626 			     TB_CFG_PORT, ADP_CS_4, 1);
627 }
628 
629 /**
630  * tb_port_set_initial_credits() - Set initial port link credits allocated
631  * @port: Port to set the initial credits
632  * @credits: Number of credits to allocate
633  *
634  * Set initial credits value to be used for ingress shared buffering.
635  */
636 int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
637 {
638 	u32 data;
639 	int ret;
640 
641 	ret = tb_port_read(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
642 	if (ret)
643 		return ret;
644 
645 	data &= ~ADP_CS_5_LCA_MASK;
646 	data |= (credits << ADP_CS_5_LCA_SHIFT) & ADP_CS_5_LCA_MASK;
647 
648 	return tb_port_write(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
649 }
650 
651 /**
652  * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
653  * @port: Port whose counters to clear
654  * @counter: Counter index to clear
655  *
656  * Return: Returns 0 on success or an error code on failure.
657  */
658 int tb_port_clear_counter(struct tb_port *port, int counter)
659 {
660 	u32 zero[3] = { 0, 0, 0 };
661 	tb_port_dbg(port, "clearing counter %d\n", counter);
662 	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
663 }
664 
665 /**
666  * tb_port_unlock() - Unlock downstream port
667  * @port: Port to unlock
668  *
669  * Needed for USB4 but can be called for any CIO/USB4 port. Makes the
670  * downstream router accessible to the CM.
671  */
672 int tb_port_unlock(struct tb_port *port)
673 {
674 	if (tb_switch_is_icm(port->sw))
675 		return 0;
676 	if (!tb_port_is_null(port))
677 		return -EINVAL;
678 	if (tb_switch_is_usb4(port->sw))
679 		return usb4_port_unlock(port);
680 	return 0;
681 }
682 
683 static int __tb_port_enable(struct tb_port *port, bool enable)
684 {
685 	int ret;
686 	u32 phy;
687 
688 	if (!tb_port_is_null(port))
689 		return -EINVAL;
690 
691 	ret = tb_port_read(port, &phy, TB_CFG_PORT,
692 			   port->cap_phy + LANE_ADP_CS_1, 1);
693 	if (ret)
694 		return ret;
695 
696 	if (enable)
697 		phy &= ~LANE_ADP_CS_1_LD;
698 	else
699 		phy |= LANE_ADP_CS_1_LD;
700 
701 	return tb_port_write(port, &phy, TB_CFG_PORT,
702 			     port->cap_phy + LANE_ADP_CS_1, 1);
703 }
704 
705 /**
706  * tb_port_enable() - Enable lane adapter
707  * @port: Port to enable (can be %NULL)
708  *
709  * This is used to enable a lane 0 or lane 1 adapter.
710  */
711 int tb_port_enable(struct tb_port *port)
712 {
713 	return __tb_port_enable(port, true);
714 }
715 
716 /**
717  * tb_port_disable() - Disable lane adapter
718  * @port: Port to disable (can be %NULL)
719  *
720  * This is used to disable a lane 0 or lane 1 adapter.
721  */
722 int tb_port_disable(struct tb_port *port)
723 {
724 	return __tb_port_enable(port, false);
725 }
726 
727 /*
728  * tb_init_port() - initialize a port
729  *
730  * This is a helper method for tb_switch_alloc. Does not check or initialize
731  * any downstream switches.
732  *
733  * Return: Returns 0 on success or an error code on failure.
734  */
735 static int tb_init_port(struct tb_port *port)
736 {
737 	int res;
738 	int cap;
739 
740 	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
741 	if (res) {
742 		if (res == -ENODEV) {
743 			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
744 			       port->port);
745 			port->disabled = true;
746 			return 0;
747 		}
748 		return res;
749 	}
750 
751 	/* Port 0 is the switch itself and has no PHY. */
752 	if (port->config.type == TB_TYPE_PORT && port->port != 0) {
753 		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
754 
755 		if (cap > 0)
756 			port->cap_phy = cap;
757 		else
758 			tb_port_WARN(port, "non switch port without a PHY\n");
759 
760 		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
761 		if (cap > 0)
762 			port->cap_usb4 = cap;
763 	} else if (port->port != 0) {
764 		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
765 		if (cap > 0)
766 			port->cap_adap = cap;
767 	}
768 
769 	tb_dump_port(port->sw->tb, &port->config);
770 
771 	/* Control port does not need HopID allocation */
772 	if (port->port) {
773 		ida_init(&port->in_hopids);
774 		ida_init(&port->out_hopids);
775 	}
776 
777 	INIT_LIST_HEAD(&port->list);
778 	return 0;
779 
780 }
781 
782 static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
783 			       int max_hopid)
784 {
785 	int port_max_hopid;
786 	struct ida *ida;
787 
788 	if (in) {
789 		port_max_hopid = port->config.max_in_hop_id;
790 		ida = &port->in_hopids;
791 	} else {
792 		port_max_hopid = port->config.max_out_hop_id;
793 		ida = &port->out_hopids;
794 	}
795 
796 	/*
797 	 * NHI can use HopIDs 1-max. For other adapters HopIDs 0-7 are
798 	 * reserved.
799 	 */
800 	if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
801 		min_hopid = TB_PATH_MIN_HOPID;
802 
803 	if (max_hopid < 0 || max_hopid > port_max_hopid)
804 		max_hopid = port_max_hopid;
805 
806 	return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
807 }
808 
809 /**
810  * tb_port_alloc_in_hopid() - Allocate input HopID from port
811  * @port: Port to allocate HopID for
812  * @min_hopid: Minimum acceptable input HopID
813  * @max_hopid: Maximum acceptable input HopID
814  *
815  * Return: HopID between @min_hopid and @max_hopid or negative errno in
816  * case of error.
817  */
818 int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
819 {
820 	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
821 }
822 
823 /**
824  * tb_port_alloc_out_hopid() - Allocate output HopID from port
825  * @port: Port to allocate HopID for
826  * @min_hopid: Minimum acceptable output HopID
827  * @max_hopid: Maximum acceptable output HopID
828  *
829  * Return: HopID between @min_hopid and @max_hopid or negative errno in
830  * case of error.
831  */
832 int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
833 {
834 	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
835 }
836 
837 /**
838  * tb_port_release_in_hopid() - Release allocated input HopID from port
839  * @port: Port whose HopID to release
840  * @hopid: HopID to release
841  */
842 void tb_port_release_in_hopid(struct tb_port *port, int hopid)
843 {
844 	ida_simple_remove(&port->in_hopids, hopid);
845 }
846 
847 /**
848  * tb_port_release_out_hopid() - Release allocated output HopID from port
849  * @port: Port whose HopID to release
850  * @hopid: HopID to release
851  */
852 void tb_port_release_out_hopid(struct tb_port *port, int hopid)
853 {
854 	ida_simple_remove(&port->out_hopids, hopid);
855 }
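
/*
 * A path setup typically pairs allocation and release, for example
 * (a sketch; a negative @max_hopid means "no upper limit"):
 *
 *	hopid = tb_port_alloc_in_hopid(port, TB_PATH_MIN_HOPID, -1);
 *	if (hopid < 0)
 *		return hopid;
 *	...
 *	tb_port_release_in_hopid(port, hopid);
 */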
856 
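/*
 * A route string stores one downstream port number per hop, 8 bits per
 * hop. Two routers share the path up to @parent when their route
 * strings agree in the low (depth * 8) bits, which is what the mask
 * below checks.
 */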
857 static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
858 					  const struct tb_switch *sw)
859 {
860 	u64 mask = (1ULL << parent->config.depth * 8) - 1;
861 	return (tb_route(parent) & mask) == (tb_route(sw) & mask);
862 }
863 
864 /**
865  * tb_next_port_on_path() - Return next port for given port on a path
866  * @start: Start port of the walk
867  * @end: End port of the walk
868  * @prev: Previous port (%NULL if this is the first)
869  *
870  * This function can be used to walk from one port to another if they
871  * are connected through zero or more switches. If @prev is a dual
872  * link port, the function follows that link and returns the other end
873  * of that same link.
874  *
875  * If the @end port has been reached, return %NULL.
876  *
877  * Domain tb->lock must be held when this function is called.
878  */
879 struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
880 				     struct tb_port *prev)
881 {
882 	struct tb_port *next;
883 
884 	if (!prev)
885 		return start;
886 
887 	if (prev->sw == end->sw) {
888 		if (prev == end)
889 			return NULL;
890 		return end;
891 	}
892 
893 	if (tb_switch_is_reachable(prev->sw, end->sw)) {
894 		next = tb_port_at(tb_route(end->sw), prev->sw);
895 		/* Walk down the topology if next == prev */
896 		if (prev->remote &&
897 		    (next == prev || next->dual_link_port == prev))
898 			next = prev->remote;
899 	} else {
900 		if (tb_is_upstream_port(prev)) {
901 			next = prev->remote;
902 		} else {
903 			next = tb_upstream_port(prev->sw);
904 			/*
905 			 * Keep the same link if prev and next are both
906 			 * dual link ports.
907 			 */
908 			if (next->dual_link_port &&
909 			    next->link_nr != prev->link_nr) {
910 				next = next->dual_link_port;
911 			}
912 		}
913 	}
914 
915 	return next != prev ? next : NULL;
916 }
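
/*
 * Typical use is to visit every port on the path between two ports,
 * for example (a sketch; process_port() is only a placeholder and the
 * tb_for_each_port_on_path() helper in tb.h wraps this same walk):
 *
 *	struct tb_port *p = NULL;
 *
 *	while ((p = tb_next_port_on_path(src, dst, p)) != NULL)
 *		process_port(p);
 */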
917 
918 /**
919  * tb_port_get_link_speed() - Get current link speed
920  * @port: Port to check (USB4 or CIO)
921  *
922  * Returns link speed in Gb/s or negative errno in case of failure.
923  */
924 int tb_port_get_link_speed(struct tb_port *port)
925 {
926 	u32 val, speed;
927 	int ret;
928 
929 	if (!port->cap_phy)
930 		return -EINVAL;
931 
932 	ret = tb_port_read(port, &val, TB_CFG_PORT,
933 			   port->cap_phy + LANE_ADP_CS_1, 1);
934 	if (ret)
935 		return ret;
936 
937 	speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
938 		LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
939 	return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
940 }
941 
942 /**
943  * tb_port_get_link_width() - Get current link width
944  * @port: Port to check (USB4 or CIO)
945  *
946  * Returns link width. Return values can be 1 (Single-Lane), 2 (Dual-Lane)
947  * or negative errno in case of failure.
948  */
949 int tb_port_get_link_width(struct tb_port *port)
950 {
951 	u32 val;
952 	int ret;
953 
954 	if (!port->cap_phy)
955 		return -EINVAL;
956 
957 	ret = tb_port_read(port, &val, TB_CFG_PORT,
958 			   port->cap_phy + LANE_ADP_CS_1, 1);
959 	if (ret)
960 		return ret;
961 
962 	return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
963 		LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
964 }
965 
966 static bool tb_port_is_width_supported(struct tb_port *port, int width)
967 {
968 	u32 phy, widths;
969 	int ret;
970 
971 	if (!port->cap_phy)
972 		return false;
973 
974 	ret = tb_port_read(port, &phy, TB_CFG_PORT,
975 			   port->cap_phy + LANE_ADP_CS_0, 1);
976 	if (ret)
977 		return false;
978 
979 	widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
980 		LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
981 
982 	return !!(widths & width);
983 }
984 
985 static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
986 {
987 	u32 val;
988 	int ret;
989 
990 	if (!port->cap_phy)
991 		return -EINVAL;
992 
993 	ret = tb_port_read(port, &val, TB_CFG_PORT,
994 			   port->cap_phy + LANE_ADP_CS_1, 1);
995 	if (ret)
996 		return ret;
997 
998 	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
999 	switch (width) {
1000 	case 1:
1001 		val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
1002 			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
1003 		break;
1004 	case 2:
1005 		val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
1006 			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
1007 		break;
1008 	default:
1009 		return -EINVAL;
1010 	}
1011 
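	/* LB presumably requests lane bonding along with the new target width */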
1012 	val |= LANE_ADP_CS_1_LB;
1013 
1014 	return tb_port_write(port, &val, TB_CFG_PORT,
1015 			     port->cap_phy + LANE_ADP_CS_1, 1);
1016 }
1017 
1018 /**
1019  * tb_port_lane_bonding_enable() - Enable bonding on port
1020  * @port: port to enable
1021  *
1022  * Enable bonding by setting the link width of the port and the
1023  * other port in case of dual link port.
1024  *
1025  * Return: %0 in case of success and negative errno in case of error
1026  */
1027 int tb_port_lane_bonding_enable(struct tb_port *port)
1028 {
1029 	int ret;
1030 
1031 	/*
1032 	 * Enable lane bonding for both links if not already enabled by,
1033 	 * for example, the boot firmware.
1034 	 */
1035 	ret = tb_port_get_link_width(port);
1036 	if (ret == 1) {
1037 		ret = tb_port_set_link_width(port, 2);
1038 		if (ret)
1039 			return ret;
1040 	}
1041 
1042 	ret = tb_port_get_link_width(port->dual_link_port);
1043 	if (ret == 1) {
1044 		ret = tb_port_set_link_width(port->dual_link_port, 2);
1045 		if (ret) {
1046 			tb_port_set_link_width(port, 1);
1047 			return ret;
1048 		}
1049 	}
1050 
1051 	port->bonded = true;
1052 	port->dual_link_port->bonded = true;
1053 
1054 	return 0;
1055 }
1056 
1057 /**
1058  * tb_port_lane_bonding_disable() - Disable bonding on port
1059  * @port: port to disable
1060  *
1061  * Disable bonding by setting the link width of the port and the
1062  * other port in case of dual link port.
1063  *
1064  */
1065 void tb_port_lane_bonding_disable(struct tb_port *port)
1066 {
1067 	port->dual_link_port->bonded = false;
1068 	port->bonded = false;
1069 
1070 	tb_port_set_link_width(port->dual_link_port, 1);
1071 	tb_port_set_link_width(port, 1);
1072 }
1073 
1074 static int tb_port_start_lane_initialization(struct tb_port *port)
1075 {
1076 	int ret;
1077 
1078 	if (tb_switch_is_usb4(port->sw))
1079 		return 0;
1080 
1081 	ret = tb_lc_start_lane_initialization(port);
1082 	return ret == -EINVAL ? 0 : ret;
1083 }
1084 
1085 /**
1086  * tb_port_is_enabled() - Is the adapter port enabled
1087  * @port: Port to check
1088  */
1089 bool tb_port_is_enabled(struct tb_port *port)
1090 {
1091 	switch (port->config.type) {
1092 	case TB_TYPE_PCIE_UP:
1093 	case TB_TYPE_PCIE_DOWN:
1094 		return tb_pci_port_is_enabled(port);
1095 
1096 	case TB_TYPE_DP_HDMI_IN:
1097 	case TB_TYPE_DP_HDMI_OUT:
1098 		return tb_dp_port_is_enabled(port);
1099 
1100 	case TB_TYPE_USB3_UP:
1101 	case TB_TYPE_USB3_DOWN:
1102 		return tb_usb3_port_is_enabled(port);
1103 
1104 	default:
1105 		return false;
1106 	}
1107 }
1108 
1109 /**
1110  * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
1111  * @port: USB3 adapter port to check
1112  */
1113 bool tb_usb3_port_is_enabled(struct tb_port *port)
1114 {
1115 	u32 data;
1116 
1117 	if (tb_port_read(port, &data, TB_CFG_PORT,
1118 			 port->cap_adap + ADP_USB3_CS_0, 1))
1119 		return false;
1120 
1121 	return !!(data & ADP_USB3_CS_0_PE);
1122 }
1123 
1124 /**
1125  * tb_usb3_port_enable() - Enable USB3 adapter port
1126  * @port: USB3 adapter port to enable
1127  * @enable: Enable/disable the USB3 adapter
1128  */
1129 int tb_usb3_port_enable(struct tb_port *port, bool enable)
1130 {
1131 	u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
1132 			  : ADP_USB3_CS_0_V;
1133 
1134 	if (!port->cap_adap)
1135 		return -ENXIO;
1136 	return tb_port_write(port, &word, TB_CFG_PORT,
1137 			     port->cap_adap + ADP_USB3_CS_0, 1);
1138 }
1139 
1140 /**
1141  * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
1142  * @port: PCIe port to check
1143  */
1144 bool tb_pci_port_is_enabled(struct tb_port *port)
1145 {
1146 	u32 data;
1147 
1148 	if (tb_port_read(port, &data, TB_CFG_PORT,
1149 			 port->cap_adap + ADP_PCIE_CS_0, 1))
1150 		return false;
1151 
1152 	return !!(data & ADP_PCIE_CS_0_PE);
1153 }
1154 
1155 /**
1156  * tb_pci_port_enable() - Enable PCIe adapter port
1157  * @port: PCIe port to enable
1158  * @enable: Enable/disable the PCIe adapter
1159  */
1160 int tb_pci_port_enable(struct tb_port *port, bool enable)
1161 {
1162 	u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
1163 	if (!port->cap_adap)
1164 		return -ENXIO;
1165 	return tb_port_write(port, &word, TB_CFG_PORT,
1166 			     port->cap_adap + ADP_PCIE_CS_0, 1);
1167 }
1168 
1169 /**
1170  * tb_dp_port_hpd_is_active() - Is HPD already active
1171  * @port: DP out port to check
1172  *
1173  * Checks if the DP OUT adapter port has HDP bit already set.
1174  * Checks if the DP OUT adapter port has the HDP bit already set.
1175 int tb_dp_port_hpd_is_active(struct tb_port *port)
1176 {
1177 	u32 data;
1178 	int ret;
1179 
1180 	ret = tb_port_read(port, &data, TB_CFG_PORT,
1181 			   port->cap_adap + ADP_DP_CS_2, 1);
1182 	if (ret)
1183 		return ret;
1184 
1185 	return !!(data & ADP_DP_CS_2_HDP);
1186 }
1187 
1188 /**
1189  * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
1190  * @port: Port to clear HPD
1191  *
1192  * If the DP IN port has HDP set, this function can be used to clear it.
1193  */
1194 int tb_dp_port_hpd_clear(struct tb_port *port)
1195 {
1196 	u32 data;
1197 	int ret;
1198 
1199 	ret = tb_port_read(port, &data, TB_CFG_PORT,
1200 			   port->cap_adap + ADP_DP_CS_3, 1);
1201 	if (ret)
1202 		return ret;
1203 
1204 	data |= ADP_DP_CS_3_HDPC;
1205 	return tb_port_write(port, &data, TB_CFG_PORT,
1206 			     port->cap_adap + ADP_DP_CS_3, 1);
1207 }
1208 
1209 /**
1210  * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
1211  * @port: DP IN/OUT port to set hops
1212  * @video: Video Hop ID
1213  * @aux_tx: AUX TX Hop ID
1214  * @aux_rx: AUX RX Hop ID
1215  *
1216  * Programs specified Hop IDs for DP IN/OUT port.
1217  */
1218 int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
1219 			unsigned int aux_tx, unsigned int aux_rx)
1220 {
1221 	u32 data[2];
1222 	int ret;
1223 
1224 	ret = tb_port_read(port, data, TB_CFG_PORT,
1225 			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1226 	if (ret)
1227 		return ret;
1228 
1229 	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
1230 	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
1231 	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
1232 
1233 	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
1234 		ADP_DP_CS_0_VIDEO_HOPID_MASK;
1235 	data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
1236 	data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
1237 		ADP_DP_CS_1_AUX_RX_HOPID_MASK;
1238 
1239 	return tb_port_write(port, data, TB_CFG_PORT,
1240 			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1241 }
1242 
1243 /**
1244  * tb_dp_port_is_enabled() - Is DP adapter port enabled
1245  * @port: DP adapter port to check
1246  */
1247 bool tb_dp_port_is_enabled(struct tb_port *port)
1248 {
1249 	u32 data[2];
1250 
1251 	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
1252 			 ARRAY_SIZE(data)))
1253 		return false;
1254 
1255 	return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
1256 }
1257 
1258 /**
1259  * tb_dp_port_enable() - Enables/disables DP paths of a port
1260  * @port: DP IN/OUT port
1261  * @enable: Enable/disable DP path
1262  *
1263  * Once Hop IDs are programmed DP paths can be enabled or disabled by
1264  * calling this function.
1265  */
1266 int tb_dp_port_enable(struct tb_port *port, bool enable)
1267 {
1268 	u32 data[2];
1269 	int ret;
1270 
1271 	ret = tb_port_read(port, data, TB_CFG_PORT,
1272 			  port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1273 	if (ret)
1274 		return ret;
1275 
1276 	if (enable)
1277 		data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
1278 	else
1279 		data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);
1280 
1281 	return tb_port_write(port, data, TB_CFG_PORT,
1282 			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1283 }
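
/*
 * A DP tunnel end is typically brought up in two steps, for example
 * (a sketch of the order implied by the helpers above):
 *
 *	ret = tb_dp_port_set_hops(port, video, aux_tx, aux_rx);
 *	if (!ret)
 *		ret = tb_dp_port_enable(port, true);
 */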
1284 
1285 /* switch utility functions */
1286 
1287 static const char *tb_switch_generation_name(const struct tb_switch *sw)
1288 {
1289 	switch (sw->generation) {
1290 	case 1:
1291 		return "Thunderbolt 1";
1292 	case 2:
1293 		return "Thunderbolt 2";
1294 	case 3:
1295 		return "Thunderbolt 3";
1296 	case 4:
1297 		return "USB4";
1298 	default:
1299 		return "Unknown";
1300 	}
1301 }
1302 
1303 static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
1304 {
1305 	const struct tb_regs_switch_header *regs = &sw->config;
1306 
1307 	tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
1308 	       tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
1309 	       regs->revision, regs->thunderbolt_version);
1310 	tb_dbg(tb, "  Max Port Number: %d\n", regs->max_port_number);
1311 	tb_dbg(tb, "  Config:\n");
1312 	tb_dbg(tb,
1313 		"   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
1314 	       regs->upstream_port_number, regs->depth,
1315 	       (((u64) regs->route_hi) << 32) | regs->route_lo,
1316 	       regs->enabled, regs->plug_events_delay);
1317 	tb_dbg(tb, "   unknown1: %#x unknown4: %#x\n",
1318 	       regs->__unknown1, regs->__unknown4);
1319 }
1320 
1321 /**
1322  * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
1323  * @sw: Switch to reset
1324  *
1325  * Return: Returns 0 on success or an error code on failure.
1326  */
1327 int tb_switch_reset(struct tb_switch *sw)
1328 {
1329 	struct tb_cfg_result res;
1330 
1331 	if (sw->generation > 1)
1332 		return 0;
1333 
1334 	tb_sw_dbg(sw, "resetting switch\n");
1335 
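	/*
	 * Write dwords 2 and 3 of the config header (the route string
	 * and the enabled bit) back to the router before sending the
	 * reset packet.
	 */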
1336 	res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
1337 			      TB_CFG_SWITCH, 2, 2);
1338 	if (res.err)
1339 		return res.err;
1340 	res = tb_cfg_reset(sw->tb->ctl, tb_route(sw), TB_CFG_DEFAULT_TIMEOUT);
1341 	if (res.err > 0)
1342 		return -EIO;
1343 	return res.err;
1344 }
1345 
1346 /*
1347  * tb_plug_events_active() - enable/disable plug events on a switch
1348  *
1349  * Also configures a sane plug_events_delay of 255ms.
1350  *
1351  * Return: Returns 0 on success or an error code on failure.
1352  */
1353 static int tb_plug_events_active(struct tb_switch *sw, bool active)
1354 {
1355 	u32 data;
1356 	int res;
1357 
1358 	if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
1359 		return 0;
1360 
1361 	sw->config.plug_events_delay = 0xff;
1362 	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
1363 	if (res)
1364 		return res;
1365 
1366 	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
1367 	if (res)
1368 		return res;
1369 
1370 	if (active) {
1371 		data = data & 0xFFFFFF83;
1372 		switch (sw->config.device_id) {
1373 		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1374 		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
1375 		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
1376 			break;
1377 		default:
1378 			data |= 4;
1379 		}
1380 	} else {
1381 		data = data | 0x7c;
1382 	}
1383 	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
1384 			   sw->cap_plug_events + 1, 1);
1385 }
1386 
1387 static ssize_t authorized_show(struct device *dev,
1388 			       struct device_attribute *attr,
1389 			       char *buf)
1390 {
1391 	struct tb_switch *sw = tb_to_switch(dev);
1392 
1393 	return sprintf(buf, "%u\n", sw->authorized);
1394 }
1395 
1396 static int disapprove_switch(struct device *dev, void *not_used)
1397 {
1398 	struct tb_switch *sw;
1399 
1400 	sw = tb_to_switch(dev);
1401 	if (sw && sw->authorized) {
1402 		int ret;
1403 
1404 		/* First children */
1405 		ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
1406 		if (ret)
1407 			return ret;
1408 
1409 		ret = tb_domain_disapprove_switch(sw->tb, sw);
1410 		if (ret)
1411 			return ret;
1412 
1413 		sw->authorized = 0;
1414 		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
1415 	}
1416 
1417 	return 0;
1418 }
1419 
1420 static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
1421 {
1422 	int ret = -EINVAL;
1423 
1424 	if (!mutex_trylock(&sw->tb->lock))
1425 		return restart_syscall();
1426 
1427 	if (!!sw->authorized == !!val)
1428 		goto unlock;
1429 
1430 	switch (val) {
1431 	/* Disapprove switch */
1432 	case 0:
1433 		if (tb_route(sw)) {
1434 			ret = disapprove_switch(&sw->dev, NULL);
1435 			goto unlock;
1436 		}
1437 		break;
1438 
1439 	/* Approve switch */
1440 	case 1:
1441 		if (sw->key)
1442 			ret = tb_domain_approve_switch_key(sw->tb, sw);
1443 		else
1444 			ret = tb_domain_approve_switch(sw->tb, sw);
1445 		break;
1446 
1447 	/* Challenge switch */
1448 	case 2:
1449 		if (sw->key)
1450 			ret = tb_domain_challenge_switch_key(sw->tb, sw);
1451 		break;
1452 
1453 	default:
1454 		break;
1455 	}
1456 
1457 	if (!ret) {
1458 		sw->authorized = val;
1459 		/* Notify status change to the userspace */
1460 		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
1461 	}
1462 
1463 unlock:
1464 	mutex_unlock(&sw->tb->lock);
1465 	return ret;
1466 }
1467 
1468 static ssize_t authorized_store(struct device *dev,
1469 				struct device_attribute *attr,
1470 				const char *buf, size_t count)
1471 {
1472 	struct tb_switch *sw = tb_to_switch(dev);
1473 	unsigned int val;
1474 	ssize_t ret;
1475 
1476 	ret = kstrtouint(buf, 0, &val);
1477 	if (ret)
1478 		return ret;
1479 	if (val > 2)
1480 		return -EINVAL;
1481 
1482 	pm_runtime_get_sync(&sw->dev);
1483 	ret = tb_switch_set_authorized(sw, val);
1484 	pm_runtime_mark_last_busy(&sw->dev);
1485 	pm_runtime_put_autosuspend(&sw->dev);
1486 
1487 	return ret ? ret : count;
1488 }
1489 static DEVICE_ATTR_RW(authorized);
1490 
1491 static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
1492 			 char *buf)
1493 {
1494 	struct tb_switch *sw = tb_to_switch(dev);
1495 
1496 	return sprintf(buf, "%u\n", sw->boot);
1497 }
1498 static DEVICE_ATTR_RO(boot);
1499 
1500 static ssize_t device_show(struct device *dev, struct device_attribute *attr,
1501 			   char *buf)
1502 {
1503 	struct tb_switch *sw = tb_to_switch(dev);
1504 
1505 	return sprintf(buf, "%#x\n", sw->device);
1506 }
1507 static DEVICE_ATTR_RO(device);
1508 
1509 static ssize_t
1510 device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1511 {
1512 	struct tb_switch *sw = tb_to_switch(dev);
1513 
1514 	return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
1515 }
1516 static DEVICE_ATTR_RO(device_name);
1517 
1518 static ssize_t
1519 generation_show(struct device *dev, struct device_attribute *attr, char *buf)
1520 {
1521 	struct tb_switch *sw = tb_to_switch(dev);
1522 
1523 	return sprintf(buf, "%u\n", sw->generation);
1524 }
1525 static DEVICE_ATTR_RO(generation);
1526 
1527 static ssize_t key_show(struct device *dev, struct device_attribute *attr,
1528 			char *buf)
1529 {
1530 	struct tb_switch *sw = tb_to_switch(dev);
1531 	ssize_t ret;
1532 
1533 	if (!mutex_trylock(&sw->tb->lock))
1534 		return restart_syscall();
1535 
1536 	if (sw->key)
1537 		ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
1538 	else
1539 		ret = sprintf(buf, "\n");
1540 
1541 	mutex_unlock(&sw->tb->lock);
1542 	return ret;
1543 }
1544 
1545 static ssize_t key_store(struct device *dev, struct device_attribute *attr,
1546 			 const char *buf, size_t count)
1547 {
1548 	struct tb_switch *sw = tb_to_switch(dev);
1549 	u8 key[TB_SWITCH_KEY_SIZE];
1550 	ssize_t ret = count;
1551 	bool clear = false;
1552 
1553 	if (!strcmp(buf, "\n"))
1554 		clear = true;
1555 	else if (hex2bin(key, buf, sizeof(key)))
1556 		return -EINVAL;
1557 
1558 	if (!mutex_trylock(&sw->tb->lock))
1559 		return restart_syscall();
1560 
1561 	if (sw->authorized) {
1562 		ret = -EBUSY;
1563 	} else {
1564 		kfree(sw->key);
1565 		if (clear) {
1566 			sw->key = NULL;
1567 		} else {
1568 			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
1569 			if (!sw->key)
1570 				ret = -ENOMEM;
1571 		}
1572 	}
1573 
1574 	mutex_unlock(&sw->tb->lock);
1575 	return ret;
1576 }
1577 static DEVICE_ATTR(key, 0600, key_show, key_store);
1578 
1579 static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
1580 			  char *buf)
1581 {
1582 	struct tb_switch *sw = tb_to_switch(dev);
1583 
1584 	return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
1585 }
1586 
1587 /*
1588  * Currently all lanes must run at the same speed but we expose both
1589  * directions here to allow possible asymmetric links in the future.
1590  */
1591 static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
1592 static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
1593 
1594 static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
1595 			  char *buf)
1596 {
1597 	struct tb_switch *sw = tb_to_switch(dev);
1598 
1599 	return sprintf(buf, "%u\n", sw->link_width);
1600 }
1601 
1602 /*
1603  * Currently the link has the same number of lanes in both directions
1604  * (1 or 2) but we expose them separately to allow asymmetric links in the future.
1605  */
1606 static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
1607 static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
1608 
1609 static ssize_t nvm_authenticate_show(struct device *dev,
1610 	struct device_attribute *attr, char *buf)
1611 {
1612 	struct tb_switch *sw = tb_to_switch(dev);
1613 	u32 status;
1614 
1615 	nvm_get_auth_status(sw, &status);
1616 	return sprintf(buf, "%#x\n", status);
1617 }
1618 
1619 static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
1620 				      bool disconnect)
1621 {
1622 	struct tb_switch *sw = tb_to_switch(dev);
1623 	int val;
1624 	int ret;
1625 
1626 	pm_runtime_get_sync(&sw->dev);
1627 
1628 	if (!mutex_trylock(&sw->tb->lock)) {
1629 		ret = restart_syscall();
1630 		goto exit_rpm;
1631 	}
1632 
1633 	/* If NVMem devices are not yet added */
1634 	if (!sw->nvm) {
1635 		ret = -EAGAIN;
1636 		goto exit_unlock;
1637 	}
1638 
1639 	ret = kstrtoint(buf, 10, &val);
1640 	if (ret)
1641 		goto exit_unlock;
1642 
1643 	/* Always clear the authentication status */
1644 	nvm_clear_auth_status(sw);
1645 
1646 	if (val > 0) {
1647 		if (!sw->nvm->flushed) {
1648 			if (!sw->nvm->buf) {
1649 				ret = -EINVAL;
1650 				goto exit_unlock;
1651 			}
1652 
1653 			ret = nvm_validate_and_write(sw);
1654 			if (ret || val == WRITE_ONLY)
1655 				goto exit_unlock;
1656 		}
1657 		if (val == WRITE_AND_AUTHENTICATE) {
1658 			if (disconnect) {
1659 				ret = tb_lc_force_power(sw);
1660 			} else {
1661 				sw->nvm->authenticating = true;
1662 				ret = nvm_authenticate(sw);
1663 			}
1664 		}
1665 	}
1666 
1667 exit_unlock:
1668 	mutex_unlock(&sw->tb->lock);
1669 exit_rpm:
1670 	pm_runtime_mark_last_busy(&sw->dev);
1671 	pm_runtime_put_autosuspend(&sw->dev);
1672 
1673 	return ret;
1674 }
1675 
1676 static ssize_t nvm_authenticate_store(struct device *dev,
1677 	struct device_attribute *attr, const char *buf, size_t count)
1678 {
1679 	int ret = nvm_authenticate_sysfs(dev, buf, false);
1680 	if (ret)
1681 		return ret;
1682 	return count;
1683 }
1684 static DEVICE_ATTR_RW(nvm_authenticate);
1685 
1686 static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
1687 	struct device_attribute *attr, char *buf)
1688 {
1689 	return nvm_authenticate_show(dev, attr, buf);
1690 }
1691 
1692 static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
1693 	struct device_attribute *attr, const char *buf, size_t count)
1694 {
1695 	int ret;
1696 
1697 	ret = nvm_authenticate_sysfs(dev, buf, true);
1698 	return ret ? ret : count;
1699 }
1700 static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);
1701 
1702 static ssize_t nvm_version_show(struct device *dev,
1703 				struct device_attribute *attr, char *buf)
1704 {
1705 	struct tb_switch *sw = tb_to_switch(dev);
1706 	int ret;
1707 
1708 	if (!mutex_trylock(&sw->tb->lock))
1709 		return restart_syscall();
1710 
1711 	if (sw->safe_mode)
1712 		ret = -ENODATA;
1713 	else if (!sw->nvm)
1714 		ret = -EAGAIN;
1715 	else
1716 		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
1717 
1718 	mutex_unlock(&sw->tb->lock);
1719 
1720 	return ret;
1721 }
1722 static DEVICE_ATTR_RO(nvm_version);
1723 
1724 static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
1725 			   char *buf)
1726 {
1727 	struct tb_switch *sw = tb_to_switch(dev);
1728 
1729 	return sprintf(buf, "%#x\n", sw->vendor);
1730 }
1731 static DEVICE_ATTR_RO(vendor);
1732 
1733 static ssize_t
1734 vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1735 {
1736 	struct tb_switch *sw = tb_to_switch(dev);
1737 
1738 	return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
1739 }
1740 static DEVICE_ATTR_RO(vendor_name);
1741 
1742 static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
1743 			      char *buf)
1744 {
1745 	struct tb_switch *sw = tb_to_switch(dev);
1746 
1747 	return sprintf(buf, "%pUb\n", sw->uuid);
1748 }
1749 static DEVICE_ATTR_RO(unique_id);
1750 
1751 static struct attribute *switch_attrs[] = {
1752 	&dev_attr_authorized.attr,
1753 	&dev_attr_boot.attr,
1754 	&dev_attr_device.attr,
1755 	&dev_attr_device_name.attr,
1756 	&dev_attr_generation.attr,
1757 	&dev_attr_key.attr,
1758 	&dev_attr_nvm_authenticate.attr,
1759 	&dev_attr_nvm_authenticate_on_disconnect.attr,
1760 	&dev_attr_nvm_version.attr,
1761 	&dev_attr_rx_speed.attr,
1762 	&dev_attr_rx_lanes.attr,
1763 	&dev_attr_tx_speed.attr,
1764 	&dev_attr_tx_lanes.attr,
1765 	&dev_attr_vendor.attr,
1766 	&dev_attr_vendor_name.attr,
1767 	&dev_attr_unique_id.attr,
1768 	NULL,
1769 };
1770 
1771 static umode_t switch_attr_is_visible(struct kobject *kobj,
1772 				      struct attribute *attr, int n)
1773 {
1774 	struct device *dev = kobj_to_dev(kobj);
1775 	struct tb_switch *sw = tb_to_switch(dev);
1776 
1777 	if (attr == &dev_attr_authorized.attr) {
1778 		if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
1779 		    sw->tb->security_level == TB_SECURITY_DPONLY)
1780 			return 0;
1781 	} else if (attr == &dev_attr_device.attr) {
1782 		if (!sw->device)
1783 			return 0;
1784 	} else if (attr == &dev_attr_device_name.attr) {
1785 		if (!sw->device_name)
1786 			return 0;
1787 	} else if (attr == &dev_attr_vendor.attr)  {
1788 		if (!sw->vendor)
1789 			return 0;
1790 	} else if (attr == &dev_attr_vendor_name.attr)  {
1791 		if (!sw->vendor_name)
1792 			return 0;
1793 	} else if (attr == &dev_attr_key.attr) {
1794 		if (tb_route(sw) &&
1795 		    sw->tb->security_level == TB_SECURITY_SECURE &&
1796 		    sw->security_level == TB_SECURITY_SECURE)
1797 			return attr->mode;
1798 		return 0;
1799 	} else if (attr == &dev_attr_rx_speed.attr ||
1800 		   attr == &dev_attr_rx_lanes.attr ||
1801 		   attr == &dev_attr_tx_speed.attr ||
1802 		   attr == &dev_attr_tx_lanes.attr) {
1803 		if (tb_route(sw))
1804 			return attr->mode;
1805 		return 0;
1806 	} else if (attr == &dev_attr_nvm_authenticate.attr) {
1807 		if (nvm_upgradeable(sw))
1808 			return attr->mode;
1809 		return 0;
1810 	} else if (attr == &dev_attr_nvm_version.attr) {
1811 		if (nvm_readable(sw))
1812 			return attr->mode;
1813 		return 0;
1814 	} else if (attr == &dev_attr_boot.attr) {
1815 		if (tb_route(sw))
1816 			return attr->mode;
1817 		return 0;
1818 	} else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
1819 		if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
1820 			return attr->mode;
1821 		return 0;
1822 	}
1823 
1824 	return sw->safe_mode ? 0 : attr->mode;
1825 }
1826 
1827 static const struct attribute_group switch_group = {
1828 	.is_visible = switch_attr_is_visible,
1829 	.attrs = switch_attrs,
1830 };
1831 
1832 static const struct attribute_group *switch_groups[] = {
1833 	&switch_group,
1834 	NULL,
1835 };
1836 
1837 static void tb_switch_release(struct device *dev)
1838 {
1839 	struct tb_switch *sw = tb_to_switch(dev);
1840 	struct tb_port *port;
1841 
1842 	dma_port_free(sw->dma_port);
1843 
1844 	tb_switch_for_each_port(sw, port) {
1845 		if (!port->disabled) {
1846 			ida_destroy(&port->in_hopids);
1847 			ida_destroy(&port->out_hopids);
1848 		}
1849 	}
1850 
1851 	kfree(sw->uuid);
1852 	kfree(sw->device_name);
1853 	kfree(sw->vendor_name);
1854 	kfree(sw->ports);
1855 	kfree(sw->drom);
1856 	kfree(sw->key);
1857 	kfree(sw);
1858 }
1859 
1860 /*
1861  * Currently we only need to provide the callbacks. Everything else is
1862  * handled in the connection manager.
1863  */
1864 static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
1865 {
1866 	struct tb_switch *sw = tb_to_switch(dev);
1867 	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
1868 
1869 	if (cm_ops->runtime_suspend_switch)
1870 		return cm_ops->runtime_suspend_switch(sw);
1871 
1872 	return 0;
1873 }
1874 
1875 static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
1876 {
1877 	struct tb_switch *sw = tb_to_switch(dev);
1878 	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
1879 
1880 	if (cm_ops->runtime_resume_switch)
1881 		return cm_ops->runtime_resume_switch(sw);
1882 	return 0;
1883 }
1884 
1885 static const struct dev_pm_ops tb_switch_pm_ops = {
1886 	SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
1887 			   NULL)
1888 };
1889 
1890 struct device_type tb_switch_type = {
1891 	.name = "thunderbolt_device",
1892 	.release = tb_switch_release,
1893 	.pm = &tb_switch_pm_ops,
1894 };
1895 
1896 static int tb_switch_get_generation(struct tb_switch *sw)
1897 {
1898 	switch (sw->config.device_id) {
1899 	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1900 	case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
1901 	case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
1902 	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
1903 	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
1904 	case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
1905 	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
1906 	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
1907 		return 1;
1908 
1909 	case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
1910 	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
1911 	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
1912 		return 2;
1913 
1914 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
1915 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
1916 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
1917 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
1918 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
1919 	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
1920 	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
1921 	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
1922 	case PCI_DEVICE_ID_INTEL_ICL_NHI0:
1923 	case PCI_DEVICE_ID_INTEL_ICL_NHI1:
1924 		return 3;
1925 
1926 	default:
1927 		if (tb_switch_is_usb4(sw))
1928 			return 4;
1929 
1930 		/*
1931 		 * For unknown switches assume generation to be 1 to be
1932 		 * on the safe side.
1933 		 */
1934 		tb_sw_warn(sw, "unsupported switch device id %#x\n",
1935 			   sw->config.device_id);
1936 		return 1;
1937 	}
1938 }
1939 
1940 static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
1941 {
1942 	int max_depth;
1943 
1944 	if (tb_switch_is_usb4(sw) ||
1945 	    (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
1946 		max_depth = USB4_SWITCH_MAX_DEPTH;
1947 	else
1948 		max_depth = TB_SWITCH_MAX_DEPTH;
1949 
1950 	return depth > max_depth;
1951 }
1952 
1953 /**
1954  * tb_switch_alloc() - allocate a switch
1955  * @tb: Pointer to the owning domain
1956  * @parent: Parent device for this switch
1957  * @route: Route string for this switch
1958  *
1959  * Allocates and initializes a switch. Will not upload configuration to
1960  * the switch. For that you need to call tb_switch_configure()
1961  * separately. The returned switch should be released by calling
1962  * tb_switch_put().
1963  *
1964  * Return: Pointer to the allocated switch or ERR_PTR() in case of
1965  * failure.
1966  */
1967 struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
1968 				  u64 route)
1969 {
1970 	struct tb_switch *sw;
1971 	int upstream_port;
1972 	int i, ret, depth;
1973 
1974 	/* Unlock the downstream port so we can access the switch below */
1975 	if (route) {
1976 		struct tb_switch *parent_sw = tb_to_switch(parent);
1977 		struct tb_port *down;
1978 
1979 		down = tb_port_at(route, parent_sw);
1980 		tb_port_unlock(down);
1981 	}
1982 
1983 	depth = tb_route_length(route);
1984 
1985 	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
1986 	if (upstream_port < 0)
1987 		return ERR_PTR(upstream_port);
1988 
1989 	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
1990 	if (!sw)
1991 		return ERR_PTR(-ENOMEM);
1992 
1993 	sw->tb = tb;
1994 	ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
1995 	if (ret)
1996 		goto err_free_sw_ports;
1997 
1998 	sw->generation = tb_switch_get_generation(sw);
1999 
2000 	tb_dbg(tb, "current switch config:\n");
2001 	tb_dump_switch(tb, sw);
2002 
2003 	/* configure switch */
2004 	sw->config.upstream_port_number = upstream_port;
2005 	sw->config.depth = depth;
2006 	sw->config.route_hi = upper_32_bits(route);
2007 	sw->config.route_lo = lower_32_bits(route);
2008 	sw->config.enabled = 0;
2009 
2010 	/* Make sure we do not exceed maximum topology limit */
2011 	if (tb_switch_exceeds_max_depth(sw, depth)) {
2012 		ret = -EADDRNOTAVAIL;
2013 		goto err_free_sw_ports;
2014 	}
2015 
2016 	/* initialize ports */
2017 	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
2018 				GFP_KERNEL);
2019 	if (!sw->ports) {
2020 		ret = -ENOMEM;
2021 		goto err_free_sw_ports;
2022 	}
2023 
2024 	for (i = 0; i <= sw->config.max_port_number; i++) {
2025 		/* minimum setup for tb_find_cap and tb_drom_read to work */
2026 		sw->ports[i].sw = sw;
2027 		sw->ports[i].port = i;
2028 	}
2029 
2030 	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
2031 	if (ret > 0)
2032 		sw->cap_plug_events = ret;
2033 
2034 	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
2035 	if (ret > 0)
2036 		sw->cap_lc = ret;
2037 
2038 	/* Root switch is always authorized */
2039 	if (!route)
2040 		sw->authorized = true;
2041 
2042 	device_initialize(&sw->dev);
2043 	sw->dev.parent = parent;
2044 	sw->dev.bus = &tb_bus_type;
2045 	sw->dev.type = &tb_switch_type;
2046 	sw->dev.groups = switch_groups;
2047 	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2048 
2049 	return sw;
2050 
2051 err_free_sw_ports:
2052 	kfree(sw->ports);
2053 	kfree(sw);
2054 
2055 	return ERR_PTR(ret);
2056 }
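
/*
 * A minimal usage sketch (hypothetical connection manager code, not part
 * of this file): a router discovered at @route below @parent_sw would
 * typically be allocated, configured and added to the domain, dropping
 * the reference with tb_switch_put() on the error paths:
 *
 *	struct tb_switch *sw;
 *	int ret;
 *
 *	sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route);
 *	if (IS_ERR(sw))
 *		return PTR_ERR(sw);
 *
 *	ret = tb_switch_configure(sw);
 *	if (ret) {
 *		tb_switch_put(sw);
 *		return ret;
 *	}
 *
 *	ret = tb_switch_add(sw);
 *	if (ret)
 *		tb_switch_put(sw);
 */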
2057 
2058 /**
2059  * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
2060  * @tb: Pointer to the owning domain
2061  * @parent: Parent device for this switch
2062  * @route: Route string for this switch
2063  *
2064  * This creates a switch in safe mode. A switch in safe mode lacks all
2065  * capabilities except the DMA configuration port until it is flashed
2066  * with a valid NVM firmware.
2067  *
2068  * The returned switch must be released by calling tb_switch_put().
2069  *
2070  * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
2071  */
2072 struct tb_switch *
2073 tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
2074 {
2075 	struct tb_switch *sw;
2076 
2077 	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2078 	if (!sw)
2079 		return ERR_PTR(-ENOMEM);
2080 
2081 	sw->tb = tb;
2082 	sw->config.depth = tb_route_length(route);
2083 	sw->config.route_hi = upper_32_bits(route);
2084 	sw->config.route_lo = lower_32_bits(route);
2085 	sw->safe_mode = true;
2086 
2087 	device_initialize(&sw->dev);
2088 	sw->dev.parent = parent;
2089 	sw->dev.bus = &tb_bus_type;
2090 	sw->dev.type = &tb_switch_type;
2091 	sw->dev.groups = switch_groups;
2092 	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2093 
2094 	return sw;
2095 }
2096 
2097 /**
2098  * tb_switch_configure() - Uploads configuration to the switch
2099  * @sw: Switch to configure
2100  *
2101  * Call this function before the switch is added to the system. It
2102  * uploads the configuration to the switch and makes it available for
2103  * the connection manager to use. Can be called again after the switch
2104  * resumes from low power states to re-initialize it.
2105  *
2106  * Return: %0 in case of success and negative errno in case of failure
2107  */
2108 int tb_switch_configure(struct tb_switch *sw)
2109 {
2110 	struct tb *tb = sw->tb;
2111 	u64 route;
2112 	int ret;
2113 
2114 	route = tb_route(sw);
2115 
2116 	tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
2117 	       sw->config.enabled ? "restoring" : "initializing", route,
2118 	       tb_route_length(route), sw->config.upstream_port_number);
2119 
2120 	sw->config.enabled = 1;
2121 
2122 	if (tb_switch_is_usb4(sw)) {
2123 		/*
2124 		 * For USB4 devices, we need to program the CM version
2125 		 * accordingly so that it knows to expose all the
2126 		 * additional capabilities.
2127 		 */
2128 		sw->config.cmuv = USB4_VERSION_1_0;
2129 
2130 		/* Enumerate the switch */
2131 		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2132 				  ROUTER_CS_1, 4);
2133 		if (ret)
2134 			return ret;
2135 
2136 		ret = usb4_switch_setup(sw);
2137 	} else {
2138 		if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
2139 			tb_sw_warn(sw, "unknown switch vendor id %#x\n",
2140 				   sw->config.vendor_id);
2141 
2142 		if (!sw->cap_plug_events) {
2143 			tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS, aborting\n");
2144 			return -ENODEV;
2145 		}
2146 
2147 		/* Enumerate the switch */
2148 		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2149 				  ROUTER_CS_1, 3);
2150 	}
2151 	if (ret)
2152 		return ret;
2153 
2154 	return tb_plug_events_active(sw, true);
2155 }
2156 
2157 static int tb_switch_set_uuid(struct tb_switch *sw)
2158 {
2159 	bool uid = false;
2160 	u32 uuid[4];
2161 	int ret;
2162 
2163 	if (sw->uuid)
2164 		return 0;
2165 
2166 	if (tb_switch_is_usb4(sw)) {
2167 		ret = usb4_switch_read_uid(sw, &sw->uid);
2168 		if (ret)
2169 			return ret;
2170 		uid = true;
2171 	} else {
2172 		/*
2173 		 * The newer controllers include a fused UUID as part of
2174 		 * the link controller specific registers.
2175 		 */
2176 		ret = tb_lc_read_uuid(sw, uuid);
2177 		if (ret) {
2178 			if (ret != -EINVAL)
2179 				return ret;
2180 			uid = true;
2181 		}
2182 	}
2183 
2184 	if (uid) {
2185 		/*
2186 		 * ICM generates the UUID based on the UID and fills the
2187 		 * upper two words with ones. This does not strictly follow
2188 		 * the UUID format but we want to be compatible with ICM so
2189 		 * we do the same here.
2190 		 */
2191 		uuid[0] = sw->uid & 0xffffffff;
2192 		uuid[1] = (sw->uid >> 32) & 0xffffffff;
2193 		uuid[2] = 0xffffffff;
2194 		uuid[3] = 0xffffffff;
2195 	}
2196 
2197 	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
2198 	if (!sw->uuid)
2199 		return -ENOMEM;
2200 	return 0;
2201 }
2202 
2203 static int tb_switch_add_dma_port(struct tb_switch *sw)
2204 {
2205 	u32 status;
2206 	int ret;
2207 
2208 	switch (sw->generation) {
2209 	case 2:
2210 		/* Only root switch can be upgraded */
2211 		if (tb_route(sw))
2212 			return 0;
2213 
2214 		fallthrough;
2215 	case 3:
2216 	case 4:
2217 		ret = tb_switch_set_uuid(sw);
2218 		if (ret)
2219 			return ret;
2220 		break;
2221 
2222 	default:
2223 		/*
2224 		 * DMA port is the only thing available when the switch
2225 		 * is in safe mode.
2226 		 */
2227 		if (!sw->safe_mode)
2228 			return 0;
2229 		break;
2230 	}
2231 
2232 	if (sw->no_nvm_upgrade)
2233 		return 0;
2234 
2235 	if (tb_switch_is_usb4(sw)) {
2236 		ret = usb4_switch_nvm_authenticate_status(sw, &status);
2237 		if (ret)
2238 			return ret;
2239 
2240 		if (status) {
2241 			tb_sw_info(sw, "switch flash authentication failed\n");
2242 			nvm_set_auth_status(sw, status);
2243 		}
2244 
2245 		return 0;
2246 	}
2247 
2248 	/* Root switch DMA port requires running firmware */
2249 	if (!tb_route(sw) && !tb_switch_is_icm(sw))
2250 		return 0;
2251 
2252 	sw->dma_port = dma_port_alloc(sw);
2253 	if (!sw->dma_port)
2254 		return 0;
2255 
2256 	/*
2257 	 * If a status is already set then authentication failed when
2258 	 * dma_port_flash_update_auth() returned. Power cycling is not
2259 	 * needed (it was done already) so the only thing we do here is
2260 	 * unblock runtime PM of the root port.
2261 	 */
2262 	nvm_get_auth_status(sw, &status);
2263 	if (status) {
2264 		if (!tb_route(sw))
2265 			nvm_authenticate_complete_dma_port(sw);
2266 		return 0;
2267 	}
2268 
2269 	/*
2270 	 * Check the status of the previous flash authentication attempt.
2271 	 * If there was one, we need to power cycle the switch in any case
2272 	 * to make it functional again.
2273 	 */
2274 	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
2275 	if (ret <= 0)
2276 		return ret;
2277 
2278 	/* Now we can allow root port to suspend again */
2279 	if (!tb_route(sw))
2280 		nvm_authenticate_complete_dma_port(sw);
2281 
2282 	if (status) {
2283 		tb_sw_info(sw, "switch flash authentication failed\n");
2284 		nvm_set_auth_status(sw, status);
2285 	}
2286 
2287 	tb_sw_info(sw, "power cycling the switch now\n");
2288 	dma_port_power_cycle(sw->dma_port);
2289 
2290 	/*
2291 	 * We return an error here, which causes adding the switch to fail.
2292 	 * It should appear back after the power cycle is complete.
2293 	 */
2294 	return -ESHUTDOWN;
2295 }
2296 
2297 static void tb_switch_default_link_ports(struct tb_switch *sw)
2298 {
2299 	int i;
2300 
2301 	for (i = 1; i <= sw->config.max_port_number; i += 2) {
2302 		struct tb_port *port = &sw->ports[i];
2303 		struct tb_port *subordinate;
2304 
2305 		if (!tb_port_is_null(port))
2306 			continue;
2307 
2308 		/* Check for the subordinate port */
2309 		if (i == sw->config.max_port_number ||
2310 		    !tb_port_is_null(&sw->ports[i + 1]))
2311 			continue;
2312 
2313 		/* Link them if not already linked (by the DROM) */
2314 		subordinate = &sw->ports[i + 1];
2315 		if (!port->dual_link_port && !subordinate->dual_link_port) {
2316 			port->link_nr = 0;
2317 			port->dual_link_port = subordinate;
2318 			subordinate->link_nr = 1;
2319 			subordinate->dual_link_port = port;
2320 
2321 			tb_sw_dbg(sw, "linked ports %d <-> %d\n",
2322 				  port->port, subordinate->port);
2323 		}
2324 	}
2325 }
2326 
2327 static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
2328 {
2329 	const struct tb_port *up = tb_upstream_port(sw);
2330 
2331 	if (!up->dual_link_port || !up->dual_link_port->remote)
2332 		return false;
2333 
2334 	if (tb_switch_is_usb4(sw))
2335 		return usb4_switch_lane_bonding_possible(sw);
2336 	return tb_lc_lane_bonding_possible(sw);
2337 }
2338 
2339 static int tb_switch_update_link_attributes(struct tb_switch *sw)
2340 {
2341 	struct tb_port *up;
2342 	bool change = false;
2343 	int ret;
2344 
2345 	if (!tb_route(sw) || tb_switch_is_icm(sw))
2346 		return 0;
2347 
2348 	up = tb_upstream_port(sw);
2349 
2350 	ret = tb_port_get_link_speed(up);
2351 	if (ret < 0)
2352 		return ret;
2353 	if (sw->link_speed != ret)
2354 		change = true;
2355 	sw->link_speed = ret;
2356 
2357 	ret = tb_port_get_link_width(up);
2358 	if (ret < 0)
2359 		return ret;
2360 	if (sw->link_width != ret)
2361 		change = true;
2362 	sw->link_width = ret;
2363 
2364 	/* Notify userspace that there is a possible link attribute change */
2365 	if (device_is_registered(&sw->dev) && change)
2366 		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
2367 
2368 	return 0;
2369 }
2370 
2371 /**
2372  * tb_switch_lane_bonding_enable() - Enable lane bonding
2373  * @sw: Switch to enable lane bonding
2374  *
2375  * The connection manager can call this function to enable lane bonding
2376  * of a switch. If conditions are correct and both switches support the
2377  * feature, lanes are bonded. It is safe to call this for any switch.
2378  */
2379 int tb_switch_lane_bonding_enable(struct tb_switch *sw)
2380 {
2381 	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2382 	struct tb_port *up, *down;
2383 	u64 route = tb_route(sw);
2384 	int ret;
2385 
2386 	if (!route)
2387 		return 0;
2388 
2389 	if (!tb_switch_lane_bonding_possible(sw))
2390 		return 0;
2391 
2392 	up = tb_upstream_port(sw);
2393 	down = tb_port_at(route, parent);
2394 
2395 	if (!tb_port_is_width_supported(up, 2) ||
2396 	    !tb_port_is_width_supported(down, 2))
2397 		return 0;
2398 
2399 	ret = tb_port_lane_bonding_enable(up);
2400 	if (ret) {
2401 		tb_port_warn(up, "failed to enable lane bonding\n");
2402 		return ret;
2403 	}
2404 
2405 	ret = tb_port_lane_bonding_enable(down);
2406 	if (ret) {
2407 		tb_port_warn(down, "failed to enable lane bonding\n");
2408 		tb_port_lane_bonding_disable(up);
2409 		return ret;
2410 	}
2411 
2412 	tb_switch_update_link_attributes(sw);
2413 
2414 	tb_sw_dbg(sw, "lane bonding enabled\n");
2415 	return ret;
2416 }
2417 
2418 /**
2419  * tb_switch_lane_bonding_disable() - Disable lane bonding
2420  * @sw: Switch whose lane bonding to disable
2421  *
2422  * Disables lane bonding between @sw and parent. This can be called even
2423  * if lanes were not bonded originally.
2424  */
2425 void tb_switch_lane_bonding_disable(struct tb_switch *sw)
2426 {
2427 	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2428 	struct tb_port *up, *down;
2429 
2430 	if (!tb_route(sw))
2431 		return;
2432 
2433 	up = tb_upstream_port(sw);
2434 	if (!up->bonded)
2435 		return;
2436 
2437 	down = tb_port_at(tb_route(sw), parent);
2438 
2439 	tb_port_lane_bonding_disable(up);
2440 	tb_port_lane_bonding_disable(down);
2441 
2442 	tb_switch_update_link_attributes(sw);
2443 	tb_sw_dbg(sw, "lane bonding disabled\n");
2444 }
2445 
2446 /**
2447  * tb_switch_configure_link() - Set link configured
2448  * @sw: Switch whose link is configured
2449  *
2450  * Sets the link upstream from @sw configured (from both ends) so that
2451  * it will not be disconnected when the domain exits sleep. Can be
2452  * called for any switch.
2453  *
2454  * It is recommended that this is called after lane bonding is enabled.
2455  *
2456  * Returns %0 on success and negative errno in case of error.
2457  */
2458 int tb_switch_configure_link(struct tb_switch *sw)
2459 {
2460 	struct tb_port *up, *down;
2461 	int ret;
2462 
2463 	if (!tb_route(sw) || tb_switch_is_icm(sw))
2464 		return 0;
2465 
2466 	up = tb_upstream_port(sw);
2467 	if (tb_switch_is_usb4(up->sw))
2468 		ret = usb4_port_configure(up);
2469 	else
2470 		ret = tb_lc_configure_port(up);
2471 	if (ret)
2472 		return ret;
2473 
2474 	down = up->remote;
2475 	if (tb_switch_is_usb4(down->sw))
2476 		return usb4_port_configure(down);
2477 	return tb_lc_configure_port(down);
2478 }
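
/*
 * A short sketch of the recommended ordering (hypothetical caller, not
 * part of this file): enable lane bonding first and only then mark the
 * link configured so that the bonded link is kept across sleep:
 *
 *	ret = tb_switch_lane_bonding_enable(sw);
 *	if (ret)
 *		tb_sw_warn(sw, "failed to enable lane bonding\n");
 *
 *	ret = tb_switch_configure_link(sw);
 *	if (ret)
 *		tb_sw_warn(sw, "failed to configure link\n");
 */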
2479 
2480 /**
2481  * tb_switch_unconfigure_link() - Unconfigure link
2482  * @sw: Switch whose link is unconfigured
2483  *
2484  * Sets the link unconfigured so that @sw will be disconnected if the
2485  * domain exits sleep.
2486  */
2487 void tb_switch_unconfigure_link(struct tb_switch *sw)
2488 {
2489 	struct tb_port *up, *down;
2490 
2491 	if (sw->is_unplugged)
2492 		return;
2493 	if (!tb_route(sw) || tb_switch_is_icm(sw))
2494 		return;
2495 
2496 	up = tb_upstream_port(sw);
2497 	if (tb_switch_is_usb4(up->sw))
2498 		usb4_port_unconfigure(up);
2499 	else
2500 		tb_lc_unconfigure_port(up);
2501 
2502 	down = up->remote;
2503 	if (tb_switch_is_usb4(down->sw))
2504 		usb4_port_unconfigure(down);
2505 	else
2506 		tb_lc_unconfigure_port(down);
2507 }
2508 
2509 /**
2510  * tb_switch_add() - Add a switch to the domain
2511  * @sw: Switch to add
2512  *
2513  * This is the last step in adding a switch to the domain. It reads
2514  * identification information from the DROM and initializes ports so
2515  * that they can be used to connect other switches. The switch will be
2516  * exposed to userspace when this function successfully returns. To
2517  * remove and release the switch, call tb_switch_remove().
2518  *
2519  * Return: %0 in case of success and negative errno in case of failure
2520  */
2521 int tb_switch_add(struct tb_switch *sw)
2522 {
2523 	int i, ret;
2524 
2525 	/*
2526 	 * Initialize the DMA control port now, before we read the DROM.
2527 	 * Recent host controllers have a more complete DROM in NVM that
2528 	 * includes vendor and model identification strings which we then
2529 	 * expose to userspace. The NVM can be accessed through the DMA
2530 	 * configuration based mailbox.
2531 	 */
2532 	ret = tb_switch_add_dma_port(sw);
2533 	if (ret) {
2534 		dev_err(&sw->dev, "failed to add DMA port\n");
2535 		return ret;
2536 	}
2537 
2538 	if (!sw->safe_mode) {
2539 		/* read drom */
2540 		ret = tb_drom_read(sw);
2541 		if (ret) {
2542 			dev_err(&sw->dev, "reading DROM failed\n");
2543 			return ret;
2544 		}
2545 		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
2546 
2547 		ret = tb_switch_set_uuid(sw);
2548 		if (ret) {
2549 			dev_err(&sw->dev, "failed to set UUID\n");
2550 			return ret;
2551 		}
2552 
2553 		for (i = 0; i <= sw->config.max_port_number; i++) {
2554 			if (sw->ports[i].disabled) {
2555 				tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
2556 				continue;
2557 			}
2558 			ret = tb_init_port(&sw->ports[i]);
2559 			if (ret) {
2560 				dev_err(&sw->dev, "failed to initialize port %d\n", i);
2561 				return ret;
2562 			}
2563 		}
2564 
2565 		tb_switch_default_link_ports(sw);
2566 
2567 		ret = tb_switch_update_link_attributes(sw);
2568 		if (ret)
2569 			return ret;
2570 
2571 		ret = tb_switch_tmu_init(sw);
2572 		if (ret)
2573 			return ret;
2574 	}
2575 
2576 	ret = device_add(&sw->dev);
2577 	if (ret) {
2578 		dev_err(&sw->dev, "failed to add device: %d\n", ret);
2579 		return ret;
2580 	}
2581 
2582 	if (tb_route(sw)) {
2583 		dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
2584 			 sw->vendor, sw->device);
2585 		if (sw->vendor_name && sw->device_name)
2586 			dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
2587 				 sw->device_name);
2588 	}
2589 
2590 	ret = tb_switch_nvm_add(sw);
2591 	if (ret) {
2592 		dev_err(&sw->dev, "failed to add NVM devices\n");
2593 		device_del(&sw->dev);
2594 		return ret;
2595 	}
2596 
2597 	/*
2598 	 * Thunderbolt routers do not generate wakeups themselves but
2599 	 * they forward wakeups from tunneled protocols, so enable
2600 	 * device wakeup here.
2601 	 */
2602 	device_init_wakeup(&sw->dev, true);
2603 
2604 	pm_runtime_set_active(&sw->dev);
2605 	if (sw->rpm) {
2606 		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
2607 		pm_runtime_use_autosuspend(&sw->dev);
2608 		pm_runtime_mark_last_busy(&sw->dev);
2609 		pm_runtime_enable(&sw->dev);
2610 		pm_request_autosuspend(&sw->dev);
2611 	}
2612 
2613 	tb_switch_debugfs_init(sw);
2614 	return 0;
2615 }
2616 
2617 /**
2618  * tb_switch_remove() - Remove and release a switch
2619  * @sw: Switch to remove
2620  *
2621  * This will remove the switch from the domain and release it once the
2622  * last reference drops to zero. If there are switches connected below
2623  * this switch, they will be removed as well.
2624  */
2625 void tb_switch_remove(struct tb_switch *sw)
2626 {
2627 	struct tb_port *port;
2628 
2629 	tb_switch_debugfs_remove(sw);
2630 
2631 	if (sw->rpm) {
2632 		pm_runtime_get_sync(&sw->dev);
2633 		pm_runtime_disable(&sw->dev);
2634 	}
2635 
2636 	/* port 0 is the switch itself and never has a remote */
2637 	tb_switch_for_each_port(sw, port) {
2638 		if (tb_port_has_remote(port)) {
2639 			tb_switch_remove(port->remote->sw);
2640 			port->remote = NULL;
2641 		} else if (port->xdomain) {
2642 			tb_xdomain_remove(port->xdomain);
2643 			port->xdomain = NULL;
2644 		}
2645 
2646 		/* Remove any downstream retimers */
2647 		tb_retimer_remove_all(port);
2648 	}
2649 
2650 	if (!sw->is_unplugged)
2651 		tb_plug_events_active(sw, false);
2652 
2653 	tb_switch_nvm_remove(sw);
2654 
2655 	if (tb_route(sw))
2656 		dev_info(&sw->dev, "device disconnected\n");
2657 	device_unregister(&sw->dev);
2658 }
2659 
2660 /**
2661  * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
2662  * @sw: Router to mark unplugged
2663  */
2664 void tb_sw_set_unplugged(struct tb_switch *sw)
2665 {
2666 	struct tb_port *port;
2667 
2668 	if (sw == sw->tb->root_switch) {
2669 		tb_sw_WARN(sw, "cannot unplug root switch\n");
2670 		return;
2671 	}
2672 	if (sw->is_unplugged) {
2673 		tb_sw_WARN(sw, "is_unplugged already set\n");
2674 		return;
2675 	}
2676 	sw->is_unplugged = true;
2677 	tb_switch_for_each_port(sw, port) {
2678 		if (tb_port_has_remote(port))
2679 			tb_sw_set_unplugged(port->remote->sw);
2680 		else if (port->xdomain)
2681 			port->xdomain->is_unplugged = true;
2682 	}
2683 }
2684 
2685 static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
2686 {
2687 	if (flags)
2688 		tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
2689 	else
2690 		tb_sw_dbg(sw, "disabling wakeup\n");
2691 
2692 	if (tb_switch_is_usb4(sw))
2693 		return usb4_switch_set_wake(sw, flags);
2694 	return tb_lc_set_wake(sw, flags);
2695 }
2696 
2697 int tb_switch_resume(struct tb_switch *sw)
2698 {
2699 	struct tb_port *port;
2700 	int err;
2701 
2702 	tb_sw_dbg(sw, "resuming switch\n");
2703 
2704 	/*
2705 	 * Check the UID of the connected switches, except for the root
2706 	 * switch which we assume cannot be removed.
2707 	 */
2708 	if (tb_route(sw)) {
2709 		u64 uid;
2710 
2711 		/*
2712 		 * Check first that we can still read the switch config
2713 		 * space. It may be that there is now another domain
2714 		 * connected.
2715 		 */
2716 		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
2717 		if (err < 0) {
2718 			tb_sw_info(sw, "switch not present anymore\n");
2719 			return err;
2720 		}
2721 
2722 		if (tb_switch_is_usb4(sw))
2723 			err = usb4_switch_read_uid(sw, &uid);
2724 		else
2725 			err = tb_drom_read_uid_only(sw, &uid);
2726 		if (err) {
2727 			tb_sw_warn(sw, "uid read failed\n");
2728 			return err;
2729 		}
2730 		if (sw->uid != uid) {
2731 			tb_sw_info(sw,
2732 				"changed while suspended (uid %#llx -> %#llx)\n",
2733 				sw->uid, uid);
2734 			return -ENODEV;
2735 		}
2736 	}
2737 
2738 	err = tb_switch_configure(sw);
2739 	if (err)
2740 		return err;
2741 
2742 	/* Disable wakes */
2743 	tb_switch_set_wake(sw, 0);
2744 
2745 	err = tb_switch_tmu_init(sw);
2746 	if (err)
2747 		return err;
2748 
2749 	/* check for surviving downstream switches */
2750 	tb_switch_for_each_port(sw, port) {
2751 		if (!tb_port_has_remote(port) && !port->xdomain) {
2752 			/*
2753 			 * For disconnected downstream lane adapters,
2754 			 * start lane initialization now so that we
2755 			 * detect future connects.
2756 			 */
2757 			if (!tb_is_upstream_port(port) && tb_port_is_null(port))
2758 				tb_port_start_lane_initialization(port);
2759 			continue;
2760 		} else if (port->xdomain) {
2761 			/*
2762 			 * Start lane initialization for XDomain so the
2763 			 * link gets re-established.
2764 			 */
2765 			tb_port_start_lane_initialization(port);
2766 		}
2767 
2768 		if (tb_wait_for_port(port, true) <= 0) {
2769 			tb_port_warn(port,
2770 				     "lost during suspend, disconnecting\n");
2771 			if (tb_port_has_remote(port))
2772 				tb_sw_set_unplugged(port->remote->sw);
2773 			else if (port->xdomain)
2774 				port->xdomain->is_unplugged = true;
2775 		} else if (tb_port_has_remote(port) || port->xdomain) {
2776 			/*
2777 			 * Always unlock the port so the downstream
2778 			 * switch/domain is accessible.
2779 			 */
2780 			if (tb_port_unlock(port))
2781 				tb_port_warn(port, "failed to unlock port\n");
2782 			if (port->remote && tb_switch_resume(port->remote->sw)) {
2783 				tb_port_warn(port,
2784 					     "lost during suspend, disconnecting\n");
2785 				tb_sw_set_unplugged(port->remote->sw);
2786 			}
2787 		}
2788 	}
2789 	return 0;
2790 }
2791 
2792 /**
2793  * tb_switch_suspend() - Put a switch to sleep
2794  * @sw: Switch to suspend
2795  * @runtime: Is this runtime suspend or system sleep
2796  *
2797  * Suspends the router and all its children. Enables wakes according to
2798  * the value of @runtime and then sets the sleep bit for the router. If
2799  * @sw is the host router, the domain is ready to go to sleep once this
2800  * function returns.
2801  */
2802 void tb_switch_suspend(struct tb_switch *sw, bool runtime)
2803 {
2804 	unsigned int flags = 0;
2805 	struct tb_port *port;
2806 	int err;
2807 
2808 	tb_sw_dbg(sw, "suspending switch\n");
2809 
2810 	err = tb_plug_events_active(sw, false);
2811 	if (err)
2812 		return;
2813 
2814 	tb_switch_for_each_port(sw, port) {
2815 		if (tb_port_has_remote(port))
2816 			tb_switch_suspend(port->remote->sw, runtime);
2817 	}
2818 
2819 	if (runtime) {
2820 		/* Trigger wake when something is plugged in/out */
2821 		flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
2822 		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
2823 	} else if (device_may_wakeup(&sw->dev)) {
2824 		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
2825 	}
2826 
2827 	tb_switch_set_wake(sw, flags);
2828 
2829 	if (tb_switch_is_usb4(sw))
2830 		usb4_switch_set_sleep(sw);
2831 	else
2832 		tb_lc_set_sleep(sw);
2833 }
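
/*
 * A minimal sketch of how the two suspend flavors might be used
 * (hypothetical caller, not part of this file); suspending the host
 * router last puts the whole domain to sleep:
 *
 *	For system sleep (wakes follow device_may_wakeup()):
 *		tb_switch_suspend(tb->root_switch, false);
 *
 *	For runtime suspend (additionally wake on plug and unplug):
 *		tb_switch_suspend(tb->root_switch, true);
 */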
2834 
2835 /**
2836  * tb_switch_query_dp_resource() - Query availability of DP resource
2837  * @sw: Switch whose DP resource is queried
2838  * @in: DP IN port
2839  *
2840  * Queries availability of the DP resource for DP tunneling using switch
2841  * specific means. Returns %true if the resource is available.
2842  */
2843 bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
2844 {
2845 	if (tb_switch_is_usb4(sw))
2846 		return usb4_switch_query_dp_resource(sw, in);
2847 	return tb_lc_dp_sink_query(sw, in);
2848 }
2849 
2850 /**
2851  * tb_switch_alloc_dp_resource() - Allocate available DP resource
2852  * @sw: Switch whose DP resource is allocated
2853  * @in: DP IN port
2854  *
2855  * Allocates a DP resource for DP tunneling. The resource must be
2856  * available for this to succeed (see tb_switch_query_dp_resource()).
2857  * Returns %0 on success and negative errno otherwise.
2858  */
2859 int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
2860 {
2861 	if (tb_switch_is_usb4(sw))
2862 		return usb4_switch_alloc_dp_resource(sw, in);
2863 	return tb_lc_dp_sink_alloc(sw, in);
2864 }
2865 
2866 /**
2867  * tb_switch_dealloc_dp_resource() - De-allocate DP resource
2868  * @sw: Switch whose DP resource is de-allocated
2869  * @in: DP IN port
2870  *
2871  * De-allocates a DP resource that was previously allocated for DP
2872  * tunneling.
2873  */
2874 void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
2875 {
2876 	int ret;
2877 
2878 	if (tb_switch_is_usb4(sw))
2879 		ret = usb4_switch_dealloc_dp_resource(sw, in);
2880 	else
2881 		ret = tb_lc_dp_sink_dealloc(sw, in);
2882 
2883 	if (ret)
2884 		tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
2885 			   in->port);
2886 }
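
/*
 * A short sketch of the expected DP resource pattern (hypothetical
 * caller, not part of this file): query availability first, allocate the
 * resource before setting up the DP tunnel and de-allocate it once the
 * tunnel has been torn down:
 *
 *	if (!tb_switch_query_dp_resource(sw, in))
 *		return -EBUSY;
 *
 *	ret = tb_switch_alloc_dp_resource(sw, in);
 *	if (ret)
 *		return ret;
 *
 *	... set up and later tear down the DP tunnel ...
 *
 *	tb_switch_dealloc_dp_resource(sw, in);
 */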
2887 
2888 struct tb_sw_lookup {
2889 	struct tb *tb;
2890 	u8 link;
2891 	u8 depth;
2892 	const uuid_t *uuid;
2893 	u64 route;
2894 };
2895 
2896 static int tb_switch_match(struct device *dev, const void *data)
2897 {
2898 	struct tb_switch *sw = tb_to_switch(dev);
2899 	const struct tb_sw_lookup *lookup = data;
2900 
2901 	if (!sw)
2902 		return 0;
2903 	if (sw->tb != lookup->tb)
2904 		return 0;
2905 
2906 	if (lookup->uuid)
2907 		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
2908 
2909 	if (lookup->route) {
2910 		return sw->config.route_lo == lower_32_bits(lookup->route) &&
2911 		       sw->config.route_hi == upper_32_bits(lookup->route);
2912 	}
2913 
2914 	/* Root switch is matched only by depth */
2915 	if (!lookup->depth)
2916 		return !sw->depth;
2917 
2918 	return sw->link == lookup->link && sw->depth == lookup->depth;
2919 }
2920 
2921 /**
2922  * tb_switch_find_by_link_depth() - Find switch by link and depth
2923  * @tb: Domain the switch belongs to
2924  * @link: Link number the switch is connected to
2925  * @depth: Depth of the switch in the link
2926  *
2927  * The returned switch has its reference count increased so the caller
2928  * needs to call tb_switch_put() when done with the switch.
2929  */
2930 struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
2931 {
2932 	struct tb_sw_lookup lookup;
2933 	struct device *dev;
2934 
2935 	memset(&lookup, 0, sizeof(lookup));
2936 	lookup.tb = tb;
2937 	lookup.link = link;
2938 	lookup.depth = depth;
2939 
2940 	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
2941 	if (dev)
2942 		return tb_to_switch(dev);
2943 
2944 	return NULL;
2945 }
2946 
2947 /**
2948  * tb_switch_find_by_uuid() - Find switch by UUID
2949  * @tb: Domain the switch belongs to
2950  * @uuid: UUID to look for
2951  *
2952  * The returned switch has its reference count increased so the caller
2953  * needs to call tb_switch_put() when done with the switch.
2954  */
2955 struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
2956 {
2957 	struct tb_sw_lookup lookup;
2958 	struct device *dev;
2959 
2960 	memset(&lookup, 0, sizeof(lookup));
2961 	lookup.tb = tb;
2962 	lookup.uuid = uuid;
2963 
2964 	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
2965 	if (dev)
2966 		return tb_to_switch(dev);
2967 
2968 	return NULL;
2969 }
2970 
2971 /**
2972  * tb_switch_find_by_route() - Find switch by route string
2973  * @tb: Domain the switch belongs to
2974  * @route: Route string to look for
2975  *
2976  * The returned switch has its reference count increased so the caller
2977  * needs to call tb_switch_put() when done with the switch.
2978  */
2979 struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
2980 {
2981 	struct tb_sw_lookup lookup;
2982 	struct device *dev;
2983 
2984 	if (!route)
2985 		return tb_switch_get(tb->root_switch);
2986 
2987 	memset(&lookup, 0, sizeof(lookup));
2988 	lookup.tb = tb;
2989 	lookup.route = route;
2990 
2991 	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
2992 	if (dev)
2993 		return tb_to_switch(dev);
2994 
2995 	return NULL;
2996 }
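
/*
 * A minimal sketch of the lookup pattern shared by the
 * tb_switch_find_by_*() helpers above (hypothetical caller, not part of
 * this file); the reference taken by the lookup must be dropped with
 * tb_switch_put():
 *
 *	struct tb_switch *sw;
 *
 *	sw = tb_switch_find_by_route(tb, route);
 *	if (!sw)
 *		return -ENODEV;
 *
 *	... use sw ...
 *
 *	tb_switch_put(sw);
 */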
2997 
2998 /**
2999  * tb_switch_find_port() - return the first port of @type on @sw or NULL
3000  * @sw: Switch to find the port from
3001  * @type: Port type to look for
3002  */
3003 struct tb_port *tb_switch_find_port(struct tb_switch *sw,
3004 				    enum tb_port_type type)
3005 {
3006 	struct tb_port *port;
3007 
3008 	tb_switch_for_each_port(sw, port) {
3009 		if (port->config.type == type)
3010 			return port;
3011 	}
3012 
3013 	return NULL;
3014 }
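
/*
 * For example (hypothetical caller, assuming the TB_TYPE_DP_HDMI_IN
 * adapter type), the first DP IN adapter of a router could be looked up
 * with:
 *
 *	struct tb_port *in;
 *
 *	in = tb_switch_find_port(sw, TB_TYPE_DP_HDMI_IN);
 *	if (!in)
 *		return -ENODEV;
 */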
3015