xref: /openbmc/linux/drivers/thunderbolt/switch.c (revision 3fc41476)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt driver - switch/port utility functions
4  *
5  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6  * Copyright (C) 2018, Intel Corporation
7  */
8 
9 #include <linux/delay.h>
10 #include <linux/idr.h>
11 #include <linux/nvmem-provider.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/sched/signal.h>
14 #include <linux/sizes.h>
15 #include <linux/slab.h>
16 #include <linux/vmalloc.h>
17 
18 #include "tb.h"
19 
20 /* Switch NVM support */
21 
22 #define NVM_DEVID		0x05
23 #define NVM_VERSION		0x08
24 #define NVM_CSS			0x10
25 #define NVM_FLASH_SIZE		0x45
26 
27 #define NVM_MIN_SIZE		SZ_32K
28 #define NVM_MAX_SIZE		SZ_512K
29 
30 static DEFINE_IDA(nvm_ida);
31 
32 struct nvm_auth_status {
33 	struct list_head list;
34 	uuid_t uuid;
35 	u32 status;
36 };
37 
38 /*
39  * Hold NVM authentication failure status for each switch. This
40  * information needs to stay around even when the switch gets power
41  * cycled, so we keep it separately.
42  */
43 static LIST_HEAD(nvm_auth_status_cache);
44 static DEFINE_MUTEX(nvm_auth_status_lock);
45 
46 static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
47 {
48 	struct nvm_auth_status *st;
49 
50 	list_for_each_entry(st, &nvm_auth_status_cache, list) {
51 		if (uuid_equal(&st->uuid, sw->uuid))
52 			return st;
53 	}
54 
55 	return NULL;
56 }
57 
58 static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
59 {
60 	struct nvm_auth_status *st;
61 
62 	mutex_lock(&nvm_auth_status_lock);
63 	st = __nvm_get_auth_status(sw);
64 	mutex_unlock(&nvm_auth_status_lock);
65 
66 	*status = st ? st->status : 0;
67 }
68 
69 static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
70 {
71 	struct nvm_auth_status *st;
72 
73 	if (WARN_ON(!sw->uuid))
74 		return;
75 
76 	mutex_lock(&nvm_auth_status_lock);
77 	st = __nvm_get_auth_status(sw);
78 
79 	if (!st) {
80 		st = kzalloc(sizeof(*st), GFP_KERNEL);
81 		if (!st)
82 			goto unlock;
83 
84 		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
85 		INIT_LIST_HEAD(&st->list);
86 		list_add_tail(&st->list, &nvm_auth_status_cache);
87 	}
88 
89 	st->status = status;
90 unlock:
91 	mutex_unlock(&nvm_auth_status_lock);
92 }
93 
94 static void nvm_clear_auth_status(const struct tb_switch *sw)
95 {
96 	struct nvm_auth_status *st;
97 
98 	mutex_lock(&nvm_auth_status_lock);
99 	st = __nvm_get_auth_status(sw);
100 	if (st) {
101 		list_del(&st->list);
102 		kfree(st);
103 	}
104 	mutex_unlock(&nvm_auth_status_lock);
105 }
106 
107 static int nvm_validate_and_write(struct tb_switch *sw)
108 {
109 	unsigned int image_size, hdr_size;
110 	const u8 *buf = sw->nvm->buf;
111 	u16 ds_size;
112 	int ret;
113 
114 	if (!buf)
115 		return -EINVAL;
116 
117 	image_size = sw->nvm->buf_data_size;
118 	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
119 		return -EINVAL;
120 
121 	/*
122 	 * FARB pointer must point inside the image and must at least
123 	 * contain parts of the digital section we will be reading here.
124 	 */
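	/*
	 * Image layout implied by the checks below (a sketch, not taken
	 * from the Thunderbolt spec): the low 24 bits of the first dword
	 * give the offset of the digital section (hdr_size), which must
	 * be 4k aligned. The digital section starts with a 16-bit size
	 * field and carries the device ID at offset NVM_DEVID. On
	 * generation < 3 hardware the CSS headers at offset NVM_CSS of
	 * the image are written to DMA_PORT_CSS_ADDRESS separately.
	 */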
125 	hdr_size = (*(u32 *)buf) & 0xffffff;
126 	if (hdr_size + NVM_DEVID + 2 >= image_size)
127 		return -EINVAL;
128 
129 	/* Digital section start should be aligned to 4k page */
130 	if (!IS_ALIGNED(hdr_size, SZ_4K))
131 		return -EINVAL;
132 
133 	/*
134 	 * Read digital section size and check that it also fits inside
135 	 * the image.
136 	 */
137 	ds_size = *(u16 *)(buf + hdr_size);
138 	if (ds_size >= image_size)
139 		return -EINVAL;
140 
141 	if (!sw->safe_mode) {
142 		u16 device_id;
143 
144 		/*
145 		 * Make sure the device ID in the image matches the one
146 		 * we read from the switch config space.
147 		 */
148 		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
149 		if (device_id != sw->config.device_id)
150 			return -EINVAL;
151 
152 		if (sw->generation < 3) {
153 			/* Write CSS headers first */
154 			ret = dma_port_flash_write(sw->dma_port,
155 				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
156 				DMA_PORT_CSS_MAX_SIZE);
157 			if (ret)
158 				return ret;
159 		}
160 
161 		/* Skip headers in the image */
162 		buf += hdr_size;
163 		image_size -= hdr_size;
164 	}
165 
166 	return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
167 }
168 
169 static int nvm_authenticate_host(struct tb_switch *sw)
170 {
171 	int ret;
172 
173 	/*
174 	 * Root switch NVM upgrade requires that we disconnect the
175 	 * existing paths first (in case it is not in safe mode
176 	 * already).
177 	 */
178 	if (!sw->safe_mode) {
179 		ret = tb_domain_disconnect_all_paths(sw->tb);
180 		if (ret)
181 			return ret;
182 		/*
183 		 * The host controller goes away pretty soon after this if
184 		 * everything goes well, so getting a timeout is expected.
185 		 */
186 		ret = dma_port_flash_update_auth(sw->dma_port);
187 		return ret == -ETIMEDOUT ? 0 : ret;
188 	}
189 
190 	/*
191 	 * We can get out of safe mode simply by power cycling the
192 	 * switch.
193 	 */
194 	dma_port_power_cycle(sw->dma_port);
195 	return 0;
196 }
197 
198 static int nvm_authenticate_device(struct tb_switch *sw)
199 {
200 	int ret, retries = 10;
201 
202 	ret = dma_port_flash_update_auth(sw->dma_port);
203 	if (ret && ret != -ETIMEDOUT)
204 		return ret;
205 
206 	/*
207 	 * Poll here for the authentication status. It takes some time
208 	 * for the device to respond (we get a timeout for a while). Once
209 	 * we get a response the device needs to be power cycled for the
210 	 * new NVM to be taken into use.
211 	 */
212 	do {
213 		u32 status;
214 
215 		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
216 		if (ret < 0 && ret != -ETIMEDOUT)
217 			return ret;
218 		if (ret > 0) {
219 			if (status) {
220 				tb_sw_warn(sw, "failed to authenticate NVM\n");
221 				nvm_set_auth_status(sw, status);
222 			}
223 
224 			tb_sw_info(sw, "power cycling the switch now\n");
225 			dma_port_power_cycle(sw->dma_port);
226 			return 0;
227 		}
228 
229 		msleep(500);
230 	} while (--retries);
231 
232 	return -ETIMEDOUT;
233 }
234 
235 static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
236 			      size_t bytes)
237 {
238 	struct tb_switch *sw = priv;
239 	int ret;
240 
241 	pm_runtime_get_sync(&sw->dev);
242 	ret = dma_port_flash_read(sw->dma_port, offset, val, bytes);
243 	pm_runtime_mark_last_busy(&sw->dev);
244 	pm_runtime_put_autosuspend(&sw->dev);
245 
246 	return ret;
247 }
248 
249 static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
250 			       size_t bytes)
251 {
252 	struct tb_switch *sw = priv;
253 	int ret = 0;
254 
255 	if (!mutex_trylock(&sw->tb->lock))
256 		return restart_syscall();
257 
258 	/*
259 	 * Since writing the NVM image might require some special steps,
260 	 * for example when CSS headers are written, we cache the image
261 	 * locally here and handle the special cases when the user asks
262 	 * us to authenticate the image.
263 	 */
264 	if (!sw->nvm->buf) {
265 		sw->nvm->buf = vmalloc(NVM_MAX_SIZE);
266 		if (!sw->nvm->buf) {
267 			ret = -ENOMEM;
268 			goto unlock;
269 		}
270 	}
271 
272 	sw->nvm->buf_data_size = offset + bytes;
273 	memcpy(sw->nvm->buf + offset, val, bytes);
274 
275 unlock:
276 	mutex_unlock(&sw->tb->lock);
277 
278 	return ret;
279 }
280 
281 static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
282 					   size_t size, bool active)
283 {
284 	struct nvmem_config config;
285 
286 	memset(&config, 0, sizeof(config));
287 
288 	if (active) {
289 		config.name = "nvm_active";
290 		config.reg_read = tb_switch_nvm_read;
291 		config.read_only = true;
292 	} else {
293 		config.name = "nvm_non_active";
294 		config.reg_write = tb_switch_nvm_write;
295 		config.root_only = true;
296 	}
297 
298 	config.id = id;
299 	config.stride = 4;
300 	config.word_size = 4;
301 	config.size = size;
302 	config.dev = &sw->dev;
303 	config.owner = THIS_MODULE;
304 	config.priv = sw;
305 
306 	return nvmem_register(&config);
307 }
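/*
 * Note: the nvmem core exposes each device registered above as a child
 * of the switch device with an "nvmem" binary attribute, so the images
 * end up accessible roughly as .../<switch>/nvm_active<id>/nvmem and
 * .../<switch>/nvm_non_active<id>/nvmem (paths assumed from the
 * config.name/config.id/config.dev values set here; the exact layout
 * is decided by the nvmem framework).
 */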
308 
309 static int tb_switch_nvm_add(struct tb_switch *sw)
310 {
311 	struct nvmem_device *nvm_dev;
312 	struct tb_switch_nvm *nvm;
313 	u32 val;
314 	int ret;
315 
316 	if (!sw->dma_port)
317 		return 0;
318 
319 	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
320 	if (!nvm)
321 		return -ENOMEM;
322 
323 	nvm->id = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
324 
325 	/*
326 	 * If the switch is in safe mode, the only accessible portion of
327 	 * the NVM is the non-active one where userspace is expected to
328 	 * write a new functional NVM image.
329 	 */
330 	if (!sw->safe_mode) {
331 		u32 nvm_size, hdr_size;
332 
333 		ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val,
334 					  sizeof(val));
335 		if (ret)
336 			goto err_ida;
337 
338 		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
339 		nvm_size = (SZ_1M << (val & 7)) / 8;
340 		nvm_size = (nvm_size - hdr_size) / 2;
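		/*
		 * Worked example (assuming the register encodes the flash
		 * size as 1 Mbit << (val & 7)): val & 7 == 2 gives
		 * (SZ_1M << 2) / 8 = 512 KB of flash. With the 16 KB
		 * generation 3 header that leaves (512K - 16K) / 2 = 248 KB
		 * for each of the active and non-active regions.
		 */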
341 
342 		ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val,
343 					  sizeof(val));
344 		if (ret)
345 			goto err_ida;
346 
347 		nvm->major = val >> 16;
348 		nvm->minor = val >> 8;
349 
350 		nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true);
351 		if (IS_ERR(nvm_dev)) {
352 			ret = PTR_ERR(nvm_dev);
353 			goto err_ida;
354 		}
355 		nvm->active = nvm_dev;
356 	}
357 
358 	nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
359 	if (IS_ERR(nvm_dev)) {
360 		ret = PTR_ERR(nvm_dev);
361 		goto err_nvm_active;
362 	}
363 	nvm->non_active = nvm_dev;
364 
365 	sw->nvm = nvm;
366 	return 0;
367 
368 err_nvm_active:
369 	if (nvm->active)
370 		nvmem_unregister(nvm->active);
371 err_ida:
372 	ida_simple_remove(&nvm_ida, nvm->id);
373 	kfree(nvm);
374 
375 	return ret;
376 }
377 
378 static void tb_switch_nvm_remove(struct tb_switch *sw)
379 {
380 	struct tb_switch_nvm *nvm;
381 
382 	nvm = sw->nvm;
383 	sw->nvm = NULL;
384 
385 	if (!nvm)
386 		return;
387 
388 	/* Remove authentication status in case the switch is unplugged */
389 	if (!nvm->authenticating)
390 		nvm_clear_auth_status(sw);
391 
392 	nvmem_unregister(nvm->non_active);
393 	if (nvm->active)
394 		nvmem_unregister(nvm->active);
395 	ida_simple_remove(&nvm_ida, nvm->id);
396 	vfree(nvm->buf);
397 	kfree(nvm);
398 }
399 
400 /* port utility functions */
401 
402 static const char *tb_port_type(struct tb_regs_port_header *port)
403 {
404 	switch (port->type >> 16) {
405 	case 0:
406 		switch ((u8) port->type) {
407 		case 0:
408 			return "Inactive";
409 		case 1:
410 			return "Port";
411 		case 2:
412 			return "NHI";
413 		default:
414 			return "unknown";
415 		}
416 	case 0x2:
417 		return "Ethernet";
418 	case 0x8:
419 		return "SATA";
420 	case 0xe:
421 		return "DP/HDMI";
422 	case 0x10:
423 		return "PCIe";
424 	case 0x20:
425 		return "USB";
426 	default:
427 		return "unknown";
428 	}
429 }
430 
431 static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
432 {
433 	tb_dbg(tb,
434 	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
435 	       port->port_number, port->vendor_id, port->device_id,
436 	       port->revision, port->thunderbolt_version, tb_port_type(port),
437 	       port->type);
438 	tb_dbg(tb, "  Max hop id (in/out): %d/%d\n",
439 	       port->max_in_hop_id, port->max_out_hop_id);
440 	tb_dbg(tb, "  Max counters: %d\n", port->max_counters);
441 	tb_dbg(tb, "  NFC Credits: %#x\n", port->nfc_credits);
442 }
443 
444 /**
445  * tb_port_state() - get connectedness state of a port
446  *
447  * The port must have a TB_CAP_PHY (i.e. it should be a real port).
448  *
449  * Return: Returns an enum tb_port_state on success or an error code on failure.
450  */
451 static int tb_port_state(struct tb_port *port)
452 {
453 	struct tb_cap_phy phy;
454 	int res;
455 	if (port->cap_phy == 0) {
456 		tb_port_WARN(port, "does not have a PHY\n");
457 		return -EINVAL;
458 	}
459 	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
460 	if (res)
461 		return res;
462 	return phy.state;
463 }
464 
465 /**
466  * tb_wait_for_port() - wait for a port to become ready
467  *
468  * Wait up to 1 second for a port to reach state TB_PORT_UP. If
469  * wait_if_unplugged is set then we also wait if the port is in state
470  * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
471  * switch resume). Otherwise we only wait if a device is registered but the link
472  * has not yet been established.
473  *
474  * Return: Returns an error code on failure. Returns 0 if the port is not
475  * connected or failed to reach state TB_PORT_UP within one second. Returns 1
476  * if the port is connected and in state TB_PORT_UP.
477  */
478 int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
479 {
480 	int retries = 10;
481 	int state;
482 	if (!port->cap_phy) {
483 		tb_port_WARN(port, "does not have PHY\n");
484 		return -EINVAL;
485 	}
486 	if (tb_is_upstream_port(port)) {
487 		tb_port_WARN(port, "is the upstream port\n");
488 		return -EINVAL;
489 	}
490 
491 	while (retries--) {
492 		state = tb_port_state(port);
493 		if (state < 0)
494 			return state;
495 		if (state == TB_PORT_DISABLED) {
496 			tb_port_dbg(port, "is disabled (state: 0)\n");
497 			return 0;
498 		}
499 		if (state == TB_PORT_UNPLUGGED) {
500 			if (wait_if_unplugged) {
501 				/* used during resume */
502 				tb_port_dbg(port,
503 					    "is unplugged (state: 7), retrying...\n");
504 				msleep(100);
505 				continue;
506 			}
507 			tb_port_dbg(port, "is unplugged (state: 7)\n");
508 			return 0;
509 		}
510 		if (state == TB_PORT_UP) {
511 			tb_port_dbg(port, "is connected, link is up (state: 2)\n");
512 			return 1;
513 		}
514 
515 		/*
516 		 * After plug-in the state is TB_PORT_CONNECTING. Give it some
517 		 * time.
518 		 */
519 		tb_port_dbg(port,
520 			    "is connected, link is not up (state: %d), retrying...\n",
521 			    state);
522 		msleep(100);
523 	}
524 	tb_port_warn(port,
525 		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
526 	return 0;
527 }
528 
529 /**
530  * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
531  *
532  * Change the number of NFC credits allocated to @port by @credits. To remove
533  * NFC credits, pass a negative amount of credits.
534  *
535  * Return: Returns 0 on success or an error code on failure.
536  */
537 int tb_port_add_nfc_credits(struct tb_port *port, int credits)
538 {
539 	u32 nfc_credits;
540 
541 	if (credits == 0 || port->sw->is_unplugged)
542 		return 0;
543 
544 	nfc_credits = port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK;
545 	nfc_credits += credits;
546 
547 	tb_port_dbg(port, "adding %d NFC credits to %lu",
548 		    credits, port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK);
549 
550 	port->config.nfc_credits &= ~TB_PORT_NFC_CREDITS_MASK;
551 	port->config.nfc_credits |= nfc_credits;
552 
553 	return tb_port_write(port, &port->config.nfc_credits,
554 			     TB_CFG_PORT, 4, 1);
555 }
556 
557 /**
558  * tb_port_set_initial_credits() - Set initial port link credits allocated
559  * @port: Port to set the initial credits
560  * @credits: Number of credits to allocate
561  *
562  * Set initial credits value to be used for ingress shared buffering.
563  */
564 int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
565 {
566 	u32 data;
567 	int ret;
568 
569 	ret = tb_port_read(port, &data, TB_CFG_PORT, 5, 1);
570 	if (ret)
571 		return ret;
572 
573 	data &= ~TB_PORT_LCA_MASK;
574 	data |= (credits << TB_PORT_LCA_SHIFT) & TB_PORT_LCA_MASK;
575 
576 	return tb_port_write(port, &data, TB_CFG_PORT, 5, 1);
577 }
578 
579 /**
580  * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
581  *
582  * Return: Returns 0 on success or an error code on failure.
583  */
584 int tb_port_clear_counter(struct tb_port *port, int counter)
585 {
586 	u32 zero[3] = { 0, 0, 0 };
587 	tb_port_dbg(port, "clearing counter %d\n", counter);
588 	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
589 }
590 
591 /**
592  * tb_init_port() - initialize a port
593  *
594  * This is a helper method for tb_switch_alloc(). Does not check or initialize
595  * any downstream switches.
596  *
597  * Return: Returns 0 on success or an error code on failure.
598  */
599 static int tb_init_port(struct tb_port *port)
600 {
601 	int res;
602 	int cap;
603 
604 	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
605 	if (res)
606 		return res;
607 
608 	/* Port 0 is the switch itself and has no PHY. */
609 	if (port->config.type == TB_TYPE_PORT && port->port != 0) {
610 		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
611 
612 		if (cap > 0)
613 			port->cap_phy = cap;
614 		else
615 			tb_port_WARN(port, "non switch port without a PHY\n");
616 	} else if (port->port != 0) {
617 		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
618 		if (cap > 0)
619 			port->cap_adap = cap;
620 	}
621 
622 	tb_dump_port(port->sw->tb, &port->config);
623 
624 	/* Control port does not need HopID allocation */
625 	if (port->port) {
626 		ida_init(&port->in_hopids);
627 		ida_init(&port->out_hopids);
628 	}
629 
630 	return 0;
631 
632 }
633 
634 static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
635 			       int max_hopid)
636 {
637 	int port_max_hopid;
638 	struct ida *ida;
639 
640 	if (in) {
641 		port_max_hopid = port->config.max_in_hop_id;
642 		ida = &port->in_hopids;
643 	} else {
644 		port_max_hopid = port->config.max_out_hop_id;
645 		ida = &port->out_hopids;
646 	}
647 
648 	/* HopIDs 0-7 are reserved */
649 	if (min_hopid < TB_PATH_MIN_HOPID)
650 		min_hopid = TB_PATH_MIN_HOPID;
651 
652 	if (max_hopid < 0 || max_hopid > port_max_hopid)
653 		max_hopid = port_max_hopid;
654 
655 	return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
656 }
657 
658 /**
659  * tb_port_alloc_in_hopid() - Allocate input HopID from port
660  * @port: Port to allocate HopID for
661  * @min_hopid: Minimum acceptable input HopID
662  * @max_hopid: Maximum acceptable input HopID
663  *
664  * Return: HopID between @min_hopid and @max_hopid or negative errno in
665  * case of error.
666  */
667 int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
668 {
669 	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
670 }
671 
672 /**
673  * tb_port_alloc_out_hopid() - Allocate output HopID from port
674  * @port: Port to allocate HopID for
675  * @min_hopid: Minimum acceptable output HopID
676  * @max_hopid: Maximum acceptable output HopID
677  *
678  * Return: HopID between @min_hopid and @max_hopid or negative errno in
679  * case of error.
680  */
681 int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
682 {
683 	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
684 }
685 
686 /**
687  * tb_port_release_in_hopid() - Release allocated input HopID from port
688  * @port: Port whose HopID to release
689  * @hopid: HopID to release
690  */
691 void tb_port_release_in_hopid(struct tb_port *port, int hopid)
692 {
693 	ida_simple_remove(&port->in_hopids, hopid);
694 }
695 
696 /**
697  * tb_port_release_out_hopid() - Release allocated output HopID from port
698  * @port: Port whose HopID to release
699  * @hopid: HopID to release
700  */
701 void tb_port_release_out_hopid(struct tb_port *port, int hopid)
702 {
703 	ida_simple_remove(&port->out_hopids, hopid);
704 }
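/*
 * Example usage (a sketch): allocate an input HopID anywhere in the
 * valid range of the port and release it when the path hop is torn
 * down. A negative max_hopid means "use the port maximum".
 *
 *	hopid = tb_port_alloc_in_hopid(port, TB_PATH_MIN_HOPID, -1);
 *	if (hopid < 0)
 *		return hopid;
 *	...
 *	tb_port_release_in_hopid(port, hopid);
 */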
705 
706 /**
707  * tb_next_port_on_path() - Return next port for given port on a path
708  * @start: Start port of the walk
709  * @end: End port of the walk
710  * @prev: Previous port (%NULL if this is the first)
711  *
712  * This function can be used to walk from one port to another if they
713  * are connected through zero or more switches. If @prev is a dual
714  * link port, the function follows that link and returns the other end
715  * of that same link.
716  *
717  * If the @end port has been reached, return %NULL.
718  *
719  * Domain tb->lock must be held when this function is called.
720  */
721 struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
722 				     struct tb_port *prev)
723 {
724 	struct tb_port *next;
725 
726 	if (!prev)
727 		return start;
728 
729 	if (prev->sw == end->sw) {
730 		if (prev == end)
731 			return NULL;
732 		return end;
733 	}
734 
735 	if (start->sw->config.depth < end->sw->config.depth) {
736 		if (prev->remote &&
737 		    prev->remote->sw->config.depth > prev->sw->config.depth)
738 			next = prev->remote;
739 		else
740 			next = tb_port_at(tb_route(end->sw), prev->sw);
741 	} else {
742 		if (tb_is_upstream_port(prev)) {
743 			next = prev->remote;
744 		} else {
745 			next = tb_upstream_port(prev->sw);
746 			/*
747 			 * Keep the same link if prev and next are both
748 			 * dual link ports.
749 			 */
750 			if (next->dual_link_port &&
751 			    next->link_nr != prev->link_nr) {
752 				next = next->dual_link_port;
753 			}
754 		}
755 	}
756 
757 	return next;
758 }
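/*
 * Example walk (a sketch): visit every port on the path from start to
 * end, both ends included. tb_next_port_on_path() returns %NULL once
 * the end port has been handed out, which terminates the loop:
 *
 *	struct tb_port *p = NULL;
 *
 *	while ((p = tb_next_port_on_path(start, end, p)))
 *		handle_port(p);		(handle_port() is hypothetical)
 */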
759 
760 /**
761  * tb_port_is_enabled() - Is the adapter port enabled
762  * @port: Port to check
763  */
764 bool tb_port_is_enabled(struct tb_port *port)
765 {
766 	switch (port->config.type) {
767 	case TB_TYPE_PCIE_UP:
768 	case TB_TYPE_PCIE_DOWN:
769 		return tb_pci_port_is_enabled(port);
770 
771 	case TB_TYPE_DP_HDMI_IN:
772 	case TB_TYPE_DP_HDMI_OUT:
773 		return tb_dp_port_is_enabled(port);
774 
775 	default:
776 		return false;
777 	}
778 }
779 
780 /**
781  * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
782  * @port: PCIe port to check
783  */
784 bool tb_pci_port_is_enabled(struct tb_port *port)
785 {
786 	u32 data;
787 
788 	if (tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1))
789 		return false;
790 
791 	return !!(data & TB_PCI_EN);
792 }
793 
794 /**
795  * tb_pci_port_enable() - Enable PCIe adapter port
796  * @port: PCIe port to enable
797  * @enable: Enable/disable the PCIe adapter
798  */
799 int tb_pci_port_enable(struct tb_port *port, bool enable)
800 {
801 	u32 word = enable ? TB_PCI_EN : 0x0;
802 	if (!port->cap_adap)
803 		return -ENXIO;
804 	return tb_port_write(port, &word, TB_CFG_PORT, port->cap_adap, 1);
805 }
806 
807 /**
808  * tb_dp_port_hpd_is_active() - Is HPD already active
809  * @port: DP out port to check
810  *
811  * Checks if the DP OUT adapter port has the HPD bit already set.
812  */
813 int tb_dp_port_hpd_is_active(struct tb_port *port)
814 {
815 	u32 data;
816 	int ret;
817 
818 	ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 2, 1);
819 	if (ret)
820 		return ret;
821 
822 	return !!(data & TB_DP_HDP);
823 }
824 
825 /**
826  * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
827  * @port: Port to clear HPD
828  *
829  * If the DP IN port has HPD set, this function can be used to clear it.
830  */
831 int tb_dp_port_hpd_clear(struct tb_port *port)
832 {
833 	u32 data;
834 	int ret;
835 
836 	ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
837 	if (ret)
838 		return ret;
839 
840 	data |= TB_DP_HPDC;
841 	return tb_port_write(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
842 }
843 
844 /**
845  * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
846  * @port: DP IN/OUT port to set hops
847  * @video: Video Hop ID
848  * @aux_tx: AUX TX Hop ID
849  * @aux_rx: AUX RX Hop ID
850  *
851  * Programs specified Hop IDs for DP IN/OUT port.
852  */
853 int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
854 			unsigned int aux_tx, unsigned int aux_rx)
855 {
856 	u32 data[2];
857 	int ret;
858 
859 	ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
860 			   ARRAY_SIZE(data));
861 	if (ret)
862 		return ret;
863 
864 	data[0] &= ~TB_DP_VIDEO_HOPID_MASK;
865 	data[1] &= ~(TB_DP_AUX_RX_HOPID_MASK | TB_DP_AUX_TX_HOPID_MASK);
866 
867 	data[0] |= (video << TB_DP_VIDEO_HOPID_SHIFT) & TB_DP_VIDEO_HOPID_MASK;
868 	data[1] |= aux_tx & TB_DP_AUX_TX_HOPID_MASK;
869 	data[1] |= (aux_rx << TB_DP_AUX_RX_HOPID_SHIFT) & TB_DP_AUX_RX_HOPID_MASK;
870 
871 	return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap,
872 			     ARRAY_SIZE(data));
873 }
874 
875 /**
876  * tb_dp_port_is_enabled() - Is DP adapter port enabled
877  * @port: DP adapter port to check
878  */
879 bool tb_dp_port_is_enabled(struct tb_port *port)
880 {
881 	u32 data;
882 
883 	if (tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1))
884 		return false;
885 
886 	return !!(data & (TB_DP_VIDEO_EN | TB_DP_AUX_EN));
887 }
888 
889 /**
890  * tb_dp_port_enable() - Enables/disables DP paths of a port
891  * @port: DP IN/OUT port
892  * @enable: Enable/disable DP path
893  *
894  * Once Hop IDs are programmed, DP paths can be enabled or disabled by
895  * calling this function.
896  */
897 int tb_dp_port_enable(struct tb_port *port, bool enable)
898 {
899 	u32 data;
900 	int ret;
901 
902 	ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1);
903 	if (ret)
904 		return ret;
905 
906 	if (enable)
907 		data |= TB_DP_VIDEO_EN | TB_DP_AUX_EN;
908 	else
909 		data &= ~(TB_DP_VIDEO_EN | TB_DP_AUX_EN);
910 
911 	return tb_port_write(port, &data, TB_CFG_PORT, port->cap_adap, 1);
912 }
913 
914 /* switch utility functions */
915 
916 static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
917 {
918 	tb_dbg(tb, " Switch: %x:%x (Revision: %d, TB Version: %d)\n",
919 	       sw->vendor_id, sw->device_id, sw->revision,
920 	       sw->thunderbolt_version);
921 	tb_dbg(tb, "  Max Port Number: %d\n", sw->max_port_number);
922 	tb_dbg(tb, "  Config:\n");
923 	tb_dbg(tb,
924 		"   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
925 	       sw->upstream_port_number, sw->depth,
926 	       (((u64) sw->route_hi) << 32) | sw->route_lo,
927 	       sw->enabled, sw->plug_events_delay);
928 	tb_dbg(tb, "   unknown1: %#x unknown4: %#x\n",
929 	       sw->__unknown1, sw->__unknown4);
930 }
931 
932 /**
933  * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
934  *
935  * Return: Returns 0 on success or an error code on failure.
936  */
937 int tb_switch_reset(struct tb *tb, u64 route)
938 {
939 	struct tb_cfg_result res;
940 	struct tb_regs_switch_header header = {
941 		header.route_hi = route >> 32,
942 		header.route_lo = route,
943 		header.enabled = true,
944 	};
945 	tb_dbg(tb, "resetting switch at %llx\n", route);
946 	res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route,
947 			0, 2, 2, 2);
948 	if (res.err)
949 		return res.err;
950 	res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT);
951 	if (res.err > 0)
952 		return -EIO;
953 	return res.err;
954 }
955 
956 /**
957  * tb_plug_events_active() - enable/disable plug events on a switch
958  *
959  * Also configures a sane plug_events_delay of 255ms.
960  *
961  * Return: Returns 0 on success or an error code on failure.
962  */
963 static int tb_plug_events_active(struct tb_switch *sw, bool active)
964 {
965 	u32 data;
966 	int res;
967 
968 	if (!sw->config.enabled)
969 		return 0;
970 
971 	sw->config.plug_events_delay = 0xff;
972 	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
973 	if (res)
974 		return res;
975 
976 	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
977 	if (res)
978 		return res;
979 
980 	if (active) {
981 		data = data & 0xFFFFFF83;
982 		switch (sw->config.device_id) {
983 		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
984 		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
985 		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
986 			break;
987 		default:
988 			data |= 4;
989 		}
990 	} else {
991 		data = data | 0x7c;
992 	}
993 	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
994 			   sw->cap_plug_events + 1, 1);
995 }
996 
997 static ssize_t authorized_show(struct device *dev,
998 			       struct device_attribute *attr,
999 			       char *buf)
1000 {
1001 	struct tb_switch *sw = tb_to_switch(dev);
1002 
1003 	return sprintf(buf, "%u\n", sw->authorized);
1004 }
1005 
1006 static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
1007 {
1008 	int ret = -EINVAL;
1009 
1010 	if (!mutex_trylock(&sw->tb->lock))
1011 		return restart_syscall();
1012 
1013 	if (sw->authorized)
1014 		goto unlock;
1015 
1016 	/*
1017 	 * Make sure there is no PCIe rescan ongoing when a new PCIe
1018 	 * tunnel is created. Otherwise the PCIe rescan code might find
1019 	 * the new tunnel too early.
1020 	 */
1021 	pci_lock_rescan_remove();
1022 	pm_runtime_get_sync(&sw->dev);
1023 
1024 	switch (val) {
1025 	/* Approve switch */
1026 	case 1:
1027 		if (sw->key)
1028 			ret = tb_domain_approve_switch_key(sw->tb, sw);
1029 		else
1030 			ret = tb_domain_approve_switch(sw->tb, sw);
1031 		break;
1032 
1033 	/* Challenge switch */
1034 	case 2:
1035 		if (sw->key)
1036 			ret = tb_domain_challenge_switch_key(sw->tb, sw);
1037 		break;
1038 
1039 	default:
1040 		break;
1041 	}
1042 
1043 	pm_runtime_mark_last_busy(&sw->dev);
1044 	pm_runtime_put_autosuspend(&sw->dev);
1045 	pci_unlock_rescan_remove();
1046 
1047 	if (!ret) {
1048 		sw->authorized = val;
1049 		/* Notify status change to the userspace */
1050 		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
1051 	}
1052 
1053 unlock:
1054 	mutex_unlock(&sw->tb->lock);
1055 	return ret;
1056 }
1057 
1058 static ssize_t authorized_store(struct device *dev,
1059 				struct device_attribute *attr,
1060 				const char *buf, size_t count)
1061 {
1062 	struct tb_switch *sw = tb_to_switch(dev);
1063 	unsigned int val;
1064 	ssize_t ret;
1065 
1066 	ret = kstrtouint(buf, 0, &val);
1067 	if (ret)
1068 		return ret;
1069 	if (val > 2)
1070 		return -EINVAL;
1071 
1072 	ret = tb_switch_set_authorized(sw, val);
1073 
1074 	return ret ? ret : count;
1075 }
1076 static DEVICE_ATTR_RW(authorized);
1077 
1078 static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
1079 			 char *buf)
1080 {
1081 	struct tb_switch *sw = tb_to_switch(dev);
1082 
1083 	return sprintf(buf, "%u\n", sw->boot);
1084 }
1085 static DEVICE_ATTR_RO(boot);
1086 
1087 static ssize_t device_show(struct device *dev, struct device_attribute *attr,
1088 			   char *buf)
1089 {
1090 	struct tb_switch *sw = tb_to_switch(dev);
1091 
1092 	return sprintf(buf, "%#x\n", sw->device);
1093 }
1094 static DEVICE_ATTR_RO(device);
1095 
1096 static ssize_t
1097 device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1098 {
1099 	struct tb_switch *sw = tb_to_switch(dev);
1100 
1101 	return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
1102 }
1103 static DEVICE_ATTR_RO(device_name);
1104 
1105 static ssize_t key_show(struct device *dev, struct device_attribute *attr,
1106 			char *buf)
1107 {
1108 	struct tb_switch *sw = tb_to_switch(dev);
1109 	ssize_t ret;
1110 
1111 	if (!mutex_trylock(&sw->tb->lock))
1112 		return restart_syscall();
1113 
1114 	if (sw->key)
1115 		ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
1116 	else
1117 		ret = sprintf(buf, "\n");
1118 
1119 	mutex_unlock(&sw->tb->lock);
1120 	return ret;
1121 }
1122 
1123 static ssize_t key_store(struct device *dev, struct device_attribute *attr,
1124 			 const char *buf, size_t count)
1125 {
1126 	struct tb_switch *sw = tb_to_switch(dev);
1127 	u8 key[TB_SWITCH_KEY_SIZE];
1128 	ssize_t ret = count;
1129 	bool clear = false;
1130 
1131 	if (!strcmp(buf, "\n"))
1132 		clear = true;
1133 	else if (hex2bin(key, buf, sizeof(key)))
1134 		return -EINVAL;
1135 
1136 	if (!mutex_trylock(&sw->tb->lock))
1137 		return restart_syscall();
1138 
1139 	if (sw->authorized) {
1140 		ret = -EBUSY;
1141 	} else {
1142 		kfree(sw->key);
1143 		if (clear) {
1144 			sw->key = NULL;
1145 		} else {
1146 			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
1147 			if (!sw->key)
1148 				ret = -ENOMEM;
1149 		}
1150 	}
1151 
1152 	mutex_unlock(&sw->tb->lock);
1153 	return ret;
1154 }
1155 static DEVICE_ATTR(key, 0600, key_show, key_store);
1156 
1157 static void nvm_authenticate_start(struct tb_switch *sw)
1158 {
1159 	struct pci_dev *root_port;
1160 
1161 	/*
1162 	 * During host router NVM upgrade we should not allow the root port
1163 	 * to go into D3cold because some root ports cannot trigger PME
1164 	 * themselves. To be on the safe side, keep the root port in D0
1165 	 * during the whole upgrade process.
1166 	 */
1167 	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
1168 	if (root_port)
1169 		pm_runtime_get_noresume(&root_port->dev);
1170 }
1171 
1172 static void nvm_authenticate_complete(struct tb_switch *sw)
1173 {
1174 	struct pci_dev *root_port;
1175 
1176 	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
1177 	if (root_port)
1178 		pm_runtime_put(&root_port->dev);
1179 }
1180 
1181 static ssize_t nvm_authenticate_show(struct device *dev,
1182 	struct device_attribute *attr, char *buf)
1183 {
1184 	struct tb_switch *sw = tb_to_switch(dev);
1185 	u32 status;
1186 
1187 	nvm_get_auth_status(sw, &status);
1188 	return sprintf(buf, "%#x\n", status);
1189 }
1190 
1191 static ssize_t nvm_authenticate_store(struct device *dev,
1192 	struct device_attribute *attr, const char *buf, size_t count)
1193 {
1194 	struct tb_switch *sw = tb_to_switch(dev);
1195 	bool val;
1196 	int ret;
1197 
1198 	if (!mutex_trylock(&sw->tb->lock))
1199 		return restart_syscall();
1200 
1201 	/* Bail out if the NVMem devices have not been added yet */
1202 	if (!sw->nvm) {
1203 		ret = -EAGAIN;
1204 		goto exit_unlock;
1205 	}
1206 
1207 	ret = kstrtobool(buf, &val);
1208 	if (ret)
1209 		goto exit_unlock;
1210 
1211 	/* Always clear the authentication status */
1212 	nvm_clear_auth_status(sw);
1213 
1214 	if (val) {
1215 		if (!sw->nvm->buf) {
1216 			ret = -EINVAL;
1217 			goto exit_unlock;
1218 		}
1219 
1220 		pm_runtime_get_sync(&sw->dev);
1221 		ret = nvm_validate_and_write(sw);
1222 		if (ret) {
1223 			pm_runtime_mark_last_busy(&sw->dev);
1224 			pm_runtime_put_autosuspend(&sw->dev);
1225 			goto exit_unlock;
1226 		}
1227 
1228 		sw->nvm->authenticating = true;
1229 
1230 		if (!tb_route(sw)) {
1231 			/*
1232 			 * Keep root port from suspending as long as the
1233 			 * NVM upgrade process is running.
1234 			 */
1235 			nvm_authenticate_start(sw);
1236 			ret = nvm_authenticate_host(sw);
1237 			if (ret)
1238 				nvm_authenticate_complete(sw);
1239 		} else {
1240 			ret = nvm_authenticate_device(sw);
1241 		}
1242 		pm_runtime_mark_last_busy(&sw->dev);
1243 		pm_runtime_put_autosuspend(&sw->dev);
1244 	}
1245 
1246 exit_unlock:
1247 	mutex_unlock(&sw->tb->lock);
1248 
1249 	if (ret)
1250 		return ret;
1251 	return count;
1252 }
1253 static DEVICE_ATTR_RW(nvm_authenticate);
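/*
 * Expected userspace flow for an NVM upgrade (a sketch; the Thunderbolt
 * admin documentation is the authoritative reference): write the new
 * image into the non-active NVMem device and then write 1 to this
 * attribute, for example:
 *
 *	# dd if=image.nvm of=.../<switch>/nvm_non_active0/nvmem
 *	# echo 1 > .../<switch>/nvm_authenticate
 *
 * Reading nvm_authenticate afterwards returns the last authentication
 * status (0 means success).
 */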
1254 
1255 static ssize_t nvm_version_show(struct device *dev,
1256 				struct device_attribute *attr, char *buf)
1257 {
1258 	struct tb_switch *sw = tb_to_switch(dev);
1259 	int ret;
1260 
1261 	if (!mutex_trylock(&sw->tb->lock))
1262 		return restart_syscall();
1263 
1264 	if (sw->safe_mode)
1265 		ret = -ENODATA;
1266 	else if (!sw->nvm)
1267 		ret = -EAGAIN;
1268 	else
1269 		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
1270 
1271 	mutex_unlock(&sw->tb->lock);
1272 
1273 	return ret;
1274 }
1275 static DEVICE_ATTR_RO(nvm_version);
1276 
1277 static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
1278 			   char *buf)
1279 {
1280 	struct tb_switch *sw = tb_to_switch(dev);
1281 
1282 	return sprintf(buf, "%#x\n", sw->vendor);
1283 }
1284 static DEVICE_ATTR_RO(vendor);
1285 
1286 static ssize_t
1287 vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1288 {
1289 	struct tb_switch *sw = tb_to_switch(dev);
1290 
1291 	return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
1292 }
1293 static DEVICE_ATTR_RO(vendor_name);
1294 
1295 static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
1296 			      char *buf)
1297 {
1298 	struct tb_switch *sw = tb_to_switch(dev);
1299 
1300 	return sprintf(buf, "%pUb\n", sw->uuid);
1301 }
1302 static DEVICE_ATTR_RO(unique_id);
1303 
1304 static struct attribute *switch_attrs[] = {
1305 	&dev_attr_authorized.attr,
1306 	&dev_attr_boot.attr,
1307 	&dev_attr_device.attr,
1308 	&dev_attr_device_name.attr,
1309 	&dev_attr_key.attr,
1310 	&dev_attr_nvm_authenticate.attr,
1311 	&dev_attr_nvm_version.attr,
1312 	&dev_attr_vendor.attr,
1313 	&dev_attr_vendor_name.attr,
1314 	&dev_attr_unique_id.attr,
1315 	NULL,
1316 };
1317 
1318 static umode_t switch_attr_is_visible(struct kobject *kobj,
1319 				      struct attribute *attr, int n)
1320 {
1321 	struct device *dev = container_of(kobj, struct device, kobj);
1322 	struct tb_switch *sw = tb_to_switch(dev);
1323 
1324 	if (attr == &dev_attr_key.attr) {
1325 		if (tb_route(sw) &&
1326 		    sw->tb->security_level == TB_SECURITY_SECURE &&
1327 		    sw->security_level == TB_SECURITY_SECURE)
1328 			return attr->mode;
1329 		return 0;
1330 	} else if (attr == &dev_attr_nvm_authenticate.attr ||
1331 		   attr == &dev_attr_nvm_version.attr) {
1332 		if (sw->dma_port)
1333 			return attr->mode;
1334 		return 0;
1335 	} else if (attr == &dev_attr_boot.attr) {
1336 		if (tb_route(sw))
1337 			return attr->mode;
1338 		return 0;
1339 	}
1340 
1341 	return sw->safe_mode ? 0 : attr->mode;
1342 }
1343 
1344 static struct attribute_group switch_group = {
1345 	.is_visible = switch_attr_is_visible,
1346 	.attrs = switch_attrs,
1347 };
1348 
1349 static const struct attribute_group *switch_groups[] = {
1350 	&switch_group,
1351 	NULL,
1352 };
1353 
1354 static void tb_switch_release(struct device *dev)
1355 {
1356 	struct tb_switch *sw = tb_to_switch(dev);
1357 	int i;
1358 
1359 	dma_port_free(sw->dma_port);
1360 
1361 	for (i = 1; i <= sw->config.max_port_number; i++) {
1362 		if (!sw->ports[i].disabled) {
1363 			ida_destroy(&sw->ports[i].in_hopids);
1364 			ida_destroy(&sw->ports[i].out_hopids);
1365 		}
1366 	}
1367 
1368 	kfree(sw->uuid);
1369 	kfree(sw->device_name);
1370 	kfree(sw->vendor_name);
1371 	kfree(sw->ports);
1372 	kfree(sw->drom);
1373 	kfree(sw->key);
1374 	kfree(sw);
1375 }
1376 
1377 /*
1378  * Currently only need to provide the callbacks. Everything else is handled
1379  * in the connection manager.
1380  */
1381 static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
1382 {
1383 	return 0;
1384 }
1385 
1386 static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
1387 {
1388 	return 0;
1389 }
1390 
1391 static const struct dev_pm_ops tb_switch_pm_ops = {
1392 	SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
1393 			   NULL)
1394 };
1395 
1396 struct device_type tb_switch_type = {
1397 	.name = "thunderbolt_device",
1398 	.release = tb_switch_release,
1399 	.pm = &tb_switch_pm_ops,
1400 };
1401 
1402 static int tb_switch_get_generation(struct tb_switch *sw)
1403 {
1404 	switch (sw->config.device_id) {
1405 	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1406 	case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
1407 	case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
1408 	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
1409 	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
1410 	case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
1411 	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
1412 	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
1413 		return 1;
1414 
1415 	case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
1416 	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
1417 	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
1418 		return 2;
1419 
1420 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
1421 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
1422 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
1423 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
1424 	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
1425 	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
1426 	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
1427 	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
1428 		return 3;
1429 
1430 	default:
1431 		/*
1432 		 * For unknown switches assume generation to be 1 to be
1433 		 * on the safe side.
1434 		 */
1435 		tb_sw_warn(sw, "unsupported switch device id %#x\n",
1436 			   sw->config.device_id);
1437 		return 1;
1438 	}
1439 }
1440 
1441 /**
1442  * tb_switch_alloc() - allocate a switch
1443  * @tb: Pointer to the owning domain
1444  * @parent: Parent device for this switch
1445  * @route: Route string for this switch
1446  *
1447  * Allocates and initializes a switch. Will not upload configuration to
1448  * the switch. For that you need to call tb_switch_configure()
1449  * separately. The returned switch should be released by calling
1450  * tb_switch_put().
1451  *
1452  * Return: Pointer to the allocated switch or ERR_PTR() in case of
1453  * failure.
1454  */
1455 struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
1456 				  u64 route)
1457 {
1458 	struct tb_switch *sw;
1459 	int upstream_port;
1460 	int i, ret, depth;
1461 
1462 	/* Make sure we do not exceed maximum topology limit */
1463 	depth = tb_route_length(route);
1464 	if (depth > TB_SWITCH_MAX_DEPTH)
1465 		return ERR_PTR(-EADDRNOTAVAIL);
1466 
1467 	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
1468 	if (upstream_port < 0)
1469 		return ERR_PTR(upstream_port);
1470 
1471 	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
1472 	if (!sw)
1473 		return ERR_PTR(-ENOMEM);
1474 
1475 	sw->tb = tb;
1476 	ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
1477 	if (ret)
1478 		goto err_free_sw_ports;
1479 
1480 	tb_dbg(tb, "current switch config:\n");
1481 	tb_dump_switch(tb, &sw->config);
1482 
1483 	/* configure switch */
1484 	sw->config.upstream_port_number = upstream_port;
1485 	sw->config.depth = depth;
1486 	sw->config.route_hi = upper_32_bits(route);
1487 	sw->config.route_lo = lower_32_bits(route);
1488 	sw->config.enabled = 0;
1489 
1490 	/* initialize ports */
1491 	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
1492 				GFP_KERNEL);
1493 	if (!sw->ports) {
1494 		ret = -ENOMEM;
1495 		goto err_free_sw_ports;
1496 	}
1497 
1498 	for (i = 0; i <= sw->config.max_port_number; i++) {
1499 		/* minimum setup for tb_find_cap and tb_drom_read to work */
1500 		sw->ports[i].sw = sw;
1501 		sw->ports[i].port = i;
1502 	}
1503 
1504 	sw->generation = tb_switch_get_generation(sw);
1505 
1506 	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
1507 	if (ret < 0) {
1508 		tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
1509 		goto err_free_sw_ports;
1510 	}
1511 	sw->cap_plug_events = ret;
1512 
1513 	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
1514 	if (ret > 0)
1515 		sw->cap_lc = ret;
1516 
1517 	/* Root switch is always authorized */
1518 	if (!route)
1519 		sw->authorized = true;
1520 
1521 	device_initialize(&sw->dev);
1522 	sw->dev.parent = parent;
1523 	sw->dev.bus = &tb_bus_type;
1524 	sw->dev.type = &tb_switch_type;
1525 	sw->dev.groups = switch_groups;
1526 	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
1527 
1528 	return sw;
1529 
1530 err_free_sw_ports:
1531 	kfree(sw->ports);
1532 	kfree(sw);
1533 
1534 	return ERR_PTR(ret);
1535 }
1536 
1537 /**
1538  * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
1539  * @tb: Pointer to the owning domain
1540  * @parent: Parent device for this switch
1541  * @route: Route string for this switch
1542  *
1543  * This creates a switch in safe mode. This means the switch pretty much
1544  * lacks all capabilities except the DMA configuration port until it is
1545  * flashed with valid NVM firmware.
1546  *
1547  * The returned switch must be released by calling tb_switch_put().
1548  *
1549  * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
1550  */
1551 struct tb_switch *
1552 tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
1553 {
1554 	struct tb_switch *sw;
1555 
1556 	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
1557 	if (!sw)
1558 		return ERR_PTR(-ENOMEM);
1559 
1560 	sw->tb = tb;
1561 	sw->config.depth = tb_route_length(route);
1562 	sw->config.route_hi = upper_32_bits(route);
1563 	sw->config.route_lo = lower_32_bits(route);
1564 	sw->safe_mode = true;
1565 
1566 	device_initialize(&sw->dev);
1567 	sw->dev.parent = parent;
1568 	sw->dev.bus = &tb_bus_type;
1569 	sw->dev.type = &tb_switch_type;
1570 	sw->dev.groups = switch_groups;
1571 	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
1572 
1573 	return sw;
1574 }
1575 
1576 /**
1577  * tb_switch_configure() - Uploads configuration to the switch
1578  * @sw: Switch to configure
1579  *
1580  * Call this function before the switch is added to the system. It
1581  * uploads the configuration to the switch and makes it available for
1582  * the connection manager to use.
1583  *
1584  * Return: %0 in case of success and negative errno in case of failure
1585  */
1586 int tb_switch_configure(struct tb_switch *sw)
1587 {
1588 	struct tb *tb = sw->tb;
1589 	u64 route;
1590 	int ret;
1591 
1592 	route = tb_route(sw);
1593 	tb_dbg(tb, "initializing Switch at %#llx (depth: %d, up port: %d)\n",
1594 	       route, tb_route_length(route), sw->config.upstream_port_number);
1595 
1596 	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
1597 		tb_sw_warn(sw, "unknown switch vendor id %#x\n",
1598 			   sw->config.vendor_id);
1599 
1600 	sw->config.enabled = 1;
1601 
1602 	/* upload configuration */
1603 	ret = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3);
1604 	if (ret)
1605 		return ret;
1606 
1607 	ret = tb_lc_configure_link(sw);
1608 	if (ret)
1609 		return ret;
1610 
1611 	return tb_plug_events_active(sw, true);
1612 }
1613 
1614 static int tb_switch_set_uuid(struct tb_switch *sw)
1615 {
1616 	u32 uuid[4];
1617 	int ret;
1618 
1619 	if (sw->uuid)
1620 		return 0;
1621 
1622 	/*
1623 	 * The newer controllers include a fused UUID as part of the link
1624 	 * controller specific registers.
1625 	 */
1626 	ret = tb_lc_read_uuid(sw, uuid);
1627 	if (ret) {
1628 		/*
1629 		 * ICM generates UUID based on UID and fills the upper
1630 		 * two words with ones. This is not strictly following
1631 		 * UUID format but we want to be compatible with it so
1632 		 * we do the same here.
1633 		 */
1634 		uuid[0] = sw->uid & 0xffffffff;
1635 		uuid[1] = (sw->uid >> 32) & 0xffffffff;
1636 		uuid[2] = 0xffffffff;
1637 		uuid[3] = 0xffffffff;
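		/*
		 * Worked example (derived from the assignments above): a
		 * UID of 0x0123456789abcdef yields the UUID dwords
		 * { 0x89abcdef, 0x01234567, 0xffffffff, 0xffffffff }.
		 */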
1638 	}
1639 
1640 	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
1641 	if (!sw->uuid)
1642 		return -ENOMEM;
1643 	return 0;
1644 }
1645 
1646 static int tb_switch_add_dma_port(struct tb_switch *sw)
1647 {
1648 	u32 status;
1649 	int ret;
1650 
1651 	switch (sw->generation) {
1652 	case 3:
1653 		break;
1654 
1655 	case 2:
1656 		/* Only root switch can be upgraded */
1657 		if (tb_route(sw))
1658 			return 0;
1659 		break;
1660 
1661 	default:
1662 		/*
1663 		 * DMA port is the only thing available when the switch
1664 		 * is in safe mode.
1665 		 */
1666 		if (!sw->safe_mode)
1667 			return 0;
1668 		break;
1669 	}
1670 
1671 	if (sw->no_nvm_upgrade)
1672 		return 0;
1673 
1674 	sw->dma_port = dma_port_alloc(sw);
1675 	if (!sw->dma_port)
1676 		return 0;
1677 
1678 	/*
1679 	 * Check status of the previous flash authentication. If there
1680 	 * is one we need to power cycle the switch in any case to make
1681 	 * it functional again.
1682 	 */
1683 	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
1684 	if (ret <= 0)
1685 		return ret;
1686 
1687 	/* Now we can allow root port to suspend again */
1688 	if (!tb_route(sw))
1689 		nvm_authenticate_complete(sw);
1690 
1691 	if (status) {
1692 		tb_sw_info(sw, "switch flash authentication failed\n");
1693 		ret = tb_switch_set_uuid(sw);
1694 		if (ret)
1695 			return ret;
1696 		nvm_set_auth_status(sw, status);
1697 	}
1698 
1699 	tb_sw_info(sw, "power cycling the switch now\n");
1700 	dma_port_power_cycle(sw->dma_port);
1701 
1702 	/*
1703 	 * We return an error here, which causes the switch add to fail.
1704 	 * The switch should appear back after the power cycle is complete.
1705 	 */
1706 	return -ESHUTDOWN;
1707 }
1708 
1709 /**
1710  * tb_switch_add() - Add a switch to the domain
1711  * @sw: Switch to add
1712  *
1713  * This is the last step in adding a switch to the domain. It reads
1714  * identification information from the DROM and initializes the ports
1715  * so that they can be used to connect other switches. The switch is
1716  * exposed to userspace when this function successfully returns. To
1717  * remove and release the switch, call tb_switch_remove().
1718  *
1719  * Return: %0 in case of success and negative errno in case of failure
1720  */
1721 int tb_switch_add(struct tb_switch *sw)
1722 {
1723 	int i, ret;
1724 
1725 	/*
1726 	 * Initialize the DMA control port now, before we read the DROM.
1727 	 * Recent host controllers have a more complete DROM in NVM that
1728 	 * includes vendor and model identification strings, which we then
1729 	 * expose to userspace. The NVM can be accessed through the DMA
1730 	 * configuration based mailbox.
1731 	 */
1732 	ret = tb_switch_add_dma_port(sw);
1733 	if (ret)
1734 		return ret;
1735 
1736 	if (!sw->safe_mode) {
1737 		/* read drom */
1738 		ret = tb_drom_read(sw);
1739 		if (ret) {
1740 			tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
1741 			return ret;
1742 		}
1743 		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
1744 
1745 		ret = tb_switch_set_uuid(sw);
1746 		if (ret)
1747 			return ret;
1748 
1749 		for (i = 0; i <= sw->config.max_port_number; i++) {
1750 			if (sw->ports[i].disabled) {
1751 				tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
1752 				continue;
1753 			}
1754 			ret = tb_init_port(&sw->ports[i]);
1755 			if (ret)
1756 				return ret;
1757 		}
1758 	}
1759 
1760 	ret = device_add(&sw->dev);
1761 	if (ret)
1762 		return ret;
1763 
1764 	if (tb_route(sw)) {
1765 		dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
1766 			 sw->vendor, sw->device);
1767 		if (sw->vendor_name && sw->device_name)
1768 			dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
1769 				 sw->device_name);
1770 	}
1771 
1772 	ret = tb_switch_nvm_add(sw);
1773 	if (ret) {
1774 		device_del(&sw->dev);
1775 		return ret;
1776 	}
1777 
1778 	pm_runtime_set_active(&sw->dev);
1779 	if (sw->rpm) {
1780 		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
1781 		pm_runtime_use_autosuspend(&sw->dev);
1782 		pm_runtime_mark_last_busy(&sw->dev);
1783 		pm_runtime_enable(&sw->dev);
1784 		pm_request_autosuspend(&sw->dev);
1785 	}
1786 
1787 	return 0;
1788 }
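/*
 * Typical lifecycle as driven by a connection manager (a sketch based
 * on the functions above, error handling abbreviated):
 *
 *	sw = tb_switch_alloc(tb, parent, route);
 *	if (IS_ERR(sw))
 *		return;
 *	if (tb_switch_configure(sw) || tb_switch_add(sw)) {
 *		tb_switch_put(sw);
 *		return;
 *	}
 *	...
 *	tb_switch_remove(sw);	(later, on unplug or domain teardown)
 */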
1789 
1790 /**
1791  * tb_switch_remove() - Remove and release a switch
1792  * @sw: Switch to remove
1793  *
1794  * This removes the switch from the domain and releases it once the
1795  * last reference drops to zero. If there are switches connected below
1796  * this switch, they are removed as well.
1797  */
1798 void tb_switch_remove(struct tb_switch *sw)
1799 {
1800 	int i;
1801 
1802 	if (sw->rpm) {
1803 		pm_runtime_get_sync(&sw->dev);
1804 		pm_runtime_disable(&sw->dev);
1805 	}
1806 
1807 	/* port 0 is the switch itself and never has a remote */
1808 	for (i = 1; i <= sw->config.max_port_number; i++) {
1809 		if (tb_port_has_remote(&sw->ports[i])) {
1810 			tb_switch_remove(sw->ports[i].remote->sw);
1811 			sw->ports[i].remote = NULL;
1812 		} else if (sw->ports[i].xdomain) {
1813 			tb_xdomain_remove(sw->ports[i].xdomain);
1814 			sw->ports[i].xdomain = NULL;
1815 		}
1816 	}
1817 
1818 	if (!sw->is_unplugged)
1819 		tb_plug_events_active(sw, false);
1820 	tb_lc_unconfigure_link(sw);
1821 
1822 	tb_switch_nvm_remove(sw);
1823 
1824 	if (tb_route(sw))
1825 		dev_info(&sw->dev, "device disconnected\n");
1826 	device_unregister(&sw->dev);
1827 }
1828 
1829 /**
1830  * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
1831  */
1832 void tb_sw_set_unplugged(struct tb_switch *sw)
1833 {
1834 	int i;
1835 	if (sw == sw->tb->root_switch) {
1836 		tb_sw_WARN(sw, "cannot unplug root switch\n");
1837 		return;
1838 	}
1839 	if (sw->is_unplugged) {
1840 		tb_sw_WARN(sw, "is_unplugged already set\n");
1841 		return;
1842 	}
1843 	sw->is_unplugged = true;
1844 	for (i = 0; i <= sw->config.max_port_number; i++) {
1845 		if (tb_port_has_remote(&sw->ports[i]))
1846 			tb_sw_set_unplugged(sw->ports[i].remote->sw);
1847 		else if (sw->ports[i].xdomain)
1848 			sw->ports[i].xdomain->is_unplugged = true;
1849 	}
1850 }
1851 
1852 int tb_switch_resume(struct tb_switch *sw)
1853 {
1854 	int i, err;
1855 	tb_sw_dbg(sw, "resuming switch\n");
1856 
1857 	/*
1858 	 * Check the UID of the connected switches, except for the root
1859 	 * switch which we assume cannot be removed.
1860 	 */
1861 	if (tb_route(sw)) {
1862 		u64 uid;
1863 
1864 		/*
1865 		 * Check first that we can still read the switch config
1866 		 * space. It may be that there is now another domain
1867 		 * connected.
1868 		 */
1869 		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
1870 		if (err < 0) {
1871 			tb_sw_info(sw, "switch not present anymore\n");
1872 			return err;
1873 		}
1874 
1875 		err = tb_drom_read_uid_only(sw, &uid);
1876 		if (err) {
1877 			tb_sw_warn(sw, "uid read failed\n");
1878 			return err;
1879 		}
1880 		if (sw->uid != uid) {
1881 			tb_sw_info(sw,
1882 				"changed while suspended (uid %#llx -> %#llx)\n",
1883 				sw->uid, uid);
1884 			return -ENODEV;
1885 		}
1886 	}
1887 
1888 	/* upload configuration */
1889 	err = tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3);
1890 	if (err)
1891 		return err;
1892 
1893 	err = tb_lc_configure_link(sw);
1894 	if (err)
1895 		return err;
1896 
1897 	err = tb_plug_events_active(sw, true);
1898 	if (err)
1899 		return err;
1900 
1901 	/* check for surviving downstream switches */
1902 	for (i = 1; i <= sw->config.max_port_number; i++) {
1903 		struct tb_port *port = &sw->ports[i];
1904 
1905 		if (!tb_port_has_remote(port) && !port->xdomain)
1906 			continue;
1907 
1908 		if (tb_wait_for_port(port, true) <= 0) {
1909 			tb_port_warn(port,
1910 				     "lost during suspend, disconnecting\n");
1911 			if (tb_port_has_remote(port))
1912 				tb_sw_set_unplugged(port->remote->sw);
1913 			else if (port->xdomain)
1914 				port->xdomain->is_unplugged = true;
1915 		} else if (tb_port_has_remote(port)) {
1916 			if (tb_switch_resume(port->remote->sw)) {
1917 				tb_port_warn(port,
1918 					     "lost during suspend, disconnecting\n");
1919 				tb_sw_set_unplugged(port->remote->sw);
1920 			}
1921 		}
1922 	}
1923 	return 0;
1924 }
1925 
1926 void tb_switch_suspend(struct tb_switch *sw)
1927 {
1928 	int i, err;
1929 	err = tb_plug_events_active(sw, false);
1930 	if (err)
1931 		return;
1932 
1933 	for (i = 1; i <= sw->config.max_port_number; i++) {
1934 		if (tb_port_has_remote(&sw->ports[i]))
1935 			tb_switch_suspend(sw->ports[i].remote->sw);
1936 	}
1937 
1938 	tb_lc_set_sleep(sw);
1939 }
1940 
1941 struct tb_sw_lookup {
1942 	struct tb *tb;
1943 	u8 link;
1944 	u8 depth;
1945 	const uuid_t *uuid;
1946 	u64 route;
1947 };
1948 
1949 static int tb_switch_match(struct device *dev, void *data)
1950 {
1951 	struct tb_switch *sw = tb_to_switch(dev);
1952 	struct tb_sw_lookup *lookup = data;
1953 
1954 	if (!sw)
1955 		return 0;
1956 	if (sw->tb != lookup->tb)
1957 		return 0;
1958 
1959 	if (lookup->uuid)
1960 		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
1961 
1962 	if (lookup->route) {
1963 		return sw->config.route_lo == lower_32_bits(lookup->route) &&
1964 		       sw->config.route_hi == upper_32_bits(lookup->route);
1965 	}
1966 
1967 	/* Root switch is matched only by depth */
1968 	if (!lookup->depth)
1969 		return !sw->depth;
1970 
1971 	return sw->link == lookup->link && sw->depth == lookup->depth;
1972 }
1973 
1974 /**
1975  * tb_switch_find_by_link_depth() - Find switch by link and depth
1976  * @tb: Domain the switch belongs to
1977  * @link: Link number the switch is connected to
1978  * @depth: Depth of the switch in the link
1979  *
1980  * Returned switch has reference count increased so the caller needs to
1981  * call tb_switch_put() when done with the switch.
1982  */
1983 struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
1984 {
1985 	struct tb_sw_lookup lookup;
1986 	struct device *dev;
1987 
1988 	memset(&lookup, 0, sizeof(lookup));
1989 	lookup.tb = tb;
1990 	lookup.link = link;
1991 	lookup.depth = depth;
1992 
1993 	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
1994 	if (dev)
1995 		return tb_to_switch(dev);
1996 
1997 	return NULL;
1998 }
1999 
2000 /**
2001  * tb_switch_find_by_uuid() - Find switch by UUID
2002  * @tb: Domain the switch belongs to
2003  * @uuid: UUID to look for
2004  *
2005  * Returned switch has reference count increased so the caller needs to
2006  * call tb_switch_put() when done with the switch.
2007  */
2008 struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
2009 {
2010 	struct tb_sw_lookup lookup;
2011 	struct device *dev;
2012 
2013 	memset(&lookup, 0, sizeof(lookup));
2014 	lookup.tb = tb;
2015 	lookup.uuid = uuid;
2016 
2017 	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
2018 	if (dev)
2019 		return tb_to_switch(dev);
2020 
2021 	return NULL;
2022 }
2023 
2024 /**
2025  * tb_switch_find_by_route() - Find switch by route string
2026  * @tb: Domain the switch belongs to
2027  * @route: Route string to look for
2028  *
2029  * Returned switch has reference count increased so the caller needs to
2030  * call tb_switch_put() when done with the switch.
2031  */
2032 struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
2033 {
2034 	struct tb_sw_lookup lookup;
2035 	struct device *dev;
2036 
2037 	if (!route)
2038 		return tb_switch_get(tb->root_switch);
2039 
2040 	memset(&lookup, 0, sizeof(lookup));
2041 	lookup.tb = tb;
2042 	lookup.route = route;
2043 
2044 	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
2045 	if (dev)
2046 		return tb_to_switch(dev);
2047 
2048 	return NULL;
2049 }
2050 
2051 void tb_switch_exit(void)
2052 {
2053 	ida_destroy(&nvm_ida);
2054 }
2055