// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt Cactus Ridge driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/nvmem-provider.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "tb.h"

/* Switch authorization from userspace is serialized by this lock */
static DEFINE_MUTEX(switch_lock);

/* Switch NVM support */

#define NVM_DEVID		0x05
#define NVM_VERSION		0x08
#define NVM_CSS			0x10
#define NVM_FLASH_SIZE		0x45

#define NVM_MIN_SIZE		SZ_32K
#define NVM_MAX_SIZE		SZ_512K

static DEFINE_IDA(nvm_ida);

struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

/*
 * Hold the NVM authentication failure status per switch. This
 * information needs to stay around even when the switch gets power
 * cycled so we keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);

static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}

static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	if (!st) {
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}

static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}

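/*
 * Validate the NVM image cached in sw->nvm->buf and write it out
 * through the DMA port. A sketch of the image layout, as far as this
 * function relies on it: the first dword carries the FARB pointer to
 * the digital section in its low 24 bits; the digital section starts
 * with a 16-bit size field and holds the device ID at offset NVM_DEVID.
 * On pre-gen-3 hardware the CSS headers are additionally written to a
 * separate flash address before the digital section itself.
 */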
static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size, hdr_size;
	const u8 *buf = sw->nvm->buf;
	u16 ds_size;
	int ret;

	if (!buf)
		return -EINVAL;

	image_size = sw->nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	if (!sw->safe_mode) {
		u16 device_id;

		/*
		 * Make sure the device ID in the image matches the one
		 * we read from the switch config space.
		 */
		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
		if (device_id != sw->config.device_id)
			return -EINVAL;

		if (sw->generation < 3) {
			/* Write CSS headers first */
			ret = dma_port_flash_write(sw->dma_port,
				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
				DMA_PORT_CSS_MAX_SIZE);
			if (ret)
				return ret;
		}

		/* Skip headers in the image */
		buf += hdr_size;
		image_size -= hdr_size;
	}

	return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
}

static int nvm_authenticate_host(struct tb_switch *sw)
{
	int ret;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		ret = tb_domain_disconnect_all_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this
		 * if everything goes well, so getting a timeout is
		 * expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		return ret == -ETIMEDOUT ? 0 : ret;
	}

	/*
	 * We can get out of safe mode by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return 0;
}

static int nvm_authenticate_device(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	if (ret && ret != -ETIMEDOUT)
		return ret;

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get a timeout for a while).
	 * Once we get a response the device needs to be power cycled
	 * in order for the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;
		if (ret > 0) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}

static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
			      size_t bytes)
{
	struct tb_switch *sw = priv;

	return dma_port_flash_read(sw->dma_port, offset, val, bytes);
}

static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
			       size_t bytes)
{
	struct tb_switch *sw = priv;
	int ret = 0;

	if (mutex_lock_interruptible(&switch_lock))
		return -ERESTARTSYS;

	/*
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	 */
	if (!sw->nvm->buf) {
		sw->nvm->buf = vmalloc(NVM_MAX_SIZE);
		if (!sw->nvm->buf) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

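	/*
	 * This assumes userspace streams the image linearly from offset
	 * zero, so the end of the latest write marks the total image
	 * size. The image is only validated later, when authentication
	 * is requested.
	 */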
	sw->nvm->buf_data_size = offset + bytes;
	memcpy(sw->nvm->buf + offset, val, bytes);

unlock:
	mutex_unlock(&switch_lock);

	return ret;
}

static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
					   size_t size, bool active)
{
	struct nvmem_config config;

	memset(&config, 0, sizeof(config));

	if (active) {
		config.name = "nvm_active";
		config.reg_read = tb_switch_nvm_read;
		config.read_only = true;
	} else {
		config.name = "nvm_non_active";
		config.reg_write = tb_switch_nvm_write;
		config.root_only = true;
	}

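	/*
	 * The NVM is accessed as 32-bit words, hence the dword stride
	 * and word size for both the active (read-only) and non-active
	 * (root-only, writable) nvmem devices.
	 */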
	config.id = id;
	config.stride = 4;
	config.word_size = 4;
	config.size = size;
	config.dev = &sw->dev;
	config.owner = THIS_MODULE;
	config.priv = sw;

	return nvmem_register(&config);
}

static int tb_switch_nvm_add(struct tb_switch *sw)
{
	struct nvmem_device *nvm_dev;
	struct tb_switch_nvm *nvm;
	u32 val;
	int ret;

	if (!sw->dma_port)
		return 0;

	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
	if (!nvm)
		return -ENOMEM;

	ret = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(nvm);
		return ret;
	}
	nvm->id = ret;

	/*
	 * If the switch is in safe mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (!sw->safe_mode) {
		u32 nvm_size, hdr_size;

		ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val,
					  sizeof(val));
		if (ret)
			goto err_ida;

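		/*
		 * The low bits of NVM_FLASH_SIZE are assumed to encode
		 * the flash size as a power-of-two number of megabits;
		 * dividing by 8 converts that to bytes. The flash holds
		 * two NVM images (active and non-active), so after
		 * dropping the header the remaining space is split in
		 * half.
		 */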
		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
		nvm_size = (SZ_1M << (val & 7)) / 8;
		nvm_size = (nvm_size - hdr_size) / 2;

		ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val,
					  sizeof(val));
		if (ret)
			goto err_ida;

		nvm->major = val >> 16;
		nvm->minor = val >> 8;

		nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true);
		if (IS_ERR(nvm_dev)) {
			ret = PTR_ERR(nvm_dev);
			goto err_ida;
		}
		nvm->active = nvm_dev;
	}

	nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
	if (IS_ERR(nvm_dev)) {
		ret = PTR_ERR(nvm_dev);
		goto err_nvm_active;
	}
	nvm->non_active = nvm_dev;

	mutex_lock(&switch_lock);
	sw->nvm = nvm;
	mutex_unlock(&switch_lock);

	return 0;

err_nvm_active:
	if (nvm->active)
		nvmem_unregister(nvm->active);
err_ida:
	ida_simple_remove(&nvm_ida, nvm->id);
	kfree(nvm);

	return ret;
}

static void tb_switch_nvm_remove(struct tb_switch *sw)
{
	struct tb_switch_nvm *nvm;

	mutex_lock(&switch_lock);
	nvm = sw->nvm;
	sw->nvm = NULL;
	mutex_unlock(&switch_lock);

	if (!nvm)
		return;

	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);

	nvmem_unregister(nvm->non_active);
	if (nvm->active)
		nvmem_unregister(nvm->active);
	ida_simple_remove(&nvm_ida, nvm->id);
	vfree(nvm->buf);
	kfree(nvm);
}

/* port utility functions */

static const char *tb_port_type(struct tb_regs_port_header *port)
{
	switch (port->type >> 16) {
	case 0:
		switch ((u8) port->type) {
		case 0:
			return "Inactive";
		case 1:
			return "Port";
		case 2:
			return "NHI";
		default:
			return "unknown";
		}
	case 0x2:
		return "Ethernet";
	case 0x8:
		return "SATA";
	case 0xe:
		return "DP/HDMI";
	case 0x10:
		return "PCIe";
	case 0x20:
		return "USB";
	default:
		return "unknown";
	}
}

static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
{
	tb_info(tb,
		" Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
		port->port_number, port->vendor_id, port->device_id,
		port->revision, port->thunderbolt_version, tb_port_type(port),
		port->type);
	tb_info(tb, "  Max hop id (in/out): %d/%d\n",
		port->max_in_hop_id, port->max_out_hop_id);
	tb_info(tb, "  Max counters: %d\n", port->max_counters);
	tb_info(tb, "  NFC Credits: %#x\n", port->nfc_credits);
}

/**
 * tb_port_state() - get connectedness state of a port
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
static int tb_port_state(struct tb_port *port)
{
	struct tb_cap_phy phy;
	int res;
	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}
	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
	if (res)
		return res;
	return phy.state;
}

/**
 * tb_wait_for_port() - wait for a port to become ready
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the link
 * has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
	int retries = 10;
	int state;
	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

	while (retries--) {
		state = tb_port_state(port);
		if (state < 0)
			return state;
		if (state == TB_PORT_DISABLED) {
			tb_port_info(port, "is disabled (state: 0)\n");
			return 0;
		}
		if (state == TB_PORT_UNPLUGGED) {
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_info(port,
					     "is unplugged (state: 7), retrying...\n");
				msleep(100);
				continue;
			}
			tb_port_info(port, "is unplugged (state: 7)\n");
			return 0;
		}
		if (state == TB_PORT_UP) {
			tb_port_info(port,
				     "is connected, link is up (state: 2)\n");
			return 1;
		}

		/*
		 * After plug-in the state is TB_PORT_CONNECTING. Give it some
		 * time.
		 */
		tb_port_info(port,
			     "is connected, link is not up (state: %d), retrying...\n",
			     state);
		msleep(100);
	}
	tb_port_warn(port,
		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
}

/**
 * tb_port_add_nfc_credits() - add/remove non-flow-controlled credits to port
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
	if (credits == 0)
		return 0;
	tb_port_info(port,
		     "adding %#x NFC credits (%#x -> %#x)",
		     credits,
		     port->config.nfc_credits,
		     port->config.nfc_credits + credits);
	port->config.nfc_credits += credits;
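	/* The credits count sits in dword 4 of the port config space */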
	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, 4, 1);
}

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
	u32 zero[3] = { 0, 0, 0 };
	tb_port_info(port, "clearing counter %d\n", counter);
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}

/**
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
	int res;
	int cap;

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res)
		return res;

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT && port->port != 0) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");
	}

	tb_dump_port(port->sw->tb, &port->config);

	/* TODO: Read dual link port, DP port and more from EEPROM. */
	return 0;
}

/* switch utility functions */

static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
{
	tb_info(tb,
		" Switch: %x:%x (Revision: %d, TB Version: %d)\n",
		sw->vendor_id, sw->device_id, sw->revision,
		sw->thunderbolt_version);
	tb_info(tb, "  Max Port Number: %d\n", sw->max_port_number);
	tb_info(tb, "  Config:\n");
	tb_info(tb,
		"   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
		sw->upstream_port_number, sw->depth,
		(((u64) sw->route_hi) << 32) | sw->route_lo,
		sw->enabled, sw->plug_events_delay);
	tb_info(tb,
		"   unknown1: %#x unknown4: %#x\n",
		sw->__unknown1, sw->__unknown4);
}

/**
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_switch_reset(struct tb *tb, u64 route)
{
	struct tb_cfg_result res;
	struct tb_regs_switch_header header = {
		.route_hi = route >> 32,
		.route_lo = route,
		.enabled = true,
	};
	tb_info(tb, "resetting switch at %llx\n", route);
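	/*
	 * Only dwords 2 and 3 of the switch header (the route string
	 * and the enabled bit) are written, hence the "+ 2" into the
	 * header and the offset/length of 2 dwords below.
	 */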
	res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route,
			0, 2, 2, 2);
	if (res.err)
		return res.err;
	res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT);
	if (res.err > 0)
		return -EIO;
	return res.err;
}

struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route)
{
	/*
	 * Routes use a stride of 8 bits, even though a port index has
	 * 6 bits at most.
	 */
	u8 next_port = route;
	if (route == 0)
		return sw;
	if (next_port > sw->config.max_port_number)
		return NULL;
	if (tb_is_upstream_port(&sw->ports[next_port]))
		return NULL;
	if (!sw->ports[next_port].remote)
		return NULL;
	return get_switch_at_route(sw->ports[next_port].remote->sw,
				   route >> TB_ROUTE_SHIFT);
}

/**
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
	u32 data;
	int res;

	if (!sw->config.enabled)
		return 0;

	sw->config.plug_events_delay = 0xff;
	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
	if (res)
		return res;

	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
	if (res)
		return res;

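	/*
	 * The individual filter bits are not documented here; judging
	 * by the masks, bits 2-6 (0x7c) mask plug events off when set.
	 * When enabling, they are cleared, and bit 2 is set again on
	 * everything except the oldest (Light Ridge class) controllers.
	 */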
	if (active) {
		data = data & 0xFFFFFF83;
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
			break;
		default:
			data |= 4;
		}
	} else {
		data = data | 0x7c;
	}
	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
			   sw->cap_plug_events + 1, 1);
}

static ssize_t authorized_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->authorized);
}

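/*
 * Userspace writes 1 here to approve the switch and 2 to approve it
 * using the challenge/response key (see the authorized attribute in
 * Documentation/ABI/testing/sysfs-bus-thunderbolt). Deauthorization is
 * not supported, hence the early exit when already authorized.
 */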
static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
	int ret = -EINVAL;

	if (mutex_lock_interruptible(&switch_lock))
		return -ERESTARTSYS;

	if (sw->authorized)
		goto unlock;

	switch (val) {
	/* Approve switch */
	case 1:
		if (sw->key)
			ret = tb_domain_approve_switch_key(sw->tb, sw);
		else
			ret = tb_domain_approve_switch(sw->tb, sw);
		break;

	/* Challenge switch */
	case 2:
		if (sw->key)
			ret = tb_domain_challenge_switch_key(sw->tb, sw);
		break;

	default:
		break;
	}

	if (!ret) {
		sw->authorized = val;
		/* Notify status change to userspace */
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
	}

unlock:
	mutex_unlock(&switch_lock);
	return ret;
}

static ssize_t authorized_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int val;
	ssize_t ret;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 2)
		return -EINVAL;

	ret = tb_switch_set_authorized(sw, val);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(authorized);

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
}
static DEVICE_ATTR_RO(device_name);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	ssize_t ret;

	if (mutex_lock_interruptible(&switch_lock))
		return -ERESTARTSYS;

	if (sw->key)
		ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
	else
		ret = sprintf(buf, "\n");

	mutex_unlock(&switch_lock);
	return ret;
}

static ssize_t key_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u8 key[TB_SWITCH_KEY_SIZE];
	ssize_t ret = count;
	bool clear = false;

	if (!strcmp(buf, "\n"))
		clear = true;
	else if (hex2bin(key, buf, sizeof(key)))
		return -EINVAL;

	if (mutex_lock_interruptible(&switch_lock))
		return -ERESTARTSYS;

	if (sw->authorized) {
		ret = -EBUSY;
	} else {
		kfree(sw->key);
		if (clear) {
			sw->key = NULL;
		} else {
			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
			if (!sw->key)
				ret = -ENOMEM;
		}
	}

	mutex_unlock(&switch_lock);
	return ret;
}
static DEVICE_ATTR(key, 0600, key_show, key_store);

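/*
 * The expected flow for an NVM firmware upgrade from userspace is to
 * write the new image to the nvm_non_active nvmem device and then echo
 * 1 to this attribute, which validates the image and kicks off the
 * authentication (see Documentation/admin-guide/thunderbolt.rst).
 */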
static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u32 status;

	nvm_get_auth_status(sw, &status);
	return sprintf(buf, "%#x\n", status);
}

static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	bool val;
	int ret;

	if (mutex_lock_interruptible(&switch_lock))
		return -ERESTARTSYS;

	/* If NVMem devices are not yet added */
	if (!sw->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtobool(buf, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear the authentication status */
	nvm_clear_auth_status(sw);

	if (val) {
		ret = nvm_validate_and_write(sw);
		if (ret)
			goto exit_unlock;

		sw->nvm->authenticating = true;

		if (!tb_route(sw))
			ret = nvm_authenticate_host(sw);
		else
			ret = nvm_authenticate_device(sw);
	}

exit_unlock:
	mutex_unlock(&switch_lock);

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int ret;

	if (mutex_lock_interruptible(&switch_lock))
		return -ERESTARTSYS;

	if (sw->safe_mode)
		ret = -ENODATA;
	else if (!sw->nvm)
		ret = -EAGAIN;
	else
		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);

	mutex_unlock(&switch_lock);

	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%pUb\n", sw->uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *switch_attrs[] = {
	&dev_attr_authorized.attr,
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_key.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	&dev_attr_unique_id.attr,
	NULL,
};

static umode_t switch_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct tb_switch *sw = tb_to_switch(dev);

	if (attr == &dev_attr_key.attr) {
		if (tb_route(sw) &&
		    sw->tb->security_level == TB_SECURITY_SECURE &&
		    sw->security_level == TB_SECURITY_SECURE)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate.attr ||
		   attr == &dev_attr_nvm_version.attr) {
		if (sw->dma_port)
			return attr->mode;
		return 0;
	}

	return sw->safe_mode ? 0 : attr->mode;
}

static struct attribute_group switch_group = {
	.is_visible = switch_attr_is_visible,
	.attrs = switch_attrs,
};

static const struct attribute_group *switch_groups[] = {
	&switch_group,
	NULL,
};

static void tb_switch_release(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);

	dma_port_free(sw->dma_port);

	kfree(sw->uuid);
	kfree(sw->device_name);
	kfree(sw->vendor_name);
	kfree(sw->ports);
	kfree(sw->drom);
	kfree(sw->key);
	kfree(sw);
}

struct device_type tb_switch_type = {
	.name = "thunderbolt_device",
	.release = tb_switch_release,
};

static int tb_switch_get_generation(struct tb_switch *sw)
{
	switch (sw->config.device_id) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
	case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
		return 1;

	case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
		return 2;

	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
		return 3;

	default:
		/*
		 * For unknown switches assume generation to be 1 to be
		 * on the safe side.
		 */
		tb_sw_warn(sw, "unsupported switch device id %#x\n",
			   sw->config.device_id);
		return 1;
	}
}

/**
 * tb_switch_alloc() - allocate a switch
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * Allocates and initializes a switch. Will not upload configuration to
 * the switch. For that you need to call tb_switch_configure()
 * separately. The returned switch should be released by calling
 * tb_switch_put().
 *
 * Return: Pointer to the allocated switch or %NULL in case of failure
 */
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
				  u64 route)
{
	int i;
	int cap;
	struct tb_switch *sw;
	int upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
	if (upstream_port < 0)
		return NULL;

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return NULL;

	sw->tb = tb;
	if (tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5))
		goto err_free_sw_ports;

	tb_info(tb, "current switch config:\n");
	tb_dump_switch(tb, &sw->config);

	/* configure switch */
	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = tb_route_length(route);
	sw->config.route_lo = route;
	sw->config.route_hi = route >> 32;
	sw->config.enabled = 0;

	/* initialize ports */
	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
				GFP_KERNEL);
	if (!sw->ports)
		goto err_free_sw_ports;

	for (i = 0; i <= sw->config.max_port_number; i++) {
		/* minimum setup for tb_find_cap and tb_drom_read to work */
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;
	}

	sw->generation = tb_switch_get_generation(sw);

	cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
	if (cap < 0) {
		tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
		goto err_free_sw_ports;
	}
	sw->cap_plug_events = cap;

	/* Root switch is always authorized */
	if (!route)
		sw->authorized = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;

err_free_sw_ports:
	kfree(sw->ports);
	kfree(sw);

	return NULL;
}

/**
 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * This creates a switch in safe mode. This means the switch pretty much
 * lacks all capabilities except the DMA configuration port until it is
 * flashed with valid NVM firmware.
 *
 * The returned switch must be released by calling tb_switch_put().
 *
 * Return: Pointer to the allocated switch or %NULL in case of failure
 */
struct tb_switch *
tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
{
	struct tb_switch *sw;

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return NULL;

	sw->tb = tb;
	sw->config.depth = tb_route_length(route);
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->safe_mode = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;
}

/**
 * tb_switch_configure() - Uploads configuration to the switch
 * @sw: Switch to configure
 *
 * Call this function before the switch is added to the system. It will
 * upload the configuration to the switch and make it available for the
 * connection manager to use.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_configure(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	u64 route;
	int ret;

	route = tb_route(sw);
	tb_info(tb,
		"initializing Switch at %#llx (depth: %d, up port: %d)\n",
		route, tb_route_length(route), sw->config.upstream_port_number);

	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
		tb_sw_warn(sw, "unknown switch vendor id %#x\n",
			   sw->config.vendor_id);

	sw->config.enabled = 1;

	/* upload configuration */
	ret = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3);
	if (ret)
		return ret;

	return tb_plug_events_active(sw, true);
}

static void tb_switch_set_uuid(struct tb_switch *sw)
{
	u32 uuid[4];
	int cap;

	if (sw->uuid)
		return;

	/*
	 * The newer controllers include a fused UUID as part of the
	 * link controller specific registers.
	 */
	cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
	if (cap > 0) {
		tb_sw_read(sw, uuid, TB_CFG_SWITCH, cap + 3, 4);
	} else {
		/*
		 * ICM generates UUID based on UID and fills the upper
		 * two words with ones. This is not strictly following
		 * UUID format but we want to be compatible with it so
		 * we do the same here.
		 */
		uuid[0] = sw->uid & 0xffffffff;
		uuid[1] = (sw->uid >> 32) & 0xffffffff;
		uuid[2] = 0xffffffff;
		uuid[3] = 0xffffffff;
	}

	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
}

static int tb_switch_add_dma_port(struct tb_switch *sw)
{
	u32 status;
	int ret;

	switch (sw->generation) {
	case 3:
		break;

	case 2:
		/* Only root switch can be upgraded */
		if (tb_route(sw))
			return 0;
		break;

	default:
		/*
		 * DMA port is the only thing available when the switch
		 * is in safe mode.
		 */
		if (!sw->safe_mode)
			return 0;
		break;
	}

	if (sw->no_nvm_upgrade)
		return 0;

	sw->dma_port = dma_port_alloc(sw);
	if (!sw->dma_port)
		return 0;

	/*
	 * Check status of the previous flash authentication. If there
	 * is one we need to power cycle the switch in any case to make
	 * it functional again.
	 */
	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
	if (ret <= 0)
		return ret;

	if (status) {
		tb_sw_info(sw, "switch flash authentication failed\n");
		tb_switch_set_uuid(sw);
		nvm_set_auth_status(sw, status);
	}

	tb_sw_info(sw, "power cycling the switch now\n");
	dma_port_power_cycle(sw->dma_port);

	/*
	 * We return an error here, which causes adding the switch to
	 * fail. It should appear back after the power cycle is
	 * complete.
	 */
	return -ESHUTDOWN;
}

/**
 * tb_switch_add() - Add a switch to the domain
 * @sw: Switch to add
 *
 * This is the last step in adding a switch to the domain. It will read
 * identification information from the DROM and initialize ports so that
 * they can be used to connect other switches. The switch will be
 * exposed to userspace when this function successfully returns. To
 * remove and release the switch, call tb_switch_remove().
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_add(struct tb_switch *sw)
{
	int i, ret;

	/*
	 * Initialize the DMA control port now before we read the DROM.
	 * Recent host controllers have a more complete DROM in NVM that
	 * includes vendor and model identification strings, which we
	 * then expose to userspace. The NVM can be accessed through the
	 * DMA configuration based mailbox.
	 */
	ret = tb_switch_add_dma_port(sw);
	if (ret)
		return ret;

	if (!sw->safe_mode) {
		/* read drom */
		ret = tb_drom_read(sw);
		if (ret) {
			tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
			return ret;
		}
		tb_sw_info(sw, "uid: %#llx\n", sw->uid);

		tb_switch_set_uuid(sw);

		for (i = 0; i <= sw->config.max_port_number; i++) {
			if (sw->ports[i].disabled) {
				tb_port_info(&sw->ports[i], "disabled by eeprom\n");
				continue;
			}
			ret = tb_init_port(&sw->ports[i]);
			if (ret)
				return ret;
		}
	}

	ret = device_add(&sw->dev);
	if (ret)
		return ret;

	ret = tb_switch_nvm_add(sw);
	if (ret)
		device_del(&sw->dev);

	return ret;
}

/**
 * tb_switch_remove() - Remove and release a switch
 * @sw: Switch to remove
 *
 * This will remove the switch from the domain and release it after the
 * last reference count drops to zero. If there are switches connected
 * below this switch, they will be removed as well.
 */
void tb_switch_remove(struct tb_switch *sw)
{
	int i;

	/* port 0 is the switch itself and never has a remote */
	for (i = 1; i <= sw->config.max_port_number; i++) {
		if (tb_is_upstream_port(&sw->ports[i]))
			continue;
		if (sw->ports[i].remote)
			tb_switch_remove(sw->ports[i].remote->sw);
		sw->ports[i].remote = NULL;
		if (sw->ports[i].xdomain)
			tb_xdomain_remove(sw->ports[i].xdomain);
		sw->ports[i].xdomain = NULL;
	}

	if (!sw->is_unplugged)
		tb_plug_events_active(sw, false);

	tb_switch_nvm_remove(sw);
	device_unregister(&sw->dev);
}

/**
 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
 */
void tb_sw_set_unplugged(struct tb_switch *sw)
{
	int i;
	if (sw == sw->tb->root_switch) {
		tb_sw_WARN(sw, "cannot unplug root switch\n");
		return;
	}
	if (sw->is_unplugged) {
		tb_sw_WARN(sw, "is_unplugged already set\n");
		return;
	}
	sw->is_unplugged = true;
	for (i = 0; i <= sw->config.max_port_number; i++) {
		if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
			tb_sw_set_unplugged(sw->ports[i].remote->sw);
	}
}

int tb_switch_resume(struct tb_switch *sw)
{
	int i, err;
	tb_sw_info(sw, "resuming switch\n");

	/*
	 * Check the UID of the connected switches except for the root
	 * switch, which we assume cannot be removed.
	 */
	if (tb_route(sw)) {
		u64 uid;

		err = tb_drom_read_uid_only(sw, &uid);
		if (err) {
			tb_sw_warn(sw, "uid read failed\n");
			return err;
		}
		if (sw->uid != uid) {
			tb_sw_info(sw,
				"changed while suspended (uid %#llx -> %#llx)\n",
				sw->uid, uid);
			return -ENODEV;
		}
	}

	/* upload configuration */
	err = tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3);
	if (err)
		return err;

	err = tb_plug_events_active(sw, true);
	if (err)
		return err;

	/* check for surviving downstream switches */
	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];
		if (tb_is_upstream_port(port))
			continue;
		if (!port->remote)
			continue;
		if (tb_wait_for_port(port, true) <= 0
			|| tb_switch_resume(port->remote->sw)) {
			tb_port_warn(port,
				     "lost during suspend, disconnecting\n");
			tb_sw_set_unplugged(port->remote->sw);
		}
	}
	return 0;
}

void tb_switch_suspend(struct tb_switch *sw)
{
	int i, err;
	err = tb_plug_events_active(sw, false);
	if (err)
		return;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
			tb_switch_suspend(sw->ports[i].remote->sw);
	}
	/*
	 * TODO: invoke tb_cfg_prepare_to_sleep here? does not seem to have any
	 * effect?
	 */
}

struct tb_sw_lookup {
	struct tb *tb;
	u8 link;
	u8 depth;
	const uuid_t *uuid;
};

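/*
 * Match callback for bus_find_device(): a lookup either carries a UUID,
 * in which case only the UUID is compared, or a (link, depth) pair,
 * where depth 0 denotes the root switch of the domain.
 */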
static int tb_switch_match(struct device *dev, void *data)
{
	struct tb_switch *sw = tb_to_switch(dev);
	struct tb_sw_lookup *lookup = data;

	if (!sw)
		return 0;
	if (sw->tb != lookup->tb)
		return 0;

	if (lookup->uuid)
		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));

	/* Root switch is matched only by depth */
	if (!lookup->depth)
		return !sw->depth;

	return sw->link == lookup->link && sw->depth == lookup->depth;
}

/**
 * tb_switch_find_by_link_depth() - Find switch by link and depth
 * @tb: Domain the switch belongs to
 * @link: Link number the switch is connected to
 * @depth: Depth of the switch in the link
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.link = link;
	lookup.depth = depth;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_uuid() - Find switch by UUID
 * @tb: Domain the switch belongs to
 * @uuid: UUID to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.uuid = uuid;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

void tb_switch_exit(void)
{
	ida_destroy(&nvm_ida);
}