xref: /openbmc/linux/drivers/thunderbolt/domain.c (revision 1c82407a)
1 /*
2  * Thunderbolt bus support
3  *
4  * Copyright (C) 2017, Intel Corporation
5  * Author:  Mika Westerberg <mika.westerberg@linux.intel.com>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11 
12 #include <linux/device.h>
13 #include <linux/idr.h>
14 #include <linux/module.h>
15 #include <linux/slab.h>
16 #include <linux/random.h>
17 #include <crypto/hash.h>
18 
19 #include "tb.h"
20 
21 static DEFINE_IDA(tb_domain_ida);
22 
23 static bool match_service_id(const struct tb_service_id *id,
24 			     const struct tb_service *svc)
25 {
26 	if (id->match_flags & TBSVC_MATCH_PROTOCOL_KEY) {
27 		if (strcmp(id->protocol_key, svc->key))
28 			return false;
29 	}
30 
31 	if (id->match_flags & TBSVC_MATCH_PROTOCOL_ID) {
32 		if (id->protocol_id != svc->prtcid)
33 			return false;
34 	}
35 
36 	if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
37 		if (id->protocol_version != svc->prtcvers)
38 			return false;
39 	}
40 
41 	if (id->match_flags & TBSVC_MATCH_PROTOCOL_REVISION) {
42 		if (id->protocol_revision != svc->prtcrevs)
43 			return false;
44 	}
45 
46 	return true;
47 }
48 
49 static const struct tb_service_id *__tb_service_match(struct device *dev,
50 						      struct device_driver *drv)
51 {
52 	struct tb_service_driver *driver;
53 	const struct tb_service_id *ids;
54 	struct tb_service *svc;
55 
56 	svc = tb_to_service(dev);
57 	if (!svc)
58 		return NULL;
59 
60 	driver = container_of(drv, struct tb_service_driver, driver);
61 	if (!driver->id_table)
62 		return NULL;
63 
64 	for (ids = driver->id_table; ids->match_flags != 0; ids++) {
65 		if (match_service_id(ids, svc))
66 			return ids;
67 	}
68 
69 	return NULL;
70 }
71 
72 static int tb_service_match(struct device *dev, struct device_driver *drv)
73 {
74 	return !!__tb_service_match(dev, drv);
75 }
76 
77 static int tb_service_probe(struct device *dev)
78 {
79 	struct tb_service *svc = tb_to_service(dev);
80 	struct tb_service_driver *driver;
81 	const struct tb_service_id *id;
82 
83 	driver = container_of(dev->driver, struct tb_service_driver, driver);
84 	id = __tb_service_match(dev, &driver->driver);
85 
86 	return driver->probe(svc, id);
87 }
88 
89 static int tb_service_remove(struct device *dev)
90 {
91 	struct tb_service *svc = tb_to_service(dev);
92 	struct tb_service_driver *driver;
93 
94 	driver = container_of(dev->driver, struct tb_service_driver, driver);
95 	if (driver->remove)
96 		driver->remove(svc);
97 
98 	return 0;
99 }
100 
101 static void tb_service_shutdown(struct device *dev)
102 {
103 	struct tb_service_driver *driver;
104 	struct tb_service *svc;
105 
106 	svc = tb_to_service(dev);
107 	if (!svc || !dev->driver)
108 		return;
109 
110 	driver = container_of(dev->driver, struct tb_service_driver, driver);
111 	if (driver->shutdown)
112 		driver->shutdown(svc);
113 }
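
/*
 * A minimal sketch of the service-driver side of this bus, for
 * illustration only. A hypothetical driver declares a tb_service_id
 * table (which match_service_id() above is run against), fills in a
 * struct tb_service_driver and registers it with
 * tb_register_service_driver(). The "demo" protocol key and all
 * demo_* names below are made up for this example:
 *
 *	static const struct tb_service_id demo_ids[] = {
 *		{ .match_flags = TBSVC_MATCH_PROTOCOL_KEY |
 *				 TBSVC_MATCH_PROTOCOL_ID,
 *		  .protocol_key = "demo",
 *		  .protocol_id = 1 },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(tbsvc, demo_ids);
 *
 *	static int demo_probe(struct tb_service *svc,
 *			      const struct tb_service_id *id)
 *	{
 *		// set up the service here
 *		return 0;
 *	}
 *
 *	static void demo_remove(struct tb_service *svc)
 *	{
 *		// undo whatever demo_probe() set up
 *	}
 *
 *	static struct tb_service_driver demo_driver = {
 *		.driver.name = "demo",
 *		.probe = demo_probe,
 *		.remove = demo_remove,
 *		.id_table = demo_ids,
 *	};
 *
 *	// registered/unregistered from the driver's module init/exit:
 *	tb_register_service_driver(&demo_driver);
 *	tb_unregister_service_driver(&demo_driver);
 */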
114 
115 static const char * const tb_security_names[] = {
116 	[TB_SECURITY_NONE] = "none",
117 	[TB_SECURITY_USER] = "user",
118 	[TB_SECURITY_SECURE] = "secure",
119 	[TB_SECURITY_DPONLY] = "dponly",
120 	[TB_SECURITY_USBONLY] = "usbonly",
121 };
122 
123 static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
124 			     char *buf)
125 {
126 	struct tb *tb = container_of(dev, struct tb, dev);
127 	uuid_t *uuids;
128 	ssize_t ret;
129 	int i;
130 
131 	uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
132 	if (!uuids)
133 		return -ENOMEM;
134 
135 	if (mutex_lock_interruptible(&tb->lock)) {
136 		ret = -ERESTARTSYS;
137 		goto out;
138 	}
139 	ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);
140 	if (ret) {
141 		mutex_unlock(&tb->lock);
142 		goto out;
143 	}
144 	mutex_unlock(&tb->lock);
145 
146 	for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
147 		if (!uuid_is_null(&uuids[i]))
148 			ret += snprintf(buf + ret, PAGE_SIZE - ret, "%pUb",
149 					&uuids[i]);
150 
151 		ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s",
152 			       i < tb->nboot_acl - 1 ? "," : "\n");
153 	}
154 
155 out:
156 	kfree(uuids);
157 	return ret;
158 }
159 
160 static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
161 			      const char *buf, size_t count)
162 {
163 	struct tb *tb = container_of(dev, struct tb, dev);
164 	char *str, *s, *uuid_str;
165 	ssize_t ret = 0;
166 	uuid_t *acl;
167 	int i = 0;
168 
169 	/*
170 	 * Make sure the value is not bigger than tb->nboot_acl * UUID
171 	 * length + commas and optional "\n". Also the smallest allowable
172 	 * string is tb->nboot_acl - 1 commas.
173 	 */
174 	if (count > (UUID_STRING_LEN + 1) * tb->nboot_acl + 1)
175 		return -EINVAL;
176 	if (count < tb->nboot_acl - 1)
177 		return -EINVAL;
178 
179 	str = kstrdup(buf, GFP_KERNEL);
180 	if (!str)
181 		return -ENOMEM;
182 
183 	acl = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
184 	if (!acl) {
185 		ret = -ENOMEM;
186 		goto err_free_str;
187 	}
188 
189 	uuid_str = strim(str);
190 	while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) {
191 		size_t len = strlen(s);
192 
193 		if (len) {
194 			if (len != UUID_STRING_LEN) {
195 				ret = -EINVAL;
196 				goto err_free_acl;
197 			}
198 			ret = uuid_parse(s, &acl[i]);
199 			if (ret)
200 				goto err_free_acl;
201 		}
202 
203 		i++;
204 	}
205 
206 	if (s || i < tb->nboot_acl) {
207 		ret = -EINVAL;
208 		goto err_free_acl;
209 	}
210 
211 	if (mutex_lock_interruptible(&tb->lock)) {
212 		ret = -ERESTARTSYS;
213 		goto err_free_acl;
214 	}
215 	ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
216 	if (!ret) {
217 		/* Notify userspace about the change */
218 		kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
219 	}
220 	mutex_unlock(&tb->lock);
221 
222 err_free_acl:
223 	kfree(acl);
224 err_free_str:
225 	kfree(str);
226 
227 	return ret ?: count;
228 }
229 static DEVICE_ATTR_RW(boot_acl);
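
/*
 * A worked example of the boot_acl format parsed above, assuming a
 * hypothetical domain with tb->nboot_acl == 2 (the real number of ACL
 * slots is hardware/firmware dependent). The written string must
 * contain exactly nboot_acl comma-separated fields, each either a full
 * 36-character UUID or empty, in which case that slot is left as the
 * null UUID (unused):
 *
 *	"aabbccdd-0000-1111-2222-334455667788,99887766-5544-3322-1100-ffeeddccbbaa"
 *		- fills both slots
 *	"aabbccdd-0000-1111-2222-334455667788,"
 *		- fills slot 0, leaves slot 1 cleared
 *	","
 *		- clears both slots
 */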
230 
231 static ssize_t security_show(struct device *dev, struct device_attribute *attr,
232 			     char *buf)
233 {
234 	struct tb *tb = container_of(dev, struct tb, dev);
235 	const char *name = "unknown";
236 
237 	if (tb->security_level < ARRAY_SIZE(tb_security_names))
238 		name = tb_security_names[tb->security_level];
239 
240 	return sprintf(buf, "%s\n", name);
241 }
242 static DEVICE_ATTR_RO(security);
243 
244 static struct attribute *domain_attrs[] = {
245 	&dev_attr_boot_acl.attr,
246 	&dev_attr_security.attr,
247 	NULL,
248 };
249 
250 static umode_t domain_attr_is_visible(struct kobject *kobj,
251 				      struct attribute *attr, int n)
252 {
253 	struct device *dev = container_of(kobj, struct device, kobj);
254 	struct tb *tb = container_of(dev, struct tb, dev);
255 
256 	if (attr == &dev_attr_boot_acl.attr) {
257 		if (tb->nboot_acl &&
258 		    tb->cm_ops->get_boot_acl &&
259 		    tb->cm_ops->set_boot_acl)
260 			return attr->mode;
261 		return 0;
262 	}
263 
264 	return attr->mode;
265 }
266 
267 static struct attribute_group domain_attr_group = {
268 	.is_visible = domain_attr_is_visible,
269 	.attrs = domain_attrs,
270 };
271 
272 static const struct attribute_group *domain_attr_groups[] = {
273 	&domain_attr_group,
274 	NULL,
275 };
276 
277 struct bus_type tb_bus_type = {
278 	.name = "thunderbolt",
279 	.match = tb_service_match,
280 	.probe = tb_service_probe,
281 	.remove = tb_service_remove,
282 	.shutdown = tb_service_shutdown,
283 };
284 
285 static void tb_domain_release(struct device *dev)
286 {
287 	struct tb *tb = container_of(dev, struct tb, dev);
288 
289 	tb_ctl_free(tb->ctl);
290 	destroy_workqueue(tb->wq);
291 	ida_simple_remove(&tb_domain_ida, tb->index);
292 	mutex_destroy(&tb->lock);
293 	kfree(tb);
294 }
295 
296 struct device_type tb_domain_type = {
297 	.name = "thunderbolt_domain",
298 	.release = tb_domain_release,
299 };
300 
301 /**
302  * tb_domain_alloc() - Allocate a domain
303  * @nhi: Pointer to the host controller
304  * @privsize: Size of the connection manager private data
305  *
306  * Allocates and initializes a new Thunderbolt domain. Connection
307  * managers are expected to call this and then fill in @cm_ops
308  * accordingly.
309  *
310  * Call tb_domain_put() to release the domain before it has been added
311  * to the system.
312  *
313  * Return: allocated domain structure or %NULL in case of error
314  */
315 struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
316 {
317 	struct tb *tb;
318 
319 	/*
320 	 * Make sure the structure sizes match what the hardware
321 	 * expects because bit-fields are being used.
322 	 */
323 	BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
324 	BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
325 	BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);
326 
327 	tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
328 	if (!tb)
329 		return NULL;
330 
331 	tb->nhi = nhi;
332 	mutex_init(&tb->lock);
333 
334 	tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
335 	if (tb->index < 0)
336 		goto err_free;
337 
338 	tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
339 	if (!tb->wq)
340 		goto err_remove_ida;
341 
342 	tb->dev.parent = &nhi->pdev->dev;
343 	tb->dev.bus = &tb_bus_type;
344 	tb->dev.type = &tb_domain_type;
345 	tb->dev.groups = domain_attr_groups;
346 	dev_set_name(&tb->dev, "domain%d", tb->index);
347 	device_initialize(&tb->dev);
348 
349 	return tb;
350 
351 err_remove_ida:
352 	ida_simple_remove(&tb_domain_ida, tb->index);
353 err_free:
354 	kfree(tb);
355 
356 	return NULL;
357 }
358 
359 static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
360 			       const void *buf, size_t size)
361 {
362 	struct tb *tb = data;
363 
364 	if (!tb->cm_ops->handle_event) {
365 		tb_warn(tb, "domain does not have event handler\n");
366 		return true;
367 	}
368 
369 	switch (type) {
370 	case TB_CFG_PKG_XDOMAIN_REQ:
371 	case TB_CFG_PKG_XDOMAIN_RESP:
372 		return tb_xdomain_handle_request(tb, type, buf, size);
373 
374 	default:
375 		tb->cm_ops->handle_event(tb, type, buf, size);
376 	}
377 
378 	return true;
379 }
380 
381 /**
382  * tb_domain_add() - Add domain to the system
383  * @tb: Domain to add
384  *
385  * Starts the domain and adds it to the system. Hotplugging devices will
386  * work after this function has returned successfully. In order to remove
387  * and release the domain after this function has been called, call
388  * tb_domain_remove().
389  *
390  * Return: %0 in case of success and negative errno in case of error
391  */
392 int tb_domain_add(struct tb *tb)
393 {
394 	int ret;
395 
396 	if (WARN_ON(!tb->cm_ops))
397 		return -EINVAL;
398 
399 	mutex_lock(&tb->lock);
400 
401 	tb->ctl = tb_ctl_alloc(tb->nhi, tb_domain_event_cb, tb);
402 	if (!tb->ctl) {
403 		ret = -ENOMEM;
404 		goto err_unlock;
405 	}
406 
407 	/*
408 	 * tb_schedule_hotplug_handler may be called as soon as the config
409 	 * channel is started. That's why we have to hold the lock here.
410 	 */
411 	tb_ctl_start(tb->ctl);
412 
413 	if (tb->cm_ops->driver_ready) {
414 		ret = tb->cm_ops->driver_ready(tb);
415 		if (ret)
416 			goto err_ctl_stop;
417 	}
418 
419 	ret = device_add(&tb->dev);
420 	if (ret)
421 		goto err_ctl_stop;
422 
423 	/* Start the domain */
424 	if (tb->cm_ops->start) {
425 		ret = tb->cm_ops->start(tb);
426 		if (ret)
427 			goto err_domain_del;
428 	}
429 
430 	/* This starts event processing */
431 	mutex_unlock(&tb->lock);
432 
433 	return 0;
434 
435 err_domain_del:
436 	device_del(&tb->dev);
437 err_ctl_stop:
438 	tb_ctl_stop(tb->ctl);
439 err_unlock:
440 	mutex_unlock(&tb->lock);
441 
442 	return ret;
443 }
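
/*
 * A rough sketch of how a connection manager implementation is
 * expected to use tb_domain_alloc() and tb_domain_add() together. The
 * names my_cm_ops and struct my_cm are placeholders for this example,
 * not the actual connection managers in this driver:
 *
 *	struct tb *tb;
 *	int ret;
 *
 *	tb = tb_domain_alloc(nhi, sizeof(struct my_cm));
 *	if (!tb)
 *		return -ENOMEM;
 *
 *	tb->cm_ops = &my_cm_ops;	// must be filled in before adding
 *
 *	ret = tb_domain_add(tb);
 *	if (ret) {
 *		// not added to the system yet so just drop the reference
 *		tb_domain_put(tb);
 *		return ret;
 *	}
 *
 *	// ... later, on teardown:
 *	tb_domain_remove(tb);
 */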
444 
445 /**
446  * tb_domain_remove() - Removes and releases a domain
447  * @tb: Domain to remove
448  *
449  * Stops the domain, removes it from the system and releases all
450  * resources once the last reference has been released.
451  */
452 void tb_domain_remove(struct tb *tb)
453 {
454 	mutex_lock(&tb->lock);
455 	if (tb->cm_ops->stop)
456 		tb->cm_ops->stop(tb);
457 	/* Stop the domain control traffic */
458 	tb_ctl_stop(tb->ctl);
459 	mutex_unlock(&tb->lock);
460 
461 	flush_workqueue(tb->wq);
462 	device_unregister(&tb->dev);
463 }
464 
465 /**
466  * tb_domain_suspend_noirq() - Suspend a domain
467  * @tb: Domain to suspend
468  *
469  * Suspends all devices in the domain and stops the control channel.
470  */
471 int tb_domain_suspend_noirq(struct tb *tb)
472 {
473 	int ret = 0;
474 
475 	/*
476 	 * The control channel interrupt is left enabled during suspend
477 	 * and taking the lock here prevents any events happening before
478 	 * we actually have stopped the domain and the control channel.
479 	 */
480 	mutex_lock(&tb->lock);
481 	if (tb->cm_ops->suspend_noirq)
482 		ret = tb->cm_ops->suspend_noirq(tb);
483 	if (!ret)
484 		tb_ctl_stop(tb->ctl);
485 	mutex_unlock(&tb->lock);
486 
487 	return ret;
488 }
489 
490 /**
491  * tb_domain_resume_noirq() - Resume a domain
492  * @tb: Domain to resume
493  *
494  * Re-starts the control channel, and resumes all devices connected to
495  * the domain.
496  */
497 int tb_domain_resume_noirq(struct tb *tb)
498 {
499 	int ret = 0;
500 
501 	mutex_lock(&tb->lock);
502 	tb_ctl_start(tb->ctl);
503 	if (tb->cm_ops->resume_noirq)
504 		ret = tb->cm_ops->resume_noirq(tb);
505 	mutex_unlock(&tb->lock);
506 
507 	return ret;
508 }
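
/*
 * The suspend/resume hooks above are meant to be driven by the NHI PCI
 * driver's dev_pm_ops. A simplified sketch of that wiring (see nhi.c
 * for what the driver actually does around these calls):
 *
 *	static int nhi_suspend_noirq(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *		struct tb *tb = pci_get_drvdata(pdev);
 *
 *		return tb_domain_suspend_noirq(tb);
 *	}
 *
 *	static int nhi_resume_noirq(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *		struct tb *tb = pci_get_drvdata(pdev);
 *
 *		return tb_domain_resume_noirq(tb);
 *	}
 */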
509 
510 int tb_domain_suspend(struct tb *tb)
511 {
512 	int ret;
513 
514 	mutex_lock(&tb->lock);
515 	if (tb->cm_ops->suspend) {
516 		ret = tb->cm_ops->suspend(tb);
517 		if (ret) {
518 			mutex_unlock(&tb->lock);
519 			return ret;
520 		}
521 	}
522 	mutex_unlock(&tb->lock);
523 	return 0;
524 }
525 
526 void tb_domain_complete(struct tb *tb)
527 {
528 	mutex_lock(&tb->lock);
529 	if (tb->cm_ops->complete)
530 		tb->cm_ops->complete(tb);
531 	mutex_unlock(&tb->lock);
532 }
533 
534 /**
535  * tb_domain_approve_switch() - Approve switch
536  * @tb: Domain the switch belongs to
537  * @sw: Switch to approve
538  *
539  * This will approve the switch by connection manager specific means. In
540  * case of success the connection manager will create tunnels for all
541  * supported protocols.
542  */
543 int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
544 {
545 	struct tb_switch *parent_sw;
546 
547 	if (!tb->cm_ops->approve_switch)
548 		return -EPERM;
549 
550 	/* The parent switch must be authorized before this one */
551 	parent_sw = tb_to_switch(sw->dev.parent);
552 	if (!parent_sw || !parent_sw->authorized)
553 		return -EINVAL;
554 
555 	return tb->cm_ops->approve_switch(tb, sw);
556 }
557 
558 /**
559  * tb_domain_approve_switch_key() - Approve switch and add key
560  * @tb: Domain the switch belongs to
561  * @sw: Switch to approve
562  *
563  * For switches that support secure connect, this function first adds
564  * the key to the switch NVM using connection manager specific means. If
565  * adding the key is successful, the switch is approved and connected.
566  *
567  * Return: %0 on success and negative errno in case of failure.
568  */
569 int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
570 {
571 	struct tb_switch *parent_sw;
572 	int ret;
573 
574 	if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
575 		return -EPERM;
576 
577 	/* The parent switch must be authorized before this one */
578 	parent_sw = tb_to_switch(sw->dev.parent);
579 	if (!parent_sw || !parent_sw->authorized)
580 		return -EINVAL;
581 
582 	ret = tb->cm_ops->add_switch_key(tb, sw);
583 	if (ret)
584 		return ret;
585 
586 	return tb->cm_ops->approve_switch(tb, sw);
587 }
588 
589 /**
590  * tb_domain_challenge_switch_key() - Challenge and approve switch
591  * @tb: Domain the switch belongs to
592  * @sw: Switch to approve
593  *
594  * For switches that support secure connect, this function generates
595  * a random challenge and sends it to the switch. The switch responds to
596  * this and if the response matches our random challenge, the switch is
597  * approved and connected.
598  *
599  * Return: %0 on success and negative errno in case of failure.
600  */
601 int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
602 {
603 	u8 challenge[TB_SWITCH_KEY_SIZE];
604 	u8 response[TB_SWITCH_KEY_SIZE];
605 	u8 hmac[TB_SWITCH_KEY_SIZE];
606 	struct tb_switch *parent_sw;
607 	struct crypto_shash *tfm;
608 	struct shash_desc *shash;
609 	int ret;
610 
611 	if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
612 		return -EPERM;
613 
614 	/* The parent switch must be authorized before this one */
615 	parent_sw = tb_to_switch(sw->dev.parent);
616 	if (!parent_sw || !parent_sw->authorized)
617 		return -EINVAL;
618 
619 	get_random_bytes(challenge, sizeof(challenge));
620 	ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
621 	if (ret)
622 		return ret;
623 
624 	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
625 	if (IS_ERR(tfm))
626 		return PTR_ERR(tfm);
627 
628 	ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
629 	if (ret)
630 		goto err_free_tfm;
631 
632 	shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
633 			GFP_KERNEL);
634 	if (!shash) {
635 		ret = -ENOMEM;
636 		goto err_free_tfm;
637 	}
638 
639 	shash->tfm = tfm;
640 	shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
641 
642 	memset(hmac, 0, sizeof(hmac));
643 	ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
644 	if (ret)
645 		goto err_free_shash;
646 
647 	/* The returned HMAC must match the one we calculated */
648 	if (memcmp(response, hmac, sizeof(hmac))) {
649 		ret = -EKEYREJECTED;
650 		goto err_free_shash;
651 	}
652 
653 	crypto_free_shash(tfm);
654 	kfree(shash);
655 
656 	return tb->cm_ops->approve_switch(tb, sw);
657 
658 err_free_shash:
659 	kfree(shash);
660 err_free_tfm:
661 	crypto_free_shash(tfm);
662 
663 	return ret;
664 }
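
/*
 * In other words, the switch proves possession of the key programmed
 * with tb_domain_approve_switch_key() by returning
 *
 *	response = HMAC-SHA256(sw->key, challenge)
 *
 * and the code above recomputes the same HMAC locally and only
 * approves the switch if the two match.
 */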
665 
666 /**
667  * tb_domain_disconnect_pcie_paths() - Disconnect all PCIe paths
668  * @tb: Domain whose PCIe paths to disconnect
669  *
670  * This needs to be called in preparation for NVM upgrade of the host
671  * controller. Makes sure all PCIe paths are disconnected.
672  *
673  * Return: %0 on success and negative errno in case of error.
674  */
675 int tb_domain_disconnect_pcie_paths(struct tb *tb)
676 {
677 	if (!tb->cm_ops->disconnect_pcie_paths)
678 		return -EPERM;
679 
680 	return tb->cm_ops->disconnect_pcie_paths(tb);
681 }
682 
683 /**
684  * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
685  * @tb: Domain enabling the DMA paths
686  * @xd: XDomain DMA paths are created to
687  *
688  * Calls connection manager specific method to enable DMA paths to the
689  * XDomain in question.
690  *
691  * Return: %0 in case of success and negative errno otherwise. In
692  * particular returns %-ENOTSUPP if the connection manager
693  * implementation does not support XDomains.
694  */
695 int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
696 {
697 	if (!tb->cm_ops->approve_xdomain_paths)
698 		return -ENOTSUPP;
699 
700 	return tb->cm_ops->approve_xdomain_paths(tb, xd);
701 }
702 
703 /**
704  * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
705  * @tb: Domain disabling the DMA paths
706  * @xd: XDomain whose DMA paths are disconnected
707  *
708  * Calls connection manager specific method to disconnect DMA paths to
709  * the XDomain in question.
710  *
711  * Return: %0 in case of success and negative errno otherwise. In
712  * particular returns %-ENOTSUPP if the connection manager
713  * implementation does not support XDomains.
714  */
715 int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
716 {
717 	if (!tb->cm_ops->disconnect_xdomain_paths)
718 		return -ENOTSUPP;
719 
720 	return tb->cm_ops->disconnect_xdomain_paths(tb, xd);
721 }
722 
723 static int disconnect_xdomain(struct device *dev, void *data)
724 {
725 	struct tb_xdomain *xd;
726 	struct tb *tb = data;
727 	int ret = 0;
728 
729 	xd = tb_to_xdomain(dev);
730 	if (xd && xd->tb == tb)
731 		ret = tb_xdomain_disable_paths(xd);
732 
733 	return ret;
734 }
735 
736 /**
737  * tb_domain_disconnect_all_paths() - Disconnect all paths for the domain
738  * @tb: Domain whose paths are disconnected
739  *
740  * This function can be used to disconnect all paths (PCIe, XDomain) for
741  * example in preparation for host NVM firmware upgrade. After this is
742  * called the paths cannot be established without resetting the switch.
743  *
744  * Return: %0 in case of success and negative errno otherwise.
745  */
746 int tb_domain_disconnect_all_paths(struct tb *tb)
747 {
748 	int ret;
749 
750 	ret = tb_domain_disconnect_pcie_paths(tb);
751 	if (ret)
752 		return ret;
753 
754 	return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
755 }
756 
757 int tb_domain_init(void)
758 {
759 	int ret;
760 
761 	ret = tb_xdomain_init();
762 	if (ret)
763 		return ret;
764 	ret = bus_register(&tb_bus_type);
765 	if (ret)
766 		tb_xdomain_exit();
767 
768 	return ret;
769 }
770 
771 void tb_domain_exit(void)
772 {
773 	bus_unregister(&tb_bus_type);
774 	ida_destroy(&tb_domain_ida);
775 	tb_switch_exit();
776 	tb_xdomain_exit();
777 }
778