// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt bus support
 *
 * Copyright (C) 2017, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/dmar.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <crypto/hash.h>

#include "tb.h"

static DEFINE_IDA(tb_domain_ida);

static bool match_service_id(const struct tb_service_id *id,
			     const struct tb_service *svc)
{
	if (id->match_flags & TBSVC_MATCH_PROTOCOL_KEY) {
		if (strcmp(id->protocol_key, svc->key))
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_ID) {
		if (id->protocol_id != svc->prtcid)
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
		if (id->protocol_version != svc->prtcvers)
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_REVISION) {
		if (id->protocol_revision != svc->prtcrevs)
			return false;
	}

	return true;
}

static const struct tb_service_id *__tb_service_match(struct device *dev,
						      struct device_driver *drv)
{
	struct tb_service_driver *driver;
	const struct tb_service_id *ids;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return NULL;

	driver = container_of(drv, struct tb_service_driver, driver);
	if (!driver->id_table)
		return NULL;

	for (ids = driver->id_table; ids->match_flags != 0; ids++) {
		if (match_service_id(ids, svc))
			return ids;
	}

	return NULL;
}

static int tb_service_match(struct device *dev, struct device_driver *drv)
{
	return !!__tb_service_match(dev, drv);
}
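
/*
 * Illustrative example (not part of this file): service drivers match
 * against the checks above through their id_table. The TB_SERVICE()
 * helper and MODULE_DEVICE_TABLE() come from <linux/thunderbolt.h> and
 * <linux/module.h>; the table name and the "network" key mirror the
 * thunderbolt-net driver and are only illustrative here:
 *
 *	static const struct tb_service_id example_ids[] = {
 *		{ TB_SERVICE("network", 1) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(tbsvc, example_ids);
 */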

static int tb_service_probe(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tb_service_driver *driver;
	const struct tb_service_id *id;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	id = __tb_service_match(dev, &driver->driver);

	return driver->probe(svc, id);
}

static int tb_service_remove(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tb_service_driver *driver;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	if (driver->remove)
		driver->remove(svc);

	return 0;
}

static void tb_service_shutdown(struct device *dev)
{
	struct tb_service_driver *driver;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc || !dev->driver)
		return;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	if (driver->shutdown)
		driver->shutdown(svc);
}
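
/*
 * Illustrative example (hypothetical driver): the bus callbacks above
 * dispatch into a struct tb_service_driver. A minimal driver, with
 * hypothetical example_probe()/example_remove() callbacks and the
 * example_ids table sketched earlier, could be registered like this:
 *
 *	static struct tb_service_driver example_driver = {
 *		.driver.name = "example",
 *		.probe = example_probe,
 *		.remove = example_remove,
 *		.id_table = example_ids,
 *	};
 *	module_tb_service_driver(example_driver);
 *
 * tb_register_service_driver()/tb_unregister_service_driver() are the
 * non-macro equivalents.
 */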

static const char * const tb_security_names[] = {
	[TB_SECURITY_NONE] = "none",
	[TB_SECURITY_USER] = "user",
	[TB_SECURITY_SECURE] = "secure",
	[TB_SECURITY_DPONLY] = "dponly",
	[TB_SECURITY_USBONLY] = "usbonly",
	[TB_SECURITY_NOPCIE] = "nopcie",
};

static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	uuid_t *uuids;
	ssize_t ret;
	int i;

	uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
	if (!uuids)
		return -ENOMEM;

	pm_runtime_get_sync(&tb->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out;
	}
	ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);
	if (ret) {
		mutex_unlock(&tb->lock);
		goto out;
	}
	mutex_unlock(&tb->lock);

	for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
		if (!uuid_is_null(&uuids[i]))
			ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%pUb",
					&uuids[i]);

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s",
			       i < tb->nboot_acl - 1 ? "," : "\n");
	}

out:
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);
	kfree(uuids);

	return ret;
}

static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	char *str, *s, *uuid_str;
	ssize_t ret = 0;
	uuid_t *acl;
	int i = 0;

	/*
	 * Make sure the value is not bigger than tb->nboot_acl * UUID
	 * length + commas and optional "\n". Also the smallest allowable
	 * string is tb->nboot_acl - 1 commas.
	 */
	if (count > (UUID_STRING_LEN + 1) * tb->nboot_acl + 1)
		return -EINVAL;
	if (count < tb->nboot_acl - 1)
		return -EINVAL;

	str = kstrdup(buf, GFP_KERNEL);
	if (!str)
		return -ENOMEM;

	acl = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
	if (!acl) {
		ret = -ENOMEM;
		goto err_free_str;
	}

	uuid_str = strim(str);
	while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) {
		size_t len = strlen(s);

		if (len) {
			if (len != UUID_STRING_LEN) {
				ret = -EINVAL;
				goto err_free_acl;
			}
			ret = uuid_parse(s, &acl[i]);
			if (ret)
				goto err_free_acl;
		}

		i++;
	}

	if (s || i < tb->nboot_acl) {
		ret = -EINVAL;
		goto err_free_acl;
	}

	pm_runtime_get_sync(&tb->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto err_rpm_put;
	}
	ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
	if (!ret) {
		/* Notify userspace about the change */
		kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
	}
	mutex_unlock(&tb->lock);

err_rpm_put:
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);
err_free_acl:
	kfree(acl);
err_free_str:
	kfree(str);

	return ret ?: count;
}
static DEVICE_ATTR_RW(boot_acl);
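
/*
 * Example of the format boot_acl_store() accepts (placeholder UUIDs):
 * exactly tb->nboot_acl comma-separated slots, each either empty
 * (leaves the slot as a null UUID, i.e. cleared) or a full
 * UUID_STRING_LEN UUID. With four ACL slots a valid write would be
 *
 *	00000000-0000-0000-0000-000000000001,,00000000-0000-0000-0000-000000000003,
 *
 * which sets slots 0 and 2 and clears slots 1 and 3. A trailing
 * newline is allowed.
 */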

static ssize_t deauthorization_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	const struct tb *tb = container_of(dev, struct tb, dev);
	bool deauthorization = false;

	/* Only meaningful if authorization is supported */
	if (tb->security_level == TB_SECURITY_USER ||
	    tb->security_level == TB_SECURITY_SECURE)
		deauthorization = !!tb->cm_ops->disapprove_switch;

	return sprintf(buf, "%d\n", deauthorization);
}
static DEVICE_ATTR_RO(deauthorization);

static ssize_t iommu_dma_protection_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	/*
	 * Kernel DMA protection is a feature where Thunderbolt security
	 * is handled natively by the IOMMU. It is enabled when the IOMMU
	 * is enabled and the ACPI DMAR table has DMAR_PLATFORM_OPT_IN set.
	 */
	return sprintf(buf, "%d\n",
		       iommu_present(&pci_bus_type) && dmar_platform_optin());
}
static DEVICE_ATTR_RO(iommu_dma_protection);

static ssize_t security_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	const char *name = "unknown";

	if (tb->security_level < ARRAY_SIZE(tb_security_names))
		name = tb_security_names[tb->security_level];

	return sprintf(buf, "%s\n", name);
}
static DEVICE_ATTR_RO(security);

static struct attribute *domain_attrs[] = {
	&dev_attr_boot_acl.attr,
	&dev_attr_deauthorization.attr,
	&dev_attr_iommu_dma_protection.attr,
	&dev_attr_security.attr,
	NULL,
};

static umode_t domain_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct tb *tb = container_of(dev, struct tb, dev);

	if (attr == &dev_attr_boot_acl.attr) {
		if (tb->nboot_acl &&
		    tb->cm_ops->get_boot_acl &&
		    tb->cm_ops->set_boot_acl)
			return attr->mode;
		return 0;
	}

	return attr->mode;
}

static const struct attribute_group domain_attr_group = {
	.is_visible = domain_attr_is_visible,
	.attrs = domain_attrs,
};

static const struct attribute_group *domain_attr_groups[] = {
	&domain_attr_group,
	NULL,
};

struct bus_type tb_bus_type = {
	.name = "thunderbolt",
	.match = tb_service_match,
	.probe = tb_service_probe,
	.remove = tb_service_remove,
	.shutdown = tb_service_shutdown,
};

static void tb_domain_release(struct device *dev)
{
	struct tb *tb = container_of(dev, struct tb, dev);

	tb_ctl_free(tb->ctl);
	destroy_workqueue(tb->wq);
	ida_simple_remove(&tb_domain_ida, tb->index);
	mutex_destroy(&tb->lock);
	kfree(tb);
}

struct device_type tb_domain_type = {
	.name = "thunderbolt_domain",
	.release = tb_domain_release,
};

/**
 * tb_domain_alloc() - Allocate a domain
 * @nhi: Pointer to the host controller
 * @privsize: Size of the connection manager private data
 *
 * Allocates and initializes a new Thunderbolt domain. Connection
 * managers are expected to call this and then fill in @cm_ops
 * accordingly.
 *
 * Call tb_domain_put() to release the domain before it has been added
 * to the system.
 *
 * Return: allocated domain structure or %NULL in case of error
 */
struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
{
	struct tb *tb;

	/*
	 * Make sure the structure sizes match what the hardware expects
	 * because bit-fields are being used.
	 */
	BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);

	tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
	if (!tb)
		return NULL;

	tb->nhi = nhi;
	mutex_init(&tb->lock);

	tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
	if (tb->index < 0)
		goto err_free;

	tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
	if (!tb->wq)
		goto err_remove_ida;

	tb->dev.parent = &nhi->pdev->dev;
	tb->dev.bus = &tb_bus_type;
	tb->dev.type = &tb_domain_type;
	tb->dev.groups = domain_attr_groups;
	dev_set_name(&tb->dev, "domain%d", tb->index);
	device_initialize(&tb->dev);

	return tb;

err_remove_ida:
	ida_simple_remove(&tb_domain_ida, tb->index);
err_free:
	kfree(tb);

	return NULL;
}

static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size)
{
	struct tb *tb = data;

	if (!tb->cm_ops->handle_event) {
		tb_warn(tb, "domain does not have event handler\n");
		return true;
	}

	switch (type) {
	case TB_CFG_PKG_XDOMAIN_REQ:
	case TB_CFG_PKG_XDOMAIN_RESP:
		if (tb_is_xdomain_enabled())
			return tb_xdomain_handle_request(tb, type, buf, size);
		break;

	default:
		tb->cm_ops->handle_event(tb, type, buf, size);
	}

	return true;
}

/**
 * tb_domain_add() - Add domain to the system
 * @tb: Domain to add
 *
 * Starts the domain and adds it to the system. Hotplugging devices will
 * work after this function has returned successfully. In order to
 * remove and release the domain after this function has been called,
 * call tb_domain_remove().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_domain_add(struct tb *tb)
{
	int ret;

	if (WARN_ON(!tb->cm_ops))
		return -EINVAL;

	mutex_lock(&tb->lock);

	tb->ctl = tb_ctl_alloc(tb->nhi, tb_domain_event_cb, tb);
	if (!tb->ctl) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	/*
	 * tb_schedule_hotplug_handler may be called as soon as the config
	 * channel is started. That's why we have to hold the lock here.
	 */
	tb_ctl_start(tb->ctl);

	if (tb->cm_ops->driver_ready) {
		ret = tb->cm_ops->driver_ready(tb);
		if (ret)
			goto err_ctl_stop;
	}

	tb_dbg(tb, "security level set to %s\n",
	       tb_security_names[tb->security_level]);

	ret = device_add(&tb->dev);
	if (ret)
		goto err_ctl_stop;

	/* Start the domain */
	if (tb->cm_ops->start) {
		ret = tb->cm_ops->start(tb);
		if (ret)
			goto err_domain_del;
	}

	/* This starts event processing */
	mutex_unlock(&tb->lock);

	device_init_wakeup(&tb->dev, true);

	pm_runtime_no_callbacks(&tb->dev);
	pm_runtime_set_active(&tb->dev);
	pm_runtime_enable(&tb->dev);
	pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_use_autosuspend(&tb->dev);

	return 0;

err_domain_del:
	device_del(&tb->dev);
err_ctl_stop:
	tb_ctl_stop(tb->ctl);
err_unlock:
	mutex_unlock(&tb->lock);

	return ret;
}

/**
 * tb_domain_remove() - Removes and releases a domain
 * @tb: Domain to remove
 *
 * Stops the domain, removes it from the system and releases all
 * resources once the last reference has been released.
 */
void tb_domain_remove(struct tb *tb)
{
	mutex_lock(&tb->lock);
	if (tb->cm_ops->stop)
		tb->cm_ops->stop(tb);
	/* Stop the domain control traffic */
	tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	flush_workqueue(tb->wq);
	device_unregister(&tb->dev);
}
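
/*
 * Illustrative sketch of how a connection manager ties tb_domain_alloc(),
 * tb_domain_add() and tb_domain_remove() together. example_cm_ops and
 * struct example_cm are hypothetical; the tb_domain_*() calls and
 * tb_domain_put() are the real API:
 *
 *	static struct tb *example_cm_probe(struct tb_nhi *nhi)
 *	{
 *		struct tb *tb;
 *
 *		tb = tb_domain_alloc(nhi, sizeof(struct example_cm));
 *		if (!tb)
 *			return NULL;
 *
 *		tb->cm_ops = &example_cm_ops;
 *		if (tb_domain_add(tb)) {
 *			tb_domain_put(tb);
 *			return NULL;
 *		}
 *		return tb;
 *	}
 *
 * Teardown is the mirror image: tb_domain_remove() stops the domain and
 * drops the final reference, which lands in tb_domain_release().
 */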

/**
 * tb_domain_suspend_noirq() - Suspend a domain
 * @tb: Domain to suspend
 *
 * Suspends all devices in the domain and stops the control channel.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_domain_suspend_noirq(struct tb *tb)
{
	int ret = 0;

	/*
	 * The control channel interrupt is left enabled during suspend
	 * and taking the lock here prevents any events happening before
	 * we actually have stopped the domain and the control channel.
	 */
	mutex_lock(&tb->lock);
	if (tb->cm_ops->suspend_noirq)
		ret = tb->cm_ops->suspend_noirq(tb);
	if (!ret)
		tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	return ret;
}

/**
 * tb_domain_resume_noirq() - Resume a domain
 * @tb: Domain to resume
 *
 * Re-starts the control channel, and resumes all devices connected to
 * the domain.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_domain_resume_noirq(struct tb *tb)
{
	int ret = 0;

	mutex_lock(&tb->lock);
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->resume_noirq)
		ret = tb->cm_ops->resume_noirq(tb);
	mutex_unlock(&tb->lock);

	return ret;
}

int tb_domain_suspend(struct tb *tb)
{
	return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
}

int tb_domain_freeze_noirq(struct tb *tb)
{
	int ret = 0;

	mutex_lock(&tb->lock);
	if (tb->cm_ops->freeze_noirq)
		ret = tb->cm_ops->freeze_noirq(tb);
	if (!ret)
		tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	return ret;
}

int tb_domain_thaw_noirq(struct tb *tb)
{
	int ret = 0;

	mutex_lock(&tb->lock);
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->thaw_noirq)
		ret = tb->cm_ops->thaw_noirq(tb);
	mutex_unlock(&tb->lock);

	return ret;
}

void tb_domain_complete(struct tb *tb)
{
	if (tb->cm_ops->complete)
		tb->cm_ops->complete(tb);
}

int tb_domain_runtime_suspend(struct tb *tb)
{
	if (tb->cm_ops->runtime_suspend) {
		int ret = tb->cm_ops->runtime_suspend(tb);

		if (ret)
			return ret;
	}
	tb_ctl_stop(tb->ctl);
	return 0;
}

int tb_domain_runtime_resume(struct tb *tb)
{
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->runtime_resume) {
		int ret = tb->cm_ops->runtime_resume(tb);

		if (ret)
			return ret;
	}
	return 0;
}
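
/*
 * Illustrative sketch: the suspend/resume helpers above are not called
 * by service drivers; the NHI driver invokes them from its PM
 * callbacks, along the lines of this hypothetical noirq hook (assuming
 * the domain pointer is stored as PCI driver data, as nhi.c does):
 *
 *	static int example_nhi_suspend_noirq(struct device *dev)
 *	{
 *		struct tb *tb = pci_get_drvdata(to_pci_dev(dev));
 *
 *		return tb_domain_suspend_noirq(tb);
 *	}
 */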

/**
 * tb_domain_disapprove_switch() - Disapprove switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to disapprove
 *
 * This will disconnect the PCIe tunnel from the parent to @sw.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_disapprove_switch(struct tb *tb, struct tb_switch *sw)
{
	if (!tb->cm_ops->disapprove_switch)
		return -EPERM;

	return tb->cm_ops->disapprove_switch(tb, sw);
}

/**
 * tb_domain_approve_switch() - Approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * This will approve the switch by connection manager specific means.
 * In case of success the connection manager will create a PCIe tunnel
 * from the parent to @sw.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent_sw;

	if (!tb->cm_ops->approve_switch)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	return tb->cm_ops->approve_switch(tb, sw);
}

/**
 * tb_domain_approve_switch_key() - Approve switch and add key
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function first adds
 * the key to the switch NVM using connection manager specific means.
 * If adding the key is successful, the switch is approved and
 * connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent_sw;
	int ret;

	if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	ret = tb->cm_ops->add_switch_key(tb, sw);
	if (ret)
		return ret;

	return tb->cm_ops->approve_switch(tb, sw);
}

/**
 * tb_domain_challenge_switch_key() - Challenge and approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function generates a
 * random challenge and sends it to the switch. The switch responds to
 * this and if the response matches the HMAC we compute over the
 * challenge, the switch is approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
{
	u8 challenge[TB_SWITCH_KEY_SIZE];
	u8 response[TB_SWITCH_KEY_SIZE];
	u8 hmac[TB_SWITCH_KEY_SIZE];
	struct tb_switch *parent_sw;
	struct crypto_shash *tfm;
	struct shash_desc *shash;
	int ret;

	if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	get_random_bytes(challenge, sizeof(challenge));
	ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
	if (ret)
		return ret;

	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
	if (ret)
		goto err_free_tfm;

	shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
			GFP_KERNEL);
	if (!shash) {
		ret = -ENOMEM;
		goto err_free_tfm;
	}

	shash->tfm = tfm;

	memset(hmac, 0, sizeof(hmac));
	ret = crypto_shash_digest(shash, challenge, sizeof(challenge), hmac);
	if (ret)
		goto err_free_shash;

	/* The returned HMAC must match the one we calculated */
	if (memcmp(response, hmac, sizeof(hmac))) {
		ret = -EKEYREJECTED;
		goto err_free_shash;
	}

	crypto_free_shash(tfm);
	kfree(shash);

	return tb->cm_ops->approve_switch(tb, sw);

err_free_shash:
	kfree(shash);
err_free_tfm:
	crypto_free_shash(tfm);

	return ret;
}
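
/*
 * Note on the exchange above: the switch is expected to answer with
 * response = HMAC-SHA256(key, challenge), keyed with the
 * TB_SWITCH_KEY_SIZE key previously stored in its NVM. The host
 * recomputes the same digest with crypto_shash_digest() and rejects
 * the switch with -EKEYREJECTED on mismatch.
 */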

/**
 * tb_domain_disconnect_pcie_paths() - Disconnect all PCIe paths
 * @tb: Domain whose PCIe paths to disconnect
 *
 * This needs to be called in preparation for NVM upgrade of the host
 * controller. Makes sure all PCIe paths are disconnected.
 *
 * Return: %0 on success and negative errno in case of error.
 */
int tb_domain_disconnect_pcie_paths(struct tb *tb)
{
	if (!tb->cm_ops->disconnect_pcie_paths)
		return -EPERM;

	return tb->cm_ops->disconnect_pcie_paths(tb);
}

/**
 * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
 * @tb: Domain enabling the DMA paths
 * @xd: XDomain DMA paths are created to
 *
 * Calls connection manager specific method to enable DMA paths to the
 * XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!tb->cm_ops->approve_xdomain_paths)
		return -ENOTSUPP;

	return tb->cm_ops->approve_xdomain_paths(tb, xd);
}

/**
 * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
 * @tb: Domain disabling the DMA paths
 * @xd: XDomain whose DMA paths are disconnected
 *
 * Calls connection manager specific method to disconnect DMA paths to
 * the XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!tb->cm_ops->disconnect_xdomain_paths)
		return -ENOTSUPP;

	return tb->cm_ops->disconnect_xdomain_paths(tb, xd);
}

static int disconnect_xdomain(struct device *dev, void *data)
{
	struct tb_xdomain *xd;
	struct tb *tb = data;
	int ret = 0;

	xd = tb_to_xdomain(dev);
	if (xd && xd->tb == tb)
		ret = tb_xdomain_disable_paths(xd);

	return ret;
}

/**
 * tb_domain_disconnect_all_paths() - Disconnect all paths for the domain
 * @tb: Domain whose paths are disconnected
 *
 * This function can be used to disconnect all paths (PCIe, XDomain) for
 * example in preparation for host NVM firmware upgrade. After this is
 * called the paths cannot be established without resetting the switch.
 *
 * Return: %0 in case of success and negative errno otherwise.
 */
int tb_domain_disconnect_all_paths(struct tb *tb)
{
	int ret;

	ret = tb_domain_disconnect_pcie_paths(tb);
	if (ret)
		return ret;

	return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
}

int tb_domain_init(void)
{
	int ret;

	tb_test_init();

	tb_debugfs_init();
	ret = tb_xdomain_init();
	if (ret)
		goto err_debugfs;
	ret = bus_register(&tb_bus_type);
	if (ret)
		goto err_xdomain;

	return 0;

err_xdomain:
	tb_xdomain_exit();
err_debugfs:
	tb_debugfs_exit();
	tb_test_exit();

	return ret;
}

void tb_domain_exit(void)
{
	bus_unregister(&tb_bus_type);
	ida_destroy(&tb_domain_ida);
	tb_nvm_exit();
	tb_xdomain_exit();
	tb_debugfs_exit();
	tb_test_exit();
}
900