xref: /openbmc/linux/drivers/thunderbolt/domain.c (revision 3c1d704d)
1fd3b339cSMika Westerberg // SPDX-License-Identifier: GPL-2.0
29d3cce0bSMika Westerberg /*
39d3cce0bSMika Westerberg  * Thunderbolt bus support
49d3cce0bSMika Westerberg  *
59d3cce0bSMika Westerberg  * Copyright (C) 2017, Intel Corporation
69d3cce0bSMika Westerberg  * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
79d3cce0bSMika Westerberg  */
89d3cce0bSMika Westerberg 
99d3cce0bSMika Westerberg #include <linux/device.h>
109d3cce0bSMika Westerberg #include <linux/idr.h>
119d3cce0bSMika Westerberg #include <linux/module.h>
122d8ff0b5SMika Westerberg #include <linux/pm_runtime.h>
139d3cce0bSMika Westerberg #include <linux/slab.h>
14f67cf491SMika Westerberg #include <linux/random.h>
15f67cf491SMika Westerberg #include <crypto/hash.h>
169d3cce0bSMika Westerberg 
179d3cce0bSMika Westerberg #include "tb.h"
189d3cce0bSMika Westerberg 
/* Hands out unique domain indices used in the "domain%d" device names */
static DEFINE_IDA(tb_domain_ida);
209d3cce0bSMika Westerberg 
match_service_id(const struct tb_service_id * id,const struct tb_service * svc)21d1ff7024SMika Westerberg static bool match_service_id(const struct tb_service_id *id,
22d1ff7024SMika Westerberg 			     const struct tb_service *svc)
23d1ff7024SMika Westerberg {
24d1ff7024SMika Westerberg 	if (id->match_flags & TBSVC_MATCH_PROTOCOL_KEY) {
25d1ff7024SMika Westerberg 		if (strcmp(id->protocol_key, svc->key))
26d1ff7024SMika Westerberg 			return false;
27d1ff7024SMika Westerberg 	}
28d1ff7024SMika Westerberg 
29d1ff7024SMika Westerberg 	if (id->match_flags & TBSVC_MATCH_PROTOCOL_ID) {
30d1ff7024SMika Westerberg 		if (id->protocol_id != svc->prtcid)
31d1ff7024SMika Westerberg 			return false;
32d1ff7024SMika Westerberg 	}
33d1ff7024SMika Westerberg 
34d1ff7024SMika Westerberg 	if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
35d1ff7024SMika Westerberg 		if (id->protocol_version != svc->prtcvers)
36d1ff7024SMika Westerberg 			return false;
37d1ff7024SMika Westerberg 	}
38d1ff7024SMika Westerberg 
39d1ff7024SMika Westerberg 	if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
40d1ff7024SMika Westerberg 		if (id->protocol_revision != svc->prtcrevs)
41d1ff7024SMika Westerberg 			return false;
42d1ff7024SMika Westerberg 	}
43d1ff7024SMika Westerberg 
44d1ff7024SMika Westerberg 	return true;
45d1ff7024SMika Westerberg }
46d1ff7024SMika Westerberg 
__tb_service_match(struct device * dev,struct device_driver * drv)47d1ff7024SMika Westerberg static const struct tb_service_id *__tb_service_match(struct device *dev,
48d1ff7024SMika Westerberg 						      struct device_driver *drv)
49d1ff7024SMika Westerberg {
50d1ff7024SMika Westerberg 	struct tb_service_driver *driver;
51d1ff7024SMika Westerberg 	const struct tb_service_id *ids;
52d1ff7024SMika Westerberg 	struct tb_service *svc;
53d1ff7024SMika Westerberg 
54d1ff7024SMika Westerberg 	svc = tb_to_service(dev);
55d1ff7024SMika Westerberg 	if (!svc)
56d1ff7024SMika Westerberg 		return NULL;
57d1ff7024SMika Westerberg 
58d1ff7024SMika Westerberg 	driver = container_of(drv, struct tb_service_driver, driver);
59d1ff7024SMika Westerberg 	if (!driver->id_table)
60d1ff7024SMika Westerberg 		return NULL;
61d1ff7024SMika Westerberg 
62d1ff7024SMika Westerberg 	for (ids = driver->id_table; ids->match_flags != 0; ids++) {
63d1ff7024SMika Westerberg 		if (match_service_id(ids, svc))
64d1ff7024SMika Westerberg 			return ids;
65d1ff7024SMika Westerberg 	}
66d1ff7024SMika Westerberg 
67d1ff7024SMika Westerberg 	return NULL;
68d1ff7024SMika Westerberg }
69d1ff7024SMika Westerberg 
tb_service_match(struct device * dev,struct device_driver * drv)70d1ff7024SMika Westerberg static int tb_service_match(struct device *dev, struct device_driver *drv)
71d1ff7024SMika Westerberg {
72d1ff7024SMika Westerberg 	return !!__tb_service_match(dev, drv);
73d1ff7024SMika Westerberg }
74d1ff7024SMika Westerberg 
tb_service_probe(struct device * dev)75d1ff7024SMika Westerberg static int tb_service_probe(struct device *dev)
76d1ff7024SMika Westerberg {
77d1ff7024SMika Westerberg 	struct tb_service *svc = tb_to_service(dev);
78d1ff7024SMika Westerberg 	struct tb_service_driver *driver;
79d1ff7024SMika Westerberg 	const struct tb_service_id *id;
80d1ff7024SMika Westerberg 
81d1ff7024SMika Westerberg 	driver = container_of(dev->driver, struct tb_service_driver, driver);
82d1ff7024SMika Westerberg 	id = __tb_service_match(dev, &driver->driver);
83d1ff7024SMika Westerberg 
84d1ff7024SMika Westerberg 	return driver->probe(svc, id);
85d1ff7024SMika Westerberg }
86d1ff7024SMika Westerberg 
tb_service_remove(struct device * dev)87fc7a6209SUwe Kleine-König static void tb_service_remove(struct device *dev)
88d1ff7024SMika Westerberg {
89d1ff7024SMika Westerberg 	struct tb_service *svc = tb_to_service(dev);
90d1ff7024SMika Westerberg 	struct tb_service_driver *driver;
91d1ff7024SMika Westerberg 
92d1ff7024SMika Westerberg 	driver = container_of(dev->driver, struct tb_service_driver, driver);
93d1ff7024SMika Westerberg 	if (driver->remove)
94d1ff7024SMika Westerberg 		driver->remove(svc);
95d1ff7024SMika Westerberg }
96d1ff7024SMika Westerberg 
tb_service_shutdown(struct device * dev)97d1ff7024SMika Westerberg static void tb_service_shutdown(struct device *dev)
98d1ff7024SMika Westerberg {
99d1ff7024SMika Westerberg 	struct tb_service_driver *driver;
100d1ff7024SMika Westerberg 	struct tb_service *svc;
101d1ff7024SMika Westerberg 
102d1ff7024SMika Westerberg 	svc = tb_to_service(dev);
103d1ff7024SMika Westerberg 	if (!svc || !dev->driver)
104d1ff7024SMika Westerberg 		return;
105d1ff7024SMika Westerberg 
106d1ff7024SMika Westerberg 	driver = container_of(dev->driver, struct tb_service_driver, driver);
107d1ff7024SMika Westerberg 	if (driver->shutdown)
108d1ff7024SMika Westerberg 		driver->shutdown(svc);
109d1ff7024SMika Westerberg }
110d1ff7024SMika Westerberg 
/*
 * Human-readable names for the "security" sysfs attribute, indexed by
 * the domain's security level (see security_show() below).
 */
static const char * const tb_security_names[] = {
	[TB_SECURITY_NONE] = "none",
	[TB_SECURITY_USER] = "user",
	[TB_SECURITY_SECURE] = "secure",
	[TB_SECURITY_DPONLY] = "dponly",
	[TB_SECURITY_USBONLY] = "usbonly",
	[TB_SECURITY_NOPCIE] = "nopcie",
};
119f67cf491SMika Westerberg 
/*
 * Show the preboot ACL as a comma-separated list of tb->nboot_acl
 * slots. Unused (null UUID) slots are emitted as empty strings between
 * the commas.
 */
static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	uuid_t *uuids;
	ssize_t ret;
	int i;

	uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
	if (!uuids)
		return -ENOMEM;

	/* Reading the ACL talks to the controller; make sure it is resumed */
	pm_runtime_get_sync(&tb->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out;
	}
	ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);
	if (ret) {
		mutex_unlock(&tb->lock);
		goto out;
	}
	mutex_unlock(&tb->lock);

	/* Format outside the lock; null UUIDs print as empty slots */
	for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
		if (!uuid_is_null(&uuids[i]))
			ret += sysfs_emit_at(buf, ret, "%pUb", &uuids[i]);

		ret += sysfs_emit_at(buf, ret, "%s", i < tb->nboot_acl - 1 ? "," : "\n");
	}

out:
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);
	kfree(uuids);

	return ret;
}
1599aaa3b8bSMika Westerberg 
/*
 * Store a new preboot ACL. The input must contain exactly tb->nboot_acl
 * comma-separated entries; each entry is either a full UUID string or
 * empty (leaves that slot as the null UUID).
 */
static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	char *str, *s, *uuid_str;
	ssize_t ret = 0;
	uuid_t *acl;
	int i = 0;

	/*
	 * Make sure the value is not bigger than tb->nboot_acl * UUID
	 * length + commas and optional "\n". Also the smallest allowable
	 * string is tb->nboot_acl * ",".
	 */
	if (count > (UUID_STRING_LEN + 1) * tb->nboot_acl + 1)
		return -EINVAL;
	if (count < tb->nboot_acl - 1)
		return -EINVAL;

	/* Work on a mutable copy since strsep()/strim() modify the string */
	str = kstrdup(buf, GFP_KERNEL);
	if (!str)
		return -ENOMEM;

	acl = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
	if (!acl) {
		ret = -ENOMEM;
		goto err_free_str;
	}

	/* Parse each token; an empty token keeps the slot null */
	uuid_str = strim(str);
	while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) {
		size_t len = strlen(s);

		if (len) {
			if (len != UUID_STRING_LEN) {
				ret = -EINVAL;
				goto err_free_acl;
			}
			ret = uuid_parse(s, &acl[i]);
			if (ret)
				goto err_free_acl;
		}

		i++;
	}

	/* Reject input with too many (s != NULL) or too few (i short) entries */
	if (s || i < tb->nboot_acl) {
		ret = -EINVAL;
		goto err_free_acl;
	}

	/* Writing the ACL talks to the controller; make sure it is resumed */
	pm_runtime_get_sync(&tb->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto err_rpm_put;
	}
	ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
	if (!ret) {
		/* Notify userspace about the change */
		kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
	}
	mutex_unlock(&tb->lock);

err_rpm_put:
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);
err_free_acl:
	kfree(acl);
err_free_str:
	kfree(str);

	return ret ?: count;
}
static DEVICE_ATTR_RW(boot_acl);
2359aaa3b8bSMika Westerberg 
deauthorization_show(struct device * dev,struct device_attribute * attr,char * buf)2363da88be2SMika Westerberg static ssize_t deauthorization_show(struct device *dev,
2373da88be2SMika Westerberg 				    struct device_attribute *attr,
2383da88be2SMika Westerberg 				    char *buf)
2393da88be2SMika Westerberg {
2403da88be2SMika Westerberg 	const struct tb *tb = container_of(dev, struct tb, dev);
2413cd542e6SMika Westerberg 	bool deauthorization = false;
2423da88be2SMika Westerberg 
2433cd542e6SMika Westerberg 	/* Only meaningful if authorization is supported */
2443cd542e6SMika Westerberg 	if (tb->security_level == TB_SECURITY_USER ||
2453cd542e6SMika Westerberg 	    tb->security_level == TB_SECURITY_SECURE)
2463cd542e6SMika Westerberg 		deauthorization = !!tb->cm_ops->disapprove_switch;
2473cd542e6SMika Westerberg 
2488283fb57SAndy Shevchenko 	return sysfs_emit(buf, "%d\n", deauthorization);
2493da88be2SMika Westerberg }
2503da88be2SMika Westerberg static DEVICE_ATTR_RO(deauthorization);
2513da88be2SMika Westerberg 
iommu_dma_protection_show(struct device * dev,struct device_attribute * attr,char * buf)252dcc3c9e3SMika Westerberg static ssize_t iommu_dma_protection_show(struct device *dev,
253dcc3c9e3SMika Westerberg 					 struct device_attribute *attr,
254dcc3c9e3SMika Westerberg 					 char *buf)
255dcc3c9e3SMika Westerberg {
25686eaf4a5SRobin Murphy 	struct tb *tb = container_of(dev, struct tb, dev);
25786eaf4a5SRobin Murphy 
25886eaf4a5SRobin Murphy 	return sysfs_emit(buf, "%d\n", tb->nhi->iommu_dma_protection);
259dcc3c9e3SMika Westerberg }
260dcc3c9e3SMika Westerberg static DEVICE_ATTR_RO(iommu_dma_protection);
261dcc3c9e3SMika Westerberg 
security_show(struct device * dev,struct device_attribute * attr,char * buf)262f67cf491SMika Westerberg static ssize_t security_show(struct device *dev, struct device_attribute *attr,
263f67cf491SMika Westerberg 			     char *buf)
264f67cf491SMika Westerberg {
265f67cf491SMika Westerberg 	struct tb *tb = container_of(dev, struct tb, dev);
2666fc14e1aSMika Westerberg 	const char *name = "unknown";
267f67cf491SMika Westerberg 
2686fc14e1aSMika Westerberg 	if (tb->security_level < ARRAY_SIZE(tb_security_names))
2696fc14e1aSMika Westerberg 		name = tb_security_names[tb->security_level];
2706fc14e1aSMika Westerberg 
2718283fb57SAndy Shevchenko 	return sysfs_emit(buf, "%s\n", name);
272f67cf491SMika Westerberg }
273f67cf491SMika Westerberg static DEVICE_ATTR_RO(security);
274f67cf491SMika Westerberg 
/* sysfs attributes exposed on every domain device */
static struct attribute *domain_attrs[] = {
	&dev_attr_boot_acl.attr,
	&dev_attr_deauthorization.attr,
	&dev_attr_iommu_dma_protection.attr,
	&dev_attr_security.attr,
	NULL,
};
282f67cf491SMika Westerberg 
domain_attr_is_visible(struct kobject * kobj,struct attribute * attr,int n)2839aaa3b8bSMika Westerberg static umode_t domain_attr_is_visible(struct kobject *kobj,
2849aaa3b8bSMika Westerberg 				      struct attribute *attr, int n)
2859aaa3b8bSMika Westerberg {
286fff15f23STian Tao 	struct device *dev = kobj_to_dev(kobj);
2879aaa3b8bSMika Westerberg 	struct tb *tb = container_of(dev, struct tb, dev);
2889aaa3b8bSMika Westerberg 
2899aaa3b8bSMika Westerberg 	if (attr == &dev_attr_boot_acl.attr) {
2909aaa3b8bSMika Westerberg 		if (tb->nboot_acl &&
2919aaa3b8bSMika Westerberg 		    tb->cm_ops->get_boot_acl &&
2929aaa3b8bSMika Westerberg 		    tb->cm_ops->set_boot_acl)
2939aaa3b8bSMika Westerberg 			return attr->mode;
2949aaa3b8bSMika Westerberg 		return 0;
2959aaa3b8bSMika Westerberg 	}
2969aaa3b8bSMika Westerberg 
2979aaa3b8bSMika Westerberg 	return attr->mode;
2989aaa3b8bSMika Westerberg }
2999aaa3b8bSMika Westerberg 
/* Attribute group with per-domain visibility filtering */
static const struct attribute_group domain_attr_group = {
	.is_visible = domain_attr_is_visible,
	.attrs = domain_attrs,
};

/* NULL-terminated list handed to the device core via tb->dev.groups */
static const struct attribute_group *domain_attr_groups[] = {
	&domain_attr_group,
	NULL,
};
309f67cf491SMika Westerberg 
/* The Thunderbolt bus: services and their drivers are matched here */
struct bus_type tb_bus_type = {
	.name = "thunderbolt",
	.match = tb_service_match,
	.probe = tb_service_probe,
	.remove = tb_service_remove,
	.shutdown = tb_service_shutdown,
};
3179d3cce0bSMika Westerberg 
/*
 * Device ->release callback: runs when the last reference to the
 * domain is dropped and frees everything tb_domain_alloc() set up.
 */
static void tb_domain_release(struct device *dev)
{
	struct tb *tb = container_of(dev, struct tb, dev);

	tb_ctl_free(tb->ctl);
	destroy_workqueue(tb->wq);
	ida_simple_remove(&tb_domain_ida, tb->index);
	mutex_destroy(&tb->lock);
	kfree(tb);
}
3289d3cce0bSMika Westerberg 
/* Device type for domain devices ("domain%d" under the NHI) */
struct device_type tb_domain_type = {
	.name = "thunderbolt_domain",
	.release = tb_domain_release,
};
3339d3cce0bSMika Westerberg 
tb_domain_event_cb(void * data,enum tb_cfg_pkg_type type,const void * buf,size_t size)3347f0a34d7SMika Westerberg static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
3357f0a34d7SMika Westerberg 			       const void *buf, size_t size)
3367f0a34d7SMika Westerberg {
3377f0a34d7SMika Westerberg 	struct tb *tb = data;
3387f0a34d7SMika Westerberg 
3397f0a34d7SMika Westerberg 	if (!tb->cm_ops->handle_event) {
3407f0a34d7SMika Westerberg 		tb_warn(tb, "domain does not have event handler\n");
3417f0a34d7SMika Westerberg 		return true;
3427f0a34d7SMika Westerberg 	}
3437f0a34d7SMika Westerberg 
3447f0a34d7SMika Westerberg 	switch (type) {
3457f0a34d7SMika Westerberg 	case TB_CFG_PKG_XDOMAIN_REQ:
3467f0a34d7SMika Westerberg 	case TB_CFG_PKG_XDOMAIN_RESP:
3477f0a34d7SMika Westerberg 		if (tb_is_xdomain_enabled())
3487f0a34d7SMika Westerberg 			return tb_xdomain_handle_request(tb, type, buf, size);
3497f0a34d7SMika Westerberg 		break;
3507f0a34d7SMika Westerberg 
3517f0a34d7SMika Westerberg 	default:
3527f0a34d7SMika Westerberg 		tb->cm_ops->handle_event(tb, type, buf, size);
3537f0a34d7SMika Westerberg 	}
3547f0a34d7SMika Westerberg 
3557f0a34d7SMika Westerberg 	return true;
3567f0a34d7SMika Westerberg }
3577f0a34d7SMika Westerberg 
/**
 * tb_domain_alloc() - Allocate a domain
 * @nhi: Pointer to the host controller
 * @timeout_msec: Control channel timeout for non-raw messages
 * @privsize: Size of the connection manager private data
 *
 * Allocates and initializes a new Thunderbolt domain. Connection
 * managers are expected to call this and then fill in @cm_ops
 * accordingly.
 *
 * Call tb_domain_put() to release the domain before it has been added
 * to the system.
 *
 * Return: allocated domain structure or %NULL in case of error
 */
struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize)
{
	struct tb *tb;

	/*
	 * Make sure the structure sizes map with that the hardware
	 * expects because bit-fields are being used.
	 */
	BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);

	/* CM private data lives directly after struct tb */
	tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
	if (!tb)
		return NULL;

	tb->nhi = nhi;
	mutex_init(&tb->lock);

	tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
	if (tb->index < 0)
		goto err_free;

	/* Ordered workqueue serializes hotplug/event handling per domain */
	tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
	if (!tb->wq)
		goto err_remove_ida;

	tb->ctl = tb_ctl_alloc(nhi, timeout_msec, tb_domain_event_cb, tb);
	if (!tb->ctl)
		goto err_destroy_wq;

	tb->dev.parent = &nhi->pdev->dev;
	tb->dev.bus = &tb_bus_type;
	tb->dev.type = &tb_domain_type;
	tb->dev.groups = domain_attr_groups;
	dev_set_name(&tb->dev, "domain%d", tb->index);
	device_initialize(&tb->dev);

	return tb;

err_destroy_wq:
	destroy_workqueue(tb->wq);
err_remove_ida:
	ida_simple_remove(&tb_domain_ida, tb->index);
err_free:
	kfree(tb);

	return NULL;
}
4229d3cce0bSMika Westerberg 
/**
 * tb_domain_add() - Add domain to the system
 * @tb: Domain to add
 * @reset: Issue reset to the host router
 *
 * Starts the domain and adds it to the system. Hotplugging devices will
 * work after this has been returned successfully. In order to remove
 * and release the domain after this function has been called, call
 * tb_domain_remove().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_domain_add(struct tb *tb, bool reset)
{
	int ret;

	/* Connection manager must be attached before the domain is added */
	if (WARN_ON(!tb->cm_ops))
		return -EINVAL;

	mutex_lock(&tb->lock);
	/*
	 * tb_schedule_hotplug_handler may be called as soon as the config
	 * channel is started. That's why we have to hold the lock here.
	 */
	tb_ctl_start(tb->ctl);

	if (tb->cm_ops->driver_ready) {
		ret = tb->cm_ops->driver_ready(tb);
		if (ret)
			goto err_ctl_stop;
	}

	tb_dbg(tb, "security level set to %s\n",
	       tb_security_names[tb->security_level]);

	ret = device_add(&tb->dev);
	if (ret)
		goto err_ctl_stop;

	/* Start the domain */
	if (tb->cm_ops->start) {
		ret = tb->cm_ops->start(tb, reset);
		if (ret)
			goto err_domain_del;
	}

	/* This starts event processing */
	mutex_unlock(&tb->lock);

	device_init_wakeup(&tb->dev, true);

	/* Domain has no runtime PM callbacks of its own; it autosuspends */
	pm_runtime_no_callbacks(&tb->dev);
	pm_runtime_set_active(&tb->dev);
	pm_runtime_enable(&tb->dev);
	pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_use_autosuspend(&tb->dev);

	return 0;

err_domain_del:
	device_del(&tb->dev);
err_ctl_stop:
	tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	return ret;
}
4919d3cce0bSMika Westerberg 
/**
 * tb_domain_remove() - Removes and releases a domain
 * @tb: Domain to remove
 *
 * Stops the domain, removes it from the system and releases all
 * resources once the last reference has been released.
 */
void tb_domain_remove(struct tb *tb)
{
	mutex_lock(&tb->lock);
	if (tb->cm_ops->stop)
		tb->cm_ops->stop(tb);
	/* Stop the domain control traffic */
	tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	/* Let queued hotplug/event work finish before tearing down */
	flush_workqueue(tb->wq);
	device_unregister(&tb->dev);
}
5119d3cce0bSMika Westerberg 
/**
 * tb_domain_suspend_noirq() - Suspend a domain
 * @tb: Domain to suspend
 *
 * Suspends all devices in the domain and stops the control channel.
 *
 * Return: %0 on success or an error from the connection manager's
 * suspend_noirq hook.
 */
int tb_domain_suspend_noirq(struct tb *tb)
{
	int ret = 0;

	/*
	 * The control channel interrupt is left enabled during suspend
	 * and taking the lock here prevents any events happening before
	 * we actually have stopped the domain and the control channel.
	 */
	mutex_lock(&tb->lock);
	if (tb->cm_ops->suspend_noirq)
		ret = tb->cm_ops->suspend_noirq(tb);
	/* Only stop the control channel when the CM suspended cleanly */
	if (!ret)
		tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	return ret;
}
5369d3cce0bSMika Westerberg 
/**
 * tb_domain_resume_noirq() - Resume a domain
 * @tb: Domain to resume
 *
 * Re-starts the control channel, and resumes all devices connected to
 * the domain.
 *
 * Return: %0 on success or an error from the connection manager's
 * resume_noirq hook.
 */
int tb_domain_resume_noirq(struct tb *tb)
{
	int ret = 0;

	mutex_lock(&tb->lock);
	/* Control channel must be running before the CM resumes */
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->resume_noirq)
		ret = tb->cm_ops->resume_noirq(tb);
	mutex_unlock(&tb->lock);

	return ret;
}
5569d3cce0bSMika Westerberg 
tb_domain_suspend(struct tb * tb)557f67cf491SMika Westerberg int tb_domain_suspend(struct tb *tb)
558f67cf491SMika Westerberg {
55984db6858SMika Westerberg 	return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
560f67cf491SMika Westerberg }
561f67cf491SMika Westerberg 
tb_domain_freeze_noirq(struct tb * tb)562884e4d57SMika Westerberg int tb_domain_freeze_noirq(struct tb *tb)
563884e4d57SMika Westerberg {
564884e4d57SMika Westerberg 	int ret = 0;
565884e4d57SMika Westerberg 
566884e4d57SMika Westerberg 	mutex_lock(&tb->lock);
567884e4d57SMika Westerberg 	if (tb->cm_ops->freeze_noirq)
568884e4d57SMika Westerberg 		ret = tb->cm_ops->freeze_noirq(tb);
569884e4d57SMika Westerberg 	if (!ret)
570884e4d57SMika Westerberg 		tb_ctl_stop(tb->ctl);
571884e4d57SMika Westerberg 	mutex_unlock(&tb->lock);
572884e4d57SMika Westerberg 
573884e4d57SMika Westerberg 	return ret;
574884e4d57SMika Westerberg }
575884e4d57SMika Westerberg 
tb_domain_thaw_noirq(struct tb * tb)576884e4d57SMika Westerberg int tb_domain_thaw_noirq(struct tb *tb)
577884e4d57SMika Westerberg {
578884e4d57SMika Westerberg 	int ret = 0;
579884e4d57SMika Westerberg 
580884e4d57SMika Westerberg 	mutex_lock(&tb->lock);
581884e4d57SMika Westerberg 	tb_ctl_start(tb->ctl);
582884e4d57SMika Westerberg 	if (tb->cm_ops->thaw_noirq)
583884e4d57SMika Westerberg 		ret = tb->cm_ops->thaw_noirq(tb);
584884e4d57SMika Westerberg 	mutex_unlock(&tb->lock);
585884e4d57SMika Westerberg 
586884e4d57SMika Westerberg 	return ret;
587884e4d57SMika Westerberg }
588884e4d57SMika Westerberg 
tb_domain_complete(struct tb * tb)589f67cf491SMika Westerberg void tb_domain_complete(struct tb *tb)
590f67cf491SMika Westerberg {
591f67cf491SMika Westerberg 	if (tb->cm_ops->complete)
592f67cf491SMika Westerberg 		tb->cm_ops->complete(tb);
593f67cf491SMika Westerberg }
594f67cf491SMika Westerberg 
tb_domain_runtime_suspend(struct tb * tb)5952d8ff0b5SMika Westerberg int tb_domain_runtime_suspend(struct tb *tb)
5962d8ff0b5SMika Westerberg {
5972d8ff0b5SMika Westerberg 	if (tb->cm_ops->runtime_suspend) {
5982d8ff0b5SMika Westerberg 		int ret = tb->cm_ops->runtime_suspend(tb);
5992d8ff0b5SMika Westerberg 		if (ret)
6002d8ff0b5SMika Westerberg 			return ret;
6012d8ff0b5SMika Westerberg 	}
6022d8ff0b5SMika Westerberg 	tb_ctl_stop(tb->ctl);
6032d8ff0b5SMika Westerberg 	return 0;
6042d8ff0b5SMika Westerberg }
6052d8ff0b5SMika Westerberg 
tb_domain_runtime_resume(struct tb * tb)6062d8ff0b5SMika Westerberg int tb_domain_runtime_resume(struct tb *tb)
6072d8ff0b5SMika Westerberg {
6082d8ff0b5SMika Westerberg 	tb_ctl_start(tb->ctl);
6092d8ff0b5SMika Westerberg 	if (tb->cm_ops->runtime_resume) {
6102d8ff0b5SMika Westerberg 		int ret = tb->cm_ops->runtime_resume(tb);
6112d8ff0b5SMika Westerberg 		if (ret)
6122d8ff0b5SMika Westerberg 			return ret;
6132d8ff0b5SMika Westerberg 	}
6142d8ff0b5SMika Westerberg 	return 0;
6152d8ff0b5SMika Westerberg }
6162d8ff0b5SMika Westerberg 
617f67cf491SMika Westerberg /**
6183da88be2SMika Westerberg  * tb_domain_disapprove_switch() - Disapprove switch
6193da88be2SMika Westerberg  * @tb: Domain the switch belongs to
6203da88be2SMika Westerberg  * @sw: Switch to disapprove
6213da88be2SMika Westerberg  *
6223da88be2SMika Westerberg  * This will disconnect PCIe tunnel from parent to this @sw.
6233da88be2SMika Westerberg  *
6243da88be2SMika Westerberg  * Return: %0 on success and negative errno in case of failure.
6253da88be2SMika Westerberg  */
tb_domain_disapprove_switch(struct tb * tb,struct tb_switch * sw)6263da88be2SMika Westerberg int tb_domain_disapprove_switch(struct tb *tb, struct tb_switch *sw)
6273da88be2SMika Westerberg {
6283da88be2SMika Westerberg 	if (!tb->cm_ops->disapprove_switch)
6293da88be2SMika Westerberg 		return -EPERM;
6303da88be2SMika Westerberg 
6313da88be2SMika Westerberg 	return tb->cm_ops->disapprove_switch(tb, sw);
6323da88be2SMika Westerberg }
6333da88be2SMika Westerberg 
6343da88be2SMika Westerberg /**
635f67cf491SMika Westerberg  * tb_domain_approve_switch() - Approve switch
636f67cf491SMika Westerberg  * @tb: Domain the switch belongs to
637f67cf491SMika Westerberg  * @sw: Switch to approve
638f67cf491SMika Westerberg  *
639f67cf491SMika Westerberg  * This will approve switch by connection manager specific means. In
6403da88be2SMika Westerberg  * case of success the connection manager will create PCIe tunnel from
6413da88be2SMika Westerberg  * parent to @sw.
642f67cf491SMika Westerberg  */
tb_domain_approve_switch(struct tb * tb,struct tb_switch * sw)643f67cf491SMika Westerberg int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
644f67cf491SMika Westerberg {
645f67cf491SMika Westerberg 	struct tb_switch *parent_sw;
646f67cf491SMika Westerberg 
647f67cf491SMika Westerberg 	if (!tb->cm_ops->approve_switch)
648f67cf491SMika Westerberg 		return -EPERM;
649f67cf491SMika Westerberg 
650f67cf491SMika Westerberg 	/* The parent switch must be authorized before this one */
651f67cf491SMika Westerberg 	parent_sw = tb_to_switch(sw->dev.parent);
652f67cf491SMika Westerberg 	if (!parent_sw || !parent_sw->authorized)
653f67cf491SMika Westerberg 		return -EINVAL;
654f67cf491SMika Westerberg 
655f67cf491SMika Westerberg 	return tb->cm_ops->approve_switch(tb, sw);
656f67cf491SMika Westerberg }
657f67cf491SMika Westerberg 
658f67cf491SMika Westerberg /**
659f67cf491SMika Westerberg  * tb_domain_approve_switch_key() - Approve switch and add key
660f67cf491SMika Westerberg  * @tb: Domain the switch belongs to
661f67cf491SMika Westerberg  * @sw: Switch to approve
662f67cf491SMika Westerberg  *
663f67cf491SMika Westerberg  * For switches that support secure connect, this function first adds
664f67cf491SMika Westerberg  * key to the switch NVM using connection manager specific means. If
665f67cf491SMika Westerberg  * adding the key is successful, the switch is approved and connected.
666f67cf491SMika Westerberg  *
667f67cf491SMika Westerberg  * Return: %0 on success and negative errno in case of failure.
668f67cf491SMika Westerberg  */
tb_domain_approve_switch_key(struct tb * tb,struct tb_switch * sw)669f67cf491SMika Westerberg int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
670f67cf491SMika Westerberg {
671f67cf491SMika Westerberg 	struct tb_switch *parent_sw;
672f67cf491SMika Westerberg 	int ret;
673f67cf491SMika Westerberg 
674f67cf491SMika Westerberg 	if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
675f67cf491SMika Westerberg 		return -EPERM;
676f67cf491SMika Westerberg 
677f67cf491SMika Westerberg 	/* The parent switch must be authorized before this one */
678f67cf491SMika Westerberg 	parent_sw = tb_to_switch(sw->dev.parent);
679f67cf491SMika Westerberg 	if (!parent_sw || !parent_sw->authorized)
680f67cf491SMika Westerberg 		return -EINVAL;
681f67cf491SMika Westerberg 
682f67cf491SMika Westerberg 	ret = tb->cm_ops->add_switch_key(tb, sw);
683f67cf491SMika Westerberg 	if (ret)
684f67cf491SMika Westerberg 		return ret;
685f67cf491SMika Westerberg 
686f67cf491SMika Westerberg 	return tb->cm_ops->approve_switch(tb, sw);
687f67cf491SMika Westerberg }
688f67cf491SMika Westerberg 
689f67cf491SMika Westerberg /**
690f67cf491SMika Westerberg  * tb_domain_challenge_switch_key() - Challenge and approve switch
691f67cf491SMika Westerberg  * @tb: Domain the switch belongs to
692f67cf491SMika Westerberg  * @sw: Switch to approve
693f67cf491SMika Westerberg  *
694f67cf491SMika Westerberg  * For switches that support secure connect, this function generates
695f67cf491SMika Westerberg  * random challenge and sends it to the switch. The switch responds to
696f67cf491SMika Westerberg  * this and if the response matches our random challenge, the switch is
697f67cf491SMika Westerberg  * approved and connected.
698f67cf491SMika Westerberg  *
699f67cf491SMika Westerberg  * Return: %0 on success and negative errno in case of failure.
700f67cf491SMika Westerberg  */
tb_domain_challenge_switch_key(struct tb * tb,struct tb_switch * sw)701f67cf491SMika Westerberg int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
702f67cf491SMika Westerberg {
703f67cf491SMika Westerberg 	u8 challenge[TB_SWITCH_KEY_SIZE];
704f67cf491SMika Westerberg 	u8 response[TB_SWITCH_KEY_SIZE];
705f67cf491SMika Westerberg 	u8 hmac[TB_SWITCH_KEY_SIZE];
706f67cf491SMika Westerberg 	struct tb_switch *parent_sw;
707f67cf491SMika Westerberg 	struct crypto_shash *tfm;
708f67cf491SMika Westerberg 	struct shash_desc *shash;
709f67cf491SMika Westerberg 	int ret;
710f67cf491SMika Westerberg 
711f67cf491SMika Westerberg 	if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
712f67cf491SMika Westerberg 		return -EPERM;
713f67cf491SMika Westerberg 
714f67cf491SMika Westerberg 	/* The parent switch must be authorized before this one */
715f67cf491SMika Westerberg 	parent_sw = tb_to_switch(sw->dev.parent);
716f67cf491SMika Westerberg 	if (!parent_sw || !parent_sw->authorized)
717f67cf491SMika Westerberg 		return -EINVAL;
718f67cf491SMika Westerberg 
719f67cf491SMika Westerberg 	get_random_bytes(challenge, sizeof(challenge));
720f67cf491SMika Westerberg 	ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
721f67cf491SMika Westerberg 	if (ret)
722f67cf491SMika Westerberg 		return ret;
723f67cf491SMika Westerberg 
724f67cf491SMika Westerberg 	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
725f67cf491SMika Westerberg 	if (IS_ERR(tfm))
726f67cf491SMika Westerberg 		return PTR_ERR(tfm);
727f67cf491SMika Westerberg 
728f67cf491SMika Westerberg 	ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
729f67cf491SMika Westerberg 	if (ret)
730f67cf491SMika Westerberg 		goto err_free_tfm;
731f67cf491SMika Westerberg 
732f67cf491SMika Westerberg 	shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
733f67cf491SMika Westerberg 			GFP_KERNEL);
734f67cf491SMika Westerberg 	if (!shash) {
735f67cf491SMika Westerberg 		ret = -ENOMEM;
736f67cf491SMika Westerberg 		goto err_free_tfm;
737f67cf491SMika Westerberg 	}
738f67cf491SMika Westerberg 
739f67cf491SMika Westerberg 	shash->tfm = tfm;
740f67cf491SMika Westerberg 
741f67cf491SMika Westerberg 	memset(hmac, 0, sizeof(hmac));
742f67cf491SMika Westerberg 	ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
743f67cf491SMika Westerberg 	if (ret)
744f67cf491SMika Westerberg 		goto err_free_shash;
745f67cf491SMika Westerberg 
746f67cf491SMika Westerberg 	/* The returned HMAC must match the one we calculated */
747f67cf491SMika Westerberg 	if (memcmp(response, hmac, sizeof(hmac))) {
748f67cf491SMika Westerberg 		ret = -EKEYREJECTED;
749f67cf491SMika Westerberg 		goto err_free_shash;
750f67cf491SMika Westerberg 	}
751f67cf491SMika Westerberg 
752f67cf491SMika Westerberg 	crypto_free_shash(tfm);
753f67cf491SMika Westerberg 	kfree(shash);
754f67cf491SMika Westerberg 
755f67cf491SMika Westerberg 	return tb->cm_ops->approve_switch(tb, sw);
756f67cf491SMika Westerberg 
757f67cf491SMika Westerberg err_free_shash:
758f67cf491SMika Westerberg 	kfree(shash);
759f67cf491SMika Westerberg err_free_tfm:
760f67cf491SMika Westerberg 	crypto_free_shash(tfm);
761f67cf491SMika Westerberg 
762f67cf491SMika Westerberg 	return ret;
763f67cf491SMika Westerberg }
764f67cf491SMika Westerberg 
765e6b245ccSMika Westerberg /**
766e6b245ccSMika Westerberg  * tb_domain_disconnect_pcie_paths() - Disconnect all PCIe paths
767e6b245ccSMika Westerberg  * @tb: Domain whose PCIe paths to disconnect
768e6b245ccSMika Westerberg  *
769e6b245ccSMika Westerberg  * This needs to be called in preparation for NVM upgrade of the host
770e6b245ccSMika Westerberg  * controller. Makes sure all PCIe paths are disconnected.
771e6b245ccSMika Westerberg  *
772e6b245ccSMika Westerberg  * Return %0 on success and negative errno in case of error.
773e6b245ccSMika Westerberg  */
tb_domain_disconnect_pcie_paths(struct tb * tb)774e6b245ccSMika Westerberg int tb_domain_disconnect_pcie_paths(struct tb *tb)
775e6b245ccSMika Westerberg {
776e6b245ccSMika Westerberg 	if (!tb->cm_ops->disconnect_pcie_paths)
777e6b245ccSMika Westerberg 		return -EPERM;
778e6b245ccSMika Westerberg 
779e6b245ccSMika Westerberg 	return tb->cm_ops->disconnect_pcie_paths(tb);
780e6b245ccSMika Westerberg }
781e6b245ccSMika Westerberg 
782d1ff7024SMika Westerberg /**
783d1ff7024SMika Westerberg  * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
784d1ff7024SMika Westerberg  * @tb: Domain enabling the DMA paths
785d1ff7024SMika Westerberg  * @xd: XDomain DMA paths are created to
786180b0689SMika Westerberg  * @transmit_path: HopID we are using to send out packets
787180b0689SMika Westerberg  * @transmit_ring: DMA ring used to send out packets
788180b0689SMika Westerberg  * @receive_path: HopID the other end is using to send packets to us
789180b0689SMika Westerberg  * @receive_ring: DMA ring used to receive packets from @receive_path
790d1ff7024SMika Westerberg  *
791d1ff7024SMika Westerberg  * Calls connection manager specific method to enable DMA paths to the
792d1ff7024SMika Westerberg  * XDomain in question.
793d1ff7024SMika Westerberg  *
794d1ff7024SMika Westerberg  * Return: 0% in case of success and negative errno otherwise. In
795d1ff7024SMika Westerberg  * particular returns %-ENOTSUPP if the connection manager
796d1ff7024SMika Westerberg  * implementation does not support XDomains.
797d1ff7024SMika Westerberg  */
tb_domain_approve_xdomain_paths(struct tb * tb,struct tb_xdomain * xd,int transmit_path,int transmit_ring,int receive_path,int receive_ring)798180b0689SMika Westerberg int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
799180b0689SMika Westerberg 				    int transmit_path, int transmit_ring,
800180b0689SMika Westerberg 				    int receive_path, int receive_ring)
801d1ff7024SMika Westerberg {
802d1ff7024SMika Westerberg 	if (!tb->cm_ops->approve_xdomain_paths)
803d1ff7024SMika Westerberg 		return -ENOTSUPP;
804d1ff7024SMika Westerberg 
805180b0689SMika Westerberg 	return tb->cm_ops->approve_xdomain_paths(tb, xd, transmit_path,
806180b0689SMika Westerberg 			transmit_ring, receive_path, receive_ring);
807d1ff7024SMika Westerberg }
808d1ff7024SMika Westerberg 
809d1ff7024SMika Westerberg /**
810d1ff7024SMika Westerberg  * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
811d1ff7024SMika Westerberg  * @tb: Domain disabling the DMA paths
812d1ff7024SMika Westerberg  * @xd: XDomain whose DMA paths are disconnected
813180b0689SMika Westerberg  * @transmit_path: HopID we are using to send out packets
814180b0689SMika Westerberg  * @transmit_ring: DMA ring used to send out packets
815180b0689SMika Westerberg  * @receive_path: HopID the other end is using to send packets to us
816180b0689SMika Westerberg  * @receive_ring: DMA ring used to receive packets from @receive_path
817d1ff7024SMika Westerberg  *
818d1ff7024SMika Westerberg  * Calls connection manager specific method to disconnect DMA paths to
819d1ff7024SMika Westerberg  * the XDomain in question.
820d1ff7024SMika Westerberg  *
821d1ff7024SMika Westerberg  * Return: 0% in case of success and negative errno otherwise. In
822d1ff7024SMika Westerberg  * particular returns %-ENOTSUPP if the connection manager
823d1ff7024SMika Westerberg  * implementation does not support XDomains.
824d1ff7024SMika Westerberg  */
tb_domain_disconnect_xdomain_paths(struct tb * tb,struct tb_xdomain * xd,int transmit_path,int transmit_ring,int receive_path,int receive_ring)825180b0689SMika Westerberg int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
826180b0689SMika Westerberg 				       int transmit_path, int transmit_ring,
827180b0689SMika Westerberg 				       int receive_path, int receive_ring)
828d1ff7024SMika Westerberg {
829d1ff7024SMika Westerberg 	if (!tb->cm_ops->disconnect_xdomain_paths)
830d1ff7024SMika Westerberg 		return -ENOTSUPP;
831d1ff7024SMika Westerberg 
832180b0689SMika Westerberg 	return tb->cm_ops->disconnect_xdomain_paths(tb, xd, transmit_path,
833180b0689SMika Westerberg 			transmit_ring, receive_path, receive_ring);
834d1ff7024SMika Westerberg }
835d1ff7024SMika Westerberg 
disconnect_xdomain(struct device * dev,void * data)836d1ff7024SMika Westerberg static int disconnect_xdomain(struct device *dev, void *data)
837d1ff7024SMika Westerberg {
838d1ff7024SMika Westerberg 	struct tb_xdomain *xd;
839d1ff7024SMika Westerberg 	struct tb *tb = data;
840d1ff7024SMika Westerberg 	int ret = 0;
841d1ff7024SMika Westerberg 
842d1ff7024SMika Westerberg 	xd = tb_to_xdomain(dev);
843d1ff7024SMika Westerberg 	if (xd && xd->tb == tb)
844180b0689SMika Westerberg 		ret = tb_xdomain_disable_all_paths(xd);
845d1ff7024SMika Westerberg 
846d1ff7024SMika Westerberg 	return ret;
847d1ff7024SMika Westerberg }
848d1ff7024SMika Westerberg 
849d1ff7024SMika Westerberg /**
850d1ff7024SMika Westerberg  * tb_domain_disconnect_all_paths() - Disconnect all paths for the domain
851d1ff7024SMika Westerberg  * @tb: Domain whose paths are disconnected
852d1ff7024SMika Westerberg  *
853d1ff7024SMika Westerberg  * This function can be used to disconnect all paths (PCIe, XDomain) for
854d1ff7024SMika Westerberg  * example in preparation for host NVM firmware upgrade. After this is
855d1ff7024SMika Westerberg  * called the paths cannot be established without resetting the switch.
856d1ff7024SMika Westerberg  *
857d1ff7024SMika Westerberg  * Return: %0 in case of success and negative errno otherwise.
858d1ff7024SMika Westerberg  */
tb_domain_disconnect_all_paths(struct tb * tb)859d1ff7024SMika Westerberg int tb_domain_disconnect_all_paths(struct tb *tb)
860d1ff7024SMika Westerberg {
861d1ff7024SMika Westerberg 	int ret;
862d1ff7024SMika Westerberg 
863d1ff7024SMika Westerberg 	ret = tb_domain_disconnect_pcie_paths(tb);
864d1ff7024SMika Westerberg 	if (ret)
865d1ff7024SMika Westerberg 		return ret;
866d1ff7024SMika Westerberg 
867d1ff7024SMika Westerberg 	return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
868d1ff7024SMika Westerberg }
869d1ff7024SMika Westerberg 
/*
 * Module-level initialization of the Thunderbolt bus support.
 * Brings subsystems up in order: debugfs, ACPI, XDomain, then the bus
 * itself. The error path unwinds only what was set up, in reverse.
 */
int tb_domain_init(void)
{
	int ret;

	/* These two cannot fail; they have matching *_exit() below */
	tb_debugfs_init();
	tb_acpi_init();

	ret = tb_xdomain_init();
	if (ret)
		goto err_acpi;
	ret = bus_register(&tb_bus_type);
	if (ret)
		goto err_xdomain;

	return 0;

err_xdomain:
	tb_xdomain_exit();
err_acpi:
	tb_acpi_exit();
	tb_debugfs_exit();

	return ret;
}
8949d3cce0bSMika Westerberg 
/*
 * Module-level teardown. Unregisters the bus first so no new devices
 * appear, then releases the remaining subsystems in roughly the
 * reverse order of tb_domain_init().
 */
void tb_domain_exit(void)
{
	bus_unregister(&tb_bus_type);
	/* All domains are gone by now; safe to drop the ID allocator */
	ida_destroy(&tb_domain_ida);
	tb_nvm_exit();
	tb_xdomain_exit();
	tb_acpi_exit();
	tb_debugfs_exit();
}
904