xref: /openbmc/linux/drivers/cxl/port.c (revision 6aeadf78)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2022 Intel Corporation. All rights reserved. */
3 #include <linux/device.h>
4 #include <linux/module.h>
5 #include <linux/slab.h>
6 
7 #include "cxlmem.h"
8 #include "cxlpci.h"
9 
10 /**
11  * DOC: cxl port
12  *
 * The port driver enumerates dports via PCI and scans for HDM
14  * (Host-managed-Device-Memory) decoder resources via the
15  * @component_reg_phys value passed in by the agent that registered the
16  * port. All descendant ports of a CXL root port (described by platform
 * firmware) are managed in this driver's context. Each driver instance
18  * is responsible for tearing down the driver context of immediate
19  * descendant ports. The locking for this is validated by
20  * CONFIG_PROVE_CXL_LOCKING.
21  *
22  * The primary service this driver provides is presenting APIs to other
23  * drivers to utilize the decoders, and indicating to userspace (via bind
24  * status) the connectivity of the CXL.mem protocol throughout the
25  * PCIe topology.
26  */
27 
/*
 * devm release action: defer the memdev detach out of the port's
 * device-release path by handing it to the detach workqueue helper.
 */
static void schedule_detach(void *data)
{
	struct cxl_memdev *cxlmd = data;

	schedule_cxl_memdev_detach(cxlmd);
}
32 
33 static int discover_region(struct device *dev, void *root)
34 {
35 	struct cxl_endpoint_decoder *cxled;
36 	int rc;
37 
38 	if (!is_endpoint_decoder(dev))
39 		return 0;
40 
41 	cxled = to_cxl_endpoint_decoder(dev);
42 	if ((cxled->cxld.flags & CXL_DECODER_F_ENABLE) == 0)
43 		return 0;
44 
45 	if (cxled->state != CXL_DECODER_STATE_AUTO)
46 		return 0;
47 
48 	/*
49 	 * Region enumeration is opportunistic, if this add-event fails,
50 	 * continue to the next endpoint decoder.
51 	 */
52 	rc = cxl_add_to_region(root, cxled);
53 	if (rc)
54 		dev_dbg(dev, "failed to add to region: %#llx-%#llx\n",
55 			cxled->cxld.hpa_range.start, cxled->cxld.hpa_range.end);
56 
57 	return 0;
58 }
59 
60 static int cxl_switch_port_probe(struct cxl_port *port)
61 {
62 	struct cxl_hdm *cxlhdm;
63 	int rc, nr_dports;
64 
65 	nr_dports = devm_cxl_port_enumerate_dports(port);
66 	if (nr_dports < 0)
67 		return nr_dports;
68 
69 	cxlhdm = devm_cxl_setup_hdm(port, NULL);
70 	rc = devm_cxl_enable_hdm(port, cxlhdm);
71 	if (rc)
72 		return rc;
73 
74 	if (!IS_ERR(cxlhdm))
75 		return devm_cxl_enumerate_decoders(cxlhdm, NULL);
76 
77 	if (PTR_ERR(cxlhdm) != -ENODEV) {
78 		dev_err(&port->dev, "Failed to map HDM decoder capability\n");
79 		return PTR_ERR(cxlhdm);
80 	}
81 
82 	if (nr_dports == 1) {
83 		dev_dbg(&port->dev, "Fallback to passthrough decoder\n");
84 		return devm_cxl_add_passthrough_decoder(port);
85 	}
86 
87 	dev_err(&port->dev, "HDM decoder capability not found\n");
88 	return -ENXIO;
89 }
90 
/*
 * Probe an endpoint port: decode the device's DVSEC range registers, set
 * up and enumerate its HDM decoders, then opportunistically assemble
 * regions from decoders that platform firmware has already committed.
 * Statement order matters throughout; see inline comments.
 */
static int cxl_endpoint_port_probe(struct cxl_port *port)
{
	struct cxl_endpoint_dvsec_info info = { .port = port };
	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_hdm *cxlhdm;
	struct cxl_port *root;
	int rc;

	/* Parse DVSEC range registers into @info before HDM setup uses it */
	rc = cxl_dvsec_rr_decode(cxlds->dev, cxlds->cxl_dvsec, &info);
	if (rc < 0)
		return rc;

	cxlhdm = devm_cxl_setup_hdm(port, &info);
	if (IS_ERR(cxlhdm))
		return PTR_ERR(cxlhdm);

	/* Cache the data early to ensure is_visible() works */
	read_cdat_data(port);

	/*
	 * Pin the memdev for the port's lifetime; the devm action runs
	 * schedule_detach() at port teardown (the detach path is presumably
	 * responsible for dropping this reference — paired with get_device()).
	 */
	get_device(&cxlmd->dev);
	rc = devm_add_action_or_reset(&port->dev, schedule_detach, cxlmd);
	if (rc)
		return rc;

	rc = cxl_hdm_decode_init(cxlds, cxlhdm, &info);
	if (rc)
		return rc;

	rc = devm_cxl_enumerate_decoders(cxlhdm, &info);
	if (rc)
		return rc;

	/*
	 * This can't fail in practice as CXL root exit unregisters all
	 * descendant ports and that in turn synchronizes with cxl_port_probe()
	 */
	root = find_cxl_root(port);

	/*
	 * Now that all endpoint decoders are successfully enumerated, try to
	 * assemble regions from committed decoders
	 */
	device_for_each_child(&port->dev, root, discover_region);
	put_device(&root->dev);

	return 0;
}
139 
/* Driver probe entry: dispatch on endpoint vs. switch port flavor. */
static int cxl_port_probe(struct device *dev)
{
	struct cxl_port *port = to_cxl_port(dev);

	return is_cxl_endpoint(port) ? cxl_endpoint_port_probe(port) :
				       cxl_switch_port_probe(port);
}
148 
149 static ssize_t CDAT_read(struct file *filp, struct kobject *kobj,
150 			 struct bin_attribute *bin_attr, char *buf,
151 			 loff_t offset, size_t count)
152 {
153 	struct device *dev = kobj_to_dev(kobj);
154 	struct cxl_port *port = to_cxl_port(dev);
155 
156 	if (!port->cdat_available)
157 		return -ENXIO;
158 
159 	if (!port->cdat.table)
160 		return 0;
161 
162 	return memory_read_from_buffer(buf, count, &offset,
163 				       port->cdat.table,
164 				       port->cdat.length);
165 }
166 
/*
 * Admin-read-only binary attribute; declared size 0 because the table
 * length is only known at runtime — reads are bounded by
 * port->cdat.length in CDAT_read().
 */
static BIN_ATTR_ADMIN_RO(CDAT, 0);
168 
169 static umode_t cxl_port_bin_attr_is_visible(struct kobject *kobj,
170 					    struct bin_attribute *attr, int i)
171 {
172 	struct device *dev = kobj_to_dev(kobj);
173 	struct cxl_port *port = to_cxl_port(dev);
174 
175 	if ((attr == &bin_attr_CDAT) && port->cdat_available)
176 		return attr->attr.mode;
177 
178 	return 0;
179 }
180 
/* NULL-terminated list of binary attributes in the CDAT group */
static struct bin_attribute *cxl_cdat_bin_attributes[] = {
	&bin_attr_CDAT,
	NULL,
};

/* Group gated per-port by cxl_port_bin_attr_is_visible() */
static struct attribute_group cxl_cdat_attribute_group = {
	.bin_attrs = cxl_cdat_bin_attributes,
	.is_bin_visible = cxl_port_bin_attr_is_visible,
};

/* NULL-terminated group list installed via the driver's dev_groups */
static const struct attribute_group *cxl_port_attribute_groups[] = {
	&cxl_cdat_attribute_group,
	NULL,
};
195 
/*
 * Bind to every CXL_DEVICE_PORT device on the cxl bus; no explicit
 * .remove — teardown is handled by the devm actions registered in probe.
 */
static struct cxl_driver cxl_port_driver = {
	.name = "cxl_port",
	.probe = cxl_port_probe,
	.id = CXL_DEVICE_PORT,
	.drv = {
		.dev_groups = cxl_port_attribute_groups,
	},
};

module_cxl_driver(cxl_port_driver);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
MODULE_ALIAS_CXL(CXL_DEVICE_PORT);
209