xref: /openbmc/linux/drivers/cxl/acpi.c (revision c6acb1e7)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2021 Intel Corporation. All rights reserved. */
3 #include <linux/platform_device.h>
4 #include <linux/module.h>
5 #include <linux/device.h>
6 #include <linux/kernel.h>
7 #include <linux/acpi.h>
8 #include <linux/pci.h>
9 #include "cxl.h"
10 
/*
 * CEDT mapped by cxl_acpi_probe() via acpi_get_table() and released
 * with acpi_put_table() before probe returns; only valid during probe.
 */
static struct acpi_table_header *acpi_cedt;

/* Encode defined in CXL 2.0 8.2.5.12.7 HDM Decoder Control Register */
#define CFMWS_INTERLEAVE_WAYS(x)	(1 << (x)->interleave_ways)
/* NOTE(review): yields the log2(bytes) granularity encoding (raw + 8) -- confirm against CXL 2.0 */
#define CFMWS_INTERLEAVE_GRANULARITY(x)	((x)->granularity + 8)
16 
17 static unsigned long cfmws_to_decoder_flags(int restrictions)
18 {
19 	unsigned long flags = 0;
20 
21 	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE2)
22 		flags |= CXL_DECODER_F_TYPE2;
23 	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE3)
24 		flags |= CXL_DECODER_F_TYPE3;
25 	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_VOLATILE)
26 		flags |= CXL_DECODER_F_RAM;
27 	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_PMEM)
28 		flags |= CXL_DECODER_F_PMEM;
29 	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_FIXED)
30 		flags |= CXL_DECODER_F_LOCK;
31 
32 	return flags;
33 }
34 
35 static int cxl_acpi_cfmws_verify(struct device *dev,
36 				 struct acpi_cedt_cfmws *cfmws)
37 {
38 	int expected_len;
39 
40 	if (cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_MODULO) {
41 		dev_err(dev, "CFMWS Unsupported Interleave Arithmetic\n");
42 		return -EINVAL;
43 	}
44 
45 	if (!IS_ALIGNED(cfmws->base_hpa, SZ_256M)) {
46 		dev_err(dev, "CFMWS Base HPA not 256MB aligned\n");
47 		return -EINVAL;
48 	}
49 
50 	if (!IS_ALIGNED(cfmws->window_size, SZ_256M)) {
51 		dev_err(dev, "CFMWS Window Size not 256MB aligned\n");
52 		return -EINVAL;
53 	}
54 
55 	if (CFMWS_INTERLEAVE_WAYS(cfmws) > CXL_DECODER_MAX_INTERLEAVE) {
56 		dev_err(dev, "CFMWS Interleave Ways (%d) too large\n",
57 			CFMWS_INTERLEAVE_WAYS(cfmws));
58 		return -EINVAL;
59 	}
60 
61 	expected_len = struct_size((cfmws), interleave_targets,
62 				   CFMWS_INTERLEAVE_WAYS(cfmws));
63 
64 	if (cfmws->header.length < expected_len) {
65 		dev_err(dev, "CFMWS length %d less than expected %d\n",
66 			cfmws->header.length, expected_len);
67 		return -EINVAL;
68 	}
69 
70 	if (cfmws->header.length > expected_len)
71 		dev_dbg(dev, "CFMWS length %d greater than expected %d\n",
72 			cfmws->header.length, expected_len);
73 
74 	return 0;
75 }
76 
77 static void cxl_add_cfmws_decoders(struct device *dev,
78 				   struct cxl_port *root_port)
79 {
80 	int target_map[CXL_DECODER_MAX_INTERLEAVE];
81 	struct acpi_cedt_cfmws *cfmws;
82 	struct cxl_decoder *cxld;
83 	acpi_size len, cur = 0;
84 	void *cedt_subtable;
85 	int rc;
86 
87 	len = acpi_cedt->length - sizeof(*acpi_cedt);
88 	cedt_subtable = acpi_cedt + 1;
89 
90 	while (cur < len) {
91 		struct acpi_cedt_header *c = cedt_subtable + cur;
92 		int i;
93 
94 		if (c->type != ACPI_CEDT_TYPE_CFMWS) {
95 			cur += c->length;
96 			continue;
97 		}
98 
99 		cfmws = cedt_subtable + cur;
100 
101 		if (cfmws->header.length < sizeof(*cfmws)) {
102 			dev_warn_once(dev,
103 				      "CFMWS entry skipped:invalid length:%u\n",
104 				      cfmws->header.length);
105 			cur += c->length;
106 			continue;
107 		}
108 
109 		rc = cxl_acpi_cfmws_verify(dev, cfmws);
110 		if (rc) {
111 			dev_err(dev, "CFMWS range %#llx-%#llx not registered\n",
112 				cfmws->base_hpa, cfmws->base_hpa +
113 				cfmws->window_size - 1);
114 			cur += c->length;
115 			continue;
116 		}
117 
118 		for (i = 0; i < CFMWS_INTERLEAVE_WAYS(cfmws); i++)
119 			target_map[i] = cfmws->interleave_targets[i];
120 
121 		cxld = cxl_decoder_alloc(root_port,
122 					 CFMWS_INTERLEAVE_WAYS(cfmws));
123 		if (IS_ERR(cxld))
124 			goto next;
125 
126 		cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
127 		cxld->target_type = CXL_DECODER_EXPANDER;
128 		cxld->range = (struct range) {
129 			.start = cfmws->base_hpa,
130 			.end = cfmws->base_hpa + cfmws->window_size - 1,
131 		};
132 		cxld->interleave_ways = CFMWS_INTERLEAVE_WAYS(cfmws);
133 		cxld->interleave_granularity =
134 			CFMWS_INTERLEAVE_GRANULARITY(cfmws);
135 
136 		rc = cxl_decoder_add(cxld, target_map);
137 		if (rc)
138 			put_device(&cxld->dev);
139 		else
140 			rc = cxl_decoder_autoremove(dev, cxld);
141 		if (rc) {
142 			dev_err(dev, "Failed to add decoder for %#llx-%#llx\n",
143 				cfmws->base_hpa, cfmws->base_hpa +
144 				cfmws->window_size - 1);
145 			goto next;
146 		}
147 		dev_dbg(dev, "add: %s range %#llx-%#llx\n",
148 			dev_name(&cxld->dev), cfmws->base_hpa,
149 			cfmws->base_hpa + cfmws->window_size - 1);
150 next:
151 		cur += c->length;
152 	}
153 }
154 
155 static struct acpi_cedt_chbs *cxl_acpi_match_chbs(struct device *dev, u32 uid)
156 {
157 	struct acpi_cedt_chbs *chbs, *chbs_match = NULL;
158 	acpi_size len, cur = 0;
159 	void *cedt_subtable;
160 
161 	len = acpi_cedt->length - sizeof(*acpi_cedt);
162 	cedt_subtable = acpi_cedt + 1;
163 
164 	while (cur < len) {
165 		struct acpi_cedt_header *c = cedt_subtable + cur;
166 
167 		if (c->type != ACPI_CEDT_TYPE_CHBS) {
168 			cur += c->length;
169 			continue;
170 		}
171 
172 		chbs = cedt_subtable + cur;
173 
174 		if (chbs->header.length < sizeof(*chbs)) {
175 			dev_warn_once(dev,
176 				      "CHBS entry skipped: invalid length:%u\n",
177 				      chbs->header.length);
178 			cur += c->length;
179 			continue;
180 		}
181 
182 		if (chbs->uid != uid) {
183 			cur += c->length;
184 			continue;
185 		}
186 
187 		if (chbs_match) {
188 			dev_warn_once(dev,
189 				      "CHBS entry skipped: duplicate UID:%u\n",
190 				      uid);
191 			cur += c->length;
192 			continue;
193 		}
194 
195 		chbs_match = chbs;
196 		cur += c->length;
197 	}
198 
199 	return chbs_match ? chbs_match : ERR_PTR(-ENODEV);
200 }
201 
202 static resource_size_t get_chbcr(struct acpi_cedt_chbs *chbs)
203 {
204 	return IS_ERR(chbs) ? CXL_RESOURCE_NONE : chbs->base;
205 }
206 
207 __mock int match_add_root_ports(struct pci_dev *pdev, void *data)
208 {
209 	struct cxl_walk_context *ctx = data;
210 	struct pci_bus *root_bus = ctx->root;
211 	struct cxl_port *port = ctx->port;
212 	int type = pci_pcie_type(pdev);
213 	struct device *dev = ctx->dev;
214 	u32 lnkcap, port_num;
215 	int rc;
216 
217 	if (pdev->bus != root_bus)
218 		return 0;
219 	if (!pci_is_pcie(pdev))
220 		return 0;
221 	if (type != PCI_EXP_TYPE_ROOT_PORT)
222 		return 0;
223 	if (pci_read_config_dword(pdev, pci_pcie_cap(pdev) + PCI_EXP_LNKCAP,
224 				  &lnkcap) != PCIBIOS_SUCCESSFUL)
225 		return 0;
226 
227 	/* TODO walk DVSEC to find component register base */
228 	port_num = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
229 	rc = cxl_add_dport(port, &pdev->dev, port_num, CXL_RESOURCE_NONE);
230 	if (rc) {
231 		ctx->error = rc;
232 		return rc;
233 	}
234 	ctx->count++;
235 
236 	dev_dbg(dev, "add dport%d: %s\n", port_num, dev_name(&pdev->dev));
237 
238 	return 0;
239 }
240 
241 static struct cxl_dport *find_dport_by_dev(struct cxl_port *port, struct device *dev)
242 {
243 	struct cxl_dport *dport;
244 
245 	device_lock(&port->dev);
246 	list_for_each_entry(dport, &port->dports, list)
247 		if (dport->dport == dev) {
248 			device_unlock(&port->dev);
249 			return dport;
250 		}
251 
252 	device_unlock(&port->dev);
253 	return NULL;
254 }
255 
256 __mock struct acpi_device *to_cxl_host_bridge(struct device *host,
257 					      struct device *dev)
258 {
259 	struct acpi_device *adev = to_acpi_device(dev);
260 
261 	if (!acpi_pci_find_root(adev->handle))
262 		return NULL;
263 
264 	if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0)
265 		return adev;
266 	return NULL;
267 }
268 
/*
 * A host bridge is a dport to a CFMWS decode and it is a uport to the
 * dport (PCIe Root Ports) in the host bridge.
 *
 * bus_for_each_dev() callback: add the host bridge as a cxl_port and
 * enumerate its PCIe Root Ports as dports. For single-ported bridges,
 * fabricate a disabled passthrough decoder.
 */
static int add_host_bridge_uport(struct device *match, void *arg)
{
	struct cxl_port *root_port = arg;
	struct device *host = root_port->dev.parent;
	struct acpi_device *bridge = to_cxl_host_bridge(host, match);
	struct acpi_pci_root *pci_root;
	struct cxl_walk_context ctx;
	int single_port_map[1], rc;
	struct cxl_decoder *cxld;
	struct cxl_dport *dport;
	struct cxl_port *port;

	/* Not a CXL host bridge, nothing to do */
	if (!bridge)
		return 0;

	/* add_host_bridge_dport() should have registered this bridge */
	dport = find_dport_by_dev(root_port, match);
	if (!dport) {
		dev_dbg(host, "host bridge expected and not found\n");
		return 0;
	}

	port = devm_cxl_add_port(host, match, dport->component_reg_phys,
				 root_port);
	if (IS_ERR(port))
		return PTR_ERR(port);
	dev_dbg(host, "%s: add: %s\n", dev_name(match), dev_name(&port->dev));

	/*
	 * Note that this lookup already succeeded in
	 * to_cxl_host_bridge(), so no need to check for failure here
	 */
	pci_root = acpi_pci_find_root(bridge->handle);
	ctx = (struct cxl_walk_context){
		.dev = host,
		.root = pci_root->bus,
		.port = port,
	};
	/* Register each Root Port on the bridge's bus as a dport of @port */
	pci_walk_bus(pci_root->bus, match_add_root_ports, &ctx);

	if (ctx.count == 0)
		return -ENODEV;
	if (ctx.error)
		return ctx.error;
	/* Multi-ported bridges publish their own decoders; done here */
	if (ctx.count > 1)
		return 0;

	/* TODO: Scan CHBCR for HDM Decoder resources */

	/*
	 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability
	 * Structure) single ported host-bridges need not publish a decoder
	 * capability when a passthrough decode can be assumed, i.e. all
	 * transactions that the uport sees are claimed and passed to the single
	 * dport. Disable the range until the first CXL region is enumerated /
	 * activated.
	 */
	cxld = cxl_decoder_alloc(port, 1);
	if (IS_ERR(cxld))
		return PTR_ERR(cxld);

	cxld->interleave_ways = 1;
	cxld->interleave_granularity = PAGE_SIZE;
	cxld->target_type = CXL_DECODER_EXPANDER;
	/* start > end marks the passthrough range as disabled */
	cxld->range = (struct range) {
		.start = 0,
		.end = -1,
	};

	device_lock(&port->dev);
	/* ctx.count == 1, so the sole dport is the passthrough target */
	dport = list_first_entry(&port->dports, typeof(*dport), list);
	device_unlock(&port->dev);
	/*
	 * NOTE(review): dport is dereferenced after dropping the port
	 * lock -- presumably dports are stable once added; confirm.
	 */

	single_port_map[0] = dport->port_id;

	rc = cxl_decoder_add(cxld, single_port_map);
	if (rc)
		put_device(&cxld->dev);	/* undo the allocation */
	else
		rc = cxl_decoder_autoremove(host, cxld);

	if (rc == 0)
		dev_dbg(host, "add: %s\n", dev_name(&cxld->dev));
	return rc;
}
357 
/*
 * bus_for_each_dev() callback: register an ACPI0016 host bridge as a
 * downstream port of the CXL root, using its _UID both as the port id
 * and as the key to locate the matching CHBS component register base.
 */
static int add_host_bridge_dport(struct device *match, void *arg)
{
	int rc;
	acpi_status status;
	unsigned long long uid;
	struct acpi_cedt_chbs *chbs;
	struct cxl_port *root_port = arg;
	struct device *host = root_port->dev.parent;
	struct acpi_device *bridge = to_cxl_host_bridge(host, match);

	/* Not a CXL host bridge, nothing to do */
	if (!bridge)
		return 0;

	status = acpi_evaluate_integer(bridge->handle, METHOD_NAME__UID, NULL,
				       &uid);
	if (status != AE_OK) {
		dev_err(host, "unable to retrieve _UID of %s\n",
			dev_name(match));
		return -ENODEV;
	}

	/* A missing CHBS is non-fatal; the bridge is simply skipped */
	chbs = cxl_acpi_match_chbs(host, uid);
	if (IS_ERR(chbs)) {
		dev_warn(host, "No CHBS found for Host Bridge: %s\n",
			 dev_name(match));
		return 0;
	}

	rc = cxl_add_dport(root_port, match, uid, get_chbcr(chbs));
	if (rc) {
		dev_err(host, "failed to add downstream port: %s\n",
			dev_name(match));
		return rc;
	}
	dev_dbg(host, "add dport%llu: %s\n", uid, dev_name(match));
	return 0;
}
395 
396 static int add_root_nvdimm_bridge(struct device *match, void *data)
397 {
398 	struct cxl_decoder *cxld;
399 	struct cxl_port *root_port = data;
400 	struct cxl_nvdimm_bridge *cxl_nvb;
401 	struct device *host = root_port->dev.parent;
402 
403 	if (!is_root_decoder(match))
404 		return 0;
405 
406 	cxld = to_cxl_decoder(match);
407 	if (!(cxld->flags & CXL_DECODER_F_PMEM))
408 		return 0;
409 
410 	cxl_nvb = devm_cxl_add_nvdimm_bridge(host, root_port);
411 	if (IS_ERR(cxl_nvb)) {
412 		dev_dbg(host, "failed to register pmem\n");
413 		return PTR_ERR(cxl_nvb);
414 	}
415 	dev_dbg(host, "%s: add: %s\n", dev_name(&root_port->dev),
416 		dev_name(&cxl_nvb->dev));
417 	return 1;
418 }
419 
420 static u32 cedt_instance(struct platform_device *pdev)
421 {
422 	const bool *native_acpi0017 = acpi_device_get_match_data(&pdev->dev);
423 
424 	if (native_acpi0017 && *native_acpi0017)
425 		return 0;
426 
427 	/* for cxl_test request a non-canonical instance */
428 	return U32_MAX;
429 }
430 
/*
 * Bind to the ACPI0017 device: build the CXL root port, parse the CEDT
 * for root decoders (CFMWS) and host bridge dports (CHBS), then scan
 * host bridges for their uport role and optionally hang an nvdimm
 * bridge off the root.
 */
static int cxl_acpi_probe(struct platform_device *pdev)
{
	int rc;
	acpi_status status;
	struct cxl_port *root_port;
	struct device *host = &pdev->dev;
	struct acpi_device *adev = ACPI_COMPANION(host);

	/* The ACPI0017 device itself anchors the topology as the root */
	root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
	if (IS_ERR(root_port))
		return PTR_ERR(root_port);
	dev_dbg(host, "add: %s\n", dev_name(&root_port->dev));

	/* acpi_cedt stays mapped for the rest of probe; see 'out' below */
	status = acpi_get_table(ACPI_SIG_CEDT, cedt_instance(pdev), &acpi_cedt);
	if (ACPI_FAILURE(status))
		return -ENXIO;

	/* First pass: register host bridges as dports of the root */
	rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
			      add_host_bridge_dport);
	if (rc)
		goto out;

	cxl_add_cfmws_decoders(host, root_port);

	/*
	 * Root level scanned with host-bridge as dports, now scan host-bridges
	 * for their role as CXL uports to their CXL-capable PCIe Root Ports.
	 */
	rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
			      add_host_bridge_uport);
	if (rc)
		goto out;

	/* device_for_each_child() returns 1 when a bridge was added */
	if (IS_ENABLED(CONFIG_CXL_PMEM))
		rc = device_for_each_child(&root_port->dev, root_port,
					   add_root_nvdimm_bridge);

out:
	/* The CEDT is only needed during probe */
	acpi_put_table(acpi_cedt);
	if (rc < 0)
		return rc;
	return 0;
}
474 
/*
 * Match data: true selects the canonical CEDT instance in
 * cedt_instance(); cxl_test registers its own id table overriding this.
 */
static bool native_acpi0017 = true;

static const struct acpi_device_id cxl_acpi_ids[] = {
	{ "ACPI0017", (unsigned long) &native_acpi0017 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);

static struct platform_driver cxl_acpi_driver = {
	.probe = cxl_acpi_probe,
	.driver = {
		.name = KBUILD_MODNAME,
		.acpi_match_table = cxl_acpi_ids,
	},
};

module_platform_driver(cxl_acpi_driver);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
494