// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include "cxl.h"

/* Encoding defined in CXL 2.0 8.2.5.12.7 HDM Decoder Control Register */
#define CFMWS_INTERLEAVE_WAYS(x)	(1 << (x)->interleave_ways)
#define CFMWS_INTERLEAVE_GRANULARITY(x)	((x)->granularity + 8)

/* Convert CFMWS restriction bits to CXL decoder flags */
static unsigned long cfmws_to_decoder_flags(int restrictions)
{
	unsigned long flags = 0;

	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE2)
		flags |= CXL_DECODER_F_TYPE2;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE3)
		flags |= CXL_DECODER_F_TYPE3;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_VOLATILE)
		flags |= CXL_DECODER_F_RAM;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_PMEM)
		flags |= CXL_DECODER_F_PMEM;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_FIXED)
		flags |= CXL_DECODER_F_LOCK;

	return flags;
}

static int cxl_acpi_cfmws_verify(struct device *dev,
				 struct acpi_cedt_cfmws *cfmws)
{
	int expected_len;

	if (cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_MODULO) {
		dev_err(dev, "CFMWS Unsupported Interleave Arithmetic\n");
		return -EINVAL;
	}

	if (!IS_ALIGNED(cfmws->base_hpa, SZ_256M)) {
		dev_err(dev, "CFMWS Base HPA not 256MB aligned\n");
		return -EINVAL;
	}

	if (!IS_ALIGNED(cfmws->window_size, SZ_256M)) {
		dev_err(dev, "CFMWS Window Size not 256MB aligned\n");
		return -EINVAL;
	}

	if (CFMWS_INTERLEAVE_WAYS(cfmws) > CXL_DECODER_MAX_INTERLEAVE) {
		dev_err(dev, "CFMWS Interleave Ways (%d) too large\n",
			CFMWS_INTERLEAVE_WAYS(cfmws));
		return -EINVAL;
	}

	expected_len = struct_size(cfmws, interleave_targets,
				   CFMWS_INTERLEAVE_WAYS(cfmws));

	if (cfmws->header.length < expected_len) {
		dev_err(dev, "CFMWS length %d less than expected %d\n",
			cfmws->header.length, expected_len);
		return -EINVAL;
	}

	if (cfmws->header.length > expected_len)
		dev_dbg(dev, "CFMWS length %d greater than expected %d\n",
			cfmws->header.length, expected_len);

	return 0;
}

struct cxl_cfmws_context {
	struct device *dev;
	struct cxl_port *root_port;
};

/*
 * Register a root decoder for each CFMWS entry. Malformed entries are
 * logged and skipped (return 0) so that one bad window does not abort
 * enumeration of the rest of the CEDT.
 */
static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
			   const unsigned long end)
{
	int target_map[CXL_DECODER_MAX_INTERLEAVE];
	struct cxl_cfmws_context *ctx = arg;
	struct cxl_port *root_port = ctx->root_port;
	struct device *dev = ctx->dev;
	struct acpi_cedt_cfmws *cfmws;
	struct cxl_decoder *cxld;
	int rc, i;

	cfmws = (struct acpi_cedt_cfmws *) header;

	rc = cxl_acpi_cfmws_verify(dev, cfmws);
	if (rc) {
		dev_err(dev, "CFMWS range %#llx-%#llx not registered\n",
			cfmws->base_hpa,
			cfmws->base_hpa + cfmws->window_size - 1);
		return 0;
	}

	for (i = 0; i < CFMWS_INTERLEAVE_WAYS(cfmws); i++)
		target_map[i] = cfmws->interleave_targets[i];

	cxld = cxl_decoder_alloc(root_port, CFMWS_INTERLEAVE_WAYS(cfmws));
	if (IS_ERR(cxld))
		return 0;

	cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
	cxld->target_type = CXL_DECODER_EXPANDER;
	cxld->range = (struct range){
		.start = cfmws->base_hpa,
		.end = cfmws->base_hpa + cfmws->window_size - 1,
	};
	cxld->interleave_ways = CFMWS_INTERLEAVE_WAYS(cfmws);
	cxld->interleave_granularity = CFMWS_INTERLEAVE_GRANULARITY(cfmws);

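	/*
	 * On failure, drop the reference taken by cxl_decoder_alloc();
	 * otherwise tie the decoder's lifetime to @dev via devm so it is
	 * unregistered automatically when the platform device goes away.
	 */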
	rc = cxl_decoder_add(cxld, target_map);
	if (rc)
		put_device(&cxld->dev);
	else
		rc = cxl_decoder_autoremove(dev, cxld);
	if (rc) {
		dev_err(dev, "Failed to add decoder for %#llx-%#llx\n",
			cfmws->base_hpa,
			cfmws->base_hpa + cfmws->window_size - 1);
		return 0;
	}
	dev_dbg(dev, "add: %s node: %d range %#llx-%#llx\n",
		dev_name(&cxld->dev), phys_to_target_node(cxld->range.start),
		cfmws->base_hpa, cfmws->base_hpa + cfmws->window_size - 1);

	return 0;
}

__mock int match_add_root_ports(struct pci_dev *pdev, void *data)
{
	struct cxl_walk_context *ctx = data;
	struct pci_bus *root_bus = ctx->root;
	struct cxl_port *port = ctx->port;
	int type = pci_pcie_type(pdev);
	struct device *dev = ctx->dev;
	u32 lnkcap, port_num;
	int rc;

	if (pdev->bus != root_bus)
		return 0;
	if (!pci_is_pcie(pdev))
		return 0;
	if (type != PCI_EXP_TYPE_ROOT_PORT)
		return 0;
	if (pci_read_config_dword(pdev, pci_pcie_cap(pdev) + PCI_EXP_LNKCAP,
				  &lnkcap) != PCIBIOS_SUCCESSFUL)
		return 0;

	/* TODO walk DVSEC to find component register base */
	port_num = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
	rc = cxl_add_dport(port, &pdev->dev, port_num, CXL_RESOURCE_NONE);
	if (rc) {
		ctx->error = rc;
		return rc;
	}
	ctx->count++;

	dev_dbg(dev, "add dport%d: %s\n", port_num, dev_name(&pdev->dev));

	return 0;
}

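/*
 * Find the dport previously registered for @dev under @port, holding the
 * device lock to keep the dport list stable during the walk.
 */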
static struct cxl_dport *find_dport_by_dev(struct cxl_port *port,
					   struct device *dev)
{
	struct cxl_dport *dport;

	device_lock(&port->dev);
	list_for_each_entry(dport, &port->dports, list)
		if (dport->dport == dev) {
			device_unlock(&port->dev);
			return dport;
		}

	device_unlock(&port->dev);
	return NULL;
}

/* Match an ACPI0016 host bridge device that is also a PCI host bridge */
__mock struct acpi_device *to_cxl_host_bridge(struct device *host,
					      struct device *dev)
{
	struct acpi_device *adev = to_acpi_device(dev);

	if (!acpi_pci_find_root(adev->handle))
		return NULL;

	if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0)
		return adev;
	return NULL;
}

/*
 * A host bridge is a dport to a CFMWS decode and a uport to the
 * dports (PCIe Root Ports) in the host bridge.
 */
static int add_host_bridge_uport(struct device *match, void *arg)
{
	struct cxl_port *root_port = arg;
	struct device *host = root_port->dev.parent;
	struct acpi_device *bridge = to_cxl_host_bridge(host, match);
	struct acpi_pci_root *pci_root;
	struct cxl_walk_context ctx;
	int single_port_map[1], rc;
	struct cxl_decoder *cxld;
	struct cxl_dport *dport;
	struct cxl_port *port;

	if (!bridge)
		return 0;

	dport = find_dport_by_dev(root_port, match);
	if (!dport) {
		dev_dbg(host, "host bridge expected and not found\n");
		return 0;
	}

	port = devm_cxl_add_port(host, match, dport->component_reg_phys,
				 root_port);
	if (IS_ERR(port))
		return PTR_ERR(port);
	dev_dbg(host, "%s: add: %s\n", dev_name(match), dev_name(&port->dev));

	/*
	 * Note that this lookup already succeeded in
	 * to_cxl_host_bridge(), so no need to check for failure here
	 */
	pci_root = acpi_pci_find_root(bridge->handle);
	ctx = (struct cxl_walk_context){
		.dev = host,
		.root = pci_root->bus,
		.port = port,
	};
	pci_walk_bus(pci_root->bus, match_add_root_ports, &ctx);

	if (ctx.count == 0)
		return -ENODEV;
	if (ctx.error)
		return ctx.error;
	if (ctx.count > 1)
		return 0;

	/* TODO: Scan CHBCR for HDM Decoder resources */

	/*
	 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability
	 * Structure) single ported host-bridges need not publish a decoder
	 * capability when a passthrough decode can be assumed, i.e. all
	 * transactions that the uport sees are claimed and passed to the
	 * single dport. Disable the range until the first CXL region is
	 * enumerated / activated.
	 */
	cxld = cxl_decoder_alloc(port, 1);
	if (IS_ERR(cxld))
		return PTR_ERR(cxld);

	cxld->interleave_ways = 1;
	cxld->interleave_granularity = PAGE_SIZE;
	cxld->target_type = CXL_DECODER_EXPANDER;
	cxld->range = (struct range) {
		.start = 0,
		.end = -1,
	};

	device_lock(&port->dev);
	dport = list_first_entry(&port->dports, typeof(*dport), list);
	device_unlock(&port->dev);

	single_port_map[0] = dport->port_id;

	rc = cxl_decoder_add(cxld, single_port_map);
	if (rc)
		put_device(&cxld->dev);
	else
		rc = cxl_decoder_autoremove(host, cxld);

	if (rc == 0)
		dev_dbg(host, "add: %s\n", dev_name(&cxld->dev));
	return rc;
}

struct cxl_chbs_context {
	struct device *dev;
	unsigned long long uid;
	resource_size_t chbcr;
};

/* Record the component register base of the CHBS whose UID matches @arg */
static int cxl_get_chbcr(union acpi_subtable_headers *header, void *arg,
			 const unsigned long end)
{
	struct cxl_chbs_context *ctx = arg;
	struct acpi_cedt_chbs *chbs;

	/* Already found a match on a prior invocation */
	if (ctx->chbcr)
		return 0;

	chbs = (struct acpi_cedt_chbs *) header;

	if (ctx->uid != chbs->uid)
		return 0;
	ctx->chbcr = chbs->base;

	return 0;
}

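/*
 * Register an ACPI0016 host bridge as a dport of the CXL root port. The
 * bridge's _UID is the key that links it to its CHBS entry in the CEDT,
 * which in turn supplies the component register base address.
 */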
static int add_host_bridge_dport(struct device *match, void *arg)
{
	int rc;
	acpi_status status;
	unsigned long long uid;
	struct cxl_chbs_context ctx;
	struct cxl_port *root_port = arg;
	struct device *host = root_port->dev.parent;
	struct acpi_device *bridge = to_cxl_host_bridge(host, match);

	if (!bridge)
		return 0;

	status = acpi_evaluate_integer(bridge->handle, METHOD_NAME__UID, NULL,
				       &uid);
	if (status != AE_OK) {
		dev_err(host, "unable to retrieve _UID of %s\n",
			dev_name(match));
		return -ENODEV;
	}

	ctx = (struct cxl_chbs_context) {
		.dev = host,
		.uid = uid,
	};
	acpi_table_parse_cedt(ACPI_CEDT_TYPE_CHBS, cxl_get_chbcr, &ctx);

	if (ctx.chbcr == 0) {
		dev_warn(host, "No CHBS found for Host Bridge: %s\n",
			 dev_name(match));
		return 0;
	}

	rc = cxl_add_dport(root_port, match, uid, ctx.chbcr);
	if (rc) {
		dev_err(host, "failed to add downstream port: %s\n",
			dev_name(match));
		return rc;
	}
	dev_dbg(host, "add dport%llu: %s\n", uid, dev_name(match));
	return 0;
}

/*
 * Add an nvdimm-bridge for the first root decoder that allows PMEM.
 * Returning 1 terminates the device_for_each_child() walk.
 */
static int add_root_nvdimm_bridge(struct device *match, void *data)
{
	struct cxl_decoder *cxld;
	struct cxl_port *root_port = data;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct device *host = root_port->dev.parent;

	if (!is_root_decoder(match))
		return 0;

	cxld = to_cxl_decoder(match);
	if (!(cxld->flags & CXL_DECODER_F_PMEM))
		return 0;

	cxl_nvb = devm_cxl_add_nvdimm_bridge(host, root_port);
	if (IS_ERR(cxl_nvb)) {
		dev_dbg(host, "failed to register pmem\n");
		return PTR_ERR(cxl_nvb);
	}
	dev_dbg(host, "%s: add: %s\n", dev_name(&root_port->dev),
		dev_name(&cxl_nvb->dev));
	return 1;
}

static int cxl_acpi_probe(struct platform_device *pdev)
{
	int rc;
	struct cxl_port *root_port;
	struct device *host = &pdev->dev;
	struct acpi_device *adev = ACPI_COMPANION(host);
	struct cxl_cfmws_context ctx;

	root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
	if (IS_ERR(root_port))
		return PTR_ERR(root_port);
	dev_dbg(host, "add: %s\n", dev_name(&root_port->dev));

	rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
			      add_host_bridge_dport);
	if (rc < 0)
		return rc;

	ctx = (struct cxl_cfmws_context) {
		.dev = host,
		.root_port = root_port,
	};
	acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, cxl_parse_cfmws, &ctx);

	/*
	 * Root level scanned with host-bridges as dports, now scan
	 * host-bridges for their role as CXL uports to their CXL-capable
	 * PCIe Root Ports.
	 */
	rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
			      add_host_bridge_uport);
	if (rc < 0)
		return rc;

	if (IS_ENABLED(CONFIG_CXL_PMEM))
		rc = device_for_each_child(&root_port->dev, root_port,
					   add_root_nvdimm_bridge);
	if (rc < 0)
		return rc;

	return 0;
}

static const struct acpi_device_id cxl_acpi_ids[] = {
	{ "ACPI0017" },
	{ },
};
MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);

static struct platform_driver cxl_acpi_driver = {
	.probe = cxl_acpi_probe,
	.driver = {
		.name = KBUILD_MODNAME,
		.acpi_match_table = cxl_acpi_ids,
	},
};

module_platform_driver(cxl_acpi_driver);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
MODULE_IMPORT_NS(ACPI);