1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright(c) 2020 Intel Corporation. All rights reserved. */ 3 #include <linux/platform_device.h> 4 #include <linux/memregion.h> 5 #include <linux/workqueue.h> 6 #include <linux/debugfs.h> 7 #include <linux/device.h> 8 #include <linux/module.h> 9 #include <linux/pci.h> 10 #include <linux/slab.h> 11 #include <linux/idr.h> 12 #include <cxlmem.h> 13 #include <cxlpci.h> 14 #include <cxl.h> 15 #include "core.h" 16 17 /** 18 * DOC: cxl core 19 * 20 * The CXL core provides a set of interfaces that can be consumed by CXL aware 21 * drivers. The interfaces allow for creation, modification, and destruction of 22 * regions, memory devices, ports, and decoders. CXL aware drivers must register 23 * with the CXL core via these interfaces in order to be able to participate in 24 * cross-device interleave coordination. The CXL core also establishes and 25 * maintains the bridge to the nvdimm subsystem. 26 * 27 * CXL core introduces sysfs hierarchy to control the devices that are 28 * instantiated by the core. 29 */ 30 31 /* 32 * All changes to the interleave configuration occur with this lock held 33 * for write. 
34 */ 35 DECLARE_RWSEM(cxl_region_rwsem); 36 37 static DEFINE_IDA(cxl_port_ida); 38 static DEFINE_XARRAY(cxl_root_buses); 39 40 static ssize_t devtype_show(struct device *dev, struct device_attribute *attr, 41 char *buf) 42 { 43 return sysfs_emit(buf, "%s\n", dev->type->name); 44 } 45 static DEVICE_ATTR_RO(devtype); 46 47 static int cxl_device_id(const struct device *dev) 48 { 49 if (dev->type == &cxl_nvdimm_bridge_type) 50 return CXL_DEVICE_NVDIMM_BRIDGE; 51 if (dev->type == &cxl_nvdimm_type) 52 return CXL_DEVICE_NVDIMM; 53 if (dev->type == CXL_PMEM_REGION_TYPE()) 54 return CXL_DEVICE_PMEM_REGION; 55 if (dev->type == CXL_DAX_REGION_TYPE()) 56 return CXL_DEVICE_DAX_REGION; 57 if (is_cxl_port(dev)) { 58 if (is_cxl_root(to_cxl_port(dev))) 59 return CXL_DEVICE_ROOT; 60 return CXL_DEVICE_PORT; 61 } 62 if (is_cxl_memdev(dev)) 63 return CXL_DEVICE_MEMORY_EXPANDER; 64 if (dev->type == CXL_REGION_TYPE()) 65 return CXL_DEVICE_REGION; 66 if (dev->type == &cxl_pmu_type) 67 return CXL_DEVICE_PMU; 68 return 0; 69 } 70 71 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, 72 char *buf) 73 { 74 return sysfs_emit(buf, CXL_MODALIAS_FMT "\n", cxl_device_id(dev)); 75 } 76 static DEVICE_ATTR_RO(modalias); 77 78 static struct attribute *cxl_base_attributes[] = { 79 &dev_attr_devtype.attr, 80 &dev_attr_modalias.attr, 81 NULL, 82 }; 83 84 struct attribute_group cxl_base_attribute_group = { 85 .attrs = cxl_base_attributes, 86 }; 87 88 static ssize_t start_show(struct device *dev, struct device_attribute *attr, 89 char *buf) 90 { 91 struct cxl_decoder *cxld = to_cxl_decoder(dev); 92 93 return sysfs_emit(buf, "%#llx\n", cxld->hpa_range.start); 94 } 95 static DEVICE_ATTR_ADMIN_RO(start); 96 97 static ssize_t size_show(struct device *dev, struct device_attribute *attr, 98 char *buf) 99 { 100 struct cxl_decoder *cxld = to_cxl_decoder(dev); 101 102 return sysfs_emit(buf, "%#llx\n", range_len(&cxld->hpa_range)); 103 } 104 static DEVICE_ATTR_RO(size); 105 106 #define 
CXL_DECODER_FLAG_ATTR(name, flag) \ 107 static ssize_t name##_show(struct device *dev, \ 108 struct device_attribute *attr, char *buf) \ 109 { \ 110 struct cxl_decoder *cxld = to_cxl_decoder(dev); \ 111 \ 112 return sysfs_emit(buf, "%s\n", \ 113 (cxld->flags & (flag)) ? "1" : "0"); \ 114 } \ 115 static DEVICE_ATTR_RO(name) 116 117 CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM); 118 CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM); 119 CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2); 120 CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3); 121 CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK); 122 123 static ssize_t target_type_show(struct device *dev, 124 struct device_attribute *attr, char *buf) 125 { 126 struct cxl_decoder *cxld = to_cxl_decoder(dev); 127 128 switch (cxld->target_type) { 129 case CXL_DECODER_DEVMEM: 130 return sysfs_emit(buf, "accelerator\n"); 131 case CXL_DECODER_HOSTONLYMEM: 132 return sysfs_emit(buf, "expander\n"); 133 } 134 return -ENXIO; 135 } 136 static DEVICE_ATTR_RO(target_type); 137 138 static ssize_t emit_target_list(struct cxl_switch_decoder *cxlsd, char *buf) 139 { 140 struct cxl_decoder *cxld = &cxlsd->cxld; 141 ssize_t offset = 0; 142 int i, rc = 0; 143 144 for (i = 0; i < cxld->interleave_ways; i++) { 145 struct cxl_dport *dport = cxlsd->target[i]; 146 struct cxl_dport *next = NULL; 147 148 if (!dport) 149 break; 150 151 if (i + 1 < cxld->interleave_ways) 152 next = cxlsd->target[i + 1]; 153 rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id, 154 next ? 
"," : ""); 155 if (rc < 0) 156 return rc; 157 offset += rc; 158 } 159 160 return offset; 161 } 162 163 static ssize_t target_list_show(struct device *dev, 164 struct device_attribute *attr, char *buf) 165 { 166 struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev); 167 ssize_t offset; 168 unsigned int seq; 169 int rc; 170 171 do { 172 seq = read_seqbegin(&cxlsd->target_lock); 173 rc = emit_target_list(cxlsd, buf); 174 } while (read_seqretry(&cxlsd->target_lock, seq)); 175 176 if (rc < 0) 177 return rc; 178 offset = rc; 179 180 rc = sysfs_emit_at(buf, offset, "\n"); 181 if (rc < 0) 182 return rc; 183 184 return offset + rc; 185 } 186 static DEVICE_ATTR_RO(target_list); 187 188 static ssize_t mode_show(struct device *dev, struct device_attribute *attr, 189 char *buf) 190 { 191 struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); 192 193 return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxled->mode)); 194 } 195 196 static ssize_t mode_store(struct device *dev, struct device_attribute *attr, 197 const char *buf, size_t len) 198 { 199 struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); 200 enum cxl_decoder_mode mode; 201 ssize_t rc; 202 203 if (sysfs_streq(buf, "pmem")) 204 mode = CXL_DECODER_PMEM; 205 else if (sysfs_streq(buf, "ram")) 206 mode = CXL_DECODER_RAM; 207 else 208 return -EINVAL; 209 210 rc = cxl_dpa_set_mode(cxled, mode); 211 if (rc) 212 return rc; 213 214 return len; 215 } 216 static DEVICE_ATTR_RW(mode); 217 218 static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *attr, 219 char *buf) 220 { 221 struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); 222 u64 base = cxl_dpa_resource_start(cxled); 223 224 return sysfs_emit(buf, "%#llx\n", base); 225 } 226 static DEVICE_ATTR_RO(dpa_resource); 227 228 static ssize_t dpa_size_show(struct device *dev, struct device_attribute *attr, 229 char *buf) 230 { 231 struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); 232 
resource_size_t size = cxl_dpa_size(cxled); 233 234 return sysfs_emit(buf, "%pa\n", &size); 235 } 236 237 static ssize_t dpa_size_store(struct device *dev, struct device_attribute *attr, 238 const char *buf, size_t len) 239 { 240 struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); 241 unsigned long long size; 242 ssize_t rc; 243 244 rc = kstrtoull(buf, 0, &size); 245 if (rc) 246 return rc; 247 248 if (!IS_ALIGNED(size, SZ_256M)) 249 return -EINVAL; 250 251 rc = cxl_dpa_free(cxled); 252 if (rc) 253 return rc; 254 255 if (size == 0) 256 return len; 257 258 rc = cxl_dpa_alloc(cxled, size); 259 if (rc) 260 return rc; 261 262 return len; 263 } 264 static DEVICE_ATTR_RW(dpa_size); 265 266 static ssize_t interleave_granularity_show(struct device *dev, 267 struct device_attribute *attr, 268 char *buf) 269 { 270 struct cxl_decoder *cxld = to_cxl_decoder(dev); 271 272 return sysfs_emit(buf, "%d\n", cxld->interleave_granularity); 273 } 274 275 static DEVICE_ATTR_RO(interleave_granularity); 276 277 static ssize_t interleave_ways_show(struct device *dev, 278 struct device_attribute *attr, char *buf) 279 { 280 struct cxl_decoder *cxld = to_cxl_decoder(dev); 281 282 return sysfs_emit(buf, "%d\n", cxld->interleave_ways); 283 } 284 285 static DEVICE_ATTR_RO(interleave_ways); 286 287 static struct attribute *cxl_decoder_base_attrs[] = { 288 &dev_attr_start.attr, 289 &dev_attr_size.attr, 290 &dev_attr_locked.attr, 291 &dev_attr_interleave_granularity.attr, 292 &dev_attr_interleave_ways.attr, 293 NULL, 294 }; 295 296 static struct attribute_group cxl_decoder_base_attribute_group = { 297 .attrs = cxl_decoder_base_attrs, 298 }; 299 300 static struct attribute *cxl_decoder_root_attrs[] = { 301 &dev_attr_cap_pmem.attr, 302 &dev_attr_cap_ram.attr, 303 &dev_attr_cap_type2.attr, 304 &dev_attr_cap_type3.attr, 305 &dev_attr_target_list.attr, 306 SET_CXL_REGION_ATTR(create_pmem_region) 307 SET_CXL_REGION_ATTR(create_ram_region) 308 SET_CXL_REGION_ATTR(delete_region) 309 NULL, 310 
}; 311 312 static bool can_create_pmem(struct cxl_root_decoder *cxlrd) 313 { 314 unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_PMEM; 315 316 return (cxlrd->cxlsd.cxld.flags & flags) == flags; 317 } 318 319 static bool can_create_ram(struct cxl_root_decoder *cxlrd) 320 { 321 unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_RAM; 322 323 return (cxlrd->cxlsd.cxld.flags & flags) == flags; 324 } 325 326 static umode_t cxl_root_decoder_visible(struct kobject *kobj, struct attribute *a, int n) 327 { 328 struct device *dev = kobj_to_dev(kobj); 329 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); 330 331 if (a == CXL_REGION_ATTR(create_pmem_region) && !can_create_pmem(cxlrd)) 332 return 0; 333 334 if (a == CXL_REGION_ATTR(create_ram_region) && !can_create_ram(cxlrd)) 335 return 0; 336 337 if (a == CXL_REGION_ATTR(delete_region) && 338 !(can_create_pmem(cxlrd) || can_create_ram(cxlrd))) 339 return 0; 340 341 return a->mode; 342 } 343 344 static struct attribute_group cxl_decoder_root_attribute_group = { 345 .attrs = cxl_decoder_root_attrs, 346 .is_visible = cxl_root_decoder_visible, 347 }; 348 349 static const struct attribute_group *cxl_decoder_root_attribute_groups[] = { 350 &cxl_decoder_root_attribute_group, 351 &cxl_decoder_base_attribute_group, 352 &cxl_base_attribute_group, 353 NULL, 354 }; 355 356 static struct attribute *cxl_decoder_switch_attrs[] = { 357 &dev_attr_target_type.attr, 358 &dev_attr_target_list.attr, 359 SET_CXL_REGION_ATTR(region) 360 NULL, 361 }; 362 363 static struct attribute_group cxl_decoder_switch_attribute_group = { 364 .attrs = cxl_decoder_switch_attrs, 365 }; 366 367 static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = { 368 &cxl_decoder_switch_attribute_group, 369 &cxl_decoder_base_attribute_group, 370 &cxl_base_attribute_group, 371 NULL, 372 }; 373 374 static struct attribute *cxl_decoder_endpoint_attrs[] = { 375 &dev_attr_target_type.attr, 376 &dev_attr_mode.attr, 377 
&dev_attr_dpa_size.attr, 378 &dev_attr_dpa_resource.attr, 379 SET_CXL_REGION_ATTR(region) 380 NULL, 381 }; 382 383 static struct attribute_group cxl_decoder_endpoint_attribute_group = { 384 .attrs = cxl_decoder_endpoint_attrs, 385 }; 386 387 static const struct attribute_group *cxl_decoder_endpoint_attribute_groups[] = { 388 &cxl_decoder_base_attribute_group, 389 &cxl_decoder_endpoint_attribute_group, 390 &cxl_base_attribute_group, 391 NULL, 392 }; 393 394 static void __cxl_decoder_release(struct cxl_decoder *cxld) 395 { 396 struct cxl_port *port = to_cxl_port(cxld->dev.parent); 397 398 ida_free(&port->decoder_ida, cxld->id); 399 put_device(&port->dev); 400 } 401 402 static void cxl_endpoint_decoder_release(struct device *dev) 403 { 404 struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); 405 406 __cxl_decoder_release(&cxled->cxld); 407 kfree(cxled); 408 } 409 410 static void cxl_switch_decoder_release(struct device *dev) 411 { 412 struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev); 413 414 __cxl_decoder_release(&cxlsd->cxld); 415 kfree(cxlsd); 416 } 417 418 struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev) 419 { 420 if (dev_WARN_ONCE(dev, !is_root_decoder(dev), 421 "not a cxl_root_decoder device\n")) 422 return NULL; 423 return container_of(dev, struct cxl_root_decoder, cxlsd.cxld.dev); 424 } 425 EXPORT_SYMBOL_NS_GPL(to_cxl_root_decoder, CXL); 426 427 static void cxl_root_decoder_release(struct device *dev) 428 { 429 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); 430 431 if (atomic_read(&cxlrd->region_id) >= 0) 432 memregion_free(atomic_read(&cxlrd->region_id)); 433 __cxl_decoder_release(&cxlrd->cxlsd.cxld); 434 kfree(cxlrd); 435 } 436 437 static const struct device_type cxl_decoder_endpoint_type = { 438 .name = "cxl_decoder_endpoint", 439 .release = cxl_endpoint_decoder_release, 440 .groups = cxl_decoder_endpoint_attribute_groups, 441 }; 442 443 static const struct device_type cxl_decoder_switch_type = { 444 
.name = "cxl_decoder_switch", 445 .release = cxl_switch_decoder_release, 446 .groups = cxl_decoder_switch_attribute_groups, 447 }; 448 449 static const struct device_type cxl_decoder_root_type = { 450 .name = "cxl_decoder_root", 451 .release = cxl_root_decoder_release, 452 .groups = cxl_decoder_root_attribute_groups, 453 }; 454 455 bool is_endpoint_decoder(struct device *dev) 456 { 457 return dev->type == &cxl_decoder_endpoint_type; 458 } 459 EXPORT_SYMBOL_NS_GPL(is_endpoint_decoder, CXL); 460 461 bool is_root_decoder(struct device *dev) 462 { 463 return dev->type == &cxl_decoder_root_type; 464 } 465 EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL); 466 467 bool is_switch_decoder(struct device *dev) 468 { 469 return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type; 470 } 471 EXPORT_SYMBOL_NS_GPL(is_switch_decoder, CXL); 472 473 struct cxl_decoder *to_cxl_decoder(struct device *dev) 474 { 475 if (dev_WARN_ONCE(dev, 476 !is_switch_decoder(dev) && !is_endpoint_decoder(dev), 477 "not a cxl_decoder device\n")) 478 return NULL; 479 return container_of(dev, struct cxl_decoder, dev); 480 } 481 EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL); 482 483 struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev) 484 { 485 if (dev_WARN_ONCE(dev, !is_endpoint_decoder(dev), 486 "not a cxl_endpoint_decoder device\n")) 487 return NULL; 488 return container_of(dev, struct cxl_endpoint_decoder, cxld.dev); 489 } 490 EXPORT_SYMBOL_NS_GPL(to_cxl_endpoint_decoder, CXL); 491 492 struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev) 493 { 494 if (dev_WARN_ONCE(dev, !is_switch_decoder(dev), 495 "not a cxl_switch_decoder device\n")) 496 return NULL; 497 return container_of(dev, struct cxl_switch_decoder, cxld.dev); 498 } 499 EXPORT_SYMBOL_NS_GPL(to_cxl_switch_decoder, CXL); 500 501 static void cxl_ep_release(struct cxl_ep *ep) 502 { 503 put_device(ep->ep); 504 kfree(ep); 505 } 506 507 static void cxl_ep_remove(struct cxl_port *port, struct cxl_ep *ep) 508 { 509 
if (!ep) 510 return; 511 xa_erase(&port->endpoints, (unsigned long) ep->ep); 512 cxl_ep_release(ep); 513 } 514 515 static void cxl_port_release(struct device *dev) 516 { 517 struct cxl_port *port = to_cxl_port(dev); 518 unsigned long index; 519 struct cxl_ep *ep; 520 521 xa_for_each(&port->endpoints, index, ep) 522 cxl_ep_remove(port, ep); 523 xa_destroy(&port->endpoints); 524 xa_destroy(&port->dports); 525 xa_destroy(&port->regions); 526 ida_free(&cxl_port_ida, port->id); 527 kfree(port); 528 } 529 530 static const struct attribute_group *cxl_port_attribute_groups[] = { 531 &cxl_base_attribute_group, 532 NULL, 533 }; 534 535 static const struct device_type cxl_port_type = { 536 .name = "cxl_port", 537 .release = cxl_port_release, 538 .groups = cxl_port_attribute_groups, 539 }; 540 541 bool is_cxl_port(const struct device *dev) 542 { 543 return dev->type == &cxl_port_type; 544 } 545 EXPORT_SYMBOL_NS_GPL(is_cxl_port, CXL); 546 547 struct cxl_port *to_cxl_port(const struct device *dev) 548 { 549 if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type, 550 "not a cxl_port device\n")) 551 return NULL; 552 return container_of(dev, struct cxl_port, dev); 553 } 554 EXPORT_SYMBOL_NS_GPL(to_cxl_port, CXL); 555 556 static void unregister_port(void *_port) 557 { 558 struct cxl_port *port = _port; 559 struct cxl_port *parent; 560 struct device *lock_dev; 561 562 if (is_cxl_root(port)) 563 parent = NULL; 564 else 565 parent = to_cxl_port(port->dev.parent); 566 567 /* 568 * CXL root port's and the first level of ports are unregistered 569 * under the platform firmware device lock, all other ports are 570 * unregistered while holding their parent port lock. 
571 */ 572 if (!parent) 573 lock_dev = port->uport_dev; 574 else if (is_cxl_root(parent)) 575 lock_dev = parent->uport_dev; 576 else 577 lock_dev = &parent->dev; 578 579 device_lock_assert(lock_dev); 580 port->dead = true; 581 device_unregister(&port->dev); 582 } 583 584 static void cxl_unlink_uport(void *_port) 585 { 586 struct cxl_port *port = _port; 587 588 sysfs_remove_link(&port->dev.kobj, "uport"); 589 } 590 591 static int devm_cxl_link_uport(struct device *host, struct cxl_port *port) 592 { 593 int rc; 594 595 rc = sysfs_create_link(&port->dev.kobj, &port->uport_dev->kobj, 596 "uport"); 597 if (rc) 598 return rc; 599 return devm_add_action_or_reset(host, cxl_unlink_uport, port); 600 } 601 602 static void cxl_unlink_parent_dport(void *_port) 603 { 604 struct cxl_port *port = _port; 605 606 sysfs_remove_link(&port->dev.kobj, "parent_dport"); 607 } 608 609 static int devm_cxl_link_parent_dport(struct device *host, 610 struct cxl_port *port, 611 struct cxl_dport *parent_dport) 612 { 613 int rc; 614 615 if (!parent_dport) 616 return 0; 617 618 rc = sysfs_create_link(&port->dev.kobj, &parent_dport->dport_dev->kobj, 619 "parent_dport"); 620 if (rc) 621 return rc; 622 return devm_add_action_or_reset(host, cxl_unlink_parent_dport, port); 623 } 624 625 static struct lock_class_key cxl_port_key; 626 627 static struct cxl_port *cxl_port_alloc(struct device *uport_dev, 628 resource_size_t component_reg_phys, 629 struct cxl_dport *parent_dport) 630 { 631 struct cxl_port *port; 632 struct device *dev; 633 int rc; 634 635 port = kzalloc(sizeof(*port), GFP_KERNEL); 636 if (!port) 637 return ERR_PTR(-ENOMEM); 638 639 rc = ida_alloc(&cxl_port_ida, GFP_KERNEL); 640 if (rc < 0) 641 goto err; 642 port->id = rc; 643 port->uport_dev = uport_dev; 644 645 /* 646 * The top-level cxl_port "cxl_root" does not have a cxl_port as 647 * its parent and it does not have any corresponding component 648 * registers as its decode is described by a fixed platform 649 * description. 
650 */ 651 dev = &port->dev; 652 if (parent_dport) { 653 struct cxl_port *parent_port = parent_dport->port; 654 struct cxl_port *iter; 655 656 dev->parent = &parent_port->dev; 657 port->depth = parent_port->depth + 1; 658 port->parent_dport = parent_dport; 659 660 /* 661 * walk to the host bridge, or the first ancestor that knows 662 * the host bridge 663 */ 664 iter = port; 665 while (!iter->host_bridge && 666 !is_cxl_root(to_cxl_port(iter->dev.parent))) 667 iter = to_cxl_port(iter->dev.parent); 668 if (iter->host_bridge) 669 port->host_bridge = iter->host_bridge; 670 else if (parent_dport->rch) 671 port->host_bridge = parent_dport->dport_dev; 672 else 673 port->host_bridge = iter->uport_dev; 674 dev_dbg(uport_dev, "host-bridge: %s\n", 675 dev_name(port->host_bridge)); 676 } else 677 dev->parent = uport_dev; 678 679 port->component_reg_phys = component_reg_phys; 680 ida_init(&port->decoder_ida); 681 port->hdm_end = -1; 682 port->commit_end = -1; 683 xa_init(&port->dports); 684 xa_init(&port->endpoints); 685 xa_init(&port->regions); 686 687 device_initialize(dev); 688 lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth); 689 device_set_pm_not_required(dev); 690 dev->bus = &cxl_bus_type; 691 dev->type = &cxl_port_type; 692 693 return port; 694 695 err: 696 kfree(port); 697 return ERR_PTR(rc); 698 } 699 700 static int cxl_setup_comp_regs(struct device *host, struct cxl_register_map *map, 701 resource_size_t component_reg_phys) 702 { 703 if (component_reg_phys == CXL_RESOURCE_NONE) 704 return 0; 705 706 *map = (struct cxl_register_map) { 707 .host = host, 708 .reg_type = CXL_REGLOC_RBI_COMPONENT, 709 .resource = component_reg_phys, 710 .max_size = CXL_COMPONENT_REG_BLOCK_SIZE, 711 }; 712 713 return cxl_setup_regs(map); 714 } 715 716 static int cxl_port_setup_regs(struct cxl_port *port, 717 resource_size_t component_reg_phys) 718 { 719 if (dev_is_platform(port->uport_dev)) 720 return 0; 721 return cxl_setup_comp_regs(&port->dev, &port->comp_map, 722 
component_reg_phys); 723 } 724 725 static int cxl_dport_setup_regs(struct device *host, struct cxl_dport *dport, 726 resource_size_t component_reg_phys) 727 { 728 int rc; 729 730 if (dev_is_platform(dport->dport_dev)) 731 return 0; 732 733 /* 734 * use @dport->dport_dev for the context for error messages during 735 * register probing, and fixup @host after the fact, since @host may be 736 * NULL. 737 */ 738 rc = cxl_setup_comp_regs(dport->dport_dev, &dport->comp_map, 739 component_reg_phys); 740 dport->comp_map.host = host; 741 return rc; 742 } 743 744 static struct cxl_port *__devm_cxl_add_port(struct device *host, 745 struct device *uport_dev, 746 resource_size_t component_reg_phys, 747 struct cxl_dport *parent_dport) 748 { 749 struct cxl_port *port; 750 struct device *dev; 751 int rc; 752 753 port = cxl_port_alloc(uport_dev, component_reg_phys, parent_dport); 754 if (IS_ERR(port)) 755 return port; 756 757 dev = &port->dev; 758 if (is_cxl_memdev(uport_dev)) 759 rc = dev_set_name(dev, "endpoint%d", port->id); 760 else if (parent_dport) 761 rc = dev_set_name(dev, "port%d", port->id); 762 else 763 rc = dev_set_name(dev, "root%d", port->id); 764 if (rc) 765 goto err; 766 767 rc = cxl_port_setup_regs(port, component_reg_phys); 768 if (rc) 769 goto err; 770 771 rc = device_add(dev); 772 if (rc) 773 goto err; 774 775 rc = devm_add_action_or_reset(host, unregister_port, port); 776 if (rc) 777 return ERR_PTR(rc); 778 779 rc = devm_cxl_link_uport(host, port); 780 if (rc) 781 return ERR_PTR(rc); 782 783 rc = devm_cxl_link_parent_dport(host, port, parent_dport); 784 if (rc) 785 return ERR_PTR(rc); 786 787 return port; 788 789 err: 790 put_device(dev); 791 return ERR_PTR(rc); 792 } 793 794 /** 795 * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy 796 * @host: host device for devm operations 797 * @uport_dev: "physical" device implementing this upstream port 798 * @component_reg_phys: (optional) for configurable cxl_port instances 799 * @parent_dport: 
next hop up in the CXL memory decode hierarchy 800 */ 801 struct cxl_port *devm_cxl_add_port(struct device *host, 802 struct device *uport_dev, 803 resource_size_t component_reg_phys, 804 struct cxl_dport *parent_dport) 805 { 806 struct cxl_port *port, *parent_port; 807 808 port = __devm_cxl_add_port(host, uport_dev, component_reg_phys, 809 parent_dport); 810 811 parent_port = parent_dport ? parent_dport->port : NULL; 812 if (IS_ERR(port)) { 813 dev_dbg(uport_dev, "Failed to add%s%s%s: %ld\n", 814 parent_port ? " port to " : "", 815 parent_port ? dev_name(&parent_port->dev) : "", 816 parent_port ? "" : " root port", 817 PTR_ERR(port)); 818 } else { 819 dev_dbg(uport_dev, "%s added%s%s%s\n", 820 dev_name(&port->dev), 821 parent_port ? " to " : "", 822 parent_port ? dev_name(&parent_port->dev) : "", 823 parent_port ? "" : " (root port)"); 824 } 825 826 return port; 827 } 828 EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL); 829 830 struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port) 831 { 832 /* There is no pci_bus associated with a CXL platform-root port */ 833 if (is_cxl_root(port)) 834 return NULL; 835 836 if (dev_is_pci(port->uport_dev)) { 837 struct pci_dev *pdev = to_pci_dev(port->uport_dev); 838 839 return pdev->subordinate; 840 } 841 842 return xa_load(&cxl_root_buses, (unsigned long)port->uport_dev); 843 } 844 EXPORT_SYMBOL_NS_GPL(cxl_port_to_pci_bus, CXL); 845 846 static void unregister_pci_bus(void *uport_dev) 847 { 848 xa_erase(&cxl_root_buses, (unsigned long)uport_dev); 849 } 850 851 int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev, 852 struct pci_bus *bus) 853 { 854 int rc; 855 856 if (dev_is_pci(uport_dev)) 857 return -EINVAL; 858 859 rc = xa_insert(&cxl_root_buses, (unsigned long)uport_dev, bus, 860 GFP_KERNEL); 861 if (rc) 862 return rc; 863 return devm_add_action_or_reset(host, unregister_pci_bus, uport_dev); 864 } 865 EXPORT_SYMBOL_NS_GPL(devm_cxl_register_pci_bus, CXL); 866 867 static bool dev_is_cxl_root_child(struct 
device *dev) 868 { 869 struct cxl_port *port, *parent; 870 871 if (!is_cxl_port(dev)) 872 return false; 873 874 port = to_cxl_port(dev); 875 if (is_cxl_root(port)) 876 return false; 877 878 parent = to_cxl_port(port->dev.parent); 879 if (is_cxl_root(parent)) 880 return true; 881 882 return false; 883 } 884 885 struct cxl_port *find_cxl_root(struct cxl_port *port) 886 { 887 struct cxl_port *iter = port; 888 889 while (iter && !is_cxl_root(iter)) 890 iter = to_cxl_port(iter->dev.parent); 891 892 if (!iter) 893 return NULL; 894 get_device(&iter->dev); 895 return iter; 896 } 897 EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL); 898 899 static struct cxl_dport *find_dport(struct cxl_port *port, int id) 900 { 901 struct cxl_dport *dport; 902 unsigned long index; 903 904 device_lock_assert(&port->dev); 905 xa_for_each(&port->dports, index, dport) 906 if (dport->port_id == id) 907 return dport; 908 return NULL; 909 } 910 911 static int add_dport(struct cxl_port *port, struct cxl_dport *dport) 912 { 913 struct cxl_dport *dup; 914 int rc; 915 916 device_lock_assert(&port->dev); 917 dup = find_dport(port, dport->port_id); 918 if (dup) { 919 dev_err(&port->dev, 920 "unable to add dport%d-%s non-unique port id (%s)\n", 921 dport->port_id, dev_name(dport->dport_dev), 922 dev_name(dup->dport_dev)); 923 return -EBUSY; 924 } 925 926 rc = xa_insert(&port->dports, (unsigned long)dport->dport_dev, dport, 927 GFP_KERNEL); 928 if (rc) 929 return rc; 930 931 port->nr_dports++; 932 return 0; 933 } 934 935 /* 936 * Since root-level CXL dports cannot be enumerated by PCI they are not 937 * enumerated by the common port driver that acquires the port lock over 938 * dport add/remove. Instead, root dports are manually added by a 939 * platform driver and cond_cxl_root_lock() is used to take the missing 940 * port lock in that case. 
941 */ 942 static void cond_cxl_root_lock(struct cxl_port *port) 943 { 944 if (is_cxl_root(port)) 945 device_lock(&port->dev); 946 } 947 948 static void cond_cxl_root_unlock(struct cxl_port *port) 949 { 950 if (is_cxl_root(port)) 951 device_unlock(&port->dev); 952 } 953 954 static void cxl_dport_remove(void *data) 955 { 956 struct cxl_dport *dport = data; 957 struct cxl_port *port = dport->port; 958 959 xa_erase(&port->dports, (unsigned long) dport->dport_dev); 960 put_device(dport->dport_dev); 961 } 962 963 static void cxl_dport_unlink(void *data) 964 { 965 struct cxl_dport *dport = data; 966 struct cxl_port *port = dport->port; 967 char link_name[CXL_TARGET_STRLEN]; 968 969 sprintf(link_name, "dport%d", dport->port_id); 970 sysfs_remove_link(&port->dev.kobj, link_name); 971 } 972 973 static struct cxl_dport * 974 __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev, 975 int port_id, resource_size_t component_reg_phys, 976 resource_size_t rcrb) 977 { 978 char link_name[CXL_TARGET_STRLEN]; 979 struct cxl_dport *dport; 980 struct device *host; 981 int rc; 982 983 if (is_cxl_root(port)) 984 host = port->uport_dev; 985 else 986 host = &port->dev; 987 988 if (!host->driver) { 989 dev_WARN_ONCE(&port->dev, 1, "dport:%s bad devm context\n", 990 dev_name(dport_dev)); 991 return ERR_PTR(-ENXIO); 992 } 993 994 if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >= 995 CXL_TARGET_STRLEN) 996 return ERR_PTR(-EINVAL); 997 998 dport = devm_kzalloc(host, sizeof(*dport), GFP_KERNEL); 999 if (!dport) 1000 return ERR_PTR(-ENOMEM); 1001 1002 dport->dport_dev = dport_dev; 1003 dport->port_id = port_id; 1004 dport->port = port; 1005 1006 if (rcrb == CXL_RESOURCE_NONE) { 1007 rc = cxl_dport_setup_regs(&port->dev, dport, 1008 component_reg_phys); 1009 if (rc) 1010 return ERR_PTR(rc); 1011 } else { 1012 dport->rcrb.base = rcrb; 1013 component_reg_phys = __rcrb_to_component(dport_dev, &dport->rcrb, 1014 CXL_RCRB_DOWNSTREAM); 1015 if (component_reg_phys == 
CXL_RESOURCE_NONE) { 1016 dev_warn(dport_dev, "Invalid Component Registers in RCRB"); 1017 return ERR_PTR(-ENXIO); 1018 } 1019 1020 /* 1021 * RCH @dport is not ready to map until associated with its 1022 * memdev 1023 */ 1024 rc = cxl_dport_setup_regs(NULL, dport, component_reg_phys); 1025 if (rc) 1026 return ERR_PTR(rc); 1027 1028 dport->rch = true; 1029 } 1030 1031 if (component_reg_phys != CXL_RESOURCE_NONE) 1032 dev_dbg(dport_dev, "Component Registers found for dport: %pa\n", 1033 &component_reg_phys); 1034 1035 cond_cxl_root_lock(port); 1036 rc = add_dport(port, dport); 1037 cond_cxl_root_unlock(port); 1038 if (rc) 1039 return ERR_PTR(rc); 1040 1041 get_device(dport_dev); 1042 rc = devm_add_action_or_reset(host, cxl_dport_remove, dport); 1043 if (rc) 1044 return ERR_PTR(rc); 1045 1046 rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name); 1047 if (rc) 1048 return ERR_PTR(rc); 1049 1050 rc = devm_add_action_or_reset(host, cxl_dport_unlink, dport); 1051 if (rc) 1052 return ERR_PTR(rc); 1053 1054 return dport; 1055 } 1056 1057 /** 1058 * devm_cxl_add_dport - append VH downstream port data to a cxl_port 1059 * @port: the cxl_port that references this dport 1060 * @dport_dev: firmware or PCI device representing the dport 1061 * @port_id: identifier for this dport in a decoder's target list 1062 * @component_reg_phys: optional location of CXL component registers 1063 * 1064 * Note that dports are appended to the devm release action's of the 1065 * either the port's host (for root ports), or the port itself (for 1066 * switch ports) 1067 */ 1068 struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port, 1069 struct device *dport_dev, int port_id, 1070 resource_size_t component_reg_phys) 1071 { 1072 struct cxl_dport *dport; 1073 1074 dport = __devm_cxl_add_dport(port, dport_dev, port_id, 1075 component_reg_phys, CXL_RESOURCE_NONE); 1076 if (IS_ERR(dport)) { 1077 dev_dbg(dport_dev, "failed to add dport to %s: %ld\n", 1078 dev_name(&port->dev), 
PTR_ERR(dport)); 1079 } else { 1080 dev_dbg(dport_dev, "dport added to %s\n", 1081 dev_name(&port->dev)); 1082 } 1083 1084 return dport; 1085 } 1086 EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL); 1087 1088 /** 1089 * devm_cxl_add_rch_dport - append RCH downstream port data to a cxl_port 1090 * @port: the cxl_port that references this dport 1091 * @dport_dev: firmware or PCI device representing the dport 1092 * @port_id: identifier for this dport in a decoder's target list 1093 * @rcrb: mandatory location of a Root Complex Register Block 1094 * 1095 * See CXL 3.0 9.11.8 CXL Devices Attached to an RCH 1096 */ 1097 struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port, 1098 struct device *dport_dev, int port_id, 1099 resource_size_t rcrb) 1100 { 1101 struct cxl_dport *dport; 1102 1103 if (rcrb == CXL_RESOURCE_NONE) { 1104 dev_dbg(&port->dev, "failed to add RCH dport, missing RCRB\n"); 1105 return ERR_PTR(-EINVAL); 1106 } 1107 1108 dport = __devm_cxl_add_dport(port, dport_dev, port_id, 1109 CXL_RESOURCE_NONE, rcrb); 1110 if (IS_ERR(dport)) { 1111 dev_dbg(dport_dev, "failed to add RCH dport to %s: %ld\n", 1112 dev_name(&port->dev), PTR_ERR(dport)); 1113 } else { 1114 dev_dbg(dport_dev, "RCH dport added to %s\n", 1115 dev_name(&port->dev)); 1116 } 1117 1118 return dport; 1119 } 1120 EXPORT_SYMBOL_NS_GPL(devm_cxl_add_rch_dport, CXL); 1121 1122 static int add_ep(struct cxl_ep *new) 1123 { 1124 struct cxl_port *port = new->dport->port; 1125 int rc; 1126 1127 device_lock(&port->dev); 1128 if (port->dead) { 1129 device_unlock(&port->dev); 1130 return -ENXIO; 1131 } 1132 rc = xa_insert(&port->endpoints, (unsigned long)new->ep, new, 1133 GFP_KERNEL); 1134 device_unlock(&port->dev); 1135 1136 return rc; 1137 } 1138 1139 /** 1140 * cxl_add_ep - register an endpoint's interest in a port 1141 * @dport: the dport that routes to @ep_dev 1142 * @ep_dev: device representing the endpoint 1143 * 1144 * Intermediate CXL ports are scanned based on the arrival of endpoints. 
 * When those endpoints depart the port can be destroyed once all
 * endpoints that care about that port have been removed.
 */
static int cxl_add_ep(struct cxl_dport *dport, struct device *ep_dev)
{
	struct cxl_ep *ep;
	int rc;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	/* reference dropped by cxl_ep_release() on removal or failure */
	ep->ep = get_device(ep_dev);
	ep->dport = dport;

	rc = add_ep(ep);
	if (rc)
		cxl_ep_release(ep);
	return rc;
}

/*
 * Search context for locating a cxl_port on the bus by one of its
 * dport devices, optionally constrained to a given parent port.
 */
struct cxl_find_port_ctx {
	const struct device *dport_dev;
	const struct cxl_port *parent_port;
	struct cxl_dport **dport;
};

static int match_port_by_dport(struct device *dev, const void *data)
{
	const struct cxl_find_port_ctx *ctx = data;
	struct cxl_dport *dport;
	struct cxl_port *port;

	if (!is_cxl_port(dev))
		return 0;
	if (ctx->parent_port && dev->parent != &ctx->parent_port->dev)
		return 0;

	port = to_cxl_port(dev);
	dport = cxl_find_dport_by_dev(port, ctx->dport_dev);
	if (ctx->dport)
		*ctx->dport = dport;
	return dport != NULL;
}

/*
 * Returns a referenced port (caller puts port->dev) that has
 * @ctx->dport_dev among its dports, or NULL.
 */
static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx)
{
	struct device *dev;

	if (!ctx->dport_dev)
		return NULL;

	dev = bus_find_device(&cxl_bus_type, NULL, ctx, match_port_by_dport);
	if (dev)
		return to_cxl_port(dev);
	return NULL;
}

static struct cxl_port *find_cxl_port(struct device *dport_dev,
				      struct cxl_dport **dport)
{
	struct cxl_find_port_ctx ctx = {
		.dport_dev = dport_dev,
		.dport = dport,
	};
	struct cxl_port *port;

	port = __find_cxl_port(&ctx);
	return port;
}

/* as find_cxl_port(), but only consider children of @parent_port */
static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
					 struct device *dport_dev,
					 struct cxl_dport **dport)
{
	struct cxl_find_port_ctx ctx = {
		.dport_dev = dport_dev,
		.parent_port = parent_port,
		.dport = dport,
	};
	struct cxl_port *port;

	port = __find_cxl_port(&ctx);
	return port;
}

/*
 * All users of grandparent() are using it to walk PCIe-like switch port
 * hierarchy. A PCIe switch is comprised of a bridge device representing the
 * upstream switch port and N bridges representing downstream switch ports. When
 * bridges stack the grand-parent of a downstream switch port is another
 * downstream switch port in the immediate ancestor switch.
 */
static struct device *grandparent(struct device *dev)
{
	if (dev && dev->parent)
		return dev->parent->parent;
	return NULL;
}

/*
 * devm release action (registered by cxl_endpoint_autoremove()) that
 * unregisters the endpoint port when the memdev goes away. Skips the
 * unregistration if the parent is already unbinding (top-down removal
 * in progress) or the endpoint is already dead.
 */
static void delete_endpoint(void *data)
{
	struct cxl_memdev *cxlmd = data;
	struct cxl_port *endpoint = cxlmd->endpoint;
	struct cxl_port *parent_port;
	struct device *parent;

	parent_port = cxl_mem_find_port(cxlmd, NULL);
	if (!parent_port)
		goto out;
	parent = &parent_port->dev;

	device_lock(parent);
	if (parent->driver && !endpoint->dead) {
		/* release in reverse order of devm registration */
		devm_release_action(parent, cxl_unlink_parent_dport, endpoint);
		devm_release_action(parent, cxl_unlink_uport, endpoint);
		devm_release_action(parent, unregister_port, endpoint);
	}
	cxlmd->endpoint = NULL;
	device_unlock(parent);
	put_device(parent);
out:
	/* drop the reference taken by cxl_endpoint_autoremove() */
	put_device(&endpoint->dev);
}

int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
{
	struct device *dev = &cxlmd->dev;

	get_device(&endpoint->dev);
	cxlmd->endpoint = endpoint;
	cxlmd->depth = endpoint->depth;
	return devm_add_action_or_reset(dev, delete_endpoint, cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);

/*
 * The natural end of life of a non-root 'cxl_port' is when its parent port goes
 * through a ->remove() event ("top-down" unregistration).
 * The unnatural trigger for a port to be unregistered is when all memdevs
 * beneath that port have gone through ->remove(). This "bottom-up" removal
 * selectively removes individual child ports manually. This depends on
 * devm_cxl_add_port() to not change its devm action registration order, and
 * for dports to have already been destroyed by reap_dports().
 */
static void delete_switch_port(struct cxl_port *port)
{
	/* release in reverse order of devm registration */
	devm_release_action(port->dev.parent, cxl_unlink_parent_dport, port);
	devm_release_action(port->dev.parent, cxl_unlink_uport, port);
	devm_release_action(port->dev.parent, unregister_port, port);
}

static void reap_dports(struct cxl_port *port)
{
	struct cxl_dport *dport;
	unsigned long index;

	device_lock_assert(&port->dev);

	xa_for_each(&port->dports, index, dport) {
		devm_release_action(&port->dev, cxl_dport_unlink, dport);
		devm_release_action(&port->dev, cxl_dport_remove, dport);
		devm_kfree(&port->dev, dport);
	}
}

struct detach_ctx {
	struct cxl_memdev *cxlmd;
	int depth;
};

/* bus_find_device() match: port at @ctx->depth that hosts @ctx->cxlmd */
static int port_has_memdev(struct device *dev, const void *data)
{
	const struct detach_ctx *ctx = data;
	struct cxl_port *port;

	if (!is_cxl_port(dev))
		return 0;

	port = to_cxl_port(dev);
	if (port->depth != ctx->depth)
		return 0;

	return !!cxl_ep_load(port, ctx->cxlmd);
}

/*
 * devm release action for a departing memdev: walk the topology from
 * the deepest intermediate port up towards the root, dropping this
 * endpoint's registration at each level and garbage collecting any
 * dynamically enumerated port that just lost its last endpoint.
 */
static void cxl_detach_ep(void *data)
{
	struct cxl_memdev *cxlmd = data;

	for (int i = cxlmd->depth - 1; i >= 1; i--) {
		struct cxl_port *port, *parent_port;
		struct detach_ctx ctx = {
			.cxlmd = cxlmd,
			.depth = i,
		};
		struct device *dev;
		struct cxl_ep *ep;
		bool died = false;

		dev = bus_find_device(&cxl_bus_type, NULL, &ctx,
				      port_has_memdev);
		if (!dev)
			continue;
		port = to_cxl_port(dev);

		/* lock order: parent before child */
		parent_port = to_cxl_port(port->dev.parent);
		device_lock(&parent_port->dev);
		device_lock(&port->dev);
		ep = cxl_ep_load(port, cxlmd);
		dev_dbg(&cxlmd->dev, "disconnect %s from %s\n",
			ep ? dev_name(ep->ep) : "", dev_name(&port->dev));
		cxl_ep_remove(port, ep);
		if (ep && !port->dead && xa_empty(&port->endpoints) &&
		    !is_cxl_root(parent_port) && parent_port->dev.driver) {
			/*
			 * This was the last ep attached to a dynamically
			 * enumerated port. Block new cxl_add_ep() and garbage
			 * collect the port.
			 */
			died = true;
			port->dead = true;
			reap_dports(port);
		}
		device_unlock(&port->dev);

		if (died) {
			dev_dbg(&cxlmd->dev, "delete %s\n",
				dev_name(&port->dev));
			delete_switch_port(port);
		}
		put_device(&port->dev);
		device_unlock(&parent_port->dev);
	}
}

static resource_size_t find_component_registers(struct device *dev)
{
	struct cxl_register_map map;
	struct pci_dev *pdev;

	/*
	 * Theoretically, CXL component registers can be hosted on a
	 * non-PCI device, in practice, only cxl_test hits this case.
	 */
	if (!dev_is_pci(dev))
		return CXL_RESOURCE_NONE;

	pdev = to_pci_dev(dev);

	cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	return map.resource;
}

/*
 * Instantiate the cxl_port for @dport_dev underneath the already
 * registered parent port, then register @cxlmd's interest in it.
 * Returns -EAGAIN when the parent port itself still needs to be
 * created (caller iterates).
 */
static int add_port_attach_ep(struct cxl_memdev *cxlmd,
			      struct device *uport_dev,
			      struct device *dport_dev)
{
	struct device *dparent = grandparent(dport_dev);
	struct cxl_port *port, *parent_port = NULL;
	struct cxl_dport *dport, *parent_dport;
	resource_size_t component_reg_phys;
	int rc;

	if (!dparent) {
		/*
		 * The iteration reached the topology root without finding the
		 * CXL-root 'cxl_port' on a previous iteration, fail for now to
		 * be re-probed after platform driver attaches.
		 */
		dev_dbg(&cxlmd->dev, "%s is a root dport\n",
			dev_name(dport_dev));
		return -ENXIO;
	}

	parent_port = find_cxl_port(dparent, &parent_dport);
	if (!parent_port) {
		/* iterate to create this parent_port */
		return -EAGAIN;
	}

	device_lock(&parent_port->dev);
	if (!parent_port->dev.driver) {
		dev_warn(&cxlmd->dev,
			 "port %s:%s disabled, failed to enumerate CXL.mem\n",
			 dev_name(&parent_port->dev), dev_name(uport_dev));
		port = ERR_PTR(-ENXIO);
		goto out;
	}

	port = find_cxl_port_at(parent_port, dport_dev, &dport);
	if (!port) {
		component_reg_phys = find_component_registers(uport_dev);
		port = devm_cxl_add_port(&parent_port->dev, uport_dev,
					 component_reg_phys, parent_dport);
		/* retry find to pick up the new dport information */
		if (!IS_ERR(port))
			port = find_cxl_port_at(parent_port, dport_dev, &dport);
	}
out:
	device_unlock(&parent_port->dev);

	if (IS_ERR(port))
		rc = PTR_ERR(port);
	else {
		dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
			dev_name(&port->dev), dev_name(port->uport_dev));
		rc = cxl_add_ep(dport, &cxlmd->dev);
		if (rc == -EBUSY) {
			/*
			 * "can't" happen, but this error code means
			 * something to the caller, so translate it.
			 */
			rc = -ENXIO;
		}
		put_device(&port->dev);
	}

	put_device(&parent_port->dev);
	return rc;
}

int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd)
{
	struct device *dev = &cxlmd->dev;
	struct device *iter;
	int rc;

	/*
	 * Skip intermediate port enumeration in the RCH case, there
	 * are no ports in between a host bridge and an endpoint.
	 */
	if (cxlmd->cxlds->rcd)
		return 0;

	rc = devm_add_action_or_reset(&cxlmd->dev, cxl_detach_ep, cxlmd);
	if (rc)
		return rc;

	/*
	 * Scan for and add all cxl_ports in this device's ancestry.
	 * Repeat until no more ports are added. Abort if a port add
	 * attempt fails.
	 */
retry:
	for (iter = dev; iter; iter = grandparent(iter)) {
		struct device *dport_dev = grandparent(iter);
		struct device *uport_dev;
		struct cxl_dport *dport;
		struct cxl_port *port;

		if (!dport_dev)
			return 0;

		uport_dev = dport_dev->parent;
		if (!uport_dev) {
			dev_warn(dev, "at %s no parent for dport: %s\n",
				 dev_name(iter), dev_name(dport_dev));
			return -ENXIO;
		}

		dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n",
			dev_name(iter), dev_name(dport_dev),
			dev_name(uport_dev));
		port = find_cxl_port(dport_dev, &dport);
		if (port) {
			dev_dbg(&cxlmd->dev,
				"found already registered port %s:%s\n",
				dev_name(&port->dev),
				dev_name(port->uport_dev));
			rc = cxl_add_ep(dport, &cxlmd->dev);

			/*
			 * If the endpoint already exists in the port's list,
			 * that's ok, it was added on a previous pass.
			 * Otherwise, retry in add_port_attach_ep() after taking
			 * the parent_port lock as the current port may be being
			 * reaped.
			 */
			if (rc && rc != -EBUSY) {
				put_device(&port->dev);
				return rc;
			}

			/* Any more ports to add between this one and the root? */
			if (!dev_is_cxl_root_child(&port->dev)) {
				put_device(&port->dev);
				continue;
			}

			put_device(&port->dev);
			return 0;
		}

		rc = add_port_attach_ep(cxlmd, uport_dev, dport_dev);
		/* port missing, try to add parent */
		if (rc == -EAGAIN)
			continue;
		/* failed to add ep or port */
		if (rc)
			return rc;
		/* port added, new descendants possible, start over */
		goto retry;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL);

struct cxl_port *cxl_pci_find_port(struct pci_dev *pdev,
				   struct cxl_dport **dport)
{
	return find_cxl_port(pdev->dev.parent, dport);
}
EXPORT_SYMBOL_NS_GPL(cxl_pci_find_port, CXL);

struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
				   struct cxl_dport **dport)
{
	return find_cxl_port(grandparent(&cxlmd->dev), dport);
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL);

/*
 * Resolve @target_map (per-position dport port-ids) into dport pointers
 * under the decoder's target seqlock. A NULL @target_map is a no-op for
 * target-less decoders.
 */
static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
				    struct cxl_port *port, int *target_map)
{
	int i, rc = 0;

	if (!target_map)
		return 0;

	device_lock_assert(&port->dev);

	if (xa_empty(&port->dports))
		return -EINVAL;

	write_seqlock(&cxlsd->target_lock);
	for (i = 0; i < cxlsd->nr_targets; i++) {
		struct cxl_dport *dport = find_dport(port, target_map[i]);

		if (!dport) {
			rc = -ENXIO;
			break;
		}
		cxlsd->target[i] = dport;
	}
	write_sequnlock(&cxlsd->target_lock);

	return rc;
}

/* default host-bridge selection: modulo over the interleave ways */
struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos)
{
	struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd;
	struct cxl_decoder *cxld = &cxlsd->cxld;
	int iw;

	iw = cxld->interleave_ways;
	if (dev_WARN_ONCE(&cxld->dev, iw != cxlsd->nr_targets,
			  "misconfigured root decoder\n"))
		return NULL;

	return
cxlrd->cxlsd.target[pos % iw];
}
EXPORT_SYMBOL_NS_GPL(cxl_hb_modulo, CXL);

static struct lock_class_key cxl_decoder_key;

/**
 * cxl_decoder_init - Common decoder setup / initialization
 * @port: owning port of this decoder
 * @cxld: common decoder properties to initialize
 *
 * A port may contain one or more decoders. Each of those decoders
 * enable some address space for CXL.mem utilization. A decoder is
 * expected to be configured by the caller before registering via
 * cxl_decoder_add()
 */
static int cxl_decoder_init(struct cxl_port *port, struct cxl_decoder *cxld)
{
	struct device *dev;
	int rc;

	rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
	if (rc < 0)
		return rc;

	/* need parent to stick around to release the id */
	get_device(&port->dev);
	cxld->id = rc;

	dev = &cxld->dev;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_decoder_key);
	device_set_pm_not_required(dev);
	dev->parent = &port->dev;
	dev->bus = &cxl_bus_type;

	/* Pre initialize an "empty" decoder */
	cxld->interleave_ways = 1;
	cxld->interleave_granularity = PAGE_SIZE;
	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->hpa_range = (struct range) {
		.start = 0,
		.end = -1,
	};

	return 0;
}

static int cxl_switch_decoder_init(struct cxl_port *port,
				   struct cxl_switch_decoder *cxlsd,
				   int nr_targets)
{
	if (nr_targets > CXL_DECODER_MAX_INTERLEAVE)
		return -EINVAL;

	cxlsd->nr_targets = nr_targets;
	seqlock_init(&cxlsd->target_lock);
	return cxl_decoder_init(port, &cxlsd->cxld);
}

/**
 * cxl_root_decoder_alloc - Allocate a root level decoder
 * @port: owning CXL root of this decoder
 * @nr_targets: static number of downstream targets
 * @calc_hb: which host bridge covers the n'th position by granularity
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
 * 'CXL root' decoder is one that decodes from a top-level / static platform
 * firmware description of CXL resources into a CXL standard decode
 * topology.
 */
struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
						unsigned int nr_targets,
						cxl_calc_hb_fn calc_hb)
{
	struct cxl_root_decoder *cxlrd;
	struct cxl_switch_decoder *cxlsd;
	struct cxl_decoder *cxld;
	int rc;

	if (!is_cxl_root(port))
		return ERR_PTR(-EINVAL);

	cxlrd = kzalloc(struct_size(cxlrd, cxlsd.target, nr_targets),
			GFP_KERNEL);
	if (!cxlrd)
		return ERR_PTR(-ENOMEM);

	cxlsd = &cxlrd->cxlsd;
	rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
	if (rc) {
		kfree(cxlrd);
		return ERR_PTR(rc);
	}

	cxlrd->calc_hb = calc_hb;
	mutex_init(&cxlrd->range_lock);

	cxld = &cxlsd->cxld;
	cxld->dev.type = &cxl_decoder_root_type;
	/*
	 * cxl_root_decoder_release() special cases negative ids to
	 * detect memregion_alloc() failures.
	 */
	atomic_set(&cxlrd->region_id, -1);
	rc = memregion_alloc(GFP_KERNEL);
	if (rc < 0) {
		/* device is initialized; put_device() triggers the release */
		put_device(&cxld->dev);
		return ERR_PTR(rc);
	}

	atomic_set(&cxlrd->region_id, rc);
	return cxlrd;
}
EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);

/**
 * cxl_switch_decoder_alloc - Allocate a switch level decoder
 * @port: owning CXL switch port of this decoder
 * @nr_targets: max number of dynamically addressable downstream targets
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
 * 'switch' decoder is any decoder that can be enumerated by PCIe
 * topology and the HDM Decoder Capability. This includes the decoders
 * that sit between Switch Upstream Ports / Switch Downstream Ports and
 * Host Bridges / Root Ports.
 */
struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
						    unsigned int nr_targets)
{
	struct cxl_switch_decoder *cxlsd;
	struct cxl_decoder *cxld;
	int rc;

	if (is_cxl_root(port) || is_cxl_endpoint(port))
		return ERR_PTR(-EINVAL);

	cxlsd = kzalloc(struct_size(cxlsd, target, nr_targets), GFP_KERNEL);
	if (!cxlsd)
		return ERR_PTR(-ENOMEM);

	rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
	if (rc) {
		kfree(cxlsd);
		return ERR_PTR(rc);
	}

	cxld = &cxlsd->cxld;
	cxld->dev.type = &cxl_decoder_switch_type;
	return cxlsd;
}
EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL);

/**
 * cxl_endpoint_decoder_alloc - Allocate an endpoint decoder
 * @port: owning port of this decoder
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add()
 */
struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
{
	struct cxl_endpoint_decoder *cxled;
	struct cxl_decoder *cxld;
	int rc;

	if (!is_cxl_endpoint(port))
		return ERR_PTR(-EINVAL);

	cxled = kzalloc(sizeof(*cxled), GFP_KERNEL);
	if (!cxled)
		return ERR_PTR(-ENOMEM);

	/* -1 == not yet assigned a position in a region */
	cxled->pos = -1;
	cxld = &cxled->cxld;
	rc = cxl_decoder_init(port, cxld);
	if (rc) {
		kfree(cxled);
		return ERR_PTR(rc);
	}

	cxld->dev.type = &cxl_decoder_endpoint_type;
	return cxled;
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL);

/**
 * cxl_decoder_add_locked - Add a decoder with targets
 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
 * @target_map: A list of downstream ports that this decoder can direct memory
 *              traffic to. These numbers should correspond with the port number
 *              in the PCIe Link Capabilities structure.
 *
 * Certain types of decoders may not have any targets. The main example of this
 * is an endpoint device. A more awkward example is a hostbridge whose root
 * ports get hot added (technically possible, though unlikely).
 *
 * This is the locked variant of cxl_decoder_add().
 *
 * Context: Process context. Expects the device lock of the port that owns the
 *	    @cxld to be held.
 *
 * Return: Negative error code if the decoder wasn't properly configured; else
 *	   returns 0.
 */
int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	if (WARN_ON_ONCE(!cxld))
		return -EINVAL;

	if (WARN_ON_ONCE(IS_ERR(cxld)))
		return PTR_ERR(cxld);

	if (cxld->interleave_ways < 1)
		return -EINVAL;

	dev = &cxld->dev;

	port = to_cxl_port(cxld->dev.parent);
	if (!is_endpoint_decoder(dev)) {
		struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);

		rc = decoder_populate_targets(cxlsd, port, target_map);
		/* target failures only matter for already-enabled decoders */
		if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) {
			dev_err(&port->dev,
				"Failed to populate active decoder targets\n");
			return rc;
		}
	}

	rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
	if (rc)
		return rc;

	return device_add(dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL);

/**
 * cxl_decoder_add - Add a decoder with targets
 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
 * @target_map: A list of downstream ports that this decoder can direct memory
 *              traffic to. These numbers should correspond with the port number
 *              in the PCIe Link Capabilities structure.
 *
 * This is the unlocked variant of cxl_decoder_add_locked().
 * See cxl_decoder_add_locked().
 *
 * Context: Process context. Takes and releases the device lock of the port that
 *	    owns the @cxld.
 */
int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
{
	struct cxl_port *port;
	int rc;

	if (WARN_ON_ONCE(!cxld))
		return -EINVAL;

	if (WARN_ON_ONCE(IS_ERR(cxld)))
		return PTR_ERR(cxld);

	port = to_cxl_port(cxld->dev.parent);

	device_lock(&port->dev);
	rc = cxl_decoder_add_locked(cxld, target_map);
	device_unlock(&port->dev);

	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);

/*
 * devm action: tear down any region an endpoint decoder participates
 * in before unregistering the decoder device itself.
 */
static void cxld_unregister(void *dev)
{
	struct cxl_endpoint_decoder *cxled;

	if (is_endpoint_decoder(dev)) {
		cxled = to_cxl_endpoint_decoder(dev);
		cxl_decoder_kill_region(cxled);
	}

	device_unregister(dev);
}

int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
{
	return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, CXL);

/**
 * __cxl_driver_register - register a driver for the cxl bus
 * @cxl_drv: cxl driver structure to attach
 * @owner: owning module/driver
 * @modname: KBUILD_MODNAME for parent driver
 */
int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
			  const char *modname)
{
	if (!cxl_drv->probe) {
		pr_debug("%s ->probe() must be specified\n", modname);
		return -EINVAL;
	}

	if (!cxl_drv->name) {
		pr_debug("%s ->name must be specified\n", modname);
		return -EINVAL;
	}

	if (!cxl_drv->id) {
		pr_debug("%s ->id must be specified\n", modname);
		return -EINVAL;
	}

	cxl_drv->drv.bus = &cxl_bus_type;
	cxl_drv->drv.owner = owner;
	cxl_drv->drv.mod_name = modname;
	cxl_drv->drv.name = cxl_drv->name;

	return driver_register(&cxl_drv->drv);
}
EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, CXL);

void cxl_driver_unregister(struct cxl_driver *cxl_drv)
{
	driver_unregister(&cxl_drv->drv);
}
EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL);

static int cxl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
			      cxl_device_id(dev));
}

/* drivers bind by matching their ->id against the device-type id */
static int cxl_bus_match(struct device *dev, struct device_driver *drv)
{
	return cxl_device_id(dev) == to_cxl_drv(drv)->id;
}

static int cxl_bus_probe(struct device *dev)
{
	int rc;

	rc = to_cxl_drv(dev->driver)->probe(dev);
	dev_dbg(dev, "probe: %d\n", rc);
	return rc;
}

static void cxl_bus_remove(struct device *dev)
{
	struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);

	if (cxl_drv->remove)
		cxl_drv->remove(dev);
}

/* ordered workqueue: serializes rescan and memdev-detach work */
static struct workqueue_struct *cxl_bus_wq;

static void cxl_bus_rescan_queue(struct work_struct *w)
{
	int rc = bus_rescan_devices(&cxl_bus_type);

	pr_debug("CXL bus rescan result: %d\n", rc);
}

void cxl_bus_rescan(void)
{
	static DECLARE_WORK(rescan_work, cxl_bus_rescan_queue);

	queue_work(cxl_bus_wq, &rescan_work);
}
EXPORT_SYMBOL_NS_GPL(cxl_bus_rescan, CXL);

void cxl_bus_drain(void)
{
	drain_workqueue(cxl_bus_wq);
}
EXPORT_SYMBOL_NS_GPL(cxl_bus_drain, CXL);

bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
{
	return queue_work(cxl_bus_wq, &cxlmd->detach_work);
}
EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);

/* for user tooling to ensure port disable work has completed */
static ssize_t flush_store(const struct bus_type *bus, const char *buf, size_t count)
{
	if (sysfs_streq(buf, "1")) {
		flush_workqueue(cxl_bus_wq);
		return count;
	}

	return -EINVAL;
}

static BUS_ATTR_WO(flush);

static struct attribute *cxl_bus_attributes[] = {
	&bus_attr_flush.attr,
	NULL,
};

static struct attribute_group cxl_bus_attribute_group = {
	.attrs = cxl_bus_attributes,
};

static const struct attribute_group *cxl_bus_attribute_groups[] = {
	&cxl_bus_attribute_group,
	NULL,
};

struct bus_type cxl_bus_type = {
	.name = "cxl",
	.uevent = cxl_bus_uevent,
	.match = cxl_bus_match,
	.probe = cxl_bus_probe,
	.remove = cxl_bus_remove,
	.bus_groups = cxl_bus_attribute_groups,
};
EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);

static struct dentry *cxl_debugfs;

struct dentry *cxl_debugfs_create_dir(const char *dir)
{
	return debugfs_create_dir(dir, cxl_debugfs);
}
EXPORT_SYMBOL_NS_GPL(cxl_debugfs_create_dir, CXL);

static __init int cxl_core_init(void)
{
	int rc;

	cxl_debugfs = debugfs_create_dir("cxl", NULL);

	cxl_mbox_init();

	rc = cxl_memdev_init();
	if (rc)
		return rc;

	cxl_bus_wq = alloc_ordered_workqueue("cxl_port", 0);
	if (!cxl_bus_wq) {
		rc = -ENOMEM;
		goto err_wq;
	}

	rc = bus_register(&cxl_bus_type);
	if (rc)
		goto err_bus;

	rc = cxl_region_init();
	if (rc)
		goto err_region;

	return 0;

err_region:
	bus_unregister(&cxl_bus_type);
err_bus:
	destroy_workqueue(cxl_bus_wq);
err_wq:
	cxl_memdev_exit();
	return rc;
}

static void cxl_core_exit(void)
{
	/* unwind in reverse order of cxl_core_init() */
	cxl_region_exit();
	bus_unregister(&cxl_bus_type);
	destroy_workqueue(cxl_bus_wq);
	cxl_memdev_exit();
	debugfs_remove_recursive(cxl_debugfs);
}

subsys_initcall(cxl_core_init);
module_exit(cxl_core_exit);
MODULE_LICENSE("GPL v2");