// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/platform_device.h>
#include <linux/memregion.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <cxlmem.h>
#include <cxlpci.h>
#include <cxl.h>
#include "core.h"

/**
 * DOC: cxl core
 *
 * The CXL core provides a set of interfaces that can be consumed by CXL aware
 * drivers. The interfaces allow for creation, modification, and destruction of
 * regions, memory devices, ports, and decoders. CXL aware drivers must register
 * with the CXL core via these interfaces to participate in cross-device
 * interleave coordination. The CXL core also establishes and maintains the
 * bridge to the nvdimm subsystem.
 *
 * The CXL core introduces a sysfs hierarchy to control the devices that are
 * instantiated by the core.
 */

/*
 * All changes to the interleave configuration occur with this lock held
 * for write.
 */
DECLARE_RWSEM(cxl_region_rwsem);

static DEFINE_IDA(cxl_port_ida);
static DEFINE_XARRAY(cxl_root_buses);

static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%s\n", dev->type->name);
}
static DEVICE_ATTR_RO(devtype);

static int cxl_device_id(const struct device *dev)
{
	if (dev->type == &cxl_nvdimm_bridge_type)
		return CXL_DEVICE_NVDIMM_BRIDGE;
	if (dev->type == &cxl_nvdimm_type)
		return CXL_DEVICE_NVDIMM;
	if (dev->type == CXL_PMEM_REGION_TYPE())
		return CXL_DEVICE_PMEM_REGION;
	if (dev->type == CXL_DAX_REGION_TYPE())
		return CXL_DEVICE_DAX_REGION;
	if (is_cxl_port(dev)) {
		if (is_cxl_root(to_cxl_port(dev)))
			return CXL_DEVICE_ROOT;
		return CXL_DEVICE_PORT;
	}
	if (is_cxl_memdev(dev))
		return CXL_DEVICE_MEMORY_EXPANDER;
	if (dev->type == CXL_REGION_TYPE())
		return CXL_DEVICE_REGION;
	if (dev->type == &cxl_pmu_type)
		return CXL_DEVICE_PMU;
	return 0;
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sysfs_emit(buf, CXL_MODALIAS_FMT "\n", cxl_device_id(dev));
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *cxl_base_attributes[] = {
	&dev_attr_devtype.attr,
	&dev_attr_modalias.attr,
	NULL,
};

struct attribute_group cxl_base_attribute_group = {
	.attrs = cxl_base_attributes,
};

static ssize_t start_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%#llx\n", cxld->hpa_range.start);
}
static DEVICE_ATTR_ADMIN_RO(start);

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%#llx\n", range_len(&cxld->hpa_range));
}
static DEVICE_ATTR_RO(size);

#define CXL_DECODER_FLAG_ATTR(name, flag)                            \
static ssize_t name##_show(struct device *dev,                       \
			   struct device_attribute *attr, char *buf) \
{                                                                     \
	struct cxl_decoder *cxld = to_cxl_decoder(dev);               \
                                                                      \
	return sysfs_emit(buf, "%s\n",                                \
			  (cxld->flags & (flag)) ?
"1" : "0"); \ 114 } \ 115 static DEVICE_ATTR_RO(name) 116 117 CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM); 118 CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM); 119 CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2); 120 CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3); 121 CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK); 122 123 static ssize_t target_type_show(struct device *dev, 124 struct device_attribute *attr, char *buf) 125 { 126 struct cxl_decoder *cxld = to_cxl_decoder(dev); 127 128 switch (cxld->target_type) { 129 case CXL_DECODER_DEVMEM: 130 return sysfs_emit(buf, "accelerator\n"); 131 case CXL_DECODER_HOSTONLYMEM: 132 return sysfs_emit(buf, "expander\n"); 133 } 134 return -ENXIO; 135 } 136 static DEVICE_ATTR_RO(target_type); 137 138 static ssize_t emit_target_list(struct cxl_switch_decoder *cxlsd, char *buf) 139 { 140 struct cxl_decoder *cxld = &cxlsd->cxld; 141 ssize_t offset = 0; 142 int i, rc = 0; 143 144 for (i = 0; i < cxld->interleave_ways; i++) { 145 struct cxl_dport *dport = cxlsd->target[i]; 146 struct cxl_dport *next = NULL; 147 148 if (!dport) 149 break; 150 151 if (i + 1 < cxld->interleave_ways) 152 next = cxlsd->target[i + 1]; 153 rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id, 154 next ? "," : ""); 155 if (rc < 0) 156 return rc; 157 offset += rc; 158 } 159 160 return offset; 161 } 162 163 static ssize_t target_list_show(struct device *dev, 164 struct device_attribute *attr, char *buf) 165 { 166 struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev); 167 ssize_t offset; 168 unsigned int seq; 169 int rc; 170 171 do { 172 seq = read_seqbegin(&cxlsd->target_lock); 173 rc = emit_target_list(cxlsd, buf); 174 } while (read_seqretry(&cxlsd->target_lock, seq)); 175 176 if (rc < 0) 177 return rc; 178 offset = rc; 179 180 rc = sysfs_emit_at(buf, offset, "\n"); 181 if (rc < 0) 182 return rc; 183 184 return offset + rc; 185 } 186 static DEVICE_ATTR_RO(target_list); 187 188 static ssize_t mode_show(struct device *dev, struct device_attribute *attr, 189 char *buf) 190 { 191 struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); 192 193 return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxled->mode)); 194 } 195 196 static ssize_t mode_store(struct device *dev, struct device_attribute *attr, 197 const char *buf, size_t len) 198 { 199 struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); 200 enum cxl_decoder_mode mode; 201 ssize_t rc; 202 203 if (sysfs_streq(buf, "pmem")) 204 mode = CXL_DECODER_PMEM; 205 else if (sysfs_streq(buf, "ram")) 206 mode = CXL_DECODER_RAM; 207 else 208 return -EINVAL; 209 210 rc = cxl_dpa_set_mode(cxled, mode); 211 if (rc) 212 return rc; 213 214 return len; 215 } 216 static DEVICE_ATTR_RW(mode); 217 218 static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *attr, 219 char *buf) 220 { 221 struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); 222 223 guard(rwsem_read)(&cxl_dpa_rwsem); 224 return sysfs_emit(buf, "%#llx\n", (u64)cxl_dpa_resource_start(cxled)); 225 } 226 static DEVICE_ATTR_RO(dpa_resource); 227 228 static ssize_t dpa_size_show(struct device *dev, struct device_attribute *attr, 229 char *buf) 230 { 231 struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); 232 resource_size_t size = cxl_dpa_size(cxled); 233 234 return sysfs_emit(buf, "%pa\n", &size); 235 } 236 237 static ssize_t dpa_size_store(struct device *dev, struct device_attribute *attr, 238 const char *buf, size_t len) 239 { 240 struct cxl_endpoint_decoder *cxled = 
to_cxl_endpoint_decoder(dev); 241 unsigned long long size; 242 ssize_t rc; 243 244 rc = kstrtoull(buf, 0, &size); 245 if (rc) 246 return rc; 247 248 if (!IS_ALIGNED(size, SZ_256M)) 249 return -EINVAL; 250 251 rc = cxl_dpa_free(cxled); 252 if (rc) 253 return rc; 254 255 if (size == 0) 256 return len; 257 258 rc = cxl_dpa_alloc(cxled, size); 259 if (rc) 260 return rc; 261 262 return len; 263 } 264 static DEVICE_ATTR_RW(dpa_size); 265 266 static ssize_t interleave_granularity_show(struct device *dev, 267 struct device_attribute *attr, 268 char *buf) 269 { 270 struct cxl_decoder *cxld = to_cxl_decoder(dev); 271 272 return sysfs_emit(buf, "%d\n", cxld->interleave_granularity); 273 } 274 275 static DEVICE_ATTR_RO(interleave_granularity); 276 277 static ssize_t interleave_ways_show(struct device *dev, 278 struct device_attribute *attr, char *buf) 279 { 280 struct cxl_decoder *cxld = to_cxl_decoder(dev); 281 282 return sysfs_emit(buf, "%d\n", cxld->interleave_ways); 283 } 284 285 static DEVICE_ATTR_RO(interleave_ways); 286 287 static struct attribute *cxl_decoder_base_attrs[] = { 288 &dev_attr_start.attr, 289 &dev_attr_size.attr, 290 &dev_attr_locked.attr, 291 &dev_attr_interleave_granularity.attr, 292 &dev_attr_interleave_ways.attr, 293 NULL, 294 }; 295 296 static struct attribute_group cxl_decoder_base_attribute_group = { 297 .attrs = cxl_decoder_base_attrs, 298 }; 299 300 static struct attribute *cxl_decoder_root_attrs[] = { 301 &dev_attr_cap_pmem.attr, 302 &dev_attr_cap_ram.attr, 303 &dev_attr_cap_type2.attr, 304 &dev_attr_cap_type3.attr, 305 &dev_attr_target_list.attr, 306 SET_CXL_REGION_ATTR(create_pmem_region) 307 SET_CXL_REGION_ATTR(create_ram_region) 308 SET_CXL_REGION_ATTR(delete_region) 309 NULL, 310 }; 311 312 static bool can_create_pmem(struct cxl_root_decoder *cxlrd) 313 { 314 unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_PMEM; 315 316 return (cxlrd->cxlsd.cxld.flags & flags) == flags; 317 } 318 319 static bool can_create_ram(struct cxl_root_decoder *cxlrd) 320 { 321 unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_RAM; 322 323 return (cxlrd->cxlsd.cxld.flags & flags) == flags; 324 } 325 326 static umode_t cxl_root_decoder_visible(struct kobject *kobj, struct attribute *a, int n) 327 { 328 struct device *dev = kobj_to_dev(kobj); 329 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); 330 331 if (a == CXL_REGION_ATTR(create_pmem_region) && !can_create_pmem(cxlrd)) 332 return 0; 333 334 if (a == CXL_REGION_ATTR(create_ram_region) && !can_create_ram(cxlrd)) 335 return 0; 336 337 if (a == CXL_REGION_ATTR(delete_region) && 338 !(can_create_pmem(cxlrd) || can_create_ram(cxlrd))) 339 return 0; 340 341 return a->mode; 342 } 343 344 static struct attribute_group cxl_decoder_root_attribute_group = { 345 .attrs = cxl_decoder_root_attrs, 346 .is_visible = cxl_root_decoder_visible, 347 }; 348 349 static const struct attribute_group *cxl_decoder_root_attribute_groups[] = { 350 &cxl_decoder_root_attribute_group, 351 &cxl_decoder_base_attribute_group, 352 &cxl_base_attribute_group, 353 NULL, 354 }; 355 356 static struct attribute *cxl_decoder_switch_attrs[] = { 357 &dev_attr_target_type.attr, 358 &dev_attr_target_list.attr, 359 SET_CXL_REGION_ATTR(region) 360 NULL, 361 }; 362 363 static struct attribute_group cxl_decoder_switch_attribute_group = { 364 .attrs = cxl_decoder_switch_attrs, 365 }; 366 367 static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = { 368 &cxl_decoder_switch_attribute_group, 369 &cxl_decoder_base_attribute_group, 370 
&cxl_base_attribute_group, 371 NULL, 372 }; 373 374 static struct attribute *cxl_decoder_endpoint_attrs[] = { 375 &dev_attr_target_type.attr, 376 &dev_attr_mode.attr, 377 &dev_attr_dpa_size.attr, 378 &dev_attr_dpa_resource.attr, 379 SET_CXL_REGION_ATTR(region) 380 NULL, 381 }; 382 383 static struct attribute_group cxl_decoder_endpoint_attribute_group = { 384 .attrs = cxl_decoder_endpoint_attrs, 385 }; 386 387 static const struct attribute_group *cxl_decoder_endpoint_attribute_groups[] = { 388 &cxl_decoder_base_attribute_group, 389 &cxl_decoder_endpoint_attribute_group, 390 &cxl_base_attribute_group, 391 NULL, 392 }; 393 394 static void __cxl_decoder_release(struct cxl_decoder *cxld) 395 { 396 struct cxl_port *port = to_cxl_port(cxld->dev.parent); 397 398 ida_free(&port->decoder_ida, cxld->id); 399 put_device(&port->dev); 400 } 401 402 static void cxl_endpoint_decoder_release(struct device *dev) 403 { 404 struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); 405 406 __cxl_decoder_release(&cxled->cxld); 407 kfree(cxled); 408 } 409 410 static void cxl_switch_decoder_release(struct device *dev) 411 { 412 struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev); 413 414 __cxl_decoder_release(&cxlsd->cxld); 415 kfree(cxlsd); 416 } 417 418 struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev) 419 { 420 if (dev_WARN_ONCE(dev, !is_root_decoder(dev), 421 "not a cxl_root_decoder device\n")) 422 return NULL; 423 return container_of(dev, struct cxl_root_decoder, cxlsd.cxld.dev); 424 } 425 EXPORT_SYMBOL_NS_GPL(to_cxl_root_decoder, CXL); 426 427 static void cxl_root_decoder_release(struct device *dev) 428 { 429 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); 430 431 if (atomic_read(&cxlrd->region_id) >= 0) 432 memregion_free(atomic_read(&cxlrd->region_id)); 433 __cxl_decoder_release(&cxlrd->cxlsd.cxld); 434 kfree(cxlrd); 435 } 436 437 static const struct device_type cxl_decoder_endpoint_type = { 438 .name = "cxl_decoder_endpoint", 439 .release = cxl_endpoint_decoder_release, 440 .groups = cxl_decoder_endpoint_attribute_groups, 441 }; 442 443 static const struct device_type cxl_decoder_switch_type = { 444 .name = "cxl_decoder_switch", 445 .release = cxl_switch_decoder_release, 446 .groups = cxl_decoder_switch_attribute_groups, 447 }; 448 449 static const struct device_type cxl_decoder_root_type = { 450 .name = "cxl_decoder_root", 451 .release = cxl_root_decoder_release, 452 .groups = cxl_decoder_root_attribute_groups, 453 }; 454 455 bool is_endpoint_decoder(struct device *dev) 456 { 457 return dev->type == &cxl_decoder_endpoint_type; 458 } 459 EXPORT_SYMBOL_NS_GPL(is_endpoint_decoder, CXL); 460 461 bool is_root_decoder(struct device *dev) 462 { 463 return dev->type == &cxl_decoder_root_type; 464 } 465 EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL); 466 467 bool is_switch_decoder(struct device *dev) 468 { 469 return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type; 470 } 471 EXPORT_SYMBOL_NS_GPL(is_switch_decoder, CXL); 472 473 struct cxl_decoder *to_cxl_decoder(struct device *dev) 474 { 475 if (dev_WARN_ONCE(dev, 476 !is_switch_decoder(dev) && !is_endpoint_decoder(dev), 477 "not a cxl_decoder device\n")) 478 return NULL; 479 return container_of(dev, struct cxl_decoder, dev); 480 } 481 EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL); 482 483 struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev) 484 { 485 if (dev_WARN_ONCE(dev, !is_endpoint_decoder(dev), 486 "not a cxl_endpoint_decoder device\n")) 487 return NULL; 488 return container_of(dev, 
			    struct cxl_endpoint_decoder, cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_endpoint_decoder, CXL);

struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_switch_decoder(dev),
			  "not a cxl_switch_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_switch_decoder, cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_switch_decoder, CXL);

static void cxl_ep_release(struct cxl_ep *ep)
{
	put_device(ep->ep);
	kfree(ep);
}

static void cxl_ep_remove(struct cxl_port *port, struct cxl_ep *ep)
{
	if (!ep)
		return;
	xa_erase(&port->endpoints, (unsigned long) ep->ep);
	cxl_ep_release(ep);
}

static void cxl_port_release(struct device *dev)
{
	struct cxl_port *port = to_cxl_port(dev);
	unsigned long index;
	struct cxl_ep *ep;

	xa_for_each(&port->endpoints, index, ep)
		cxl_ep_remove(port, ep);
	xa_destroy(&port->endpoints);
	xa_destroy(&port->dports);
	xa_destroy(&port->regions);
	ida_free(&cxl_port_ida, port->id);
	kfree(port);
}

static const struct attribute_group *cxl_port_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

static const struct device_type cxl_port_type = {
	.name = "cxl_port",
	.release = cxl_port_release,
	.groups = cxl_port_attribute_groups,
};

bool is_cxl_port(const struct device *dev)
{
	return dev->type == &cxl_port_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_port, CXL);

struct cxl_port *to_cxl_port(const struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
			  "not a cxl_port device\n"))
		return NULL;
	return container_of(dev, struct cxl_port, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_port, CXL);

static void unregister_port(void *_port)
{
	struct cxl_port *port = _port;
	struct cxl_port *parent;
	struct device *lock_dev;

	if (is_cxl_root(port))
		parent = NULL;
	else
		parent = to_cxl_port(port->dev.parent);

	/*
	 * The CXL root port and the first level of ports are unregistered
	 * under the platform firmware device lock; all other ports are
	 * unregistered while holding their parent port lock.
571 */ 572 if (!parent) 573 lock_dev = port->uport_dev; 574 else if (is_cxl_root(parent)) 575 lock_dev = parent->uport_dev; 576 else 577 lock_dev = &parent->dev; 578 579 device_lock_assert(lock_dev); 580 port->dead = true; 581 device_unregister(&port->dev); 582 } 583 584 static void cxl_unlink_uport(void *_port) 585 { 586 struct cxl_port *port = _port; 587 588 sysfs_remove_link(&port->dev.kobj, "uport"); 589 } 590 591 static int devm_cxl_link_uport(struct device *host, struct cxl_port *port) 592 { 593 int rc; 594 595 rc = sysfs_create_link(&port->dev.kobj, &port->uport_dev->kobj, 596 "uport"); 597 if (rc) 598 return rc; 599 return devm_add_action_or_reset(host, cxl_unlink_uport, port); 600 } 601 602 static void cxl_unlink_parent_dport(void *_port) 603 { 604 struct cxl_port *port = _port; 605 606 sysfs_remove_link(&port->dev.kobj, "parent_dport"); 607 } 608 609 static int devm_cxl_link_parent_dport(struct device *host, 610 struct cxl_port *port, 611 struct cxl_dport *parent_dport) 612 { 613 int rc; 614 615 if (!parent_dport) 616 return 0; 617 618 rc = sysfs_create_link(&port->dev.kobj, &parent_dport->dport_dev->kobj, 619 "parent_dport"); 620 if (rc) 621 return rc; 622 return devm_add_action_or_reset(host, cxl_unlink_parent_dport, port); 623 } 624 625 static struct lock_class_key cxl_port_key; 626 627 static struct cxl_port *cxl_port_alloc(struct device *uport_dev, 628 resource_size_t component_reg_phys, 629 struct cxl_dport *parent_dport) 630 { 631 struct cxl_port *port; 632 struct device *dev; 633 int rc; 634 635 port = kzalloc(sizeof(*port), GFP_KERNEL); 636 if (!port) 637 return ERR_PTR(-ENOMEM); 638 639 rc = ida_alloc(&cxl_port_ida, GFP_KERNEL); 640 if (rc < 0) 641 goto err; 642 port->id = rc; 643 port->uport_dev = uport_dev; 644 645 /* 646 * The top-level cxl_port "cxl_root" does not have a cxl_port as 647 * its parent and it does not have any corresponding component 648 * registers as its decode is described by a fixed platform 649 * description. 
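	 *
	 * Illustrative sketch (not from the original source; all names are
	 * hypothetical): a platform driver owning that static description
	 * would create the root port with no parent dport and no component
	 * registers, and descendant ports with both, e.g.:
	 *
	 *	root = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
	 *	...
	 *	port = devm_cxl_add_port(host, bridge_dev, component_reg_phys,
	 *				 parent_dport);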
650 */ 651 dev = &port->dev; 652 if (parent_dport) { 653 struct cxl_port *parent_port = parent_dport->port; 654 struct cxl_port *iter; 655 656 dev->parent = &parent_port->dev; 657 port->depth = parent_port->depth + 1; 658 port->parent_dport = parent_dport; 659 660 /* 661 * walk to the host bridge, or the first ancestor that knows 662 * the host bridge 663 */ 664 iter = port; 665 while (!iter->host_bridge && 666 !is_cxl_root(to_cxl_port(iter->dev.parent))) 667 iter = to_cxl_port(iter->dev.parent); 668 if (iter->host_bridge) 669 port->host_bridge = iter->host_bridge; 670 else if (parent_dport->rch) 671 port->host_bridge = parent_dport->dport_dev; 672 else 673 port->host_bridge = iter->uport_dev; 674 dev_dbg(uport_dev, "host-bridge: %s\n", 675 dev_name(port->host_bridge)); 676 } else 677 dev->parent = uport_dev; 678 679 port->component_reg_phys = component_reg_phys; 680 ida_init(&port->decoder_ida); 681 port->hdm_end = -1; 682 port->commit_end = -1; 683 xa_init(&port->dports); 684 xa_init(&port->endpoints); 685 xa_init(&port->regions); 686 687 device_initialize(dev); 688 lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth); 689 device_set_pm_not_required(dev); 690 dev->bus = &cxl_bus_type; 691 dev->type = &cxl_port_type; 692 693 return port; 694 695 err: 696 kfree(port); 697 return ERR_PTR(rc); 698 } 699 700 static int cxl_setup_comp_regs(struct device *host, struct cxl_register_map *map, 701 resource_size_t component_reg_phys) 702 { 703 if (component_reg_phys == CXL_RESOURCE_NONE) 704 return 0; 705 706 *map = (struct cxl_register_map) { 707 .host = host, 708 .reg_type = CXL_REGLOC_RBI_COMPONENT, 709 .resource = component_reg_phys, 710 .max_size = CXL_COMPONENT_REG_BLOCK_SIZE, 711 }; 712 713 return cxl_setup_regs(map); 714 } 715 716 static int cxl_port_setup_regs(struct cxl_port *port, 717 resource_size_t component_reg_phys) 718 { 719 if (dev_is_platform(port->uport_dev)) 720 return 0; 721 return cxl_setup_comp_regs(&port->dev, &port->comp_map, 722 component_reg_phys); 723 } 724 725 static int cxl_dport_setup_regs(struct device *host, struct cxl_dport *dport, 726 resource_size_t component_reg_phys) 727 { 728 int rc; 729 730 if (dev_is_platform(dport->dport_dev)) 731 return 0; 732 733 /* 734 * use @dport->dport_dev for the context for error messages during 735 * register probing, and fixup @host after the fact, since @host may be 736 * NULL. 
737 */ 738 rc = cxl_setup_comp_regs(dport->dport_dev, &dport->comp_map, 739 component_reg_phys); 740 dport->comp_map.host = host; 741 return rc; 742 } 743 744 static struct cxl_port *__devm_cxl_add_port(struct device *host, 745 struct device *uport_dev, 746 resource_size_t component_reg_phys, 747 struct cxl_dport *parent_dport) 748 { 749 struct cxl_port *port; 750 struct device *dev; 751 int rc; 752 753 port = cxl_port_alloc(uport_dev, component_reg_phys, parent_dport); 754 if (IS_ERR(port)) 755 return port; 756 757 dev = &port->dev; 758 if (is_cxl_memdev(uport_dev)) 759 rc = dev_set_name(dev, "endpoint%d", port->id); 760 else if (parent_dport) 761 rc = dev_set_name(dev, "port%d", port->id); 762 else 763 rc = dev_set_name(dev, "root%d", port->id); 764 if (rc) 765 goto err; 766 767 rc = cxl_port_setup_regs(port, component_reg_phys); 768 if (rc) 769 goto err; 770 771 rc = device_add(dev); 772 if (rc) 773 goto err; 774 775 rc = devm_add_action_or_reset(host, unregister_port, port); 776 if (rc) 777 return ERR_PTR(rc); 778 779 rc = devm_cxl_link_uport(host, port); 780 if (rc) 781 return ERR_PTR(rc); 782 783 rc = devm_cxl_link_parent_dport(host, port, parent_dport); 784 if (rc) 785 return ERR_PTR(rc); 786 787 return port; 788 789 err: 790 put_device(dev); 791 return ERR_PTR(rc); 792 } 793 794 /** 795 * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy 796 * @host: host device for devm operations 797 * @uport_dev: "physical" device implementing this upstream port 798 * @component_reg_phys: (optional) for configurable cxl_port instances 799 * @parent_dport: next hop up in the CXL memory decode hierarchy 800 */ 801 struct cxl_port *devm_cxl_add_port(struct device *host, 802 struct device *uport_dev, 803 resource_size_t component_reg_phys, 804 struct cxl_dport *parent_dport) 805 { 806 struct cxl_port *port, *parent_port; 807 808 port = __devm_cxl_add_port(host, uport_dev, component_reg_phys, 809 parent_dport); 810 811 parent_port = parent_dport ? parent_dport->port : NULL; 812 if (IS_ERR(port)) { 813 dev_dbg(uport_dev, "Failed to add%s%s%s: %ld\n", 814 parent_port ? " port to " : "", 815 parent_port ? dev_name(&parent_port->dev) : "", 816 parent_port ? "" : " root port", 817 PTR_ERR(port)); 818 } else { 819 dev_dbg(uport_dev, "%s added%s%s%s\n", 820 dev_name(&port->dev), 821 parent_port ? " to " : "", 822 parent_port ? dev_name(&parent_port->dev) : "", 823 parent_port ? 
"" : " (root port)"); 824 } 825 826 return port; 827 } 828 EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL); 829 830 struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port) 831 { 832 /* There is no pci_bus associated with a CXL platform-root port */ 833 if (is_cxl_root(port)) 834 return NULL; 835 836 if (dev_is_pci(port->uport_dev)) { 837 struct pci_dev *pdev = to_pci_dev(port->uport_dev); 838 839 return pdev->subordinate; 840 } 841 842 return xa_load(&cxl_root_buses, (unsigned long)port->uport_dev); 843 } 844 EXPORT_SYMBOL_NS_GPL(cxl_port_to_pci_bus, CXL); 845 846 static void unregister_pci_bus(void *uport_dev) 847 { 848 xa_erase(&cxl_root_buses, (unsigned long)uport_dev); 849 } 850 851 int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev, 852 struct pci_bus *bus) 853 { 854 int rc; 855 856 if (dev_is_pci(uport_dev)) 857 return -EINVAL; 858 859 rc = xa_insert(&cxl_root_buses, (unsigned long)uport_dev, bus, 860 GFP_KERNEL); 861 if (rc) 862 return rc; 863 return devm_add_action_or_reset(host, unregister_pci_bus, uport_dev); 864 } 865 EXPORT_SYMBOL_NS_GPL(devm_cxl_register_pci_bus, CXL); 866 867 static bool dev_is_cxl_root_child(struct device *dev) 868 { 869 struct cxl_port *port, *parent; 870 871 if (!is_cxl_port(dev)) 872 return false; 873 874 port = to_cxl_port(dev); 875 if (is_cxl_root(port)) 876 return false; 877 878 parent = to_cxl_port(port->dev.parent); 879 if (is_cxl_root(parent)) 880 return true; 881 882 return false; 883 } 884 885 struct cxl_port *find_cxl_root(struct cxl_port *port) 886 { 887 struct cxl_port *iter = port; 888 889 while (iter && !is_cxl_root(iter)) 890 iter = to_cxl_port(iter->dev.parent); 891 892 if (!iter) 893 return NULL; 894 get_device(&iter->dev); 895 return iter; 896 } 897 EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL); 898 899 static struct cxl_dport *find_dport(struct cxl_port *port, int id) 900 { 901 struct cxl_dport *dport; 902 unsigned long index; 903 904 device_lock_assert(&port->dev); 905 xa_for_each(&port->dports, index, dport) 906 if (dport->port_id == id) 907 return dport; 908 return NULL; 909 } 910 911 static int add_dport(struct cxl_port *port, struct cxl_dport *dport) 912 { 913 struct cxl_dport *dup; 914 int rc; 915 916 device_lock_assert(&port->dev); 917 dup = find_dport(port, dport->port_id); 918 if (dup) { 919 dev_err(&port->dev, 920 "unable to add dport%d-%s non-unique port id (%s)\n", 921 dport->port_id, dev_name(dport->dport_dev), 922 dev_name(dup->dport_dev)); 923 return -EBUSY; 924 } 925 926 rc = xa_insert(&port->dports, (unsigned long)dport->dport_dev, dport, 927 GFP_KERNEL); 928 if (rc) 929 return rc; 930 931 port->nr_dports++; 932 return 0; 933 } 934 935 /* 936 * Since root-level CXL dports cannot be enumerated by PCI they are not 937 * enumerated by the common port driver that acquires the port lock over 938 * dport add/remove. Instead, root dports are manually added by a 939 * platform driver and cond_cxl_root_lock() is used to take the missing 940 * port lock in that case. 
941 */ 942 static void cond_cxl_root_lock(struct cxl_port *port) 943 { 944 if (is_cxl_root(port)) 945 device_lock(&port->dev); 946 } 947 948 static void cond_cxl_root_unlock(struct cxl_port *port) 949 { 950 if (is_cxl_root(port)) 951 device_unlock(&port->dev); 952 } 953 954 static void cxl_dport_remove(void *data) 955 { 956 struct cxl_dport *dport = data; 957 struct cxl_port *port = dport->port; 958 959 xa_erase(&port->dports, (unsigned long) dport->dport_dev); 960 put_device(dport->dport_dev); 961 } 962 963 static void cxl_dport_unlink(void *data) 964 { 965 struct cxl_dport *dport = data; 966 struct cxl_port *port = dport->port; 967 char link_name[CXL_TARGET_STRLEN]; 968 969 sprintf(link_name, "dport%d", dport->port_id); 970 sysfs_remove_link(&port->dev.kobj, link_name); 971 } 972 973 static struct cxl_dport * 974 __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev, 975 int port_id, resource_size_t component_reg_phys, 976 resource_size_t rcrb) 977 { 978 char link_name[CXL_TARGET_STRLEN]; 979 struct cxl_dport *dport; 980 struct device *host; 981 int rc; 982 983 if (is_cxl_root(port)) 984 host = port->uport_dev; 985 else 986 host = &port->dev; 987 988 if (!host->driver) { 989 dev_WARN_ONCE(&port->dev, 1, "dport:%s bad devm context\n", 990 dev_name(dport_dev)); 991 return ERR_PTR(-ENXIO); 992 } 993 994 if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >= 995 CXL_TARGET_STRLEN) 996 return ERR_PTR(-EINVAL); 997 998 dport = devm_kzalloc(host, sizeof(*dport), GFP_KERNEL); 999 if (!dport) 1000 return ERR_PTR(-ENOMEM); 1001 1002 dport->dport_dev = dport_dev; 1003 dport->port_id = port_id; 1004 dport->port = port; 1005 1006 if (rcrb == CXL_RESOURCE_NONE) { 1007 rc = cxl_dport_setup_regs(&port->dev, dport, 1008 component_reg_phys); 1009 if (rc) 1010 return ERR_PTR(rc); 1011 } else { 1012 dport->rcrb.base = rcrb; 1013 component_reg_phys = __rcrb_to_component(dport_dev, &dport->rcrb, 1014 CXL_RCRB_DOWNSTREAM); 1015 if (component_reg_phys == CXL_RESOURCE_NONE) { 1016 dev_warn(dport_dev, "Invalid Component Registers in RCRB"); 1017 return ERR_PTR(-ENXIO); 1018 } 1019 1020 /* 1021 * RCH @dport is not ready to map until associated with its 1022 * memdev 1023 */ 1024 rc = cxl_dport_setup_regs(NULL, dport, component_reg_phys); 1025 if (rc) 1026 return ERR_PTR(rc); 1027 1028 dport->rch = true; 1029 } 1030 1031 if (component_reg_phys != CXL_RESOURCE_NONE) 1032 dev_dbg(dport_dev, "Component Registers found for dport: %pa\n", 1033 &component_reg_phys); 1034 1035 cond_cxl_root_lock(port); 1036 rc = add_dport(port, dport); 1037 cond_cxl_root_unlock(port); 1038 if (rc) 1039 return ERR_PTR(rc); 1040 1041 get_device(dport_dev); 1042 rc = devm_add_action_or_reset(host, cxl_dport_remove, dport); 1043 if (rc) 1044 return ERR_PTR(rc); 1045 1046 rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name); 1047 if (rc) 1048 return ERR_PTR(rc); 1049 1050 rc = devm_add_action_or_reset(host, cxl_dport_unlink, dport); 1051 if (rc) 1052 return ERR_PTR(rc); 1053 1054 return dport; 1055 } 1056 1057 /** 1058 * devm_cxl_add_dport - append VH downstream port data to a cxl_port 1059 * @port: the cxl_port that references this dport 1060 * @dport_dev: firmware or PCI device representing the dport 1061 * @port_id: identifier for this dport in a decoder's target list 1062 * @component_reg_phys: optional location of CXL component registers 1063 * 1064 * Note that dports are appended to the devm release action's of the 1065 * either the port's host (for root ports), or the port itself (for 1066 * 
switch ports) 1067 */ 1068 struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port, 1069 struct device *dport_dev, int port_id, 1070 resource_size_t component_reg_phys) 1071 { 1072 struct cxl_dport *dport; 1073 1074 dport = __devm_cxl_add_dport(port, dport_dev, port_id, 1075 component_reg_phys, CXL_RESOURCE_NONE); 1076 if (IS_ERR(dport)) { 1077 dev_dbg(dport_dev, "failed to add dport to %s: %ld\n", 1078 dev_name(&port->dev), PTR_ERR(dport)); 1079 } else { 1080 dev_dbg(dport_dev, "dport added to %s\n", 1081 dev_name(&port->dev)); 1082 } 1083 1084 return dport; 1085 } 1086 EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL); 1087 1088 /** 1089 * devm_cxl_add_rch_dport - append RCH downstream port data to a cxl_port 1090 * @port: the cxl_port that references this dport 1091 * @dport_dev: firmware or PCI device representing the dport 1092 * @port_id: identifier for this dport in a decoder's target list 1093 * @rcrb: mandatory location of a Root Complex Register Block 1094 * 1095 * See CXL 3.0 9.11.8 CXL Devices Attached to an RCH 1096 */ 1097 struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port, 1098 struct device *dport_dev, int port_id, 1099 resource_size_t rcrb) 1100 { 1101 struct cxl_dport *dport; 1102 1103 if (rcrb == CXL_RESOURCE_NONE) { 1104 dev_dbg(&port->dev, "failed to add RCH dport, missing RCRB\n"); 1105 return ERR_PTR(-EINVAL); 1106 } 1107 1108 dport = __devm_cxl_add_dport(port, dport_dev, port_id, 1109 CXL_RESOURCE_NONE, rcrb); 1110 if (IS_ERR(dport)) { 1111 dev_dbg(dport_dev, "failed to add RCH dport to %s: %ld\n", 1112 dev_name(&port->dev), PTR_ERR(dport)); 1113 } else { 1114 dev_dbg(dport_dev, "RCH dport added to %s\n", 1115 dev_name(&port->dev)); 1116 } 1117 1118 return dport; 1119 } 1120 EXPORT_SYMBOL_NS_GPL(devm_cxl_add_rch_dport, CXL); 1121 1122 static int add_ep(struct cxl_ep *new) 1123 { 1124 struct cxl_port *port = new->dport->port; 1125 int rc; 1126 1127 device_lock(&port->dev); 1128 if (port->dead) { 1129 device_unlock(&port->dev); 1130 return -ENXIO; 1131 } 1132 rc = xa_insert(&port->endpoints, (unsigned long)new->ep, new, 1133 GFP_KERNEL); 1134 device_unlock(&port->dev); 1135 1136 return rc; 1137 } 1138 1139 /** 1140 * cxl_add_ep - register an endpoint's interest in a port 1141 * @dport: the dport that routes to @ep_dev 1142 * @ep_dev: device representing the endpoint 1143 * 1144 * Intermediate CXL ports are scanned based on the arrival of endpoints. 1145 * When those endpoints depart the port can be destroyed once all 1146 * endpoints that care about that port have been removed. 
1147 */ 1148 static int cxl_add_ep(struct cxl_dport *dport, struct device *ep_dev) 1149 { 1150 struct cxl_ep *ep; 1151 int rc; 1152 1153 ep = kzalloc(sizeof(*ep), GFP_KERNEL); 1154 if (!ep) 1155 return -ENOMEM; 1156 1157 ep->ep = get_device(ep_dev); 1158 ep->dport = dport; 1159 1160 rc = add_ep(ep); 1161 if (rc) 1162 cxl_ep_release(ep); 1163 return rc; 1164 } 1165 1166 struct cxl_find_port_ctx { 1167 const struct device *dport_dev; 1168 const struct cxl_port *parent_port; 1169 struct cxl_dport **dport; 1170 }; 1171 1172 static int match_port_by_dport(struct device *dev, const void *data) 1173 { 1174 const struct cxl_find_port_ctx *ctx = data; 1175 struct cxl_dport *dport; 1176 struct cxl_port *port; 1177 1178 if (!is_cxl_port(dev)) 1179 return 0; 1180 if (ctx->parent_port && dev->parent != &ctx->parent_port->dev) 1181 return 0; 1182 1183 port = to_cxl_port(dev); 1184 dport = cxl_find_dport_by_dev(port, ctx->dport_dev); 1185 if (ctx->dport) 1186 *ctx->dport = dport; 1187 return dport != NULL; 1188 } 1189 1190 static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx) 1191 { 1192 struct device *dev; 1193 1194 if (!ctx->dport_dev) 1195 return NULL; 1196 1197 dev = bus_find_device(&cxl_bus_type, NULL, ctx, match_port_by_dport); 1198 if (dev) 1199 return to_cxl_port(dev); 1200 return NULL; 1201 } 1202 1203 static struct cxl_port *find_cxl_port(struct device *dport_dev, 1204 struct cxl_dport **dport) 1205 { 1206 struct cxl_find_port_ctx ctx = { 1207 .dport_dev = dport_dev, 1208 .dport = dport, 1209 }; 1210 struct cxl_port *port; 1211 1212 port = __find_cxl_port(&ctx); 1213 return port; 1214 } 1215 1216 static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port, 1217 struct device *dport_dev, 1218 struct cxl_dport **dport) 1219 { 1220 struct cxl_find_port_ctx ctx = { 1221 .dport_dev = dport_dev, 1222 .parent_port = parent_port, 1223 .dport = dport, 1224 }; 1225 struct cxl_port *port; 1226 1227 port = __find_cxl_port(&ctx); 1228 return port; 1229 } 1230 1231 /* 1232 * All users of grandparent() are using it to walk PCIe-like switch port 1233 * hierarchy. A PCIe switch is comprised of a bridge device representing the 1234 * upstream switch port and N bridges representing downstream switch ports. When 1235 * bridges stack the grand-parent of a downstream switch port is another 1236 * downstream switch port in the immediate ancestor switch. 
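 *
 * For example (hypothetical topology, not from the original source): for a
 * memdev whose device ancestry is
 *
 *	memdev -> PCIe endpoint -> downstream switch port -> upstream switch
 *	port -> root port
 *
 * grandparent(&memdev->dev) lands on the downstream switch port, and each
 * further grandparent() call skips the intervening upstream-port bridge to
 * reach the next downstream port (or root port) toward the host bridge.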
 */
static struct device *grandparent(struct device *dev)
{
	if (dev && dev->parent)
		return dev->parent->parent;
	return NULL;
}

static struct device *endpoint_host(struct cxl_port *endpoint)
{
	struct cxl_port *port = to_cxl_port(endpoint->dev.parent);

	if (is_cxl_root(port))
		return port->uport_dev;
	return &port->dev;
}

static void delete_endpoint(void *data)
{
	struct cxl_memdev *cxlmd = data;
	struct cxl_port *endpoint = cxlmd->endpoint;
	struct device *host = endpoint_host(endpoint);

	device_lock(host);
	if (host->driver && !endpoint->dead) {
		devm_release_action(host, cxl_unlink_parent_dport, endpoint);
		devm_release_action(host, cxl_unlink_uport, endpoint);
		devm_release_action(host, unregister_port, endpoint);
	}
	cxlmd->endpoint = NULL;
	device_unlock(host);
	put_device(&endpoint->dev);
	put_device(host);
}

int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
{
	struct device *host = endpoint_host(endpoint);
	struct device *dev = &cxlmd->dev;

	get_device(host);
	get_device(&endpoint->dev);
	cxlmd->endpoint = endpoint;
	cxlmd->depth = endpoint->depth;
	return devm_add_action_or_reset(dev, delete_endpoint, cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);

/*
 * The natural end of life of a non-root 'cxl_port' is when its parent port goes
 * through a ->remove() event ("top-down" unregistration). The unnatural trigger
 * for a port to be unregistered is when all memdevs beneath that port have gone
 * through ->remove(). This "bottom-up" removal selectively removes individual
 * child ports manually. This depends on devm_cxl_add_port() to not change its
 * devm action registration order, and for dports to have already been
 * destroyed by reap_dports().
1293 */ 1294 static void delete_switch_port(struct cxl_port *port) 1295 { 1296 devm_release_action(port->dev.parent, cxl_unlink_parent_dport, port); 1297 devm_release_action(port->dev.parent, cxl_unlink_uport, port); 1298 devm_release_action(port->dev.parent, unregister_port, port); 1299 } 1300 1301 static void reap_dports(struct cxl_port *port) 1302 { 1303 struct cxl_dport *dport; 1304 unsigned long index; 1305 1306 device_lock_assert(&port->dev); 1307 1308 xa_for_each(&port->dports, index, dport) { 1309 devm_release_action(&port->dev, cxl_dport_unlink, dport); 1310 devm_release_action(&port->dev, cxl_dport_remove, dport); 1311 devm_kfree(&port->dev, dport); 1312 } 1313 } 1314 1315 struct detach_ctx { 1316 struct cxl_memdev *cxlmd; 1317 int depth; 1318 }; 1319 1320 static int port_has_memdev(struct device *dev, const void *data) 1321 { 1322 const struct detach_ctx *ctx = data; 1323 struct cxl_port *port; 1324 1325 if (!is_cxl_port(dev)) 1326 return 0; 1327 1328 port = to_cxl_port(dev); 1329 if (port->depth != ctx->depth) 1330 return 0; 1331 1332 return !!cxl_ep_load(port, ctx->cxlmd); 1333 } 1334 1335 static void cxl_detach_ep(void *data) 1336 { 1337 struct cxl_memdev *cxlmd = data; 1338 1339 for (int i = cxlmd->depth - 1; i >= 1; i--) { 1340 struct cxl_port *port, *parent_port; 1341 struct detach_ctx ctx = { 1342 .cxlmd = cxlmd, 1343 .depth = i, 1344 }; 1345 struct device *dev; 1346 struct cxl_ep *ep; 1347 bool died = false; 1348 1349 dev = bus_find_device(&cxl_bus_type, NULL, &ctx, 1350 port_has_memdev); 1351 if (!dev) 1352 continue; 1353 port = to_cxl_port(dev); 1354 1355 parent_port = to_cxl_port(port->dev.parent); 1356 device_lock(&parent_port->dev); 1357 device_lock(&port->dev); 1358 ep = cxl_ep_load(port, cxlmd); 1359 dev_dbg(&cxlmd->dev, "disconnect %s from %s\n", 1360 ep ? dev_name(ep->ep) : "", dev_name(&port->dev)); 1361 cxl_ep_remove(port, ep); 1362 if (ep && !port->dead && xa_empty(&port->endpoints) && 1363 !is_cxl_root(parent_port) && parent_port->dev.driver) { 1364 /* 1365 * This was the last ep attached to a dynamically 1366 * enumerated port. Block new cxl_add_ep() and garbage 1367 * collect the port. 1368 */ 1369 died = true; 1370 port->dead = true; 1371 reap_dports(port); 1372 } 1373 device_unlock(&port->dev); 1374 1375 if (died) { 1376 dev_dbg(&cxlmd->dev, "delete %s\n", 1377 dev_name(&port->dev)); 1378 delete_switch_port(port); 1379 } 1380 put_device(&port->dev); 1381 device_unlock(&parent_port->dev); 1382 } 1383 } 1384 1385 static resource_size_t find_component_registers(struct device *dev) 1386 { 1387 struct cxl_register_map map; 1388 struct pci_dev *pdev; 1389 1390 /* 1391 * Theoretically, CXL component registers can be hosted on a 1392 * non-PCI device, in practice, only cxl_test hits this case. 1393 */ 1394 if (!dev_is_pci(dev)) 1395 return CXL_RESOURCE_NONE; 1396 1397 pdev = to_pci_dev(dev); 1398 1399 cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map); 1400 return map.resource; 1401 } 1402 1403 static int add_port_attach_ep(struct cxl_memdev *cxlmd, 1404 struct device *uport_dev, 1405 struct device *dport_dev) 1406 { 1407 struct device *dparent = grandparent(dport_dev); 1408 struct cxl_port *port, *parent_port = NULL; 1409 struct cxl_dport *dport, *parent_dport; 1410 resource_size_t component_reg_phys; 1411 int rc; 1412 1413 if (!dparent) { 1414 /* 1415 * The iteration reached the topology root without finding the 1416 * CXL-root 'cxl_port' on a previous iteration, fail for now to 1417 * be re-probed after platform driver attaches. 
1418 */ 1419 dev_dbg(&cxlmd->dev, "%s is a root dport\n", 1420 dev_name(dport_dev)); 1421 return -ENXIO; 1422 } 1423 1424 parent_port = find_cxl_port(dparent, &parent_dport); 1425 if (!parent_port) { 1426 /* iterate to create this parent_port */ 1427 return -EAGAIN; 1428 } 1429 1430 device_lock(&parent_port->dev); 1431 if (!parent_port->dev.driver) { 1432 dev_warn(&cxlmd->dev, 1433 "port %s:%s disabled, failed to enumerate CXL.mem\n", 1434 dev_name(&parent_port->dev), dev_name(uport_dev)); 1435 port = ERR_PTR(-ENXIO); 1436 goto out; 1437 } 1438 1439 port = find_cxl_port_at(parent_port, dport_dev, &dport); 1440 if (!port) { 1441 component_reg_phys = find_component_registers(uport_dev); 1442 port = devm_cxl_add_port(&parent_port->dev, uport_dev, 1443 component_reg_phys, parent_dport); 1444 /* retry find to pick up the new dport information */ 1445 if (!IS_ERR(port)) 1446 port = find_cxl_port_at(parent_port, dport_dev, &dport); 1447 } 1448 out: 1449 device_unlock(&parent_port->dev); 1450 1451 if (IS_ERR(port)) 1452 rc = PTR_ERR(port); 1453 else { 1454 dev_dbg(&cxlmd->dev, "add to new port %s:%s\n", 1455 dev_name(&port->dev), dev_name(port->uport_dev)); 1456 rc = cxl_add_ep(dport, &cxlmd->dev); 1457 if (rc == -EBUSY) { 1458 /* 1459 * "can't" happen, but this error code means 1460 * something to the caller, so translate it. 1461 */ 1462 rc = -ENXIO; 1463 } 1464 put_device(&port->dev); 1465 } 1466 1467 put_device(&parent_port->dev); 1468 return rc; 1469 } 1470 1471 int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd) 1472 { 1473 struct device *dev = &cxlmd->dev; 1474 struct device *iter; 1475 int rc; 1476 1477 /* 1478 * Skip intermediate port enumeration in the RCH case, there 1479 * are no ports in between a host bridge and an endpoint. 1480 */ 1481 if (cxlmd->cxlds->rcd) 1482 return 0; 1483 1484 rc = devm_add_action_or_reset(&cxlmd->dev, cxl_detach_ep, cxlmd); 1485 if (rc) 1486 return rc; 1487 1488 /* 1489 * Scan for and add all cxl_ports in this device's ancestry. 1490 * Repeat until no more ports are added. Abort if a port add 1491 * attempt fails. 1492 */ 1493 retry: 1494 for (iter = dev; iter; iter = grandparent(iter)) { 1495 struct device *dport_dev = grandparent(iter); 1496 struct device *uport_dev; 1497 struct cxl_dport *dport; 1498 struct cxl_port *port; 1499 1500 if (!dport_dev) 1501 return 0; 1502 1503 uport_dev = dport_dev->parent; 1504 if (!uport_dev) { 1505 dev_warn(dev, "at %s no parent for dport: %s\n", 1506 dev_name(iter), dev_name(dport_dev)); 1507 return -ENXIO; 1508 } 1509 1510 dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n", 1511 dev_name(iter), dev_name(dport_dev), 1512 dev_name(uport_dev)); 1513 port = find_cxl_port(dport_dev, &dport); 1514 if (port) { 1515 dev_dbg(&cxlmd->dev, 1516 "found already registered port %s:%s\n", 1517 dev_name(&port->dev), 1518 dev_name(port->uport_dev)); 1519 rc = cxl_add_ep(dport, &cxlmd->dev); 1520 1521 /* 1522 * If the endpoint already exists in the port's list, 1523 * that's ok, it was added on a previous pass. 1524 * Otherwise, retry in add_port_attach_ep() after taking 1525 * the parent_port lock as the current port may be being 1526 * reaped. 1527 */ 1528 if (rc && rc != -EBUSY) { 1529 put_device(&port->dev); 1530 return rc; 1531 } 1532 1533 /* Any more ports to add between this one and the root? 
*/ 1534 if (!dev_is_cxl_root_child(&port->dev)) { 1535 put_device(&port->dev); 1536 continue; 1537 } 1538 1539 put_device(&port->dev); 1540 return 0; 1541 } 1542 1543 rc = add_port_attach_ep(cxlmd, uport_dev, dport_dev); 1544 /* port missing, try to add parent */ 1545 if (rc == -EAGAIN) 1546 continue; 1547 /* failed to add ep or port */ 1548 if (rc) 1549 return rc; 1550 /* port added, new descendants possible, start over */ 1551 goto retry; 1552 } 1553 1554 return 0; 1555 } 1556 EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL); 1557 1558 struct cxl_port *cxl_pci_find_port(struct pci_dev *pdev, 1559 struct cxl_dport **dport) 1560 { 1561 return find_cxl_port(pdev->dev.parent, dport); 1562 } 1563 EXPORT_SYMBOL_NS_GPL(cxl_pci_find_port, CXL); 1564 1565 struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd, 1566 struct cxl_dport **dport) 1567 { 1568 return find_cxl_port(grandparent(&cxlmd->dev), dport); 1569 } 1570 EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL); 1571 1572 static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd, 1573 struct cxl_port *port, int *target_map) 1574 { 1575 int i, rc = 0; 1576 1577 if (!target_map) 1578 return 0; 1579 1580 device_lock_assert(&port->dev); 1581 1582 if (xa_empty(&port->dports)) 1583 return -EINVAL; 1584 1585 write_seqlock(&cxlsd->target_lock); 1586 for (i = 0; i < cxlsd->nr_targets; i++) { 1587 struct cxl_dport *dport = find_dport(port, target_map[i]); 1588 1589 if (!dport) { 1590 rc = -ENXIO; 1591 break; 1592 } 1593 cxlsd->target[i] = dport; 1594 } 1595 write_sequnlock(&cxlsd->target_lock); 1596 1597 return rc; 1598 } 1599 1600 struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos) 1601 { 1602 struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd; 1603 struct cxl_decoder *cxld = &cxlsd->cxld; 1604 int iw; 1605 1606 iw = cxld->interleave_ways; 1607 if (dev_WARN_ONCE(&cxld->dev, iw != cxlsd->nr_targets, 1608 "misconfigured root decoder\n")) 1609 return NULL; 1610 1611 return cxlrd->cxlsd.target[pos % iw]; 1612 } 1613 EXPORT_SYMBOL_NS_GPL(cxl_hb_modulo, CXL); 1614 1615 static struct lock_class_key cxl_decoder_key; 1616 1617 /** 1618 * cxl_decoder_init - Common decoder setup / initialization 1619 * @port: owning port of this decoder 1620 * @cxld: common decoder properties to initialize 1621 * 1622 * A port may contain one or more decoders. Each of those decoders 1623 * enable some address space for CXL.mem utilization. 
A decoder is 1624 * expected to be configured by the caller before registering via 1625 * cxl_decoder_add() 1626 */ 1627 static int cxl_decoder_init(struct cxl_port *port, struct cxl_decoder *cxld) 1628 { 1629 struct device *dev; 1630 int rc; 1631 1632 rc = ida_alloc(&port->decoder_ida, GFP_KERNEL); 1633 if (rc < 0) 1634 return rc; 1635 1636 /* need parent to stick around to release the id */ 1637 get_device(&port->dev); 1638 cxld->id = rc; 1639 1640 dev = &cxld->dev; 1641 device_initialize(dev); 1642 lockdep_set_class(&dev->mutex, &cxl_decoder_key); 1643 device_set_pm_not_required(dev); 1644 dev->parent = &port->dev; 1645 dev->bus = &cxl_bus_type; 1646 1647 /* Pre initialize an "empty" decoder */ 1648 cxld->interleave_ways = 1; 1649 cxld->interleave_granularity = PAGE_SIZE; 1650 cxld->target_type = CXL_DECODER_HOSTONLYMEM; 1651 cxld->hpa_range = (struct range) { 1652 .start = 0, 1653 .end = -1, 1654 }; 1655 1656 return 0; 1657 } 1658 1659 static int cxl_switch_decoder_init(struct cxl_port *port, 1660 struct cxl_switch_decoder *cxlsd, 1661 int nr_targets) 1662 { 1663 if (nr_targets > CXL_DECODER_MAX_INTERLEAVE) 1664 return -EINVAL; 1665 1666 cxlsd->nr_targets = nr_targets; 1667 seqlock_init(&cxlsd->target_lock); 1668 return cxl_decoder_init(port, &cxlsd->cxld); 1669 } 1670 1671 /** 1672 * cxl_root_decoder_alloc - Allocate a root level decoder 1673 * @port: owning CXL root of this decoder 1674 * @nr_targets: static number of downstream targets 1675 * @calc_hb: which host bridge covers the n'th position by granularity 1676 * 1677 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A 1678 * 'CXL root' decoder is one that decodes from a top-level / static platform 1679 * firmware description of CXL resources into a CXL standard decode 1680 * topology. 1681 */ 1682 struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port, 1683 unsigned int nr_targets, 1684 cxl_calc_hb_fn calc_hb) 1685 { 1686 struct cxl_root_decoder *cxlrd; 1687 struct cxl_switch_decoder *cxlsd; 1688 struct cxl_decoder *cxld; 1689 int rc; 1690 1691 if (!is_cxl_root(port)) 1692 return ERR_PTR(-EINVAL); 1693 1694 cxlrd = kzalloc(struct_size(cxlrd, cxlsd.target, nr_targets), 1695 GFP_KERNEL); 1696 if (!cxlrd) 1697 return ERR_PTR(-ENOMEM); 1698 1699 cxlsd = &cxlrd->cxlsd; 1700 rc = cxl_switch_decoder_init(port, cxlsd, nr_targets); 1701 if (rc) { 1702 kfree(cxlrd); 1703 return ERR_PTR(rc); 1704 } 1705 1706 cxlrd->calc_hb = calc_hb; 1707 mutex_init(&cxlrd->range_lock); 1708 1709 cxld = &cxlsd->cxld; 1710 cxld->dev.type = &cxl_decoder_root_type; 1711 /* 1712 * cxl_root_decoder_release() special cases negative ids to 1713 * detect memregion_alloc() failures. 1714 */ 1715 atomic_set(&cxlrd->region_id, -1); 1716 rc = memregion_alloc(GFP_KERNEL); 1717 if (rc < 0) { 1718 put_device(&cxld->dev); 1719 return ERR_PTR(rc); 1720 } 1721 1722 atomic_set(&cxlrd->region_id, rc); 1723 return cxlrd; 1724 } 1725 EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL); 1726 1727 /** 1728 * cxl_switch_decoder_alloc - Allocate a switch level decoder 1729 * @port: owning CXL switch port of this decoder 1730 * @nr_targets: max number of dynamically addressable downstream targets 1731 * 1732 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A 1733 * 'switch' decoder is any decoder that can be enumerated by PCIe 1734 * topology and the HDM Decoder Capability. This includes the decoders 1735 * that sit between Switch Upstream Ports / Switch Downstream Ports and 1736 * Host Bridges / Root Ports. 
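 *
 * Illustrative sketch (assumption, not from the original source): a port
 * driver enumerating the HDM Decoder Capability might allocate and register
 * one decoder per hardware instance roughly as:
 *
 *	cxlsd = cxl_switch_decoder_alloc(port, nr_targets);
 *	if (IS_ERR(cxlsd))
 *		return PTR_ERR(cxlsd);
 *	... configure cxlsd->cxld.hpa_range, interleave ways/granularity ...
 *	rc = cxl_decoder_add_locked(&cxlsd->cxld, target_map);
 *	if (rc)
 *		put_device(&cxlsd->cxld.dev);
 *	else
 *		rc = cxl_decoder_autoremove(host, &cxlsd->cxld);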
1737 */ 1738 struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port, 1739 unsigned int nr_targets) 1740 { 1741 struct cxl_switch_decoder *cxlsd; 1742 struct cxl_decoder *cxld; 1743 int rc; 1744 1745 if (is_cxl_root(port) || is_cxl_endpoint(port)) 1746 return ERR_PTR(-EINVAL); 1747 1748 cxlsd = kzalloc(struct_size(cxlsd, target, nr_targets), GFP_KERNEL); 1749 if (!cxlsd) 1750 return ERR_PTR(-ENOMEM); 1751 1752 rc = cxl_switch_decoder_init(port, cxlsd, nr_targets); 1753 if (rc) { 1754 kfree(cxlsd); 1755 return ERR_PTR(rc); 1756 } 1757 1758 cxld = &cxlsd->cxld; 1759 cxld->dev.type = &cxl_decoder_switch_type; 1760 return cxlsd; 1761 } 1762 EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL); 1763 1764 /** 1765 * cxl_endpoint_decoder_alloc - Allocate an endpoint decoder 1766 * @port: owning port of this decoder 1767 * 1768 * Return: A new cxl decoder to be registered by cxl_decoder_add() 1769 */ 1770 struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port) 1771 { 1772 struct cxl_endpoint_decoder *cxled; 1773 struct cxl_decoder *cxld; 1774 int rc; 1775 1776 if (!is_cxl_endpoint(port)) 1777 return ERR_PTR(-EINVAL); 1778 1779 cxled = kzalloc(sizeof(*cxled), GFP_KERNEL); 1780 if (!cxled) 1781 return ERR_PTR(-ENOMEM); 1782 1783 cxled->pos = -1; 1784 cxld = &cxled->cxld; 1785 rc = cxl_decoder_init(port, cxld); 1786 if (rc) { 1787 kfree(cxled); 1788 return ERR_PTR(rc); 1789 } 1790 1791 cxld->dev.type = &cxl_decoder_endpoint_type; 1792 return cxled; 1793 } 1794 EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL); 1795 1796 /** 1797 * cxl_decoder_add_locked - Add a decoder with targets 1798 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc() 1799 * @target_map: A list of downstream ports that this decoder can direct memory 1800 * traffic to. These numbers should correspond with the port number 1801 * in the PCIe Link Capabilities structure. 1802 * 1803 * Certain types of decoders may not have any targets. The main example of this 1804 * is an endpoint device. A more awkward example is a hostbridge whose root 1805 * ports get hot added (technically possible, though unlikely). 1806 * 1807 * This is the locked variant of cxl_decoder_add(). 1808 * 1809 * Context: Process context. Expects the device lock of the port that owns the 1810 * @cxld to be held. 1811 * 1812 * Return: Negative error code if the decoder wasn't properly configured; else 1813 * returns 0. 
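 *
 * Illustrative sketch (assumption, not from the original source): for a
 * 2-way decoder whose first target is the dport with port id 3 and whose
 * second target is the dport with port id 1, a caller already holding the
 * port's device lock would pass:
 *
 *	int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 3, 1 };
 *
 *	rc = cxl_decoder_add_locked(cxld, target_map);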
1814 */ 1815 int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map) 1816 { 1817 struct cxl_port *port; 1818 struct device *dev; 1819 int rc; 1820 1821 if (WARN_ON_ONCE(!cxld)) 1822 return -EINVAL; 1823 1824 if (WARN_ON_ONCE(IS_ERR(cxld))) 1825 return PTR_ERR(cxld); 1826 1827 if (cxld->interleave_ways < 1) 1828 return -EINVAL; 1829 1830 dev = &cxld->dev; 1831 1832 port = to_cxl_port(cxld->dev.parent); 1833 if (!is_endpoint_decoder(dev)) { 1834 struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev); 1835 1836 rc = decoder_populate_targets(cxlsd, port, target_map); 1837 if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) { 1838 dev_err(&port->dev, 1839 "Failed to populate active decoder targets\n"); 1840 return rc; 1841 } 1842 } 1843 1844 rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id); 1845 if (rc) 1846 return rc; 1847 1848 return device_add(dev); 1849 } 1850 EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL); 1851 1852 /** 1853 * cxl_decoder_add - Add a decoder with targets 1854 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc() 1855 * @target_map: A list of downstream ports that this decoder can direct memory 1856 * traffic to. These numbers should correspond with the port number 1857 * in the PCIe Link Capabilities structure. 1858 * 1859 * This is the unlocked variant of cxl_decoder_add_locked(). 1860 * See cxl_decoder_add_locked(). 1861 * 1862 * Context: Process context. Takes and releases the device lock of the port that 1863 * owns the @cxld. 1864 */ 1865 int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map) 1866 { 1867 struct cxl_port *port; 1868 int rc; 1869 1870 if (WARN_ON_ONCE(!cxld)) 1871 return -EINVAL; 1872 1873 if (WARN_ON_ONCE(IS_ERR(cxld))) 1874 return PTR_ERR(cxld); 1875 1876 port = to_cxl_port(cxld->dev.parent); 1877 1878 device_lock(&port->dev); 1879 rc = cxl_decoder_add_locked(cxld, target_map); 1880 device_unlock(&port->dev); 1881 1882 return rc; 1883 } 1884 EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL); 1885 1886 static void cxld_unregister(void *dev) 1887 { 1888 struct cxl_endpoint_decoder *cxled; 1889 1890 if (is_endpoint_decoder(dev)) { 1891 cxled = to_cxl_endpoint_decoder(dev); 1892 cxl_decoder_kill_region(cxled); 1893 } 1894 1895 device_unregister(dev); 1896 } 1897 1898 int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld) 1899 { 1900 return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev); 1901 } 1902 EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, CXL); 1903 1904 /** 1905 * __cxl_driver_register - register a driver for the cxl bus 1906 * @cxl_drv: cxl driver structure to attach 1907 * @owner: owning module/driver 1908 * @modname: KBUILD_MODNAME for parent driver 1909 */ 1910 int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner, 1911 const char *modname) 1912 { 1913 if (!cxl_drv->probe) { 1914 pr_debug("%s ->probe() must be specified\n", modname); 1915 return -EINVAL; 1916 } 1917 1918 if (!cxl_drv->name) { 1919 pr_debug("%s ->name must be specified\n", modname); 1920 return -EINVAL; 1921 } 1922 1923 if (!cxl_drv->id) { 1924 pr_debug("%s ->id must be specified\n", modname); 1925 return -EINVAL; 1926 } 1927 1928 cxl_drv->drv.bus = &cxl_bus_type; 1929 cxl_drv->drv.owner = owner; 1930 cxl_drv->drv.mod_name = modname; 1931 cxl_drv->drv.name = cxl_drv->name; 1932 1933 return driver_register(&cxl_drv->drv); 1934 } 1935 EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, CXL); 1936 1937 void cxl_driver_unregister(struct cxl_driver *cxl_drv) 1938 { 1939 
driver_unregister(&cxl_drv->drv); 1940 } 1941 EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL); 1942 1943 static int cxl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env) 1944 { 1945 return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT, 1946 cxl_device_id(dev)); 1947 } 1948 1949 static int cxl_bus_match(struct device *dev, struct device_driver *drv) 1950 { 1951 return cxl_device_id(dev) == to_cxl_drv(drv)->id; 1952 } 1953 1954 static int cxl_bus_probe(struct device *dev) 1955 { 1956 int rc; 1957 1958 rc = to_cxl_drv(dev->driver)->probe(dev); 1959 dev_dbg(dev, "probe: %d\n", rc); 1960 return rc; 1961 } 1962 1963 static void cxl_bus_remove(struct device *dev) 1964 { 1965 struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver); 1966 1967 if (cxl_drv->remove) 1968 cxl_drv->remove(dev); 1969 } 1970 1971 static struct workqueue_struct *cxl_bus_wq; 1972 1973 static void cxl_bus_rescan_queue(struct work_struct *w) 1974 { 1975 int rc = bus_rescan_devices(&cxl_bus_type); 1976 1977 pr_debug("CXL bus rescan result: %d\n", rc); 1978 } 1979 1980 void cxl_bus_rescan(void) 1981 { 1982 static DECLARE_WORK(rescan_work, cxl_bus_rescan_queue); 1983 1984 queue_work(cxl_bus_wq, &rescan_work); 1985 } 1986 EXPORT_SYMBOL_NS_GPL(cxl_bus_rescan, CXL); 1987 1988 void cxl_bus_drain(void) 1989 { 1990 drain_workqueue(cxl_bus_wq); 1991 } 1992 EXPORT_SYMBOL_NS_GPL(cxl_bus_drain, CXL); 1993 1994 bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd) 1995 { 1996 return queue_work(cxl_bus_wq, &cxlmd->detach_work); 1997 } 1998 EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL); 1999 2000 /* for user tooling to ensure port disable work has completed */ 2001 static ssize_t flush_store(const struct bus_type *bus, const char *buf, size_t count) 2002 { 2003 if (sysfs_streq(buf, "1")) { 2004 flush_workqueue(cxl_bus_wq); 2005 return count; 2006 } 2007 2008 return -EINVAL; 2009 } 2010 2011 static BUS_ATTR_WO(flush); 2012 2013 static struct attribute *cxl_bus_attributes[] = { 2014 &bus_attr_flush.attr, 2015 NULL, 2016 }; 2017 2018 static struct attribute_group cxl_bus_attribute_group = { 2019 .attrs = cxl_bus_attributes, 2020 }; 2021 2022 static const struct attribute_group *cxl_bus_attribute_groups[] = { 2023 &cxl_bus_attribute_group, 2024 NULL, 2025 }; 2026 2027 struct bus_type cxl_bus_type = { 2028 .name = "cxl", 2029 .uevent = cxl_bus_uevent, 2030 .match = cxl_bus_match, 2031 .probe = cxl_bus_probe, 2032 .remove = cxl_bus_remove, 2033 .bus_groups = cxl_bus_attribute_groups, 2034 }; 2035 EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL); 2036 2037 static struct dentry *cxl_debugfs; 2038 2039 struct dentry *cxl_debugfs_create_dir(const char *dir) 2040 { 2041 return debugfs_create_dir(dir, cxl_debugfs); 2042 } 2043 EXPORT_SYMBOL_NS_GPL(cxl_debugfs_create_dir, CXL); 2044 2045 static __init int cxl_core_init(void) 2046 { 2047 int rc; 2048 2049 cxl_debugfs = debugfs_create_dir("cxl", NULL); 2050 2051 cxl_mbox_init(); 2052 2053 rc = cxl_memdev_init(); 2054 if (rc) 2055 return rc; 2056 2057 cxl_bus_wq = alloc_ordered_workqueue("cxl_port", 0); 2058 if (!cxl_bus_wq) { 2059 rc = -ENOMEM; 2060 goto err_wq; 2061 } 2062 2063 rc = bus_register(&cxl_bus_type); 2064 if (rc) 2065 goto err_bus; 2066 2067 rc = cxl_region_init(); 2068 if (rc) 2069 goto err_region; 2070 2071 return 0; 2072 2073 err_region: 2074 bus_unregister(&cxl_bus_type); 2075 err_bus: 2076 destroy_workqueue(cxl_bus_wq); 2077 err_wq: 2078 cxl_memdev_exit(); 2079 return rc; 2080 } 2081 2082 static void cxl_core_exit(void) 2083 { 2084 cxl_region_exit(); 2085 
bus_unregister(&cxl_bus_type); 2086 destroy_workqueue(cxl_bus_wq); 2087 cxl_memdev_exit(); 2088 debugfs_remove_recursive(cxl_debugfs); 2089 } 2090 2091 subsys_initcall(cxl_core_init); 2092 module_exit(cxl_core_exit); 2093 MODULE_LICENSE("GPL v2"); 2094
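
/*
 * Illustrative sketch (assumption, not part of the original source): a CXL
 * aware driver binds to one of the cxl_device_id() types above by
 * registering a struct cxl_driver, e.g.:
 *
 *	static int my_port_probe(struct device *dev)
 *	{
 *		struct cxl_port *port = to_cxl_port(dev);
 *
 *		... enumerate decoders, add child dports, etc ...
 *		return 0;
 *	}
 *
 *	static struct cxl_driver my_port_driver = {
 *		.name = "my_cxl_port",
 *		.probe = my_port_probe,
 *		.id = CXL_DEVICE_PORT,
 *	};
 *	module_cxl_driver(my_port_driver);
 */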