// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/workqueue.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <cxlmem.h>
#include <cxlpci.h>
#include <cxl.h>
#include "core.h"

/**
 * DOC: cxl core
 *
 * The CXL core provides a set of interfaces that can be consumed by CXL-aware
 * drivers. The interfaces allow for creation, modification, and destruction of
 * regions, memory devices, ports, and decoders. CXL-aware drivers must
 * register with the CXL core via these interfaces in order to be able to
 * participate in cross-device interleave coordination. The CXL core also
 * establishes and maintains the bridge to the nvdimm subsystem.
 *
 * The CXL core introduces a sysfs hierarchy to control the devices that are
 * instantiated by the core.
 */

static DEFINE_IDA(cxl_port_ida);
static DEFINE_XARRAY(cxl_root_buses);

static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%s\n", dev->type->name);
}
static DEVICE_ATTR_RO(devtype);

static int cxl_device_id(struct device *dev)
{
	if (dev->type == &cxl_nvdimm_bridge_type)
		return CXL_DEVICE_NVDIMM_BRIDGE;
	if (dev->type == &cxl_nvdimm_type)
		return CXL_DEVICE_NVDIMM;
	if (is_cxl_port(dev)) {
		if (is_cxl_root(to_cxl_port(dev)))
			return CXL_DEVICE_ROOT;
		return CXL_DEVICE_PORT;
	}
	if (is_cxl_memdev(dev))
		return CXL_DEVICE_MEMORY_EXPANDER;
	return 0;
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sysfs_emit(buf, CXL_MODALIAS_FMT "\n", cxl_device_id(dev));
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *cxl_base_attributes[] = {
	&dev_attr_devtype.attr,
	&dev_attr_modalias.attr,
	NULL,
};

struct attribute_group cxl_base_attribute_group = {
	.attrs = cxl_base_attributes,
};

static ssize_t start_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);
	u64 start;

	if (is_root_decoder(dev))
		start = cxld->platform_res.start;
	else
		start = cxld->decoder_range.start;

	return sysfs_emit(buf, "%#llx\n", start);
}
static DEVICE_ATTR_ADMIN_RO(start);

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);
	u64 size;

	if (is_root_decoder(dev))
		size = resource_size(&cxld->platform_res);
	else
		size = range_len(&cxld->decoder_range);

	return sysfs_emit(buf, "%#llx\n", size);
}
static DEVICE_ATTR_RO(size);

#define CXL_DECODER_FLAG_ATTR(name, flag)                            \
static ssize_t name##_show(struct device *dev,                       \
			   struct device_attribute *attr, char *buf) \
{                                                                    \
	struct cxl_decoder *cxld = to_cxl_decoder(dev);              \
                                                                     \
	return sysfs_emit(buf, "%s\n",                               \
"1" : "0"); \ 110 } \ 111 static DEVICE_ATTR_RO(name) 112 113 CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM); 114 CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM); 115 CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2); 116 CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3); 117 CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK); 118 119 static ssize_t target_type_show(struct device *dev, 120 struct device_attribute *attr, char *buf) 121 { 122 struct cxl_decoder *cxld = to_cxl_decoder(dev); 123 124 switch (cxld->target_type) { 125 case CXL_DECODER_ACCELERATOR: 126 return sysfs_emit(buf, "accelerator\n"); 127 case CXL_DECODER_EXPANDER: 128 return sysfs_emit(buf, "expander\n"); 129 } 130 return -ENXIO; 131 } 132 static DEVICE_ATTR_RO(target_type); 133 134 static ssize_t emit_target_list(struct cxl_decoder *cxld, char *buf) 135 { 136 ssize_t offset = 0; 137 int i, rc = 0; 138 139 for (i = 0; i < cxld->interleave_ways; i++) { 140 struct cxl_dport *dport = cxld->target[i]; 141 struct cxl_dport *next = NULL; 142 143 if (!dport) 144 break; 145 146 if (i + 1 < cxld->interleave_ways) 147 next = cxld->target[i + 1]; 148 rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id, 149 next ? "," : ""); 150 if (rc < 0) 151 return rc; 152 offset += rc; 153 } 154 155 return offset; 156 } 157 158 static ssize_t target_list_show(struct device *dev, 159 struct device_attribute *attr, char *buf) 160 { 161 struct cxl_decoder *cxld = to_cxl_decoder(dev); 162 ssize_t offset; 163 unsigned int seq; 164 int rc; 165 166 do { 167 seq = read_seqbegin(&cxld->target_lock); 168 rc = emit_target_list(cxld, buf); 169 } while (read_seqretry(&cxld->target_lock, seq)); 170 171 if (rc < 0) 172 return rc; 173 offset = rc; 174 175 rc = sysfs_emit_at(buf, offset, "\n"); 176 if (rc < 0) 177 return rc; 178 179 return offset + rc; 180 } 181 static DEVICE_ATTR_RO(target_list); 182 183 static struct attribute *cxl_decoder_base_attrs[] = { 184 &dev_attr_start.attr, 185 &dev_attr_size.attr, 186 &dev_attr_locked.attr, 187 NULL, 188 }; 189 190 static struct attribute_group cxl_decoder_base_attribute_group = { 191 .attrs = cxl_decoder_base_attrs, 192 }; 193 194 static struct attribute *cxl_decoder_root_attrs[] = { 195 &dev_attr_cap_pmem.attr, 196 &dev_attr_cap_ram.attr, 197 &dev_attr_cap_type2.attr, 198 &dev_attr_cap_type3.attr, 199 &dev_attr_target_list.attr, 200 NULL, 201 }; 202 203 static struct attribute_group cxl_decoder_root_attribute_group = { 204 .attrs = cxl_decoder_root_attrs, 205 }; 206 207 static const struct attribute_group *cxl_decoder_root_attribute_groups[] = { 208 &cxl_decoder_root_attribute_group, 209 &cxl_decoder_base_attribute_group, 210 &cxl_base_attribute_group, 211 NULL, 212 }; 213 214 static struct attribute *cxl_decoder_switch_attrs[] = { 215 &dev_attr_target_type.attr, 216 &dev_attr_target_list.attr, 217 NULL, 218 }; 219 220 static struct attribute_group cxl_decoder_switch_attribute_group = { 221 .attrs = cxl_decoder_switch_attrs, 222 }; 223 224 static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = { 225 &cxl_decoder_switch_attribute_group, 226 &cxl_decoder_base_attribute_group, 227 &cxl_base_attribute_group, 228 NULL, 229 }; 230 231 static struct attribute *cxl_decoder_endpoint_attrs[] = { 232 &dev_attr_target_type.attr, 233 NULL, 234 }; 235 236 static struct attribute_group cxl_decoder_endpoint_attribute_group = { 237 .attrs = cxl_decoder_endpoint_attrs, 238 }; 239 240 static const struct attribute_group *cxl_decoder_endpoint_attribute_groups[] = { 241 
	&cxl_decoder_base_attribute_group,
	&cxl_decoder_endpoint_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};

static void cxl_decoder_release(struct device *dev)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);
	struct cxl_port *port = to_cxl_port(dev->parent);

	ida_free(&port->decoder_ida, cxld->id);
	kfree(cxld);
	put_device(&port->dev);
}

static const struct device_type cxl_decoder_endpoint_type = {
	.name = "cxl_decoder_endpoint",
	.release = cxl_decoder_release,
	.groups = cxl_decoder_endpoint_attribute_groups,
};

static const struct device_type cxl_decoder_switch_type = {
	.name = "cxl_decoder_switch",
	.release = cxl_decoder_release,
	.groups = cxl_decoder_switch_attribute_groups,
};

static const struct device_type cxl_decoder_root_type = {
	.name = "cxl_decoder_root",
	.release = cxl_decoder_release,
	.groups = cxl_decoder_root_attribute_groups,
};

bool is_endpoint_decoder(struct device *dev)
{
	return dev->type == &cxl_decoder_endpoint_type;
}

bool is_root_decoder(struct device *dev)
{
	return dev->type == &cxl_decoder_root_type;
}
EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL);

bool is_cxl_decoder(struct device *dev)
{
	return dev->type && dev->type->release == cxl_decoder_release;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_decoder, CXL);

struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type->release != cxl_decoder_release,
			  "not a cxl_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_decoder, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL);

static void cxl_ep_release(struct cxl_ep *ep)
{
	if (!ep)
		return;
	list_del(&ep->list);
	put_device(ep->ep);
	kfree(ep);
}

static void cxl_port_release(struct device *dev)
{
	struct cxl_port *port = to_cxl_port(dev);
	struct cxl_ep *ep, *_e;

	device_lock(dev);
	list_for_each_entry_safe(ep, _e, &port->endpoints, list)
		cxl_ep_release(ep);
	device_unlock(dev);
	ida_free(&cxl_port_ida, port->id);
	kfree(port);
}

static const struct attribute_group *cxl_port_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

static const struct device_type cxl_port_type = {
	.name = "cxl_port",
	.release = cxl_port_release,
	.groups = cxl_port_attribute_groups,
};

bool is_cxl_port(struct device *dev)
{
	return dev->type == &cxl_port_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_port, CXL);

struct cxl_port *to_cxl_port(struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
			  "not a cxl_port device\n"))
		return NULL;
	return container_of(dev, struct cxl_port, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_port, CXL);

static void unregister_port(void *_port)
{
	struct cxl_port *port = _port;
	struct cxl_port *parent;
	struct device *lock_dev;

	if (is_cxl_root(port))
		parent = NULL;
	else
		parent = to_cxl_port(port->dev.parent);

	/*
	 * The CXL root port and the first level of ports are unregistered
	 * under the platform firmware device lock; all other ports are
	 * unregistered while holding their parent port lock.
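	 * The lock_dev selection below encodes that policy so that
	 * device_lock_assert() can validate the caller's context.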
	 */
	if (!parent)
		lock_dev = port->uport;
	else if (is_cxl_root(parent))
		lock_dev = parent->uport;
	else
		lock_dev = &parent->dev;

	device_lock_assert(lock_dev);
	port->uport = NULL;
	device_unregister(&port->dev);
}

static void cxl_unlink_uport(void *_port)
{
	struct cxl_port *port = _port;

	sysfs_remove_link(&port->dev.kobj, "uport");
}

static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
{
	int rc;

	rc = sysfs_create_link(&port->dev.kobj, &port->uport->kobj, "uport");
	if (rc)
		return rc;
	return devm_add_action_or_reset(host, cxl_unlink_uport, port);
}

static struct lock_class_key cxl_port_key;

static struct cxl_port *cxl_port_alloc(struct device *uport,
				       resource_size_t component_reg_phys,
				       struct cxl_port *parent_port)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
	if (rc < 0)
		goto err;
	port->id = rc;

	/*
	 * The top-level cxl_port "cxl_root" does not have a cxl_port as
	 * its parent and it does not have any corresponding component
	 * registers as its decode is described by a fixed platform
	 * description.
	 */
	dev = &port->dev;
	if (parent_port) {
		dev->parent = &parent_port->dev;
		port->depth = parent_port->depth + 1;
	} else
		dev->parent = uport;

	port->uport = uport;
	port->component_reg_phys = component_reg_phys;
	ida_init(&port->decoder_ida);
	INIT_LIST_HEAD(&port->dports);
	INIT_LIST_HEAD(&port->endpoints);

	device_initialize(dev);
	lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth);
	device_set_pm_not_required(dev);
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_port_type;

	return port;

err:
	kfree(port);
	return ERR_PTR(rc);
}

/**
 * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
 * @host: host device for devm operations
 * @uport: "physical" device implementing this upstream port
 * @component_reg_phys: (optional) for configurable cxl_port instances
 * @parent_port: next hop up in the CXL memory decode hierarchy
 */
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
				   resource_size_t component_reg_phys,
				   struct cxl_port *parent_port)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	port = cxl_port_alloc(uport, component_reg_phys, parent_port);
	if (IS_ERR(port))
		return port;

	dev = &port->dev;
	if (is_cxl_memdev(uport))
		rc = dev_set_name(dev, "endpoint%d", port->id);
	else if (parent_port)
		rc = dev_set_name(dev, "port%d", port->id);
	else
		rc = dev_set_name(dev, "root%d", port->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(host, unregister_port, port);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_cxl_link_uport(host, port);
	if (rc)
		return ERR_PTR(rc);

	return port;

err:
	put_device(dev);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL);

struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port)
{
	/* There is no pci_bus associated with a CXL platform-root port */
	if (is_cxl_root(port))
		return NULL;

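	/*
	 * A port enumerated from PCI topology (e.g. hosted by a PCIe
	 * upstream switch port) finds its downstream bus via
	 * pdev->subordinate; otherwise consult the xarray populated by
	 * devm_cxl_register_pci_bus().
	 */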
	if (dev_is_pci(port->uport)) {
		struct pci_dev *pdev = to_pci_dev(port->uport);

		return pdev->subordinate;
	}

	return xa_load(&cxl_root_buses, (unsigned long)port->uport);
}
EXPORT_SYMBOL_NS_GPL(cxl_port_to_pci_bus, CXL);

static void unregister_pci_bus(void *uport)
{
	xa_erase(&cxl_root_buses, (unsigned long)uport);
}

int devm_cxl_register_pci_bus(struct device *host, struct device *uport,
			      struct pci_bus *bus)
{
	int rc;

	if (dev_is_pci(uport))
		return -EINVAL;

	rc = xa_insert(&cxl_root_buses, (unsigned long)uport, bus, GFP_KERNEL);
	if (rc)
		return rc;
	return devm_add_action_or_reset(host, unregister_pci_bus, uport);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_register_pci_bus, CXL);

static bool dev_is_cxl_root_child(struct device *dev)
{
	struct cxl_port *port, *parent;

	if (!is_cxl_port(dev))
		return false;

	port = to_cxl_port(dev);
	if (is_cxl_root(port))
		return false;

	parent = to_cxl_port(port->dev.parent);
	if (is_cxl_root(parent))
		return true;

	return false;
}

/* Find a 2nd level CXL port that has a dport that is an ancestor of @match */
static int match_root_child(struct device *dev, const void *match)
{
	const struct device *iter = NULL;
	struct cxl_dport *dport;
	struct cxl_port *port;

	if (!dev_is_cxl_root_child(dev))
		return 0;

	port = to_cxl_port(dev);
	device_lock(dev);
	list_for_each_entry(dport, &port->dports, list) {
		iter = match;
		while (iter) {
			if (iter == dport->dport)
				goto out;
			iter = iter->parent;
		}
	}
out:
	device_unlock(dev);

	return !!iter;
}

struct cxl_port *find_cxl_root(struct device *dev)
{
	struct device *port_dev;
	struct cxl_port *root;

	port_dev = bus_find_device(&cxl_bus_type, NULL, dev, match_root_child);
	if (!port_dev)
		return NULL;

	root = to_cxl_port(port_dev->parent);
	get_device(&root->dev);
	put_device(port_dev);
	return root;
}
EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);

static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
	struct cxl_dport *dport;

	device_lock_assert(&port->dev);
	list_for_each_entry(dport, &port->dports, list)
		if (dport->port_id == id)
			return dport;
	return NULL;
}

static int add_dport(struct cxl_port *port, struct cxl_dport *new)
{
	struct cxl_dport *dup;

	device_lock_assert(&port->dev);
	dup = find_dport(port, new->port_id);
	if (dup)
		dev_err(&port->dev,
			"unable to add dport%d-%s non-unique port id (%s)\n",
			new->port_id, dev_name(new->dport),
			dev_name(dup->dport));
	else
		list_add_tail(&new->list, &port->dports);

	return dup ? -EEXIST : 0;
}

/*
 * Since root-level CXL dports cannot be enumerated by PCI, they are not
 * enumerated by the common port driver that acquires the port lock over
 * dport add/remove. Instead, root dports are manually added by a
 * platform driver, and cond_cxl_root_lock() is used to take the missing
 * port lock in that case.
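 * For all other ports the device lock is already held by the driver
 * core while the port driver's probe() runs, which is what the
 * device_lock_assert() in add_dport() relies upon.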
 */
static void cond_cxl_root_lock(struct cxl_port *port)
{
	if (is_cxl_root(port))
		device_lock(&port->dev);
}

static void cond_cxl_root_unlock(struct cxl_port *port)
{
	if (is_cxl_root(port))
		device_unlock(&port->dev);
}

static void cxl_dport_remove(void *data)
{
	struct cxl_dport *dport = data;
	struct cxl_port *port = dport->port;

	put_device(dport->dport);
	cond_cxl_root_lock(port);
	list_del(&dport->list);
	cond_cxl_root_unlock(port);
}

static void cxl_dport_unlink(void *data)
{
	struct cxl_dport *dport = data;
	struct cxl_port *port = dport->port;
	char link_name[CXL_TARGET_STRLEN];

	sprintf(link_name, "dport%d", dport->port_id);
	sysfs_remove_link(&port->dev.kobj, link_name);
}

/**
 * devm_cxl_add_dport - append downstream port data to a cxl_port
 * @port: the cxl_port that references this dport
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @component_reg_phys: optional location of CXL component registers
 *
 * Note that dports are appended to the devm release actions of either
 * the port's host (for root ports), or the port itself (for switch
 * ports).
 */
struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
				     struct device *dport_dev, int port_id,
				     resource_size_t component_reg_phys)
{
	char link_name[CXL_TARGET_STRLEN];
	struct cxl_dport *dport;
	struct device *host;
	int rc;

	if (is_cxl_root(port))
		host = port->uport;
	else
		host = &port->dev;

	if (!host->driver) {
		dev_WARN_ONCE(&port->dev, 1, "dport:%s bad devm context\n",
			      dev_name(dport_dev));
		return ERR_PTR(-ENXIO);
	}

	if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
	    CXL_TARGET_STRLEN)
		return ERR_PTR(-EINVAL);

	dport = devm_kzalloc(host, sizeof(*dport), GFP_KERNEL);
	if (!dport)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dport->list);
	dport->dport = dport_dev;
	dport->port_id = port_id;
	dport->component_reg_phys = component_reg_phys;
	dport->port = port;

	cond_cxl_root_lock(port);
	rc = add_dport(port, dport);
	cond_cxl_root_unlock(port);
	if (rc)
		return ERR_PTR(rc);

	get_device(dport_dev);
	rc = devm_add_action_or_reset(host, cxl_dport_remove, dport);
	if (rc)
		return ERR_PTR(rc);

	rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_add_action_or_reset(host, cxl_dport_unlink, dport);
	if (rc)
		return ERR_PTR(rc);

	return dport;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL);

static struct cxl_ep *find_ep(struct cxl_port *port, struct device *ep_dev)
{
	struct cxl_ep *ep;

	device_lock_assert(&port->dev);
	list_for_each_entry(ep, &port->endpoints, list)
		if (ep->ep == ep_dev)
			return ep;
	return NULL;
}

static int add_ep(struct cxl_port *port, struct cxl_ep *new)
{
	struct cxl_ep *dup;

	device_lock(&port->dev);
	if (port->dead) {
		device_unlock(&port->dev);
		return -ENXIO;
	}
	dup = find_ep(port, new->ep);
	if (!dup)
		list_add_tail(&new->list, &port->endpoints);
	device_unlock(&port->dev);

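	/* report -EEXIST so callers can tell a duplicate from a new entry */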
	return dup ? -EEXIST : 0;
}

/**
 * cxl_add_ep - register an endpoint's interest in a port
 * @port: a port in the endpoint's topology ancestry
 * @ep_dev: device representing the endpoint
 *
 * Intermediate CXL ports are scanned based on the arrival of endpoints.
 * When those endpoints depart the port can be destroyed once all
 * endpoints that care about that port have been removed.
 */
static int cxl_add_ep(struct cxl_port *port, struct device *ep_dev)
{
	struct cxl_ep *ep;
	int rc;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	INIT_LIST_HEAD(&ep->list);
	ep->ep = get_device(ep_dev);

	rc = add_ep(port, ep);
	if (rc)
		cxl_ep_release(ep);
	return rc;
}

struct cxl_find_port_ctx {
	const struct device *dport_dev;
	const struct cxl_port *parent_port;
};

static int match_port_by_dport(struct device *dev, const void *data)
{
	const struct cxl_find_port_ctx *ctx = data;
	struct cxl_port *port;

	if (!is_cxl_port(dev))
		return 0;
	if (ctx->parent_port && dev->parent != &ctx->parent_port->dev)
		return 0;

	port = to_cxl_port(dev);
	return cxl_find_dport_by_dev(port, ctx->dport_dev) != NULL;
}

static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx)
{
	struct device *dev;

	if (!ctx->dport_dev)
		return NULL;

	dev = bus_find_device(&cxl_bus_type, NULL, ctx, match_port_by_dport);
	if (dev)
		return to_cxl_port(dev);
	return NULL;
}

static struct cxl_port *find_cxl_port(struct device *dport_dev)
{
	struct cxl_find_port_ctx ctx = {
		.dport_dev = dport_dev,
	};

	return __find_cxl_port(&ctx);
}

static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
					 struct device *dport_dev)
{
	struct cxl_find_port_ctx ctx = {
		.dport_dev = dport_dev,
		.parent_port = parent_port,
	};

	return __find_cxl_port(&ctx);
}

/*
 * All users of grandparent() are using it to walk a PCIe-like switch port
 * hierarchy. A PCIe switch is composed of a bridge device representing the
 * upstream switch port and N bridges representing downstream switch ports.
 * When bridges stack, the grandparent of a downstream switch port is another
 * downstream switch port in the immediate ancestor switch.
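 * Similarly, the grandparent of a cxl_memdev skips over its parent PCI
 * endpoint device to land on the PCIe downstream switch port (or root
 * port) above it.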
 */
static struct device *grandparent(struct device *dev)
{
	if (dev && dev->parent)
		return dev->parent->parent;
	return NULL;
}

static void delete_endpoint(void *data)
{
	struct cxl_memdev *cxlmd = data;
	struct cxl_port *endpoint = dev_get_drvdata(&cxlmd->dev);
	struct cxl_port *parent_port;
	struct device *parent;

	parent_port = cxl_mem_find_port(cxlmd);
	if (!parent_port)
		goto out;
	parent = &parent_port->dev;

	device_lock(parent);
	if (parent->driver && endpoint->uport) {
		devm_release_action(parent, cxl_unlink_uport, endpoint);
		devm_release_action(parent, unregister_port, endpoint);
	}
	device_unlock(parent);
	put_device(parent);
out:
	put_device(&endpoint->dev);
}

int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
{
	struct device *dev = &cxlmd->dev;

	get_device(&endpoint->dev);
	dev_set_drvdata(dev, endpoint);
	return devm_add_action_or_reset(dev, delete_endpoint, cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);

/*
 * The natural end of life of a non-root 'cxl_port' is when its parent port goes
 * through a ->remove() event ("top-down" unregistration). The unnatural trigger
 * for a port to be unregistered is when all memdevs beneath that port have gone
 * through ->remove(). This "bottom-up" removal selectively removes individual
 * child ports manually. This depends on devm_cxl_add_port() to not change its
 * devm action registration order.
 */
static void delete_switch_port(struct cxl_port *port, struct list_head *dports)
{
	struct cxl_dport *dport, *_d;

	list_for_each_entry_safe(dport, _d, dports, list) {
		devm_release_action(&port->dev, cxl_dport_unlink, dport);
		devm_release_action(&port->dev, cxl_dport_remove, dport);
		devm_kfree(&port->dev, dport);
	}
	devm_release_action(port->dev.parent, cxl_unlink_uport, port);
	devm_release_action(port->dev.parent, unregister_port, port);
}

static void cxl_detach_ep(void *data)
{
	struct cxl_memdev *cxlmd = data;
	struct device *iter;

	for (iter = &cxlmd->dev; iter; iter = grandparent(iter)) {
		struct device *dport_dev = grandparent(iter);
		struct cxl_port *port, *parent_port;
		LIST_HEAD(reap_dports);
		struct cxl_ep *ep;

		if (!dport_dev)
			break;

		port = find_cxl_port(dport_dev);
		if (!port)
			continue;

		if (is_cxl_root(port)) {
			put_device(&port->dev);
			continue;
		}

		parent_port = to_cxl_port(port->dev.parent);
		device_lock(&parent_port->dev);
		if (!parent_port->dev.driver) {
			/*
			 * The bottom-up race to delete the port lost to a
			 * top-down port disable; give up here, because the
			 * parent_port ->remove() will have cleaned up all
			 * descendants.
			 */
			device_unlock(&parent_port->dev);
			put_device(&port->dev);
			continue;
		}

		device_lock(&port->dev);
		ep = find_ep(port, &cxlmd->dev);
		dev_dbg(&cxlmd->dev, "disconnect %s from %s\n",
			ep ? dev_name(ep->ep) : "", dev_name(&port->dev));
		cxl_ep_release(ep);
		if (ep && !port->dead && list_empty(&port->endpoints) &&
		    !is_cxl_root(parent_port)) {
			/*
			 * This was the last ep attached to a dynamically
			 * enumerated port. Block new cxl_add_ep() and garbage
			 * collect the port.
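			 * Dports spliced onto reap_dports are deleted
			 * after the port lock is dropped below.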
			 */
			port->dead = true;
			list_splice_init(&port->dports, &reap_dports);
		}
		device_unlock(&port->dev);

		if (!list_empty(&reap_dports)) {
			dev_dbg(&cxlmd->dev, "delete %s\n",
				dev_name(&port->dev));
			delete_switch_port(port, &reap_dports);
		}
		put_device(&port->dev);
		device_unlock(&parent_port->dev);
	}
}

static resource_size_t find_component_registers(struct device *dev)
{
	struct cxl_register_map map;
	struct pci_dev *pdev;

	/*
	 * Theoretically, CXL component registers can be hosted on a
	 * non-PCI device; in practice, only cxl_test hits this case.
	 */
	if (!dev_is_pci(dev))
		return CXL_RESOURCE_NONE;

	pdev = to_pci_dev(dev);

	cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	return cxl_regmap_to_base(pdev, &map);
}

static int add_port_attach_ep(struct cxl_memdev *cxlmd,
			      struct device *uport_dev,
			      struct device *dport_dev)
{
	struct device *dparent = grandparent(dport_dev);
	struct cxl_port *port, *parent_port = NULL;
	resource_size_t component_reg_phys;
	int rc;

	if (!dparent) {
		/*
		 * The iteration reached the topology root without finding the
		 * CXL-root 'cxl_port' on a previous iteration; fail for now to
		 * be re-probed after the platform driver attaches.
		 */
		dev_dbg(&cxlmd->dev, "%s is a root dport\n",
			dev_name(dport_dev));
		return -ENXIO;
	}

	parent_port = find_cxl_port(dparent);
	if (!parent_port) {
		/* iterate to create this parent_port */
		return -EAGAIN;
	}

	device_lock(&parent_port->dev);
	if (!parent_port->dev.driver) {
		dev_warn(&cxlmd->dev,
			 "port %s:%s disabled, failed to enumerate CXL.mem\n",
			 dev_name(&parent_port->dev), dev_name(uport_dev));
		port = ERR_PTR(-ENXIO);
		goto out;
	}

	port = find_cxl_port_at(parent_port, dport_dev);
	if (!port) {
		component_reg_phys = find_component_registers(uport_dev);
		port = devm_cxl_add_port(&parent_port->dev, uport_dev,
					 component_reg_phys, parent_port);
		if (!IS_ERR(port))
			get_device(&port->dev);
	}
out:
	device_unlock(&parent_port->dev);

	if (IS_ERR(port))
		rc = PTR_ERR(port);
	else {
		dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
			dev_name(&port->dev), dev_name(port->uport));
		rc = cxl_add_ep(port, &cxlmd->dev);
		if (rc == -EEXIST) {
			/*
			 * "can't" happen, but this error code means
			 * something to the caller, so translate it.
			 */
			rc = -ENXIO;
		}
		put_device(&port->dev);
	}

	put_device(&parent_port->dev);
	return rc;
}

int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd)
{
	struct device *dev = &cxlmd->dev;
	struct device *iter;
	int rc;

	rc = devm_add_action_or_reset(&cxlmd->dev, cxl_detach_ep, cxlmd);
	if (rc)
		return rc;

	/*
	 * Scan for and add all cxl_ports in this device's ancestry.
	 * Repeat until no more ports are added. Abort if a port add
	 * attempt fails.
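	 * The restart is needed because each newly added port may
	 * expose further intermediate ports between the endpoint and
	 * the root.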
	 */
retry:
	for (iter = dev; iter; iter = grandparent(iter)) {
		struct device *dport_dev = grandparent(iter);
		struct device *uport_dev;
		struct cxl_port *port;

		if (!dport_dev)
			return 0;

		uport_dev = dport_dev->parent;
		if (!uport_dev) {
			dev_warn(dev, "at %s no parent for dport: %s\n",
				 dev_name(iter), dev_name(dport_dev));
			return -ENXIO;
		}

		dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n",
			dev_name(iter), dev_name(dport_dev),
			dev_name(uport_dev));
		port = find_cxl_port(dport_dev);
		if (port) {
			dev_dbg(&cxlmd->dev,
				"found already registered port %s:%s\n",
				dev_name(&port->dev), dev_name(port->uport));
			rc = cxl_add_ep(port, &cxlmd->dev);

			/*
			 * If the endpoint already exists in the port's list,
			 * that's ok, it was added on a previous pass.
			 * Otherwise, retry in add_port_attach_ep() after
			 * taking the parent_port lock, as the current port
			 * may be in the process of being reaped.
			 */
			if (rc && rc != -EEXIST) {
				put_device(&port->dev);
				return rc;
			}

			/* Any more ports to add between this one and the root? */
			if (!dev_is_cxl_root_child(&port->dev)) {
				put_device(&port->dev);
				continue;
			}

			put_device(&port->dev);
			return 0;
		}

		rc = add_port_attach_ep(cxlmd, uport_dev, dport_dev);
		/* port missing, try to add parent */
		if (rc == -EAGAIN)
			continue;
		/* failed to add ep or port */
		if (rc)
			return rc;
		/* port added, new descendants possible, start over */
		goto retry;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL);

struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd)
{
	return find_cxl_port(grandparent(&cxlmd->dev));
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL);

struct cxl_dport *cxl_find_dport_by_dev(struct cxl_port *port,
					const struct device *dev)
{
	struct cxl_dport *dport;

	device_lock(&port->dev);
	list_for_each_entry(dport, &port->dports, list)
		if (dport->dport == dev) {
			device_unlock(&port->dev);
			return dport;
		}

	device_unlock(&port->dev);
	return NULL;
}
EXPORT_SYMBOL_NS_GPL(cxl_find_dport_by_dev, CXL);

static int decoder_populate_targets(struct cxl_decoder *cxld,
				    struct cxl_port *port, int *target_map)
{
	int i, rc = 0;

	if (!target_map)
		return 0;

	device_lock_assert(&port->dev);

	if (list_empty(&port->dports))
		return -EINVAL;

	write_seqlock(&cxld->target_lock);
	for (i = 0; i < cxld->nr_targets; i++) {
		struct cxl_dport *dport = find_dport(port, target_map[i]);

		if (!dport) {
			rc = -ENXIO;
			break;
		}
		cxld->target[i] = dport;
	}
	write_sequnlock(&cxld->target_lock);

	return rc;
}

static struct lock_class_key cxl_decoder_key;

/**
 * cxl_decoder_alloc - Allocate a new CXL decoder
 * @port: owning port of this decoder
 * @nr_targets: downstream targets accessible by this decoder. All upstream
 *		ports and root ports must have at least 1 target. Endpoint
 *		devices will have 0 targets, so callers registering an endpoint
 *		device should specify 0.
 *
 * A port should contain one or more decoders. Each of those decoders enables
 * some address space for CXL.mem utilization.
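 * The decoder type (root, switch, or endpoint) is inferred from the type
 * of @port.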
 * A decoder is expected to be configured by the caller before registering.
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add(). The decoder
 *	   is initialized to be a "passthrough" decoder.
 */
static struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port,
					     unsigned int nr_targets)
{
	struct cxl_decoder *cxld;
	struct device *dev;
	int rc = 0;

	if (nr_targets > CXL_DECODER_MAX_INTERLEAVE)
		return ERR_PTR(-EINVAL);

	cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL);
	if (!cxld)
		return ERR_PTR(-ENOMEM);

	rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
	if (rc < 0)
		goto err;

	/* need parent to stick around to release the id */
	get_device(&port->dev);
	cxld->id = rc;

	cxld->nr_targets = nr_targets;
	seqlock_init(&cxld->target_lock);
	dev = &cxld->dev;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_decoder_key);
	device_set_pm_not_required(dev);
	dev->parent = &port->dev;
	dev->bus = &cxl_bus_type;
	if (is_cxl_root(port))
		cxld->dev.type = &cxl_decoder_root_type;
	else if (is_cxl_endpoint(port))
		cxld->dev.type = &cxl_decoder_endpoint_type;
	else
		cxld->dev.type = &cxl_decoder_switch_type;

	/* Pre-initialize an "empty" decoder */
	cxld->interleave_ways = 1;
	cxld->interleave_granularity = PAGE_SIZE;
	cxld->target_type = CXL_DECODER_EXPANDER;
	cxld->platform_res = (struct resource)DEFINE_RES_MEM(0, 0);

	return cxld;
err:
	kfree(cxld);
	return ERR_PTR(rc);
}

/**
 * cxl_root_decoder_alloc - Allocate a root level decoder
 * @port: owning CXL root of this decoder
 * @nr_targets: static number of downstream targets
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
 * 'CXL root' decoder is one that decodes from a top-level / static platform
 * firmware description of CXL resources into a CXL standard decode
 * topology.
 */
struct cxl_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
					   unsigned int nr_targets)
{
	if (!is_cxl_root(port))
		return ERR_PTR(-EINVAL);

	return cxl_decoder_alloc(port, nr_targets);
}
EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);

/**
 * cxl_switch_decoder_alloc - Allocate a switch level decoder
 * @port: owning CXL switch port of this decoder
 * @nr_targets: max number of dynamically addressable downstream targets
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
 * 'switch' decoder is any decoder that can be enumerated by PCIe
 * topology and the HDM Decoder Capability. This includes the decoders
 * that sit between Switch Upstream Ports / Switch Downstream Ports and
 * Host Bridges / Root Ports.
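 * In this scheme a Host Bridge's own HDM decoders also register as
 * 'switch' decoders.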
 */
struct cxl_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
					     unsigned int nr_targets)
{
	if (is_cxl_root(port) || is_cxl_endpoint(port))
		return ERR_PTR(-EINVAL);

	return cxl_decoder_alloc(port, nr_targets);
}
EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL);

/**
 * cxl_endpoint_decoder_alloc - Allocate an endpoint decoder
 * @port: owning port of this decoder
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add()
 */
struct cxl_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
{
	if (!is_cxl_endpoint(port))
		return ERR_PTR(-EINVAL);

	return cxl_decoder_alloc(port, 0);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL);

/**
 * cxl_decoder_add_locked - Add a decoder with targets
 * @cxld: The cxl decoder allocated by cxl_decoder_alloc()
 * @target_map: A list of downstream ports that this decoder can direct memory
 *		traffic to. These numbers should correspond with the port number
 *		in the PCIe Link Capabilities structure.
 *
 * Certain types of decoders may not have any targets. The main example of this
 * is an endpoint device. A more awkward example is a host bridge whose root
 * ports get hot-added (technically possible, though unlikely).
 *
 * This is the locked variant of cxl_decoder_add().
 *
 * Context: Process context. Expects the device lock of the port that owns the
 *	    @cxld to be held.
 *
 * Return: Negative error code if the decoder wasn't properly configured; else
 *	   returns 0.
 */
int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	if (WARN_ON_ONCE(!cxld))
		return -EINVAL;

	if (WARN_ON_ONCE(IS_ERR(cxld)))
		return PTR_ERR(cxld);

	if (cxld->interleave_ways < 1)
		return -EINVAL;

	dev = &cxld->dev;

	port = to_cxl_port(cxld->dev.parent);
	if (!is_endpoint_decoder(dev)) {
		rc = decoder_populate_targets(cxld, port, target_map);
		if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) {
			dev_err(&port->dev,
				"Failed to populate active decoder targets\n");
			return rc;
		}
	}

	rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
	if (rc)
		return rc;

	/*
	 * Platform decoder resources should show up with a reasonable name.
	 * All other resources are just sub-ranges within the main decoder
	 * resource.
	 */
	if (is_root_decoder(dev))
		cxld->platform_res.name = dev_name(dev);

	return device_add(dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL);

/**
 * cxl_decoder_add - Add a decoder with targets
 * @cxld: The cxl decoder allocated by cxl_decoder_alloc()
 * @target_map: A list of downstream ports that this decoder can direct memory
 *		traffic to. These numbers should correspond with the port number
 *		in the PCIe Link Capabilities structure.
 *
 * This is the unlocked variant of cxl_decoder_add_locked().
 * See cxl_decoder_add_locked().
 *
 * Context: Process context. Takes and releases the device lock of the port
 *	    that owns the @cxld.
 */
int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
{
	struct cxl_port *port;
	int rc;

	if (WARN_ON_ONCE(!cxld))
		return -EINVAL;

	if (WARN_ON_ONCE(IS_ERR(cxld)))
		return PTR_ERR(cxld);

	port = to_cxl_port(cxld->dev.parent);

	device_lock(&port->dev);
	rc = cxl_decoder_add_locked(cxld, target_map);
	device_unlock(&port->dev);

	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);

static void cxld_unregister(void *dev)
{
	device_unregister(dev);
}

int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
{
	return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, CXL);

/**
 * __cxl_driver_register - register a driver for the cxl bus
 * @cxl_drv: cxl driver structure to attach
 * @owner: owning module/driver
 * @modname: KBUILD_MODNAME for parent driver
 */
int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
			  const char *modname)
{
	if (!cxl_drv->probe) {
		pr_debug("%s ->probe() must be specified\n", modname);
		return -EINVAL;
	}

	if (!cxl_drv->name) {
		pr_debug("%s ->name must be specified\n", modname);
		return -EINVAL;
	}

	if (!cxl_drv->id) {
		pr_debug("%s ->id must be specified\n", modname);
		return -EINVAL;
	}

	cxl_drv->drv.bus = &cxl_bus_type;
	cxl_drv->drv.owner = owner;
	cxl_drv->drv.mod_name = modname;
	cxl_drv->drv.name = cxl_drv->name;

	return driver_register(&cxl_drv->drv);
}
EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, CXL);

void cxl_driver_unregister(struct cxl_driver *cxl_drv)
{
	driver_unregister(&cxl_drv->drv);
}
EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL);

static int cxl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
			      cxl_device_id(dev));
}

static int cxl_bus_match(struct device *dev, struct device_driver *drv)
{
	return cxl_device_id(dev) == to_cxl_drv(drv)->id;
}

static int cxl_bus_probe(struct device *dev)
{
	int rc;

	rc = to_cxl_drv(dev->driver)->probe(dev);
	dev_dbg(dev, "probe: %d\n", rc);
	return rc;
}

static void cxl_bus_remove(struct device *dev)
{
	struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);

	if (cxl_drv->remove)
		cxl_drv->remove(dev);
}

static struct workqueue_struct *cxl_bus_wq;

int cxl_bus_rescan(void)
{
	return bus_rescan_devices(&cxl_bus_type);
}
EXPORT_SYMBOL_NS_GPL(cxl_bus_rescan, CXL);

bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
{
	return queue_work(cxl_bus_wq, &cxlmd->detach_work);
}
EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);

/* for user tooling to ensure port disable work has completed */
static ssize_t flush_store(struct bus_type *bus, const char *buf, size_t count)
{
	if (sysfs_streq(buf, "1")) {
		flush_workqueue(cxl_bus_wq);
		return count;
	}

	return -EINVAL;
}

static BUS_ATTR_WO(flush);

static struct attribute *cxl_bus_attributes[] = {
	&bus_attr_flush.attr,
	NULL,
};

static struct attribute_group cxl_bus_attribute_group = {
	.attrs = cxl_bus_attributes,
};

static const struct attribute_group *cxl_bus_attribute_groups[] = {
	&cxl_bus_attribute_group,
	NULL,
};

struct bus_type cxl_bus_type = {
	.name = "cxl",
	.uevent = cxl_bus_uevent,
	.match = cxl_bus_match,
	.probe = cxl_bus_probe,
	.remove = cxl_bus_remove,
	.bus_groups = cxl_bus_attribute_groups,
};
EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);

static __init int cxl_core_init(void)
{
	int rc;

	cxl_mbox_init();

	rc = cxl_memdev_init();
	if (rc)
		return rc;

	cxl_bus_wq = alloc_ordered_workqueue("cxl_port", 0);
	if (!cxl_bus_wq) {
		rc = -ENOMEM;
		goto err_wq;
	}

	rc = bus_register(&cxl_bus_type);
	if (rc)
		goto err_bus;

	return 0;

err_bus:
	destroy_workqueue(cxl_bus_wq);
err_wq:
	cxl_memdev_exit();
	cxl_mbox_exit();
	return rc;
}

static void cxl_core_exit(void)
{
	bus_unregister(&cxl_bus_type);
	destroy_workqueue(cxl_bus_wq);
	cxl_memdev_exit();
	cxl_mbox_exit();
}

module_init(cxl_core_init);
module_exit(cxl_core_exit);
MODULE_LICENSE("GPL v2");