1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright(c) 2020 Intel Corporation. All rights reserved. */ 3 #include <linux/io-64-nonatomic-lo-hi.h> 4 #include <linux/moduleparam.h> 5 #include <linux/module.h> 6 #include <linux/delay.h> 7 #include <linux/sizes.h> 8 #include <linux/mutex.h> 9 #include <linux/list.h> 10 #include <linux/pci.h> 11 #include <linux/io.h> 12 #include "cxlmem.h" 13 #include "cxlpci.h" 14 #include "cxl.h" 15 16 /** 17 * DOC: cxl pci 18 * 19 * This implements the PCI exclusive functionality for a CXL device as it is 20 * defined by the Compute Express Link specification. CXL devices may surface 21 * certain functionality even if it isn't CXL enabled. While this driver is 22 * focused around the PCI specific aspects of a CXL device, it binds to the 23 * specific CXL memory device class code, and therefore the implementation of 24 * cxl_pci is focused around CXL memory devices. 25 * 26 * The driver has several responsibilities, mainly: 27 * - Create the memX device and register on the CXL bus. 28 * - Enumerate device's register interface and map them. 29 * - Registers nvdimm bridge device with cxl_core. 30 * - Registers a CXL mailbox with cxl_core. 31 */ 32 33 #define cxl_doorbell_busy(cxlds) \ 34 (readl((cxlds)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) & \ 35 CXLDEV_MBOX_CTRL_DOORBELL) 36 37 /* CXL 2.0 - 8.2.8.4 */ 38 #define CXL_MAILBOX_TIMEOUT_MS (2 * HZ) 39 40 /* 41 * CXL 2.0 ECN "Add Mailbox Ready Time" defines a capability field to 42 * dictate how long to wait for the mailbox to become ready. The new 43 * field allows the device to tell software the amount of time to wait 44 * before mailbox ready. This field per the spec theoretically allows 45 * for up to 255 seconds. 255 seconds is unreasonably long, its longer 46 * than the maximum SATA port link recovery wait. Default to 60 seconds 47 * until someone builds a CXL device that needs more time in practice. 48 */ 49 static unsigned short mbox_ready_timeout = 60; 50 module_param(mbox_ready_timeout, ushort, 0644); 51 MODULE_PARM_DESC(mbox_ready_timeout, 52 "seconds to wait for mailbox ready / memory active status"); 53 54 static int cxl_pci_mbox_wait_for_doorbell(struct cxl_dev_state *cxlds) 55 { 56 const unsigned long start = jiffies; 57 unsigned long end = start; 58 59 while (cxl_doorbell_busy(cxlds)) { 60 end = jiffies; 61 62 if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) { 63 /* Check again in case preempted before timeout test */ 64 if (!cxl_doorbell_busy(cxlds)) 65 break; 66 return -ETIMEDOUT; 67 } 68 cpu_relax(); 69 } 70 71 dev_dbg(cxlds->dev, "Doorbell wait took %dms", 72 jiffies_to_msecs(end) - jiffies_to_msecs(start)); 73 return 0; 74 } 75 76 #define cxl_err(dev, status, msg) \ 77 dev_err_ratelimited(dev, msg ", device state %s%s\n", \ 78 status & CXLMDEV_DEV_FATAL ? " fatal" : "", \ 79 status & CXLMDEV_FW_HALT ? " firmware-halt" : "") 80 81 #define cxl_cmd_err(dev, cmd, status, msg) \ 82 dev_err_ratelimited(dev, msg " (opcode: %#x), device state %s%s\n", \ 83 (cmd)->opcode, \ 84 status & CXLMDEV_DEV_FATAL ? " fatal" : "", \ 85 status & CXLMDEV_FW_HALT ? " firmware-halt" : "") 86 87 /** 88 * __cxl_pci_mbox_send_cmd() - Execute a mailbox command 89 * @cxlds: The device state to communicate with. 90 * @mbox_cmd: Command to send to the memory device. 91 * 92 * Context: Any context. Expects mbox_mutex to be held. 93 * Return: -ETIMEDOUT if timeout occurred waiting for completion. 0 on success. 94 * Caller should check the return code in @mbox_cmd to make sure it 95 * succeeded. 96 * 97 * This is a generic form of the CXL mailbox send command thus only using the 98 * registers defined by the mailbox capability ID - CXL 2.0 8.2.8.4. Memory 99 * devices, and perhaps other types of CXL devices may have further information 100 * available upon error conditions. Driver facilities wishing to send mailbox 101 * commands should use the wrapper command. 102 * 103 * The CXL spec allows for up to two mailboxes. The intention is for the primary 104 * mailbox to be OS controlled and the secondary mailbox to be used by system 105 * firmware. This allows the OS and firmware to communicate with the device and 106 * not need to coordinate with each other. The driver only uses the primary 107 * mailbox. 108 */ 109 static int __cxl_pci_mbox_send_cmd(struct cxl_dev_state *cxlds, 110 struct cxl_mbox_cmd *mbox_cmd) 111 { 112 void __iomem *payload = cxlds->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET; 113 struct device *dev = cxlds->dev; 114 u64 cmd_reg, status_reg; 115 size_t out_len; 116 int rc; 117 118 lockdep_assert_held(&cxlds->mbox_mutex); 119 120 /* 121 * Here are the steps from 8.2.8.4 of the CXL 2.0 spec. 122 * 1. Caller reads MB Control Register to verify doorbell is clear 123 * 2. Caller writes Command Register 124 * 3. Caller writes Command Payload Registers if input payload is non-empty 125 * 4. Caller writes MB Control Register to set doorbell 126 * 5. Caller either polls for doorbell to be clear or waits for interrupt if configured 127 * 6. Caller reads MB Status Register to fetch Return code 128 * 7. If command successful, Caller reads Command Register to get Payload Length 129 * 8. If output payload is non-empty, host reads Command Payload Registers 130 * 131 * Hardware is free to do whatever it wants before the doorbell is rung, 132 * and isn't allowed to change anything after it clears the doorbell. As 133 * such, steps 2 and 3 can happen in any order, and steps 6, 7, 8 can 134 * also happen in any order (though some orders might not make sense). 135 */ 136 137 /* #1 */ 138 if (cxl_doorbell_busy(cxlds)) { 139 u64 md_status = 140 readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET); 141 142 cxl_cmd_err(cxlds->dev, mbox_cmd, md_status, 143 "mailbox queue busy"); 144 return -EBUSY; 145 } 146 147 cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK, 148 mbox_cmd->opcode); 149 if (mbox_cmd->size_in) { 150 if (WARN_ON(!mbox_cmd->payload_in)) 151 return -EINVAL; 152 153 cmd_reg |= FIELD_PREP(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, 154 mbox_cmd->size_in); 155 memcpy_toio(payload, mbox_cmd->payload_in, mbox_cmd->size_in); 156 } 157 158 /* #2, #3 */ 159 writeq(cmd_reg, cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET); 160 161 /* #4 */ 162 dev_dbg(dev, "Sending command\n"); 163 writel(CXLDEV_MBOX_CTRL_DOORBELL, 164 cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET); 165 166 /* #5 */ 167 rc = cxl_pci_mbox_wait_for_doorbell(cxlds); 168 if (rc == -ETIMEDOUT) { 169 u64 md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET); 170 171 cxl_cmd_err(cxlds->dev, mbox_cmd, md_status, "mailbox timeout"); 172 return rc; 173 } 174 175 /* #6 */ 176 status_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET); 177 mbox_cmd->return_code = 178 FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg); 179 180 if (mbox_cmd->return_code != 0) { 181 dev_dbg(dev, "Mailbox operation had an error\n"); 182 return 0; 183 } 184 185 /* #7 */ 186 cmd_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET); 187 out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg); 188 189 /* #8 */ 190 if (out_len && mbox_cmd->payload_out) { 191 /* 192 * Sanitize the copy. If hardware misbehaves, out_len per the 193 * spec can actually be greater than the max allowed size (21 194 * bits available but spec defined 1M max). The caller also may 195 * have requested less data than the hardware supplied even 196 * within spec. 197 */ 198 size_t n = min3(mbox_cmd->size_out, cxlds->payload_size, out_len); 199 200 memcpy_fromio(mbox_cmd->payload_out, payload, n); 201 mbox_cmd->size_out = n; 202 } else { 203 mbox_cmd->size_out = 0; 204 } 205 206 return 0; 207 } 208 209 static int cxl_pci_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) 210 { 211 int rc; 212 213 mutex_lock_io(&cxlds->mbox_mutex); 214 rc = __cxl_pci_mbox_send_cmd(cxlds, cmd); 215 mutex_unlock(&cxlds->mbox_mutex); 216 217 return rc; 218 } 219 220 static int cxl_pci_setup_mailbox(struct cxl_dev_state *cxlds) 221 { 222 const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET); 223 unsigned long timeout; 224 u64 md_status; 225 226 timeout = jiffies + mbox_ready_timeout * HZ; 227 do { 228 md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET); 229 if (md_status & CXLMDEV_MBOX_IF_READY) 230 break; 231 if (msleep_interruptible(100)) 232 break; 233 } while (!time_after(jiffies, timeout)); 234 235 if (!(md_status & CXLMDEV_MBOX_IF_READY)) { 236 cxl_err(cxlds->dev, md_status, 237 "timeout awaiting mailbox ready"); 238 return -ETIMEDOUT; 239 } 240 241 /* 242 * A command may be in flight from a previous driver instance, 243 * think kexec, do one doorbell wait so that 244 * __cxl_pci_mbox_send_cmd() can assume that it is the only 245 * source for future doorbell busy events. 246 */ 247 if (cxl_pci_mbox_wait_for_doorbell(cxlds) != 0) { 248 cxl_err(cxlds->dev, md_status, "timeout awaiting mailbox idle"); 249 return -ETIMEDOUT; 250 } 251 252 cxlds->mbox_send = cxl_pci_mbox_send; 253 cxlds->payload_size = 254 1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap); 255 256 /* 257 * CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register 258 * 259 * If the size is too small, mandatory commands will not work and so 260 * there's no point in going forward. If the size is too large, there's 261 * no harm is soft limiting it. 262 */ 263 cxlds->payload_size = min_t(size_t, cxlds->payload_size, SZ_1M); 264 if (cxlds->payload_size < 256) { 265 dev_err(cxlds->dev, "Mailbox is too small (%zub)", 266 cxlds->payload_size); 267 return -ENXIO; 268 } 269 270 dev_dbg(cxlds->dev, "Mailbox payload sized %zu", 271 cxlds->payload_size); 272 273 return 0; 274 } 275 276 static int cxl_map_regblock(struct pci_dev *pdev, struct cxl_register_map *map) 277 { 278 void __iomem *addr; 279 int bar = map->barno; 280 struct device *dev = &pdev->dev; 281 resource_size_t offset = map->block_offset; 282 283 /* Basic sanity check that BAR is big enough */ 284 if (pci_resource_len(pdev, bar) < offset) { 285 dev_err(dev, "BAR%d: %pr: too small (offset: %pa)\n", bar, 286 &pdev->resource[bar], &offset); 287 return -ENXIO; 288 } 289 290 addr = pci_iomap(pdev, bar, 0); 291 if (!addr) { 292 dev_err(dev, "failed to map registers\n"); 293 return -ENOMEM; 294 } 295 296 dev_dbg(dev, "Mapped CXL Memory Device resource bar %u @ %pa\n", 297 bar, &offset); 298 299 map->base = addr + map->block_offset; 300 return 0; 301 } 302 303 static void cxl_unmap_regblock(struct pci_dev *pdev, 304 struct cxl_register_map *map) 305 { 306 pci_iounmap(pdev, map->base - map->block_offset); 307 map->base = NULL; 308 } 309 310 static int cxl_probe_regs(struct pci_dev *pdev, struct cxl_register_map *map) 311 { 312 struct cxl_component_reg_map *comp_map; 313 struct cxl_device_reg_map *dev_map; 314 struct device *dev = &pdev->dev; 315 void __iomem *base = map->base; 316 317 switch (map->reg_type) { 318 case CXL_REGLOC_RBI_COMPONENT: 319 comp_map = &map->component_map; 320 cxl_probe_component_regs(dev, base, comp_map); 321 if (!comp_map->hdm_decoder.valid) { 322 dev_err(dev, "HDM decoder registers not found\n"); 323 return -ENXIO; 324 } 325 326 dev_dbg(dev, "Set up component registers\n"); 327 break; 328 case CXL_REGLOC_RBI_MEMDEV: 329 dev_map = &map->device_map; 330 cxl_probe_device_regs(dev, base, dev_map); 331 if (!dev_map->status.valid || !dev_map->mbox.valid || 332 !dev_map->memdev.valid) { 333 dev_err(dev, "registers not found: %s%s%s\n", 334 !dev_map->status.valid ? "status " : "", 335 !dev_map->mbox.valid ? "mbox " : "", 336 !dev_map->memdev.valid ? "memdev " : ""); 337 return -ENXIO; 338 } 339 340 dev_dbg(dev, "Probing device registers...\n"); 341 break; 342 default: 343 break; 344 } 345 346 return 0; 347 } 348 349 static int cxl_map_regs(struct cxl_dev_state *cxlds, struct cxl_register_map *map) 350 { 351 struct device *dev = cxlds->dev; 352 struct pci_dev *pdev = to_pci_dev(dev); 353 354 switch (map->reg_type) { 355 case CXL_REGLOC_RBI_COMPONENT: 356 cxl_map_component_regs(pdev, &cxlds->regs.component, map); 357 dev_dbg(dev, "Mapping component registers...\n"); 358 break; 359 case CXL_REGLOC_RBI_MEMDEV: 360 cxl_map_device_regs(pdev, &cxlds->regs.device_regs, map); 361 dev_dbg(dev, "Probing device registers...\n"); 362 break; 363 default: 364 break; 365 } 366 367 return 0; 368 } 369 370 static int cxl_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type, 371 struct cxl_register_map *map) 372 { 373 int rc; 374 375 rc = cxl_find_regblock(pdev, type, map); 376 if (rc) 377 return rc; 378 379 rc = cxl_map_regblock(pdev, map); 380 if (rc) 381 return rc; 382 383 rc = cxl_probe_regs(pdev, map); 384 cxl_unmap_regblock(pdev, map); 385 386 return rc; 387 } 388 389 static int wait_for_valid(struct cxl_dev_state *cxlds) 390 { 391 struct pci_dev *pdev = to_pci_dev(cxlds->dev); 392 int d = cxlds->cxl_dvsec, rc; 393 u32 val; 394 395 /* 396 * Memory_Info_Valid: When set, indicates that the CXL Range 1 Size high 397 * and Size Low registers are valid. Must be set within 1 second of 398 * deassertion of reset to CXL device. Likely it is already set by the 399 * time this runs, but otherwise give a 1.5 second timeout in case of 400 * clock skew. 401 */ 402 rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val); 403 if (rc) 404 return rc; 405 406 if (val & CXL_DVSEC_MEM_INFO_VALID) 407 return 0; 408 409 msleep(1500); 410 411 rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val); 412 if (rc) 413 return rc; 414 415 if (val & CXL_DVSEC_MEM_INFO_VALID) 416 return 0; 417 418 return -ETIMEDOUT; 419 } 420 421 /* 422 * Wait up to @mbox_ready_timeout for the device to report memory 423 * active. 424 */ 425 static int wait_for_media_ready(struct cxl_dev_state *cxlds) 426 { 427 struct pci_dev *pdev = to_pci_dev(cxlds->dev); 428 int d = cxlds->cxl_dvsec; 429 bool active = false; 430 u64 md_status; 431 int rc, i; 432 433 rc = wait_for_valid(cxlds); 434 if (rc) 435 return rc; 436 437 for (i = mbox_ready_timeout; i; i--) { 438 u32 temp; 439 440 rc = pci_read_config_dword( 441 pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &temp); 442 if (rc) 443 return rc; 444 445 active = FIELD_GET(CXL_DVSEC_MEM_ACTIVE, temp); 446 if (active) 447 break; 448 msleep(1000); 449 } 450 451 if (!active) { 452 dev_err(&pdev->dev, 453 "timeout awaiting memory active after %d seconds\n", 454 mbox_ready_timeout); 455 return -ETIMEDOUT; 456 } 457 458 md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET); 459 if (!CXLMDEV_READY(md_status)) 460 return -EIO; 461 462 return 0; 463 } 464 465 static int cxl_dvsec_ranges(struct cxl_dev_state *cxlds) 466 { 467 struct cxl_endpoint_dvsec_info *info = &cxlds->info; 468 struct pci_dev *pdev = to_pci_dev(cxlds->dev); 469 int d = cxlds->cxl_dvsec; 470 int hdm_count, rc, i; 471 u16 cap, ctrl; 472 473 if (!d) 474 return -ENXIO; 475 476 rc = pci_read_config_word(pdev, d + CXL_DVSEC_CAP_OFFSET, &cap); 477 if (rc) 478 return rc; 479 480 rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl); 481 if (rc) 482 return rc; 483 484 if (!(cap & CXL_DVSEC_MEM_CAPABLE)) 485 return -ENXIO; 486 487 /* 488 * It is not allowed by spec for MEM.capable to be set and have 0 legacy 489 * HDM decoders (values > 2 are also undefined as of CXL 2.0). As this 490 * driver is for a spec defined class code which must be CXL.mem 491 * capable, there is no point in continuing to enable CXL.mem. 492 */ 493 hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap); 494 if (!hdm_count || hdm_count > 2) 495 return -EINVAL; 496 497 rc = wait_for_valid(cxlds); 498 if (rc) 499 return rc; 500 501 info->mem_enabled = FIELD_GET(CXL_DVSEC_MEM_ENABLE, ctrl); 502 503 for (i = 0; i < hdm_count; i++) { 504 u64 base, size; 505 u32 temp; 506 507 rc = pci_read_config_dword( 508 pdev, d + CXL_DVSEC_RANGE_SIZE_HIGH(i), &temp); 509 if (rc) 510 return rc; 511 512 size = (u64)temp << 32; 513 514 rc = pci_read_config_dword( 515 pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(i), &temp); 516 if (rc) 517 return rc; 518 519 size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK; 520 521 rc = pci_read_config_dword( 522 pdev, d + CXL_DVSEC_RANGE_BASE_HIGH(i), &temp); 523 if (rc) 524 return rc; 525 526 base = (u64)temp << 32; 527 528 rc = pci_read_config_dword( 529 pdev, d + CXL_DVSEC_RANGE_BASE_LOW(i), &temp); 530 if (rc) 531 return rc; 532 533 base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK; 534 535 info->dvsec_range[i] = (struct range) { 536 .start = base, 537 .end = base + size - 1 538 }; 539 540 if (size) 541 info->ranges++; 542 } 543 544 return 0; 545 } 546 547 static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 548 { 549 struct cxl_register_map map; 550 struct cxl_memdev *cxlmd; 551 struct cxl_dev_state *cxlds; 552 int rc; 553 554 /* 555 * Double check the anonymous union trickery in struct cxl_regs 556 * FIXME switch to struct_group() 557 */ 558 BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) != 559 offsetof(struct cxl_regs, device_regs.memdev)); 560 561 rc = pcim_enable_device(pdev); 562 if (rc) 563 return rc; 564 565 cxlds = cxl_dev_state_create(&pdev->dev); 566 if (IS_ERR(cxlds)) 567 return PTR_ERR(cxlds); 568 569 cxlds->serial = pci_get_dsn(pdev); 570 cxlds->cxl_dvsec = pci_find_dvsec_capability( 571 pdev, PCI_DVSEC_VENDOR_ID_CXL, CXL_DVSEC_PCIE_DEVICE); 572 if (!cxlds->cxl_dvsec) 573 dev_warn(&pdev->dev, 574 "Device DVSEC not present, skip CXL.mem init\n"); 575 576 cxlds->wait_media_ready = wait_for_media_ready; 577 578 rc = cxl_setup_regs(pdev, CXL_REGLOC_RBI_MEMDEV, &map); 579 if (rc) 580 return rc; 581 582 rc = cxl_map_regs(cxlds, &map); 583 if (rc) 584 return rc; 585 586 /* 587 * If the component registers can't be found, the cxl_pci driver may 588 * still be useful for management functions so don't return an error. 589 */ 590 cxlds->component_reg_phys = CXL_RESOURCE_NONE; 591 rc = cxl_setup_regs(pdev, CXL_REGLOC_RBI_COMPONENT, &map); 592 if (rc) 593 dev_warn(&pdev->dev, "No component registers (%d)\n", rc); 594 595 cxlds->component_reg_phys = cxl_regmap_to_base(pdev, &map); 596 597 rc = cxl_pci_setup_mailbox(cxlds); 598 if (rc) 599 return rc; 600 601 rc = cxl_enumerate_cmds(cxlds); 602 if (rc) 603 return rc; 604 605 rc = cxl_dev_state_identify(cxlds); 606 if (rc) 607 return rc; 608 609 rc = cxl_mem_create_range_info(cxlds); 610 if (rc) 611 return rc; 612 613 rc = cxl_dvsec_ranges(cxlds); 614 if (rc) 615 dev_warn(&pdev->dev, 616 "Failed to get DVSEC range information (%d)\n", rc); 617 618 cxlmd = devm_cxl_add_memdev(cxlds); 619 if (IS_ERR(cxlmd)) 620 return PTR_ERR(cxlmd); 621 622 if (range_len(&cxlds->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM)) 623 rc = devm_cxl_add_nvdimm(&pdev->dev, cxlmd); 624 625 return rc; 626 } 627 628 static const struct pci_device_id cxl_mem_pci_tbl[] = { 629 /* PCI class code for CXL.mem Type-3 Devices */ 630 { PCI_DEVICE_CLASS((PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF), ~0)}, 631 { /* terminate list */ }, 632 }; 633 MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl); 634 635 static struct pci_driver cxl_pci_driver = { 636 .name = KBUILD_MODNAME, 637 .id_table = cxl_mem_pci_tbl, 638 .probe = cxl_pci_probe, 639 .driver = { 640 .probe_type = PROBE_PREFER_ASYNCHRONOUS, 641 }, 642 }; 643 644 MODULE_LICENSE("GPL v2"); 645 module_pci_driver(cxl_pci_driver); 646 MODULE_IMPORT_NS(CXL); 647