// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/io.h>
#include "cxlmem.h"
#include "cxlpci.h"
#include "cxl.h"

/**
 * DOC: cxl pci
 *
 * This implements the PCI exclusive functionality for a CXL device as it is
 * defined by the Compute Express Link specification. CXL devices may surface
 * certain functionality even if it isn't CXL enabled. While this driver is
 * focused around the PCI specific aspects of a CXL device, it binds to the
 * specific CXL memory device class code, and therefore the implementation of
 * cxl_pci is focused around CXL memory devices.
 *
 * The driver has several responsibilities, mainly:
 *  - Create the memX device and register on the CXL bus.
 *  - Enumerate the device's register interfaces and map them.
 *  - Register an nvdimm bridge device with cxl_core.
 *  - Register a CXL mailbox with cxl_core.
 */

#define cxl_doorbell_busy(cxlds)                                       \
	(readl((cxlds)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) &         \
	 CXLDEV_MBOX_CTRL_DOORBELL)

/* CXL 2.0 - 8.2.8.4 */
#define CXL_MAILBOX_TIMEOUT_MS (2 * HZ)

/*
 * CXL 2.0 ECN "Add Mailbox Ready Time" defines a capability field to
 * dictate how long to wait for the mailbox to become ready. The new
 * field allows the device to tell software the amount of time to wait
 * before mailbox ready. This field per the spec theoretically allows
 * for up to 255 seconds. 255 seconds is unreasonably long; it's longer
 * than the maximum SATA port link recovery wait. Default to 60 seconds
 * until someone builds a CXL device that needs more time in practice.
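 *
 * Being a module parameter (0644 below), the default can be overridden
 * at load time, e.g. "modprobe cxl_pci mbox_ready_timeout=120", or
 * adjusted at runtime via
 * /sys/module/cxl_pci/parameters/mbox_ready_timeout.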
49229e8828SBen Widawsky */ 50229e8828SBen Widawsky static unsigned short mbox_ready_timeout = 60; 51229e8828SBen Widawsky module_param(mbox_ready_timeout, ushort, 0644); 522e4ba0ecSDan Williams MODULE_PARM_DESC(mbox_ready_timeout, "seconds to wait for mailbox ready"); 53229e8828SBen Widawsky 545e2411aeSIra Weiny static int cxl_pci_mbox_wait_for_doorbell(struct cxl_dev_state *cxlds) 5521e9f767SBen Widawsky { 5621e9f767SBen Widawsky const unsigned long start = jiffies; 5721e9f767SBen Widawsky unsigned long end = start; 5821e9f767SBen Widawsky 595e2411aeSIra Weiny while (cxl_doorbell_busy(cxlds)) { 6021e9f767SBen Widawsky end = jiffies; 6121e9f767SBen Widawsky 6221e9f767SBen Widawsky if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) { 6321e9f767SBen Widawsky /* Check again in case preempted before timeout test */ 645e2411aeSIra Weiny if (!cxl_doorbell_busy(cxlds)) 6521e9f767SBen Widawsky break; 6621e9f767SBen Widawsky return -ETIMEDOUT; 6721e9f767SBen Widawsky } 6821e9f767SBen Widawsky cpu_relax(); 6921e9f767SBen Widawsky } 7021e9f767SBen Widawsky 715e2411aeSIra Weiny dev_dbg(cxlds->dev, "Doorbell wait took %dms", 7221e9f767SBen Widawsky jiffies_to_msecs(end) - jiffies_to_msecs(start)); 7321e9f767SBen Widawsky return 0; 7421e9f767SBen Widawsky } 7521e9f767SBen Widawsky 764f195ee7SDan Williams #define cxl_err(dev, status, msg) \ 774f195ee7SDan Williams dev_err_ratelimited(dev, msg ", device state %s%s\n", \ 784f195ee7SDan Williams status & CXLMDEV_DEV_FATAL ? " fatal" : "", \ 794f195ee7SDan Williams status & CXLMDEV_FW_HALT ? " firmware-halt" : "") 8021e9f767SBen Widawsky 814f195ee7SDan Williams #define cxl_cmd_err(dev, cmd, status, msg) \ 824f195ee7SDan Williams dev_err_ratelimited(dev, msg " (opcode: %#x), device state %s%s\n", \ 834f195ee7SDan Williams (cmd)->opcode, \ 844f195ee7SDan Williams status & CXLMDEV_DEV_FATAL ? " fatal" : "", \ 854f195ee7SDan Williams status & CXLMDEV_FW_HALT ? " firmware-halt" : "") 8621e9f767SBen Widawsky 8721e9f767SBen Widawsky /** 88ed97afb5SBen Widawsky * __cxl_pci_mbox_send_cmd() - Execute a mailbox command 895e2411aeSIra Weiny * @cxlds: The device state to communicate with. 9021e9f767SBen Widawsky * @mbox_cmd: Command to send to the memory device. 9121e9f767SBen Widawsky * 9221e9f767SBen Widawsky * Context: Any context. Expects mbox_mutex to be held. 9321e9f767SBen Widawsky * Return: -ETIMEDOUT if timeout occurred waiting for completion. 0 on success. 9421e9f767SBen Widawsky * Caller should check the return code in @mbox_cmd to make sure it 9521e9f767SBen Widawsky * succeeded. 9621e9f767SBen Widawsky * 9721e9f767SBen Widawsky * This is a generic form of the CXL mailbox send command thus only using the 9821e9f767SBen Widawsky * registers defined by the mailbox capability ID - CXL 2.0 8.2.8.4. Memory 9921e9f767SBen Widawsky * devices, and perhaps other types of CXL devices may have further information 10021e9f767SBen Widawsky * available upon error conditions. Driver facilities wishing to send mailbox 10121e9f767SBen Widawsky * commands should use the wrapper command. 10221e9f767SBen Widawsky * 10321e9f767SBen Widawsky * The CXL spec allows for up to two mailboxes. The intention is for the primary 10421e9f767SBen Widawsky * mailbox to be OS controlled and the secondary mailbox to be used by system 10521e9f767SBen Widawsky * firmware. This allows the OS and firmware to communicate with the device and 10621e9f767SBen Widawsky * not need to coordinate with each other. The driver only uses the primary 10721e9f767SBen Widawsky * mailbox. 
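 *
 * Illustrative caller sketch (in-tree callers go through the
 * cxl_internal_send_cmd() wrapper in cxl_core rather than invoking the
 * ->mbox_send() op directly):
 *
 *	struct cxl_mbox_identify id;
 *	struct cxl_mbox_cmd cmd = {
 *		.opcode = CXL_MBOX_OP_IDENTIFY,
 *		.size_out = sizeof(id),
 *		.payload_out = &id,
 *	};
 *	int rc = cxlds->mbox_send(cxlds, &cmd);
 *
 * A zero return only means the command completed; cmd.return_code still
 * carries the device's own status for the operation.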
 */
static int __cxl_pci_mbox_send_cmd(struct cxl_dev_state *cxlds,
				   struct cxl_mbox_cmd *mbox_cmd)
{
	void __iomem *payload = cxlds->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
	struct device *dev = cxlds->dev;
	u64 cmd_reg, status_reg;
	size_t out_len;
	int rc;

	lockdep_assert_held(&cxlds->mbox_mutex);

	/*
	 * Here are the steps from 8.2.8.4 of the CXL 2.0 spec.
	 *   1. Caller reads MB Control Register to verify doorbell is clear
	 *   2. Caller writes Command Register
	 *   3. Caller writes Command Payload Registers if input payload is non-empty
	 *   4. Caller writes MB Control Register to set doorbell
	 *   5. Caller either polls for doorbell to be clear or waits for interrupt if configured
	 *   6. Caller reads MB Status Register to fetch Return code
	 *   7. If command successful, Caller reads Command Register to get Payload Length
	 *   8. If output payload is non-empty, host reads Command Payload Registers
	 *
	 * Hardware is free to do whatever it wants before the doorbell is rung,
	 * and isn't allowed to change anything after it clears the doorbell. As
	 * such, steps 2 and 3 can happen in any order, and steps 6, 7, 8 can
	 * also happen in any order (though some orders might not make sense).
	 */

	/* #1 */
	if (cxl_doorbell_busy(cxlds)) {
		u64 md_status =
			readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);

		cxl_cmd_err(cxlds->dev, mbox_cmd, md_status,
			    "mailbox queue busy");
		return -EBUSY;
	}

	cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK,
			     mbox_cmd->opcode);
	if (mbox_cmd->size_in) {
		if (WARN_ON(!mbox_cmd->payload_in))
			return -EINVAL;

		cmd_reg |= FIELD_PREP(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK,
				      mbox_cmd->size_in);
		memcpy_toio(payload, mbox_cmd->payload_in, mbox_cmd->size_in);
	}

	/* #2, #3 */
	writeq(cmd_reg, cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);

	/* #4 */
	dev_dbg(dev, "Sending command: 0x%04x\n", mbox_cmd->opcode);
	writel(CXLDEV_MBOX_CTRL_DOORBELL,
	       cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);

	/* #5 */
	rc = cxl_pci_mbox_wait_for_doorbell(cxlds);
	if (rc == -ETIMEDOUT) {
		u64 md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);

		cxl_cmd_err(cxlds->dev, mbox_cmd, md_status, "mailbox timeout");
		return rc;
	}

	/* #6 */
	status_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET);
	mbox_cmd->return_code =
		FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);

	if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS) {
		dev_dbg(dev, "Mailbox operation had an error: %s\n",
			cxl_mbox_cmd_rc2str(mbox_cmd));
		return 0; /* completed but caller must check return_code */
	}

	/* #7 */
	cmd_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
	out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);

	/* #8 */
	if (out_len && mbox_cmd->payload_out) {
		/*
		 * Sanitize the copy. If hardware misbehaves, out_len per the
		 * spec can actually be greater than the max allowed size (21
		 * bits available but spec defined 1M max). The caller also may
		 * have requested less data than the hardware supplied even
		 * within spec.
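		 *
		 * Hence the min3() below: clamp the copy to the smallest of
		 * the caller's buffer, the negotiated mailbox payload size,
		 * and the device-reported output length.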
		 */
		size_t n = min3(mbox_cmd->size_out, cxlds->payload_size, out_len);

		memcpy_fromio(mbox_cmd->payload_out, payload, n);
		mbox_cmd->size_out = n;
	} else {
		mbox_cmd->size_out = 0;
	}

	return 0;
}

static int cxl_pci_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	int rc;

	mutex_lock_io(&cxlds->mbox_mutex);
	rc = __cxl_pci_mbox_send_cmd(cxlds, cmd);
	mutex_unlock(&cxlds->mbox_mutex);

	return rc;
}

static int cxl_pci_setup_mailbox(struct cxl_dev_state *cxlds)
{
	const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
	unsigned long timeout;
	u64 md_status;

	timeout = jiffies + mbox_ready_timeout * HZ;
	do {
		md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
		if (md_status & CXLMDEV_MBOX_IF_READY)
			break;
		if (msleep_interruptible(100))
			break;
	} while (!time_after(jiffies, timeout));

	if (!(md_status & CXLMDEV_MBOX_IF_READY)) {
		cxl_err(cxlds->dev, md_status,
			"timeout awaiting mailbox ready");
		return -ETIMEDOUT;
	}

	/*
	 * A command may be in flight from a previous driver instance
	 * (think kexec); do one doorbell wait so that
	 * __cxl_pci_mbox_send_cmd() can assume that it is the only
	 * source for future doorbell busy events.
	 */
	if (cxl_pci_mbox_wait_for_doorbell(cxlds) != 0) {
		cxl_err(cxlds->dev, md_status, "timeout awaiting mailbox idle");
		return -ETIMEDOUT;
	}

	cxlds->mbox_send = cxl_pci_mbox_send;
	cxlds->payload_size =
		1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap);

	/*
	 * CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register
	 *
	 * If the size is too small, mandatory commands will not work and so
	 * there's no point in going forward. If the size is too large, there's
	 * no harm in soft limiting it.
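	 *
	 * The spec bounds the payload size to the range [256B, 1M], which
	 * is why the value is clamped to SZ_1M below and anything under
	 * 256 bytes fails probe.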
	 */
	cxlds->payload_size = min_t(size_t, cxlds->payload_size, SZ_1M);
	if (cxlds->payload_size < 256) {
		dev_err(cxlds->dev, "Mailbox is too small (%zub)",
			cxlds->payload_size);
		return -ENXIO;
	}

	dev_dbg(cxlds->dev, "Mailbox payload sized %zu",
		cxlds->payload_size);

	return 0;
}

static int cxl_map_regblock(struct pci_dev *pdev, struct cxl_register_map *map)
{
	struct device *dev = &pdev->dev;

	map->base = ioremap(map->resource, map->max_size);
	if (!map->base) {
		dev_err(dev, "failed to map registers\n");
		return -ENOMEM;
	}

	dev_dbg(dev, "Mapped CXL Memory Device resource %pa\n", &map->resource);
	return 0;
}

static void cxl_unmap_regblock(struct pci_dev *pdev,
			       struct cxl_register_map *map)
{
	iounmap(map->base);
	map->base = NULL;
}

static int cxl_probe_regs(struct pci_dev *pdev, struct cxl_register_map *map)
{
	struct cxl_component_reg_map *comp_map;
	struct cxl_device_reg_map *dev_map;
	struct device *dev = &pdev->dev;
	void __iomem *base = map->base;

	switch (map->reg_type) {
	case CXL_REGLOC_RBI_COMPONENT:
		comp_map = &map->component_map;
		cxl_probe_component_regs(dev, base, comp_map);
		if (!comp_map->hdm_decoder.valid) {
			dev_err(dev, "HDM decoder registers not found\n");
			return -ENXIO;
		}

		if (!comp_map->ras.valid)
			dev_dbg(dev, "RAS registers not found\n");

		dev_dbg(dev, "Set up component registers\n");
		break;
	case CXL_REGLOC_RBI_MEMDEV:
		dev_map = &map->device_map;
		cxl_probe_device_regs(dev, base, dev_map);
		if (!dev_map->status.valid || !dev_map->mbox.valid ||
		    !dev_map->memdev.valid) {
			dev_err(dev, "registers not found: %s%s%s\n",
				!dev_map->status.valid ? "status " : "",
				!dev_map->mbox.valid ? "mbox " : "",
"memdev " : ""); 32830af9729SIra Weiny return -ENXIO; 32930af9729SIra Weiny } 33030af9729SIra Weiny 33130af9729SIra Weiny dev_dbg(dev, "Probing device registers...\n"); 33230af9729SIra Weiny break; 33330af9729SIra Weiny default: 33430af9729SIra Weiny break; 33530af9729SIra Weiny } 33630af9729SIra Weiny 33730af9729SIra Weiny return 0; 33830af9729SIra Weiny } 33930af9729SIra Weiny 34085afc317SBen Widawsky static int cxl_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type, 34185afc317SBen Widawsky struct cxl_register_map *map) 34285afc317SBen Widawsky { 34385afc317SBen Widawsky int rc; 34485afc317SBen Widawsky 34585afc317SBen Widawsky rc = cxl_find_regblock(pdev, type, map); 34685afc317SBen Widawsky if (rc) 34785afc317SBen Widawsky return rc; 34885afc317SBen Widawsky 34985afc317SBen Widawsky rc = cxl_map_regblock(pdev, map); 35085afc317SBen Widawsky if (rc) 35185afc317SBen Widawsky return rc; 35285afc317SBen Widawsky 35385afc317SBen Widawsky rc = cxl_probe_regs(pdev, map); 354a261e9a1SDan Williams cxl_unmap_regblock(pdev, map); 3555b68705dSBen Widawsky 35685afc317SBen Widawsky return rc; 3571d5a4159SBen Widawsky } 3581d5a4159SBen Widawsky 3590a19bfc8SDan Williams /* 3600a19bfc8SDan Williams * Assume that any RCIEP that emits the CXL memory expander class code 3610a19bfc8SDan Williams * is an RCD 3620a19bfc8SDan Williams */ 3630a19bfc8SDan Williams static bool is_cxl_restricted(struct pci_dev *pdev) 3640a19bfc8SDan Williams { 3650a19bfc8SDan Williams return pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END; 3660a19bfc8SDan Williams } 3670a19bfc8SDan Williams 368248529edSDave Jiang /* 369248529edSDave Jiang * CXL v3.0 6.2.3 Table 6-4 370248529edSDave Jiang * The table indicates that if PCIe Flit Mode is set, then CXL is in 256B flits 371248529edSDave Jiang * mode, otherwise it's 68B flits mode. 
 */
static bool cxl_pci_flit_256(struct pci_dev *pdev)
{
	u16 lnksta2;

	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA2, &lnksta2);
	return lnksta2 & PCI_EXP_LNKSTA2_FLIT;
}

static int cxl_pci_ras_unmask(struct pci_dev *pdev)
{
	struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus);
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	void __iomem *addr;
	u32 orig_val, val, mask;
	u16 cap;
	int rc;

	if (!cxlds->regs.ras) {
		dev_dbg(&pdev->dev, "No RAS registers.\n");
		return 0;
	}

	/* BIOS has CXL error control */
	if (!host_bridge->native_cxl_error)
		return -ENXIO;

	rc = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap);
	if (rc)
		return rc;

	if (cap & PCI_EXP_DEVCTL_URRE) {
		addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_MASK_OFFSET;
		orig_val = readl(addr);

		mask = CXL_RAS_UNCORRECTABLE_MASK_MASK;
		if (!cxl_pci_flit_256(pdev))
			mask &= ~CXL_RAS_UNCORRECTABLE_MASK_F256B_MASK;
		val = orig_val & ~mask;
		writel(val, addr);
		dev_dbg(&pdev->dev,
			"Uncorrectable RAS Errors Mask: %#x -> %#x\n",
			orig_val, val);
	}

	if (cap & PCI_EXP_DEVCTL_CERE) {
		addr = cxlds->regs.ras + CXL_RAS_CORRECTABLE_MASK_OFFSET;
		orig_val = readl(addr);
		val = orig_val & ~CXL_RAS_CORRECTABLE_MASK_MASK;
		writel(val, addr);
		dev_dbg(&pdev->dev, "Correctable RAS Errors Mask: %#x -> %#x\n",
			orig_val, val);
	}

	return 0;
}

static void free_event_buf(void *buf)
{
	kvfree(buf);
}

/*
 * There is a single buffer for reading event logs from the mailbox. All logs
 * share this buffer protected by the cxlds->event_log_lock.
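 *
 * The buffer is sized to the negotiated mailbox payload size, which also
 * bounds the largest possible Get Event Records response.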
 */
static int cxl_mem_alloc_event_buf(struct cxl_dev_state *cxlds)
{
	struct cxl_get_event_payload *buf;

	buf = kvmalloc(cxlds->payload_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	cxlds->event.buf = buf;

	return devm_add_action_or_reset(cxlds->dev, free_event_buf, buf);
}

static int cxl_alloc_irq_vectors(struct pci_dev *pdev)
{
	int nvecs;

	/*
	 * Per CXL 3.0 3.1.1 CXL.io Endpoint a function on a CXL device must
	 * not generate INTx messages if that function participates in
	 * CXL.cache or CXL.mem.
	 *
	 * Additionally pci_alloc_irq_vectors() handles calling
	 * pci_free_irq_vectors() automatically despite not being called
	 * pcim_*. See pci_setup_msi_context().
	 */
	nvecs = pci_alloc_irq_vectors(pdev, 1, CXL_PCI_DEFAULT_MAX_VECTORS,
				      PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (nvecs < 1) {
		dev_dbg(&pdev->dev, "Failed to alloc irq vectors: %d\n", nvecs);
		return -ENXIO;
	}
	return 0;
}

struct cxl_dev_id {
	struct cxl_dev_state *cxlds;
};

static irqreturn_t cxl_event_thread(int irq, void *id)
{
	struct cxl_dev_id *dev_id = id;
	struct cxl_dev_state *cxlds = dev_id->cxlds;
	u32 status;

	do {
		/*
		 * CXL 3.0 8.2.8.3.1: The lower 32 bits are the status;
		 * ignore the reserved upper 32 bits
		 */
		status = readl(cxlds->regs.status + CXLDEV_DEV_EVENT_STATUS_OFFSET);
		/* Ignore logs unknown to the driver */
		status &= CXLDEV_EVENT_STATUS_ALL;
		if (!status)
			break;
		cxl_mem_get_event_records(cxlds, status);
		cond_resched();
	} while (status);

	return IRQ_HANDLED;
}

static int cxl_event_req_irq(struct cxl_dev_state *cxlds, u8 setting)
{
	struct device *dev = cxlds->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct cxl_dev_id *dev_id;
	int irq;

	if (FIELD_GET(CXLDEV_EVENT_INT_MODE_MASK, setting) !=
	    CXL_INT_MSI_MSIX)
		return -ENXIO;

	/* dev_id must be globally unique and must contain the cxlds */
	dev_id = devm_kzalloc(dev, sizeof(*dev_id), GFP_KERNEL);
	if (!dev_id)
		return -ENOMEM;
	dev_id->cxlds = cxlds;

	irq = pci_irq_vector(pdev,
			     FIELD_GET(CXLDEV_EVENT_INT_MSGNUM_MASK, setting));
	if (irq < 0)
		return irq;

	return devm_request_threaded_irq(dev, irq, NULL, cxl_event_thread,
					 IRQF_SHARED | IRQF_ONESHOT, NULL,
					 dev_id);
}

static int cxl_event_get_int_policy(struct cxl_dev_state *cxlds,
				    struct cxl_event_interrupt_policy *policy)
{
	struct cxl_mbox_cmd mbox_cmd = {
		.opcode = CXL_MBOX_OP_GET_EVT_INT_POLICY,
		.payload_out = policy,
		.size_out = sizeof(*policy),
	};
	int rc;

	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
	if (rc < 0)
		dev_err(cxlds->dev, "Failed to get event interrupt policy : %d",
			rc);

	return rc;
}

static int cxl_event_config_msgnums(struct cxl_dev_state *cxlds,
				    struct cxl_event_interrupt_policy *policy)
{
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	*policy = (struct cxl_event_interrupt_policy) {
		.info_settings = CXL_INT_MSI_MSIX,
		.warn_settings = CXL_INT_MSI_MSIX,
		.failure_settings = CXL_INT_MSI_MSIX,
		.fatal_settings = CXL_INT_MSI_MSIX,
	};

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_SET_EVT_INT_POLICY,
		.payload_in = policy,
		.size_in = sizeof(*policy),
	};

	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
	if (rc < 0) {
		dev_err(cxlds->dev, "Failed to set event interrupt policy : %d",
			rc);
		return rc;
	}

	/* Retrieve final interrupt settings */
	return cxl_event_get_int_policy(cxlds, policy);
}

static int cxl_event_irqsetup(struct cxl_dev_state *cxlds)
{
	struct cxl_event_interrupt_policy policy;
	int rc;

	rc = cxl_event_config_msgnums(cxlds, &policy);
	if (rc)
		return rc;

	rc = cxl_event_req_irq(cxlds, policy.info_settings);
	if (rc) {
		dev_err(cxlds->dev, "Failed to get interrupt for event Info log\n");
		return rc;
	}

	rc = cxl_event_req_irq(cxlds, policy.warn_settings);
	if (rc) {
		dev_err(cxlds->dev, "Failed to get interrupt for event Warn log\n");
		return rc;
	}

	rc = cxl_event_req_irq(cxlds, policy.failure_settings);
	if (rc) {
		dev_err(cxlds->dev, "Failed to get interrupt for event Failure log\n");
		return rc;
	}

	rc = cxl_event_req_irq(cxlds, policy.fatal_settings);
	if (rc) {
		dev_err(cxlds->dev, "Failed to get interrupt for event Fatal log\n");
		return rc;
	}

	return 0;
}

static bool cxl_event_int_is_fw(u8 setting)
{
	u8 mode = FIELD_GET(CXLDEV_EVENT_INT_MODE_MASK, setting);

	return mode == CXL_INT_FW;
}

static int cxl_event_config(struct pci_host_bridge *host_bridge,
			    struct cxl_dev_state *cxlds)
{
	struct cxl_event_interrupt_policy policy;
	int rc;

	/*
	 * When BIOS maintains CXL error reporting control, it will process
	 * event records. Only one agent can do so.
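	 * (native_cxl_error reflects whether the OS was granted CXL error
	 * reporting control through ACPI _OSC negotiation.)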
	 */
	if (!host_bridge->native_cxl_error)
		return 0;

	rc = cxl_mem_alloc_event_buf(cxlds);
	if (rc)
		return rc;

	rc = cxl_event_get_int_policy(cxlds, &policy);
	if (rc)
		return rc;

	if (cxl_event_int_is_fw(policy.info_settings) ||
	    cxl_event_int_is_fw(policy.warn_settings) ||
	    cxl_event_int_is_fw(policy.failure_settings) ||
	    cxl_event_int_is_fw(policy.fatal_settings)) {
		dev_err(cxlds->dev, "FW still in control of Event Logs despite _OSC settings\n");
		return -EBUSY;
	}

	rc = cxl_event_irqsetup(cxlds);
	if (rc)
		return rc;

	cxl_mem_get_event_records(cxlds, CXLDEV_EVENT_STATUS_ALL);

	return 0;
}

static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus);
	struct cxl_register_map map;
	struct cxl_memdev *cxlmd;
	struct cxl_dev_state *cxlds;
	int rc;

	/*
	 * Double check the anonymous union trickery in struct cxl_regs
	 * FIXME switch to struct_group()
	 */
	BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) !=
		     offsetof(struct cxl_regs, device_regs.memdev));

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;
	pci_set_master(pdev);

	cxlds = cxl_dev_state_create(&pdev->dev);
	if (IS_ERR(cxlds))
		return PTR_ERR(cxlds);
	pci_set_drvdata(pdev, cxlds);

	cxlds->rcd = is_cxl_restricted(pdev);
	cxlds->serial = pci_get_dsn(pdev);
	cxlds->cxl_dvsec = pci_find_dvsec_capability(
		pdev, PCI_DVSEC_VENDOR_ID_CXL, CXL_DVSEC_PCIE_DEVICE);
	if (!cxlds->cxl_dvsec)
		dev_warn(&pdev->dev,
			 "Device DVSEC not present, skip CXL.mem init\n");

	rc = cxl_setup_regs(pdev, CXL_REGLOC_RBI_MEMDEV, &map);
	if (rc)
		return rc;

	rc = cxl_map_device_regs(&pdev->dev, &cxlds->regs.device_regs, &map);
	if (rc)
		return rc;

	/*
	 * If the component registers can't be found, the cxl_pci driver may
	 * still be useful for management functions so don't return an error.
	 */
	cxlds->component_reg_phys = CXL_RESOURCE_NONE;
	rc = cxl_setup_regs(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	if (rc)
		dev_warn(&pdev->dev, "No component registers (%d)\n", rc);

	cxlds->component_reg_phys = map.resource;

	rc = cxl_map_component_regs(&pdev->dev, &cxlds->regs.component,
				    &map, BIT(CXL_CM_CAP_CAP_ID_RAS));
	if (rc)
		dev_dbg(&pdev->dev, "Failed to map RAS capability.\n");

	rc = cxl_await_media_ready(cxlds);
	if (rc == 0)
		cxlds->media_ready = true;
	else
		dev_warn(&pdev->dev, "Media not active (%d)\n", rc);

	rc = cxl_pci_setup_mailbox(cxlds);
	if (rc)
		return rc;

	rc = cxl_enumerate_cmds(cxlds);
	if (rc)
		return rc;

	rc = cxl_set_timestamp(cxlds);
	if (rc)
		return rc;

	rc = cxl_poison_state_init(cxlds);
	if (rc)
		return rc;

	rc = cxl_dev_state_identify(cxlds);
	if (rc)
		return rc;

	rc = cxl_mem_create_range_info(cxlds);
	if (rc)
		return rc;

	rc = cxl_alloc_irq_vectors(pdev);
	if (rc)
		return rc;

	cxlmd = devm_cxl_add_memdev(cxlds);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	rc = cxl_event_config(host_bridge, cxlds);
	if (rc)
		return rc;

	rc = cxl_pci_ras_unmask(pdev);
	if (rc)
		dev_dbg(&pdev->dev, "No RAS reporting unmasked\n");

	pci_save_state(pdev);

	return rc;
}

static const struct pci_device_id cxl_mem_pci_tbl[] = {
	/* PCI class code for CXL.mem Type-3 Devices */
	{ PCI_DEVICE_CLASS((PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF), ~0)},
	{ /* terminate list */ },
};
MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl);

static pci_ers_result_t cxl_slot_reset(struct pci_dev *pdev)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	struct device *dev = &cxlmd->dev;

	dev_info(&pdev->dev, "%s: restart CXL.mem after slot reset\n",
		 dev_name(dev));
	pci_restore_state(pdev);
	if (device_attach(dev) <= 0)
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}

static void cxl_error_resume(struct pci_dev *pdev)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	struct device *dev = &cxlmd->dev;

	dev_info(&pdev->dev, "%s: error resume %s\n", dev_name(dev),
		 dev->driver ? "successful" : "failed");
}

static const struct pci_error_handlers cxl_error_handlers = {
	.error_detected = cxl_error_detected,
	.slot_reset = cxl_slot_reset,
	.resume = cxl_error_resume,
	.cor_error_detected = cxl_cor_error_detected,
};

static struct pci_driver cxl_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = cxl_mem_pci_tbl,
	.probe = cxl_pci_probe,
	.err_handler = &cxl_error_handlers,
	.driver = {
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

MODULE_LICENSE("GPL v2");
module_pci_driver(cxl_pci_driver);
MODULE_IMPORT_NS(CXL);