121e9f767SBen Widawsky // SPDX-License-Identifier: GPL-2.0-only 221e9f767SBen Widawsky /* Copyright(c) 2020 Intel Corporation. All rights reserved. */ 321e9f767SBen Widawsky #include <uapi/linux/cxl_mem.h> 421e9f767SBen Widawsky #include <linux/security.h> 521e9f767SBen Widawsky #include <linux/debugfs.h> 621e9f767SBen Widawsky #include <linux/module.h> 721e9f767SBen Widawsky #include <linux/sizes.h> 821e9f767SBen Widawsky #include <linux/mutex.h> 930af9729SIra Weiny #include <linux/list.h> 1021e9f767SBen Widawsky #include <linux/cdev.h> 1121e9f767SBen Widawsky #include <linux/idr.h> 1221e9f767SBen Widawsky #include <linux/pci.h> 1321e9f767SBen Widawsky #include <linux/io.h> 1421e9f767SBen Widawsky #include <linux/io-64-nonatomic-lo-hi.h> 155161a55cSBen Widawsky #include "cxlmem.h" 1621e9f767SBen Widawsky #include "pci.h" 1721e9f767SBen Widawsky #include "cxl.h" 1821e9f767SBen Widawsky 1921e9f767SBen Widawsky /** 2021e9f767SBen Widawsky * DOC: cxl pci 2121e9f767SBen Widawsky * 2221e9f767SBen Widawsky * This implements the PCI exclusive functionality for a CXL device as it is 2321e9f767SBen Widawsky * defined by the Compute Express Link specification. CXL devices may surface 2421e9f767SBen Widawsky * certain functionality even if it isn't CXL enabled. 2521e9f767SBen Widawsky * 2621e9f767SBen Widawsky * The driver has several responsibilities, mainly: 2721e9f767SBen Widawsky * - Create the memX device and register on the CXL bus. 2821e9f767SBen Widawsky * - Enumerate device's register interface and map them. 2921e9f767SBen Widawsky * - Probe the device attributes to establish sysfs interface. 3021e9f767SBen Widawsky * - Provide an IOCTL interface to userspace to communicate with the device for 3121e9f767SBen Widawsky * things like firmware update. 
3221e9f767SBen Widawsky */ 3321e9f767SBen Widawsky 3421e9f767SBen Widawsky #define cxl_doorbell_busy(cxlm) \ 3521e9f767SBen Widawsky (readl((cxlm)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) & \ 3621e9f767SBen Widawsky CXLDEV_MBOX_CTRL_DOORBELL) 3721e9f767SBen Widawsky 3821e9f767SBen Widawsky /* CXL 2.0 - 8.2.8.4 */ 3921e9f767SBen Widawsky #define CXL_MAILBOX_TIMEOUT_MS (2 * HZ) 4021e9f767SBen Widawsky 4121e9f767SBen Widawsky enum opcode { 4221e9f767SBen Widawsky CXL_MBOX_OP_INVALID = 0x0000, 4321e9f767SBen Widawsky CXL_MBOX_OP_RAW = CXL_MBOX_OP_INVALID, 4421e9f767SBen Widawsky CXL_MBOX_OP_GET_FW_INFO = 0x0200, 4521e9f767SBen Widawsky CXL_MBOX_OP_ACTIVATE_FW = 0x0202, 4621e9f767SBen Widawsky CXL_MBOX_OP_GET_SUPPORTED_LOGS = 0x0400, 4721e9f767SBen Widawsky CXL_MBOX_OP_GET_LOG = 0x0401, 4821e9f767SBen Widawsky CXL_MBOX_OP_IDENTIFY = 0x4000, 4921e9f767SBen Widawsky CXL_MBOX_OP_GET_PARTITION_INFO = 0x4100, 5021e9f767SBen Widawsky CXL_MBOX_OP_SET_PARTITION_INFO = 0x4101, 5121e9f767SBen Widawsky CXL_MBOX_OP_GET_LSA = 0x4102, 5221e9f767SBen Widawsky CXL_MBOX_OP_SET_LSA = 0x4103, 5321e9f767SBen Widawsky CXL_MBOX_OP_GET_HEALTH_INFO = 0x4200, 5487815ee9SBen Widawsky CXL_MBOX_OP_GET_ALERT_CONFIG = 0x4201, 5587815ee9SBen Widawsky CXL_MBOX_OP_SET_ALERT_CONFIG = 0x4202, 5687815ee9SBen Widawsky CXL_MBOX_OP_GET_SHUTDOWN_STATE = 0x4203, 5721e9f767SBen Widawsky CXL_MBOX_OP_SET_SHUTDOWN_STATE = 0x4204, 5887815ee9SBen Widawsky CXL_MBOX_OP_GET_POISON = 0x4300, 5987815ee9SBen Widawsky CXL_MBOX_OP_INJECT_POISON = 0x4301, 6087815ee9SBen Widawsky CXL_MBOX_OP_CLEAR_POISON = 0x4302, 6187815ee9SBen Widawsky CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS = 0x4303, 6221e9f767SBen Widawsky CXL_MBOX_OP_SCAN_MEDIA = 0x4304, 6321e9f767SBen Widawsky CXL_MBOX_OP_GET_SCAN_MEDIA = 0x4305, 6421e9f767SBen Widawsky CXL_MBOX_OP_MAX = 0x10000 6521e9f767SBen Widawsky }; 6621e9f767SBen Widawsky 6721e9f767SBen Widawsky static DECLARE_RWSEM(cxl_memdev_rwsem); 6821e9f767SBen Widawsky static struct dentry *cxl_debugfs; 6921e9f767SBen 
Widawsky static bool cxl_raw_allow_all; 7021e9f767SBen Widawsky 7121e9f767SBen Widawsky enum { 7221e9f767SBen Widawsky CEL_UUID, 7321e9f767SBen Widawsky VENDOR_DEBUG_UUID, 7421e9f767SBen Widawsky }; 7521e9f767SBen Widawsky 7621e9f767SBen Widawsky /* See CXL 2.0 Table 170. Get Log Input Payload */ 7721e9f767SBen Widawsky static const uuid_t log_uuid[] = { 7821e9f767SBen Widawsky [CEL_UUID] = UUID_INIT(0xda9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79, 0x96, 7921e9f767SBen Widawsky 0xb1, 0x62, 0x3b, 0x3f, 0x17), 8021e9f767SBen Widawsky [VENDOR_DEBUG_UUID] = UUID_INIT(0xe1819d9, 0x11a9, 0x400c, 0x81, 0x1f, 8121e9f767SBen Widawsky 0xd6, 0x07, 0x19, 0x40, 0x3d, 0x86), 8221e9f767SBen Widawsky }; 8321e9f767SBen Widawsky 8421e9f767SBen Widawsky /** 8521e9f767SBen Widawsky * struct cxl_mem_command - Driver representation of a memory device command 8621e9f767SBen Widawsky * @info: Command information as it exists for the UAPI 8721e9f767SBen Widawsky * @opcode: The actual bits used for the mailbox protocol 8821e9f767SBen Widawsky * @flags: Set of flags effecting driver behavior. 8921e9f767SBen Widawsky * 9021e9f767SBen Widawsky * * %CXL_CMD_FLAG_FORCE_ENABLE: In cases of error, commands with this flag 9121e9f767SBen Widawsky * will be enabled by the driver regardless of what hardware may have 9221e9f767SBen Widawsky * advertised. 9321e9f767SBen Widawsky * 9421e9f767SBen Widawsky * The cxl_mem_command is the driver's internal representation of commands that 9521e9f767SBen Widawsky * are supported by the driver. Some of these commands may not be supported by 9621e9f767SBen Widawsky * the hardware. The driver will use @info to validate the fields passed in by 9721e9f767SBen Widawsky * the user then submit the @opcode to the hardware. 9821e9f767SBen Widawsky * 9921e9f767SBen Widawsky * See struct cxl_command_info. 
10021e9f767SBen Widawsky */ 10121e9f767SBen Widawsky struct cxl_mem_command { 10221e9f767SBen Widawsky struct cxl_command_info info; 10321e9f767SBen Widawsky enum opcode opcode; 10421e9f767SBen Widawsky u32 flags; 10521e9f767SBen Widawsky #define CXL_CMD_FLAG_NONE 0 10621e9f767SBen Widawsky #define CXL_CMD_FLAG_FORCE_ENABLE BIT(0) 10721e9f767SBen Widawsky }; 10821e9f767SBen Widawsky 10921e9f767SBen Widawsky #define CXL_CMD(_id, sin, sout, _flags) \ 11021e9f767SBen Widawsky [CXL_MEM_COMMAND_ID_##_id] = { \ 11121e9f767SBen Widawsky .info = { \ 11221e9f767SBen Widawsky .id = CXL_MEM_COMMAND_ID_##_id, \ 11321e9f767SBen Widawsky .size_in = sin, \ 11421e9f767SBen Widawsky .size_out = sout, \ 11521e9f767SBen Widawsky }, \ 11621e9f767SBen Widawsky .opcode = CXL_MBOX_OP_##_id, \ 11721e9f767SBen Widawsky .flags = _flags, \ 11821e9f767SBen Widawsky } 11921e9f767SBen Widawsky 12021e9f767SBen Widawsky /* 12121e9f767SBen Widawsky * This table defines the supported mailbox commands for the driver. This table 12221e9f767SBen Widawsky * is made up of a UAPI structure. Non-negative values as parameters in the 12321e9f767SBen Widawsky * table will be validated against the user's input. For example, if size_in is 12421e9f767SBen Widawsky * 0, and the user passed in 1, it is an error. 
12521e9f767SBen Widawsky */ 12621e9f767SBen Widawsky static struct cxl_mem_command mem_commands[CXL_MEM_COMMAND_ID_MAX] = { 12721e9f767SBen Widawsky CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE), 12821e9f767SBen Widawsky #ifdef CONFIG_CXL_MEM_RAW_COMMANDS 12921e9f767SBen Widawsky CXL_CMD(RAW, ~0, ~0, 0), 13021e9f767SBen Widawsky #endif 13121e9f767SBen Widawsky CXL_CMD(GET_SUPPORTED_LOGS, 0, ~0, CXL_CMD_FLAG_FORCE_ENABLE), 13221e9f767SBen Widawsky CXL_CMD(GET_FW_INFO, 0, 0x50, 0), 13321e9f767SBen Widawsky CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0), 13421e9f767SBen Widawsky CXL_CMD(GET_LSA, 0x8, ~0, 0), 13521e9f767SBen Widawsky CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0), 13621e9f767SBen Widawsky CXL_CMD(GET_LOG, 0x18, ~0, CXL_CMD_FLAG_FORCE_ENABLE), 13787815ee9SBen Widawsky CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0), 13887815ee9SBen Widawsky CXL_CMD(SET_LSA, ~0, 0, 0), 13987815ee9SBen Widawsky CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0), 14087815ee9SBen Widawsky CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0), 14187815ee9SBen Widawsky CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0), 14287815ee9SBen Widawsky CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0), 14387815ee9SBen Widawsky CXL_CMD(GET_POISON, 0x10, ~0, 0), 14487815ee9SBen Widawsky CXL_CMD(INJECT_POISON, 0x8, 0, 0), 14587815ee9SBen Widawsky CXL_CMD(CLEAR_POISON, 0x48, 0, 0), 14687815ee9SBen Widawsky CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0), 14787815ee9SBen Widawsky CXL_CMD(SCAN_MEDIA, 0x11, 0, 0), 14887815ee9SBen Widawsky CXL_CMD(GET_SCAN_MEDIA, 0, ~0, 0), 14921e9f767SBen Widawsky }; 15021e9f767SBen Widawsky 15121e9f767SBen Widawsky /* 15221e9f767SBen Widawsky * Commands that RAW doesn't permit. The rationale for each: 15321e9f767SBen Widawsky * 15421e9f767SBen Widawsky * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment / 15521e9f767SBen Widawsky * coordination of transaction timeout values at the root bridge level. 
15621e9f767SBen Widawsky * 15721e9f767SBen Widawsky * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live 15821e9f767SBen Widawsky * and needs to be coordinated with HDM updates. 15921e9f767SBen Widawsky * 16021e9f767SBen Widawsky * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the 16121e9f767SBen Widawsky * driver and any writes from userspace invalidates those contents. 16221e9f767SBen Widawsky * 16321e9f767SBen Widawsky * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes 16421e9f767SBen Widawsky * to the device after it is marked clean, userspace can not make that 16521e9f767SBen Widawsky * assertion. 16621e9f767SBen Widawsky * 16721e9f767SBen Widawsky * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that 16821e9f767SBen Widawsky * is kept up to date with patrol notifications and error management. 16921e9f767SBen Widawsky */ 17021e9f767SBen Widawsky static u16 cxl_disabled_raw_commands[] = { 17121e9f767SBen Widawsky CXL_MBOX_OP_ACTIVATE_FW, 17221e9f767SBen Widawsky CXL_MBOX_OP_SET_PARTITION_INFO, 17321e9f767SBen Widawsky CXL_MBOX_OP_SET_LSA, 17421e9f767SBen Widawsky CXL_MBOX_OP_SET_SHUTDOWN_STATE, 17521e9f767SBen Widawsky CXL_MBOX_OP_SCAN_MEDIA, 17621e9f767SBen Widawsky CXL_MBOX_OP_GET_SCAN_MEDIA, 17721e9f767SBen Widawsky }; 17821e9f767SBen Widawsky 17921e9f767SBen Widawsky /* 18021e9f767SBen Widawsky * Command sets that RAW doesn't permit. All opcodes in this set are 18121e9f767SBen Widawsky * disabled because they pass plain text security payloads over the 18221e9f767SBen Widawsky * user/kernel boundary. 
This functionality is intended to be wrapped 18321e9f767SBen Widawsky * behind the keys ABI which allows for encrypted payloads in the UAPI 18421e9f767SBen Widawsky */ 18521e9f767SBen Widawsky static u8 security_command_sets[] = { 18621e9f767SBen Widawsky 0x44, /* Sanitize */ 18721e9f767SBen Widawsky 0x45, /* Persistent Memory Data-at-rest Security */ 18821e9f767SBen Widawsky 0x46, /* Security Passthrough */ 18921e9f767SBen Widawsky }; 19021e9f767SBen Widawsky 19121e9f767SBen Widawsky #define cxl_for_each_cmd(cmd) \ 19221e9f767SBen Widawsky for ((cmd) = &mem_commands[0]; \ 19321e9f767SBen Widawsky ((cmd) - mem_commands) < ARRAY_SIZE(mem_commands); (cmd)++) 19421e9f767SBen Widawsky 19521e9f767SBen Widawsky #define cxl_cmd_count ARRAY_SIZE(mem_commands) 19621e9f767SBen Widawsky 19721e9f767SBen Widawsky static int cxl_mem_wait_for_doorbell(struct cxl_mem *cxlm) 19821e9f767SBen Widawsky { 19921e9f767SBen Widawsky const unsigned long start = jiffies; 20021e9f767SBen Widawsky unsigned long end = start; 20121e9f767SBen Widawsky 20221e9f767SBen Widawsky while (cxl_doorbell_busy(cxlm)) { 20321e9f767SBen Widawsky end = jiffies; 20421e9f767SBen Widawsky 20521e9f767SBen Widawsky if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) { 20621e9f767SBen Widawsky /* Check again in case preempted before timeout test */ 20721e9f767SBen Widawsky if (!cxl_doorbell_busy(cxlm)) 20821e9f767SBen Widawsky break; 20921e9f767SBen Widawsky return -ETIMEDOUT; 21021e9f767SBen Widawsky } 21121e9f767SBen Widawsky cpu_relax(); 21221e9f767SBen Widawsky } 21321e9f767SBen Widawsky 21499e222a5SDan Williams dev_dbg(cxlm->dev, "Doorbell wait took %dms", 21521e9f767SBen Widawsky jiffies_to_msecs(end) - jiffies_to_msecs(start)); 21621e9f767SBen Widawsky return 0; 21721e9f767SBen Widawsky } 21821e9f767SBen Widawsky 21921e9f767SBen Widawsky static bool cxl_is_security_command(u16 opcode) 22021e9f767SBen Widawsky { 22121e9f767SBen Widawsky int i; 22221e9f767SBen Widawsky 22321e9f767SBen Widawsky for (i = 0; i 
< ARRAY_SIZE(security_command_sets); i++) 22421e9f767SBen Widawsky if (security_command_sets[i] == (opcode >> 8)) 22521e9f767SBen Widawsky return true; 22621e9f767SBen Widawsky return false; 22721e9f767SBen Widawsky } 22821e9f767SBen Widawsky 22921e9f767SBen Widawsky static void cxl_mem_mbox_timeout(struct cxl_mem *cxlm, 230*b64955a9SDan Williams struct cxl_mbox_cmd *mbox_cmd) 23121e9f767SBen Widawsky { 23299e222a5SDan Williams struct device *dev = cxlm->dev; 23321e9f767SBen Widawsky 23421e9f767SBen Widawsky dev_dbg(dev, "Mailbox command (opcode: %#x size: %zub) timed out\n", 23521e9f767SBen Widawsky mbox_cmd->opcode, mbox_cmd->size_in); 23621e9f767SBen Widawsky } 23721e9f767SBen Widawsky 23821e9f767SBen Widawsky /** 23921e9f767SBen Widawsky * __cxl_mem_mbox_send_cmd() - Execute a mailbox command 24021e9f767SBen Widawsky * @cxlm: The CXL memory device to communicate with. 24121e9f767SBen Widawsky * @mbox_cmd: Command to send to the memory device. 24221e9f767SBen Widawsky * 24321e9f767SBen Widawsky * Context: Any context. Expects mbox_mutex to be held. 24421e9f767SBen Widawsky * Return: -ETIMEDOUT if timeout occurred waiting for completion. 0 on success. 24521e9f767SBen Widawsky * Caller should check the return code in @mbox_cmd to make sure it 24621e9f767SBen Widawsky * succeeded. 24721e9f767SBen Widawsky * 24821e9f767SBen Widawsky * This is a generic form of the CXL mailbox send command thus only using the 24921e9f767SBen Widawsky * registers defined by the mailbox capability ID - CXL 2.0 8.2.8.4. Memory 25021e9f767SBen Widawsky * devices, and perhaps other types of CXL devices may have further information 25121e9f767SBen Widawsky * available upon error conditions. Driver facilities wishing to send mailbox 25221e9f767SBen Widawsky * commands should use the wrapper command. 25321e9f767SBen Widawsky * 25421e9f767SBen Widawsky * The CXL spec allows for up to two mailboxes. 
The intention is for the primary 25521e9f767SBen Widawsky * mailbox to be OS controlled and the secondary mailbox to be used by system 25621e9f767SBen Widawsky * firmware. This allows the OS and firmware to communicate with the device and 25721e9f767SBen Widawsky * not need to coordinate with each other. The driver only uses the primary 25821e9f767SBen Widawsky * mailbox. 25921e9f767SBen Widawsky */ 26021e9f767SBen Widawsky static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, 261*b64955a9SDan Williams struct cxl_mbox_cmd *mbox_cmd) 26221e9f767SBen Widawsky { 26321e9f767SBen Widawsky void __iomem *payload = cxlm->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET; 26499e222a5SDan Williams struct device *dev = cxlm->dev; 26521e9f767SBen Widawsky u64 cmd_reg, status_reg; 26621e9f767SBen Widawsky size_t out_len; 26721e9f767SBen Widawsky int rc; 26821e9f767SBen Widawsky 26921e9f767SBen Widawsky lockdep_assert_held(&cxlm->mbox_mutex); 27021e9f767SBen Widawsky 27121e9f767SBen Widawsky /* 27221e9f767SBen Widawsky * Here are the steps from 8.2.8.4 of the CXL 2.0 spec. 27321e9f767SBen Widawsky * 1. Caller reads MB Control Register to verify doorbell is clear 27421e9f767SBen Widawsky * 2. Caller writes Command Register 27521e9f767SBen Widawsky * 3. Caller writes Command Payload Registers if input payload is non-empty 27621e9f767SBen Widawsky * 4. Caller writes MB Control Register to set doorbell 27721e9f767SBen Widawsky * 5. Caller either polls for doorbell to be clear or waits for interrupt if configured 27821e9f767SBen Widawsky * 6. Caller reads MB Status Register to fetch Return code 27921e9f767SBen Widawsky * 7. If command successful, Caller reads Command Register to get Payload Length 28021e9f767SBen Widawsky * 8. 
If output payload is non-empty, host reads Command Payload Registers 28121e9f767SBen Widawsky * 28221e9f767SBen Widawsky * Hardware is free to do whatever it wants before the doorbell is rung, 28321e9f767SBen Widawsky * and isn't allowed to change anything after it clears the doorbell. As 28421e9f767SBen Widawsky * such, steps 2 and 3 can happen in any order, and steps 6, 7, 8 can 28521e9f767SBen Widawsky * also happen in any order (though some orders might not make sense). 28621e9f767SBen Widawsky */ 28721e9f767SBen Widawsky 28821e9f767SBen Widawsky /* #1 */ 28921e9f767SBen Widawsky if (cxl_doorbell_busy(cxlm)) { 29099e222a5SDan Williams dev_err_ratelimited(dev, "Mailbox re-busy after acquiring\n"); 29121e9f767SBen Widawsky return -EBUSY; 29221e9f767SBen Widawsky } 29321e9f767SBen Widawsky 29421e9f767SBen Widawsky cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK, 29521e9f767SBen Widawsky mbox_cmd->opcode); 29621e9f767SBen Widawsky if (mbox_cmd->size_in) { 29721e9f767SBen Widawsky if (WARN_ON(!mbox_cmd->payload_in)) 29821e9f767SBen Widawsky return -EINVAL; 29921e9f767SBen Widawsky 30021e9f767SBen Widawsky cmd_reg |= FIELD_PREP(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, 30121e9f767SBen Widawsky mbox_cmd->size_in); 30221e9f767SBen Widawsky memcpy_toio(payload, mbox_cmd->payload_in, mbox_cmd->size_in); 30321e9f767SBen Widawsky } 30421e9f767SBen Widawsky 30521e9f767SBen Widawsky /* #2, #3 */ 30621e9f767SBen Widawsky writeq(cmd_reg, cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET); 30721e9f767SBen Widawsky 30821e9f767SBen Widawsky /* #4 */ 30999e222a5SDan Williams dev_dbg(dev, "Sending command\n"); 31021e9f767SBen Widawsky writel(CXLDEV_MBOX_CTRL_DOORBELL, 31121e9f767SBen Widawsky cxlm->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET); 31221e9f767SBen Widawsky 31321e9f767SBen Widawsky /* #5 */ 31421e9f767SBen Widawsky rc = cxl_mem_wait_for_doorbell(cxlm); 31521e9f767SBen Widawsky if (rc == -ETIMEDOUT) { 31621e9f767SBen Widawsky cxl_mem_mbox_timeout(cxlm, mbox_cmd); 31721e9f767SBen 
Widawsky return rc; 31821e9f767SBen Widawsky } 31921e9f767SBen Widawsky 32021e9f767SBen Widawsky /* #6 */ 32121e9f767SBen Widawsky status_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET); 32221e9f767SBen Widawsky mbox_cmd->return_code = 32321e9f767SBen Widawsky FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg); 32421e9f767SBen Widawsky 32521e9f767SBen Widawsky if (mbox_cmd->return_code != 0) { 32699e222a5SDan Williams dev_dbg(dev, "Mailbox operation had an error\n"); 32721e9f767SBen Widawsky return 0; 32821e9f767SBen Widawsky } 32921e9f767SBen Widawsky 33021e9f767SBen Widawsky /* #7 */ 33121e9f767SBen Widawsky cmd_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET); 33221e9f767SBen Widawsky out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg); 33321e9f767SBen Widawsky 33421e9f767SBen Widawsky /* #8 */ 33521e9f767SBen Widawsky if (out_len && mbox_cmd->payload_out) { 33621e9f767SBen Widawsky /* 33721e9f767SBen Widawsky * Sanitize the copy. If hardware misbehaves, out_len per the 33821e9f767SBen Widawsky * spec can actually be greater than the max allowed size (21 33921e9f767SBen Widawsky * bits available but spec defined 1M max). The caller also may 34021e9f767SBen Widawsky * have requested less data than the hardware supplied even 34121e9f767SBen Widawsky * within spec. 34221e9f767SBen Widawsky */ 34321e9f767SBen Widawsky size_t n = min3(mbox_cmd->size_out, cxlm->payload_size, out_len); 34421e9f767SBen Widawsky 34521e9f767SBen Widawsky memcpy_fromio(mbox_cmd->payload_out, payload, n); 34621e9f767SBen Widawsky mbox_cmd->size_out = n; 34721e9f767SBen Widawsky } else { 34821e9f767SBen Widawsky mbox_cmd->size_out = 0; 34921e9f767SBen Widawsky } 35021e9f767SBen Widawsky 35121e9f767SBen Widawsky return 0; 35221e9f767SBen Widawsky } 35321e9f767SBen Widawsky 35421e9f767SBen Widawsky /** 35521e9f767SBen Widawsky * cxl_mem_mbox_get() - Acquire exclusive access to the mailbox. 35621e9f767SBen Widawsky * @cxlm: The memory device to gain access to. 
35721e9f767SBen Widawsky * 35821e9f767SBen Widawsky * Context: Any context. Takes the mbox_mutex. 35921e9f767SBen Widawsky * Return: 0 if exclusive access was acquired. 36021e9f767SBen Widawsky */ 36121e9f767SBen Widawsky static int cxl_mem_mbox_get(struct cxl_mem *cxlm) 36221e9f767SBen Widawsky { 36399e222a5SDan Williams struct device *dev = cxlm->dev; 36421e9f767SBen Widawsky u64 md_status; 36521e9f767SBen Widawsky int rc; 36621e9f767SBen Widawsky 36721e9f767SBen Widawsky mutex_lock_io(&cxlm->mbox_mutex); 36821e9f767SBen Widawsky 36921e9f767SBen Widawsky /* 37021e9f767SBen Widawsky * XXX: There is some amount of ambiguity in the 2.0 version of the spec 37121e9f767SBen Widawsky * around the mailbox interface ready (8.2.8.5.1.1). The purpose of the 37221e9f767SBen Widawsky * bit is to allow firmware running on the device to notify the driver 37321e9f767SBen Widawsky * that it's ready to receive commands. It is unclear if the bit needs 37421e9f767SBen Widawsky * to be read for each transaction mailbox, ie. the firmware can switch 37521e9f767SBen Widawsky * it on and off as needed. Second, there is no defined timeout for 37621e9f767SBen Widawsky * mailbox ready, like there is for the doorbell interface. 37721e9f767SBen Widawsky * 37821e9f767SBen Widawsky * Assumptions: 37921e9f767SBen Widawsky * 1. The firmware might toggle the Mailbox Interface Ready bit, check 38021e9f767SBen Widawsky * it for every command. 38121e9f767SBen Widawsky * 38221e9f767SBen Widawsky * 2. If the doorbell is clear, the firmware should have first set the 38321e9f767SBen Widawsky * Mailbox Interface Ready bit. Therefore, waiting for the doorbell 38421e9f767SBen Widawsky * to be ready is sufficient. 
38521e9f767SBen Widawsky */ 38621e9f767SBen Widawsky rc = cxl_mem_wait_for_doorbell(cxlm); 38721e9f767SBen Widawsky if (rc) { 38821e9f767SBen Widawsky dev_warn(dev, "Mailbox interface not ready\n"); 38921e9f767SBen Widawsky goto out; 39021e9f767SBen Widawsky } 39121e9f767SBen Widawsky 39221e9f767SBen Widawsky md_status = readq(cxlm->regs.memdev + CXLMDEV_STATUS_OFFSET); 39321e9f767SBen Widawsky if (!(md_status & CXLMDEV_MBOX_IF_READY && CXLMDEV_READY(md_status))) { 39421e9f767SBen Widawsky dev_err(dev, "mbox: reported doorbell ready, but not mbox ready\n"); 39521e9f767SBen Widawsky rc = -EBUSY; 39621e9f767SBen Widawsky goto out; 39721e9f767SBen Widawsky } 39821e9f767SBen Widawsky 39921e9f767SBen Widawsky /* 40021e9f767SBen Widawsky * Hardware shouldn't allow a ready status but also have failure bits 40121e9f767SBen Widawsky * set. Spit out an error, this should be a bug report 40221e9f767SBen Widawsky */ 40321e9f767SBen Widawsky rc = -EFAULT; 40421e9f767SBen Widawsky if (md_status & CXLMDEV_DEV_FATAL) { 40521e9f767SBen Widawsky dev_err(dev, "mbox: reported ready, but fatal\n"); 40621e9f767SBen Widawsky goto out; 40721e9f767SBen Widawsky } 40821e9f767SBen Widawsky if (md_status & CXLMDEV_FW_HALT) { 40921e9f767SBen Widawsky dev_err(dev, "mbox: reported ready, but halted\n"); 41021e9f767SBen Widawsky goto out; 41121e9f767SBen Widawsky } 41221e9f767SBen Widawsky if (CXLMDEV_RESET_NEEDED(md_status)) { 41321e9f767SBen Widawsky dev_err(dev, "mbox: reported ready, but reset needed\n"); 41421e9f767SBen Widawsky goto out; 41521e9f767SBen Widawsky } 41621e9f767SBen Widawsky 41721e9f767SBen Widawsky /* with lock held */ 41821e9f767SBen Widawsky return 0; 41921e9f767SBen Widawsky 42021e9f767SBen Widawsky out: 42121e9f767SBen Widawsky mutex_unlock(&cxlm->mbox_mutex); 42221e9f767SBen Widawsky return rc; 42321e9f767SBen Widawsky } 42421e9f767SBen Widawsky 42521e9f767SBen Widawsky /** 42621e9f767SBen Widawsky * cxl_mem_mbox_put() - Release exclusive access to the mailbox. 
42721e9f767SBen Widawsky * @cxlm: The CXL memory device to communicate with. 42821e9f767SBen Widawsky * 42921e9f767SBen Widawsky * Context: Any context. Expects mbox_mutex to be held. 43021e9f767SBen Widawsky */ 43121e9f767SBen Widawsky static void cxl_mem_mbox_put(struct cxl_mem *cxlm) 43221e9f767SBen Widawsky { 43321e9f767SBen Widawsky mutex_unlock(&cxlm->mbox_mutex); 43421e9f767SBen Widawsky } 43521e9f767SBen Widawsky 436*b64955a9SDan Williams static int cxl_pci_mbox_send(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd) 437*b64955a9SDan Williams { 438*b64955a9SDan Williams int rc; 439*b64955a9SDan Williams 440*b64955a9SDan Williams rc = cxl_mem_mbox_get(cxlm); 441*b64955a9SDan Williams if (rc) 442*b64955a9SDan Williams return rc; 443*b64955a9SDan Williams 444*b64955a9SDan Williams rc = __cxl_mem_mbox_send_cmd(cxlm, cmd); 445*b64955a9SDan Williams cxl_mem_mbox_put(cxlm); 446*b64955a9SDan Williams 447*b64955a9SDan Williams return rc; 448*b64955a9SDan Williams } 449*b64955a9SDan Williams 45021e9f767SBen Widawsky /** 45121e9f767SBen Widawsky * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace. 45221e9f767SBen Widawsky * @cxlm: The CXL memory device to communicate with. 45321e9f767SBen Widawsky * @cmd: The validated command. 45421e9f767SBen Widawsky * @in_payload: Pointer to userspace's input payload. 45521e9f767SBen Widawsky * @out_payload: Pointer to userspace's output payload. 45621e9f767SBen Widawsky * @size_out: (Input) Max payload size to copy out. 45721e9f767SBen Widawsky * (Output) Payload size hardware generated. 45821e9f767SBen Widawsky * @retval: Hardware generated return code from the operation. 45921e9f767SBen Widawsky * 46021e9f767SBen Widawsky * Return: 46121e9f767SBen Widawsky * * %0 - Mailbox transaction succeeded. This implies the mailbox 46221e9f767SBen Widawsky * protocol completed successfully not that the operation itself 46321e9f767SBen Widawsky * was successful. 
46421e9f767SBen Widawsky * * %-ENOMEM - Couldn't allocate a bounce buffer. 46521e9f767SBen Widawsky * * %-EFAULT - Something happened with copy_to/from_user. 46621e9f767SBen Widawsky * * %-EINTR - Mailbox acquisition interrupted. 46721e9f767SBen Widawsky * * %-EXXX - Transaction level failures. 46821e9f767SBen Widawsky * 46921e9f767SBen Widawsky * Creates the appropriate mailbox command and dispatches it on behalf of a 47021e9f767SBen Widawsky * userspace request. The input and output payloads are copied between 47121e9f767SBen Widawsky * userspace. 47221e9f767SBen Widawsky * 47321e9f767SBen Widawsky * See cxl_send_cmd(). 47421e9f767SBen Widawsky */ 47521e9f767SBen Widawsky static int handle_mailbox_cmd_from_user(struct cxl_mem *cxlm, 47621e9f767SBen Widawsky const struct cxl_mem_command *cmd, 47721e9f767SBen Widawsky u64 in_payload, u64 out_payload, 47821e9f767SBen Widawsky s32 *size_out, u32 *retval) 47921e9f767SBen Widawsky { 48099e222a5SDan Williams struct device *dev = cxlm->dev; 481*b64955a9SDan Williams struct cxl_mbox_cmd mbox_cmd = { 48221e9f767SBen Widawsky .opcode = cmd->opcode, 48321e9f767SBen Widawsky .size_in = cmd->info.size_in, 48421e9f767SBen Widawsky .size_out = cmd->info.size_out, 48521e9f767SBen Widawsky }; 48621e9f767SBen Widawsky int rc; 48721e9f767SBen Widawsky 48821e9f767SBen Widawsky if (cmd->info.size_out) { 48921e9f767SBen Widawsky mbox_cmd.payload_out = kvzalloc(cmd->info.size_out, GFP_KERNEL); 49021e9f767SBen Widawsky if (!mbox_cmd.payload_out) 49121e9f767SBen Widawsky return -ENOMEM; 49221e9f767SBen Widawsky } 49321e9f767SBen Widawsky 49421e9f767SBen Widawsky if (cmd->info.size_in) { 49521e9f767SBen Widawsky mbox_cmd.payload_in = vmemdup_user(u64_to_user_ptr(in_payload), 49621e9f767SBen Widawsky cmd->info.size_in); 49721e9f767SBen Widawsky if (IS_ERR(mbox_cmd.payload_in)) { 49821e9f767SBen Widawsky kvfree(mbox_cmd.payload_out); 49921e9f767SBen Widawsky return PTR_ERR(mbox_cmd.payload_in); 50021e9f767SBen Widawsky } 50121e9f767SBen 
Widawsky } 50221e9f767SBen Widawsky 50321e9f767SBen Widawsky dev_dbg(dev, 50421e9f767SBen Widawsky "Submitting %s command for user\n" 50521e9f767SBen Widawsky "\topcode: %x\n" 50621e9f767SBen Widawsky "\tsize: %ub\n", 50721e9f767SBen Widawsky cxl_command_names[cmd->info.id].name, mbox_cmd.opcode, 50821e9f767SBen Widawsky cmd->info.size_in); 50921e9f767SBen Widawsky 51021e9f767SBen Widawsky dev_WARN_ONCE(dev, cmd->info.id == CXL_MEM_COMMAND_ID_RAW, 51121e9f767SBen Widawsky "raw command path used\n"); 51221e9f767SBen Widawsky 513*b64955a9SDan Williams rc = cxlm->mbox_send(cxlm, &mbox_cmd); 51421e9f767SBen Widawsky if (rc) 51521e9f767SBen Widawsky goto out; 51621e9f767SBen Widawsky 51721e9f767SBen Widawsky /* 51821e9f767SBen Widawsky * @size_out contains the max size that's allowed to be written back out 51921e9f767SBen Widawsky * to userspace. While the payload may have written more output than 52021e9f767SBen Widawsky * this it will have to be ignored. 52121e9f767SBen Widawsky */ 52221e9f767SBen Widawsky if (mbox_cmd.size_out) { 52321e9f767SBen Widawsky dev_WARN_ONCE(dev, mbox_cmd.size_out > *size_out, 52421e9f767SBen Widawsky "Invalid return size\n"); 52521e9f767SBen Widawsky if (copy_to_user(u64_to_user_ptr(out_payload), 52621e9f767SBen Widawsky mbox_cmd.payload_out, mbox_cmd.size_out)) { 52721e9f767SBen Widawsky rc = -EFAULT; 52821e9f767SBen Widawsky goto out; 52921e9f767SBen Widawsky } 53021e9f767SBen Widawsky } 53121e9f767SBen Widawsky 53221e9f767SBen Widawsky *size_out = mbox_cmd.size_out; 53321e9f767SBen Widawsky *retval = mbox_cmd.return_code; 53421e9f767SBen Widawsky 53521e9f767SBen Widawsky out: 53621e9f767SBen Widawsky kvfree(mbox_cmd.payload_in); 53721e9f767SBen Widawsky kvfree(mbox_cmd.payload_out); 53821e9f767SBen Widawsky return rc; 53921e9f767SBen Widawsky } 54021e9f767SBen Widawsky 54121e9f767SBen Widawsky static bool cxl_mem_raw_command_allowed(u16 opcode) 54221e9f767SBen Widawsky { 54321e9f767SBen Widawsky int i; 54421e9f767SBen Widawsky 
54521e9f767SBen Widawsky if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS)) 54621e9f767SBen Widawsky return false; 54721e9f767SBen Widawsky 5489e56614cSDan Williams if (security_locked_down(LOCKDOWN_PCI_ACCESS)) 54921e9f767SBen Widawsky return false; 55021e9f767SBen Widawsky 55121e9f767SBen Widawsky if (cxl_raw_allow_all) 55221e9f767SBen Widawsky return true; 55321e9f767SBen Widawsky 55421e9f767SBen Widawsky if (cxl_is_security_command(opcode)) 55521e9f767SBen Widawsky return false; 55621e9f767SBen Widawsky 55721e9f767SBen Widawsky for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++) 55821e9f767SBen Widawsky if (cxl_disabled_raw_commands[i] == opcode) 55921e9f767SBen Widawsky return false; 56021e9f767SBen Widawsky 56121e9f767SBen Widawsky return true; 56221e9f767SBen Widawsky } 56321e9f767SBen Widawsky 56421e9f767SBen Widawsky /** 56521e9f767SBen Widawsky * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND. 56621e9f767SBen Widawsky * @cxlm: &struct cxl_mem device whose mailbox will be used. 56721e9f767SBen Widawsky * @send_cmd: &struct cxl_send_command copied in from userspace. 56821e9f767SBen Widawsky * @out_cmd: Sanitized and populated &struct cxl_mem_command. 56921e9f767SBen Widawsky * 57021e9f767SBen Widawsky * Return: 57121e9f767SBen Widawsky * * %0 - @out_cmd is ready to send. 57221e9f767SBen Widawsky * * %-ENOTTY - Invalid command specified. 57321e9f767SBen Widawsky * * %-EINVAL - Reserved fields or invalid values were used. 57421e9f767SBen Widawsky * * %-ENOMEM - Input or output buffer wasn't sized properly. 57521e9f767SBen Widawsky * * %-EPERM - Attempted to use a protected command. 57621e9f767SBen Widawsky * 57721e9f767SBen Widawsky * The result of this command is a fully validated command in @out_cmd that is 57821e9f767SBen Widawsky * safe to send to the hardware. 
57921e9f767SBen Widawsky * 58021e9f767SBen Widawsky * See handle_mailbox_cmd_from_user() 58121e9f767SBen Widawsky */ 58221e9f767SBen Widawsky static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm, 58321e9f767SBen Widawsky const struct cxl_send_command *send_cmd, 58421e9f767SBen Widawsky struct cxl_mem_command *out_cmd) 58521e9f767SBen Widawsky { 58621e9f767SBen Widawsky const struct cxl_command_info *info; 58721e9f767SBen Widawsky struct cxl_mem_command *c; 58821e9f767SBen Widawsky 58921e9f767SBen Widawsky if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX) 59021e9f767SBen Widawsky return -ENOTTY; 59121e9f767SBen Widawsky 59221e9f767SBen Widawsky /* 59321e9f767SBen Widawsky * The user can never specify an input payload larger than what hardware 59421e9f767SBen Widawsky * supports, but output can be arbitrarily large (simply write out as 59521e9f767SBen Widawsky * much data as the hardware provides). 59621e9f767SBen Widawsky */ 59721e9f767SBen Widawsky if (send_cmd->in.size > cxlm->payload_size) 59821e9f767SBen Widawsky return -EINVAL; 59921e9f767SBen Widawsky 60021e9f767SBen Widawsky /* 60121e9f767SBen Widawsky * Checks are bypassed for raw commands but a WARN/taint will occur 60221e9f767SBen Widawsky * later in the callchain 60321e9f767SBen Widawsky */ 60421e9f767SBen Widawsky if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW) { 60521e9f767SBen Widawsky const struct cxl_mem_command temp = { 60621e9f767SBen Widawsky .info = { 60721e9f767SBen Widawsky .id = CXL_MEM_COMMAND_ID_RAW, 60821e9f767SBen Widawsky .flags = 0, 60921e9f767SBen Widawsky .size_in = send_cmd->in.size, 61021e9f767SBen Widawsky .size_out = send_cmd->out.size, 61121e9f767SBen Widawsky }, 61221e9f767SBen Widawsky .opcode = send_cmd->raw.opcode 61321e9f767SBen Widawsky }; 61421e9f767SBen Widawsky 61521e9f767SBen Widawsky if (send_cmd->raw.rsvd) 61621e9f767SBen Widawsky return -EINVAL; 61721e9f767SBen Widawsky 61821e9f767SBen Widawsky /* 61921e9f767SBen Widawsky * Unlike supported commands, 
the output size of RAW commands 62021e9f767SBen Widawsky * gets passed along without further checking, so it must be 62121e9f767SBen Widawsky * validated here. 62221e9f767SBen Widawsky */ 62321e9f767SBen Widawsky if (send_cmd->out.size > cxlm->payload_size) 62421e9f767SBen Widawsky return -EINVAL; 62521e9f767SBen Widawsky 62621e9f767SBen Widawsky if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode)) 62721e9f767SBen Widawsky return -EPERM; 62821e9f767SBen Widawsky 62921e9f767SBen Widawsky memcpy(out_cmd, &temp, sizeof(temp)); 63021e9f767SBen Widawsky 63121e9f767SBen Widawsky return 0; 63221e9f767SBen Widawsky } 63321e9f767SBen Widawsky 63421e9f767SBen Widawsky if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK) 63521e9f767SBen Widawsky return -EINVAL; 63621e9f767SBen Widawsky 63721e9f767SBen Widawsky if (send_cmd->rsvd) 63821e9f767SBen Widawsky return -EINVAL; 63921e9f767SBen Widawsky 64021e9f767SBen Widawsky if (send_cmd->in.rsvd || send_cmd->out.rsvd) 64121e9f767SBen Widawsky return -EINVAL; 64221e9f767SBen Widawsky 64321e9f767SBen Widawsky /* Convert user's command into the internal representation */ 64421e9f767SBen Widawsky c = &mem_commands[send_cmd->id]; 64521e9f767SBen Widawsky info = &c->info; 64621e9f767SBen Widawsky 64721e9f767SBen Widawsky /* Check that the command is enabled for hardware */ 64821e9f767SBen Widawsky if (!test_bit(info->id, cxlm->enabled_cmds)) 64921e9f767SBen Widawsky return -ENOTTY; 65021e9f767SBen Widawsky 65121e9f767SBen Widawsky /* Check the input buffer is the expected size */ 65221e9f767SBen Widawsky if (info->size_in >= 0 && info->size_in != send_cmd->in.size) 65321e9f767SBen Widawsky return -ENOMEM; 65421e9f767SBen Widawsky 65521e9f767SBen Widawsky /* Check the output buffer is at least large enough */ 65621e9f767SBen Widawsky if (info->size_out >= 0 && send_cmd->out.size < info->size_out) 65721e9f767SBen Widawsky return -ENOMEM; 65821e9f767SBen Widawsky 65921e9f767SBen Widawsky memcpy(out_cmd, c, sizeof(*c)); 66021e9f767SBen 
Widawsky out_cmd->info.size_in = send_cmd->in.size; 66121e9f767SBen Widawsky /* 66221e9f767SBen Widawsky * XXX: out_cmd->info.size_out will be controlled by the driver, and the 66321e9f767SBen Widawsky * specified number of bytes @send_cmd->out.size will be copied back out 66421e9f767SBen Widawsky * to userspace. 66521e9f767SBen Widawsky */ 66621e9f767SBen Widawsky 66721e9f767SBen Widawsky return 0; 66821e9f767SBen Widawsky } 66921e9f767SBen Widawsky 67021e9f767SBen Widawsky static int cxl_query_cmd(struct cxl_memdev *cxlmd, 67121e9f767SBen Widawsky struct cxl_mem_query_commands __user *q) 67221e9f767SBen Widawsky { 67321e9f767SBen Widawsky struct device *dev = &cxlmd->dev; 67421e9f767SBen Widawsky struct cxl_mem_command *cmd; 67521e9f767SBen Widawsky u32 n_commands; 67621e9f767SBen Widawsky int j = 0; 67721e9f767SBen Widawsky 67821e9f767SBen Widawsky dev_dbg(dev, "Query IOCTL\n"); 67921e9f767SBen Widawsky 68021e9f767SBen Widawsky if (get_user(n_commands, &q->n_commands)) 68121e9f767SBen Widawsky return -EFAULT; 68221e9f767SBen Widawsky 68321e9f767SBen Widawsky /* returns the total number if 0 elements are requested. */ 68421e9f767SBen Widawsky if (n_commands == 0) 68521e9f767SBen Widawsky return put_user(cxl_cmd_count, &q->n_commands); 68621e9f767SBen Widawsky 68721e9f767SBen Widawsky /* 68821e9f767SBen Widawsky * otherwise, return max(n_commands, total commands) cxl_command_info 68921e9f767SBen Widawsky * structures. 
69021e9f767SBen Widawsky */ 69121e9f767SBen Widawsky cxl_for_each_cmd(cmd) { 69221e9f767SBen Widawsky const struct cxl_command_info *info = &cmd->info; 69321e9f767SBen Widawsky 69421e9f767SBen Widawsky if (copy_to_user(&q->commands[j++], info, sizeof(*info))) 69521e9f767SBen Widawsky return -EFAULT; 69621e9f767SBen Widawsky 69721e9f767SBen Widawsky if (j == n_commands) 69821e9f767SBen Widawsky break; 69921e9f767SBen Widawsky } 70021e9f767SBen Widawsky 70121e9f767SBen Widawsky return 0; 70221e9f767SBen Widawsky } 70321e9f767SBen Widawsky 70421e9f767SBen Widawsky static int cxl_send_cmd(struct cxl_memdev *cxlmd, 70521e9f767SBen Widawsky struct cxl_send_command __user *s) 70621e9f767SBen Widawsky { 70721e9f767SBen Widawsky struct cxl_mem *cxlm = cxlmd->cxlm; 70821e9f767SBen Widawsky struct device *dev = &cxlmd->dev; 70921e9f767SBen Widawsky struct cxl_send_command send; 71021e9f767SBen Widawsky struct cxl_mem_command c; 71121e9f767SBen Widawsky int rc; 71221e9f767SBen Widawsky 71321e9f767SBen Widawsky dev_dbg(dev, "Send IOCTL\n"); 71421e9f767SBen Widawsky 71521e9f767SBen Widawsky if (copy_from_user(&send, s, sizeof(send))) 71621e9f767SBen Widawsky return -EFAULT; 71721e9f767SBen Widawsky 71821e9f767SBen Widawsky rc = cxl_validate_cmd_from_user(cxlmd->cxlm, &send, &c); 71921e9f767SBen Widawsky if (rc) 72021e9f767SBen Widawsky return rc; 72121e9f767SBen Widawsky 72221e9f767SBen Widawsky /* Prepare to handle a full payload for variable sized output */ 72321e9f767SBen Widawsky if (c.info.size_out < 0) 72421e9f767SBen Widawsky c.info.size_out = cxlm->payload_size; 72521e9f767SBen Widawsky 72621e9f767SBen Widawsky rc = handle_mailbox_cmd_from_user(cxlm, &c, send.in.payload, 72721e9f767SBen Widawsky send.out.payload, &send.out.size, 72821e9f767SBen Widawsky &send.retval); 72921e9f767SBen Widawsky if (rc) 73021e9f767SBen Widawsky return rc; 73121e9f767SBen Widawsky 73221e9f767SBen Widawsky if (copy_to_user(s, &send, sizeof(send))) 73321e9f767SBen Widawsky return -EFAULT; 
73421e9f767SBen Widawsky 73521e9f767SBen Widawsky return 0; 73621e9f767SBen Widawsky } 73721e9f767SBen Widawsky 73821e9f767SBen Widawsky static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd, 73921e9f767SBen Widawsky unsigned long arg) 74021e9f767SBen Widawsky { 74121e9f767SBen Widawsky switch (cmd) { 74221e9f767SBen Widawsky case CXL_MEM_QUERY_COMMANDS: 74321e9f767SBen Widawsky return cxl_query_cmd(cxlmd, (void __user *)arg); 74421e9f767SBen Widawsky case CXL_MEM_SEND_COMMAND: 74521e9f767SBen Widawsky return cxl_send_cmd(cxlmd, (void __user *)arg); 74621e9f767SBen Widawsky default: 74721e9f767SBen Widawsky return -ENOTTY; 74821e9f767SBen Widawsky } 74921e9f767SBen Widawsky } 75021e9f767SBen Widawsky 75121e9f767SBen Widawsky static long cxl_memdev_ioctl(struct file *file, unsigned int cmd, 75221e9f767SBen Widawsky unsigned long arg) 75321e9f767SBen Widawsky { 75421e9f767SBen Widawsky struct cxl_memdev *cxlmd = file->private_data; 75521e9f767SBen Widawsky int rc = -ENXIO; 75621e9f767SBen Widawsky 75721e9f767SBen Widawsky down_read(&cxl_memdev_rwsem); 75821e9f767SBen Widawsky if (cxlmd->cxlm) 75921e9f767SBen Widawsky rc = __cxl_memdev_ioctl(cxlmd, cmd, arg); 76021e9f767SBen Widawsky up_read(&cxl_memdev_rwsem); 76121e9f767SBen Widawsky 76221e9f767SBen Widawsky return rc; 76321e9f767SBen Widawsky } 76421e9f767SBen Widawsky 76521e9f767SBen Widawsky static int cxl_memdev_open(struct inode *inode, struct file *file) 76621e9f767SBen Widawsky { 76721e9f767SBen Widawsky struct cxl_memdev *cxlmd = 76821e9f767SBen Widawsky container_of(inode->i_cdev, typeof(*cxlmd), cdev); 76921e9f767SBen Widawsky 77021e9f767SBen Widawsky get_device(&cxlmd->dev); 77121e9f767SBen Widawsky file->private_data = cxlmd; 77221e9f767SBen Widawsky 77321e9f767SBen Widawsky return 0; 77421e9f767SBen Widawsky } 77521e9f767SBen Widawsky 77621e9f767SBen Widawsky static int cxl_memdev_release_file(struct inode *inode, struct file *file) 77721e9f767SBen Widawsky { 77821e9f767SBen Widawsky 
struct cxl_memdev *cxlmd = 77921e9f767SBen Widawsky container_of(inode->i_cdev, typeof(*cxlmd), cdev); 78021e9f767SBen Widawsky 78121e9f767SBen Widawsky put_device(&cxlmd->dev); 78221e9f767SBen Widawsky 78321e9f767SBen Widawsky return 0; 78421e9f767SBen Widawsky } 78521e9f767SBen Widawsky 7869cc238c7SDan Williams static void cxl_memdev_shutdown(struct device *dev) 7879cc238c7SDan Williams { 7889cc238c7SDan Williams struct cxl_memdev *cxlmd = to_cxl_memdev(dev); 7899cc238c7SDan Williams 7909cc238c7SDan Williams down_write(&cxl_memdev_rwsem); 7919cc238c7SDan Williams cxlmd->cxlm = NULL; 7929cc238c7SDan Williams up_write(&cxl_memdev_rwsem); 7939cc238c7SDan Williams } 7949cc238c7SDan Williams 7959cc238c7SDan Williams static const struct cdevm_file_operations cxl_memdev_fops = { 7969cc238c7SDan Williams .fops = { 79721e9f767SBen Widawsky .owner = THIS_MODULE, 79821e9f767SBen Widawsky .unlocked_ioctl = cxl_memdev_ioctl, 79921e9f767SBen Widawsky .open = cxl_memdev_open, 80021e9f767SBen Widawsky .release = cxl_memdev_release_file, 80121e9f767SBen Widawsky .compat_ioctl = compat_ptr_ioctl, 80221e9f767SBen Widawsky .llseek = noop_llseek, 8039cc238c7SDan Williams }, 8049cc238c7SDan Williams .shutdown = cxl_memdev_shutdown, 80521e9f767SBen Widawsky }; 80621e9f767SBen Widawsky 80721e9f767SBen Widawsky static inline struct cxl_mem_command *cxl_mem_find_command(u16 opcode) 80821e9f767SBen Widawsky { 80921e9f767SBen Widawsky struct cxl_mem_command *c; 81021e9f767SBen Widawsky 81121e9f767SBen Widawsky cxl_for_each_cmd(c) 81221e9f767SBen Widawsky if (c->opcode == opcode) 81321e9f767SBen Widawsky return c; 81421e9f767SBen Widawsky 81521e9f767SBen Widawsky return NULL; 81621e9f767SBen Widawsky } 81721e9f767SBen Widawsky 81821e9f767SBen Widawsky /** 81921e9f767SBen Widawsky * cxl_mem_mbox_send_cmd() - Send a mailbox command to a memory device. 82021e9f767SBen Widawsky * @cxlm: The CXL memory device to communicate with. 82121e9f767SBen Widawsky * @opcode: Opcode for the mailbox command. 
82221e9f767SBen Widawsky * @in: The input payload for the mailbox command. 82321e9f767SBen Widawsky * @in_size: The length of the input payload 82421e9f767SBen Widawsky * @out: Caller allocated buffer for the output. 82521e9f767SBen Widawsky * @out_size: Expected size of output. 82621e9f767SBen Widawsky * 82721e9f767SBen Widawsky * Context: Any context. Will acquire and release mbox_mutex. 82821e9f767SBen Widawsky * Return: 82921e9f767SBen Widawsky * * %>=0 - Number of bytes returned in @out. 83021e9f767SBen Widawsky * * %-E2BIG - Payload is too large for hardware. 83121e9f767SBen Widawsky * * %-EBUSY - Couldn't acquire exclusive mailbox access. 83221e9f767SBen Widawsky * * %-EFAULT - Hardware error occurred. 83321e9f767SBen Widawsky * * %-ENXIO - Command completed, but device reported an error. 83421e9f767SBen Widawsky * * %-EIO - Unexpected output size. 83521e9f767SBen Widawsky * 83621e9f767SBen Widawsky * Mailbox commands may execute successfully yet the device itself reported an 83721e9f767SBen Widawsky * error. While this distinction can be useful for commands from userspace, the 83821e9f767SBen Widawsky * kernel will only be able to use results when both are successful. 
83921e9f767SBen Widawsky * 84021e9f767SBen Widawsky * See __cxl_mem_mbox_send_cmd() 84121e9f767SBen Widawsky */ 84221e9f767SBen Widawsky static int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, 84321e9f767SBen Widawsky void *in, size_t in_size, 84421e9f767SBen Widawsky void *out, size_t out_size) 84521e9f767SBen Widawsky { 84621e9f767SBen Widawsky const struct cxl_mem_command *cmd = cxl_mem_find_command(opcode); 847*b64955a9SDan Williams struct cxl_mbox_cmd mbox_cmd = { 84821e9f767SBen Widawsky .opcode = opcode, 84921e9f767SBen Widawsky .payload_in = in, 85021e9f767SBen Widawsky .size_in = in_size, 85121e9f767SBen Widawsky .size_out = out_size, 85221e9f767SBen Widawsky .payload_out = out, 85321e9f767SBen Widawsky }; 85421e9f767SBen Widawsky int rc; 85521e9f767SBen Widawsky 85621e9f767SBen Widawsky if (out_size > cxlm->payload_size) 85721e9f767SBen Widawsky return -E2BIG; 85821e9f767SBen Widawsky 859*b64955a9SDan Williams rc = cxlm->mbox_send(cxlm, &mbox_cmd); 86021e9f767SBen Widawsky if (rc) 86121e9f767SBen Widawsky return rc; 86221e9f767SBen Widawsky 86321e9f767SBen Widawsky /* TODO: Map return code to proper kernel style errno */ 86421e9f767SBen Widawsky if (mbox_cmd.return_code != CXL_MBOX_SUCCESS) 86521e9f767SBen Widawsky return -ENXIO; 86621e9f767SBen Widawsky 86721e9f767SBen Widawsky /* 86821e9f767SBen Widawsky * Variable sized commands can't be validated and so it's up to the 86921e9f767SBen Widawsky * caller to do that if they wish. 
87021e9f767SBen Widawsky */ 87121e9f767SBen Widawsky if (cmd->info.size_out >= 0 && mbox_cmd.size_out != out_size) 87221e9f767SBen Widawsky return -EIO; 87321e9f767SBen Widawsky 87421e9f767SBen Widawsky return 0; 87521e9f767SBen Widawsky } 87621e9f767SBen Widawsky 87721e9f767SBen Widawsky static int cxl_mem_setup_mailbox(struct cxl_mem *cxlm) 87821e9f767SBen Widawsky { 87921e9f767SBen Widawsky const int cap = readl(cxlm->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET); 88021e9f767SBen Widawsky 881*b64955a9SDan Williams cxlm->mbox_send = cxl_pci_mbox_send; 88221e9f767SBen Widawsky cxlm->payload_size = 88321e9f767SBen Widawsky 1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap); 88421e9f767SBen Widawsky 88521e9f767SBen Widawsky /* 88621e9f767SBen Widawsky * CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register 88721e9f767SBen Widawsky * 88821e9f767SBen Widawsky * If the size is too small, mandatory commands will not work and so 88921e9f767SBen Widawsky * there's no point in going forward. If the size is too large, there's 89021e9f767SBen Widawsky * no harm is soft limiting it. 
89121e9f767SBen Widawsky */ 89221e9f767SBen Widawsky cxlm->payload_size = min_t(size_t, cxlm->payload_size, SZ_1M); 89321e9f767SBen Widawsky if (cxlm->payload_size < 256) { 89499e222a5SDan Williams dev_err(cxlm->dev, "Mailbox is too small (%zub)", 89521e9f767SBen Widawsky cxlm->payload_size); 89621e9f767SBen Widawsky return -ENXIO; 89721e9f767SBen Widawsky } 89821e9f767SBen Widawsky 89999e222a5SDan Williams dev_dbg(cxlm->dev, "Mailbox payload sized %zu", 90021e9f767SBen Widawsky cxlm->payload_size); 90121e9f767SBen Widawsky 90221e9f767SBen Widawsky return 0; 90321e9f767SBen Widawsky } 90421e9f767SBen Widawsky 90599e222a5SDan Williams static struct cxl_mem *cxl_mem_create(struct device *dev) 90621e9f767SBen Widawsky { 90721e9f767SBen Widawsky struct cxl_mem *cxlm; 90821e9f767SBen Widawsky 9095d0c6f02SBen Widawsky cxlm = devm_kzalloc(dev, sizeof(*cxlm), GFP_KERNEL); 91021e9f767SBen Widawsky if (!cxlm) { 91121e9f767SBen Widawsky dev_err(dev, "No memory available\n"); 9121b0a1a2aSBen Widawsky return ERR_PTR(-ENOMEM); 91321e9f767SBen Widawsky } 91421e9f767SBen Widawsky 9151b0a1a2aSBen Widawsky mutex_init(&cxlm->mbox_mutex); 91699e222a5SDan Williams cxlm->dev = dev; 9171b0a1a2aSBen Widawsky cxlm->enabled_cmds = 9181b0a1a2aSBen Widawsky devm_kmalloc_array(dev, BITS_TO_LONGS(cxl_cmd_count), 9191b0a1a2aSBen Widawsky sizeof(unsigned long), 9201b0a1a2aSBen Widawsky GFP_KERNEL | __GFP_ZERO); 9211b0a1a2aSBen Widawsky if (!cxlm->enabled_cmds) { 9221b0a1a2aSBen Widawsky dev_err(dev, "No memory available for bitmap\n"); 9231b0a1a2aSBen Widawsky return ERR_PTR(-ENOMEM); 9241b0a1a2aSBen Widawsky } 9251b0a1a2aSBen Widawsky 9261b0a1a2aSBen Widawsky return cxlm; 9271b0a1a2aSBen Widawsky } 9281b0a1a2aSBen Widawsky 92907d62eacSIra Weiny static void __iomem *cxl_mem_map_regblock(struct cxl_mem *cxlm, 93007d62eacSIra Weiny u8 bar, u64 offset) 9311b0a1a2aSBen Widawsky { 932f8a7e8c2SIra Weiny void __iomem *addr; 93399e222a5SDan Williams struct device *dev = cxlm->dev; 93499e222a5SDan 
Williams struct pci_dev *pdev = to_pci_dev(dev); 9351b0a1a2aSBen Widawsky 93621e9f767SBen Widawsky /* Basic sanity check that BAR is big enough */ 93721e9f767SBen Widawsky if (pci_resource_len(pdev, bar) < offset) { 93821e9f767SBen Widawsky dev_err(dev, "BAR%d: %pr: too small (offset: %#llx)\n", bar, 93921e9f767SBen Widawsky &pdev->resource[bar], (unsigned long long)offset); 9406630d31cSBen Widawsky return IOMEM_ERR_PTR(-ENXIO); 94121e9f767SBen Widawsky } 94221e9f767SBen Widawsky 94330af9729SIra Weiny addr = pci_iomap(pdev, bar, 0); 944f8a7e8c2SIra Weiny if (!addr) { 94521e9f767SBen Widawsky dev_err(dev, "failed to map registers\n"); 946f8a7e8c2SIra Weiny return addr; 94721e9f767SBen Widawsky } 94821e9f767SBen Widawsky 949f8a7e8c2SIra Weiny dev_dbg(dev, "Mapped CXL Memory Device resource bar %u @ %#llx\n", 950f8a7e8c2SIra Weiny bar, offset); 9516630d31cSBen Widawsky 95230af9729SIra Weiny return addr; 95330af9729SIra Weiny } 95430af9729SIra Weiny 95530af9729SIra Weiny static void cxl_mem_unmap_regblock(struct cxl_mem *cxlm, void __iomem *base) 95630af9729SIra Weiny { 95799e222a5SDan Williams pci_iounmap(to_pci_dev(cxlm->dev), base); 95821e9f767SBen Widawsky } 95921e9f767SBen Widawsky 96021e9f767SBen Widawsky static int cxl_mem_dvsec(struct pci_dev *pdev, int dvsec) 96121e9f767SBen Widawsky { 96221e9f767SBen Widawsky int pos; 96321e9f767SBen Widawsky 96421e9f767SBen Widawsky pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DVSEC); 96521e9f767SBen Widawsky if (!pos) 96621e9f767SBen Widawsky return 0; 96721e9f767SBen Widawsky 96821e9f767SBen Widawsky while (pos) { 96921e9f767SBen Widawsky u16 vendor, id; 97021e9f767SBen Widawsky 97121e9f767SBen Widawsky pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER1, &vendor); 97221e9f767SBen Widawsky pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER2, &id); 97321e9f767SBen Widawsky if (vendor == PCI_DVSEC_VENDOR_ID_CXL && dvsec == id) 97421e9f767SBen Widawsky return pos; 97521e9f767SBen Widawsky 97621e9f767SBen Widawsky pos = 
pci_find_next_ext_capability(pdev, pos, 97721e9f767SBen Widawsky PCI_EXT_CAP_ID_DVSEC); 97821e9f767SBen Widawsky } 97921e9f767SBen Widawsky 98021e9f767SBen Widawsky return 0; 98121e9f767SBen Widawsky } 98221e9f767SBen Widawsky 98330af9729SIra Weiny static int cxl_probe_regs(struct cxl_mem *cxlm, void __iomem *base, 98430af9729SIra Weiny struct cxl_register_map *map) 98530af9729SIra Weiny { 98608422378SBen Widawsky struct cxl_component_reg_map *comp_map; 98730af9729SIra Weiny struct cxl_device_reg_map *dev_map; 98899e222a5SDan Williams struct device *dev = cxlm->dev; 98930af9729SIra Weiny 99030af9729SIra Weiny switch (map->reg_type) { 99108422378SBen Widawsky case CXL_REGLOC_RBI_COMPONENT: 99208422378SBen Widawsky comp_map = &map->component_map; 99308422378SBen Widawsky cxl_probe_component_regs(dev, base, comp_map); 99408422378SBen Widawsky if (!comp_map->hdm_decoder.valid) { 99508422378SBen Widawsky dev_err(dev, "HDM decoder registers not found\n"); 99608422378SBen Widawsky return -ENXIO; 99708422378SBen Widawsky } 99808422378SBen Widawsky 99908422378SBen Widawsky dev_dbg(dev, "Set up component registers\n"); 100008422378SBen Widawsky break; 100130af9729SIra Weiny case CXL_REGLOC_RBI_MEMDEV: 100230af9729SIra Weiny dev_map = &map->device_map; 100330af9729SIra Weiny cxl_probe_device_regs(dev, base, dev_map); 100430af9729SIra Weiny if (!dev_map->status.valid || !dev_map->mbox.valid || 100530af9729SIra Weiny !dev_map->memdev.valid) { 100630af9729SIra Weiny dev_err(dev, "registers not found: %s%s%s\n", 100730af9729SIra Weiny !dev_map->status.valid ? "status " : "", 1008da582aa5SLi Qiang (Johnny Li) !dev_map->mbox.valid ? "mbox " : "", 1009da582aa5SLi Qiang (Johnny Li) !dev_map->memdev.valid ? 
"memdev " : ""); 101030af9729SIra Weiny return -ENXIO; 101130af9729SIra Weiny } 101230af9729SIra Weiny 101330af9729SIra Weiny dev_dbg(dev, "Probing device registers...\n"); 101430af9729SIra Weiny break; 101530af9729SIra Weiny default: 101630af9729SIra Weiny break; 101730af9729SIra Weiny } 101830af9729SIra Weiny 101930af9729SIra Weiny return 0; 102030af9729SIra Weiny } 102130af9729SIra Weiny 102230af9729SIra Weiny static int cxl_map_regs(struct cxl_mem *cxlm, struct cxl_register_map *map) 102330af9729SIra Weiny { 102499e222a5SDan Williams struct device *dev = cxlm->dev; 102599e222a5SDan Williams struct pci_dev *pdev = to_pci_dev(dev); 102630af9729SIra Weiny 102730af9729SIra Weiny switch (map->reg_type) { 102808422378SBen Widawsky case CXL_REGLOC_RBI_COMPONENT: 102908422378SBen Widawsky cxl_map_component_regs(pdev, &cxlm->regs.component, map); 103008422378SBen Widawsky dev_dbg(dev, "Mapping component registers...\n"); 103108422378SBen Widawsky break; 103230af9729SIra Weiny case CXL_REGLOC_RBI_MEMDEV: 103330af9729SIra Weiny cxl_map_device_regs(pdev, &cxlm->regs.device_regs, map); 103430af9729SIra Weiny dev_dbg(dev, "Probing device registers...\n"); 103530af9729SIra Weiny break; 103630af9729SIra Weiny default: 103730af9729SIra Weiny break; 103830af9729SIra Weiny } 103930af9729SIra Weiny 104030af9729SIra Weiny return 0; 104130af9729SIra Weiny } 104230af9729SIra Weiny 104307d62eacSIra Weiny static void cxl_decode_register_block(u32 reg_lo, u32 reg_hi, 104407d62eacSIra Weiny u8 *bar, u64 *offset, u8 *reg_type) 104507d62eacSIra Weiny { 104607d62eacSIra Weiny *offset = ((u64)reg_hi << 32) | (reg_lo & CXL_REGLOC_ADDR_MASK); 104707d62eacSIra Weiny *bar = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo); 104807d62eacSIra Weiny *reg_type = FIELD_GET(CXL_REGLOC_RBI_MASK, reg_lo); 104907d62eacSIra Weiny } 105007d62eacSIra Weiny 10511d5a4159SBen Widawsky /** 10521d5a4159SBen Widawsky * cxl_mem_setup_regs() - Setup necessary MMIO. 
10531d5a4159SBen Widawsky * @cxlm: The CXL memory device to communicate with. 10541d5a4159SBen Widawsky * 10551d5a4159SBen Widawsky * Return: 0 if all necessary registers mapped. 10561d5a4159SBen Widawsky * 10571d5a4159SBen Widawsky * A memory device is required by spec to implement a certain set of MMIO 10581d5a4159SBen Widawsky * regions. The purpose of this function is to enumerate and map those 10591d5a4159SBen Widawsky * registers. 10601d5a4159SBen Widawsky */ 10611d5a4159SBen Widawsky static int cxl_mem_setup_regs(struct cxl_mem *cxlm) 10621d5a4159SBen Widawsky { 10636630d31cSBen Widawsky void __iomem *base; 106499e222a5SDan Williams u32 regloc_size, regblocks; 106599e222a5SDan Williams int regloc, i, n_maps, ret = 0; 106699e222a5SDan Williams struct device *dev = cxlm->dev; 106799e222a5SDan Williams struct pci_dev *pdev = to_pci_dev(dev); 10685b68705dSBen Widawsky struct cxl_register_map *map, maps[CXL_REGLOC_RBI_TYPES]; 10691d5a4159SBen Widawsky 10704ad6181eSBen Widawsky regloc = cxl_mem_dvsec(pdev, PCI_DVSEC_ID_CXL_REGLOC_DVSEC_ID); 10711d5a4159SBen Widawsky if (!regloc) { 10721d5a4159SBen Widawsky dev_err(dev, "register location dvsec not found\n"); 10731d5a4159SBen Widawsky return -ENXIO; 10741d5a4159SBen Widawsky } 10751d5a4159SBen Widawsky 1076f8a7e8c2SIra Weiny if (pci_request_mem_regions(pdev, pci_name(pdev))) 1077f8a7e8c2SIra Weiny return -ENODEV; 1078f8a7e8c2SIra Weiny 10791d5a4159SBen Widawsky /* Get the size of the Register Locator DVSEC */ 10801d5a4159SBen Widawsky pci_read_config_dword(pdev, regloc + PCI_DVSEC_HEADER1, ®loc_size); 10811d5a4159SBen Widawsky regloc_size = FIELD_GET(PCI_DVSEC_HEADER1_LENGTH_MASK, regloc_size); 10821d5a4159SBen Widawsky 10831d5a4159SBen Widawsky regloc += PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET; 10841d5a4159SBen Widawsky regblocks = (regloc_size - PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET) / 8; 10851d5a4159SBen Widawsky 10865b68705dSBen Widawsky for (i = 0, n_maps = 0; i < regblocks; i++, regloc += 8) { 10871d5a4159SBen 
Widawsky u32 reg_lo, reg_hi; 10881d5a4159SBen Widawsky u8 reg_type; 108907d62eacSIra Weiny u64 offset; 109007d62eacSIra Weiny u8 bar; 10911d5a4159SBen Widawsky 10921d5a4159SBen Widawsky pci_read_config_dword(pdev, regloc, ®_lo); 10931d5a4159SBen Widawsky pci_read_config_dword(pdev, regloc + 4, ®_hi); 10941d5a4159SBen Widawsky 109507d62eacSIra Weiny cxl_decode_register_block(reg_lo, reg_hi, &bar, &offset, 109607d62eacSIra Weiny ®_type); 109707d62eacSIra Weiny 109807d62eacSIra Weiny dev_dbg(dev, "Found register block in bar %u @ 0x%llx of type %u\n", 109907d62eacSIra Weiny bar, offset, reg_type); 11001d5a4159SBen Widawsky 11011e39db57SBen Widawsky /* Ignore unknown register block types */ 11021e39db57SBen Widawsky if (reg_type > CXL_REGLOC_RBI_MEMDEV) 11031e39db57SBen Widawsky continue; 11041e39db57SBen Widawsky 110507d62eacSIra Weiny base = cxl_mem_map_regblock(cxlm, bar, offset); 11065b68705dSBen Widawsky if (!base) 11075b68705dSBen Widawsky return -ENOMEM; 11081d5a4159SBen Widawsky 11095b68705dSBen Widawsky map = &maps[n_maps]; 111030af9729SIra Weiny map->barno = bar; 111130af9729SIra Weiny map->block_offset = offset; 111230af9729SIra Weiny map->reg_type = reg_type; 111330af9729SIra Weiny 111430af9729SIra Weiny ret = cxl_probe_regs(cxlm, base + offset, map); 111530af9729SIra Weiny 111630af9729SIra Weiny /* Always unmap the regblock regardless of probe success */ 111730af9729SIra Weiny cxl_mem_unmap_regblock(cxlm, base); 111830af9729SIra Weiny 111930af9729SIra Weiny if (ret) 11205b68705dSBen Widawsky return ret; 11215b68705dSBen Widawsky 11225b68705dSBen Widawsky n_maps++; 11231d5a4159SBen Widawsky } 11241d5a4159SBen Widawsky 11259a016527SIra Weiny pci_release_mem_regions(pdev); 11269a016527SIra Weiny 11275b68705dSBen Widawsky for (i = 0; i < n_maps; i++) { 11285b68705dSBen Widawsky ret = cxl_map_regs(cxlm, &maps[i]); 112930af9729SIra Weiny if (ret) 11305b68705dSBen Widawsky break; 113130af9729SIra Weiny } 113230af9729SIra Weiny 113330af9729SIra Weiny return ret; 
11341d5a4159SBen Widawsky } 11351d5a4159SBen Widawsky 113621e9f767SBen Widawsky static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out) 113721e9f767SBen Widawsky { 113821e9f767SBen Widawsky u32 remaining = size; 113921e9f767SBen Widawsky u32 offset = 0; 114021e9f767SBen Widawsky 114121e9f767SBen Widawsky while (remaining) { 114221e9f767SBen Widawsky u32 xfer_size = min_t(u32, remaining, cxlm->payload_size); 114321e9f767SBen Widawsky struct cxl_mbox_get_log { 114421e9f767SBen Widawsky uuid_t uuid; 114521e9f767SBen Widawsky __le32 offset; 114621e9f767SBen Widawsky __le32 length; 114721e9f767SBen Widawsky } __packed log = { 114821e9f767SBen Widawsky .uuid = *uuid, 114921e9f767SBen Widawsky .offset = cpu_to_le32(offset), 115021e9f767SBen Widawsky .length = cpu_to_le32(xfer_size) 115121e9f767SBen Widawsky }; 115221e9f767SBen Widawsky int rc; 115321e9f767SBen Widawsky 115421e9f767SBen Widawsky rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_LOG, &log, 115521e9f767SBen Widawsky sizeof(log), out, xfer_size); 115621e9f767SBen Widawsky if (rc < 0) 115721e9f767SBen Widawsky return rc; 115821e9f767SBen Widawsky 115921e9f767SBen Widawsky out += xfer_size; 116021e9f767SBen Widawsky remaining -= xfer_size; 116121e9f767SBen Widawsky offset += xfer_size; 116221e9f767SBen Widawsky } 116321e9f767SBen Widawsky 116421e9f767SBen Widawsky return 0; 116521e9f767SBen Widawsky } 116621e9f767SBen Widawsky 116721e9f767SBen Widawsky /** 116821e9f767SBen Widawsky * cxl_walk_cel() - Walk through the Command Effects Log. 116921e9f767SBen Widawsky * @cxlm: Device. 117021e9f767SBen Widawsky * @size: Length of the Command Effects Log. 117121e9f767SBen Widawsky * @cel: CEL 117221e9f767SBen Widawsky * 117321e9f767SBen Widawsky * Iterate over each entry in the CEL and determine if the driver supports the 117421e9f767SBen Widawsky * command. If so, the command is enabled for the device and can be used later. 
117521e9f767SBen Widawsky */ 117621e9f767SBen Widawsky static void cxl_walk_cel(struct cxl_mem *cxlm, size_t size, u8 *cel) 117721e9f767SBen Widawsky { 117821e9f767SBen Widawsky struct cel_entry { 117921e9f767SBen Widawsky __le16 opcode; 118021e9f767SBen Widawsky __le16 effect; 118121e9f767SBen Widawsky } __packed * cel_entry; 118221e9f767SBen Widawsky const int cel_entries = size / sizeof(*cel_entry); 118321e9f767SBen Widawsky int i; 118421e9f767SBen Widawsky 118521e9f767SBen Widawsky cel_entry = (struct cel_entry *)cel; 118621e9f767SBen Widawsky 118721e9f767SBen Widawsky for (i = 0; i < cel_entries; i++) { 118821e9f767SBen Widawsky u16 opcode = le16_to_cpu(cel_entry[i].opcode); 118921e9f767SBen Widawsky struct cxl_mem_command *cmd = cxl_mem_find_command(opcode); 119021e9f767SBen Widawsky 119121e9f767SBen Widawsky if (!cmd) { 119299e222a5SDan Williams dev_dbg(cxlm->dev, 119321e9f767SBen Widawsky "Opcode 0x%04x unsupported by driver", opcode); 119421e9f767SBen Widawsky continue; 119521e9f767SBen Widawsky } 119621e9f767SBen Widawsky 119721e9f767SBen Widawsky set_bit(cmd->info.id, cxlm->enabled_cmds); 119821e9f767SBen Widawsky } 119921e9f767SBen Widawsky } 120021e9f767SBen Widawsky 120121e9f767SBen Widawsky struct cxl_mbox_get_supported_logs { 120221e9f767SBen Widawsky __le16 entries; 120321e9f767SBen Widawsky u8 rsvd[6]; 120421e9f767SBen Widawsky struct gsl_entry { 120521e9f767SBen Widawsky uuid_t uuid; 120621e9f767SBen Widawsky __le32 size; 120721e9f767SBen Widawsky } __packed entry[]; 120821e9f767SBen Widawsky } __packed; 120921e9f767SBen Widawsky 121021e9f767SBen Widawsky static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_mem *cxlm) 121121e9f767SBen Widawsky { 121221e9f767SBen Widawsky struct cxl_mbox_get_supported_logs *ret; 121321e9f767SBen Widawsky int rc; 121421e9f767SBen Widawsky 121521e9f767SBen Widawsky ret = kvmalloc(cxlm->payload_size, GFP_KERNEL); 121621e9f767SBen Widawsky if (!ret) 121721e9f767SBen Widawsky return ERR_PTR(-ENOMEM); 
121821e9f767SBen Widawsky 121921e9f767SBen Widawsky rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL, 122021e9f767SBen Widawsky 0, ret, cxlm->payload_size); 122121e9f767SBen Widawsky if (rc < 0) { 122221e9f767SBen Widawsky kvfree(ret); 122321e9f767SBen Widawsky return ERR_PTR(rc); 122421e9f767SBen Widawsky } 122521e9f767SBen Widawsky 122621e9f767SBen Widawsky return ret; 122721e9f767SBen Widawsky } 122821e9f767SBen Widawsky 122921e9f767SBen Widawsky /** 1230f847502aSIra Weiny * cxl_mem_get_partition_info - Get partition info 123113e7749dSDan Williams * @cxlm: cxl_mem instance to update partition info 1232f847502aSIra Weiny * 1233f847502aSIra Weiny * Retrieve the current partition info for the device specified. If not 0, the 1234f847502aSIra Weiny * 'next' values are pending and take affect on next cold reset. 1235f847502aSIra Weiny * 1236f847502aSIra Weiny * Return: 0 if no error: or the result of the mailbox command. 1237f847502aSIra Weiny * 1238f847502aSIra Weiny * See CXL @8.2.9.5.2.1 Get Partition Info 1239f847502aSIra Weiny */ 124013e7749dSDan Williams static int cxl_mem_get_partition_info(struct cxl_mem *cxlm) 1241f847502aSIra Weiny { 1242f847502aSIra Weiny struct cxl_mbox_get_partition_info { 1243f847502aSIra Weiny __le64 active_volatile_cap; 1244f847502aSIra Weiny __le64 active_persistent_cap; 1245f847502aSIra Weiny __le64 next_volatile_cap; 1246f847502aSIra Weiny __le64 next_persistent_cap; 1247f847502aSIra Weiny } __packed pi; 1248f847502aSIra Weiny int rc; 1249f847502aSIra Weiny 1250f847502aSIra Weiny rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_PARTITION_INFO, 1251f847502aSIra Weiny NULL, 0, &pi, sizeof(pi)); 1252f847502aSIra Weiny if (rc) 1253f847502aSIra Weiny return rc; 1254f847502aSIra Weiny 125513e7749dSDan Williams cxlm->active_volatile_bytes = 125613e7749dSDan Williams le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER; 125713e7749dSDan Williams cxlm->active_persistent_bytes = 125813e7749dSDan Williams 
le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER; 125913e7749dSDan Williams cxlm->next_volatile_bytes = 126013e7749dSDan Williams le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER; 126113e7749dSDan Williams cxlm->next_persistent_bytes = 126213e7749dSDan Williams le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER; 1263f847502aSIra Weiny 1264f847502aSIra Weiny return 0; 1265f847502aSIra Weiny } 1266f847502aSIra Weiny 1267f847502aSIra Weiny /** 126821e9f767SBen Widawsky * cxl_mem_enumerate_cmds() - Enumerate commands for a device. 126921e9f767SBen Widawsky * @cxlm: The device. 127021e9f767SBen Widawsky * 127121e9f767SBen Widawsky * Returns 0 if enumerate completed successfully. 127221e9f767SBen Widawsky * 127321e9f767SBen Widawsky * CXL devices have optional support for certain commands. This function will 127421e9f767SBen Widawsky * determine the set of supported commands for the hardware and update the 127521e9f767SBen Widawsky * enabled_cmds bitmap in the @cxlm. 
 */
static int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm)
{
	struct cxl_mbox_get_supported_logs *gsl;
	struct device *dev = cxlm->dev;
	struct cxl_mem_command *cmd;
	int i, rc;

	gsl = cxl_get_gsl(cxlm);
	if (IS_ERR(gsl))
		return PTR_ERR(gsl);

	/*
	 * Default to -ENOENT: the result is an error unless a Command
	 * Effects Log (CEL) entry is found in the device's log list.
	 */
	rc = -ENOENT;
	for (i = 0; i < le16_to_cpu(gsl->entries); i++) {
		u32 size = le32_to_cpu(gsl->entry[i].size);
		uuid_t uuid = gsl->entry[i].uuid;
		u8 *log;

		dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size);

		/* Only the CEL is consulted for command enumeration */
		if (!uuid_equal(&uuid, &log_uuid[CEL_UUID]))
			continue;

		/* kvmalloc: @size is device-provided and may be large */
		log = kvmalloc(size, GFP_KERNEL);
		if (!log) {
			rc = -ENOMEM;
			goto out;
		}

		rc = cxl_xfer_log(cxlm, &uuid, size, log);
		if (rc) {
			kvfree(log);
			goto out;
		}

		cxl_walk_cel(cxlm, size, log);
		kvfree(log);

		/* In case CEL was bogus, enable some default commands. */
		cxl_for_each_cmd(cmd)
			if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
				set_bit(cmd->info.id, cxlm->enabled_cmds);

		/* Found the required CEL */
		rc = 0;
	}

out:
	/* @gsl was allocated by cxl_get_gsl(); ownership ends here */
	kvfree(gsl);
	return rc;
}

/**
 * cxl_mem_identify() - Send the IDENTIFY command to the device.
 * @cxlm: The device to identify.
 *
 * Return: 0 if identify was executed successfully.
 *
 * This will dispatch the identify command to the device and on success populate
 * structures to be exported to sysfs.
 */
static int cxl_mem_identify(struct cxl_mem *cxlm)
{
	/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
	struct cxl_mbox_identify {
		char fw_revision[0x10];
		__le64 total_capacity;
		__le64 volatile_capacity;
		__le64 persistent_capacity;
		__le64 partition_align;
		__le16 info_event_log_size;
		__le16 warning_event_log_size;
		__le16 failure_event_log_size;
		__le16 fatal_event_log_size;
		__le32 lsa_size;
		u8 poison_list_max_mer[3];
		__le16 inject_poison_limit;
		u8 poison_caps;
		u8 qos_telemetry_caps;
	} __packed id;
	int rc;

	rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id,
				   sizeof(id));
	if (rc < 0)
		return rc;

	/*
	 * The capacity fields in the payload are expressed in multiples of
	 * CXL_CAPACITY_MULTIPLIER; scale them to plain byte counts.
	 */
	cxlm->total_bytes = le64_to_cpu(id.total_capacity);
	cxlm->total_bytes *= CXL_CAPACITY_MULTIPLIER;

	cxlm->volatile_only_bytes = le64_to_cpu(id.volatile_capacity);
	cxlm->volatile_only_bytes *= CXL_CAPACITY_MULTIPLIER;

	cxlm->persistent_only_bytes = le64_to_cpu(id.persistent_capacity);
	cxlm->persistent_only_bytes *= CXL_CAPACITY_MULTIPLIER;

	cxlm->partition_align_bytes = le64_to_cpu(id.partition_align);
	cxlm->partition_align_bytes *= CXL_CAPACITY_MULTIPLIER;

	dev_dbg(cxlm->dev,
		"Identify Memory Device\n"
		" total_bytes = %#llx\n"
		" volatile_only_bytes = %#llx\n"
		" persistent_only_bytes = %#llx\n"
		" partition_align_bytes = %#llx\n",
		cxlm->total_bytes, cxlm->volatile_only_bytes,
		cxlm->persistent_only_bytes, cxlm->partition_align_bytes);

	cxlm->lsa_size = le32_to_cpu(id.lsa_size);
	/* fw_revision is fixed-width, not necessarily NUL-terminated */
	memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision));

	return 0;
}

/**
 * cxl_mem_create_range_info() - Populate the RAM and PMEM address ranges.
 * @cxlm: The device whose ranges are being established.
 *
 * Lays out the device's capacity as a volatile (ram) range starting at 0
 * followed immediately by a persistent (pmem) range. When the device does
 * not support partitioning (partition_align_bytes == 0) the static
 * volatile/persistent capacities from IDENTIFY are used; otherwise the
 * active capacities are queried via GET_PARTITION_INFO.
 *
 * Return: 0 on success, negative error code if the partition query fails.
 */
static int cxl_mem_create_range_info(struct cxl_mem *cxlm)
{
	int rc;

	if (cxlm->partition_align_bytes == 0) {
		/* Non-partitionable device: capacities are fixed */
		cxlm->ram_range.start = 0;
		cxlm->ram_range.end = cxlm->volatile_only_bytes - 1;
		cxlm->pmem_range.start = cxlm->volatile_only_bytes;
		cxlm->pmem_range.end = cxlm->volatile_only_bytes +
				       cxlm->persistent_only_bytes - 1;
		return 0;
	}

	/*
	 * NOTE(review): cxl_mem_get_partition_info() (above) appears to
	 * populate next_persistent_bytes from pi.next_volatile_cap rather
	 * than pi.next_persistent_cap — looks like a copy/paste error;
	 * confirm against the CXL 2.0 Get Partition Info payload layout.
	 */
	rc = cxl_mem_get_partition_info(cxlm);
	if (rc < 0) {
		dev_err(cxlm->dev, "Failed to query partition information\n");
		return rc;
	}

	dev_dbg(cxlm->dev,
		"Get Partition Info\n"
		" active_volatile_bytes = %#llx\n"
		" active_persistent_bytes = %#llx\n"
		" next_volatile_bytes = %#llx\n"
		" next_persistent_bytes = %#llx\n",
		cxlm->active_volatile_bytes, cxlm->active_persistent_bytes,
		cxlm->next_volatile_bytes, cxlm->next_persistent_bytes);

	/* ram occupies [0, active_volatile), pmem follows contiguously */
	cxlm->ram_range.start = 0;
	cxlm->ram_range.end = cxlm->active_volatile_bytes - 1;

	cxlm->pmem_range.start = cxlm->active_volatile_bytes;
	cxlm->pmem_range.end = cxlm->active_volatile_bytes +
			       cxlm->active_persistent_bytes - 1;

	return 0;
}

/**
 * cxl_mem_probe() - PCI probe for a CXL memory expander.
 * @pdev: The candidate PCI device.
 * @id: Matching entry from cxl_mem_pci_tbl.
 *
 * Enables the device, maps its register interface, brings up the mailbox,
 * enumerates supported commands, identifies capacities, and finally
 * registers the memX char device (and an nvdimm bridge when a pmem range
 * exists). All registrations are devm-managed, so no explicit unwind is
 * needed on failure or at .remove time.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct cxl_memdev *cxlmd;
	struct cxl_mem *cxlm;
	int rc;

	/* pcim_*: device is disabled automatically on driver detach */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	cxlm = cxl_mem_create(&pdev->dev);
	if (IS_ERR(cxlm))
		return PTR_ERR(cxlm);

	rc = cxl_mem_setup_regs(cxlm);
	if (rc)
		return rc;

	rc = cxl_mem_setup_mailbox(cxlm);
	if (rc)
		return rc;

	rc = cxl_mem_enumerate_cmds(cxlm);
	if (rc)
		return rc;

	rc = cxl_mem_identify(cxlm);
	if (rc)
		return rc;

	rc = cxl_mem_create_range_info(cxlm);
	if (rc)
		return rc;

	cxlmd = devm_cxl_add_memdev(cxlm, &cxl_memdev_fops);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	/* rc is 0 here; only overwritten if the nvdimm bridge is attempted */
	if (range_len(&cxlm->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
		rc = devm_cxl_add_nvdimm(&pdev->dev, cxlmd);

	return rc;
}

static const struct pci_device_id cxl_mem_pci_tbl[] = {
	/* PCI class code for CXL.mem Type-3 Devices */
	{ PCI_DEVICE_CLASS((PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF), ~0)},
	{ /* terminate list */ },
};
MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl);

static struct pci_driver cxl_mem_driver = {
	.name			= KBUILD_MODNAME,
	.id_table		= cxl_mem_pci_tbl,
	.probe			= cxl_mem_probe,
	.driver	= {
		/* Mailbox bring-up can be slow; allow asynchronous probe */
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
	},
};

static __init int cxl_mem_init(void)
{
	struct dentry *mbox_debugfs;
	int rc;

	/* Double check the anonymous union trickery in struct cxl_regs */
	BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) !=
		     offsetof(struct cxl_regs, device_regs.memdev));

	rc = pci_register_driver(&cxl_mem_driver);
	if (rc)
		return rc;

	/*
	 * debugfs return values are deliberately unchecked: the debugfs
	 * API is designed so that error pointers may be passed back in as
	 * parents and the module works without debugfs.
	 */
	cxl_debugfs = debugfs_create_dir("cxl", NULL);
	mbox_debugfs = debugfs_create_dir("mbox", cxl_debugfs);
	debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
			    &cxl_raw_allow_all);

	return 0;
}

static __exit void cxl_mem_exit(void)
{
	/* Reverse of init order: tear down debugfs, then the driver */
	debugfs_remove_recursive(cxl_debugfs);
	pci_unregister_driver(&cxl_mem_driver);
}

MODULE_LICENSE("GPL v2");
module_init(cxl_mem_init);
module_exit(cxl_mem_exit);
MODULE_IMPORT_NS(CXL);