121e9f767SBen Widawsky // SPDX-License-Identifier: GPL-2.0-only 221e9f767SBen Widawsky /* Copyright(c) 2020 Intel Corporation. All rights reserved. */ 321e9f767SBen Widawsky #include <uapi/linux/cxl_mem.h> 421e9f767SBen Widawsky #include <linux/security.h> 521e9f767SBen Widawsky #include <linux/debugfs.h> 621e9f767SBen Widawsky #include <linux/module.h> 721e9f767SBen Widawsky #include <linux/sizes.h> 821e9f767SBen Widawsky #include <linux/mutex.h> 930af9729SIra Weiny #include <linux/list.h> 1021e9f767SBen Widawsky #include <linux/cdev.h> 1121e9f767SBen Widawsky #include <linux/idr.h> 1221e9f767SBen Widawsky #include <linux/pci.h> 1321e9f767SBen Widawsky #include <linux/io.h> 1421e9f767SBen Widawsky #include <linux/io-64-nonatomic-lo-hi.h> 155161a55cSBen Widawsky #include "cxlmem.h" 1621e9f767SBen Widawsky #include "pci.h" 1721e9f767SBen Widawsky #include "cxl.h" 1821e9f767SBen Widawsky 1921e9f767SBen Widawsky /** 2021e9f767SBen Widawsky * DOC: cxl pci 2121e9f767SBen Widawsky * 2221e9f767SBen Widawsky * This implements the PCI exclusive functionality for a CXL device as it is 2321e9f767SBen Widawsky * defined by the Compute Express Link specification. CXL devices may surface 2421e9f767SBen Widawsky * certain functionality even if it isn't CXL enabled. 2521e9f767SBen Widawsky * 2621e9f767SBen Widawsky * The driver has several responsibilities, mainly: 2721e9f767SBen Widawsky * - Create the memX device and register on the CXL bus. 2821e9f767SBen Widawsky * - Enumerate device's register interface and map them. 2921e9f767SBen Widawsky * - Probe the device attributes to establish sysfs interface. 3021e9f767SBen Widawsky * - Provide an IOCTL interface to userspace to communicate with the device for 3121e9f767SBen Widawsky * things like firmware update. 
 */

/* Doorbell busy == bit 0 of the Mailbox Control register (CXL 2.0 8.2.8.4.4) */
#define cxl_doorbell_busy(cxlm)                                                \
	(readl((cxlm)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) &                  \
	 CXLDEV_MBOX_CTRL_DOORBELL)

/*
 * CXL 2.0 - 8.2.8.4
 * NOTE(review): despite the _MS suffix this value is in jiffies (2 * HZ ==
 * 2 seconds) and is only ever compared against jiffies via time_after().
 */
#define CXL_MAILBOX_TIMEOUT_MS (2 * HZ)

/* Mailbox command opcodes, CXL 2.0 Table 168 "Command Opcodes" */
enum opcode {
	CXL_MBOX_OP_INVALID		= 0x0000,
	CXL_MBOX_OP_RAW			= CXL_MBOX_OP_INVALID,
	CXL_MBOX_OP_GET_FW_INFO		= 0x0200,
	CXL_MBOX_OP_ACTIVATE_FW		= 0x0202,
	CXL_MBOX_OP_GET_SUPPORTED_LOGS	= 0x0400,
	CXL_MBOX_OP_GET_LOG		= 0x0401,
	CXL_MBOX_OP_IDENTIFY		= 0x4000,
	CXL_MBOX_OP_GET_PARTITION_INFO	= 0x4100,
	CXL_MBOX_OP_SET_PARTITION_INFO	= 0x4101,
	CXL_MBOX_OP_GET_LSA		= 0x4102,
	CXL_MBOX_OP_SET_LSA		= 0x4103,
	CXL_MBOX_OP_GET_HEALTH_INFO	= 0x4200,
	CXL_MBOX_OP_GET_ALERT_CONFIG	= 0x4201,
	CXL_MBOX_OP_SET_ALERT_CONFIG	= 0x4202,
	CXL_MBOX_OP_GET_SHUTDOWN_STATE	= 0x4203,
	CXL_MBOX_OP_SET_SHUTDOWN_STATE	= 0x4204,
	CXL_MBOX_OP_GET_POISON		= 0x4300,
	CXL_MBOX_OP_INJECT_POISON	= 0x4301,
	CXL_MBOX_OP_CLEAR_POISON	= 0x4302,
	CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS	= 0x4303,
	CXL_MBOX_OP_SCAN_MEDIA		= 0x4304,
	CXL_MBOX_OP_GET_SCAN_MEDIA	= 0x4305,
	CXL_MBOX_OP_MAX			= 0x10000
};

/**
 * struct mbox_cmd - A command to be submitted to hardware.
 * @opcode: (input) The command set and command submitted to hardware.
 * @payload_in: (input) Pointer to the input payload.
 * @payload_out: (output) Pointer to the output payload. Must be allocated by
 *		 the caller.
 * @size_in: (input) Number of bytes to load from @payload_in.
 * @size_out: (input) Max number of bytes loaded into @payload_out.
 *            (output) Number of bytes generated by the device. For fixed size
 *            outputs commands this is always expected to be deterministic. For
 *            variable sized output commands, it tells the exact number of bytes
 *            written.
 * @return_code: (output) Error code returned from hardware.
 *
 * This is the primary mechanism used to send commands to the hardware.
 * All the fields except @payload_* correspond exactly to the fields described in
 * Command Register section of the CXL 2.0 8.2.8.4.5. @payload_in and
 * @payload_out are written to, and read from the Command Payload Registers
 * defined in CXL 2.0 8.2.8.4.8.
 */
struct mbox_cmd {
	u16 opcode;
	void *payload_in;
	void *payload_out;
	size_t size_in;
	size_t size_out;
	u16 return_code;
#define CXL_MBOX_SUCCESS 0
};

/* Serializes ioctl access against memdev shutdown/removal */
static DECLARE_RWSEM(cxl_memdev_rwsem);
static struct dentry *cxl_debugfs;
/* Module-level opt-in (debugfs) that bypasses the raw-command deny lists */
static bool cxl_raw_allow_all;

/* Indices into log_uuid[] */
enum {
	CEL_UUID,
	VENDOR_DEBUG_UUID,
};

/* See CXL 2.0 Table 170. Get Log Input Payload */
static const uuid_t log_uuid[] = {
	[CEL_UUID] = UUID_INIT(0xda9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79, 0x96,
			       0xb1, 0x62, 0x3b, 0x3f, 0x17),
	[VENDOR_DEBUG_UUID] = UUID_INIT(0xe1819d9, 0x11a9, 0x400c, 0x81, 0x1f,
					0xd6, 0x07, 0x19, 0x40, 0x3d, 0x86),
};

/**
 * struct cxl_mem_command - Driver representation of a memory device command
 * @info: Command information as it exists for the UAPI
 * @opcode: The actual bits used for the mailbox protocol
 * @flags: Set of flags effecting driver behavior.
 *
 *  * %CXL_CMD_FLAG_FORCE_ENABLE: In cases of error, commands with this flag
 *    will be enabled by the driver regardless of what hardware may have
 *    advertised.
 *
 * The cxl_mem_command is the driver's internal representation of commands that
 * are supported by the driver. Some of these commands may not be supported by
 * the hardware. The driver will use @info to validate the fields passed in by
 * the user then submit the @opcode to the hardware.
 *
 * See struct cxl_command_info.
 */
struct cxl_mem_command {
	struct cxl_command_info info;
	enum opcode opcode;
	u32 flags;
#define CXL_CMD_FLAG_NONE 0
#define CXL_CMD_FLAG_FORCE_ENABLE BIT(0)
};

/* Table-entry helper: ties the UAPI command id to its mailbox opcode */
#define CXL_CMD(_id, sin, sout, _flags)                                        \
	[CXL_MEM_COMMAND_ID_##_id] = {                                         \
	.info =	{                                                              \
			.id = CXL_MEM_COMMAND_ID_##_id,                        \
			.size_in = sin,                                        \
			.size_out = sout,                                      \
		},                                                             \
	.opcode = CXL_MBOX_OP_##_id,                                           \
	.flags = _flags,                                                       \
	}

/*
 * This table defines the supported mailbox commands for the driver. This table
 * is made up of a UAPI structure. Non-negative values as parameters in the
 * table will be validated against the user's input. For example, if size_in is
 * 0, and the user passed in 1, it is an error.
 * (~0 means "variable size" and disables that particular size check.)
 */
static struct cxl_mem_command mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
	CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
	CXL_CMD(RAW, ~0, ~0, 0),
#endif
	CXL_CMD(GET_SUPPORTED_LOGS, 0, ~0, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(GET_FW_INFO, 0, 0x50, 0),
	CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0),
	CXL_CMD(GET_LSA, 0x8, ~0, 0),
	CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
	CXL_CMD(GET_LOG, 0x18, ~0, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
	CXL_CMD(SET_LSA, ~0, 0, 0),
	CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
	CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0),
	CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
	CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
	CXL_CMD(GET_POISON, 0x10, ~0, 0),
	CXL_CMD(INJECT_POISON, 0x8, 0, 0),
	CXL_CMD(CLEAR_POISON, 0x48, 0, 0),
	CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
	CXL_CMD(SCAN_MEDIA, 0x11, 0, 0),
	CXL_CMD(GET_SCAN_MEDIA, 0, ~0, 0),
};

/*
 * Commands that RAW doesn't permit. The rationale for each:
 *
 * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment /
 * coordination of transaction timeout values at the root bridge level.
 *
 * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live
 * and needs to be coordinated with HDM updates.
 *
 * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the
 * driver and any writes from userspace invalidates those contents.
 *
 * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes
 * to the device after it is marked clean, userspace can not make that
 * assertion.
 *
 * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that
 * is kept up to date with patrol notifications and error management.
 */
static u16 cxl_disabled_raw_commands[] = {
	CXL_MBOX_OP_ACTIVATE_FW,
	CXL_MBOX_OP_SET_PARTITION_INFO,
	CXL_MBOX_OP_SET_LSA,
	CXL_MBOX_OP_SET_SHUTDOWN_STATE,
	CXL_MBOX_OP_SCAN_MEDIA,
	CXL_MBOX_OP_GET_SCAN_MEDIA,
};

/*
 * Command sets that RAW doesn't permit. All opcodes in this set are
 * disabled because they pass plain text security payloads over the
 * user/kernel boundary.
This functionality is intended to be wrapped 21321e9f767SBen Widawsky * behind the keys ABI which allows for encrypted payloads in the UAPI 21421e9f767SBen Widawsky */ 21521e9f767SBen Widawsky static u8 security_command_sets[] = { 21621e9f767SBen Widawsky 0x44, /* Sanitize */ 21721e9f767SBen Widawsky 0x45, /* Persistent Memory Data-at-rest Security */ 21821e9f767SBen Widawsky 0x46, /* Security Passthrough */ 21921e9f767SBen Widawsky }; 22021e9f767SBen Widawsky 22121e9f767SBen Widawsky #define cxl_for_each_cmd(cmd) \ 22221e9f767SBen Widawsky for ((cmd) = &mem_commands[0]; \ 22321e9f767SBen Widawsky ((cmd) - mem_commands) < ARRAY_SIZE(mem_commands); (cmd)++) 22421e9f767SBen Widawsky 22521e9f767SBen Widawsky #define cxl_cmd_count ARRAY_SIZE(mem_commands) 22621e9f767SBen Widawsky 22721e9f767SBen Widawsky static int cxl_mem_wait_for_doorbell(struct cxl_mem *cxlm) 22821e9f767SBen Widawsky { 22921e9f767SBen Widawsky const unsigned long start = jiffies; 23021e9f767SBen Widawsky unsigned long end = start; 23121e9f767SBen Widawsky 23221e9f767SBen Widawsky while (cxl_doorbell_busy(cxlm)) { 23321e9f767SBen Widawsky end = jiffies; 23421e9f767SBen Widawsky 23521e9f767SBen Widawsky if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) { 23621e9f767SBen Widawsky /* Check again in case preempted before timeout test */ 23721e9f767SBen Widawsky if (!cxl_doorbell_busy(cxlm)) 23821e9f767SBen Widawsky break; 23921e9f767SBen Widawsky return -ETIMEDOUT; 24021e9f767SBen Widawsky } 24121e9f767SBen Widawsky cpu_relax(); 24221e9f767SBen Widawsky } 24321e9f767SBen Widawsky 24421e9f767SBen Widawsky dev_dbg(&cxlm->pdev->dev, "Doorbell wait took %dms", 24521e9f767SBen Widawsky jiffies_to_msecs(end) - jiffies_to_msecs(start)); 24621e9f767SBen Widawsky return 0; 24721e9f767SBen Widawsky } 24821e9f767SBen Widawsky 24921e9f767SBen Widawsky static bool cxl_is_security_command(u16 opcode) 25021e9f767SBen Widawsky { 25121e9f767SBen Widawsky int i; 25221e9f767SBen Widawsky 25321e9f767SBen Widawsky for (i 
= 0; i < ARRAY_SIZE(security_command_sets); i++) 25421e9f767SBen Widawsky if (security_command_sets[i] == (opcode >> 8)) 25521e9f767SBen Widawsky return true; 25621e9f767SBen Widawsky return false; 25721e9f767SBen Widawsky } 25821e9f767SBen Widawsky 25921e9f767SBen Widawsky static void cxl_mem_mbox_timeout(struct cxl_mem *cxlm, 26021e9f767SBen Widawsky struct mbox_cmd *mbox_cmd) 26121e9f767SBen Widawsky { 26221e9f767SBen Widawsky struct device *dev = &cxlm->pdev->dev; 26321e9f767SBen Widawsky 26421e9f767SBen Widawsky dev_dbg(dev, "Mailbox command (opcode: %#x size: %zub) timed out\n", 26521e9f767SBen Widawsky mbox_cmd->opcode, mbox_cmd->size_in); 26621e9f767SBen Widawsky } 26721e9f767SBen Widawsky 26821e9f767SBen Widawsky /** 26921e9f767SBen Widawsky * __cxl_mem_mbox_send_cmd() - Execute a mailbox command 27021e9f767SBen Widawsky * @cxlm: The CXL memory device to communicate with. 27121e9f767SBen Widawsky * @mbox_cmd: Command to send to the memory device. 27221e9f767SBen Widawsky * 27321e9f767SBen Widawsky * Context: Any context. Expects mbox_mutex to be held. 27421e9f767SBen Widawsky * Return: -ETIMEDOUT if timeout occurred waiting for completion. 0 on success. 27521e9f767SBen Widawsky * Caller should check the return code in @mbox_cmd to make sure it 27621e9f767SBen Widawsky * succeeded. 27721e9f767SBen Widawsky * 27821e9f767SBen Widawsky * This is a generic form of the CXL mailbox send command thus only using the 27921e9f767SBen Widawsky * registers defined by the mailbox capability ID - CXL 2.0 8.2.8.4. Memory 28021e9f767SBen Widawsky * devices, and perhaps other types of CXL devices may have further information 28121e9f767SBen Widawsky * available upon error conditions. Driver facilities wishing to send mailbox 28221e9f767SBen Widawsky * commands should use the wrapper command. 28321e9f767SBen Widawsky * 28421e9f767SBen Widawsky * The CXL spec allows for up to two mailboxes. 
 * The intention is for the primary
 * mailbox to be OS controlled and the secondary mailbox to be used by system
 * firmware. This allows the OS and firmware to communicate with the device and
 * not need to coordinate with each other. The driver only uses the primary
 * mailbox.
 */
static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm,
				   struct mbox_cmd *mbox_cmd)
{
	void __iomem *payload = cxlm->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
	u64 cmd_reg, status_reg;
	size_t out_len;
	int rc;

	lockdep_assert_held(&cxlm->mbox_mutex);

	/*
	 * Here are the steps from 8.2.8.4 of the CXL 2.0 spec.
	 *   1. Caller reads MB Control Register to verify doorbell is clear
	 *   2. Caller writes Command Register
	 *   3. Caller writes Command Payload Registers if input payload is non-empty
	 *   4. Caller writes MB Control Register to set doorbell
	 *   5. Caller either polls for doorbell to be clear or waits for interrupt if configured
	 *   6. Caller reads MB Status Register to fetch Return code
	 *   7. If command successful, Caller reads Command Register to get Payload Length
	 *   8. If output payload is non-empty, host reads Command Payload Registers
	 *
	 * Hardware is free to do whatever it wants before the doorbell is rung,
	 * and isn't allowed to change anything after it clears the doorbell. As
	 * such, steps 2 and 3 can happen in any order, and steps 6, 7, 8 can
	 * also happen in any order (though some orders might not make sense).
	 */

	/* #1 */
	if (cxl_doorbell_busy(cxlm)) {
		/* Caller should have waited for the doorbell before entry */
		dev_err_ratelimited(&cxlm->pdev->dev,
				    "Mailbox re-busy after acquiring\n");
		return -EBUSY;
	}

	cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK,
			     mbox_cmd->opcode);
	if (mbox_cmd->size_in) {
		if (WARN_ON(!mbox_cmd->payload_in))
			return -EINVAL;

		cmd_reg |= FIELD_PREP(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK,
				      mbox_cmd->size_in);
		memcpy_toio(payload, mbox_cmd->payload_in, mbox_cmd->size_in);
	}

	/* #2, #3 */
	writeq(cmd_reg, cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);

	/* #4 */
	dev_dbg(&cxlm->pdev->dev, "Sending command\n");
	writel(CXLDEV_MBOX_CTRL_DOORBELL,
	       cxlm->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);

	/* #5 */
	rc = cxl_mem_wait_for_doorbell(cxlm);
	if (rc == -ETIMEDOUT) {
		cxl_mem_mbox_timeout(cxlm, mbox_cmd);
		return rc;
	}

	/* #6 */
	status_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET);
	mbox_cmd->return_code =
		FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);

	if (mbox_cmd->return_code != 0) {
		/*
		 * Protocol completed; the hardware-level error is reported to
		 * the caller via @return_code, so 0 is returned here.
		 */
		dev_dbg(&cxlm->pdev->dev, "Mailbox operation had an error\n");
		return 0;
	}

	/* #7 */
	cmd_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
	out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);

	/* #8 */
	if (out_len && mbox_cmd->payload_out) {
		/*
		 * Sanitize the copy. If hardware misbehaves, out_len per the
		 * spec can actually be greater than the max allowed size (21
		 * bits available but spec defined 1M max). The caller also may
		 * have requested less data than the hardware supplied even
		 * within spec.
		 */
		size_t n = min3(mbox_cmd->size_out, cxlm->payload_size, out_len);

		memcpy_fromio(mbox_cmd->payload_out, payload, n);
		mbox_cmd->size_out = n;
	} else {
		mbox_cmd->size_out = 0;
	}

	return 0;
}

/**
 * cxl_mem_mbox_get() - Acquire exclusive access to the mailbox.
 * @cxlm: The memory device to gain access to.
 *
 * Context: Any context. Takes the mbox_mutex.
 * Return: 0 if exclusive access was acquired.
 */
static int cxl_mem_mbox_get(struct cxl_mem *cxlm)
{
	struct device *dev = &cxlm->pdev->dev;
	u64 md_status;
	int rc;

	/* _io variant: accounts the wait as I/O wait for scheduler stats */
	mutex_lock_io(&cxlm->mbox_mutex);

	/*
	 * XXX: There is some amount of ambiguity in the 2.0 version of the spec
	 * around the mailbox interface ready (8.2.8.5.1.1). The purpose of the
	 * bit is to allow firmware running on the device to notify the driver
	 * that it's ready to receive commands. It is unclear if the bit needs
	 * to be read for each transaction mailbox, ie. the firmware can switch
	 * it on and off as needed. Second, there is no defined timeout for
	 * mailbox ready, like there is for the doorbell interface.
	 *
	 * Assumptions:
	 * 1. The firmware might toggle the Mailbox Interface Ready bit, check
	 *    it for every command.
	 *
	 * 2. If the doorbell is clear, the firmware should have first set the
	 *    Mailbox Interface Ready bit. Therefore, waiting for the doorbell
	 *    to be ready is sufficient.
	 */
	rc = cxl_mem_wait_for_doorbell(cxlm);
	if (rc) {
		dev_warn(dev, "Mailbox interface not ready\n");
		goto out;
	}

	md_status = readq(cxlm->regs.memdev + CXLMDEV_STATUS_OFFSET);
	if (!(md_status & CXLMDEV_MBOX_IF_READY && CXLMDEV_READY(md_status))) {
		dev_err(dev, "mbox: reported doorbell ready, but not mbox ready\n");
		rc = -EBUSY;
		goto out;
	}

	/*
	 * Hardware shouldn't allow a ready status but also have failure bits
	 * set. Spit out an error, this should be a bug report
	 */
	rc = -EFAULT;
	if (md_status & CXLMDEV_DEV_FATAL) {
		dev_err(dev, "mbox: reported ready, but fatal\n");
		goto out;
	}
	if (md_status & CXLMDEV_FW_HALT) {
		dev_err(dev, "mbox: reported ready, but halted\n");
		goto out;
	}
	if (CXLMDEV_RESET_NEEDED(md_status)) {
		dev_err(dev, "mbox: reported ready, but reset needed\n");
		goto out;
	}

	/* with lock held */
	return 0;

out:
	mutex_unlock(&cxlm->mbox_mutex);
	return rc;
}

/**
 * cxl_mem_mbox_put() - Release exclusive access to the mailbox.
45721e9f767SBen Widawsky * @cxlm: The CXL memory device to communicate with. 45821e9f767SBen Widawsky * 45921e9f767SBen Widawsky * Context: Any context. Expects mbox_mutex to be held. 46021e9f767SBen Widawsky */ 46121e9f767SBen Widawsky static void cxl_mem_mbox_put(struct cxl_mem *cxlm) 46221e9f767SBen Widawsky { 46321e9f767SBen Widawsky mutex_unlock(&cxlm->mbox_mutex); 46421e9f767SBen Widawsky } 46521e9f767SBen Widawsky 46621e9f767SBen Widawsky /** 46721e9f767SBen Widawsky * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace. 46821e9f767SBen Widawsky * @cxlm: The CXL memory device to communicate with. 46921e9f767SBen Widawsky * @cmd: The validated command. 47021e9f767SBen Widawsky * @in_payload: Pointer to userspace's input payload. 47121e9f767SBen Widawsky * @out_payload: Pointer to userspace's output payload. 47221e9f767SBen Widawsky * @size_out: (Input) Max payload size to copy out. 47321e9f767SBen Widawsky * (Output) Payload size hardware generated. 47421e9f767SBen Widawsky * @retval: Hardware generated return code from the operation. 47521e9f767SBen Widawsky * 47621e9f767SBen Widawsky * Return: 47721e9f767SBen Widawsky * * %0 - Mailbox transaction succeeded. This implies the mailbox 47821e9f767SBen Widawsky * protocol completed successfully not that the operation itself 47921e9f767SBen Widawsky * was successful. 48021e9f767SBen Widawsky * * %-ENOMEM - Couldn't allocate a bounce buffer. 48121e9f767SBen Widawsky * * %-EFAULT - Something happened with copy_to/from_user. 48221e9f767SBen Widawsky * * %-EINTR - Mailbox acquisition interrupted. 48321e9f767SBen Widawsky * * %-EXXX - Transaction level failures. 48421e9f767SBen Widawsky * 48521e9f767SBen Widawsky * Creates the appropriate mailbox command and dispatches it on behalf of a 48621e9f767SBen Widawsky * userspace request. The input and output payloads are copied between 48721e9f767SBen Widawsky * userspace. 48821e9f767SBen Widawsky * 48921e9f767SBen Widawsky * See cxl_send_cmd(). 
49021e9f767SBen Widawsky */ 49121e9f767SBen Widawsky static int handle_mailbox_cmd_from_user(struct cxl_mem *cxlm, 49221e9f767SBen Widawsky const struct cxl_mem_command *cmd, 49321e9f767SBen Widawsky u64 in_payload, u64 out_payload, 49421e9f767SBen Widawsky s32 *size_out, u32 *retval) 49521e9f767SBen Widawsky { 49621e9f767SBen Widawsky struct device *dev = &cxlm->pdev->dev; 49721e9f767SBen Widawsky struct mbox_cmd mbox_cmd = { 49821e9f767SBen Widawsky .opcode = cmd->opcode, 49921e9f767SBen Widawsky .size_in = cmd->info.size_in, 50021e9f767SBen Widawsky .size_out = cmd->info.size_out, 50121e9f767SBen Widawsky }; 50221e9f767SBen Widawsky int rc; 50321e9f767SBen Widawsky 50421e9f767SBen Widawsky if (cmd->info.size_out) { 50521e9f767SBen Widawsky mbox_cmd.payload_out = kvzalloc(cmd->info.size_out, GFP_KERNEL); 50621e9f767SBen Widawsky if (!mbox_cmd.payload_out) 50721e9f767SBen Widawsky return -ENOMEM; 50821e9f767SBen Widawsky } 50921e9f767SBen Widawsky 51021e9f767SBen Widawsky if (cmd->info.size_in) { 51121e9f767SBen Widawsky mbox_cmd.payload_in = vmemdup_user(u64_to_user_ptr(in_payload), 51221e9f767SBen Widawsky cmd->info.size_in); 51321e9f767SBen Widawsky if (IS_ERR(mbox_cmd.payload_in)) { 51421e9f767SBen Widawsky kvfree(mbox_cmd.payload_out); 51521e9f767SBen Widawsky return PTR_ERR(mbox_cmd.payload_in); 51621e9f767SBen Widawsky } 51721e9f767SBen Widawsky } 51821e9f767SBen Widawsky 51921e9f767SBen Widawsky rc = cxl_mem_mbox_get(cxlm); 52021e9f767SBen Widawsky if (rc) 52121e9f767SBen Widawsky goto out; 52221e9f767SBen Widawsky 52321e9f767SBen Widawsky dev_dbg(dev, 52421e9f767SBen Widawsky "Submitting %s command for user\n" 52521e9f767SBen Widawsky "\topcode: %x\n" 52621e9f767SBen Widawsky "\tsize: %ub\n", 52721e9f767SBen Widawsky cxl_command_names[cmd->info.id].name, mbox_cmd.opcode, 52821e9f767SBen Widawsky cmd->info.size_in); 52921e9f767SBen Widawsky 53021e9f767SBen Widawsky dev_WARN_ONCE(dev, cmd->info.id == CXL_MEM_COMMAND_ID_RAW, 53121e9f767SBen Widawsky "raw 
command path used\n"); 53221e9f767SBen Widawsky 53321e9f767SBen Widawsky rc = __cxl_mem_mbox_send_cmd(cxlm, &mbox_cmd); 53421e9f767SBen Widawsky cxl_mem_mbox_put(cxlm); 53521e9f767SBen Widawsky if (rc) 53621e9f767SBen Widawsky goto out; 53721e9f767SBen Widawsky 53821e9f767SBen Widawsky /* 53921e9f767SBen Widawsky * @size_out contains the max size that's allowed to be written back out 54021e9f767SBen Widawsky * to userspace. While the payload may have written more output than 54121e9f767SBen Widawsky * this it will have to be ignored. 54221e9f767SBen Widawsky */ 54321e9f767SBen Widawsky if (mbox_cmd.size_out) { 54421e9f767SBen Widawsky dev_WARN_ONCE(dev, mbox_cmd.size_out > *size_out, 54521e9f767SBen Widawsky "Invalid return size\n"); 54621e9f767SBen Widawsky if (copy_to_user(u64_to_user_ptr(out_payload), 54721e9f767SBen Widawsky mbox_cmd.payload_out, mbox_cmd.size_out)) { 54821e9f767SBen Widawsky rc = -EFAULT; 54921e9f767SBen Widawsky goto out; 55021e9f767SBen Widawsky } 55121e9f767SBen Widawsky } 55221e9f767SBen Widawsky 55321e9f767SBen Widawsky *size_out = mbox_cmd.size_out; 55421e9f767SBen Widawsky *retval = mbox_cmd.return_code; 55521e9f767SBen Widawsky 55621e9f767SBen Widawsky out: 55721e9f767SBen Widawsky kvfree(mbox_cmd.payload_in); 55821e9f767SBen Widawsky kvfree(mbox_cmd.payload_out); 55921e9f767SBen Widawsky return rc; 56021e9f767SBen Widawsky } 56121e9f767SBen Widawsky 56221e9f767SBen Widawsky static bool cxl_mem_raw_command_allowed(u16 opcode) 56321e9f767SBen Widawsky { 56421e9f767SBen Widawsky int i; 56521e9f767SBen Widawsky 56621e9f767SBen Widawsky if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS)) 56721e9f767SBen Widawsky return false; 56821e9f767SBen Widawsky 56921e9f767SBen Widawsky if (security_locked_down(LOCKDOWN_NONE)) 57021e9f767SBen Widawsky return false; 57121e9f767SBen Widawsky 57221e9f767SBen Widawsky if (cxl_raw_allow_all) 57321e9f767SBen Widawsky return true; 57421e9f767SBen Widawsky 57521e9f767SBen Widawsky if 
(cxl_is_security_command(opcode)) 57621e9f767SBen Widawsky return false; 57721e9f767SBen Widawsky 57821e9f767SBen Widawsky for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++) 57921e9f767SBen Widawsky if (cxl_disabled_raw_commands[i] == opcode) 58021e9f767SBen Widawsky return false; 58121e9f767SBen Widawsky 58221e9f767SBen Widawsky return true; 58321e9f767SBen Widawsky } 58421e9f767SBen Widawsky 58521e9f767SBen Widawsky /** 58621e9f767SBen Widawsky * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND. 58721e9f767SBen Widawsky * @cxlm: &struct cxl_mem device whose mailbox will be used. 58821e9f767SBen Widawsky * @send_cmd: &struct cxl_send_command copied in from userspace. 58921e9f767SBen Widawsky * @out_cmd: Sanitized and populated &struct cxl_mem_command. 59021e9f767SBen Widawsky * 59121e9f767SBen Widawsky * Return: 59221e9f767SBen Widawsky * * %0 - @out_cmd is ready to send. 59321e9f767SBen Widawsky * * %-ENOTTY - Invalid command specified. 59421e9f767SBen Widawsky * * %-EINVAL - Reserved fields or invalid values were used. 59521e9f767SBen Widawsky * * %-ENOMEM - Input or output buffer wasn't sized properly. 59621e9f767SBen Widawsky * * %-EPERM - Attempted to use a protected command. 59721e9f767SBen Widawsky * 59821e9f767SBen Widawsky * The result of this command is a fully validated command in @out_cmd that is 59921e9f767SBen Widawsky * safe to send to the hardware. 
60021e9f767SBen Widawsky * 60121e9f767SBen Widawsky * See handle_mailbox_cmd_from_user() 60221e9f767SBen Widawsky */ 60321e9f767SBen Widawsky static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm, 60421e9f767SBen Widawsky const struct cxl_send_command *send_cmd, 60521e9f767SBen Widawsky struct cxl_mem_command *out_cmd) 60621e9f767SBen Widawsky { 60721e9f767SBen Widawsky const struct cxl_command_info *info; 60821e9f767SBen Widawsky struct cxl_mem_command *c; 60921e9f767SBen Widawsky 61021e9f767SBen Widawsky if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX) 61121e9f767SBen Widawsky return -ENOTTY; 61221e9f767SBen Widawsky 61321e9f767SBen Widawsky /* 61421e9f767SBen Widawsky * The user can never specify an input payload larger than what hardware 61521e9f767SBen Widawsky * supports, but output can be arbitrarily large (simply write out as 61621e9f767SBen Widawsky * much data as the hardware provides). 61721e9f767SBen Widawsky */ 61821e9f767SBen Widawsky if (send_cmd->in.size > cxlm->payload_size) 61921e9f767SBen Widawsky return -EINVAL; 62021e9f767SBen Widawsky 62121e9f767SBen Widawsky /* 62221e9f767SBen Widawsky * Checks are bypassed for raw commands but a WARN/taint will occur 62321e9f767SBen Widawsky * later in the callchain 62421e9f767SBen Widawsky */ 62521e9f767SBen Widawsky if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW) { 62621e9f767SBen Widawsky const struct cxl_mem_command temp = { 62721e9f767SBen Widawsky .info = { 62821e9f767SBen Widawsky .id = CXL_MEM_COMMAND_ID_RAW, 62921e9f767SBen Widawsky .flags = 0, 63021e9f767SBen Widawsky .size_in = send_cmd->in.size, 63121e9f767SBen Widawsky .size_out = send_cmd->out.size, 63221e9f767SBen Widawsky }, 63321e9f767SBen Widawsky .opcode = send_cmd->raw.opcode 63421e9f767SBen Widawsky }; 63521e9f767SBen Widawsky 63621e9f767SBen Widawsky if (send_cmd->raw.rsvd) 63721e9f767SBen Widawsky return -EINVAL; 63821e9f767SBen Widawsky 63921e9f767SBen Widawsky /* 64021e9f767SBen Widawsky * Unlike supported commands, 
the output size of RAW commands 64121e9f767SBen Widawsky * gets passed along without further checking, so it must be 64221e9f767SBen Widawsky * validated here. 64321e9f767SBen Widawsky */ 64421e9f767SBen Widawsky if (send_cmd->out.size > cxlm->payload_size) 64521e9f767SBen Widawsky return -EINVAL; 64621e9f767SBen Widawsky 64721e9f767SBen Widawsky if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode)) 64821e9f767SBen Widawsky return -EPERM; 64921e9f767SBen Widawsky 65021e9f767SBen Widawsky memcpy(out_cmd, &temp, sizeof(temp)); 65121e9f767SBen Widawsky 65221e9f767SBen Widawsky return 0; 65321e9f767SBen Widawsky } 65421e9f767SBen Widawsky 65521e9f767SBen Widawsky if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK) 65621e9f767SBen Widawsky return -EINVAL; 65721e9f767SBen Widawsky 65821e9f767SBen Widawsky if (send_cmd->rsvd) 65921e9f767SBen Widawsky return -EINVAL; 66021e9f767SBen Widawsky 66121e9f767SBen Widawsky if (send_cmd->in.rsvd || send_cmd->out.rsvd) 66221e9f767SBen Widawsky return -EINVAL; 66321e9f767SBen Widawsky 66421e9f767SBen Widawsky /* Convert user's command into the internal representation */ 66521e9f767SBen Widawsky c = &mem_commands[send_cmd->id]; 66621e9f767SBen Widawsky info = &c->info; 66721e9f767SBen Widawsky 66821e9f767SBen Widawsky /* Check that the command is enabled for hardware */ 66921e9f767SBen Widawsky if (!test_bit(info->id, cxlm->enabled_cmds)) 67021e9f767SBen Widawsky return -ENOTTY; 67121e9f767SBen Widawsky 67221e9f767SBen Widawsky /* Check the input buffer is the expected size */ 67321e9f767SBen Widawsky if (info->size_in >= 0 && info->size_in != send_cmd->in.size) 67421e9f767SBen Widawsky return -ENOMEM; 67521e9f767SBen Widawsky 67621e9f767SBen Widawsky /* Check the output buffer is at least large enough */ 67721e9f767SBen Widawsky if (info->size_out >= 0 && send_cmd->out.size < info->size_out) 67821e9f767SBen Widawsky return -ENOMEM; 67921e9f767SBen Widawsky 68021e9f767SBen Widawsky memcpy(out_cmd, c, sizeof(*c)); 68121e9f767SBen 
Widawsky out_cmd->info.size_in = send_cmd->in.size; 68221e9f767SBen Widawsky /* 68321e9f767SBen Widawsky * XXX: out_cmd->info.size_out will be controlled by the driver, and the 68421e9f767SBen Widawsky * specified number of bytes @send_cmd->out.size will be copied back out 68521e9f767SBen Widawsky * to userspace. 68621e9f767SBen Widawsky */ 68721e9f767SBen Widawsky 68821e9f767SBen Widawsky return 0; 68921e9f767SBen Widawsky } 69021e9f767SBen Widawsky 69121e9f767SBen Widawsky static int cxl_query_cmd(struct cxl_memdev *cxlmd, 69221e9f767SBen Widawsky struct cxl_mem_query_commands __user *q) 69321e9f767SBen Widawsky { 69421e9f767SBen Widawsky struct device *dev = &cxlmd->dev; 69521e9f767SBen Widawsky struct cxl_mem_command *cmd; 69621e9f767SBen Widawsky u32 n_commands; 69721e9f767SBen Widawsky int j = 0; 69821e9f767SBen Widawsky 69921e9f767SBen Widawsky dev_dbg(dev, "Query IOCTL\n"); 70021e9f767SBen Widawsky 70121e9f767SBen Widawsky if (get_user(n_commands, &q->n_commands)) 70221e9f767SBen Widawsky return -EFAULT; 70321e9f767SBen Widawsky 70421e9f767SBen Widawsky /* returns the total number if 0 elements are requested. */ 70521e9f767SBen Widawsky if (n_commands == 0) 70621e9f767SBen Widawsky return put_user(cxl_cmd_count, &q->n_commands); 70721e9f767SBen Widawsky 70821e9f767SBen Widawsky /* 70921e9f767SBen Widawsky * otherwise, return max(n_commands, total commands) cxl_command_info 71021e9f767SBen Widawsky * structures. 
71121e9f767SBen Widawsky */ 71221e9f767SBen Widawsky cxl_for_each_cmd(cmd) { 71321e9f767SBen Widawsky const struct cxl_command_info *info = &cmd->info; 71421e9f767SBen Widawsky 71521e9f767SBen Widawsky if (copy_to_user(&q->commands[j++], info, sizeof(*info))) 71621e9f767SBen Widawsky return -EFAULT; 71721e9f767SBen Widawsky 71821e9f767SBen Widawsky if (j == n_commands) 71921e9f767SBen Widawsky break; 72021e9f767SBen Widawsky } 72121e9f767SBen Widawsky 72221e9f767SBen Widawsky return 0; 72321e9f767SBen Widawsky } 72421e9f767SBen Widawsky 72521e9f767SBen Widawsky static int cxl_send_cmd(struct cxl_memdev *cxlmd, 72621e9f767SBen Widawsky struct cxl_send_command __user *s) 72721e9f767SBen Widawsky { 72821e9f767SBen Widawsky struct cxl_mem *cxlm = cxlmd->cxlm; 72921e9f767SBen Widawsky struct device *dev = &cxlmd->dev; 73021e9f767SBen Widawsky struct cxl_send_command send; 73121e9f767SBen Widawsky struct cxl_mem_command c; 73221e9f767SBen Widawsky int rc; 73321e9f767SBen Widawsky 73421e9f767SBen Widawsky dev_dbg(dev, "Send IOCTL\n"); 73521e9f767SBen Widawsky 73621e9f767SBen Widawsky if (copy_from_user(&send, s, sizeof(send))) 73721e9f767SBen Widawsky return -EFAULT; 73821e9f767SBen Widawsky 73921e9f767SBen Widawsky rc = cxl_validate_cmd_from_user(cxlmd->cxlm, &send, &c); 74021e9f767SBen Widawsky if (rc) 74121e9f767SBen Widawsky return rc; 74221e9f767SBen Widawsky 74321e9f767SBen Widawsky /* Prepare to handle a full payload for variable sized output */ 74421e9f767SBen Widawsky if (c.info.size_out < 0) 74521e9f767SBen Widawsky c.info.size_out = cxlm->payload_size; 74621e9f767SBen Widawsky 74721e9f767SBen Widawsky rc = handle_mailbox_cmd_from_user(cxlm, &c, send.in.payload, 74821e9f767SBen Widawsky send.out.payload, &send.out.size, 74921e9f767SBen Widawsky &send.retval); 75021e9f767SBen Widawsky if (rc) 75121e9f767SBen Widawsky return rc; 75221e9f767SBen Widawsky 75321e9f767SBen Widawsky if (copy_to_user(s, &send, sizeof(send))) 75421e9f767SBen Widawsky return -EFAULT; 
75521e9f767SBen Widawsky 75621e9f767SBen Widawsky return 0; 75721e9f767SBen Widawsky } 75821e9f767SBen Widawsky 75921e9f767SBen Widawsky static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd, 76021e9f767SBen Widawsky unsigned long arg) 76121e9f767SBen Widawsky { 76221e9f767SBen Widawsky switch (cmd) { 76321e9f767SBen Widawsky case CXL_MEM_QUERY_COMMANDS: 76421e9f767SBen Widawsky return cxl_query_cmd(cxlmd, (void __user *)arg); 76521e9f767SBen Widawsky case CXL_MEM_SEND_COMMAND: 76621e9f767SBen Widawsky return cxl_send_cmd(cxlmd, (void __user *)arg); 76721e9f767SBen Widawsky default: 76821e9f767SBen Widawsky return -ENOTTY; 76921e9f767SBen Widawsky } 77021e9f767SBen Widawsky } 77121e9f767SBen Widawsky 77221e9f767SBen Widawsky static long cxl_memdev_ioctl(struct file *file, unsigned int cmd, 77321e9f767SBen Widawsky unsigned long arg) 77421e9f767SBen Widawsky { 77521e9f767SBen Widawsky struct cxl_memdev *cxlmd = file->private_data; 77621e9f767SBen Widawsky int rc = -ENXIO; 77721e9f767SBen Widawsky 77821e9f767SBen Widawsky down_read(&cxl_memdev_rwsem); 77921e9f767SBen Widawsky if (cxlmd->cxlm) 78021e9f767SBen Widawsky rc = __cxl_memdev_ioctl(cxlmd, cmd, arg); 78121e9f767SBen Widawsky up_read(&cxl_memdev_rwsem); 78221e9f767SBen Widawsky 78321e9f767SBen Widawsky return rc; 78421e9f767SBen Widawsky } 78521e9f767SBen Widawsky 78621e9f767SBen Widawsky static int cxl_memdev_open(struct inode *inode, struct file *file) 78721e9f767SBen Widawsky { 78821e9f767SBen Widawsky struct cxl_memdev *cxlmd = 78921e9f767SBen Widawsky container_of(inode->i_cdev, typeof(*cxlmd), cdev); 79021e9f767SBen Widawsky 79121e9f767SBen Widawsky get_device(&cxlmd->dev); 79221e9f767SBen Widawsky file->private_data = cxlmd; 79321e9f767SBen Widawsky 79421e9f767SBen Widawsky return 0; 79521e9f767SBen Widawsky } 79621e9f767SBen Widawsky 79721e9f767SBen Widawsky static int cxl_memdev_release_file(struct inode *inode, struct file *file) 79821e9f767SBen Widawsky { 79921e9f767SBen Widawsky 
struct cxl_memdev *cxlmd = 80021e9f767SBen Widawsky container_of(inode->i_cdev, typeof(*cxlmd), cdev); 80121e9f767SBen Widawsky 80221e9f767SBen Widawsky put_device(&cxlmd->dev); 80321e9f767SBen Widawsky 80421e9f767SBen Widawsky return 0; 80521e9f767SBen Widawsky } 80621e9f767SBen Widawsky 8079cc238c7SDan Williams static void cxl_memdev_shutdown(struct device *dev) 8089cc238c7SDan Williams { 8099cc238c7SDan Williams struct cxl_memdev *cxlmd = to_cxl_memdev(dev); 8109cc238c7SDan Williams 8119cc238c7SDan Williams down_write(&cxl_memdev_rwsem); 8129cc238c7SDan Williams cxlmd->cxlm = NULL; 8139cc238c7SDan Williams up_write(&cxl_memdev_rwsem); 8149cc238c7SDan Williams } 8159cc238c7SDan Williams 8169cc238c7SDan Williams static const struct cdevm_file_operations cxl_memdev_fops = { 8179cc238c7SDan Williams .fops = { 81821e9f767SBen Widawsky .owner = THIS_MODULE, 81921e9f767SBen Widawsky .unlocked_ioctl = cxl_memdev_ioctl, 82021e9f767SBen Widawsky .open = cxl_memdev_open, 82121e9f767SBen Widawsky .release = cxl_memdev_release_file, 82221e9f767SBen Widawsky .compat_ioctl = compat_ptr_ioctl, 82321e9f767SBen Widawsky .llseek = noop_llseek, 8249cc238c7SDan Williams }, 8259cc238c7SDan Williams .shutdown = cxl_memdev_shutdown, 82621e9f767SBen Widawsky }; 82721e9f767SBen Widawsky 82821e9f767SBen Widawsky static inline struct cxl_mem_command *cxl_mem_find_command(u16 opcode) 82921e9f767SBen Widawsky { 83021e9f767SBen Widawsky struct cxl_mem_command *c; 83121e9f767SBen Widawsky 83221e9f767SBen Widawsky cxl_for_each_cmd(c) 83321e9f767SBen Widawsky if (c->opcode == opcode) 83421e9f767SBen Widawsky return c; 83521e9f767SBen Widawsky 83621e9f767SBen Widawsky return NULL; 83721e9f767SBen Widawsky } 83821e9f767SBen Widawsky 83921e9f767SBen Widawsky /** 84021e9f767SBen Widawsky * cxl_mem_mbox_send_cmd() - Send a mailbox command to a memory device. 84121e9f767SBen Widawsky * @cxlm: The CXL memory device to communicate with. 84221e9f767SBen Widawsky * @opcode: Opcode for the mailbox command. 
84321e9f767SBen Widawsky * @in: The input payload for the mailbox command. 84421e9f767SBen Widawsky * @in_size: The length of the input payload 84521e9f767SBen Widawsky * @out: Caller allocated buffer for the output. 84621e9f767SBen Widawsky * @out_size: Expected size of output. 84721e9f767SBen Widawsky * 84821e9f767SBen Widawsky * Context: Any context. Will acquire and release mbox_mutex. 84921e9f767SBen Widawsky * Return: 85021e9f767SBen Widawsky * * %>=0 - Number of bytes returned in @out. 85121e9f767SBen Widawsky * * %-E2BIG - Payload is too large for hardware. 85221e9f767SBen Widawsky * * %-EBUSY - Couldn't acquire exclusive mailbox access. 85321e9f767SBen Widawsky * * %-EFAULT - Hardware error occurred. 85421e9f767SBen Widawsky * * %-ENXIO - Command completed, but device reported an error. 85521e9f767SBen Widawsky * * %-EIO - Unexpected output size. 85621e9f767SBen Widawsky * 85721e9f767SBen Widawsky * Mailbox commands may execute successfully yet the device itself reported an 85821e9f767SBen Widawsky * error. While this distinction can be useful for commands from userspace, the 85921e9f767SBen Widawsky * kernel will only be able to use results when both are successful. 
86021e9f767SBen Widawsky * 86121e9f767SBen Widawsky * See __cxl_mem_mbox_send_cmd() 86221e9f767SBen Widawsky */ 86321e9f767SBen Widawsky static int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, 86421e9f767SBen Widawsky void *in, size_t in_size, 86521e9f767SBen Widawsky void *out, size_t out_size) 86621e9f767SBen Widawsky { 86721e9f767SBen Widawsky const struct cxl_mem_command *cmd = cxl_mem_find_command(opcode); 86821e9f767SBen Widawsky struct mbox_cmd mbox_cmd = { 86921e9f767SBen Widawsky .opcode = opcode, 87021e9f767SBen Widawsky .payload_in = in, 87121e9f767SBen Widawsky .size_in = in_size, 87221e9f767SBen Widawsky .size_out = out_size, 87321e9f767SBen Widawsky .payload_out = out, 87421e9f767SBen Widawsky }; 87521e9f767SBen Widawsky int rc; 87621e9f767SBen Widawsky 87721e9f767SBen Widawsky if (out_size > cxlm->payload_size) 87821e9f767SBen Widawsky return -E2BIG; 87921e9f767SBen Widawsky 88021e9f767SBen Widawsky rc = cxl_mem_mbox_get(cxlm); 88121e9f767SBen Widawsky if (rc) 88221e9f767SBen Widawsky return rc; 88321e9f767SBen Widawsky 88421e9f767SBen Widawsky rc = __cxl_mem_mbox_send_cmd(cxlm, &mbox_cmd); 88521e9f767SBen Widawsky cxl_mem_mbox_put(cxlm); 88621e9f767SBen Widawsky if (rc) 88721e9f767SBen Widawsky return rc; 88821e9f767SBen Widawsky 88921e9f767SBen Widawsky /* TODO: Map return code to proper kernel style errno */ 89021e9f767SBen Widawsky if (mbox_cmd.return_code != CXL_MBOX_SUCCESS) 89121e9f767SBen Widawsky return -ENXIO; 89221e9f767SBen Widawsky 89321e9f767SBen Widawsky /* 89421e9f767SBen Widawsky * Variable sized commands can't be validated and so it's up to the 89521e9f767SBen Widawsky * caller to do that if they wish. 
89621e9f767SBen Widawsky */ 89721e9f767SBen Widawsky if (cmd->info.size_out >= 0 && mbox_cmd.size_out != out_size) 89821e9f767SBen Widawsky return -EIO; 89921e9f767SBen Widawsky 90021e9f767SBen Widawsky return 0; 90121e9f767SBen Widawsky } 90221e9f767SBen Widawsky 90321e9f767SBen Widawsky static int cxl_mem_setup_mailbox(struct cxl_mem *cxlm) 90421e9f767SBen Widawsky { 90521e9f767SBen Widawsky const int cap = readl(cxlm->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET); 90621e9f767SBen Widawsky 90721e9f767SBen Widawsky cxlm->payload_size = 90821e9f767SBen Widawsky 1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap); 90921e9f767SBen Widawsky 91021e9f767SBen Widawsky /* 91121e9f767SBen Widawsky * CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register 91221e9f767SBen Widawsky * 91321e9f767SBen Widawsky * If the size is too small, mandatory commands will not work and so 91421e9f767SBen Widawsky * there's no point in going forward. If the size is too large, there's 91521e9f767SBen Widawsky * no harm is soft limiting it. 
91621e9f767SBen Widawsky */ 91721e9f767SBen Widawsky cxlm->payload_size = min_t(size_t, cxlm->payload_size, SZ_1M); 91821e9f767SBen Widawsky if (cxlm->payload_size < 256) { 91921e9f767SBen Widawsky dev_err(&cxlm->pdev->dev, "Mailbox is too small (%zub)", 92021e9f767SBen Widawsky cxlm->payload_size); 92121e9f767SBen Widawsky return -ENXIO; 92221e9f767SBen Widawsky } 92321e9f767SBen Widawsky 92421e9f767SBen Widawsky dev_dbg(&cxlm->pdev->dev, "Mailbox payload sized %zu", 92521e9f767SBen Widawsky cxlm->payload_size); 92621e9f767SBen Widawsky 92721e9f767SBen Widawsky return 0; 92821e9f767SBen Widawsky } 92921e9f767SBen Widawsky 9301b0a1a2aSBen Widawsky static struct cxl_mem *cxl_mem_create(struct pci_dev *pdev) 93121e9f767SBen Widawsky { 93221e9f767SBen Widawsky struct device *dev = &pdev->dev; 93321e9f767SBen Widawsky struct cxl_mem *cxlm; 93421e9f767SBen Widawsky 9355d0c6f02SBen Widawsky cxlm = devm_kzalloc(dev, sizeof(*cxlm), GFP_KERNEL); 93621e9f767SBen Widawsky if (!cxlm) { 93721e9f767SBen Widawsky dev_err(dev, "No memory available\n"); 9381b0a1a2aSBen Widawsky return ERR_PTR(-ENOMEM); 93921e9f767SBen Widawsky } 94021e9f767SBen Widawsky 9411b0a1a2aSBen Widawsky mutex_init(&cxlm->mbox_mutex); 9421b0a1a2aSBen Widawsky cxlm->pdev = pdev; 9431b0a1a2aSBen Widawsky cxlm->enabled_cmds = 9441b0a1a2aSBen Widawsky devm_kmalloc_array(dev, BITS_TO_LONGS(cxl_cmd_count), 9451b0a1a2aSBen Widawsky sizeof(unsigned long), 9461b0a1a2aSBen Widawsky GFP_KERNEL | __GFP_ZERO); 9471b0a1a2aSBen Widawsky if (!cxlm->enabled_cmds) { 9481b0a1a2aSBen Widawsky dev_err(dev, "No memory available for bitmap\n"); 9491b0a1a2aSBen Widawsky return ERR_PTR(-ENOMEM); 9501b0a1a2aSBen Widawsky } 9511b0a1a2aSBen Widawsky 9521b0a1a2aSBen Widawsky return cxlm; 9531b0a1a2aSBen Widawsky } 9541b0a1a2aSBen Widawsky 95507d62eacSIra Weiny static void __iomem *cxl_mem_map_regblock(struct cxl_mem *cxlm, 95607d62eacSIra Weiny u8 bar, u64 offset) 9571b0a1a2aSBen Widawsky { 9581b0a1a2aSBen Widawsky struct pci_dev *pdev 
= cxlm->pdev; 9591b0a1a2aSBen Widawsky struct device *dev = &pdev->dev; 960f8a7e8c2SIra Weiny void __iomem *addr; 9611b0a1a2aSBen Widawsky 96221e9f767SBen Widawsky /* Basic sanity check that BAR is big enough */ 96321e9f767SBen Widawsky if (pci_resource_len(pdev, bar) < offset) { 96421e9f767SBen Widawsky dev_err(dev, "BAR%d: %pr: too small (offset: %#llx)\n", bar, 96521e9f767SBen Widawsky &pdev->resource[bar], (unsigned long long)offset); 9666630d31cSBen Widawsky return IOMEM_ERR_PTR(-ENXIO); 96721e9f767SBen Widawsky } 96821e9f767SBen Widawsky 96930af9729SIra Weiny addr = pci_iomap(pdev, bar, 0); 970f8a7e8c2SIra Weiny if (!addr) { 97121e9f767SBen Widawsky dev_err(dev, "failed to map registers\n"); 972f8a7e8c2SIra Weiny return addr; 97321e9f767SBen Widawsky } 97421e9f767SBen Widawsky 975f8a7e8c2SIra Weiny dev_dbg(dev, "Mapped CXL Memory Device resource bar %u @ %#llx\n", 976f8a7e8c2SIra Weiny bar, offset); 9776630d31cSBen Widawsky 97830af9729SIra Weiny return addr; 97930af9729SIra Weiny } 98030af9729SIra Weiny 98130af9729SIra Weiny static void cxl_mem_unmap_regblock(struct cxl_mem *cxlm, void __iomem *base) 98230af9729SIra Weiny { 98330af9729SIra Weiny pci_iounmap(cxlm->pdev, base); 98421e9f767SBen Widawsky } 98521e9f767SBen Widawsky 98621e9f767SBen Widawsky static int cxl_mem_dvsec(struct pci_dev *pdev, int dvsec) 98721e9f767SBen Widawsky { 98821e9f767SBen Widawsky int pos; 98921e9f767SBen Widawsky 99021e9f767SBen Widawsky pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DVSEC); 99121e9f767SBen Widawsky if (!pos) 99221e9f767SBen Widawsky return 0; 99321e9f767SBen Widawsky 99421e9f767SBen Widawsky while (pos) { 99521e9f767SBen Widawsky u16 vendor, id; 99621e9f767SBen Widawsky 99721e9f767SBen Widawsky pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER1, &vendor); 99821e9f767SBen Widawsky pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER2, &id); 99921e9f767SBen Widawsky if (vendor == PCI_DVSEC_VENDOR_ID_CXL && dvsec == id) 100021e9f767SBen Widawsky return pos; 
100121e9f767SBen Widawsky 100221e9f767SBen Widawsky pos = pci_find_next_ext_capability(pdev, pos, 100321e9f767SBen Widawsky PCI_EXT_CAP_ID_DVSEC); 100421e9f767SBen Widawsky } 100521e9f767SBen Widawsky 100621e9f767SBen Widawsky return 0; 100721e9f767SBen Widawsky } 100821e9f767SBen Widawsky 100930af9729SIra Weiny static int cxl_probe_regs(struct cxl_mem *cxlm, void __iomem *base, 101030af9729SIra Weiny struct cxl_register_map *map) 101130af9729SIra Weiny { 101230af9729SIra Weiny struct pci_dev *pdev = cxlm->pdev; 101330af9729SIra Weiny struct device *dev = &pdev->dev; 101408422378SBen Widawsky struct cxl_component_reg_map *comp_map; 101530af9729SIra Weiny struct cxl_device_reg_map *dev_map; 101630af9729SIra Weiny 101730af9729SIra Weiny switch (map->reg_type) { 101808422378SBen Widawsky case CXL_REGLOC_RBI_COMPONENT: 101908422378SBen Widawsky comp_map = &map->component_map; 102008422378SBen Widawsky cxl_probe_component_regs(dev, base, comp_map); 102108422378SBen Widawsky if (!comp_map->hdm_decoder.valid) { 102208422378SBen Widawsky dev_err(dev, "HDM decoder registers not found\n"); 102308422378SBen Widawsky return -ENXIO; 102408422378SBen Widawsky } 102508422378SBen Widawsky 102608422378SBen Widawsky dev_dbg(dev, "Set up component registers\n"); 102708422378SBen Widawsky break; 102830af9729SIra Weiny case CXL_REGLOC_RBI_MEMDEV: 102930af9729SIra Weiny dev_map = &map->device_map; 103030af9729SIra Weiny cxl_probe_device_regs(dev, base, dev_map); 103130af9729SIra Weiny if (!dev_map->status.valid || !dev_map->mbox.valid || 103230af9729SIra Weiny !dev_map->memdev.valid) { 103330af9729SIra Weiny dev_err(dev, "registers not found: %s%s%s\n", 103430af9729SIra Weiny !dev_map->status.valid ? "status " : "", 103530af9729SIra Weiny !dev_map->mbox.valid ? "status " : "", 103630af9729SIra Weiny !dev_map->memdev.valid ? 
"status " : ""); 103730af9729SIra Weiny return -ENXIO; 103830af9729SIra Weiny } 103930af9729SIra Weiny 104030af9729SIra Weiny dev_dbg(dev, "Probing device registers...\n"); 104130af9729SIra Weiny break; 104230af9729SIra Weiny default: 104330af9729SIra Weiny break; 104430af9729SIra Weiny } 104530af9729SIra Weiny 104630af9729SIra Weiny return 0; 104730af9729SIra Weiny } 104830af9729SIra Weiny 104930af9729SIra Weiny static int cxl_map_regs(struct cxl_mem *cxlm, struct cxl_register_map *map) 105030af9729SIra Weiny { 105130af9729SIra Weiny struct pci_dev *pdev = cxlm->pdev; 105230af9729SIra Weiny struct device *dev = &pdev->dev; 105330af9729SIra Weiny 105430af9729SIra Weiny switch (map->reg_type) { 105508422378SBen Widawsky case CXL_REGLOC_RBI_COMPONENT: 105608422378SBen Widawsky cxl_map_component_regs(pdev, &cxlm->regs.component, map); 105708422378SBen Widawsky dev_dbg(dev, "Mapping component registers...\n"); 105808422378SBen Widawsky break; 105930af9729SIra Weiny case CXL_REGLOC_RBI_MEMDEV: 106030af9729SIra Weiny cxl_map_device_regs(pdev, &cxlm->regs.device_regs, map); 106130af9729SIra Weiny dev_dbg(dev, "Probing device registers...\n"); 106230af9729SIra Weiny break; 106330af9729SIra Weiny default: 106430af9729SIra Weiny break; 106530af9729SIra Weiny } 106630af9729SIra Weiny 106730af9729SIra Weiny return 0; 106830af9729SIra Weiny } 106930af9729SIra Weiny 107007d62eacSIra Weiny static void cxl_decode_register_block(u32 reg_lo, u32 reg_hi, 107107d62eacSIra Weiny u8 *bar, u64 *offset, u8 *reg_type) 107207d62eacSIra Weiny { 107307d62eacSIra Weiny *offset = ((u64)reg_hi << 32) | (reg_lo & CXL_REGLOC_ADDR_MASK); 107407d62eacSIra Weiny *bar = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo); 107507d62eacSIra Weiny *reg_type = FIELD_GET(CXL_REGLOC_RBI_MASK, reg_lo); 107607d62eacSIra Weiny } 107707d62eacSIra Weiny 10781d5a4159SBen Widawsky /** 10791d5a4159SBen Widawsky * cxl_mem_setup_regs() - Setup necessary MMIO. 
10801d5a4159SBen Widawsky * @cxlm: The CXL memory device to communicate with. 10811d5a4159SBen Widawsky * 10821d5a4159SBen Widawsky * Return: 0 if all necessary registers mapped. 10831d5a4159SBen Widawsky * 10841d5a4159SBen Widawsky * A memory device is required by spec to implement a certain set of MMIO 10851d5a4159SBen Widawsky * regions. The purpose of this function is to enumerate and map those 10861d5a4159SBen Widawsky * registers. 10871d5a4159SBen Widawsky */ 10881d5a4159SBen Widawsky static int cxl_mem_setup_regs(struct cxl_mem *cxlm) 10891d5a4159SBen Widawsky { 10901d5a4159SBen Widawsky struct pci_dev *pdev = cxlm->pdev; 10911d5a4159SBen Widawsky struct device *dev = &pdev->dev; 10921d5a4159SBen Widawsky u32 regloc_size, regblocks; 10936630d31cSBen Widawsky void __iomem *base; 10946630d31cSBen Widawsky int regloc, i; 109530af9729SIra Weiny struct cxl_register_map *map, *n; 109630af9729SIra Weiny LIST_HEAD(register_maps); 109730af9729SIra Weiny int ret = 0; 10981d5a4159SBen Widawsky 10994ad6181eSBen Widawsky regloc = cxl_mem_dvsec(pdev, PCI_DVSEC_ID_CXL_REGLOC_DVSEC_ID); 11001d5a4159SBen Widawsky if (!regloc) { 11011d5a4159SBen Widawsky dev_err(dev, "register location dvsec not found\n"); 11021d5a4159SBen Widawsky return -ENXIO; 11031d5a4159SBen Widawsky } 11041d5a4159SBen Widawsky 1105f8a7e8c2SIra Weiny if (pci_request_mem_regions(pdev, pci_name(pdev))) 1106f8a7e8c2SIra Weiny return -ENODEV; 1107f8a7e8c2SIra Weiny 11081d5a4159SBen Widawsky /* Get the size of the Register Locator DVSEC */ 11091d5a4159SBen Widawsky pci_read_config_dword(pdev, regloc + PCI_DVSEC_HEADER1, ®loc_size); 11101d5a4159SBen Widawsky regloc_size = FIELD_GET(PCI_DVSEC_HEADER1_LENGTH_MASK, regloc_size); 11111d5a4159SBen Widawsky 11121d5a4159SBen Widawsky regloc += PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET; 11131d5a4159SBen Widawsky regblocks = (regloc_size - PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET) / 8; 11141d5a4159SBen Widawsky 11151d5a4159SBen Widawsky for (i = 0; i < regblocks; i++, regloc 
+= 8) { 11161d5a4159SBen Widawsky u32 reg_lo, reg_hi; 11171d5a4159SBen Widawsky u8 reg_type; 111807d62eacSIra Weiny u64 offset; 111907d62eacSIra Weiny u8 bar; 11201d5a4159SBen Widawsky 11211d5a4159SBen Widawsky pci_read_config_dword(pdev, regloc, ®_lo); 11221d5a4159SBen Widawsky pci_read_config_dword(pdev, regloc + 4, ®_hi); 11231d5a4159SBen Widawsky 112407d62eacSIra Weiny cxl_decode_register_block(reg_lo, reg_hi, &bar, &offset, 112507d62eacSIra Weiny ®_type); 112607d62eacSIra Weiny 112707d62eacSIra Weiny dev_dbg(dev, "Found register block in bar %u @ 0x%llx of type %u\n", 112807d62eacSIra Weiny bar, offset, reg_type); 11291d5a4159SBen Widawsky 1130*1e39db57SBen Widawsky /* Ignore unknown register block types */ 1131*1e39db57SBen Widawsky if (reg_type > CXL_REGLOC_RBI_MEMDEV) 1132*1e39db57SBen Widawsky continue; 1133*1e39db57SBen Widawsky 1134*1e39db57SBen Widawsky map = kzalloc(sizeof(*map), GFP_KERNEL); 1135*1e39db57SBen Widawsky if (!map) { 1136*1e39db57SBen Widawsky ret = -ENOMEM; 1137*1e39db57SBen Widawsky goto free_maps; 1138*1e39db57SBen Widawsky } 1139*1e39db57SBen Widawsky 1140*1e39db57SBen Widawsky list_add(&map->list, ®ister_maps); 1141*1e39db57SBen Widawsky 114207d62eacSIra Weiny base = cxl_mem_map_regblock(cxlm, bar, offset); 114330af9729SIra Weiny if (!base) { 114430af9729SIra Weiny ret = -ENOMEM; 114530af9729SIra Weiny goto free_maps; 11461d5a4159SBen Widawsky } 11471d5a4159SBen Widawsky 114830af9729SIra Weiny map->barno = bar; 114930af9729SIra Weiny map->block_offset = offset; 115030af9729SIra Weiny map->reg_type = reg_type; 115130af9729SIra Weiny 115230af9729SIra Weiny ret = cxl_probe_regs(cxlm, base + offset, map); 115330af9729SIra Weiny 115430af9729SIra Weiny /* Always unmap the regblock regardless of probe success */ 115530af9729SIra Weiny cxl_mem_unmap_regblock(cxlm, base); 115630af9729SIra Weiny 115730af9729SIra Weiny if (ret) 115830af9729SIra Weiny goto free_maps; 11591d5a4159SBen Widawsky } 11601d5a4159SBen Widawsky 11619a016527SIra Weiny 
pci_release_mem_regions(pdev); 11629a016527SIra Weiny 116330af9729SIra Weiny list_for_each_entry(map, ®ister_maps, list) { 116430af9729SIra Weiny ret = cxl_map_regs(cxlm, map); 116530af9729SIra Weiny if (ret) 116630af9729SIra Weiny goto free_maps; 11671d5a4159SBen Widawsky } 11681d5a4159SBen Widawsky 116930af9729SIra Weiny free_maps: 117030af9729SIra Weiny list_for_each_entry_safe(map, n, ®ister_maps, list) { 117130af9729SIra Weiny list_del(&map->list); 117230af9729SIra Weiny kfree(map); 117330af9729SIra Weiny } 117430af9729SIra Weiny 117530af9729SIra Weiny return ret; 11761d5a4159SBen Widawsky } 11771d5a4159SBen Widawsky 117821e9f767SBen Widawsky static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out) 117921e9f767SBen Widawsky { 118021e9f767SBen Widawsky u32 remaining = size; 118121e9f767SBen Widawsky u32 offset = 0; 118221e9f767SBen Widawsky 118321e9f767SBen Widawsky while (remaining) { 118421e9f767SBen Widawsky u32 xfer_size = min_t(u32, remaining, cxlm->payload_size); 118521e9f767SBen Widawsky struct cxl_mbox_get_log { 118621e9f767SBen Widawsky uuid_t uuid; 118721e9f767SBen Widawsky __le32 offset; 118821e9f767SBen Widawsky __le32 length; 118921e9f767SBen Widawsky } __packed log = { 119021e9f767SBen Widawsky .uuid = *uuid, 119121e9f767SBen Widawsky .offset = cpu_to_le32(offset), 119221e9f767SBen Widawsky .length = cpu_to_le32(xfer_size) 119321e9f767SBen Widawsky }; 119421e9f767SBen Widawsky int rc; 119521e9f767SBen Widawsky 119621e9f767SBen Widawsky rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_LOG, &log, 119721e9f767SBen Widawsky sizeof(log), out, xfer_size); 119821e9f767SBen Widawsky if (rc < 0) 119921e9f767SBen Widawsky return rc; 120021e9f767SBen Widawsky 120121e9f767SBen Widawsky out += xfer_size; 120221e9f767SBen Widawsky remaining -= xfer_size; 120321e9f767SBen Widawsky offset += xfer_size; 120421e9f767SBen Widawsky } 120521e9f767SBen Widawsky 120621e9f767SBen Widawsky return 0; 120721e9f767SBen Widawsky } 120821e9f767SBen 
Widawsky 120921e9f767SBen Widawsky /** 121021e9f767SBen Widawsky * cxl_walk_cel() - Walk through the Command Effects Log. 121121e9f767SBen Widawsky * @cxlm: Device. 121221e9f767SBen Widawsky * @size: Length of the Command Effects Log. 121321e9f767SBen Widawsky * @cel: CEL 121421e9f767SBen Widawsky * 121521e9f767SBen Widawsky * Iterate over each entry in the CEL and determine if the driver supports the 121621e9f767SBen Widawsky * command. If so, the command is enabled for the device and can be used later. 121721e9f767SBen Widawsky */ 121821e9f767SBen Widawsky static void cxl_walk_cel(struct cxl_mem *cxlm, size_t size, u8 *cel) 121921e9f767SBen Widawsky { 122021e9f767SBen Widawsky struct cel_entry { 122121e9f767SBen Widawsky __le16 opcode; 122221e9f767SBen Widawsky __le16 effect; 122321e9f767SBen Widawsky } __packed * cel_entry; 122421e9f767SBen Widawsky const int cel_entries = size / sizeof(*cel_entry); 122521e9f767SBen Widawsky int i; 122621e9f767SBen Widawsky 122721e9f767SBen Widawsky cel_entry = (struct cel_entry *)cel; 122821e9f767SBen Widawsky 122921e9f767SBen Widawsky for (i = 0; i < cel_entries; i++) { 123021e9f767SBen Widawsky u16 opcode = le16_to_cpu(cel_entry[i].opcode); 123121e9f767SBen Widawsky struct cxl_mem_command *cmd = cxl_mem_find_command(opcode); 123221e9f767SBen Widawsky 123321e9f767SBen Widawsky if (!cmd) { 123421e9f767SBen Widawsky dev_dbg(&cxlm->pdev->dev, 123521e9f767SBen Widawsky "Opcode 0x%04x unsupported by driver", opcode); 123621e9f767SBen Widawsky continue; 123721e9f767SBen Widawsky } 123821e9f767SBen Widawsky 123921e9f767SBen Widawsky set_bit(cmd->info.id, cxlm->enabled_cmds); 124021e9f767SBen Widawsky } 124121e9f767SBen Widawsky } 124221e9f767SBen Widawsky 124321e9f767SBen Widawsky struct cxl_mbox_get_supported_logs { 124421e9f767SBen Widawsky __le16 entries; 124521e9f767SBen Widawsky u8 rsvd[6]; 124621e9f767SBen Widawsky struct gsl_entry { 124721e9f767SBen Widawsky uuid_t uuid; 124821e9f767SBen Widawsky __le32 size; 124921e9f767SBen 
Widawsky } __packed entry[]; 125021e9f767SBen Widawsky } __packed; 125121e9f767SBen Widawsky 125221e9f767SBen Widawsky static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_mem *cxlm) 125321e9f767SBen Widawsky { 125421e9f767SBen Widawsky struct cxl_mbox_get_supported_logs *ret; 125521e9f767SBen Widawsky int rc; 125621e9f767SBen Widawsky 125721e9f767SBen Widawsky ret = kvmalloc(cxlm->payload_size, GFP_KERNEL); 125821e9f767SBen Widawsky if (!ret) 125921e9f767SBen Widawsky return ERR_PTR(-ENOMEM); 126021e9f767SBen Widawsky 126121e9f767SBen Widawsky rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL, 126221e9f767SBen Widawsky 0, ret, cxlm->payload_size); 126321e9f767SBen Widawsky if (rc < 0) { 126421e9f767SBen Widawsky kvfree(ret); 126521e9f767SBen Widawsky return ERR_PTR(rc); 126621e9f767SBen Widawsky } 126721e9f767SBen Widawsky 126821e9f767SBen Widawsky return ret; 126921e9f767SBen Widawsky } 127021e9f767SBen Widawsky 127121e9f767SBen Widawsky /** 127221e9f767SBen Widawsky * cxl_mem_enumerate_cmds() - Enumerate commands for a device. 127321e9f767SBen Widawsky * @cxlm: The device. 127421e9f767SBen Widawsky * 127521e9f767SBen Widawsky * Returns 0 if enumerate completed successfully. 127621e9f767SBen Widawsky * 127721e9f767SBen Widawsky * CXL devices have optional support for certain commands. This function will 127821e9f767SBen Widawsky * determine the set of supported commands for the hardware and update the 127921e9f767SBen Widawsky * enabled_cmds bitmap in the @cxlm. 
128021e9f767SBen Widawsky */ 128121e9f767SBen Widawsky static int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm) 128221e9f767SBen Widawsky { 128321e9f767SBen Widawsky struct cxl_mbox_get_supported_logs *gsl; 128421e9f767SBen Widawsky struct device *dev = &cxlm->pdev->dev; 128521e9f767SBen Widawsky struct cxl_mem_command *cmd; 128621e9f767SBen Widawsky int i, rc; 128721e9f767SBen Widawsky 128821e9f767SBen Widawsky gsl = cxl_get_gsl(cxlm); 128921e9f767SBen Widawsky if (IS_ERR(gsl)) 129021e9f767SBen Widawsky return PTR_ERR(gsl); 129121e9f767SBen Widawsky 129221e9f767SBen Widawsky rc = -ENOENT; 129321e9f767SBen Widawsky for (i = 0; i < le16_to_cpu(gsl->entries); i++) { 129421e9f767SBen Widawsky u32 size = le32_to_cpu(gsl->entry[i].size); 129521e9f767SBen Widawsky uuid_t uuid = gsl->entry[i].uuid; 129621e9f767SBen Widawsky u8 *log; 129721e9f767SBen Widawsky 129821e9f767SBen Widawsky dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size); 129921e9f767SBen Widawsky 130021e9f767SBen Widawsky if (!uuid_equal(&uuid, &log_uuid[CEL_UUID])) 130121e9f767SBen Widawsky continue; 130221e9f767SBen Widawsky 130321e9f767SBen Widawsky log = kvmalloc(size, GFP_KERNEL); 130421e9f767SBen Widawsky if (!log) { 130521e9f767SBen Widawsky rc = -ENOMEM; 130621e9f767SBen Widawsky goto out; 130721e9f767SBen Widawsky } 130821e9f767SBen Widawsky 130921e9f767SBen Widawsky rc = cxl_xfer_log(cxlm, &uuid, size, log); 131021e9f767SBen Widawsky if (rc) { 131121e9f767SBen Widawsky kvfree(log); 131221e9f767SBen Widawsky goto out; 131321e9f767SBen Widawsky } 131421e9f767SBen Widawsky 131521e9f767SBen Widawsky cxl_walk_cel(cxlm, size, log); 131621e9f767SBen Widawsky kvfree(log); 131721e9f767SBen Widawsky 131821e9f767SBen Widawsky /* In case CEL was bogus, enable some default commands. 
*/ 131921e9f767SBen Widawsky cxl_for_each_cmd(cmd) 132021e9f767SBen Widawsky if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE) 132121e9f767SBen Widawsky set_bit(cmd->info.id, cxlm->enabled_cmds); 132221e9f767SBen Widawsky 132321e9f767SBen Widawsky /* Found the required CEL */ 132421e9f767SBen Widawsky rc = 0; 132521e9f767SBen Widawsky } 132621e9f767SBen Widawsky 132721e9f767SBen Widawsky out: 132821e9f767SBen Widawsky kvfree(gsl); 132921e9f767SBen Widawsky return rc; 133021e9f767SBen Widawsky } 133121e9f767SBen Widawsky 133221e9f767SBen Widawsky /** 133321e9f767SBen Widawsky * cxl_mem_identify() - Send the IDENTIFY command to the device. 133421e9f767SBen Widawsky * @cxlm: The device to identify. 133521e9f767SBen Widawsky * 133621e9f767SBen Widawsky * Return: 0 if identify was executed successfully. 133721e9f767SBen Widawsky * 133821e9f767SBen Widawsky * This will dispatch the identify command to the device and on success populate 133921e9f767SBen Widawsky * structures to be exported to sysfs. 
134021e9f767SBen Widawsky */ 134121e9f767SBen Widawsky static int cxl_mem_identify(struct cxl_mem *cxlm) 134221e9f767SBen Widawsky { 134321e9f767SBen Widawsky /* See CXL 2.0 Table 175 Identify Memory Device Output Payload */ 134421e9f767SBen Widawsky struct cxl_mbox_identify { 134521e9f767SBen Widawsky char fw_revision[0x10]; 134621e9f767SBen Widawsky __le64 total_capacity; 134721e9f767SBen Widawsky __le64 volatile_capacity; 134821e9f767SBen Widawsky __le64 persistent_capacity; 134921e9f767SBen Widawsky __le64 partition_align; 135021e9f767SBen Widawsky __le16 info_event_log_size; 135121e9f767SBen Widawsky __le16 warning_event_log_size; 135221e9f767SBen Widawsky __le16 failure_event_log_size; 135321e9f767SBen Widawsky __le16 fatal_event_log_size; 135421e9f767SBen Widawsky __le32 lsa_size; 135521e9f767SBen Widawsky u8 poison_list_max_mer[3]; 135621e9f767SBen Widawsky __le16 inject_poison_limit; 135721e9f767SBen Widawsky u8 poison_caps; 135821e9f767SBen Widawsky u8 qos_telemetry_caps; 135921e9f767SBen Widawsky } __packed id; 136021e9f767SBen Widawsky int rc; 136121e9f767SBen Widawsky 136221e9f767SBen Widawsky rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id, 136321e9f767SBen Widawsky sizeof(id)); 136421e9f767SBen Widawsky if (rc < 0) 136521e9f767SBen Widawsky return rc; 136621e9f767SBen Widawsky 136721e9f767SBen Widawsky /* 136821e9f767SBen Widawsky * TODO: enumerate DPA map, as 'ram' and 'pmem' do not alias. 
136921e9f767SBen Widawsky * For now, only the capacity is exported in sysfs 137021e9f767SBen Widawsky */ 137121e9f767SBen Widawsky cxlm->ram_range.start = 0; 137221e9f767SBen Widawsky cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) * SZ_256M - 1; 137321e9f767SBen Widawsky 137421e9f767SBen Widawsky cxlm->pmem_range.start = 0; 137521e9f767SBen Widawsky cxlm->pmem_range.end = 137621e9f767SBen Widawsky le64_to_cpu(id.persistent_capacity) * SZ_256M - 1; 137721e9f767SBen Widawsky 1378199cf8c3SVishal Verma cxlm->lsa_size = le32_to_cpu(id.lsa_size); 137921e9f767SBen Widawsky memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision)); 138021e9f767SBen Widawsky 138121e9f767SBen Widawsky return 0; 138221e9f767SBen Widawsky } 138321e9f767SBen Widawsky 138421e9f767SBen Widawsky static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id) 138521e9f767SBen Widawsky { 138621083f51SDan Williams struct cxl_memdev *cxlmd; 13871b0a1a2aSBen Widawsky struct cxl_mem *cxlm; 13881d5a4159SBen Widawsky int rc; 138921e9f767SBen Widawsky 139021e9f767SBen Widawsky rc = pcim_enable_device(pdev); 139121e9f767SBen Widawsky if (rc) 139221e9f767SBen Widawsky return rc; 139321e9f767SBen Widawsky 13941b0a1a2aSBen Widawsky cxlm = cxl_mem_create(pdev); 13951b0a1a2aSBen Widawsky if (IS_ERR(cxlm)) 13961b0a1a2aSBen Widawsky return PTR_ERR(cxlm); 13971b0a1a2aSBen Widawsky 139821e9f767SBen Widawsky rc = cxl_mem_setup_regs(cxlm); 139921e9f767SBen Widawsky if (rc) 140021e9f767SBen Widawsky return rc; 140121e9f767SBen Widawsky 140221e9f767SBen Widawsky rc = cxl_mem_setup_mailbox(cxlm); 140321e9f767SBen Widawsky if (rc) 140421e9f767SBen Widawsky return rc; 140521e9f767SBen Widawsky 140621e9f767SBen Widawsky rc = cxl_mem_enumerate_cmds(cxlm); 140721e9f767SBen Widawsky if (rc) 140821e9f767SBen Widawsky return rc; 140921e9f767SBen Widawsky 141021e9f767SBen Widawsky rc = cxl_mem_identify(cxlm); 141121e9f767SBen Widawsky if (rc) 141221e9f767SBen Widawsky return rc; 
141321e9f767SBen Widawsky 14149cc238c7SDan Williams cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlm, &cxl_memdev_fops); 141521083f51SDan Williams if (IS_ERR(cxlmd)) 141621083f51SDan Williams return PTR_ERR(cxlmd); 141721083f51SDan Williams 141821083f51SDan Williams if (range_len(&cxlm->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM)) 141921083f51SDan Williams rc = devm_cxl_add_nvdimm(&pdev->dev, cxlmd); 142021083f51SDan Williams 142121083f51SDan Williams return rc; 142221e9f767SBen Widawsky } 142321e9f767SBen Widawsky 142421e9f767SBen Widawsky static const struct pci_device_id cxl_mem_pci_tbl[] = { 142521e9f767SBen Widawsky /* PCI class code for CXL.mem Type-3 Devices */ 142621e9f767SBen Widawsky { PCI_DEVICE_CLASS((PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF), ~0)}, 142721e9f767SBen Widawsky { /* terminate list */ }, 142821e9f767SBen Widawsky }; 142921e9f767SBen Widawsky MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl); 143021e9f767SBen Widawsky 143121e9f767SBen Widawsky static struct pci_driver cxl_mem_driver = { 143221e9f767SBen Widawsky .name = KBUILD_MODNAME, 143321e9f767SBen Widawsky .id_table = cxl_mem_pci_tbl, 143421e9f767SBen Widawsky .probe = cxl_mem_probe, 143521e9f767SBen Widawsky .driver = { 143621e9f767SBen Widawsky .probe_type = PROBE_PREFER_ASYNCHRONOUS, 143721e9f767SBen Widawsky }, 143821e9f767SBen Widawsky }; 143921e9f767SBen Widawsky 144021e9f767SBen Widawsky static __init int cxl_mem_init(void) 144121e9f767SBen Widawsky { 144221e9f767SBen Widawsky struct dentry *mbox_debugfs; 144321e9f767SBen Widawsky int rc; 144421e9f767SBen Widawsky 144521e9f767SBen Widawsky /* Double check the anonymous union trickery in struct cxl_regs */ 144621e9f767SBen Widawsky BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) != 144721e9f767SBen Widawsky offsetof(struct cxl_regs, device_regs.memdev)); 144821e9f767SBen Widawsky 14493d135db5SBen Widawsky rc = pci_register_driver(&cxl_mem_driver); 145021e9f767SBen Widawsky if (rc) 145121e9f767SBen Widawsky return rc; 145221e9f767SBen 
Widawsky 145321e9f767SBen Widawsky cxl_debugfs = debugfs_create_dir("cxl", NULL); 145421e9f767SBen Widawsky mbox_debugfs = debugfs_create_dir("mbox", cxl_debugfs); 145521e9f767SBen Widawsky debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs, 145621e9f767SBen Widawsky &cxl_raw_allow_all); 145721e9f767SBen Widawsky 145821e9f767SBen Widawsky return 0; 145921e9f767SBen Widawsky } 146021e9f767SBen Widawsky 146121e9f767SBen Widawsky static __exit void cxl_mem_exit(void) 146221e9f767SBen Widawsky { 146321e9f767SBen Widawsky debugfs_remove_recursive(cxl_debugfs); 146421e9f767SBen Widawsky pci_unregister_driver(&cxl_mem_driver); 146521e9f767SBen Widawsky } 146621e9f767SBen Widawsky 146721e9f767SBen Widawsky MODULE_LICENSE("GPL v2"); 146821e9f767SBen Widawsky module_init(cxl_mem_init); 146921e9f767SBen Widawsky module_exit(cxl_mem_exit); 147021e9f767SBen Widawsky MODULE_IMPORT_NS(CXL); 1471