// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <uapi/linux/cxl_mem.h>
#include <linux/security.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include "pci.h"
#include "cxl.h"
#include "mem.h"

/**
 * DOC: cxl pci
 *
 * This implements the PCI exclusive functionality for a CXL device as it is
 * defined by the Compute Express Link specification. CXL devices may surface
 * certain functionality even if CXL operation is not enabled.
 *
 * The driver has several responsibilities, mainly:
 *  - Create the memX device and register on the CXL bus.
 *  - Enumerate the device's register interfaces and map them.
 *  - Probe the device attributes to establish the sysfs interface.
 *  - Provide an IOCTL interface to userspace to communicate with the device
 *    for things like firmware update.
 */

#define cxl_doorbell_busy(cxlm)                                                \
	(readl((cxlm)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) &                  \
	 CXLDEV_MBOX_CTRL_DOORBELL)

/* CXL 2.0 - 8.2.8.4, mandatory 2 second mailbox timeout (in jiffies) */
#define CXL_MAILBOX_TIMEOUT_MS (2 * HZ)

enum opcode {
	CXL_MBOX_OP_INVALID		= 0x0000,
	CXL_MBOX_OP_RAW			= CXL_MBOX_OP_INVALID,
	CXL_MBOX_OP_GET_FW_INFO		= 0x0200,
	CXL_MBOX_OP_ACTIVATE_FW		= 0x0202,
	CXL_MBOX_OP_GET_SUPPORTED_LOGS	= 0x0400,
	CXL_MBOX_OP_GET_LOG		= 0x0401,
	CXL_MBOX_OP_IDENTIFY		= 0x4000,
	CXL_MBOX_OP_GET_PARTITION_INFO	= 0x4100,
	CXL_MBOX_OP_SET_PARTITION_INFO	= 0x4101,
	CXL_MBOX_OP_GET_LSA		= 0x4102,
	CXL_MBOX_OP_SET_LSA		= 0x4103,
	CXL_MBOX_OP_GET_HEALTH_INFO	= 0x4200,
	CXL_MBOX_OP_GET_ALERT_CONFIG	= 0x4201,
	CXL_MBOX_OP_SET_ALERT_CONFIG	= 0x4202,
	CXL_MBOX_OP_GET_SHUTDOWN_STATE	= 0x4203,
	CXL_MBOX_OP_SET_SHUTDOWN_STATE	= 0x4204,
	CXL_MBOX_OP_GET_POISON		= 0x4300,
	CXL_MBOX_OP_INJECT_POISON	= 0x4301,
	CXL_MBOX_OP_CLEAR_POISON	= 0x4302,
	CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS	= 0x4303,
	CXL_MBOX_OP_SCAN_MEDIA		= 0x4304,
	CXL_MBOX_OP_GET_SCAN_MEDIA	= 0x4305,
	CXL_MBOX_OP_MAX			= 0x10000
};

/**
 * struct mbox_cmd - A command to be submitted to hardware.
 * @opcode: (input) The command set and command submitted to hardware.
 * @payload_in: (input) Pointer to the input payload.
 * @payload_out: (output) Pointer to the output payload. Must be allocated by
 *		 the caller.
 * @size_in: (input) Number of bytes to load from @payload_in.
 * @size_out: (input) Max number of bytes loaded into @payload_out.
 *	      (output) Number of bytes generated by the device. For fixed size
 *	      output commands this is always expected to be deterministic. For
 *	      variable sized output commands, it tells the exact number of
 *	      bytes written.
 * @return_code: (output) Error code returned from hardware.
 *
 * This is the primary mechanism used to send commands to the hardware.
 * All the fields except @payload_* correspond exactly to the fields described
 * in the Command Register section of the CXL 2.0 spec, 8.2.8.4.5. @payload_in
 * and @payload_out are written to, and read from the Command Payload Registers
 * defined in CXL 2.0 8.2.8.4.8.
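 *
 * Example (illustrative sketch only; in-kernel callers normally go through
 * cxl_mem_mbox_send_cmd() below, which builds this structure internally):
 *
 *	u8 out[0x43];
 *	struct mbox_cmd cmd = {
 *		.opcode = CXL_MBOX_OP_IDENTIFY,
 *		.payload_out = out,
 *		.size_out = sizeof(out),
 *	};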
 */
struct mbox_cmd {
	u16 opcode;
	void *payload_in;
	void *payload_out;
	size_t size_in;
	size_t size_out;
	u16 return_code;
#define CXL_MBOX_SUCCESS 0
};

static int cxl_mem_major;
static DEFINE_IDA(cxl_memdev_ida);
static DECLARE_RWSEM(cxl_memdev_rwsem);
static struct dentry *cxl_debugfs;
static bool cxl_raw_allow_all;

enum {
	CEL_UUID,
	VENDOR_DEBUG_UUID,
};

/* See CXL 2.0 Table 170. Get Log Input Payload */
static const uuid_t log_uuid[] = {
	[CEL_UUID] = UUID_INIT(0xda9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79, 0x96,
			       0xb1, 0x62, 0x3b, 0x3f, 0x17),
	[VENDOR_DEBUG_UUID] = UUID_INIT(0xe1819d9, 0x11a9, 0x400c, 0x81, 0x1f,
					0xd6, 0x07, 0x19, 0x40, 0x3d, 0x86),
};

/**
 * struct cxl_mem_command - Driver representation of a memory device command
 * @info: Command information as it exists for the UAPI
 * @opcode: The actual bits used for the mailbox protocol
 * @flags: Set of flags affecting driver behavior.
 *
 *  * %CXL_CMD_FLAG_FORCE_ENABLE: In cases of error, commands with this flag
 *    will be enabled by the driver regardless of what hardware may have
 *    advertised.
 *
 * The cxl_mem_command is the driver's internal representation of commands that
 * are supported by the driver. Some of these commands may not be supported by
 * the hardware. The driver will use @info to validate the fields passed in by
 * the user then submit the @opcode to the hardware.
 *
 * See struct cxl_command_info.
 */
struct cxl_mem_command {
	struct cxl_command_info info;
	enum opcode opcode;
	u32 flags;
#define CXL_CMD_FLAG_NONE 0
#define CXL_CMD_FLAG_FORCE_ENABLE BIT(0)
};

#define CXL_CMD(_id, sin, sout, _flags)                                        \
	[CXL_MEM_COMMAND_ID_##_id] = {                                         \
		.info = {                                                      \
			.id = CXL_MEM_COMMAND_ID_##_id,                        \
			.size_in = sin,                                        \
			.size_out = sout,                                      \
		},                                                             \
		.opcode = CXL_MBOX_OP_##_id,                                   \
		.flags = _flags,                                               \
	}

/*
 * This table defines the supported mailbox commands for the driver. Each entry
 * embeds the UAPI &struct cxl_command_info that is reported to userspace.
 * Non-negative size values in the table are validated against the user's
 * input. For example, if size_in is 0, and the user passed in 1, it is an
 * error.
 */
static struct cxl_mem_command mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
	CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
	CXL_CMD(RAW, ~0, ~0, 0),
#endif
	CXL_CMD(GET_SUPPORTED_LOGS, 0, ~0, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(GET_FW_INFO, 0, 0x50, 0),
	CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0),
	CXL_CMD(GET_LSA, 0x8, ~0, 0),
	CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
	CXL_CMD(GET_LOG, 0x18, ~0, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
	CXL_CMD(SET_LSA, ~0, 0, 0),
	CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
	CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0),
	CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
	CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
	CXL_CMD(GET_POISON, 0x10, ~0, 0),
	CXL_CMD(INJECT_POISON, 0x8, 0, 0),
	CXL_CMD(CLEAR_POISON, 0x48, 0, 0),
	CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
	CXL_CMD(SCAN_MEDIA, 0x11, 0, 0),
	CXL_CMD(GET_SCAN_MEDIA, 0, ~0, 0),
};
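
/*
 * Note: a size of ~0 in the table above means "variable sized". The UAPI size
 * fields are signed 32-bit values, so ~0 reads back as -1 and the exact-size
 * checks in cxl_validate_cmd_from_user() are skipped for those entries.
 */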

/*
 * Commands that RAW doesn't permit. The rationale for each:
 *
 * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment /
 * coordination of transaction timeout values at the root bridge level.
 *
 * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live
 * and needs to be coordinated with HDM updates.
 *
 * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the
 * driver and any writes from userspace invalidate those contents.
 *
 * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes
 * to the device after it is marked clean, userspace cannot make that
 * assertion.
 *
 * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that
 * is kept up to date with patrol notifications and error management.
 */
static u16 cxl_disabled_raw_commands[] = {
	CXL_MBOX_OP_ACTIVATE_FW,
	CXL_MBOX_OP_SET_PARTITION_INFO,
	CXL_MBOX_OP_SET_LSA,
	CXL_MBOX_OP_SET_SHUTDOWN_STATE,
	CXL_MBOX_OP_SCAN_MEDIA,
	CXL_MBOX_OP_GET_SCAN_MEDIA,
};

/*
 * Command sets that RAW doesn't permit. All opcodes in this set are
 * disabled because they pass plain text security payloads over the
 * user/kernel boundary. This functionality is intended to be wrapped
 * behind the keys ABI which allows for encrypted payloads in the UAPI.
 */
static u8 security_command_sets[] = {
	0x44, /* Sanitize */
	0x45, /* Persistent Memory Data-at-rest Security */
	0x46, /* Security Passthrough */
};

#define cxl_for_each_cmd(cmd)                                                  \
	for ((cmd) = &mem_commands[0];                                         \
	     ((cmd) - mem_commands) < ARRAY_SIZE(mem_commands); (cmd)++)

#define cxl_cmd_count ARRAY_SIZE(mem_commands)

static int cxl_mem_wait_for_doorbell(struct cxl_mem *cxlm)
{
	const unsigned long start = jiffies;
	unsigned long end = start;

	while (cxl_doorbell_busy(cxlm)) {
		end = jiffies;

		if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) {
			/* Check again in case preempted before timeout test */
			if (!cxl_doorbell_busy(cxlm))
				break;
			return -ETIMEDOUT;
		}
		cpu_relax();
	}

	dev_dbg(&cxlm->pdev->dev, "Doorbell wait took %dms",
		jiffies_to_msecs(end) - jiffies_to_msecs(start));
	return 0;
}

static bool cxl_is_security_command(u16 opcode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(security_command_sets); i++)
		if (security_command_sets[i] == (opcode >> 8))
			return true;
	return false;
}
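
/*
 * The high byte of a mailbox opcode is its command set, so e.g. a hypothetical
 * opcode of 0x4500 (a member of the 0x45 Persistent Memory Security set
 * above) is filtered because (0x4500 >> 8) == 0x45.
 */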

static void cxl_mem_mbox_timeout(struct cxl_mem *cxlm,
				 struct mbox_cmd *mbox_cmd)
{
	struct device *dev = &cxlm->pdev->dev;

	dev_dbg(dev, "Mailbox command (opcode: %#x size: %zub) timed out\n",
		mbox_cmd->opcode, mbox_cmd->size_in);
}

/**
 * __cxl_mem_mbox_send_cmd() - Execute a mailbox command
 * @cxlm: The CXL memory device to communicate with.
 * @mbox_cmd: Command to send to the memory device.
 *
 * Context: Any context. Expects mbox_mutex to be held.
 * Return: -ETIMEDOUT if timeout occurred waiting for completion. 0 on success.
 *         Caller should check the return code in @mbox_cmd to make sure it
 *         succeeded.
 *
 * This is a generic form of the CXL mailbox send command. It only uses the
 * registers defined by the mailbox capability ID - CXL 2.0 8.2.8.4. Memory
 * devices, and perhaps other types of CXL devices, may have further
 * information available upon error conditions. Driver facilities wishing to
 * send mailbox commands should use the wrapper command.
 *
 * The CXL spec allows for up to two mailboxes. The intention is for the
 * primary mailbox to be OS controlled and the secondary mailbox to be used by
 * system firmware. This allows the OS and firmware to communicate with the
 * device and not need to coordinate with each other. The driver only uses the
 * primary mailbox.
 */
static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm,
				   struct mbox_cmd *mbox_cmd)
{
	void __iomem *payload = cxlm->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
	u64 cmd_reg, status_reg;
	size_t out_len;
	int rc;

	lockdep_assert_held(&cxlm->mbox_mutex);

	/*
	 * Here are the steps from 8.2.8.4 of the CXL 2.0 spec.
	 *   1. Caller reads MB Control Register to verify doorbell is clear
	 *   2. Caller writes Command Register
	 *   3. Caller writes Command Payload Registers if input payload is
	 *      non-empty
	 *   4. Caller writes MB Control Register to set doorbell
	 *   5. Caller either polls for doorbell to be clear or waits for
	 *      interrupt if configured
	 *   6. Caller reads MB Status Register to fetch Return code
	 *   7. If command successful, Caller reads Command Register to get
	 *      Payload Length
	 *   8. If output payload is non-empty, host reads Command Payload
	 *      Registers
	 *
	 * Hardware is free to do whatever it wants before the doorbell is
	 * rung, and isn't allowed to change anything after it clears the
	 * doorbell. As such, steps 2 and 3 can happen in any order, and steps
	 * 6, 7, 8 can also happen in any order (though some orders might not
	 * make sense).
	 */

	/* #1 */
	if (cxl_doorbell_busy(cxlm)) {
		dev_err_ratelimited(&cxlm->pdev->dev,
				    "Mailbox re-busy after acquiring\n");
		return -EBUSY;
	}

	cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK,
			     mbox_cmd->opcode);
	if (mbox_cmd->size_in) {
		if (WARN_ON(!mbox_cmd->payload_in))
			return -EINVAL;

		cmd_reg |= FIELD_PREP(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK,
				      mbox_cmd->size_in);
		memcpy_toio(payload, mbox_cmd->payload_in, mbox_cmd->size_in);
	}

	/* #2, #3 */
	writeq(cmd_reg, cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);

	/* #4 */
	dev_dbg(&cxlm->pdev->dev, "Sending command\n");
	writel(CXLDEV_MBOX_CTRL_DOORBELL,
	       cxlm->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);

	/* #5 */
	rc = cxl_mem_wait_for_doorbell(cxlm);
	if (rc == -ETIMEDOUT) {
		cxl_mem_mbox_timeout(cxlm, mbox_cmd);
		return rc;
	}

	/* #6 */
	status_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET);
	mbox_cmd->return_code =
		FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);

	if (mbox_cmd->return_code != 0) {
		dev_dbg(&cxlm->pdev->dev, "Mailbox operation had an error\n");
		return 0;
	}

	/* #7 */
	cmd_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
	out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);

	/* #8 */
	if (out_len && mbox_cmd->payload_out) {
		/*
		 * Sanitize the copy. If hardware misbehaves, out_len per the
		 * spec can actually be greater than the max allowed size (21
		 * bits available but spec defined 1M max). The caller also may
		 * have requested less data than the hardware supplied even
		 * within spec.
		 */
		size_t n = min3(mbox_cmd->size_out, cxlm->payload_size,
				out_len);

		memcpy_fromio(mbox_cmd->payload_out, payload, n);
		mbox_cmd->size_out = n;
	} else {
		mbox_cmd->size_out = 0;
	}

	return 0;
}

/**
 * cxl_mem_mbox_get() - Acquire exclusive access to the mailbox.
 * @cxlm: The memory device to gain access to.
 *
 * Context: Any context. Takes the mbox_mutex.
 * Return: 0 if exclusive access was acquired.
 */
static int cxl_mem_mbox_get(struct cxl_mem *cxlm)
{
	struct device *dev = &cxlm->pdev->dev;
	u64 md_status;
	int rc;

	mutex_lock_io(&cxlm->mbox_mutex);

	/*
	 * XXX: There is some amount of ambiguity in the 2.0 version of the
	 * spec around the mailbox interface ready (8.2.8.5.1.1). The purpose
	 * of the bit is to allow firmware running on the device to notify the
	 * driver that it's ready to receive commands. It is unclear if the bit
	 * needs to be read for each transaction, i.e. whether the firmware can
	 * switch it on and off as needed. Second, there is no defined timeout
	 * for mailbox ready, like there is for the doorbell interface.
	 *
	 * Assumptions:
	 * 1. The firmware might toggle the Mailbox Interface Ready bit, check
	 *    it for every command.
	 *
	 * 2. If the doorbell is clear, the firmware should have first set the
	 *    Mailbox Interface Ready bit. Therefore, waiting for the doorbell
	 *    to be ready is sufficient.
	 */
	rc = cxl_mem_wait_for_doorbell(cxlm);
	if (rc) {
		dev_warn(dev, "Mailbox interface not ready\n");
		goto out;
	}

	md_status = readq(cxlm->regs.memdev + CXLMDEV_STATUS_OFFSET);
	if (!(md_status & CXLMDEV_MBOX_IF_READY && CXLMDEV_READY(md_status))) {
		dev_err(dev, "mbox: reported doorbell ready, but not mbox ready\n");
		rc = -EBUSY;
		goto out;
	}

	/*
	 * Hardware shouldn't allow a ready status but also have failure bits
	 * set. Spit out an error, this should be a bug report.
	 */
	rc = -EFAULT;
	if (md_status & CXLMDEV_DEV_FATAL) {
		dev_err(dev, "mbox: reported ready, but fatal\n");
		goto out;
	}
	if (md_status & CXLMDEV_FW_HALT) {
		dev_err(dev, "mbox: reported ready, but halted\n");
		goto out;
	}
	if (CXLMDEV_RESET_NEEDED(md_status)) {
		dev_err(dev, "mbox: reported ready, but reset needed\n");
		goto out;
	}

	/* with lock held */
	return 0;

out:
	mutex_unlock(&cxlm->mbox_mutex);
	return rc;
}

/**
 * cxl_mem_mbox_put() - Release exclusive access to the mailbox.
 * @cxlm: The CXL memory device to communicate with.
 *
 * Context: Any context. Expects mbox_mutex to be held.
 */
static void cxl_mem_mbox_put(struct cxl_mem *cxlm)
{
	mutex_unlock(&cxlm->mbox_mutex);
}
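
/*
 * Typical locked sequence, as used by the callers below (sketch):
 *
 *	rc = cxl_mem_mbox_get(cxlm);
 *	if (rc)
 *		return rc;
 *	rc = __cxl_mem_mbox_send_cmd(cxlm, &mbox_cmd);
 *	cxl_mem_mbox_put(cxlm);
 */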

/**
 * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
 * @cxlm: The CXL memory device to communicate with.
 * @cmd: The validated command.
 * @in_payload: Pointer to userspace's input payload.
 * @out_payload: Pointer to userspace's output payload.
 * @size_out: (Input) Max payload size to copy out.
 *            (Output) Payload size hardware generated.
 * @retval: Hardware generated return code from the operation.
 *
 * Return:
 *  * %0	- Mailbox transaction succeeded. This implies the mailbox
 *		  protocol completed successfully, not that the operation
 *		  itself was successful.
 *  * %-ENOMEM	- Couldn't allocate a bounce buffer.
 *  * %-EFAULT	- Something happened with copy_to/from_user.
 *  * %-EINTR	- Mailbox acquisition interrupted.
 *  * %-EXXX	- Transaction level failures.
 *
 * Creates the appropriate mailbox command and dispatches it on behalf of a
 * userspace request. The input and output payloads are copied between
 * userspace and driver allocated bounce buffers.
 *
 * See cxl_send_cmd().
 */
static int handle_mailbox_cmd_from_user(struct cxl_mem *cxlm,
					const struct cxl_mem_command *cmd,
					u64 in_payload, u64 out_payload,
					s32 *size_out, u32 *retval)
{
	struct device *dev = &cxlm->pdev->dev;
	struct mbox_cmd mbox_cmd = {
		.opcode = cmd->opcode,
		.size_in = cmd->info.size_in,
		.size_out = cmd->info.size_out,
	};
	int rc;

	if (cmd->info.size_out) {
		mbox_cmd.payload_out = kvzalloc(cmd->info.size_out, GFP_KERNEL);
		if (!mbox_cmd.payload_out)
			return -ENOMEM;
	}

	if (cmd->info.size_in) {
		mbox_cmd.payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
						   cmd->info.size_in);
		if (IS_ERR(mbox_cmd.payload_in)) {
			kvfree(mbox_cmd.payload_out);
			return PTR_ERR(mbox_cmd.payload_in);
		}
	}

	rc = cxl_mem_mbox_get(cxlm);
	if (rc)
		goto out;

	dev_dbg(dev,
		"Submitting %s command for user\n"
		"\topcode: %x\n"
		"\tsize: %ub\n",
		cxl_command_names[cmd->info.id].name, mbox_cmd.opcode,
		cmd->info.size_in);

	dev_WARN_ONCE(dev, cmd->info.id == CXL_MEM_COMMAND_ID_RAW,
		      "raw command path used\n");

	rc = __cxl_mem_mbox_send_cmd(cxlm, &mbox_cmd);
	cxl_mem_mbox_put(cxlm);
	if (rc)
		goto out;

	/*
	 * @size_out contains the max size that's allowed to be written back
	 * out to userspace. While the payload may have written more output
	 * than this it will have to be ignored.
	 */
	if (mbox_cmd.size_out) {
		dev_WARN_ONCE(dev, mbox_cmd.size_out > *size_out,
			      "Invalid return size\n");
		if (copy_to_user(u64_to_user_ptr(out_payload),
				 mbox_cmd.payload_out, mbox_cmd.size_out)) {
			rc = -EFAULT;
			goto out;
		}
	}

	*size_out = mbox_cmd.size_out;
	*retval = mbox_cmd.return_code;

out:
	kvfree(mbox_cmd.payload_in);
	kvfree(mbox_cmd.payload_out);
	return rc;
}

static bool cxl_mem_raw_command_allowed(u16 opcode)
{
	int i;

	if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
		return false;

	if (security_locked_down(LOCKDOWN_NONE))
		return false;

	if (cxl_raw_allow_all)
		return true;

	if (cxl_is_security_command(opcode))
		return false;

	for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++)
		if (cxl_disabled_raw_commands[i] == opcode)
			return false;

	return true;
}
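
/*
 * Note the check ordering above: kernel lockdown is evaluated before the
 * debugfs raw_allow_all override, so lockdown cannot be defeated from debugfs.
 */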

/**
 * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
 * @cxlm: &struct cxl_mem device whose mailbox will be used.
 * @send_cmd: &struct cxl_send_command copied in from userspace.
 * @out_cmd: Sanitized and populated &struct cxl_mem_command.
 *
 * Return:
 *  * %0	- @out_cmd is ready to send.
 *  * %-ENOTTY	- Invalid command specified.
 *  * %-EINVAL	- Reserved fields or invalid values were used.
 *  * %-ENOMEM	- Input or output buffer wasn't sized properly.
 *  * %-EPERM	- Attempted to use a protected command.
 *
 * The result of this command is a fully validated command in @out_cmd that is
 * safe to send to the hardware.
 *
 * See handle_mailbox_cmd_from_user()
 */
static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm,
				      const struct cxl_send_command *send_cmd,
				      struct cxl_mem_command *out_cmd)
{
	const struct cxl_command_info *info;
	struct cxl_mem_command *c;

	if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX)
		return -ENOTTY;

	/*
	 * The user can never specify an input payload larger than what
	 * hardware supports, but output can be arbitrarily large (simply
	 * write out as much data as the hardware provides).
	 */
	if (send_cmd->in.size > cxlm->payload_size)
		return -EINVAL;

	/*
	 * Checks are bypassed for raw commands but a WARN/taint will occur
	 * later in the callchain
	 */
	if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW) {
		const struct cxl_mem_command temp = {
			.info = {
				.id = CXL_MEM_COMMAND_ID_RAW,
				.flags = 0,
				.size_in = send_cmd->in.size,
				.size_out = send_cmd->out.size,
			},
			.opcode = send_cmd->raw.opcode
		};

		if (send_cmd->raw.rsvd)
			return -EINVAL;

		/*
		 * Unlike supported commands, the output size of RAW commands
		 * gets passed along without further checking, so it must be
		 * validated here.
		 */
		if (send_cmd->out.size > cxlm->payload_size)
			return -EINVAL;

		if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
			return -EPERM;

		memcpy(out_cmd, &temp, sizeof(temp));

		return 0;
	}

	if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK)
		return -EINVAL;

	if (send_cmd->rsvd)
		return -EINVAL;

	if (send_cmd->in.rsvd || send_cmd->out.rsvd)
		return -EINVAL;

	/* Convert user's command into the internal representation */
	c = &mem_commands[send_cmd->id];
	info = &c->info;

	/* Check that the command is enabled for hardware */
	if (!test_bit(info->id, cxlm->enabled_cmds))
		return -ENOTTY;

	/* Check the input buffer is the expected size */
	if (info->size_in >= 0 && info->size_in != send_cmd->in.size)
		return -ENOMEM;

	/* Check the output buffer is at least large enough */
	if (info->size_out >= 0 && send_cmd->out.size < info->size_out)
		return -ENOMEM;

	memcpy(out_cmd, c, sizeof(*c));
	out_cmd->info.size_in = send_cmd->in.size;
	/*
	 * XXX: out_cmd->info.size_out will be controlled by the driver, and
	 * the specified number of bytes @send_cmd->out.size will be copied
	 * back out to userspace.
	 */

	return 0;
}

static int cxl_query_cmd(struct cxl_memdev *cxlmd,
			 struct cxl_mem_query_commands __user *q)
{
	struct device *dev = &cxlmd->dev;
	struct cxl_mem_command *cmd;
	u32 n_commands;
	int j = 0;

	dev_dbg(dev, "Query IOCTL\n");

	if (get_user(n_commands, &q->n_commands))
		return -EFAULT;

	/* returns the total number if 0 elements are requested. */
	if (n_commands == 0)
		return put_user(cxl_cmd_count, &q->n_commands);

	/*
	 * otherwise, return min(n_commands, total commands) cxl_command_info
	 * structures.
	 */
	cxl_for_each_cmd(cmd) {
		const struct cxl_command_info *info = &cmd->info;

		if (copy_to_user(&q->commands[j++], info, sizeof(*info)))
			return -EFAULT;

		if (j == n_commands)
			break;
	}

	return 0;
}

static int cxl_send_cmd(struct cxl_memdev *cxlmd,
			struct cxl_send_command __user *s)
{
	struct cxl_mem *cxlm = cxlmd->cxlm;
	struct device *dev = &cxlmd->dev;
	struct cxl_send_command send;
	struct cxl_mem_command c;
	int rc;

	dev_dbg(dev, "Send IOCTL\n");

	if (copy_from_user(&send, s, sizeof(send)))
		return -EFAULT;

	rc = cxl_validate_cmd_from_user(cxlm, &send, &c);
	if (rc)
		return rc;

	/* Prepare to handle a full payload for variable sized output */
	if (c.info.size_out < 0)
		c.info.size_out = cxlm->payload_size;

	rc = handle_mailbox_cmd_from_user(cxlm, &c, send.in.payload,
					  send.out.payload, &send.out.size,
					  &send.retval);
	if (rc)
		return rc;

	if (copy_to_user(s, &send, sizeof(send)))
		return -EFAULT;

	return 0;
}

static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
			       unsigned long arg)
{
	switch (cmd) {
	case CXL_MEM_QUERY_COMMANDS:
		return cxl_query_cmd(cxlmd, (void __user *)arg);
	case CXL_MEM_SEND_COMMAND:
		return cxl_send_cmd(cxlmd, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}

static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct cxl_memdev *cxlmd = file->private_data;
	int rc = -ENXIO;

	down_read(&cxl_memdev_rwsem);
	if (cxlmd->cxlm)
		rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
	up_read(&cxl_memdev_rwsem);

	return rc;
}

static int cxl_memdev_open(struct inode *inode, struct file *file)
{
	struct cxl_memdev *cxlmd =
		container_of(inode->i_cdev, typeof(*cxlmd), cdev);

	get_device(&cxlmd->dev);
	file->private_data = cxlmd;

	return 0;
}

static int cxl_memdev_release_file(struct inode *inode, struct file *file)
{
	struct cxl_memdev *cxlmd =
		container_of(inode->i_cdev, typeof(*cxlmd), cdev);

	put_device(&cxlmd->dev);

	return 0;
}

static const struct file_operations cxl_memdev_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = cxl_memdev_ioctl,
	.open = cxl_memdev_open,
	.release = cxl_memdev_release_file,
	.compat_ioctl = compat_ptr_ioctl,
	.llseek = noop_llseek,
};
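
/*
 * Userspace usage sketch for the ioctls above (illustrative only; "fd" is an
 * open /dev/cxl/memX file descriptor and "id_buf" a 0x43 byte array):
 *
 *	struct cxl_send_command cmd = {
 *		.id = CXL_MEM_COMMAND_ID_IDENTIFY,
 *		.out.size = sizeof(id_buf),
 *		.out.payload = (__u64)(uintptr_t)id_buf,
 *	};
 *
 *	rc = ioctl(fd, CXL_MEM_SEND_COMMAND, &cmd);
 */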

static inline struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
{
	struct cxl_mem_command *c;

	cxl_for_each_cmd(c)
		if (c->opcode == opcode)
			return c;

	return NULL;
}

/**
 * cxl_mem_mbox_send_cmd() - Send a mailbox command to a memory device.
 * @cxlm: The CXL memory device to communicate with.
 * @opcode: Opcode for the mailbox command.
 * @in: The input payload for the mailbox command.
 * @in_size: The length of the input payload
 * @out: Caller allocated buffer for the output.
 * @out_size: Expected size of output.
 *
 * Context: Any context. Will acquire and release mbox_mutex.
 * Return:
 *  * %0	- Success.
 *  * %-E2BIG	- Payload is too large for hardware.
 *  * %-EBUSY	- Couldn't acquire exclusive mailbox access.
 *  * %-EFAULT	- Hardware error occurred.
 *  * %-ENXIO	- Command completed, but device reported an error.
 *  * %-EIO	- Unexpected output size.
 *
 * Mailbox commands may execute successfully at the transport level yet the
 * device itself can still report an error. While this distinction can be
 * useful for commands from userspace, the kernel will only be able to use
 * results when both are successful.
 *
 * See __cxl_mem_mbox_send_cmd()
 */
static int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode,
				 void *in, size_t in_size,
				 void *out, size_t out_size)
{
	const struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
	struct mbox_cmd mbox_cmd = {
		.opcode = opcode,
		.payload_in = in,
		.size_in = in_size,
		.size_out = out_size,
		.payload_out = out,
	};
	int rc;

	if (out_size > cxlm->payload_size)
		return -E2BIG;

	rc = cxl_mem_mbox_get(cxlm);
	if (rc)
		return rc;

	rc = __cxl_mem_mbox_send_cmd(cxlm, &mbox_cmd);
	cxl_mem_mbox_put(cxlm);
	if (rc)
		return rc;

	/* TODO: Map return code to proper kernel style errno */
	if (mbox_cmd.return_code != CXL_MBOX_SUCCESS)
		return -ENXIO;

	/*
	 * Variable sized commands can't be validated and so it's up to the
	 * caller to do that if they wish.
	 */
	if (cmd->info.size_out >= 0 && mbox_cmd.size_out != out_size)
		return -EIO;

	return 0;
}

static int cxl_mem_setup_mailbox(struct cxl_mem *cxlm)
{
	const int cap = readl(cxlm->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);

	cxlm->payload_size =
		1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap);

	/*
	 * CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register
	 *
	 * If the size is too small, mandatory commands will not work and so
	 * there's no point in going forward. If the size is too large, there's
	 * no harm in soft limiting it.
	 */
	cxlm->payload_size = min_t(size_t, cxlm->payload_size, SZ_1M);
	if (cxlm->payload_size < 256) {
		dev_err(&cxlm->pdev->dev, "Mailbox is too small (%zub)",
			cxlm->payload_size);
		return -ENXIO;
	}

	dev_dbg(&cxlm->pdev->dev, "Mailbox payload sized %zu",
		cxlm->payload_size);

	return 0;
}

static struct cxl_mem *cxl_mem_create(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct cxl_mem *cxlm;

	cxlm = devm_kzalloc(dev, sizeof(*cxlm), GFP_KERNEL);
	if (!cxlm) {
		dev_err(dev, "No memory available\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_init(&cxlm->mbox_mutex);
	cxlm->pdev = pdev;
	cxlm->enabled_cmds =
		devm_kmalloc_array(dev, BITS_TO_LONGS(cxl_cmd_count),
				   sizeof(unsigned long),
				   GFP_KERNEL | __GFP_ZERO);
	if (!cxlm->enabled_cmds) {
		dev_err(dev, "No memory available for bitmap\n");
		return ERR_PTR(-ENOMEM);
	}

	return cxlm;
}

static void __iomem *cxl_mem_map_regblock(struct cxl_mem *cxlm,
					  u8 bar, u64 offset)
{
	struct pci_dev *pdev = cxlm->pdev;
	struct device *dev = &pdev->dev;
	void __iomem *addr;

	/* Basic sanity check that BAR is big enough */
	if (pci_resource_len(pdev, bar) < offset) {
		dev_err(dev, "BAR%d: %pr: too small (offset: %#llx)\n", bar,
			&pdev->resource[bar], (unsigned long long)offset);
		return IOMEM_ERR_PTR(-ENXIO);
	}

	addr = pci_iomap(pdev, bar, 0);
	if (!addr) {
		dev_err(dev, "failed to map registers\n");
		return IOMEM_ERR_PTR(-ENOMEM);
	}

	dev_dbg(dev, "Mapped CXL Memory Device resource bar %u @ %#llx\n",
		bar, offset);

	return addr;
}
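
/*
 * Note: pci_iomap() with a length of 0 maps the entire BAR; the register
 * block @offset is applied by the caller (see cxl_mem_setup_regs()).
 */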

static void cxl_mem_unmap_regblock(struct cxl_mem *cxlm, void __iomem *base)
{
	pci_iounmap(cxlm->pdev, base);
}

static int cxl_mem_dvsec(struct pci_dev *pdev, int dvsec)
{
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DVSEC);
	if (!pos)
		return 0;

	while (pos) {
		u16 vendor, id;

		pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER1, &vendor);
		pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER2, &id);
		if (vendor == PCI_DVSEC_VENDOR_ID_CXL && dvsec == id)
			return pos;

		pos = pci_find_next_ext_capability(pdev, pos,
						   PCI_EXT_CAP_ID_DVSEC);
	}

	return 0;
}

static int cxl_probe_regs(struct cxl_mem *cxlm, void __iomem *base,
			  struct cxl_register_map *map)
{
	struct pci_dev *pdev = cxlm->pdev;
	struct device *dev = &pdev->dev;
	struct cxl_component_reg_map *comp_map;
	struct cxl_device_reg_map *dev_map;

	switch (map->reg_type) {
	case CXL_REGLOC_RBI_COMPONENT:
		comp_map = &map->component_map;
		cxl_probe_component_regs(dev, base, comp_map);
		if (!comp_map->hdm_decoder.valid) {
			dev_err(dev, "HDM decoder registers not found\n");
			return -ENXIO;
		}

		dev_dbg(dev, "Set up component registers\n");
		break;
	case CXL_REGLOC_RBI_MEMDEV:
		dev_map = &map->device_map;
		cxl_probe_device_regs(dev, base, dev_map);
		if (!dev_map->status.valid || !dev_map->mbox.valid ||
		    !dev_map->memdev.valid) {
			dev_err(dev, "registers not found: %s%s%s\n",
				!dev_map->status.valid ? "status " : "",
				!dev_map->mbox.valid ? "mbox " : "",
				!dev_map->memdev.valid ? "memdev " : "");
			return -ENXIO;
		}

		dev_dbg(dev, "Set up device registers\n");
		break;
	default:
		break;
	}

	return 0;
}

static int cxl_map_regs(struct cxl_mem *cxlm, struct cxl_register_map *map)
{
	struct pci_dev *pdev = cxlm->pdev;
	struct device *dev = &pdev->dev;

	switch (map->reg_type) {
	case CXL_REGLOC_RBI_COMPONENT:
		cxl_map_component_regs(pdev, &cxlm->regs.component, map);
		dev_dbg(dev, "Mapping component registers...\n");
		break;
	case CXL_REGLOC_RBI_MEMDEV:
		cxl_map_device_regs(pdev, &cxlm->regs.device_regs, map);
		dev_dbg(dev, "Mapping device registers...\n");
		break;
	default:
		break;
	}

	return 0;
}

static void cxl_decode_register_block(u32 reg_lo, u32 reg_hi,
				      u8 *bar, u64 *offset, u8 *reg_type)
{
	*offset = ((u64)reg_hi << 32) | (reg_lo & CXL_REGLOC_ADDR_MASK);
	*bar = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo);
	*reg_type = FIELD_GET(CXL_REGLOC_RBI_MASK, reg_lo);
}
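
/*
 * Worked example, assuming the CXL 2.0 Register Locator layout (BIR in bits
 * 2:0, register block identifier in bits 15:8, offset low in bits 31:16):
 * reg_lo = 0x00010302 with reg_hi = 0 decodes to bar 2, reg_type 3 (memdev),
 * and offset 0x10000.
 */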

/**
 * cxl_mem_setup_regs() - Setup necessary MMIO.
 * @cxlm: The CXL memory device to communicate with.
 *
 * Return: 0 if all necessary registers mapped.
 *
 * A memory device is required by spec to implement a certain set of MMIO
 * regions. The purpose of this function is to enumerate and map those
 * registers.
 */
static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
{
	struct pci_dev *pdev = cxlm->pdev;
	struct device *dev = &pdev->dev;
	u32 regloc_size, regblocks;
	void __iomem *base;
	int regloc, i;
	struct cxl_register_map *map, *n;
	LIST_HEAD(register_maps);
	int ret = 0;

	regloc = cxl_mem_dvsec(pdev, PCI_DVSEC_ID_CXL_REGLOC_DVSEC_ID);
	if (!regloc) {
		dev_err(dev, "register location dvsec not found\n");
		return -ENXIO;
	}

	if (pci_request_mem_regions(pdev, pci_name(pdev)))
		return -ENODEV;

	/* Get the size of the Register Locator DVSEC */
	pci_read_config_dword(pdev, regloc + PCI_DVSEC_HEADER1, &regloc_size);
	regloc_size = FIELD_GET(PCI_DVSEC_HEADER1_LENGTH_MASK, regloc_size);

	regloc += PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET;
	regblocks = (regloc_size - PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET) / 8;

	for (i = 0; i < regblocks; i++, regloc += 8) {
		u32 reg_lo, reg_hi;
		u8 reg_type;
		u64 offset;
		u8 bar;

		map = kzalloc(sizeof(*map), GFP_KERNEL);
		if (!map) {
			ret = -ENOMEM;
			break;
		}

		list_add(&map->list, &register_maps);

		pci_read_config_dword(pdev, regloc, &reg_lo);
		pci_read_config_dword(pdev, regloc + 4, &reg_hi);

		cxl_decode_register_block(reg_lo, reg_hi, &bar, &offset,
					  &reg_type);

		dev_dbg(dev, "Found register block in bar %u @ 0x%llx of type %u\n",
			bar, offset, reg_type);

		base = cxl_mem_map_regblock(cxlm, bar, offset);
		if (IS_ERR(base)) {
			ret = PTR_ERR(base);
			break;
		}

		map->barno = bar;
		map->block_offset = offset;
		map->reg_type = reg_type;

		ret = cxl_probe_regs(cxlm, base + offset, map);

		/* Always unmap the regblock regardless of probe success */
		cxl_mem_unmap_regblock(cxlm, base);

		if (ret)
			break;
	}

	/* The region request is only needed while probing the blocks */
	pci_release_mem_regions(pdev);

	if (ret)
		goto free_maps;

	list_for_each_entry(map, &register_maps, list) {
		ret = cxl_map_regs(cxlm, map);
		if (ret)
			goto free_maps;
	}

free_maps:
	list_for_each_entry_safe(map, n, &register_maps, list) {
		list_del(&map->list);
		kfree(map);
	}

	return ret;
}

static struct cxl_memdev *to_cxl_memdev(struct device *dev)
{
	return container_of(dev, struct cxl_memdev, dev);
}

static void cxl_memdev_release(struct device *dev)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);

	ida_free(&cxl_memdev_ida, cxlmd->id);
	kfree(cxlmd);
}

static char *cxl_memdev_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
				kgid_t *gid)
{
	return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
}

static ssize_t firmware_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_mem *cxlm = cxlmd->cxlm;

	return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version);
}
static DEVICE_ATTR_RO(firmware_version);

static ssize_t payload_max_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_mem *cxlm = cxlmd->cxlm;

	return sysfs_emit(buf, "%zu\n", cxlm->payload_size);
}
static DEVICE_ATTR_RO(payload_max);

static ssize_t label_storage_size_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_mem *cxlm = cxlmd->cxlm;

	return sysfs_emit(buf, "%zu\n", cxlm->lsa_size);
}
static DEVICE_ATTR_RO(label_storage_size);

static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_mem *cxlm = cxlmd->cxlm;
	unsigned long long len = range_len(&cxlm->ram_range);

	return sysfs_emit(buf, "%#llx\n", len);
}

static struct device_attribute dev_attr_ram_size =
	__ATTR(size, 0444, ram_size_show, NULL);

static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_mem *cxlm = cxlmd->cxlm;
	unsigned long long len = range_len(&cxlm->pmem_range);

	return sysfs_emit(buf, "%#llx\n", len);
}

static struct device_attribute dev_attr_pmem_size =
	__ATTR(size, 0444, pmem_size_show, NULL);

static struct attribute *cxl_memdev_attributes[] = {
	&dev_attr_firmware_version.attr,
	&dev_attr_payload_max.attr,
	&dev_attr_label_storage_size.attr,
	NULL,
};

static struct attribute *cxl_memdev_pmem_attributes[] = {
	&dev_attr_pmem_size.attr,
	NULL,
};

static struct attribute *cxl_memdev_ram_attributes[] = {
	&dev_attr_ram_size.attr,
	NULL,
};

static struct attribute_group cxl_memdev_attribute_group = {
	.attrs = cxl_memdev_attributes,
};

static struct attribute_group cxl_memdev_ram_attribute_group = {
	.name = "ram",
	.attrs = cxl_memdev_ram_attributes,
};

static struct attribute_group cxl_memdev_pmem_attribute_group = {
	.name = "pmem",
	.attrs = cxl_memdev_pmem_attributes,
};

static const struct attribute_group *cxl_memdev_attribute_groups[] = {
	&cxl_memdev_attribute_group,
	&cxl_memdev_ram_attribute_group,
	&cxl_memdev_pmem_attribute_group,
	NULL,
};

static const struct device_type cxl_memdev_type = {
	.name = "cxl_memdev",
	.release = cxl_memdev_release,
	.devnode = cxl_memdev_devnode,
	.groups = cxl_memdev_attribute_groups,
};

static void cxl_memdev_shutdown(struct cxl_memdev *cxlmd)
{
	down_write(&cxl_memdev_rwsem);
	cxlmd->cxlm = NULL;
	up_write(&cxl_memdev_rwsem);
}
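
/*
 * Once cxlmd->cxlm is cleared under the write lock above, ioctls that raced
 * cdev removal observe a NULL cxlm in cxl_memdev_ioctl() and fail with
 * -ENXIO.
 */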

static void cxl_memdev_unregister(void *_cxlmd)
{
	struct cxl_memdev *cxlmd = _cxlmd;
	struct device *dev = &cxlmd->dev;

	cdev_device_del(&cxlmd->cdev, dev);
	cxl_memdev_shutdown(cxlmd);
	put_device(dev);
}

static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm)
{
	struct pci_dev *pdev = cxlm->pdev;
	struct cxl_memdev *cxlmd;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
	if (!cxlmd)
		return ERR_PTR(-ENOMEM);

	rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS - 1,
			     GFP_KERNEL);
	if (rc < 0)
		goto err;
	cxlmd->id = rc;

	dev = &cxlmd->dev;
	device_initialize(dev);
	dev->parent = &pdev->dev;
	dev->bus = &cxl_bus_type;
	dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
	dev->type = &cxl_memdev_type;
	device_set_pm_not_required(dev);

	cdev = &cxlmd->cdev;
	cdev_init(cdev, &cxl_memdev_fops);
	return cxlmd;

err:
	kfree(cxlmd);
	return ERR_PTR(rc);
}

static struct cxl_memdev *devm_cxl_add_memdev(struct device *host,
					      struct cxl_mem *cxlm)
{
	struct cxl_memdev *cxlmd;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	cxlmd = cxl_memdev_alloc(cxlm);
	if (IS_ERR(cxlmd))
		return cxlmd;

	dev = &cxlmd->dev;
	rc = dev_set_name(dev, "mem%d", cxlmd->id);
	if (rc)
		goto err;

	/*
	 * Activate ioctl operations, no cxl_memdev_rwsem manipulation
	 * needed as this is ordered with cdev_add() publishing the device.
	 */
	cxlmd->cxlm = cxlm;

	cdev = &cxlmd->cdev;
	rc = cdev_device_add(cdev, dev);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd);
	if (rc)
		return ERR_PTR(rc);
	return cxlmd;

err:
	/*
	 * The cdev was briefly live, shutdown any ioctl operations that
	 * saw that state.
	 */
	cxl_memdev_shutdown(cxlmd);
	put_device(dev);
	return ERR_PTR(rc);
}

static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out)
{
	u32 remaining = size;
	u32 offset = 0;

	while (remaining) {
		u32 xfer_size = min_t(u32, remaining, cxlm->payload_size);
		struct cxl_mbox_get_log {
			uuid_t uuid;
			__le32 offset;
			__le32 length;
		} __packed log = {
			.uuid = *uuid,
			.offset = cpu_to_le32(offset),
			.length = cpu_to_le32(xfer_size)
		};
		int rc;

		rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_LOG, &log,
					   sizeof(log), out, xfer_size);
		if (rc < 0)
			return rc;

		out += xfer_size;
		remaining -= xfer_size;
		offset += xfer_size;
	}

	return 0;
}

/**
 * cxl_walk_cel() - Walk through the Command Effects Log.
 * @cxlm: Device.
 * @size: Length of the Command Effects Log.
 * @cel: CEL
 *
 * Iterate over each entry in the CEL and determine if the driver supports the
 * command. If so, the command is enabled for the device and can be used later.
 */
static void cxl_walk_cel(struct cxl_mem *cxlm, size_t size, u8 *cel)
{
	struct cel_entry {
		__le16 opcode;
		__le16 effect;
	} __packed *cel_entry;
	const int cel_entries = size / sizeof(*cel_entry);
	int i;

	cel_entry = (struct cel_entry *)cel;

	for (i = 0; i < cel_entries; i++) {
		u16 opcode = le16_to_cpu(cel_entry[i].opcode);
		struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);

		if (!cmd) {
			dev_dbg(&cxlm->pdev->dev,
				"Opcode 0x%04x unsupported by driver", opcode);
			continue;
		}

		set_bit(cmd->info.id, cxlm->enabled_cmds);
	}
}

struct cxl_mbox_get_supported_logs {
	__le16 entries;
	u8 rsvd[6];
	struct gsl_entry {
		uuid_t uuid;
		__le32 size;
	} __packed entry[];
} __packed;

static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_mem *cxlm)
{
	struct cxl_mbox_get_supported_logs *ret;
	int rc;

	ret = kvmalloc(cxlm->payload_size, GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL,
				   0, ret, cxlm->payload_size);
	if (rc < 0) {
		kvfree(ret);
		return ERR_PTR(rc);
	}

	return ret;
}
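
/*
 * The number of log entries is not known ahead of time, so cxl_get_gsl()
 * requests the full mailbox payload; GET_SUPPORTED_LOGS is a variable sized
 * output command and the device fills in only what it has (the copy length is
 * clamped in __cxl_mem_mbox_send_cmd()).
 */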

/**
 * cxl_mem_enumerate_cmds() - Enumerate commands for a device.
 * @cxlm: The device.
 *
 * Return: 0 if enumerate completed successfully.
 *
 * CXL devices have optional support for certain commands. This function will
 * determine the set of supported commands for the hardware and update the
 * enabled_cmds bitmap in the @cxlm.
 */
static int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm)
{
	struct cxl_mbox_get_supported_logs *gsl;
	struct device *dev = &cxlm->pdev->dev;
	struct cxl_mem_command *cmd;
	int i, rc;

	gsl = cxl_get_gsl(cxlm);
	if (IS_ERR(gsl))
		return PTR_ERR(gsl);

	rc = -ENOENT;
	for (i = 0; i < le16_to_cpu(gsl->entries); i++) {
		u32 size = le32_to_cpu(gsl->entry[i].size);
		uuid_t uuid = gsl->entry[i].uuid;
		u8 *log;

		dev_dbg(dev, "Found LOG type %pU of size %u", &uuid, size);

		if (!uuid_equal(&uuid, &log_uuid[CEL_UUID]))
			continue;

		log = kvmalloc(size, GFP_KERNEL);
		if (!log) {
			rc = -ENOMEM;
			goto out;
		}

		rc = cxl_xfer_log(cxlm, &uuid, size, log);
		if (rc) {
			kvfree(log);
			goto out;
		}

		cxl_walk_cel(cxlm, size, log);
		kvfree(log);

		/* In case CEL was bogus, enable some default commands. */
		cxl_for_each_cmd(cmd)
			if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
				set_bit(cmd->info.id, cxlm->enabled_cmds);

		/* Found the required CEL */
		rc = 0;
	}

out:
	kvfree(gsl);
	return rc;
}

/**
 * cxl_mem_identify() - Send the IDENTIFY command to the device.
 * @cxlm: The device to identify.
 *
 * Return: 0 if identify was executed successfully.
 *
 * This will dispatch the identify command to the device and on success
 * populate structures to be exported to sysfs.
 */
static int cxl_mem_identify(struct cxl_mem *cxlm)
{
	/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
	struct cxl_mbox_identify {
		char fw_revision[0x10];
		__le64 total_capacity;
		__le64 volatile_capacity;
		__le64 persistent_capacity;
		__le64 partition_align;
		__le16 info_event_log_size;
		__le16 warning_event_log_size;
		__le16 failure_event_log_size;
		__le16 fatal_event_log_size;
		__le32 lsa_size;
		u8 poison_list_max_mer[3];
		__le16 inject_poison_limit;
		u8 poison_caps;
		u8 qos_telemetry_caps;
	} __packed id;
	int rc;

	rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id,
				   sizeof(id));
	if (rc < 0)
		return rc;
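
	/*
	 * Capacities in the Identify payload are expressed in multiples of
	 * 256MB (see CXL 2.0 Table 175), hence the SZ_256M scaling below.
	 */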
	/*
	 * TODO: enumerate DPA map, as 'ram' and 'pmem' do not alias.
	 * For now, only the capacity is exported in sysfs.
	 */
	cxlm->ram_range.start = 0;
	cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) * SZ_256M - 1;

	cxlm->pmem_range.start = 0;
	cxlm->pmem_range.end =
		le64_to_cpu(id.persistent_capacity) * SZ_256M - 1;

	cxlm->lsa_size = le32_to_cpu(id.lsa_size);
	memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision));

	return 0;
}

static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct cxl_memdev *cxlmd;
	struct cxl_mem *cxlm;
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	cxlm = cxl_mem_create(pdev);
	if (IS_ERR(cxlm))
		return PTR_ERR(cxlm);

	rc = cxl_mem_setup_regs(cxlm);
	if (rc)
		return rc;

	rc = cxl_mem_setup_mailbox(cxlm);
	if (rc)
		return rc;

	rc = cxl_mem_enumerate_cmds(cxlm);
	if (rc)
		return rc;

	rc = cxl_mem_identify(cxlm);
	if (rc)
		return rc;

	cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlm);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	if (range_len(&cxlm->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
		rc = devm_cxl_add_nvdimm(&pdev->dev, cxlmd);

	return rc;
}

static const struct pci_device_id cxl_mem_pci_tbl[] = {
	/* PCI class code for CXL.mem Type-3 Devices */
	{ PCI_DEVICE_CLASS((PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF), ~0)},
	{ /* terminate list */ },
};
MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl);

static struct pci_driver cxl_mem_driver = {
	.name			= KBUILD_MODNAME,
	.id_table		= cxl_mem_pci_tbl,
	.probe			= cxl_mem_probe,
	.driver	= {
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
	},
};

static __init int cxl_mem_init(void)
{
	struct dentry *mbox_debugfs;
	dev_t devt;
	int rc;

	/* Double check the anonymous union trickery in struct cxl_regs */
	BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) !=
		     offsetof(struct cxl_regs, device_regs.memdev));

	rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
	if (rc)
		return rc;

	cxl_mem_major = MAJOR(devt);

	rc = pci_register_driver(&cxl_mem_driver);
	if (rc) {
		unregister_chrdev_region(MKDEV(cxl_mem_major, 0),
					 CXL_MEM_MAX_DEVS);
		return rc;
	}

	cxl_debugfs = debugfs_create_dir("cxl", NULL);
	mbox_debugfs = debugfs_create_dir("mbox", cxl_debugfs);
	debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
			    &cxl_raw_allow_all);

	return 0;
}

static __exit void cxl_mem_exit(void)
{
	debugfs_remove_recursive(cxl_debugfs);
	pci_unregister_driver(&cxl_mem_driver);
	unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
}

MODULE_LICENSE("GPL v2");
module_init(cxl_mem_init);
module_exit(cxl_mem_exit);
MODULE_IMPORT_NS(CXL);