// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/security.h>
#include <linux/debugfs.h>
#include <linux/mutex.h>
#include <cxlmem.h>
#include <cxl.h>

#include "core.h"

static bool cxl_raw_allow_all;

/**
 * DOC: cxl mbox
 *
 * Core implementation of the CXL 2.0 Type-3 Memory Device Mailbox. The
 * implementation is used by the cxl_pci driver to initialize the device
 * and implement the cxl_mem.h IOCTL UAPI. It also implements the
 * backend of the cxl_pmem_ctl() transport for LIBNVDIMM.
 */

#define cxl_for_each_cmd(cmd)                                           \
        for ((cmd) = &cxl_mem_commands[0];                              \
             ((cmd) - cxl_mem_commands) < ARRAY_SIZE(cxl_mem_commands); (cmd)++)

#define CXL_CMD(_id, sin, sout, _flags)                                 \
        [CXL_MEM_COMMAND_ID_##_id] = {                                  \
                .info = {                                               \
                        .id = CXL_MEM_COMMAND_ID_##_id,                 \
                        .size_in = sin,                                 \
                        .size_out = sout,                               \
                },                                                      \
                .opcode = CXL_MBOX_OP_##_id,                            \
                .flags = _flags,                                        \
        }

#define CXL_VARIABLE_PAYLOAD    ~0U
/*
 * This table defines the supported mailbox commands for the driver. This table
 * is made up of a UAPI structure. Non-negative values as parameters in the
 * table will be validated against the user's input. For example, if size_in is
 * 0, and the user passed in 1, it is an error.
 */
static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
        CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
        CXL_CMD(RAW, CXL_VARIABLE_PAYLOAD, CXL_VARIABLE_PAYLOAD, 0),
#endif
        CXL_CMD(GET_SUPPORTED_LOGS, 0, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
        CXL_CMD(GET_FW_INFO, 0, 0x50, 0),
        CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0),
        CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0),
        CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
        CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
        CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
        CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0),
        CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
        CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0),
        CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
        CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
        CXL_CMD(GET_POISON, 0x10, CXL_VARIABLE_PAYLOAD, 0),
        CXL_CMD(INJECT_POISON, 0x8, 0, 0),
        CXL_CMD(CLEAR_POISON, 0x48, 0, 0),
        CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
        CXL_CMD(SCAN_MEDIA, 0x11, 0, 0),
        CXL_CMD(GET_SCAN_MEDIA, 0, CXL_VARIABLE_PAYLOAD, 0),
};

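/*
 * For reference (illustrative expansion, not additional code): the IDENTIFY
 * entry above expands via CXL_CMD() to roughly:
 *
 *      [CXL_MEM_COMMAND_ID_IDENTIFY] = {
 *              .info = {
 *                      .id = CXL_MEM_COMMAND_ID_IDENTIFY,
 *                      .size_in = 0,
 *                      .size_out = 0x43,
 *              },
 *              .opcode = CXL_MBOX_OP_IDENTIFY,
 *              .flags = CXL_CMD_FLAG_FORCE_ENABLE,
 *      },
 *
 * so a user request for IDENTIFY must supply no input payload and an output
 * buffer of at least 0x43 bytes.
 */
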
/*
 * Commands that RAW doesn't permit. The rationale for each:
 *
 * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment /
 * coordination of transaction timeout values at the root bridge level.
 *
 * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live
 * and needs to be coordinated with HDM updates.
 *
 * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the
 * driver and any writes from userspace invalidate those contents.
 *
 * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes
 * to the device after it is marked clean; userspace cannot make that
 * assertion.
 *
 * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that
 * is kept up to date with patrol notifications and error management.
 */
static u16 cxl_disabled_raw_commands[] = {
        CXL_MBOX_OP_ACTIVATE_FW,
        CXL_MBOX_OP_SET_PARTITION_INFO,
        CXL_MBOX_OP_SET_LSA,
        CXL_MBOX_OP_SET_SHUTDOWN_STATE,
        CXL_MBOX_OP_SCAN_MEDIA,
        CXL_MBOX_OP_GET_SCAN_MEDIA,
};

/*
 * Command sets that RAW doesn't permit. All opcodes in this set are
 * disabled because they pass plain text security payloads over the
 * user/kernel boundary. This functionality is intended to be wrapped
 * behind the keys ABI, which allows for encrypted payloads in the UAPI.
 */
static u8 security_command_sets[] = {
        0x44, /* Sanitize */
        0x45, /* Persistent Memory Data-at-rest Security */
        0x46, /* Security Passthrough */
};

static bool cxl_is_security_command(u16 opcode)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(security_command_sets); i++)
                if (security_command_sets[i] == (opcode >> 8))
                        return true;
        return false;
}

static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
{
        struct cxl_mem_command *c;

        cxl_for_each_cmd(c)
                if (c->opcode == opcode)
                        return c;

        return NULL;
}

static const char *cxl_mem_opcode_to_name(u16 opcode)
{
        struct cxl_mem_command *c;

        c = cxl_mem_find_command(opcode);
        if (!c)
                return NULL;

        return cxl_command_names[c->info.id].name;
}

/**
 * cxl_mbox_send_cmd() - Send a mailbox command to a device.
 * @cxlds: The device data for the operation
 * @opcode: Opcode for the mailbox command.
 * @in: The input payload for the mailbox command.
 * @in_size: The length of the input payload
 * @out: Caller allocated buffer for the output.
 * @out_size: Expected size of output.
 *
 * Context: Any context.
 * Return:
 *  * %>=0      - Number of bytes returned in @out.
 *  * %-E2BIG   - Payload is too large for hardware.
 *  * %-EBUSY   - Couldn't acquire exclusive mailbox access.
 *  * %-EFAULT  - Hardware error occurred.
 *  * %-ENXIO   - Command completed, but device reported an error.
 *  * %-EIO     - Unexpected output size.
 *
 * A mailbox command may complete successfully at the transport level while
 * the device itself reports an error. That distinction is useful for commands
 * issued from userspace, but the kernel only consumes results when both
 * levels succeed.
 */
int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, u16 opcode, void *in,
                      size_t in_size, void *out, size_t out_size)
{
        const struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
        struct cxl_mbox_cmd mbox_cmd = {
                .opcode = opcode,
                .payload_in = in,
                .size_in = in_size,
                .size_out = out_size,
                .payload_out = out,
        };
        int rc;

        if (out_size > cxlds->payload_size)
                return -E2BIG;

        rc = cxlds->mbox_send(cxlds, &mbox_cmd);
        if (rc)
                return rc;

        if (mbox_cmd.return_code != CXL_MBOX_CMD_RC_SUCCESS)
                return cxl_mbox_cmd_rc2errno(&mbox_cmd);

        /*
         * Variable sized output commands can't be validated here, so it's up
         * to the caller to do that if they wish.
         */
        if (cmd->info.size_out != CXL_VARIABLE_PAYLOAD) {
                if (mbox_cmd.size_out != out_size)
                        return -EIO;
        }
        return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_mbox_send_cmd, CXL);

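/*
 * Example (illustrative only): an in-kernel caller issuing a fixed-size
 * command. The 0x12-byte output length mirrors the GET_HEALTH_INFO entry in
 * cxl_mem_commands above; the payload layout is defined by the CXL 2.0
 * specification and is not interpreted here.
 *
 *      u8 health[0x12];
 *      int rc;
 *
 *      rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_HEALTH_INFO, NULL, 0,
 *                             health, sizeof(health));
 *      if (rc < 0)
 *              return rc;
 */
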
static bool cxl_mem_raw_command_allowed(u16 opcode)
{
        int i;

        if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
                return false;

        if (security_locked_down(LOCKDOWN_PCI_ACCESS))
                return false;

        if (cxl_raw_allow_all)
                return true;

        if (cxl_is_security_command(opcode))
                return false;

        for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++)
                if (cxl_disabled_raw_commands[i] == opcode)
                        return false;

        return true;
}

/**
 * cxl_payload_from_user_allowed() - Check contents of in_payload.
 * @opcode: The mailbox command opcode.
 * @payload_in: Pointer to the input payload passed in from user space.
 *
 * Return:
 *  * true      - payload_in passes check for @opcode.
 *  * false     - payload_in contains invalid or unsupported values.
 *
 * The driver may inspect payload contents before sending a mailbox
 * command from user space to the device. The intent is to reject
 * commands with input payloads that are known to be unsafe. This
 * check is not intended to replace the user's careful selection of
 * mailbox command parameters and makes no guarantee that the user
 * command will succeed, nor that it is appropriate.
 *
 * The specific checks are determined by the opcode.
 */
static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
{
        switch (opcode) {
        case CXL_MBOX_OP_SET_PARTITION_INFO: {
                struct cxl_mbox_set_partition_info *pi = payload_in;

                if (pi->flags & CXL_SET_PARTITION_IMMEDIATE_FLAG)
                        return false;
                break;
        }
        default:
                break;
        }
        return true;
}

static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
                             struct cxl_dev_state *cxlds, u16 opcode,
                             size_t in_size, size_t out_size, u64 in_payload)
{
        *mbox = (struct cxl_mbox_cmd) {
                .opcode = opcode,
                .size_in = in_size,
        };

        if (in_size) {
                mbox->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
                                                in_size);
                if (IS_ERR(mbox->payload_in))
                        return PTR_ERR(mbox->payload_in);

                if (!cxl_payload_from_user_allowed(opcode, mbox->payload_in)) {
                        dev_dbg(cxlds->dev, "%s: input payload not allowed\n",
                                cxl_mem_opcode_to_name(opcode));
                        kvfree(mbox->payload_in);
                        return -EBUSY;
                }
        }

        /* Prepare to handle a full payload for variable sized output */
        if (out_size == CXL_VARIABLE_PAYLOAD)
                mbox->size_out = cxlds->payload_size;
        else
                mbox->size_out = out_size;

        if (mbox->size_out) {
                mbox->payload_out = kvzalloc(mbox->size_out, GFP_KERNEL);
                if (!mbox->payload_out) {
                        kvfree(mbox->payload_in);
                        return -ENOMEM;
                }
        }
        return 0;
}

static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox)
{
        kvfree(mbox->payload_in);
        kvfree(mbox->payload_out);
}

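/*
 * Lifetime note (descriptive): cxl_mbox_cmd_ctor() takes a kernel copy of the
 * user input payload and allocates the bounce buffer for the output, so every
 * successful construction must be paired with cxl_mbox_cmd_dtor(). In this
 * file the pairing is cxl_validate_cmd_from_user() -> cxl_mbox_cmd_ctor() on
 * the way in and handle_mailbox_cmd_from_user() -> cxl_mbox_cmd_dtor() on the
 * way out.
 */
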
static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
                              const struct cxl_send_command *send_cmd,
                              struct cxl_dev_state *cxlds)
{
        if (send_cmd->raw.rsvd)
                return -EINVAL;

        /*
         * Unlike supported commands, the output size of RAW commands
         * gets passed along without further checking, so it must be
         * validated here.
         */
        if (send_cmd->out.size > cxlds->payload_size)
                return -EINVAL;

        if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
                return -EPERM;

        dev_WARN_ONCE(cxlds->dev, true, "raw command path used\n");

        *mem_cmd = (struct cxl_mem_command) {
                .info = {
                        .id = CXL_MEM_COMMAND_ID_RAW,
                        .size_in = send_cmd->in.size,
                        .size_out = send_cmd->out.size,
                },
                .opcode = send_cmd->raw.opcode
        };

        return 0;
}

static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
                          const struct cxl_send_command *send_cmd,
                          struct cxl_dev_state *cxlds)
{
        struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id];
        const struct cxl_command_info *info = &c->info;

        if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK)
                return -EINVAL;

        if (send_cmd->rsvd)
                return -EINVAL;

        if (send_cmd->in.rsvd || send_cmd->out.rsvd)
                return -EINVAL;

        /* Check that the command is enabled for hardware */
        if (!test_bit(info->id, cxlds->enabled_cmds))
                return -ENOTTY;

        /* Check that the command is not claimed for exclusive kernel use */
        if (test_bit(info->id, cxlds->exclusive_cmds))
                return -EBUSY;

        /* Check the input buffer is the expected size */
        if ((info->size_in != CXL_VARIABLE_PAYLOAD) &&
            (info->size_in != send_cmd->in.size))
                return -ENOMEM;

        /* Check the output buffer is at least large enough */
        if ((info->size_out != CXL_VARIABLE_PAYLOAD) &&
            (send_cmd->out.size < info->size_out))
                return -ENOMEM;

        *mem_cmd = (struct cxl_mem_command) {
                .info = {
                        .id = info->id,
                        .flags = info->flags,
                        .size_in = send_cmd->in.size,
                        .size_out = send_cmd->out.size,
                },
                .opcode = c->opcode
        };

        return 0;
}

/**
 * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
 * @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd.
 * @cxlds: The device data for the operation
 * @send_cmd: &struct cxl_send_command copied in from userspace.
 *
 * Return:
 *  * %0        - @mbox_cmd is ready to send.
 *  * %-ENOTTY  - Invalid command specified.
 *  * %-EINVAL  - Reserved fields or invalid values were used.
 *  * %-ENOMEM  - Input or output buffer wasn't sized properly.
 *  * %-EPERM   - Attempted to use a protected command.
 *  * %-EBUSY   - Kernel has claimed exclusive access to this opcode
 *
 * The result of this command is a fully validated command in @mbox_cmd that is
 * safe to send to the hardware.
 */
static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
                                      struct cxl_dev_state *cxlds,
                                      const struct cxl_send_command *send_cmd)
{
        struct cxl_mem_command mem_cmd;
        int rc;

        if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX)
                return -ENOTTY;

        /*
         * The user can never specify an input payload larger than what hardware
         * supports, but output can be arbitrarily large (simply write out as
         * much data as the hardware provides).
         */
        if (send_cmd->in.size > cxlds->payload_size)
                return -EINVAL;

        /* Sanitize and construct a cxl_mem_command */
        if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW)
                rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, cxlds);
        else
                rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, cxlds);

        if (rc)
                return rc;

        /* Sanitize and construct a cxl_mbox_cmd */
        return cxl_mbox_cmd_ctor(mbox_cmd, cxlds, mem_cmd.opcode,
                                 mem_cmd.info.size_in, mem_cmd.info.size_out,
                                 send_cmd->in.payload);
}

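/*
 * Example (illustrative): for GET_LSA the table above declares size_in = 0x8
 * and a variable output size, so a CXL_MEM_SEND_COMMAND request with
 * in.size != 0x8 is rejected by cxl_to_mem_cmd() with -ENOMEM, while out.size
 * is not checked against the table because variable-sized outputs are left to
 * the caller.
 */
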
int cxl_query_cmd(struct cxl_memdev *cxlmd,
                  struct cxl_mem_query_commands __user *q)
{
        struct device *dev = &cxlmd->dev;
        struct cxl_mem_command *cmd;
        u32 n_commands;
        int j = 0;

        dev_dbg(dev, "Query IOCTL\n");

        if (get_user(n_commands, &q->n_commands))
                return -EFAULT;

        /* Returns the total number if 0 elements are requested. */
        if (n_commands == 0)
                return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands);

        /*
         * Otherwise, return min(n_commands, total commands) cxl_command_info
         * structures.
         */
        cxl_for_each_cmd(cmd) {
                const struct cxl_command_info *info = &cmd->info;

                if (copy_to_user(&q->commands[j++], info, sizeof(*info)))
                        return -EFAULT;

                if (j == n_commands)
                        break;
        }

        return 0;
}

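/*
 * Userspace sketch (illustrative, assuming the CXL_MEM_QUERY_COMMANDS ioctl
 * and struct cxl_mem_query_commands layout from include/uapi/linux/cxl_mem.h):
 * call once with n_commands = 0 to learn the table size, then again with a
 * suitably sized allocation to retrieve the cxl_command_info entries.
 *
 *      struct cxl_mem_query_commands *query;
 *      __u32 n;
 *
 *      query = calloc(1, sizeof(*query));
 *      query->n_commands = 0;
 *      ioctl(fd, CXL_MEM_QUERY_COMMANDS, query);
 *      n = query->n_commands;
 *      free(query);
 *
 *      query = calloc(1, sizeof(*query) + n * sizeof(query->commands[0]));
 *      query->n_commands = n;
 *      ioctl(fd, CXL_MEM_QUERY_COMMANDS, query);
 */
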
/**
 * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
 * @cxlds: The device data for the operation
 * @mbox_cmd: The validated mailbox command.
 * @out_payload: Pointer to userspace's output payload.
 * @size_out: (Input) Max payload size to copy out.
 *            (Output) Payload size hardware generated.
 * @retval: Hardware generated return code from the operation.
 *
 * Return:
 *  * %0        - Mailbox transaction succeeded. This implies the mailbox
 *                protocol completed successfully, not that the operation
 *                itself was successful.
 *  * %-ENOMEM  - Couldn't allocate a bounce buffer.
 *  * %-EFAULT  - Something happened with copy_to/from_user.
 *  * %-EINTR   - Mailbox acquisition interrupted.
 *  * %-EXXX    - Transaction level failures.
 *
 * Dispatches a mailbox command on behalf of a userspace request.
 * The output payload is copied to userspace.
 *
 * See cxl_send_cmd().
 */
static int handle_mailbox_cmd_from_user(struct cxl_dev_state *cxlds,
                                        struct cxl_mbox_cmd *mbox_cmd,
                                        u64 out_payload, s32 *size_out,
                                        u32 *retval)
{
        struct device *dev = cxlds->dev;
        int rc;

        dev_dbg(dev,
                "Submitting %s command for user\n"
                "\topcode: %x\n"
                "\tsize: %zx\n",
                cxl_mem_opcode_to_name(mbox_cmd->opcode),
                mbox_cmd->opcode, mbox_cmd->size_in);

        rc = cxlds->mbox_send(cxlds, mbox_cmd);
        if (rc)
                goto out;

        /*
         * @size_out contains the max size that's allowed to be written back
         * out to userspace. While the payload may have written more output
         * than this, it will have to be ignored.
         */
        if (mbox_cmd->size_out) {
                dev_WARN_ONCE(dev, mbox_cmd->size_out > *size_out,
                              "Invalid return size\n");
                if (copy_to_user(u64_to_user_ptr(out_payload),
                                 mbox_cmd->payload_out, mbox_cmd->size_out)) {
                        rc = -EFAULT;
                        goto out;
                }
        }

        *size_out = mbox_cmd->size_out;
        *retval = mbox_cmd->return_code;

out:
        cxl_mbox_cmd_dtor(mbox_cmd);
        return rc;
}

int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
{
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct device *dev = &cxlmd->dev;
        struct cxl_send_command send;
        struct cxl_mbox_cmd mbox_cmd;
        int rc;

        dev_dbg(dev, "Send IOCTL\n");

        if (copy_from_user(&send, s, sizeof(send)))
                return -EFAULT;

        rc = cxl_validate_cmd_from_user(&mbox_cmd, cxlds, &send);
        if (rc)
                return rc;

        rc = handle_mailbox_cmd_from_user(cxlds, &mbox_cmd, send.out.payload,
                                          &send.out.size, &send.retval);
        if (rc)
                return rc;

        if (copy_to_user(s, &send, sizeof(send)))
                return -EFAULT;

        return 0;
}

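/*
 * Userspace sketch (illustrative, assuming the CXL_MEM_SEND_COMMAND ioctl and
 * struct cxl_send_command layout from include/uapi/linux/cxl_mem.h). The
 * 0x43-byte output size mirrors the IDENTIFY entry in cxl_mem_commands. A
 * zero return from the ioctl only means the mailbox transaction completed;
 * the device's own status is reported separately in .retval.
 *
 *      u8 id_out[0x43];
 *      struct cxl_send_command send = {
 *              .id = CXL_MEM_COMMAND_ID_IDENTIFY,
 *              .out.size = sizeof(id_out),
 *              .out.payload = (__u64)(uintptr_t)id_out,
 *      };
 *      int rc = ioctl(fd, CXL_MEM_SEND_COMMAND, &send);
 *
 * On success (rc == 0 and send.retval == 0), id_out holds send.out.size bytes
 * of Identify Memory Device output.
 */
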
static int cxl_xfer_log(struct cxl_dev_state *cxlds, uuid_t *uuid, u32 size, u8 *out)
{
        u32 remaining = size;
        u32 offset = 0;

        while (remaining) {
                u32 xfer_size = min_t(u32, remaining, cxlds->payload_size);
                struct cxl_mbox_get_log log = {
                        .uuid = *uuid,
                        .offset = cpu_to_le32(offset),
                        .length = cpu_to_le32(xfer_size)
                };
                int rc;

                rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LOG, &log, sizeof(log),
                                       out, xfer_size);
                if (rc < 0)
                        return rc;

                out += xfer_size;
                remaining -= xfer_size;
                offset += xfer_size;
        }

        return 0;
}

/**
 * cxl_walk_cel() - Walk through the Command Effects Log.
 * @cxlds: The device data for the operation
 * @size: Length of the Command Effects Log.
 * @cel: CEL
 *
 * Iterate over each entry in the CEL and determine if the driver supports the
 * command. If so, the command is enabled for the device and can be used later.
 */
static void cxl_walk_cel(struct cxl_dev_state *cxlds, size_t size, u8 *cel)
{
        struct cxl_cel_entry *cel_entry;
        const int cel_entries = size / sizeof(*cel_entry);
        int i;

        cel_entry = (struct cxl_cel_entry *) cel;

        for (i = 0; i < cel_entries; i++) {
                u16 opcode = le16_to_cpu(cel_entry[i].opcode);
                struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);

                if (!cmd) {
                        dev_dbg(cxlds->dev,
                                "Opcode 0x%04x unsupported by driver", opcode);
                        continue;
                }

                set_bit(cmd->info.id, cxlds->enabled_cmds);
        }
}

static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_dev_state *cxlds)
{
        struct cxl_mbox_get_supported_logs *ret;
        int rc;

        ret = kvmalloc(cxlds->payload_size, GFP_KERNEL);
        if (!ret)
                return ERR_PTR(-ENOMEM);

        rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL, 0, ret,
                               cxlds->payload_size);
        if (rc < 0) {
                kvfree(ret);
                return ERR_PTR(rc);
        }

        return ret;
}

enum {
        CEL_UUID,
        VENDOR_DEBUG_UUID,
};

/* See CXL 2.0 Table 170. Get Log Input Payload */
static const uuid_t log_uuid[] = {
        [CEL_UUID] = DEFINE_CXL_CEL_UUID,
        [VENDOR_DEBUG_UUID] = DEFINE_CXL_VENDOR_DEBUG_UUID,
};

/**
 * cxl_enumerate_cmds() - Enumerate commands for a device.
 * @cxlds: The device data for the operation
 *
 * Returns 0 if enumerate completed successfully.
 *
 * CXL devices have optional support for certain commands. This function will
 * determine the set of supported commands for the hardware and update the
 * enabled_cmds bitmap in the @cxlds.
 */
int cxl_enumerate_cmds(struct cxl_dev_state *cxlds)
{
        struct cxl_mbox_get_supported_logs *gsl;
        struct device *dev = cxlds->dev;
        struct cxl_mem_command *cmd;
        int i, rc;

        gsl = cxl_get_gsl(cxlds);
        if (IS_ERR(gsl))
                return PTR_ERR(gsl);

        rc = -ENOENT;
        for (i = 0; i < le16_to_cpu(gsl->entries); i++) {
                u32 size = le32_to_cpu(gsl->entry[i].size);
                uuid_t uuid = gsl->entry[i].uuid;
                u8 *log;

                dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size);

                if (!uuid_equal(&uuid, &log_uuid[CEL_UUID]))
                        continue;

                log = kvmalloc(size, GFP_KERNEL);
                if (!log) {
                        rc = -ENOMEM;
                        goto out;
                }

                rc = cxl_xfer_log(cxlds, &uuid, size, log);
                if (rc) {
                        kvfree(log);
                        goto out;
                }

                cxl_walk_cel(cxlds, size, log);
                kvfree(log);

                /* In case CEL was bogus, enable some default commands. */
                cxl_for_each_cmd(cmd)
                        if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
                                set_bit(cmd->info.id, cxlds->enabled_cmds);

                /* Found the required CEL */
                rc = 0;
        }

out:
        kvfree(gsl);
        return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);

/**
 * cxl_mem_get_partition_info - Get partition info
 * @cxlds: The device data for the operation
 *
 * Retrieve the current partition info for the device specified. The active
 * values are the current capacity in bytes. If not 0, the 'next' values are
 * the pending values, in bytes, which take effect on next cold reset.
 *
 * Return: 0 if no error; otherwise the result of the mailbox command.
 *
 * See CXL 2.0 8.2.9.5.2.1 Get Partition Info
 */
static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds)
{
        struct cxl_mbox_get_partition_info {
                __le64 active_volatile_cap;
                __le64 active_persistent_cap;
                __le64 next_volatile_cap;
                __le64 next_persistent_cap;
        } __packed pi;
        int rc;

        rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_PARTITION_INFO, NULL, 0,
                               &pi, sizeof(pi));
        if (rc)
                return rc;

        cxlds->active_volatile_bytes =
                le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
        cxlds->active_persistent_bytes =
                le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
        cxlds->next_volatile_bytes =
                le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
        cxlds->next_persistent_bytes =
                le64_to_cpu(pi.next_persistent_cap) * CXL_CAPACITY_MULTIPLIER;

        return 0;
}

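/*
 * Worked example (illustrative): the capacity fields in the Identify and Get
 * Partition Info payloads are expressed in multiples of 256MB per the CXL 2.0
 * specification, which is what the CXL_CAPACITY_MULTIPLIER scaling above
 * accounts for. A device reporting active_volatile_cap = 2 therefore exposes
 * 2 * CXL_CAPACITY_MULTIPLIER = 512MB of active volatile capacity.
 */
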
/**
 * cxl_dev_state_identify() - Send the IDENTIFY command to the device.
 * @cxlds: The device data for the operation
 *
 * Return: 0 if identify was executed successfully.
 *
 * This will dispatch the identify command to the device and on success populate
 * structures to be exported to sysfs.
 */
int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
{
        /* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
        struct cxl_mbox_identify id;
        int rc;

        rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id,
                               sizeof(id));
        if (rc < 0)
                return rc;

        cxlds->total_bytes =
                le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
        cxlds->volatile_only_bytes =
                le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER;
        cxlds->persistent_only_bytes =
                le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER;
        cxlds->partition_align_bytes =
                le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;

        dev_dbg(cxlds->dev,
                "Identify Memory Device\n"
                "         total_bytes = %#llx\n"
                " volatile_only_bytes = %#llx\n"
                " persistent_only_bytes = %#llx\n"
                " partition_align_bytes = %#llx\n",
                cxlds->total_bytes, cxlds->volatile_only_bytes,
                cxlds->persistent_only_bytes, cxlds->partition_align_bytes);

        cxlds->lsa_size = le32_to_cpu(id.lsa_size);
        memcpy(cxlds->firmware_version, id.fw_revision, sizeof(id.fw_revision));

        return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);

int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
{
        int rc;

        if (cxlds->partition_align_bytes == 0) {
                cxlds->ram_range.start = 0;
                cxlds->ram_range.end = cxlds->volatile_only_bytes - 1;
                cxlds->pmem_range.start = cxlds->volatile_only_bytes;
                cxlds->pmem_range.end = cxlds->volatile_only_bytes +
                                        cxlds->persistent_only_bytes - 1;
                return 0;
        }

        rc = cxl_mem_get_partition_info(cxlds);
        if (rc) {
                dev_err(cxlds->dev, "Failed to query partition information\n");
                return rc;
        }

        dev_dbg(cxlds->dev,
                "Get Partition Info\n"
                " active_volatile_bytes = %#llx\n"
                " active_persistent_bytes = %#llx\n"
                " next_volatile_bytes = %#llx\n"
                " next_persistent_bytes = %#llx\n",
                cxlds->active_volatile_bytes, cxlds->active_persistent_bytes,
                cxlds->next_volatile_bytes, cxlds->next_persistent_bytes);

        cxlds->ram_range.start = 0;
        cxlds->ram_range.end = cxlds->active_volatile_bytes - 1;

        cxlds->pmem_range.start = cxlds->active_volatile_bytes;
        cxlds->pmem_range.end =
                cxlds->active_volatile_bytes + cxlds->active_persistent_bytes - 1;

        return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);

struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
{
        struct cxl_dev_state *cxlds;

        cxlds = devm_kzalloc(dev, sizeof(*cxlds), GFP_KERNEL);
        if (!cxlds) {
                dev_err(dev, "No memory available\n");
                return ERR_PTR(-ENOMEM);
        }

        mutex_init(&cxlds->mbox_mutex);
        cxlds->dev = dev;

        return cxlds;
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_create, CXL);

static struct dentry *cxl_debugfs;

void __init cxl_mbox_init(void)
{
        struct dentry *mbox_debugfs;

        cxl_debugfs = debugfs_create_dir("cxl", NULL);
        mbox_debugfs = debugfs_create_dir("mbox", cxl_debugfs);
        debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
                            &cxl_raw_allow_all);
}

void cxl_mbox_exit(void)
{
        debugfs_remove_recursive(cxl_debugfs);
}

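/*
 * Typical call ordering (a sketch of how a driver such as cxl_pci is expected
 * to use these exports; the authoritative sequence lives in that driver's
 * probe path):
 *
 *      cxlds = cxl_dev_state_create(dev);
 *      cxl_enumerate_cmds(cxlds);              populates enabled_cmds
 *      cxl_dev_state_identify(cxlds);          capacity, LSA size, FW revision
 *      cxl_mem_create_range_info(cxlds);       ram_range / pmem_range
 */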