1 /* 2 * CXL Utility library for mailbox interface 3 * 4 * Copyright(C) 2020 Intel Corporation. 5 * 6 * This work is licensed under the terms of the GNU GPL, version 2. See the 7 * COPYING file in the top-level directory. 8 */ 9 10 #include <math.h> 11 12 #include "qemu/osdep.h" 13 #include "hw/pci/msi.h" 14 #include "hw/pci/msix.h" 15 #include "hw/cxl/cxl.h" 16 #include "hw/cxl/cxl_events.h" 17 #include "hw/cxl/cxl_mailbox.h" 18 #include "hw/pci/pci.h" 19 #include "hw/pci-bridge/cxl_upstream_port.h" 20 #include "qemu/cutils.h" 21 #include "qemu/host-utils.h" 22 #include "qemu/log.h" 23 #include "qemu/units.h" 24 #include "qemu/uuid.h" 25 #include "system/hostmem.h" 26 #include "qemu/range.h" 27 #include "qapi/qapi-types-cxl.h" 28 29 #define CXL_CAPACITY_MULTIPLIER (256 * MiB) 30 #define CXL_DC_EVENT_LOG_SIZE 8 31 #define CXL_NUM_EXTENTS_SUPPORTED 512 32 #define CXL_NUM_TAGS_SUPPORTED 0 33 #define CXL_ALERTS_LIFE_USED_WARN_THRESH (1 << 0) 34 #define CXL_ALERTS_OVER_TEMP_WARN_THRESH (1 << 1) 35 #define CXL_ALERTS_UNDER_TEMP_WARN_THRESH (1 << 2) 36 #define CXL_ALERTS_COR_VMEM_ERR_WARN_THRESH (1 << 3) 37 #define CXL_ALERTS_COR_PMEM_ERR_WARN_THRESH (1 << 4) 38 39 /* 40 * How to add a new command, example. The command set FOO, with cmd BAR. 41 * 1. Add the command set and cmd to the enum. 42 * FOO = 0x7f, 43 * #define BAR 0 44 * 2. Implement the handler 45 * static CXLRetCode cmd_foo_bar(struct cxl_cmd *cmd, 46 * CXLDeviceState *cxl_dstate, uint16_t *len) 47 * 3. Add the command to the cxl_cmd_set[][] 48 * [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y }, 49 * 4. Implement your handler 50 * define_mailbox_handler(FOO_BAR) { ... return CXL_MBOX_SUCCESS; } 51 * 52 * 53 * Writing the handler: 54 * The handler will provide the &struct cxl_cmd, the &CXLDeviceState, and the 55 * in/out length of the payload. The handler is responsible for consuming the 56 * payload from cmd->payload and operating upon it as necessary. It must then 57 * fill the output data into cmd->payload (overwriting what was there), 58 * setting the length, and returning a valid return code. 59 * 60 * XXX: The handler need not worry about endianness. The payload is read out of 61 * a register interface that already deals with it. 62 */ 63 64 enum { 65 INFOSTAT = 0x00, 66 #define IS_IDENTIFY 0x1 67 #define BACKGROUND_OPERATION_STATUS 0x2 68 #define GET_RESPONSE_MSG_LIMIT 0x3 69 #define SET_RESPONSE_MSG_LIMIT 0x4 70 #define BACKGROUND_OPERATION_ABORT 0x5 71 EVENTS = 0x01, 72 #define GET_RECORDS 0x0 73 #define CLEAR_RECORDS 0x1 74 #define GET_INTERRUPT_POLICY 0x2 75 #define SET_INTERRUPT_POLICY 0x3 76 FIRMWARE_UPDATE = 0x02, 77 #define GET_INFO 0x0 78 #define TRANSFER 0x1 79 #define ACTIVATE 0x2 80 TIMESTAMP = 0x03, 81 #define GET 0x0 82 #define SET 0x1 83 LOGS = 0x04, 84 #define GET_SUPPORTED 0x0 85 #define GET_LOG 0x1 86 FEATURES = 0x05, 87 #define GET_SUPPORTED 0x0 88 #define GET_FEATURE 0x1 89 #define SET_FEATURE 0x2 90 IDENTIFY = 0x40, 91 #define MEMORY_DEVICE 0x0 92 CCLS = 0x41, 93 #define GET_PARTITION_INFO 0x0 94 #define GET_LSA 0x2 95 #define SET_LSA 0x3 96 HEALTH_INFO_ALERTS = 0x42, 97 #define GET_ALERT_CONFIG 0x1 98 #define SET_ALERT_CONFIG 0x2 99 SANITIZE = 0x44, 100 #define OVERWRITE 0x0 101 #define SECURE_ERASE 0x1 102 #define MEDIA_OPERATIONS 0x2 103 PERSISTENT_MEM = 0x45, 104 #define GET_SECURITY_STATE 0x0 105 MEDIA_AND_POISON = 0x43, 106 #define GET_POISON_LIST 0x0 107 #define INJECT_POISON 0x1 108 #define CLEAR_POISON 0x2 109 #define GET_SCAN_MEDIA_CAPABILITIES 0x3 110 #define SCAN_MEDIA 0x4 111 #define GET_SCAN_MEDIA_RESULTS 0x5 112 DCD_CONFIG = 0x48, 113 #define GET_DC_CONFIG 0x0 114 #define GET_DYN_CAP_EXT_LIST 0x1 115 #define ADD_DYN_CAP_RSP 0x2 116 #define RELEASE_DYN_CAP 0x3 117 PHYSICAL_SWITCH = 0x51, 118 #define IDENTIFY_SWITCH_DEVICE 0x0 119 #define GET_PHYSICAL_PORT_STATE 0x1 120 TUNNEL = 0x53, 121 #define MANAGEMENT_COMMAND 0x0 122 FMAPI_DCD_MGMT = 0x56, 123 #define GET_DCD_INFO 0x0 124 #define GET_HOST_DC_REGION_CONFIG 0x1 125 #define SET_DC_REGION_CONFIG 0x2 126 }; 127 128 /* CCI Message Format CXL r3.1 Figure 7-19 */ 129 typedef struct CXLCCIMessage { 130 uint8_t category; 131 #define CXL_CCI_CAT_REQ 0 132 #define CXL_CCI_CAT_RSP 1 133 uint8_t tag; 134 uint8_t resv1; 135 uint8_t command; 136 uint8_t command_set; 137 uint8_t pl_length[3]; 138 uint16_t rc; 139 uint16_t vendor_specific; 140 uint8_t payload[]; 141 } QEMU_PACKED CXLCCIMessage; 142 143 /* This command is only defined to an MLD FM Owned LD or an MHD */ 144 static CXLRetCode cmd_tunnel_management_cmd(const struct cxl_cmd *cmd, 145 uint8_t *payload_in, 146 size_t len_in, 147 uint8_t *payload_out, 148 size_t *len_out, 149 CXLCCI *cci) 150 { 151 PCIDevice *tunnel_target; 152 CXLCCI *target_cci; 153 struct { 154 uint8_t port_or_ld_id; 155 uint8_t target_type; 156 uint16_t size; 157 CXLCCIMessage ccimessage; 158 } QEMU_PACKED *in; 159 struct { 160 uint16_t resp_len; 161 uint8_t resv[2]; 162 CXLCCIMessage ccimessage; 163 } QEMU_PACKED *out; 164 size_t pl_length, length_out; 165 bool bg_started; 166 int rc; 167 168 if (cmd->in < sizeof(*in)) { 169 return CXL_MBOX_INVALID_INPUT; 170 } 171 in = (void *)payload_in; 172 out = (void *)payload_out; 173 174 if (len_in < sizeof(*in)) { 175 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 176 } 177 /* Enough room for minimum sized message - no payload */ 178 if (in->size < sizeof(in->ccimessage)) { 179 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 180 } 181 /* Length of input payload should be in->size + a wrapping tunnel header */ 182 if (in->size != len_in - offsetof(typeof(*out), ccimessage)) { 183 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 184 } 185 if (in->ccimessage.category != CXL_CCI_CAT_REQ) { 186 return CXL_MBOX_INVALID_INPUT; 187 } 188 189 if (in->target_type != 0) { 190 qemu_log_mask(LOG_UNIMP, 191 "Tunneled Command sent to non existent FM-LD"); 192 return CXL_MBOX_INVALID_INPUT; 193 } 194 195 /* 196 * Target of a tunnel unfortunately depends on type of CCI readint 197 * the message. 198 * If in a switch, then it's the port number. 199 * If in an MLD it is the ld number. 200 * If in an MHD target type indicate where we are going. 201 */ 202 if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) { 203 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 204 if (in->port_or_ld_id != 0) { 205 /* Only pretending to have one for now! */ 206 return CXL_MBOX_INVALID_INPUT; 207 } 208 target_cci = &ct3d->ld0_cci; 209 } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) { 210 CXLUpstreamPort *usp = CXL_USP(cci->d); 211 212 tunnel_target = pcie_find_port_by_pn(&PCI_BRIDGE(usp)->sec_bus, 213 in->port_or_ld_id); 214 if (!tunnel_target) { 215 return CXL_MBOX_INVALID_INPUT; 216 } 217 tunnel_target = 218 pci_bridge_get_sec_bus(PCI_BRIDGE(tunnel_target))->devices[0]; 219 if (!tunnel_target) { 220 return CXL_MBOX_INVALID_INPUT; 221 } 222 if (object_dynamic_cast(OBJECT(tunnel_target), TYPE_CXL_TYPE3)) { 223 CXLType3Dev *ct3d = CXL_TYPE3(tunnel_target); 224 /* Tunneled VDMs always land on FM Owned LD */ 225 target_cci = &ct3d->vdm_fm_owned_ld_mctp_cci; 226 } else { 227 return CXL_MBOX_INVALID_INPUT; 228 } 229 } else { 230 return CXL_MBOX_INVALID_INPUT; 231 } 232 233 pl_length = in->ccimessage.pl_length[2] << 16 | 234 in->ccimessage.pl_length[1] << 8 | in->ccimessage.pl_length[0]; 235 rc = cxl_process_cci_message(target_cci, 236 in->ccimessage.command_set, 237 in->ccimessage.command, 238 pl_length, in->ccimessage.payload, 239 &length_out, out->ccimessage.payload, 240 &bg_started); 241 /* Payload should be in place. Rest of CCI header and needs filling */ 242 out->resp_len = length_out + sizeof(CXLCCIMessage); 243 st24_le_p(out->ccimessage.pl_length, length_out); 244 out->ccimessage.rc = rc; 245 out->ccimessage.category = CXL_CCI_CAT_RSP; 246 out->ccimessage.command = in->ccimessage.command; 247 out->ccimessage.command_set = in->ccimessage.command_set; 248 out->ccimessage.tag = in->ccimessage.tag; 249 *len_out = length_out + sizeof(*out); 250 251 return CXL_MBOX_SUCCESS; 252 } 253 254 static CXLRetCode cmd_events_get_records(const struct cxl_cmd *cmd, 255 uint8_t *payload_in, size_t len_in, 256 uint8_t *payload_out, size_t *len_out, 257 CXLCCI *cci) 258 { 259 CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate; 260 CXLGetEventPayload *pl; 261 uint8_t log_type; 262 int max_recs; 263 264 if (cmd->in < sizeof(log_type)) { 265 return CXL_MBOX_INVALID_INPUT; 266 } 267 268 log_type = payload_in[0]; 269 270 pl = (CXLGetEventPayload *)payload_out; 271 272 max_recs = (cxlds->payload_size - CXL_EVENT_PAYLOAD_HDR_SIZE) / 273 CXL_EVENT_RECORD_SIZE; 274 if (max_recs > 0xFFFF) { 275 max_recs = 0xFFFF; 276 } 277 278 return cxl_event_get_records(cxlds, pl, log_type, max_recs, len_out); 279 } 280 281 static CXLRetCode cmd_events_clear_records(const struct cxl_cmd *cmd, 282 uint8_t *payload_in, 283 size_t len_in, 284 uint8_t *payload_out, 285 size_t *len_out, 286 CXLCCI *cci) 287 { 288 CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate; 289 CXLClearEventPayload *pl; 290 291 pl = (CXLClearEventPayload *)payload_in; 292 293 if (len_in < sizeof(*pl) || 294 len_in < sizeof(*pl) + sizeof(*pl->handle) * pl->nr_recs) { 295 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 296 } 297 298 *len_out = 0; 299 return cxl_event_clear_records(cxlds, pl); 300 } 301 302 static CXLRetCode cmd_events_get_interrupt_policy(const struct cxl_cmd *cmd, 303 uint8_t *payload_in, 304 size_t len_in, 305 uint8_t *payload_out, 306 size_t *len_out, 307 CXLCCI *cci) 308 { 309 CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate; 310 CXLEventInterruptPolicy *policy; 311 CXLEventLog *log; 312 313 policy = (CXLEventInterruptPolicy *)payload_out; 314 315 log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO]; 316 if (log->irq_enabled) { 317 policy->info_settings = CXL_EVENT_INT_SETTING(log->irq_vec); 318 } 319 320 log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN]; 321 if (log->irq_enabled) { 322 policy->warn_settings = CXL_EVENT_INT_SETTING(log->irq_vec); 323 } 324 325 log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL]; 326 if (log->irq_enabled) { 327 policy->failure_settings = CXL_EVENT_INT_SETTING(log->irq_vec); 328 } 329 330 log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL]; 331 if (log->irq_enabled) { 332 policy->fatal_settings = CXL_EVENT_INT_SETTING(log->irq_vec); 333 } 334 335 log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP]; 336 if (log->irq_enabled) { 337 /* Dynamic Capacity borrows the same vector as info */ 338 policy->dyn_cap_settings = CXL_INT_MSI_MSIX; 339 } 340 341 *len_out = sizeof(*policy); 342 return CXL_MBOX_SUCCESS; 343 } 344 345 static CXLRetCode cmd_events_set_interrupt_policy(const struct cxl_cmd *cmd, 346 uint8_t *payload_in, 347 size_t len_in, 348 uint8_t *payload_out, 349 size_t *len_out, 350 CXLCCI *cci) 351 { 352 CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate; 353 CXLEventInterruptPolicy *policy; 354 CXLEventLog *log; 355 356 if (len_in < CXL_EVENT_INT_SETTING_MIN_LEN) { 357 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 358 } 359 360 policy = (CXLEventInterruptPolicy *)payload_in; 361 362 log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO]; 363 log->irq_enabled = (policy->info_settings & CXL_EVENT_INT_MODE_MASK) == 364 CXL_INT_MSI_MSIX; 365 366 log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN]; 367 log->irq_enabled = (policy->warn_settings & CXL_EVENT_INT_MODE_MASK) == 368 CXL_INT_MSI_MSIX; 369 370 log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL]; 371 log->irq_enabled = (policy->failure_settings & CXL_EVENT_INT_MODE_MASK) == 372 CXL_INT_MSI_MSIX; 373 374 log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL]; 375 log->irq_enabled = (policy->fatal_settings & CXL_EVENT_INT_MODE_MASK) == 376 CXL_INT_MSI_MSIX; 377 378 /* DCD is optional */ 379 if (len_in < sizeof(*policy)) { 380 return CXL_MBOX_SUCCESS; 381 } 382 383 log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP]; 384 log->irq_enabled = (policy->dyn_cap_settings & CXL_EVENT_INT_MODE_MASK) == 385 CXL_INT_MSI_MSIX; 386 387 *len_out = 0; 388 return CXL_MBOX_SUCCESS; 389 } 390 391 /* CXL r3.1 section 8.2.9.1.1: Identify (Opcode 0001h) */ 392 static CXLRetCode cmd_infostat_identify(const struct cxl_cmd *cmd, 393 uint8_t *payload_in, 394 size_t len_in, 395 uint8_t *payload_out, 396 size_t *len_out, 397 CXLCCI *cci) 398 { 399 PCIDeviceClass *class = PCI_DEVICE_GET_CLASS(cci->d); 400 struct { 401 uint16_t pcie_vid; 402 uint16_t pcie_did; 403 uint16_t pcie_subsys_vid; 404 uint16_t pcie_subsys_id; 405 uint64_t sn; 406 uint8_t max_message_size; 407 uint8_t component_type; 408 } QEMU_PACKED *is_identify; 409 QEMU_BUILD_BUG_ON(sizeof(*is_identify) != 18); 410 411 is_identify = (void *)payload_out; 412 is_identify->pcie_vid = class->vendor_id; 413 is_identify->pcie_did = class->device_id; 414 if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) { 415 is_identify->sn = CXL_USP(cci->d)->sn; 416 /* Subsystem info not defined for a USP */ 417 is_identify->pcie_subsys_vid = 0; 418 is_identify->pcie_subsys_id = 0; 419 is_identify->component_type = 0x0; /* Switch */ 420 } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) { 421 PCIDevice *pci_dev = PCI_DEVICE(cci->d); 422 423 is_identify->sn = CXL_TYPE3(cci->d)->sn; 424 /* 425 * We can't always use class->subsystem_vendor_id as 426 * it is not set if the defaults are used. 427 */ 428 is_identify->pcie_subsys_vid = 429 pci_get_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID); 430 is_identify->pcie_subsys_id = 431 pci_get_word(pci_dev->config + PCI_SUBSYSTEM_ID); 432 is_identify->component_type = 0x3; /* Type 3 */ 433 } 434 435 is_identify->max_message_size = (uint8_t)log2(cci->payload_max); 436 *len_out = sizeof(*is_identify); 437 return CXL_MBOX_SUCCESS; 438 } 439 440 /* CXL r3.1 section 8.2.9.1.3: Get Response Message Limit (Opcode 0003h) */ 441 static CXLRetCode cmd_get_response_msg_limit(const struct cxl_cmd *cmd, 442 uint8_t *payload_in, 443 size_t len_in, 444 uint8_t *payload_out, 445 size_t *len_out, 446 CXLCCI *cci) 447 { 448 struct { 449 uint8_t rsp_limit; 450 } QEMU_PACKED *get_rsp_msg_limit = (void *)payload_out; 451 QEMU_BUILD_BUG_ON(sizeof(*get_rsp_msg_limit) != 1); 452 453 get_rsp_msg_limit->rsp_limit = (uint8_t)log2(cci->payload_max); 454 455 *len_out = sizeof(*get_rsp_msg_limit); 456 return CXL_MBOX_SUCCESS; 457 } 458 459 /* CXL r3.1 section 8.2.9.1.4: Set Response Message Limit (Opcode 0004h) */ 460 static CXLRetCode cmd_set_response_msg_limit(const struct cxl_cmd *cmd, 461 uint8_t *payload_in, 462 size_t len_in, 463 uint8_t *payload_out, 464 size_t *len_out, 465 CXLCCI *cci) 466 { 467 struct { 468 uint8_t rsp_limit; 469 } QEMU_PACKED *in = (void *)payload_in; 470 QEMU_BUILD_BUG_ON(sizeof(*in) != 1); 471 struct { 472 uint8_t rsp_limit; 473 } QEMU_PACKED *out = (void *)payload_out; 474 QEMU_BUILD_BUG_ON(sizeof(*out) != 1); 475 476 if (in->rsp_limit < 8 || in->rsp_limit > 10) { 477 return CXL_MBOX_INVALID_INPUT; 478 } 479 480 cci->payload_max = 1 << in->rsp_limit; 481 out->rsp_limit = in->rsp_limit; 482 483 *len_out = sizeof(*out); 484 return CXL_MBOX_SUCCESS; 485 } 486 487 static void cxl_set_dsp_active_bm(PCIBus *b, PCIDevice *d, 488 void *private) 489 { 490 uint8_t *bm = private; 491 if (object_dynamic_cast(OBJECT(d), TYPE_CXL_DSP)) { 492 uint8_t port = PCIE_PORT(d)->port; 493 bm[port / 8] |= 1 << (port % 8); 494 } 495 } 496 497 /* CXL r3.1 Section 7.6.7.1.1: Identify Switch Device (Opcode 5100h) */ 498 static CXLRetCode cmd_identify_switch_device(const struct cxl_cmd *cmd, 499 uint8_t *payload_in, 500 size_t len_in, 501 uint8_t *payload_out, 502 size_t *len_out, 503 CXLCCI *cci) 504 { 505 PCIEPort *usp = PCIE_PORT(cci->d); 506 PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus; 507 int num_phys_ports = pcie_count_ds_ports(bus); 508 509 struct cxl_fmapi_ident_switch_dev_resp_pl { 510 uint8_t ingress_port_id; 511 uint8_t rsvd; 512 uint8_t num_physical_ports; 513 uint8_t num_vcss; 514 uint8_t active_port_bitmask[0x20]; 515 uint8_t active_vcs_bitmask[0x20]; 516 uint16_t total_vppbs; 517 uint16_t bound_vppbs; 518 uint8_t num_hdm_decoders_per_usp; 519 } QEMU_PACKED *out; 520 QEMU_BUILD_BUG_ON(sizeof(*out) != 0x49); 521 522 out = (struct cxl_fmapi_ident_switch_dev_resp_pl *)payload_out; 523 *out = (struct cxl_fmapi_ident_switch_dev_resp_pl) { 524 .num_physical_ports = num_phys_ports + 1, /* 1 USP */ 525 .num_vcss = 1, /* Not yet support multiple VCS - potentially tricky */ 526 .active_vcs_bitmask[0] = 0x1, 527 .total_vppbs = num_phys_ports + 1, 528 .bound_vppbs = num_phys_ports + 1, 529 .num_hdm_decoders_per_usp = 4, 530 }; 531 532 /* Depends on the CCI type */ 533 if (object_dynamic_cast(OBJECT(cci->intf), TYPE_PCIE_PORT)) { 534 out->ingress_port_id = PCIE_PORT(cci->intf)->port; 535 } else { 536 /* MCTP? */ 537 out->ingress_port_id = 0; 538 } 539 540 pci_for_each_device_under_bus(bus, cxl_set_dsp_active_bm, 541 out->active_port_bitmask); 542 out->active_port_bitmask[usp->port / 8] |= (1 << usp->port % 8); 543 544 *len_out = sizeof(*out); 545 546 return CXL_MBOX_SUCCESS; 547 } 548 549 /* CXL r3.1 Section 7.6.7.1.2: Get Physical Port State (Opcode 5101h) */ 550 static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd, 551 uint8_t *payload_in, 552 size_t len_in, 553 uint8_t *payload_out, 554 size_t *len_out, 555 CXLCCI *cci) 556 { 557 /* CXL r3.1 Table 7-17: Get Physical Port State Request Payload */ 558 struct cxl_fmapi_get_phys_port_state_req_pl { 559 uint8_t num_ports; 560 uint8_t ports[]; 561 } QEMU_PACKED *in; 562 563 /* 564 * CXL r3.1 Table 7-19: Get Physical Port State Port Information Block 565 * Format 566 */ 567 struct cxl_fmapi_port_state_info_block { 568 uint8_t port_id; 569 uint8_t config_state; 570 uint8_t connected_device_cxl_version; 571 uint8_t rsv1; 572 uint8_t connected_device_type; 573 uint8_t port_cxl_version_bitmask; 574 uint8_t max_link_width; 575 uint8_t negotiated_link_width; 576 uint8_t supported_link_speeds_vector; 577 uint8_t max_link_speed; 578 uint8_t current_link_speed; 579 uint8_t ltssm_state; 580 uint8_t first_lane_num; 581 uint16_t link_state; 582 uint8_t supported_ld_count; 583 } QEMU_PACKED; 584 585 /* CXL r3.1 Table 7-18: Get Physical Port State Response Payload */ 586 struct cxl_fmapi_get_phys_port_state_resp_pl { 587 uint8_t num_ports; 588 uint8_t rsv1[3]; 589 struct cxl_fmapi_port_state_info_block ports[]; 590 } QEMU_PACKED *out; 591 PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus; 592 PCIEPort *usp = PCIE_PORT(cci->d); 593 size_t pl_size; 594 int i; 595 596 in = (struct cxl_fmapi_get_phys_port_state_req_pl *)payload_in; 597 out = (struct cxl_fmapi_get_phys_port_state_resp_pl *)payload_out; 598 599 if (len_in < sizeof(*in)) { 600 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 601 } 602 /* Check if what was requested can fit */ 603 if (sizeof(*out) + sizeof(*out->ports) * in->num_ports > cci->payload_max) { 604 return CXL_MBOX_INVALID_INPUT; 605 } 606 607 /* For success there should be a match for each requested */ 608 out->num_ports = in->num_ports; 609 610 for (i = 0; i < in->num_ports; i++) { 611 struct cxl_fmapi_port_state_info_block *port; 612 /* First try to match on downstream port */ 613 PCIDevice *port_dev; 614 uint16_t lnkcap, lnkcap2, lnksta; 615 616 port = &out->ports[i]; 617 618 port_dev = pcie_find_port_by_pn(bus, in->ports[i]); 619 if (port_dev) { /* DSP */ 620 PCIDevice *ds_dev = pci_bridge_get_sec_bus(PCI_BRIDGE(port_dev)) 621 ->devices[0]; 622 port->config_state = 3; 623 if (ds_dev) { 624 if (object_dynamic_cast(OBJECT(ds_dev), TYPE_CXL_TYPE3)) { 625 port->connected_device_type = 5; /* Assume MLD for now */ 626 } else { 627 port->connected_device_type = 1; 628 } 629 } else { 630 port->connected_device_type = 0; 631 } 632 port->supported_ld_count = 3; 633 } else if (usp->port == in->ports[i]) { /* USP */ 634 port_dev = PCI_DEVICE(usp); 635 port->config_state = 4; 636 port->connected_device_type = 0; 637 } else { 638 return CXL_MBOX_INVALID_INPUT; 639 } 640 641 port->port_id = in->ports[i]; 642 /* Information on status of this port in lnksta, lnkcap */ 643 if (!port_dev->exp.exp_cap) { 644 return CXL_MBOX_INTERNAL_ERROR; 645 } 646 lnksta = port_dev->config_read(port_dev, 647 port_dev->exp.exp_cap + PCI_EXP_LNKSTA, 648 sizeof(lnksta)); 649 lnkcap = port_dev->config_read(port_dev, 650 port_dev->exp.exp_cap + PCI_EXP_LNKCAP, 651 sizeof(lnkcap)); 652 lnkcap2 = port_dev->config_read(port_dev, 653 port_dev->exp.exp_cap + PCI_EXP_LNKCAP2, 654 sizeof(lnkcap2)); 655 656 port->max_link_width = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4; 657 port->negotiated_link_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> 4; 658 /* No definition for SLS field in linux/pci_regs.h */ 659 port->supported_link_speeds_vector = (lnkcap2 & 0xFE) >> 1; 660 port->max_link_speed = lnkcap & PCI_EXP_LNKCAP_SLS; 661 port->current_link_speed = lnksta & PCI_EXP_LNKSTA_CLS; 662 /* TODO: Track down if we can get the rest of the info */ 663 port->ltssm_state = 0x7; 664 port->first_lane_num = 0; 665 port->link_state = 0; 666 port->port_cxl_version_bitmask = 0x2; 667 port->connected_device_cxl_version = 0x2; 668 } 669 670 pl_size = sizeof(*out) + sizeof(*out->ports) * in->num_ports; 671 *len_out = pl_size; 672 673 return CXL_MBOX_SUCCESS; 674 } 675 676 /* CXL r3.1 Section 8.2.9.1.2: Background Operation Status (Opcode 0002h) */ 677 static CXLRetCode cmd_infostat_bg_op_sts(const struct cxl_cmd *cmd, 678 uint8_t *payload_in, 679 size_t len_in, 680 uint8_t *payload_out, 681 size_t *len_out, 682 CXLCCI *cci) 683 { 684 struct { 685 uint8_t status; 686 uint8_t rsvd; 687 uint16_t opcode; 688 uint16_t returncode; 689 uint16_t vendor_ext_status; 690 } QEMU_PACKED *bg_op_status; 691 QEMU_BUILD_BUG_ON(sizeof(*bg_op_status) != 8); 692 693 bg_op_status = (void *)payload_out; 694 bg_op_status->status = cci->bg.complete_pct << 1; 695 if (cci->bg.runtime > 0) { 696 bg_op_status->status |= 1U << 0; 697 } 698 bg_op_status->opcode = cci->bg.opcode; 699 bg_op_status->returncode = cci->bg.ret_code; 700 *len_out = sizeof(*bg_op_status); 701 702 return CXL_MBOX_SUCCESS; 703 } 704 705 /* 706 * CXL r3.1 Section 8.2.9.1.5: 707 * Request Abort Background Operation (Opcode 0005h) 708 */ 709 static CXLRetCode cmd_infostat_bg_op_abort(const struct cxl_cmd *cmd, 710 uint8_t *payload_in, 711 size_t len_in, 712 uint8_t *payload_out, 713 size_t *len_out, 714 CXLCCI *cci) 715 { 716 int bg_set = cci->bg.opcode >> 8; 717 int bg_cmd = cci->bg.opcode & 0xff; 718 const struct cxl_cmd *bg_c = &cci->cxl_cmd_set[bg_set][bg_cmd]; 719 720 if (!(bg_c->effect & CXL_MBOX_BACKGROUND_OPERATION_ABORT)) { 721 return CXL_MBOX_REQUEST_ABORT_NOTSUP; 722 } 723 724 qemu_mutex_lock(&cci->bg.lock); 725 if (cci->bg.runtime) { 726 /* operation is near complete, let it finish */ 727 if (cci->bg.complete_pct < 85) { 728 timer_del(cci->bg.timer); 729 cci->bg.ret_code = CXL_MBOX_ABORTED; 730 cci->bg.starttime = 0; 731 cci->bg.runtime = 0; 732 cci->bg.aborted = true; 733 } 734 } 735 qemu_mutex_unlock(&cci->bg.lock); 736 737 return CXL_MBOX_SUCCESS; 738 } 739 740 #define CXL_FW_SLOTS 2 741 #define CXL_FW_SIZE 0x02000000 /* 32 mb */ 742 743 /* CXL r3.1 Section 8.2.9.3.1: Get FW Info (Opcode 0200h) */ 744 static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd, 745 uint8_t *payload_in, 746 size_t len, 747 uint8_t *payload_out, 748 size_t *len_out, 749 CXLCCI *cci) 750 { 751 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 752 CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; 753 struct { 754 uint8_t slots_supported; 755 uint8_t slot_info; 756 uint8_t caps; 757 uint8_t rsvd[0xd]; 758 char fw_rev1[0x10]; 759 char fw_rev2[0x10]; 760 char fw_rev3[0x10]; 761 char fw_rev4[0x10]; 762 } QEMU_PACKED *fw_info; 763 QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50); 764 765 if (!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER) || 766 !QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER) || 767 !QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER)) { 768 return CXL_MBOX_INTERNAL_ERROR; 769 } 770 771 fw_info = (void *)payload_out; 772 773 fw_info->slots_supported = CXL_FW_SLOTS; 774 fw_info->slot_info = (cci->fw.active_slot & 0x7) | 775 ((cci->fw.staged_slot & 0x7) << 3); 776 fw_info->caps = BIT(0); /* online update supported */ 777 778 if (cci->fw.slot[0]) { 779 pstrcpy(fw_info->fw_rev1, sizeof(fw_info->fw_rev1), "BWFW VERSION 0"); 780 } 781 if (cci->fw.slot[1]) { 782 pstrcpy(fw_info->fw_rev2, sizeof(fw_info->fw_rev2), "BWFW VERSION 1"); 783 } 784 785 *len_out = sizeof(*fw_info); 786 return CXL_MBOX_SUCCESS; 787 } 788 789 /* CXL r3.1 section 8.2.9.3.2: Transfer FW (Opcode 0201h) */ 790 #define CXL_FW_XFER_ALIGNMENT 128 791 792 #define CXL_FW_XFER_ACTION_FULL 0x0 793 #define CXL_FW_XFER_ACTION_INIT 0x1 794 #define CXL_FW_XFER_ACTION_CONTINUE 0x2 795 #define CXL_FW_XFER_ACTION_END 0x3 796 #define CXL_FW_XFER_ACTION_ABORT 0x4 797 798 static CXLRetCode cmd_firmware_update_transfer(const struct cxl_cmd *cmd, 799 uint8_t *payload_in, 800 size_t len, 801 uint8_t *payload_out, 802 size_t *len_out, 803 CXLCCI *cci) 804 { 805 struct { 806 uint8_t action; 807 uint8_t slot; 808 uint8_t rsvd1[2]; 809 uint32_t offset; 810 uint8_t rsvd2[0x78]; 811 uint8_t data[]; 812 } QEMU_PACKED *fw_transfer = (void *)payload_in; 813 size_t offset, length; 814 815 if (len < sizeof(*fw_transfer)) { 816 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 817 } 818 819 if (fw_transfer->action == CXL_FW_XFER_ACTION_ABORT) { 820 /* 821 * At this point there aren't any on-going transfers 822 * running in the bg - this is serialized before this 823 * call altogether. Just mark the state machine and 824 * disregard any other input. 825 */ 826 cci->fw.transferring = false; 827 return CXL_MBOX_SUCCESS; 828 } 829 830 offset = fw_transfer->offset * CXL_FW_XFER_ALIGNMENT; 831 length = len - sizeof(*fw_transfer); 832 if (offset + length > CXL_FW_SIZE) { 833 return CXL_MBOX_INVALID_INPUT; 834 } 835 836 if (cci->fw.transferring) { 837 if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL || 838 fw_transfer->action == CXL_FW_XFER_ACTION_INIT) { 839 return CXL_MBOX_FW_XFER_IN_PROGRESS; 840 } 841 /* 842 * Abort partitioned package transfer if over 30 secs 843 * between parts. As opposed to the explicit ABORT action, 844 * semantically treat this condition as an error - as 845 * if a part action were passed without a previous INIT. 846 */ 847 if (difftime(time(NULL), cci->fw.last_partxfer) > 30.0) { 848 cci->fw.transferring = false; 849 return CXL_MBOX_INVALID_INPUT; 850 } 851 } else if (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE || 852 fw_transfer->action == CXL_FW_XFER_ACTION_END) { 853 return CXL_MBOX_INVALID_INPUT; 854 } 855 856 /* allow back-to-back retransmission */ 857 if ((offset != cci->fw.prev_offset || length != cci->fw.prev_len) && 858 (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE || 859 fw_transfer->action == CXL_FW_XFER_ACTION_END)) { 860 /* verify no overlaps */ 861 if (offset < cci->fw.prev_offset + cci->fw.prev_len) { 862 return CXL_MBOX_FW_XFER_OUT_OF_ORDER; 863 } 864 } 865 866 switch (fw_transfer->action) { 867 case CXL_FW_XFER_ACTION_FULL: /* ignores offset */ 868 case CXL_FW_XFER_ACTION_END: 869 if (fw_transfer->slot == 0 || 870 fw_transfer->slot == cci->fw.active_slot || 871 fw_transfer->slot > CXL_FW_SLOTS) { 872 return CXL_MBOX_FW_INVALID_SLOT; 873 } 874 875 /* mark the slot used upon bg completion */ 876 break; 877 case CXL_FW_XFER_ACTION_INIT: 878 if (offset != 0) { 879 return CXL_MBOX_INVALID_INPUT; 880 } 881 882 cci->fw.transferring = true; 883 cci->fw.prev_offset = offset; 884 cci->fw.prev_len = length; 885 break; 886 case CXL_FW_XFER_ACTION_CONTINUE: 887 cci->fw.prev_offset = offset; 888 cci->fw.prev_len = length; 889 break; 890 default: 891 return CXL_MBOX_INVALID_INPUT; 892 } 893 894 if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL) { 895 cci->bg.runtime = 10 * 1000UL; 896 } else { 897 cci->bg.runtime = 2 * 1000UL; 898 } 899 /* keep relevant context for bg completion */ 900 cci->fw.curr_action = fw_transfer->action; 901 cci->fw.curr_slot = fw_transfer->slot; 902 *len_out = 0; 903 904 return CXL_MBOX_BG_STARTED; 905 } 906 907 static void __do_firmware_xfer(CXLCCI *cci) 908 { 909 switch (cci->fw.curr_action) { 910 case CXL_FW_XFER_ACTION_FULL: 911 case CXL_FW_XFER_ACTION_END: 912 cci->fw.slot[cci->fw.curr_slot - 1] = true; 913 cci->fw.transferring = false; 914 break; 915 case CXL_FW_XFER_ACTION_INIT: 916 case CXL_FW_XFER_ACTION_CONTINUE: 917 time(&cci->fw.last_partxfer); 918 break; 919 default: 920 break; 921 } 922 } 923 924 /* CXL r3.1 section 8.2.9.3.3: Activate FW (Opcode 0202h) */ 925 static CXLRetCode cmd_firmware_update_activate(const struct cxl_cmd *cmd, 926 uint8_t *payload_in, 927 size_t len, 928 uint8_t *payload_out, 929 size_t *len_out, 930 CXLCCI *cci) 931 { 932 struct { 933 uint8_t action; 934 uint8_t slot; 935 } QEMU_PACKED *fw_activate = (void *)payload_in; 936 QEMU_BUILD_BUG_ON(sizeof(*fw_activate) != 0x2); 937 938 if (fw_activate->slot == 0 || 939 fw_activate->slot == cci->fw.active_slot || 940 fw_activate->slot > CXL_FW_SLOTS) { 941 return CXL_MBOX_FW_INVALID_SLOT; 942 } 943 944 /* ensure that an actual fw package is there */ 945 if (!cci->fw.slot[fw_activate->slot - 1]) { 946 return CXL_MBOX_FW_INVALID_SLOT; 947 } 948 949 switch (fw_activate->action) { 950 case 0: /* online */ 951 cci->fw.active_slot = fw_activate->slot; 952 break; 953 case 1: /* reset */ 954 cci->fw.staged_slot = fw_activate->slot; 955 break; 956 default: 957 return CXL_MBOX_INVALID_INPUT; 958 } 959 960 return CXL_MBOX_SUCCESS; 961 } 962 963 /* CXL r3.1 Section 8.2.9.4.1: Get Timestamp (Opcode 0300h) */ 964 static CXLRetCode cmd_timestamp_get(const struct cxl_cmd *cmd, 965 uint8_t *payload_in, 966 size_t len_in, 967 uint8_t *payload_out, 968 size_t *len_out, 969 CXLCCI *cci) 970 { 971 CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate; 972 uint64_t final_time = cxl_device_get_timestamp(cxl_dstate); 973 974 stq_le_p(payload_out, final_time); 975 *len_out = 8; 976 977 return CXL_MBOX_SUCCESS; 978 } 979 980 /* CXL r3.1 Section 8.2.9.4.2: Set Timestamp (Opcode 0301h) */ 981 static CXLRetCode cmd_timestamp_set(const struct cxl_cmd *cmd, 982 uint8_t *payload_in, 983 size_t len_in, 984 uint8_t *payload_out, 985 size_t *len_out, 986 CXLCCI *cci) 987 { 988 CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate; 989 990 cxl_dstate->timestamp.set = true; 991 cxl_dstate->timestamp.last_set = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); 992 993 cxl_dstate->timestamp.host_set = le64_to_cpu(*(uint64_t *)payload_in); 994 995 *len_out = 0; 996 return CXL_MBOX_SUCCESS; 997 } 998 999 /* CXL r3.1 Section 8.2.9.5.2.1: Command Effects Log (CEL) */ 1000 static const QemuUUID cel_uuid = { 1001 .data = UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79, 1002 0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17) 1003 }; 1004 1005 /* CXL r3.1 Section 8.2.9.5.1: Get Supported Logs (Opcode 0400h) */ 1006 static CXLRetCode cmd_logs_get_supported(const struct cxl_cmd *cmd, 1007 uint8_t *payload_in, 1008 size_t len_in, 1009 uint8_t *payload_out, 1010 size_t *len_out, 1011 CXLCCI *cci) 1012 { 1013 struct { 1014 uint16_t entries; 1015 uint8_t rsvd[6]; 1016 struct { 1017 QemuUUID uuid; 1018 uint32_t size; 1019 } log_entries[1]; 1020 } QEMU_PACKED *supported_logs = (void *)payload_out; 1021 QEMU_BUILD_BUG_ON(sizeof(*supported_logs) != 0x1c); 1022 1023 supported_logs->entries = 1; 1024 supported_logs->log_entries[0].uuid = cel_uuid; 1025 supported_logs->log_entries[0].size = 4 * cci->cel_size; 1026 1027 *len_out = sizeof(*supported_logs); 1028 return CXL_MBOX_SUCCESS; 1029 } 1030 1031 /* CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h) */ 1032 static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd, 1033 uint8_t *payload_in, 1034 size_t len_in, 1035 uint8_t *payload_out, 1036 size_t *len_out, 1037 CXLCCI *cci) 1038 { 1039 struct { 1040 QemuUUID uuid; 1041 uint32_t offset; 1042 uint32_t length; 1043 } QEMU_PACKED QEMU_ALIGNED(16) *get_log; 1044 1045 get_log = (void *)payload_in; 1046 1047 if (get_log->length > cci->payload_max) { 1048 return CXL_MBOX_INVALID_INPUT; 1049 } 1050 1051 if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) { 1052 return CXL_MBOX_INVALID_LOG; 1053 } 1054 1055 /* 1056 * CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h) 1057 * The device shall return Invalid Input if the Offset or Length 1058 * fields attempt to access beyond the size of the log as reported by Get 1059 * Supported Log. 1060 * 1061 * Only valid for there to be one entry per opcode, but the length + offset 1062 * may still be greater than that if the inputs are not valid and so access 1063 * beyond the end of cci->cel_log. 1064 */ 1065 if ((uint64_t)get_log->offset + get_log->length >= sizeof(cci->cel_log)) { 1066 return CXL_MBOX_INVALID_INPUT; 1067 } 1068 1069 /* Store off everything to local variables so we can wipe out the payload */ 1070 *len_out = get_log->length; 1071 1072 memmove(payload_out, cci->cel_log + get_log->offset, get_log->length); 1073 1074 return CXL_MBOX_SUCCESS; 1075 } 1076 1077 /* CXL r3.1 section 8.2.9.6: Features */ 1078 /* 1079 * Get Supported Features output payload 1080 * CXL r3.1 section 8.2.9.6.1 Table 8-96 1081 */ 1082 typedef struct CXLSupportedFeatureHeader { 1083 uint16_t entries; 1084 uint16_t nsuppfeats_dev; 1085 uint32_t reserved; 1086 } QEMU_PACKED CXLSupportedFeatureHeader; 1087 1088 /* 1089 * Get Supported Features Supported Feature Entry 1090 * CXL r3.1 section 8.2.9.6.1 Table 8-97 1091 */ 1092 typedef struct CXLSupportedFeatureEntry { 1093 QemuUUID uuid; 1094 uint16_t feat_index; 1095 uint16_t get_feat_size; 1096 uint16_t set_feat_size; 1097 uint32_t attr_flags; 1098 uint8_t get_feat_version; 1099 uint8_t set_feat_version; 1100 uint16_t set_feat_effects; 1101 uint8_t rsvd[18]; 1102 } QEMU_PACKED CXLSupportedFeatureEntry; 1103 1104 /* 1105 * Get Supported Features Supported Feature Entry 1106 * CXL rev 3.1 section 8.2.9.6.1 Table 8-97 1107 */ 1108 /* Supported Feature Entry : attribute flags */ 1109 #define CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE BIT(0) 1110 #define CXL_FEAT_ENTRY_ATTR_FLAG_DEEPEST_RESET_PERSISTENCE_MASK GENMASK(3, 1) 1111 #define CXL_FEAT_ENTRY_ATTR_FLAG_PERSIST_ACROSS_FIRMWARE_UPDATE BIT(4) 1112 #define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_DEFAULT_SELECTION BIT(5) 1113 #define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_SAVED_SELECTION BIT(6) 1114 1115 /* Supported Feature Entry : set feature effects */ 1116 #define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_COLD_RESET BIT(0) 1117 #define CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE BIT(1) 1118 #define CXL_FEAT_ENTRY_SFE_IMMEDIATE_DATA_CHANGE BIT(2) 1119 #define CXL_FEAT_ENTRY_SFE_IMMEDIATE_POLICY_CHANGE BIT(3) 1120 #define CXL_FEAT_ENTRY_SFE_IMMEDIATE_LOG_CHANGE BIT(4) 1121 #define CXL_FEAT_ENTRY_SFE_SECURITY_STATE_CHANGE BIT(5) 1122 #define CXL_FEAT_ENTRY_SFE_BACKGROUND_OPERATION BIT(6) 1123 #define CXL_FEAT_ENTRY_SFE_SUPPORT_SECONDARY_MAILBOX BIT(7) 1124 #define CXL_FEAT_ENTRY_SFE_SUPPORT_ABORT_BACKGROUND_OPERATION BIT(8) 1125 #define CXL_FEAT_ENTRY_SFE_CEL_VALID BIT(9) 1126 #define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CONV_RESET BIT(10) 1127 #define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CXL_RESET BIT(11) 1128 1129 enum CXL_SUPPORTED_FEATURES_LIST { 1130 CXL_FEATURE_PATROL_SCRUB = 0, 1131 CXL_FEATURE_ECS, 1132 CXL_FEATURE_MAX 1133 }; 1134 1135 /* Get Feature CXL 3.1 Spec 8.2.9.6.2 */ 1136 /* 1137 * Get Feature input payload 1138 * CXL r3.1 section 8.2.9.6.2 Table 8-99 1139 */ 1140 /* Get Feature : Payload in selection */ 1141 enum CXL_GET_FEATURE_SELECTION { 1142 CXL_GET_FEATURE_SEL_CURRENT_VALUE, 1143 CXL_GET_FEATURE_SEL_DEFAULT_VALUE, 1144 CXL_GET_FEATURE_SEL_SAVED_VALUE, 1145 CXL_GET_FEATURE_SEL_MAX 1146 }; 1147 1148 /* Set Feature CXL 3.1 Spec 8.2.9.6.3 */ 1149 /* 1150 * Set Feature input payload 1151 * CXL r3.1 section 8.2.9.6.3 Table 8-101 1152 */ 1153 typedef struct CXLSetFeatureInHeader { 1154 QemuUUID uuid; 1155 uint32_t flags; 1156 uint16_t offset; 1157 uint8_t version; 1158 uint8_t rsvd[9]; 1159 } QEMU_PACKED QEMU_ALIGNED(16) CXLSetFeatureInHeader; 1160 1161 /* Set Feature : Payload in flags */ 1162 #define CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK 0x7 1163 enum CXL_SET_FEATURE_FLAG_DATA_TRANSFER { 1164 CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER, 1165 CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER, 1166 CXL_SET_FEATURE_FLAG_CONTINUE_DATA_TRANSFER, 1167 CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER, 1168 CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER, 1169 CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MAX 1170 }; 1171 #define CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET BIT(3) 1172 1173 /* CXL r3.1 section 8.2.9.9.11.1: Device Patrol Scrub Control Feature */ 1174 static const QemuUUID patrol_scrub_uuid = { 1175 .data = UUID(0x96dad7d6, 0xfde8, 0x482b, 0xa7, 0x33, 1176 0x75, 0x77, 0x4e, 0x06, 0xdb, 0x8a) 1177 }; 1178 1179 typedef struct CXLMemPatrolScrubSetFeature { 1180 CXLSetFeatureInHeader hdr; 1181 CXLMemPatrolScrubWriteAttrs feat_data; 1182 } QEMU_PACKED QEMU_ALIGNED(16) CXLMemPatrolScrubSetFeature; 1183 1184 /* 1185 * CXL r3.1 section 8.2.9.9.11.2: 1186 * DDR5 Error Check Scrub (ECS) Control Feature 1187 */ 1188 static const QemuUUID ecs_uuid = { 1189 .data = UUID(0xe5b13f22, 0x2328, 0x4a14, 0xb8, 0xba, 1190 0xb9, 0x69, 0x1e, 0x89, 0x33, 0x86) 1191 }; 1192 1193 typedef struct CXLMemECSSetFeature { 1194 CXLSetFeatureInHeader hdr; 1195 CXLMemECSWriteAttrs feat_data[]; 1196 } QEMU_PACKED QEMU_ALIGNED(16) CXLMemECSSetFeature; 1197 1198 /* CXL r3.1 section 8.2.9.6.1: Get Supported Features (Opcode 0500h) */ 1199 static CXLRetCode cmd_features_get_supported(const struct cxl_cmd *cmd, 1200 uint8_t *payload_in, 1201 size_t len_in, 1202 uint8_t *payload_out, 1203 size_t *len_out, 1204 CXLCCI *cci) 1205 { 1206 struct { 1207 uint32_t count; 1208 uint16_t start_index; 1209 uint16_t reserved; 1210 } QEMU_PACKED QEMU_ALIGNED(16) * get_feats_in = (void *)payload_in; 1211 1212 struct { 1213 CXLSupportedFeatureHeader hdr; 1214 CXLSupportedFeatureEntry feat_entries[]; 1215 } QEMU_PACKED QEMU_ALIGNED(16) * get_feats_out = (void *)payload_out; 1216 uint16_t index, req_entries; 1217 uint16_t entry; 1218 1219 if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) { 1220 return CXL_MBOX_UNSUPPORTED; 1221 } 1222 if (get_feats_in->count < sizeof(CXLSupportedFeatureHeader) || 1223 get_feats_in->start_index >= CXL_FEATURE_MAX) { 1224 return CXL_MBOX_INVALID_INPUT; 1225 } 1226 1227 req_entries = (get_feats_in->count - 1228 sizeof(CXLSupportedFeatureHeader)) / 1229 sizeof(CXLSupportedFeatureEntry); 1230 req_entries = MIN(req_entries, 1231 (CXL_FEATURE_MAX - get_feats_in->start_index)); 1232 1233 for (entry = 0, index = get_feats_in->start_index; 1234 entry < req_entries; index++) { 1235 switch (index) { 1236 case CXL_FEATURE_PATROL_SCRUB: 1237 /* Fill supported feature entry for device patrol scrub control */ 1238 get_feats_out->feat_entries[entry++] = 1239 (struct CXLSupportedFeatureEntry) { 1240 .uuid = patrol_scrub_uuid, 1241 .feat_index = index, 1242 .get_feat_size = sizeof(CXLMemPatrolScrubReadAttrs), 1243 .set_feat_size = sizeof(CXLMemPatrolScrubWriteAttrs), 1244 .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE, 1245 .get_feat_version = CXL_MEMDEV_PS_GET_FEATURE_VERSION, 1246 .set_feat_version = CXL_MEMDEV_PS_SET_FEATURE_VERSION, 1247 .set_feat_effects = CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE | 1248 CXL_FEAT_ENTRY_SFE_CEL_VALID, 1249 }; 1250 break; 1251 case CXL_FEATURE_ECS: 1252 /* Fill supported feature entry for device DDR5 ECS control */ 1253 get_feats_out->feat_entries[entry++] = 1254 (struct CXLSupportedFeatureEntry) { 1255 .uuid = ecs_uuid, 1256 .feat_index = index, 1257 .get_feat_size = sizeof(CXLMemECSReadAttrs), 1258 .set_feat_size = sizeof(CXLMemECSWriteAttrs), 1259 .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE, 1260 .get_feat_version = CXL_ECS_GET_FEATURE_VERSION, 1261 .set_feat_version = CXL_ECS_SET_FEATURE_VERSION, 1262 .set_feat_effects = CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE | 1263 CXL_FEAT_ENTRY_SFE_CEL_VALID, 1264 }; 1265 break; 1266 default: 1267 __builtin_unreachable(); 1268 } 1269 } 1270 get_feats_out->hdr.nsuppfeats_dev = CXL_FEATURE_MAX; 1271 get_feats_out->hdr.entries = req_entries; 1272 *len_out = sizeof(CXLSupportedFeatureHeader) + 1273 req_entries * sizeof(CXLSupportedFeatureEntry); 1274 1275 return CXL_MBOX_SUCCESS; 1276 } 1277 1278 /* CXL r3.1 section 8.2.9.6.2: Get Feature (Opcode 0501h) */ 1279 static CXLRetCode cmd_features_get_feature(const struct cxl_cmd *cmd, 1280 uint8_t *payload_in, 1281 size_t len_in, 1282 uint8_t *payload_out, 1283 size_t *len_out, 1284 CXLCCI *cci) 1285 { 1286 struct { 1287 QemuUUID uuid; 1288 uint16_t offset; 1289 uint16_t count; 1290 uint8_t selection; 1291 } QEMU_PACKED QEMU_ALIGNED(16) * get_feature; 1292 uint16_t bytes_to_copy = 0; 1293 CXLType3Dev *ct3d; 1294 CXLSetFeatureInfo *set_feat_info; 1295 1296 if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) { 1297 return CXL_MBOX_UNSUPPORTED; 1298 } 1299 1300 ct3d = CXL_TYPE3(cci->d); 1301 get_feature = (void *)payload_in; 1302 1303 set_feat_info = &ct3d->set_feat_info; 1304 if (qemu_uuid_is_equal(&get_feature->uuid, &set_feat_info->uuid)) { 1305 return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS; 1306 } 1307 1308 if (get_feature->selection != CXL_GET_FEATURE_SEL_CURRENT_VALUE) { 1309 return CXL_MBOX_UNSUPPORTED; 1310 } 1311 if (get_feature->offset + get_feature->count > cci->payload_max) { 1312 return CXL_MBOX_INVALID_INPUT; 1313 } 1314 1315 if (qemu_uuid_is_equal(&get_feature->uuid, &patrol_scrub_uuid)) { 1316 if (get_feature->offset >= sizeof(CXLMemPatrolScrubReadAttrs)) { 1317 return CXL_MBOX_INVALID_INPUT; 1318 } 1319 bytes_to_copy = sizeof(CXLMemPatrolScrubReadAttrs) - 1320 get_feature->offset; 1321 bytes_to_copy = MIN(bytes_to_copy, get_feature->count); 1322 memcpy(payload_out, 1323 (uint8_t *)&ct3d->patrol_scrub_attrs + get_feature->offset, 1324 bytes_to_copy); 1325 } else if (qemu_uuid_is_equal(&get_feature->uuid, &ecs_uuid)) { 1326 if (get_feature->offset >= sizeof(CXLMemECSReadAttrs)) { 1327 return CXL_MBOX_INVALID_INPUT; 1328 } 1329 bytes_to_copy = sizeof(CXLMemECSReadAttrs) - get_feature->offset; 1330 bytes_to_copy = MIN(bytes_to_copy, get_feature->count); 1331 memcpy(payload_out, 1332 (uint8_t *)&ct3d->ecs_attrs + get_feature->offset, 1333 bytes_to_copy); 1334 } else { 1335 return CXL_MBOX_UNSUPPORTED; 1336 } 1337 1338 *len_out = bytes_to_copy; 1339 1340 return CXL_MBOX_SUCCESS; 1341 } 1342 1343 /* CXL r3.1 section 8.2.9.6.3: Set Feature (Opcode 0502h) */ 1344 static CXLRetCode cmd_features_set_feature(const struct cxl_cmd *cmd, 1345 uint8_t *payload_in, 1346 size_t len_in, 1347 uint8_t *payload_out, 1348 size_t *len_out, 1349 CXLCCI *cci) 1350 { 1351 CXLSetFeatureInHeader *hdr = (void *)payload_in; 1352 CXLMemPatrolScrubWriteAttrs *ps_write_attrs; 1353 CXLMemPatrolScrubSetFeature *ps_set_feature; 1354 CXLMemECSWriteAttrs *ecs_write_attrs; 1355 CXLMemECSSetFeature *ecs_set_feature; 1356 CXLSetFeatureInfo *set_feat_info; 1357 uint16_t bytes_to_copy = 0; 1358 uint8_t data_transfer_flag; 1359 CXLType3Dev *ct3d; 1360 uint16_t count; 1361 1362 if (len_in < sizeof(*hdr)) { 1363 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 1364 } 1365 1366 if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) { 1367 return CXL_MBOX_UNSUPPORTED; 1368 } 1369 ct3d = CXL_TYPE3(cci->d); 1370 set_feat_info = &ct3d->set_feat_info; 1371 1372 if (!qemu_uuid_is_null(&set_feat_info->uuid) && 1373 !qemu_uuid_is_equal(&hdr->uuid, &set_feat_info->uuid)) { 1374 return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS; 1375 } 1376 if (hdr->flags & CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET) { 1377 set_feat_info->data_saved_across_reset = true; 1378 } else { 1379 set_feat_info->data_saved_across_reset = false; 1380 } 1381 1382 data_transfer_flag = 1383 hdr->flags & CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK; 1384 if (data_transfer_flag == CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER) { 1385 set_feat_info->uuid = hdr->uuid; 1386 set_feat_info->data_size = 0; 1387 } 1388 set_feat_info->data_transfer_flag = data_transfer_flag; 1389 set_feat_info->data_offset = hdr->offset; 1390 bytes_to_copy = len_in - sizeof(CXLSetFeatureInHeader); 1391 1392 if (bytes_to_copy == 0) { 1393 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 1394 } 1395 1396 if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) { 1397 if (hdr->version != CXL_MEMDEV_PS_SET_FEATURE_VERSION) { 1398 return CXL_MBOX_UNSUPPORTED; 1399 } 1400 1401 ps_set_feature = (void *)payload_in; 1402 ps_write_attrs = &ps_set_feature->feat_data; 1403 1404 if ((uint32_t)hdr->offset + bytes_to_copy > 1405 sizeof(ct3d->patrol_scrub_wr_attrs)) { 1406 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 1407 } 1408 memcpy((uint8_t *)&ct3d->patrol_scrub_wr_attrs + hdr->offset, 1409 ps_write_attrs, 1410 bytes_to_copy); 1411 set_feat_info->data_size += bytes_to_copy; 1412 1413 if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER || 1414 data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) { 1415 ct3d->patrol_scrub_attrs.scrub_cycle &= ~0xFF; 1416 ct3d->patrol_scrub_attrs.scrub_cycle |= 1417 ct3d->patrol_scrub_wr_attrs.scrub_cycle_hr & 0xFF; 1418 ct3d->patrol_scrub_attrs.scrub_flags &= ~0x1; 1419 ct3d->patrol_scrub_attrs.scrub_flags |= 1420 ct3d->patrol_scrub_wr_attrs.scrub_flags & 0x1; 1421 } 1422 } else if (qemu_uuid_is_equal(&hdr->uuid, 1423 &ecs_uuid)) { 1424 if (hdr->version != CXL_ECS_SET_FEATURE_VERSION) { 1425 return CXL_MBOX_UNSUPPORTED; 1426 } 1427 1428 ecs_set_feature = (void *)payload_in; 1429 ecs_write_attrs = ecs_set_feature->feat_data; 1430 1431 if ((uint32_t)hdr->offset + bytes_to_copy > 1432 sizeof(ct3d->ecs_wr_attrs)) { 1433 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 1434 } 1435 memcpy((uint8_t *)&ct3d->ecs_wr_attrs + hdr->offset, 1436 ecs_write_attrs, 1437 bytes_to_copy); 1438 set_feat_info->data_size += bytes_to_copy; 1439 1440 if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER || 1441 data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) { 1442 ct3d->ecs_attrs.ecs_log_cap = ct3d->ecs_wr_attrs.ecs_log_cap; 1443 for (count = 0; count < CXL_ECS_NUM_MEDIA_FRUS; count++) { 1444 ct3d->ecs_attrs.fru_attrs[count].ecs_config = 1445 ct3d->ecs_wr_attrs.fru_attrs[count].ecs_config & 0x1F; 1446 } 1447 } 1448 } else { 1449 return CXL_MBOX_UNSUPPORTED; 1450 } 1451 1452 if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER || 1453 data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER || 1454 data_transfer_flag == CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER) { 1455 memset(&set_feat_info->uuid, 0, sizeof(QemuUUID)); 1456 if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) { 1457 memset(&ct3d->patrol_scrub_wr_attrs, 0, set_feat_info->data_size); 1458 } else if (qemu_uuid_is_equal(&hdr->uuid, &ecs_uuid)) { 1459 memset(&ct3d->ecs_wr_attrs, 0, set_feat_info->data_size); 1460 } 1461 set_feat_info->data_transfer_flag = 0; 1462 set_feat_info->data_saved_across_reset = false; 1463 set_feat_info->data_offset = 0; 1464 set_feat_info->data_size = 0; 1465 } 1466 1467 return CXL_MBOX_SUCCESS; 1468 } 1469 1470 /* CXL r3.1 Section 8.2.9.9.1.1: Identify Memory Device (Opcode 4000h) */ 1471 static CXLRetCode cmd_identify_memory_device(const struct cxl_cmd *cmd, 1472 uint8_t *payload_in, 1473 size_t len_in, 1474 uint8_t *payload_out, 1475 size_t *len_out, 1476 CXLCCI *cci) 1477 { 1478 struct { 1479 char fw_revision[0x10]; 1480 uint64_t total_capacity; 1481 uint64_t volatile_capacity; 1482 uint64_t persistent_capacity; 1483 uint64_t partition_align; 1484 uint16_t info_event_log_size; 1485 uint16_t warning_event_log_size; 1486 uint16_t failure_event_log_size; 1487 uint16_t fatal_event_log_size; 1488 uint32_t lsa_size; 1489 uint8_t poison_list_max_mer[3]; 1490 uint16_t inject_poison_limit; 1491 uint8_t poison_caps; 1492 uint8_t qos_telemetry_caps; 1493 uint16_t dc_event_log_size; 1494 } QEMU_PACKED *id; 1495 QEMU_BUILD_BUG_ON(sizeof(*id) != 0x45); 1496 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1497 CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); 1498 CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; 1499 1500 if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) || 1501 (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) || 1502 (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) { 1503 return CXL_MBOX_INTERNAL_ERROR; 1504 } 1505 1506 id = (void *)payload_out; 1507 1508 snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0); 1509 1510 stq_le_p(&id->total_capacity, 1511 cxl_dstate->static_mem_size / CXL_CAPACITY_MULTIPLIER); 1512 stq_le_p(&id->persistent_capacity, 1513 cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER); 1514 stq_le_p(&id->volatile_capacity, 1515 cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER); 1516 stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d)); 1517 /* 256 poison records */ 1518 st24_le_p(id->poison_list_max_mer, 256); 1519 /* No limit - so limited by main poison record limit */ 1520 stw_le_p(&id->inject_poison_limit, 0); 1521 stw_le_p(&id->dc_event_log_size, CXL_DC_EVENT_LOG_SIZE); 1522 1523 *len_out = sizeof(*id); 1524 return CXL_MBOX_SUCCESS; 1525 } 1526 1527 /* CXL r3.1 Section 8.2.9.9.2.1: Get Partition Info (Opcode 4100h) */ 1528 static CXLRetCode cmd_ccls_get_partition_info(const struct cxl_cmd *cmd, 1529 uint8_t *payload_in, 1530 size_t len_in, 1531 uint8_t *payload_out, 1532 size_t *len_out, 1533 CXLCCI *cci) 1534 { 1535 CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate; 1536 struct { 1537 uint64_t active_vmem; 1538 uint64_t active_pmem; 1539 uint64_t next_vmem; 1540 uint64_t next_pmem; 1541 } QEMU_PACKED *part_info = (void *)payload_out; 1542 QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20); 1543 CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate); 1544 1545 if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) || 1546 (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) || 1547 (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) { 1548 return CXL_MBOX_INTERNAL_ERROR; 1549 } 1550 1551 stq_le_p(&part_info->active_vmem, 1552 cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER); 1553 /* 1554 * When both next_vmem and next_pmem are 0, there is no pending change to 1555 * partitioning. 1556 */ 1557 stq_le_p(&part_info->next_vmem, 0); 1558 stq_le_p(&part_info->active_pmem, 1559 cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER); 1560 stq_le_p(&part_info->next_pmem, 0); 1561 1562 *len_out = sizeof(*part_info); 1563 return CXL_MBOX_SUCCESS; 1564 } 1565 1566 /* CXL r3.1 Section 8.2.9.9.2.3: Get LSA (Opcode 4102h) */ 1567 static CXLRetCode cmd_ccls_get_lsa(const struct cxl_cmd *cmd, 1568 uint8_t *payload_in, 1569 size_t len_in, 1570 uint8_t *payload_out, 1571 size_t *len_out, 1572 CXLCCI *cci) 1573 { 1574 struct { 1575 uint32_t offset; 1576 uint32_t length; 1577 } QEMU_PACKED *get_lsa; 1578 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1579 CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); 1580 uint64_t offset, length; 1581 1582 get_lsa = (void *)payload_in; 1583 offset = get_lsa->offset; 1584 length = get_lsa->length; 1585 1586 if (offset + length > cvc->get_lsa_size(ct3d)) { 1587 *len_out = 0; 1588 return CXL_MBOX_INVALID_INPUT; 1589 } 1590 1591 *len_out = cvc->get_lsa(ct3d, payload_out, length, offset); 1592 return CXL_MBOX_SUCCESS; 1593 } 1594 1595 /* CXL r3.1 Section 8.2.9.9.2.4: Set LSA (Opcode 4103h) */ 1596 static CXLRetCode cmd_ccls_set_lsa(const struct cxl_cmd *cmd, 1597 uint8_t *payload_in, 1598 size_t len_in, 1599 uint8_t *payload_out, 1600 size_t *len_out, 1601 CXLCCI *cci) 1602 { 1603 struct set_lsa_pl { 1604 uint32_t offset; 1605 uint32_t rsvd; 1606 uint8_t data[]; 1607 } QEMU_PACKED; 1608 struct set_lsa_pl *set_lsa_payload = (void *)payload_in; 1609 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1610 CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); 1611 const size_t hdr_len = offsetof(struct set_lsa_pl, data); 1612 1613 *len_out = 0; 1614 if (len_in < hdr_len) { 1615 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 1616 } 1617 1618 if (set_lsa_payload->offset + len_in > cvc->get_lsa_size(ct3d) + hdr_len) { 1619 return CXL_MBOX_INVALID_INPUT; 1620 } 1621 len_in -= hdr_len; 1622 1623 cvc->set_lsa(ct3d, set_lsa_payload->data, len_in, set_lsa_payload->offset); 1624 return CXL_MBOX_SUCCESS; 1625 } 1626 1627 /* CXL r3.2 Section 8.2.10.9.3.2 Get Alert Configuration (Opcode 4201h) */ 1628 static CXLRetCode cmd_get_alert_config(const struct cxl_cmd *cmd, 1629 uint8_t *payload_in, 1630 size_t len_in, 1631 uint8_t *payload_out, 1632 size_t *len_out, 1633 CXLCCI *cci) 1634 { 1635 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1636 CXLAlertConfig *out = (CXLAlertConfig *)payload_out; 1637 1638 memcpy(out, &ct3d->alert_config, sizeof(ct3d->alert_config)); 1639 *len_out = sizeof(ct3d->alert_config); 1640 1641 return CXL_MBOX_SUCCESS; 1642 } 1643 1644 /* CXL r3.2 Section 8.2.10.9.3.3 Set Alert Configuration (Opcode 4202h) */ 1645 static CXLRetCode cmd_set_alert_config(const struct cxl_cmd *cmd, 1646 uint8_t *payload_in, 1647 size_t len_in, 1648 uint8_t *payload_out, 1649 size_t *len_out, 1650 CXLCCI *cci) 1651 { 1652 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1653 CXLAlertConfig *alert_config = &ct3d->alert_config; 1654 struct { 1655 uint8_t valid_alert_actions; 1656 uint8_t enable_alert_actions; 1657 uint8_t life_used_warn_thresh; 1658 uint8_t rsvd; 1659 uint16_t over_temp_warn_thresh; 1660 uint16_t under_temp_warn_thresh; 1661 uint16_t cor_vmem_err_warn_thresh; 1662 uint16_t cor_pmem_err_warn_thresh; 1663 } QEMU_PACKED *in = (void *)payload_in; 1664 1665 if (in->valid_alert_actions & CXL_ALERTS_LIFE_USED_WARN_THRESH) { 1666 /* 1667 * CXL r3.2 Table 8-149 The life used warning threshold shall be 1668 * less than the life used critical alert value. 1669 */ 1670 if (in->life_used_warn_thresh >= 1671 alert_config->life_used_crit_alert_thresh) { 1672 return CXL_MBOX_INVALID_INPUT; 1673 } 1674 alert_config->life_used_warn_thresh = in->life_used_warn_thresh; 1675 alert_config->enable_alerts |= CXL_ALERTS_LIFE_USED_WARN_THRESH; 1676 } 1677 1678 if (in->valid_alert_actions & CXL_ALERTS_OVER_TEMP_WARN_THRESH) { 1679 /* 1680 * CXL r3.2 Table 8-149 The Device Over-Temperature Warning Threshold 1681 * shall be less than the the Device Over-Temperature Critical 1682 * Alert Threshold. 1683 */ 1684 if (in->over_temp_warn_thresh >= 1685 alert_config->over_temp_crit_alert_thresh) { 1686 return CXL_MBOX_INVALID_INPUT; 1687 } 1688 alert_config->over_temp_warn_thresh = in->over_temp_warn_thresh; 1689 alert_config->enable_alerts |= CXL_ALERTS_OVER_TEMP_WARN_THRESH; 1690 } 1691 1692 if (in->valid_alert_actions & CXL_ALERTS_UNDER_TEMP_WARN_THRESH) { 1693 /* 1694 * CXL r3.2 Table 8-149 The Device Under-Temperature Warning Threshold 1695 * shall be higher than the the Device Under-Temperature Critical 1696 * Alert Threshold. 1697 */ 1698 if (in->under_temp_warn_thresh <= 1699 alert_config->under_temp_crit_alert_thresh) { 1700 return CXL_MBOX_INVALID_INPUT; 1701 } 1702 alert_config->under_temp_warn_thresh = in->under_temp_warn_thresh; 1703 alert_config->enable_alerts |= CXL_ALERTS_UNDER_TEMP_WARN_THRESH; 1704 } 1705 1706 if (in->valid_alert_actions & CXL_ALERTS_COR_VMEM_ERR_WARN_THRESH) { 1707 alert_config->cor_vmem_err_warn_thresh = in->cor_vmem_err_warn_thresh; 1708 alert_config->enable_alerts |= CXL_ALERTS_COR_VMEM_ERR_WARN_THRESH; 1709 } 1710 1711 if (in->valid_alert_actions & CXL_ALERTS_COR_PMEM_ERR_WARN_THRESH) { 1712 alert_config->cor_pmem_err_warn_thresh = in->cor_pmem_err_warn_thresh; 1713 alert_config->enable_alerts |= CXL_ALERTS_COR_PMEM_ERR_WARN_THRESH; 1714 } 1715 return CXL_MBOX_SUCCESS; 1716 } 1717 1718 /* Perform the actual device zeroing */ 1719 static void __do_sanitization(CXLType3Dev *ct3d) 1720 { 1721 MemoryRegion *mr; 1722 1723 if (ct3d->hostvmem) { 1724 mr = host_memory_backend_get_memory(ct3d->hostvmem); 1725 if (mr) { 1726 void *hostmem = memory_region_get_ram_ptr(mr); 1727 memset(hostmem, 0, memory_region_size(mr)); 1728 } 1729 } 1730 1731 if (ct3d->hostpmem) { 1732 mr = host_memory_backend_get_memory(ct3d->hostpmem); 1733 if (mr) { 1734 void *hostmem = memory_region_get_ram_ptr(mr); 1735 memset(hostmem, 0, memory_region_size(mr)); 1736 } 1737 } 1738 if (ct3d->lsa) { 1739 mr = host_memory_backend_get_memory(ct3d->lsa); 1740 if (mr) { 1741 void *lsa = memory_region_get_ram_ptr(mr); 1742 memset(lsa, 0, memory_region_size(mr)); 1743 } 1744 } 1745 cxl_discard_all_event_records(&ct3d->cxl_dstate); 1746 } 1747 1748 static int get_sanitize_duration(uint64_t total_mem) 1749 { 1750 int secs = 0; 1751 1752 if (total_mem <= 512) { 1753 secs = 4; 1754 } else if (total_mem <= 1024) { 1755 secs = 8; 1756 } else if (total_mem <= 2 * 1024) { 1757 secs = 15; 1758 } else if (total_mem <= 4 * 1024) { 1759 secs = 30; 1760 } else if (total_mem <= 8 * 1024) { 1761 secs = 60; 1762 } else if (total_mem <= 16 * 1024) { 1763 secs = 2 * 60; 1764 } else if (total_mem <= 32 * 1024) { 1765 secs = 4 * 60; 1766 } else if (total_mem <= 64 * 1024) { 1767 secs = 8 * 60; 1768 } else if (total_mem <= 128 * 1024) { 1769 secs = 15 * 60; 1770 } else if (total_mem <= 256 * 1024) { 1771 secs = 30 * 60; 1772 } else if (total_mem <= 512 * 1024) { 1773 secs = 60 * 60; 1774 } else if (total_mem <= 1024 * 1024) { 1775 secs = 120 * 60; 1776 } else { 1777 secs = 240 * 60; /* max 4 hrs */ 1778 } 1779 1780 return secs; 1781 } 1782 1783 /* 1784 * CXL r3.1 Section 8.2.9.9.5.1: Sanitize (Opcode 4400h) 1785 * 1786 * Once the Sanitize command has started successfully, the device shall be 1787 * placed in the media disabled state. If the command fails or is interrupted 1788 * by a reset or power failure, it shall remain in the media disabled state 1789 * until a successful Sanitize command has been completed. During this state: 1790 * 1791 * 1. Memory writes to the device will have no effect, and all memory reads 1792 * will return random values (no user data returned, even for locations that 1793 * the failed Sanitize operation didn’t sanitize yet). 1794 * 1795 * 2. Mailbox commands shall still be processed in the disabled state, except 1796 * that commands that access Sanitized areas shall fail with the Media Disabled 1797 * error code. 1798 */ 1799 static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd, 1800 uint8_t *payload_in, 1801 size_t len_in, 1802 uint8_t *payload_out, 1803 size_t *len_out, 1804 CXLCCI *cci) 1805 { 1806 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1807 uint64_t total_mem; /* in Mb */ 1808 int secs; 1809 1810 total_mem = (ct3d->cxl_dstate.vmem_size + ct3d->cxl_dstate.pmem_size) >> 20; 1811 secs = get_sanitize_duration(total_mem); 1812 1813 /* EBUSY other bg cmds as of now */ 1814 cci->bg.runtime = secs * 1000UL; 1815 *len_out = 0; 1816 1817 cxl_dev_disable_media(&ct3d->cxl_dstate); 1818 1819 /* sanitize when done */ 1820 return CXL_MBOX_BG_STARTED; 1821 } 1822 1823 struct dpa_range_list_entry { 1824 uint64_t starting_dpa; 1825 uint64_t length; 1826 } QEMU_PACKED; 1827 1828 struct CXLSanitizeInfo { 1829 uint32_t dpa_range_count; 1830 uint8_t fill_value; 1831 struct dpa_range_list_entry dpa_range_list[]; 1832 } QEMU_PACKED; 1833 1834 static uint64_t get_vmr_size(CXLType3Dev *ct3d, MemoryRegion **vmr) 1835 { 1836 MemoryRegion *mr; 1837 if (ct3d->hostvmem) { 1838 mr = host_memory_backend_get_memory(ct3d->hostvmem); 1839 if (vmr) { 1840 *vmr = mr; 1841 } 1842 return memory_region_size(mr); 1843 } 1844 return 0; 1845 } 1846 1847 static uint64_t get_pmr_size(CXLType3Dev *ct3d, MemoryRegion **pmr) 1848 { 1849 MemoryRegion *mr; 1850 if (ct3d->hostpmem) { 1851 mr = host_memory_backend_get_memory(ct3d->hostpmem); 1852 if (pmr) { 1853 *pmr = mr; 1854 } 1855 return memory_region_size(mr); 1856 } 1857 return 0; 1858 } 1859 1860 static uint64_t get_dc_size(CXLType3Dev *ct3d, MemoryRegion **dc_mr) 1861 { 1862 MemoryRegion *mr; 1863 if (ct3d->dc.host_dc) { 1864 mr = host_memory_backend_get_memory(ct3d->dc.host_dc); 1865 if (dc_mr) { 1866 *dc_mr = mr; 1867 } 1868 return memory_region_size(mr); 1869 } 1870 return 0; 1871 } 1872 1873 static int validate_dpa_addr(CXLType3Dev *ct3d, uint64_t dpa_addr, 1874 size_t length) 1875 { 1876 uint64_t vmr_size, pmr_size, dc_size; 1877 1878 if ((dpa_addr % CXL_CACHE_LINE_SIZE) || 1879 (length % CXL_CACHE_LINE_SIZE) || 1880 (length <= 0)) { 1881 return -EINVAL; 1882 } 1883 1884 vmr_size = get_vmr_size(ct3d, NULL); 1885 pmr_size = get_pmr_size(ct3d, NULL); 1886 dc_size = get_dc_size(ct3d, NULL); 1887 1888 if (dpa_addr + length > vmr_size + pmr_size + dc_size) { 1889 return -EINVAL; 1890 } 1891 1892 if (dpa_addr > vmr_size + pmr_size) { 1893 if (!ct3_test_region_block_backed(ct3d, dpa_addr, length)) { 1894 return -ENODEV; 1895 } 1896 } 1897 1898 return 0; 1899 } 1900 1901 static int sanitize_range(CXLType3Dev *ct3d, uint64_t dpa_addr, size_t length, 1902 uint8_t fill_value) 1903 { 1904 1905 uint64_t vmr_size, pmr_size; 1906 AddressSpace *as = NULL; 1907 MemTxAttrs mem_attrs = {}; 1908 1909 vmr_size = get_vmr_size(ct3d, NULL); 1910 pmr_size = get_pmr_size(ct3d, NULL); 1911 1912 if (dpa_addr < vmr_size) { 1913 as = &ct3d->hostvmem_as; 1914 } else if (dpa_addr < vmr_size + pmr_size) { 1915 as = &ct3d->hostpmem_as; 1916 } else { 1917 if (!ct3_test_region_block_backed(ct3d, dpa_addr, length)) { 1918 return -ENODEV; 1919 } 1920 as = &ct3d->dc.host_dc_as; 1921 } 1922 1923 return address_space_set(as, dpa_addr, fill_value, length, mem_attrs); 1924 } 1925 1926 /* Perform the actual device zeroing */ 1927 static void __do_sanitize(CXLType3Dev *ct3d) 1928 { 1929 struct CXLSanitizeInfo *san_info = ct3d->media_op_sanitize; 1930 int dpa_range_count = san_info->dpa_range_count; 1931 int rc = 0; 1932 int i; 1933 1934 for (i = 0; i < dpa_range_count; i++) { 1935 rc = sanitize_range(ct3d, san_info->dpa_range_list[i].starting_dpa, 1936 san_info->dpa_range_list[i].length, 1937 san_info->fill_value); 1938 if (rc) { 1939 goto exit; 1940 } 1941 } 1942 exit: 1943 g_free(ct3d->media_op_sanitize); 1944 ct3d->media_op_sanitize = NULL; 1945 return; 1946 } 1947 1948 enum { 1949 MEDIA_OP_CLASS_GENERAL = 0x0, 1950 #define MEDIA_OP_GEN_SUBC_DISCOVERY 0x0 1951 MEDIA_OP_CLASS_SANITIZE = 0x1, 1952 #define MEDIA_OP_SAN_SUBC_SANITIZE 0x0 1953 #define MEDIA_OP_SAN_SUBC_ZERO 0x1 1954 }; 1955 1956 struct media_op_supported_list_entry { 1957 uint8_t media_op_class; 1958 uint8_t media_op_subclass; 1959 }; 1960 1961 struct media_op_discovery_out_pl { 1962 uint64_t dpa_range_granularity; 1963 uint16_t total_supported_operations; 1964 uint16_t num_of_supported_operations; 1965 struct media_op_supported_list_entry entry[]; 1966 } QEMU_PACKED; 1967 1968 static const struct media_op_supported_list_entry media_op_matrix[] = { 1969 { MEDIA_OP_CLASS_GENERAL, MEDIA_OP_GEN_SUBC_DISCOVERY }, 1970 { MEDIA_OP_CLASS_SANITIZE, MEDIA_OP_SAN_SUBC_SANITIZE }, 1971 { MEDIA_OP_CLASS_SANITIZE, MEDIA_OP_SAN_SUBC_ZERO }, 1972 }; 1973 1974 static CXLRetCode media_operations_discovery(uint8_t *payload_in, 1975 size_t len_in, 1976 uint8_t *payload_out, 1977 size_t *len_out) 1978 { 1979 struct { 1980 uint8_t media_operation_class; 1981 uint8_t media_operation_subclass; 1982 uint8_t rsvd[2]; 1983 uint32_t dpa_range_count; 1984 struct { 1985 uint16_t start_index; 1986 uint16_t num_ops; 1987 } discovery_osa; 1988 } QEMU_PACKED *media_op_in_disc_pl = (void *)payload_in; 1989 struct media_op_discovery_out_pl *media_out_pl = 1990 (struct media_op_discovery_out_pl *)payload_out; 1991 int num_ops, start_index, i; 1992 int count = 0; 1993 1994 if (len_in < sizeof(*media_op_in_disc_pl)) { 1995 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 1996 } 1997 1998 num_ops = media_op_in_disc_pl->discovery_osa.num_ops; 1999 start_index = media_op_in_disc_pl->discovery_osa.start_index; 2000 2001 /* 2002 * As per spec CXL r3.2 8.2.10.9.5.3 dpa_range_count should be zero and 2003 * start index should not exceed the total number of entries for discovery 2004 * sub class command. 2005 */ 2006 if (media_op_in_disc_pl->dpa_range_count || 2007 start_index > ARRAY_SIZE(media_op_matrix)) { 2008 return CXL_MBOX_INVALID_INPUT; 2009 } 2010 2011 media_out_pl->dpa_range_granularity = CXL_CACHE_LINE_SIZE; 2012 media_out_pl->total_supported_operations = 2013 ARRAY_SIZE(media_op_matrix); 2014 if (num_ops > 0) { 2015 for (i = start_index; i < start_index + num_ops; i++) { 2016 media_out_pl->entry[count].media_op_class = 2017 media_op_matrix[i].media_op_class; 2018 media_out_pl->entry[count].media_op_subclass = 2019 media_op_matrix[i].media_op_subclass; 2020 count++; 2021 if (count == num_ops) { 2022 break; 2023 } 2024 } 2025 } 2026 2027 media_out_pl->num_of_supported_operations = count; 2028 *len_out = sizeof(*media_out_pl) + count * sizeof(*media_out_pl->entry); 2029 return CXL_MBOX_SUCCESS; 2030 } 2031 2032 static CXLRetCode media_operations_sanitize(CXLType3Dev *ct3d, 2033 uint8_t *payload_in, 2034 size_t len_in, 2035 uint8_t *payload_out, 2036 size_t *len_out, 2037 uint8_t fill_value, 2038 CXLCCI *cci) 2039 { 2040 struct media_operations_sanitize { 2041 uint8_t media_operation_class; 2042 uint8_t media_operation_subclass; 2043 uint8_t rsvd[2]; 2044 uint32_t dpa_range_count; 2045 struct dpa_range_list_entry dpa_range_list[]; 2046 } QEMU_PACKED *media_op_in_sanitize_pl = (void *)payload_in; 2047 uint32_t dpa_range_count = media_op_in_sanitize_pl->dpa_range_count; 2048 uint64_t total_mem = 0; 2049 size_t dpa_range_list_size; 2050 int secs = 0, i; 2051 2052 if (dpa_range_count == 0) { 2053 return CXL_MBOX_SUCCESS; 2054 } 2055 2056 dpa_range_list_size = dpa_range_count * sizeof(struct dpa_range_list_entry); 2057 if (len_in < (sizeof(*media_op_in_sanitize_pl) + dpa_range_list_size)) { 2058 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 2059 } 2060 2061 for (i = 0; i < dpa_range_count; i++) { 2062 uint64_t start_dpa = 2063 media_op_in_sanitize_pl->dpa_range_list[i].starting_dpa; 2064 uint64_t length = media_op_in_sanitize_pl->dpa_range_list[i].length; 2065 2066 if (validate_dpa_addr(ct3d, start_dpa, length)) { 2067 return CXL_MBOX_INVALID_INPUT; 2068 } 2069 total_mem += length; 2070 } 2071 ct3d->media_op_sanitize = g_malloc0(sizeof(struct CXLSanitizeInfo) + 2072 dpa_range_list_size); 2073 2074 ct3d->media_op_sanitize->dpa_range_count = dpa_range_count; 2075 ct3d->media_op_sanitize->fill_value = fill_value; 2076 memcpy(ct3d->media_op_sanitize->dpa_range_list, 2077 media_op_in_sanitize_pl->dpa_range_list, 2078 dpa_range_list_size); 2079 secs = get_sanitize_duration(total_mem >> 20); 2080 2081 /* EBUSY other bg cmds as of now */ 2082 cci->bg.runtime = secs * 1000UL; 2083 *len_out = 0; 2084 /* 2085 * media op sanitize is targeted so no need to disable media or 2086 * clear event logs 2087 */ 2088 return CXL_MBOX_BG_STARTED; 2089 } 2090 2091 static CXLRetCode cmd_media_operations(const struct cxl_cmd *cmd, 2092 uint8_t *payload_in, 2093 size_t len_in, 2094 uint8_t *payload_out, 2095 size_t *len_out, 2096 CXLCCI *cci) 2097 { 2098 struct { 2099 uint8_t media_operation_class; 2100 uint8_t media_operation_subclass; 2101 uint8_t rsvd[2]; 2102 uint32_t dpa_range_count; 2103 } QEMU_PACKED *media_op_in_common_pl = (void *)payload_in; 2104 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2105 uint8_t media_op_cl = 0; 2106 uint8_t media_op_subclass = 0; 2107 2108 if (len_in < sizeof(*media_op_in_common_pl)) { 2109 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 2110 } 2111 2112 media_op_cl = media_op_in_common_pl->media_operation_class; 2113 media_op_subclass = media_op_in_common_pl->media_operation_subclass; 2114 2115 switch (media_op_cl) { 2116 case MEDIA_OP_CLASS_GENERAL: 2117 if (media_op_subclass != MEDIA_OP_GEN_SUBC_DISCOVERY) { 2118 return CXL_MBOX_UNSUPPORTED; 2119 } 2120 2121 return media_operations_discovery(payload_in, len_in, payload_out, 2122 len_out); 2123 case MEDIA_OP_CLASS_SANITIZE: 2124 switch (media_op_subclass) { 2125 case MEDIA_OP_SAN_SUBC_SANITIZE: 2126 return media_operations_sanitize(ct3d, payload_in, len_in, 2127 payload_out, len_out, 0xF, 2128 cci); 2129 case MEDIA_OP_SAN_SUBC_ZERO: 2130 return media_operations_sanitize(ct3d, payload_in, len_in, 2131 payload_out, len_out, 0, 2132 cci); 2133 default: 2134 return CXL_MBOX_UNSUPPORTED; 2135 } 2136 default: 2137 return CXL_MBOX_UNSUPPORTED; 2138 } 2139 } 2140 2141 static CXLRetCode cmd_get_security_state(const struct cxl_cmd *cmd, 2142 uint8_t *payload_in, 2143 size_t len_in, 2144 uint8_t *payload_out, 2145 size_t *len_out, 2146 CXLCCI *cci) 2147 { 2148 uint32_t *state = (uint32_t *)payload_out; 2149 2150 *state = 0; 2151 *len_out = 4; 2152 return CXL_MBOX_SUCCESS; 2153 } 2154 2155 /* 2156 * CXL r3.1 Section 8.2.9.9.4.1: Get Poison List (Opcode 4300h) 2157 * 2158 * This is very inefficient, but good enough for now! 2159 * Also the payload will always fit, so no need to handle the MORE flag and 2160 * make this stateful. We may want to allow longer poison lists to aid 2161 * testing that kernel functionality. 2162 */ 2163 static CXLRetCode cmd_media_get_poison_list(const struct cxl_cmd *cmd, 2164 uint8_t *payload_in, 2165 size_t len_in, 2166 uint8_t *payload_out, 2167 size_t *len_out, 2168 CXLCCI *cci) 2169 { 2170 struct get_poison_list_pl { 2171 uint64_t pa; 2172 uint64_t length; 2173 } QEMU_PACKED; 2174 2175 struct get_poison_list_out_pl { 2176 uint8_t flags; 2177 uint8_t rsvd1; 2178 uint64_t overflow_timestamp; 2179 uint16_t count; 2180 uint8_t rsvd2[0x14]; 2181 struct { 2182 uint64_t addr; 2183 uint32_t length; 2184 uint32_t resv; 2185 } QEMU_PACKED records[]; 2186 } QEMU_PACKED; 2187 2188 struct get_poison_list_pl *in = (void *)payload_in; 2189 struct get_poison_list_out_pl *out = (void *)payload_out; 2190 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2191 uint16_t record_count = 0, i = 0; 2192 uint64_t query_start, query_length; 2193 CXLPoisonList *poison_list = &ct3d->poison_list; 2194 CXLPoison *ent; 2195 uint16_t out_pl_len; 2196 2197 query_start = ldq_le_p(&in->pa); 2198 /* 64 byte alignment required */ 2199 if (query_start & 0x3f) { 2200 return CXL_MBOX_INVALID_INPUT; 2201 } 2202 query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE; 2203 2204 QLIST_FOREACH(ent, poison_list, node) { 2205 /* Check for no overlap */ 2206 if (!ranges_overlap(ent->start, ent->length, 2207 query_start, query_length)) { 2208 continue; 2209 } 2210 record_count++; 2211 } 2212 out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]); 2213 assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE); 2214 2215 QLIST_FOREACH(ent, poison_list, node) { 2216 uint64_t start, stop; 2217 2218 /* Check for no overlap */ 2219 if (!ranges_overlap(ent->start, ent->length, 2220 query_start, query_length)) { 2221 continue; 2222 } 2223 2224 /* Deal with overlap */ 2225 start = MAX(ROUND_DOWN(ent->start, 64ull), query_start); 2226 stop = MIN(ROUND_DOWN(ent->start, 64ull) + ent->length, 2227 query_start + query_length); 2228 stq_le_p(&out->records[i].addr, start | (ent->type & 0x7)); 2229 stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE); 2230 i++; 2231 } 2232 if (ct3d->poison_list_overflowed) { 2233 out->flags = (1 << 1); 2234 stq_le_p(&out->overflow_timestamp, ct3d->poison_list_overflow_ts); 2235 } 2236 if (scan_media_running(cci)) { 2237 out->flags |= (1 << 2); 2238 } 2239 2240 stw_le_p(&out->count, record_count); 2241 *len_out = out_pl_len; 2242 return CXL_MBOX_SUCCESS; 2243 } 2244 2245 /* CXL r3.1 Section 8.2.9.9.4.2: Inject Poison (Opcode 4301h) */ 2246 static CXLRetCode cmd_media_inject_poison(const struct cxl_cmd *cmd, 2247 uint8_t *payload_in, 2248 size_t len_in, 2249 uint8_t *payload_out, 2250 size_t *len_out, 2251 CXLCCI *cci) 2252 { 2253 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2254 CXLPoisonList *poison_list = &ct3d->poison_list; 2255 CXLPoison *ent; 2256 struct inject_poison_pl { 2257 uint64_t dpa; 2258 }; 2259 struct inject_poison_pl *in = (void *)payload_in; 2260 uint64_t dpa = ldq_le_p(&in->dpa); 2261 CXLPoison *p; 2262 2263 QLIST_FOREACH(ent, poison_list, node) { 2264 if (dpa >= ent->start && 2265 dpa + CXL_CACHE_LINE_SIZE <= ent->start + ent->length) { 2266 return CXL_MBOX_SUCCESS; 2267 } 2268 } 2269 /* 2270 * Freeze the list if there is an on-going scan media operation. 2271 */ 2272 if (scan_media_running(cci)) { 2273 /* 2274 * XXX: Spec is ambiguous - is this case considered 2275 * a successful return despite not adding to the list? 2276 */ 2277 goto success; 2278 } 2279 2280 if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) { 2281 return CXL_MBOX_INJECT_POISON_LIMIT; 2282 } 2283 p = g_new0(CXLPoison, 1); 2284 2285 p->length = CXL_CACHE_LINE_SIZE; 2286 p->start = dpa; 2287 p->type = CXL_POISON_TYPE_INJECTED; 2288 2289 /* 2290 * Possible todo: Merge with existing entry if next to it and if same type 2291 */ 2292 QLIST_INSERT_HEAD(poison_list, p, node); 2293 ct3d->poison_list_cnt++; 2294 success: 2295 *len_out = 0; 2296 2297 return CXL_MBOX_SUCCESS; 2298 } 2299 2300 /* CXL r3.1 Section 8.2.9.9.4.3: Clear Poison (Opcode 4302h */ 2301 static CXLRetCode cmd_media_clear_poison(const struct cxl_cmd *cmd, 2302 uint8_t *payload_in, 2303 size_t len_in, 2304 uint8_t *payload_out, 2305 size_t *len_out, 2306 CXLCCI *cci) 2307 { 2308 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2309 CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; 2310 CXLPoisonList *poison_list = &ct3d->poison_list; 2311 CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); 2312 struct clear_poison_pl { 2313 uint64_t dpa; 2314 uint8_t data[64]; 2315 }; 2316 CXLPoison *ent; 2317 uint64_t dpa; 2318 2319 struct clear_poison_pl *in = (void *)payload_in; 2320 2321 dpa = ldq_le_p(&in->dpa); 2322 if (dpa + CXL_CACHE_LINE_SIZE > cxl_dstate->static_mem_size + 2323 ct3d->dc.total_capacity) { 2324 return CXL_MBOX_INVALID_PA; 2325 } 2326 2327 /* Clearing a region with no poison is not an error so always do so */ 2328 if (cvc->set_cacheline) { 2329 if (!cvc->set_cacheline(ct3d, dpa, in->data)) { 2330 return CXL_MBOX_INTERNAL_ERROR; 2331 } 2332 } 2333 2334 /* 2335 * Freeze the list if there is an on-going scan media operation. 2336 */ 2337 if (scan_media_running(cci)) { 2338 /* 2339 * XXX: Spec is ambiguous - is this case considered 2340 * a successful return despite not removing from the list? 2341 */ 2342 goto success; 2343 } 2344 2345 QLIST_FOREACH(ent, poison_list, node) { 2346 /* 2347 * Test for contained in entry. Simpler than general case 2348 * as clearing 64 bytes and entries 64 byte aligned 2349 */ 2350 if ((dpa >= ent->start) && (dpa < ent->start + ent->length)) { 2351 break; 2352 } 2353 } 2354 if (!ent) { 2355 goto success; 2356 } 2357 2358 QLIST_REMOVE(ent, node); 2359 ct3d->poison_list_cnt--; 2360 2361 if (dpa > ent->start) { 2362 CXLPoison *frag; 2363 /* Cannot overflow as replacing existing entry */ 2364 2365 frag = g_new0(CXLPoison, 1); 2366 2367 frag->start = ent->start; 2368 frag->length = dpa - ent->start; 2369 frag->type = ent->type; 2370 2371 QLIST_INSERT_HEAD(poison_list, frag, node); 2372 ct3d->poison_list_cnt++; 2373 } 2374 2375 if (dpa + CXL_CACHE_LINE_SIZE < ent->start + ent->length) { 2376 CXLPoison *frag; 2377 2378 if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) { 2379 cxl_set_poison_list_overflowed(ct3d); 2380 } else { 2381 frag = g_new0(CXLPoison, 1); 2382 2383 frag->start = dpa + CXL_CACHE_LINE_SIZE; 2384 frag->length = ent->start + ent->length - frag->start; 2385 frag->type = ent->type; 2386 QLIST_INSERT_HEAD(poison_list, frag, node); 2387 ct3d->poison_list_cnt++; 2388 } 2389 } 2390 /* Any fragments have been added, free original entry */ 2391 g_free(ent); 2392 success: 2393 *len_out = 0; 2394 2395 return CXL_MBOX_SUCCESS; 2396 } 2397 2398 /* 2399 * CXL r3.1 section 8.2.9.9.4.4: Get Scan Media Capabilities 2400 */ 2401 static CXLRetCode 2402 cmd_media_get_scan_media_capabilities(const struct cxl_cmd *cmd, 2403 uint8_t *payload_in, 2404 size_t len_in, 2405 uint8_t *payload_out, 2406 size_t *len_out, 2407 CXLCCI *cci) 2408 { 2409 struct get_scan_media_capabilities_pl { 2410 uint64_t pa; 2411 uint64_t length; 2412 } QEMU_PACKED; 2413 2414 struct get_scan_media_capabilities_out_pl { 2415 uint32_t estimated_runtime_ms; 2416 }; 2417 2418 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2419 CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; 2420 struct get_scan_media_capabilities_pl *in = (void *)payload_in; 2421 struct get_scan_media_capabilities_out_pl *out = (void *)payload_out; 2422 uint64_t query_start; 2423 uint64_t query_length; 2424 2425 query_start = ldq_le_p(&in->pa); 2426 /* 64 byte alignment required */ 2427 if (query_start & 0x3f) { 2428 return CXL_MBOX_INVALID_INPUT; 2429 } 2430 query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE; 2431 2432 if (query_start + query_length > cxl_dstate->static_mem_size) { 2433 return CXL_MBOX_INVALID_PA; 2434 } 2435 2436 /* 2437 * Just use 400 nanosecond access/read latency + 100 ns for 2438 * the cost of updating the poison list. For small enough 2439 * chunks return at least 1 ms. 2440 */ 2441 stl_le_p(&out->estimated_runtime_ms, 2442 MAX(1, query_length * (0.0005L / 64))); 2443 2444 *len_out = sizeof(*out); 2445 return CXL_MBOX_SUCCESS; 2446 } 2447 2448 static void __do_scan_media(CXLType3Dev *ct3d) 2449 { 2450 CXLPoison *ent; 2451 unsigned int results_cnt = 0; 2452 2453 QLIST_FOREACH(ent, &ct3d->scan_media_results, node) { 2454 results_cnt++; 2455 } 2456 2457 /* only scan media may clear the overflow */ 2458 if (ct3d->poison_list_overflowed && 2459 ct3d->poison_list_cnt == results_cnt) { 2460 cxl_clear_poison_list_overflowed(ct3d); 2461 } 2462 /* scan media has run since last conventional reset */ 2463 ct3d->scan_media_hasrun = true; 2464 } 2465 2466 /* 2467 * CXL r3.1 section 8.2.9.9.4.5: Scan Media 2468 */ 2469 static CXLRetCode cmd_media_scan_media(const struct cxl_cmd *cmd, 2470 uint8_t *payload_in, 2471 size_t len_in, 2472 uint8_t *payload_out, 2473 size_t *len_out, 2474 CXLCCI *cci) 2475 { 2476 struct scan_media_pl { 2477 uint64_t pa; 2478 uint64_t length; 2479 uint8_t flags; 2480 } QEMU_PACKED; 2481 2482 struct scan_media_pl *in = (void *)payload_in; 2483 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2484 CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; 2485 uint64_t query_start; 2486 uint64_t query_length; 2487 CXLPoison *ent, *next; 2488 2489 query_start = ldq_le_p(&in->pa); 2490 /* 64 byte alignment required */ 2491 if (query_start & 0x3f) { 2492 return CXL_MBOX_INVALID_INPUT; 2493 } 2494 query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE; 2495 2496 if (query_start + query_length > cxl_dstate->static_mem_size) { 2497 return CXL_MBOX_INVALID_PA; 2498 } 2499 if (ct3d->dc.num_regions && query_start + query_length >= 2500 cxl_dstate->static_mem_size + ct3d->dc.total_capacity) { 2501 return CXL_MBOX_INVALID_PA; 2502 } 2503 2504 if (in->flags == 0) { /* TODO */ 2505 qemu_log_mask(LOG_UNIMP, 2506 "Scan Media Event Log is unsupported\n"); 2507 } 2508 2509 /* any previous results are discarded upon a new Scan Media */ 2510 QLIST_FOREACH_SAFE(ent, &ct3d->scan_media_results, node, next) { 2511 QLIST_REMOVE(ent, node); 2512 g_free(ent); 2513 } 2514 2515 /* kill the poison list - it will be recreated */ 2516 if (ct3d->poison_list_overflowed) { 2517 QLIST_FOREACH_SAFE(ent, &ct3d->poison_list, node, next) { 2518 QLIST_REMOVE(ent, node); 2519 g_free(ent); 2520 ct3d->poison_list_cnt--; 2521 } 2522 } 2523 2524 /* 2525 * Scan the backup list and move corresponding entries 2526 * into the results list, updating the poison list 2527 * when possible. 2528 */ 2529 QLIST_FOREACH_SAFE(ent, &ct3d->poison_list_bkp, node, next) { 2530 CXLPoison *res; 2531 2532 if (ent->start >= query_start + query_length || 2533 ent->start + ent->length <= query_start) { 2534 continue; 2535 } 2536 2537 /* 2538 * If a Get Poison List cmd comes in while this 2539 * scan is being done, it will see the new complete 2540 * list, while setting the respective flag. 2541 */ 2542 if (ct3d->poison_list_cnt < CXL_POISON_LIST_LIMIT) { 2543 CXLPoison *p = g_new0(CXLPoison, 1); 2544 2545 p->start = ent->start; 2546 p->length = ent->length; 2547 p->type = ent->type; 2548 QLIST_INSERT_HEAD(&ct3d->poison_list, p, node); 2549 ct3d->poison_list_cnt++; 2550 } 2551 2552 res = g_new0(CXLPoison, 1); 2553 res->start = ent->start; 2554 res->length = ent->length; 2555 res->type = ent->type; 2556 QLIST_INSERT_HEAD(&ct3d->scan_media_results, res, node); 2557 2558 QLIST_REMOVE(ent, node); 2559 g_free(ent); 2560 } 2561 2562 cci->bg.runtime = MAX(1, query_length * (0.0005L / 64)); 2563 *len_out = 0; 2564 2565 return CXL_MBOX_BG_STARTED; 2566 } 2567 2568 /* 2569 * CXL r3.1 section 8.2.9.9.4.6: Get Scan Media Results 2570 */ 2571 static CXLRetCode cmd_media_get_scan_media_results(const struct cxl_cmd *cmd, 2572 uint8_t *payload_in, 2573 size_t len_in, 2574 uint8_t *payload_out, 2575 size_t *len_out, 2576 CXLCCI *cci) 2577 { 2578 struct get_scan_media_results_out_pl { 2579 uint64_t dpa_restart; 2580 uint64_t length; 2581 uint8_t flags; 2582 uint8_t rsvd1; 2583 uint16_t count; 2584 uint8_t rsvd2[0xc]; 2585 struct { 2586 uint64_t addr; 2587 uint32_t length; 2588 uint32_t resv; 2589 } QEMU_PACKED records[]; 2590 } QEMU_PACKED; 2591 2592 struct get_scan_media_results_out_pl *out = (void *)payload_out; 2593 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2594 CXLPoisonList *scan_media_results = &ct3d->scan_media_results; 2595 CXLPoison *ent, *next; 2596 uint16_t total_count = 0, record_count = 0, i = 0; 2597 uint16_t out_pl_len; 2598 2599 if (!ct3d->scan_media_hasrun) { 2600 return CXL_MBOX_UNSUPPORTED; 2601 } 2602 2603 /* 2604 * Calculate limits, all entries are within the same address range of the 2605 * last scan media call. 2606 */ 2607 QLIST_FOREACH(ent, scan_media_results, node) { 2608 size_t rec_size = record_count * sizeof(out->records[0]); 2609 2610 if (sizeof(*out) + rec_size < CXL_MAILBOX_MAX_PAYLOAD_SIZE) { 2611 record_count++; 2612 } 2613 total_count++; 2614 } 2615 2616 out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]); 2617 assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE); 2618 2619 memset(out, 0, out_pl_len); 2620 QLIST_FOREACH_SAFE(ent, scan_media_results, node, next) { 2621 uint64_t start, stop; 2622 2623 if (i == record_count) { 2624 break; 2625 } 2626 2627 start = ROUND_DOWN(ent->start, 64ull); 2628 stop = ROUND_DOWN(ent->start, 64ull) + ent->length; 2629 stq_le_p(&out->records[i].addr, start); 2630 stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE); 2631 i++; 2632 2633 /* consume the returning entry */ 2634 QLIST_REMOVE(ent, node); 2635 g_free(ent); 2636 } 2637 2638 stw_le_p(&out->count, record_count); 2639 if (total_count > record_count) { 2640 out->flags = (1 << 0); /* More Media Error Records */ 2641 } 2642 2643 *len_out = out_pl_len; 2644 return CXL_MBOX_SUCCESS; 2645 } 2646 2647 /* 2648 * CXL r3.1 section 8.2.9.9.9.1: Get Dynamic Capacity Configuration 2649 * (Opcode: 4800h) 2650 */ 2651 static CXLRetCode cmd_dcd_get_dyn_cap_config(const struct cxl_cmd *cmd, 2652 uint8_t *payload_in, 2653 size_t len_in, 2654 uint8_t *payload_out, 2655 size_t *len_out, 2656 CXLCCI *cci) 2657 { 2658 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2659 struct { 2660 uint8_t region_cnt; 2661 uint8_t start_rid; 2662 } QEMU_PACKED *in = (void *)payload_in; 2663 struct { 2664 uint8_t num_regions; 2665 uint8_t regions_returned; 2666 uint8_t rsvd1[6]; 2667 struct { 2668 uint64_t base; 2669 uint64_t decode_len; 2670 uint64_t region_len; 2671 uint64_t block_size; 2672 uint32_t dsmadhandle; 2673 uint8_t flags; 2674 uint8_t rsvd2[3]; 2675 } QEMU_PACKED records[]; 2676 } QEMU_PACKED *out = (void *)payload_out; 2677 struct { 2678 uint32_t num_extents_supported; 2679 uint32_t num_extents_available; 2680 uint32_t num_tags_supported; 2681 uint32_t num_tags_available; 2682 } QEMU_PACKED *extra_out; 2683 uint16_t record_count; 2684 uint16_t i; 2685 uint16_t out_pl_len; 2686 uint8_t start_rid; 2687 2688 start_rid = in->start_rid; 2689 if (start_rid >= ct3d->dc.num_regions) { 2690 return CXL_MBOX_INVALID_INPUT; 2691 } 2692 2693 record_count = MIN(ct3d->dc.num_regions - in->start_rid, in->region_cnt); 2694 2695 out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]); 2696 extra_out = (void *)(payload_out + out_pl_len); 2697 out_pl_len += sizeof(*extra_out); 2698 assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE); 2699 2700 out->num_regions = ct3d->dc.num_regions; 2701 out->regions_returned = record_count; 2702 for (i = 0; i < record_count; i++) { 2703 stq_le_p(&out->records[i].base, 2704 ct3d->dc.regions[start_rid + i].base); 2705 stq_le_p(&out->records[i].decode_len, 2706 ct3d->dc.regions[start_rid + i].decode_len / 2707 CXL_CAPACITY_MULTIPLIER); 2708 stq_le_p(&out->records[i].region_len, 2709 ct3d->dc.regions[start_rid + i].len); 2710 stq_le_p(&out->records[i].block_size, 2711 ct3d->dc.regions[start_rid + i].block_size); 2712 stl_le_p(&out->records[i].dsmadhandle, 2713 ct3d->dc.regions[start_rid + i].dsmadhandle); 2714 out->records[i].flags = ct3d->dc.regions[start_rid + i].flags; 2715 } 2716 /* 2717 * TODO: Assign values once extents and tags are introduced 2718 * to use. 2719 */ 2720 stl_le_p(&extra_out->num_extents_supported, CXL_NUM_EXTENTS_SUPPORTED); 2721 stl_le_p(&extra_out->num_extents_available, CXL_NUM_EXTENTS_SUPPORTED - 2722 ct3d->dc.total_extent_count); 2723 stl_le_p(&extra_out->num_tags_supported, CXL_NUM_TAGS_SUPPORTED); 2724 stl_le_p(&extra_out->num_tags_available, CXL_NUM_TAGS_SUPPORTED); 2725 2726 *len_out = out_pl_len; 2727 return CXL_MBOX_SUCCESS; 2728 } 2729 2730 /* 2731 * CXL r3.1 section 8.2.9.9.9.2: 2732 * Get Dynamic Capacity Extent List (Opcode 4801h) 2733 */ 2734 static CXLRetCode cmd_dcd_get_dyn_cap_ext_list(const struct cxl_cmd *cmd, 2735 uint8_t *payload_in, 2736 size_t len_in, 2737 uint8_t *payload_out, 2738 size_t *len_out, 2739 CXLCCI *cci) 2740 { 2741 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2742 struct { 2743 uint32_t extent_cnt; 2744 uint32_t start_extent_id; 2745 } QEMU_PACKED *in = (void *)payload_in; 2746 struct { 2747 uint32_t count; 2748 uint32_t total_extents; 2749 uint32_t generation_num; 2750 uint8_t rsvd[4]; 2751 CXLDCExtentRaw records[]; 2752 } QEMU_PACKED *out = (void *)payload_out; 2753 uint32_t start_extent_id = in->start_extent_id; 2754 CXLDCExtentList *extent_list = &ct3d->dc.extents; 2755 uint16_t record_count = 0, i = 0, record_done = 0; 2756 uint16_t out_pl_len, size; 2757 CXLDCExtent *ent; 2758 2759 if (start_extent_id > ct3d->dc.nr_extents_accepted) { 2760 return CXL_MBOX_INVALID_INPUT; 2761 } 2762 2763 record_count = MIN(in->extent_cnt, 2764 ct3d->dc.total_extent_count - start_extent_id); 2765 size = CXL_MAILBOX_MAX_PAYLOAD_SIZE - sizeof(*out); 2766 record_count = MIN(record_count, size / sizeof(out->records[0])); 2767 out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]); 2768 2769 stl_le_p(&out->count, record_count); 2770 stl_le_p(&out->total_extents, ct3d->dc.nr_extents_accepted); 2771 stl_le_p(&out->generation_num, ct3d->dc.ext_list_gen_seq); 2772 2773 if (record_count > 0) { 2774 CXLDCExtentRaw *out_rec = &out->records[record_done]; 2775 2776 QTAILQ_FOREACH(ent, extent_list, node) { 2777 if (i++ < start_extent_id) { 2778 continue; 2779 } 2780 stq_le_p(&out_rec->start_dpa, ent->start_dpa); 2781 stq_le_p(&out_rec->len, ent->len); 2782 memcpy(&out_rec->tag, ent->tag, 0x10); 2783 stw_le_p(&out_rec->shared_seq, ent->shared_seq); 2784 2785 record_done++; 2786 out_rec++; 2787 if (record_done == record_count) { 2788 break; 2789 } 2790 } 2791 } 2792 2793 *len_out = out_pl_len; 2794 return CXL_MBOX_SUCCESS; 2795 } 2796 2797 /* 2798 * Check whether any bit between addr[nr, nr+size) is set, 2799 * return true if any bit is set, otherwise return false 2800 */ 2801 bool test_any_bits_set(const unsigned long *addr, unsigned long nr, 2802 unsigned long size) 2803 { 2804 unsigned long res = find_next_bit(addr, size + nr, nr); 2805 2806 return res < nr + size; 2807 } 2808 2809 CXLDCRegion *cxl_find_dc_region(CXLType3Dev *ct3d, uint64_t dpa, uint64_t len) 2810 { 2811 int i; 2812 CXLDCRegion *region = &ct3d->dc.regions[0]; 2813 2814 if (dpa < region->base || 2815 dpa >= region->base + ct3d->dc.total_capacity) { 2816 return NULL; 2817 } 2818 2819 /* 2820 * CXL r3.1 section 9.13.3: Dynamic Capacity Device (DCD) 2821 * 2822 * Regions are used in increasing-DPA order, with Region 0 being used for 2823 * the lowest DPA of Dynamic Capacity and Region 7 for the highest DPA. 2824 * So check from the last region to find where the dpa belongs. Extents that 2825 * cross multiple regions are not allowed. 2826 */ 2827 for (i = ct3d->dc.num_regions - 1; i >= 0; i--) { 2828 region = &ct3d->dc.regions[i]; 2829 if (dpa >= region->base) { 2830 if (dpa + len > region->base + region->len) { 2831 return NULL; 2832 } 2833 return region; 2834 } 2835 } 2836 2837 return NULL; 2838 } 2839 2840 void cxl_insert_extent_to_extent_list(CXLDCExtentList *list, 2841 uint64_t dpa, 2842 uint64_t len, 2843 uint8_t *tag, 2844 uint16_t shared_seq) 2845 { 2846 CXLDCExtent *extent; 2847 2848 extent = g_new0(CXLDCExtent, 1); 2849 extent->start_dpa = dpa; 2850 extent->len = len; 2851 if (tag) { 2852 memcpy(extent->tag, tag, 0x10); 2853 } 2854 extent->shared_seq = shared_seq; 2855 2856 QTAILQ_INSERT_TAIL(list, extent, node); 2857 } 2858 2859 void cxl_remove_extent_from_extent_list(CXLDCExtentList *list, 2860 CXLDCExtent *extent) 2861 { 2862 QTAILQ_REMOVE(list, extent, node); 2863 g_free(extent); 2864 } 2865 2866 /* 2867 * Add a new extent to the extent "group" if group exists; 2868 * otherwise, create a new group 2869 * Return value: the extent group where the extent is inserted. 2870 */ 2871 CXLDCExtentGroup *cxl_insert_extent_to_extent_group(CXLDCExtentGroup *group, 2872 uint64_t dpa, 2873 uint64_t len, 2874 uint8_t *tag, 2875 uint16_t shared_seq) 2876 { 2877 if (!group) { 2878 group = g_new0(CXLDCExtentGroup, 1); 2879 QTAILQ_INIT(&group->list); 2880 } 2881 cxl_insert_extent_to_extent_list(&group->list, dpa, len, 2882 tag, shared_seq); 2883 return group; 2884 } 2885 2886 void cxl_extent_group_list_insert_tail(CXLDCExtentGroupList *list, 2887 CXLDCExtentGroup *group) 2888 { 2889 QTAILQ_INSERT_TAIL(list, group, node); 2890 } 2891 2892 uint32_t cxl_extent_group_list_delete_front(CXLDCExtentGroupList *list) 2893 { 2894 CXLDCExtent *ent, *ent_next; 2895 CXLDCExtentGroup *group = QTAILQ_FIRST(list); 2896 uint32_t extents_deleted = 0; 2897 2898 QTAILQ_REMOVE(list, group, node); 2899 QTAILQ_FOREACH_SAFE(ent, &group->list, node, ent_next) { 2900 cxl_remove_extent_from_extent_list(&group->list, ent); 2901 extents_deleted++; 2902 } 2903 g_free(group); 2904 2905 return extents_deleted; 2906 } 2907 2908 /* 2909 * CXL r3.1 Table 8-168: Add Dynamic Capacity Response Input Payload 2910 * CXL r3.1 Table 8-170: Release Dynamic Capacity Input Payload 2911 */ 2912 typedef struct CXLUpdateDCExtentListInPl { 2913 uint32_t num_entries_updated; 2914 uint8_t flags; 2915 uint8_t rsvd[3]; 2916 /* CXL r3.1 Table 8-169: Updated Extent */ 2917 struct { 2918 uint64_t start_dpa; 2919 uint64_t len; 2920 uint8_t rsvd[8]; 2921 } QEMU_PACKED updated_entries[]; 2922 } QEMU_PACKED CXLUpdateDCExtentListInPl; 2923 2924 /* 2925 * For the extents in the extent list to operate, check whether they are valid 2926 * 1. The extent should be in the range of a valid DC region; 2927 * 2. The extent should not cross multiple regions; 2928 * 3. The start DPA and the length of the extent should align with the block 2929 * size of the region; 2930 * 4. The address range of multiple extents in the list should not overlap. 2931 */ 2932 static CXLRetCode cxl_detect_malformed_extent_list(CXLType3Dev *ct3d, 2933 const CXLUpdateDCExtentListInPl *in) 2934 { 2935 uint64_t min_block_size = UINT64_MAX; 2936 CXLDCRegion *region; 2937 CXLDCRegion *lastregion = &ct3d->dc.regions[ct3d->dc.num_regions - 1]; 2938 g_autofree unsigned long *blk_bitmap = NULL; 2939 uint64_t dpa, len; 2940 uint32_t i; 2941 2942 for (i = 0; i < ct3d->dc.num_regions; i++) { 2943 region = &ct3d->dc.regions[i]; 2944 min_block_size = MIN(min_block_size, region->block_size); 2945 } 2946 2947 blk_bitmap = bitmap_new((lastregion->base + lastregion->len - 2948 ct3d->dc.regions[0].base) / min_block_size); 2949 2950 for (i = 0; i < in->num_entries_updated; i++) { 2951 dpa = in->updated_entries[i].start_dpa; 2952 len = in->updated_entries[i].len; 2953 2954 region = cxl_find_dc_region(ct3d, dpa, len); 2955 if (!region) { 2956 return CXL_MBOX_INVALID_PA; 2957 } 2958 2959 dpa -= ct3d->dc.regions[0].base; 2960 if (dpa % region->block_size || len % region->block_size) { 2961 return CXL_MBOX_INVALID_EXTENT_LIST; 2962 } 2963 /* the dpa range already covered by some other extents in the list */ 2964 if (test_any_bits_set(blk_bitmap, dpa / min_block_size, 2965 len / min_block_size)) { 2966 return CXL_MBOX_INVALID_EXTENT_LIST; 2967 } 2968 bitmap_set(blk_bitmap, dpa / min_block_size, len / min_block_size); 2969 } 2970 2971 return CXL_MBOX_SUCCESS; 2972 } 2973 2974 static CXLRetCode cxl_dcd_add_dyn_cap_rsp_dry_run(CXLType3Dev *ct3d, 2975 const CXLUpdateDCExtentListInPl *in) 2976 { 2977 uint32_t i; 2978 CXLDCExtent *ent; 2979 CXLDCExtentGroup *ext_group; 2980 uint64_t dpa, len; 2981 Range range1, range2; 2982 2983 for (i = 0; i < in->num_entries_updated; i++) { 2984 dpa = in->updated_entries[i].start_dpa; 2985 len = in->updated_entries[i].len; 2986 2987 range_init_nofail(&range1, dpa, len); 2988 2989 /* 2990 * The host-accepted DPA range must be contained by the first extent 2991 * group in the pending list 2992 */ 2993 ext_group = QTAILQ_FIRST(&ct3d->dc.extents_pending); 2994 if (!cxl_extents_contains_dpa_range(&ext_group->list, dpa, len)) { 2995 return CXL_MBOX_INVALID_PA; 2996 } 2997 2998 /* to-be-added range should not overlap with range already accepted */ 2999 QTAILQ_FOREACH(ent, &ct3d->dc.extents, node) { 3000 range_init_nofail(&range2, ent->start_dpa, ent->len); 3001 if (range_overlaps_range(&range1, &range2)) { 3002 return CXL_MBOX_INVALID_PA; 3003 } 3004 } 3005 } 3006 return CXL_MBOX_SUCCESS; 3007 } 3008 3009 /* 3010 * CXL r3.1 section 8.2.9.9.9.3: Add Dynamic Capacity Response (Opcode 4802h) 3011 * An extent is added to the extent list and becomes usable only after the 3012 * response is processed successfully. 3013 */ 3014 static CXLRetCode cmd_dcd_add_dyn_cap_rsp(const struct cxl_cmd *cmd, 3015 uint8_t *payload_in, 3016 size_t len_in, 3017 uint8_t *payload_out, 3018 size_t *len_out, 3019 CXLCCI *cci) 3020 { 3021 CXLUpdateDCExtentListInPl *in = (void *)payload_in; 3022 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 3023 CXLDCExtentList *extent_list = &ct3d->dc.extents; 3024 uint32_t i, num; 3025 uint64_t dpa, len; 3026 CXLRetCode ret; 3027 3028 if (len_in < sizeof(*in)) { 3029 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 3030 } 3031 3032 if (in->num_entries_updated == 0) { 3033 num = cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending); 3034 ct3d->dc.total_extent_count -= num; 3035 return CXL_MBOX_SUCCESS; 3036 } 3037 3038 if (len_in < 3039 sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) { 3040 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 3041 } 3042 3043 /* Adding extents causes exceeding device's extent tracking ability. */ 3044 if (in->num_entries_updated + ct3d->dc.total_extent_count > 3045 CXL_NUM_EXTENTS_SUPPORTED) { 3046 return CXL_MBOX_RESOURCES_EXHAUSTED; 3047 } 3048 3049 ret = cxl_detect_malformed_extent_list(ct3d, in); 3050 if (ret != CXL_MBOX_SUCCESS) { 3051 return ret; 3052 } 3053 3054 ret = cxl_dcd_add_dyn_cap_rsp_dry_run(ct3d, in); 3055 if (ret != CXL_MBOX_SUCCESS) { 3056 return ret; 3057 } 3058 3059 for (i = 0; i < in->num_entries_updated; i++) { 3060 dpa = in->updated_entries[i].start_dpa; 3061 len = in->updated_entries[i].len; 3062 3063 cxl_insert_extent_to_extent_list(extent_list, dpa, len, NULL, 0); 3064 ct3d->dc.total_extent_count += 1; 3065 ct3d->dc.nr_extents_accepted += 1; 3066 ct3_set_region_block_backed(ct3d, dpa, len); 3067 } 3068 /* Remove the first extent group in the pending list */ 3069 num = cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending); 3070 ct3d->dc.total_extent_count -= num; 3071 3072 return CXL_MBOX_SUCCESS; 3073 } 3074 3075 /* 3076 * Copy extent list from src to dst 3077 * Return value: number of extents copied 3078 */ 3079 static uint32_t copy_extent_list(CXLDCExtentList *dst, 3080 const CXLDCExtentList *src) 3081 { 3082 uint32_t cnt = 0; 3083 CXLDCExtent *ent; 3084 3085 if (!dst || !src) { 3086 return 0; 3087 } 3088 3089 QTAILQ_FOREACH(ent, src, node) { 3090 cxl_insert_extent_to_extent_list(dst, ent->start_dpa, ent->len, 3091 ent->tag, ent->shared_seq); 3092 cnt++; 3093 } 3094 return cnt; 3095 } 3096 3097 static CXLRetCode cxl_dc_extent_release_dry_run(CXLType3Dev *ct3d, 3098 const CXLUpdateDCExtentListInPl *in, CXLDCExtentList *updated_list, 3099 uint32_t *updated_list_size) 3100 { 3101 CXLDCExtent *ent, *ent_next; 3102 uint64_t dpa, len; 3103 uint32_t i; 3104 int cnt_delta = 0; 3105 CXLRetCode ret = CXL_MBOX_SUCCESS; 3106 3107 QTAILQ_INIT(updated_list); 3108 copy_extent_list(updated_list, &ct3d->dc.extents); 3109 3110 for (i = 0; i < in->num_entries_updated; i++) { 3111 Range range; 3112 3113 dpa = in->updated_entries[i].start_dpa; 3114 len = in->updated_entries[i].len; 3115 3116 /* Check if the DPA range is not fully backed with valid extents */ 3117 if (!ct3_test_region_block_backed(ct3d, dpa, len)) { 3118 ret = CXL_MBOX_INVALID_PA; 3119 goto free_and_exit; 3120 } 3121 3122 /* After this point, extent overflow is the only error can happen */ 3123 while (len > 0) { 3124 QTAILQ_FOREACH(ent, updated_list, node) { 3125 range_init_nofail(&range, ent->start_dpa, ent->len); 3126 3127 if (range_contains(&range, dpa)) { 3128 uint64_t len1, len2 = 0, len_done = 0; 3129 uint64_t ent_start_dpa = ent->start_dpa; 3130 uint64_t ent_len = ent->len; 3131 3132 len1 = dpa - ent->start_dpa; 3133 /* Found the extent or the subset of an existing extent */ 3134 if (range_contains(&range, dpa + len - 1)) { 3135 len2 = ent_start_dpa + ent_len - dpa - len; 3136 } else { 3137 dpa = ent_start_dpa + ent_len; 3138 } 3139 len_done = ent_len - len1 - len2; 3140 3141 cxl_remove_extent_from_extent_list(updated_list, ent); 3142 cnt_delta--; 3143 3144 if (len1) { 3145 cxl_insert_extent_to_extent_list(updated_list, 3146 ent_start_dpa, 3147 len1, NULL, 0); 3148 cnt_delta++; 3149 } 3150 if (len2) { 3151 cxl_insert_extent_to_extent_list(updated_list, 3152 dpa + len, 3153 len2, NULL, 0); 3154 cnt_delta++; 3155 } 3156 3157 if (cnt_delta + ct3d->dc.total_extent_count > 3158 CXL_NUM_EXTENTS_SUPPORTED) { 3159 ret = CXL_MBOX_RESOURCES_EXHAUSTED; 3160 goto free_and_exit; 3161 } 3162 3163 len -= len_done; 3164 break; 3165 } 3166 } 3167 } 3168 } 3169 free_and_exit: 3170 if (ret != CXL_MBOX_SUCCESS) { 3171 QTAILQ_FOREACH_SAFE(ent, updated_list, node, ent_next) { 3172 cxl_remove_extent_from_extent_list(updated_list, ent); 3173 } 3174 *updated_list_size = 0; 3175 } else { 3176 *updated_list_size = ct3d->dc.nr_extents_accepted + cnt_delta; 3177 } 3178 3179 return ret; 3180 } 3181 3182 /* 3183 * CXL r3.1 section 8.2.9.9.9.4: Release Dynamic Capacity (Opcode 4803h) 3184 */ 3185 static CXLRetCode cmd_dcd_release_dyn_cap(const struct cxl_cmd *cmd, 3186 uint8_t *payload_in, 3187 size_t len_in, 3188 uint8_t *payload_out, 3189 size_t *len_out, 3190 CXLCCI *cci) 3191 { 3192 CXLUpdateDCExtentListInPl *in = (void *)payload_in; 3193 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 3194 CXLDCExtentList updated_list; 3195 CXLDCExtent *ent, *ent_next; 3196 uint32_t updated_list_size; 3197 CXLRetCode ret; 3198 3199 if (len_in < sizeof(*in)) { 3200 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 3201 } 3202 3203 if (in->num_entries_updated == 0) { 3204 return CXL_MBOX_INVALID_INPUT; 3205 } 3206 3207 if (len_in < 3208 sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) { 3209 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 3210 } 3211 3212 ret = cxl_detect_malformed_extent_list(ct3d, in); 3213 if (ret != CXL_MBOX_SUCCESS) { 3214 return ret; 3215 } 3216 3217 ret = cxl_dc_extent_release_dry_run(ct3d, in, &updated_list, 3218 &updated_list_size); 3219 if (ret != CXL_MBOX_SUCCESS) { 3220 return ret; 3221 } 3222 3223 /* 3224 * If the dry run release passes, the returned updated_list will 3225 * be the updated extent list and we just need to clear the extents 3226 * in the accepted list and copy extents in the updated_list to accepted 3227 * list and update the extent count; 3228 */ 3229 QTAILQ_FOREACH_SAFE(ent, &ct3d->dc.extents, node, ent_next) { 3230 ct3_clear_region_block_backed(ct3d, ent->start_dpa, ent->len); 3231 cxl_remove_extent_from_extent_list(&ct3d->dc.extents, ent); 3232 } 3233 copy_extent_list(&ct3d->dc.extents, &updated_list); 3234 QTAILQ_FOREACH_SAFE(ent, &updated_list, node, ent_next) { 3235 ct3_set_region_block_backed(ct3d, ent->start_dpa, ent->len); 3236 cxl_remove_extent_from_extent_list(&updated_list, ent); 3237 } 3238 ct3d->dc.total_extent_count += (updated_list_size - 3239 ct3d->dc.nr_extents_accepted); 3240 3241 ct3d->dc.nr_extents_accepted = updated_list_size; 3242 3243 return CXL_MBOX_SUCCESS; 3244 } 3245 3246 /* CXL r3.2 section 7.6.7.6.1: Get DCD Info (Opcode 5600h) */ 3247 static CXLRetCode cmd_fm_get_dcd_info(const struct cxl_cmd *cmd, 3248 uint8_t *payload_in, 3249 size_t len_in, 3250 uint8_t *payload_out, 3251 size_t *len_out, 3252 CXLCCI *cci) 3253 { 3254 struct { 3255 uint8_t num_hosts; 3256 uint8_t num_regions_supported; 3257 uint8_t rsvd1[2]; 3258 uint16_t supported_add_sel_policy_bitmask; 3259 uint8_t rsvd2[2]; 3260 uint16_t supported_removal_policy_bitmask; 3261 uint8_t sanitize_on_release_bitmask; 3262 uint8_t rsvd3; 3263 uint64_t total_dynamic_capacity; 3264 uint64_t region_blk_size_bitmasks[8]; 3265 } QEMU_PACKED *out = (void *)payload_out; 3266 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 3267 CXLDCRegion *region; 3268 int i; 3269 3270 out->num_hosts = 1; 3271 out->num_regions_supported = ct3d->dc.num_regions; 3272 stw_le_p(&out->supported_add_sel_policy_bitmask, 3273 BIT(CXL_EXTENT_SELECTION_POLICY_PRESCRIPTIVE)); 3274 stw_le_p(&out->supported_removal_policy_bitmask, 3275 BIT(CXL_EXTENT_REMOVAL_POLICY_PRESCRIPTIVE)); 3276 out->sanitize_on_release_bitmask = 0; 3277 3278 stq_le_p(&out->total_dynamic_capacity, 3279 ct3d->dc.total_capacity / CXL_CAPACITY_MULTIPLIER); 3280 3281 for (i = 0; i < ct3d->dc.num_regions; i++) { 3282 region = &ct3d->dc.regions[i]; 3283 memcpy(&out->region_blk_size_bitmasks[i], 3284 ®ion->supported_blk_size_bitmask, 3285 sizeof(out->region_blk_size_bitmasks[i])); 3286 } 3287 3288 *len_out = sizeof(*out); 3289 return CXL_MBOX_SUCCESS; 3290 } 3291 3292 static void build_dsmas_flags(uint8_t *flags, CXLDCRegion *region) 3293 { 3294 *flags = 0; 3295 3296 if (region->nonvolatile) { 3297 *flags |= BIT(CXL_DSMAS_FLAGS_NONVOLATILE); 3298 } 3299 if (region->sharable) { 3300 *flags |= BIT(CXL_DSMAS_FLAGS_SHARABLE); 3301 } 3302 if (region->hw_managed_coherency) { 3303 *flags |= BIT(CXL_DSMAS_FLAGS_HW_MANAGED_COHERENCY); 3304 } 3305 if (region->ic_specific_dc_management) { 3306 *flags |= BIT(CXL_DSMAS_FLAGS_IC_SPECIFIC_DC_MANAGEMENT); 3307 } 3308 if (region->rdonly) { 3309 *flags |= BIT(CXL_DSMAS_FLAGS_RDONLY); 3310 } 3311 } 3312 3313 /* 3314 * CXL r3.2 section 7.6.7.6.2: 3315 * Get Host DC Region Configuration (Opcode 5601h) 3316 */ 3317 static CXLRetCode cmd_fm_get_host_dc_region_config(const struct cxl_cmd *cmd, 3318 uint8_t *payload_in, 3319 size_t len_in, 3320 uint8_t *payload_out, 3321 size_t *len_out, 3322 CXLCCI *cci) 3323 { 3324 struct { 3325 uint16_t host_id; 3326 uint8_t region_cnt; 3327 uint8_t start_rid; 3328 } QEMU_PACKED *in = (void *)payload_in; 3329 struct { 3330 uint16_t host_id; 3331 uint8_t num_regions; 3332 uint8_t regions_returned; 3333 struct { 3334 uint64_t base; 3335 uint64_t decode_len; 3336 uint64_t region_len; 3337 uint64_t block_size; 3338 uint8_t flags; 3339 uint8_t rsvd1[3]; 3340 uint8_t sanitize; 3341 uint8_t rsvd2[3]; 3342 } QEMU_PACKED records[]; 3343 } QEMU_PACKED *out = (void *)payload_out; 3344 struct { 3345 uint32_t num_extents_supported; 3346 uint32_t num_extents_available; 3347 uint32_t num_tags_supported; 3348 uint32_t num_tags_available; 3349 } QEMU_PACKED *extra_out; 3350 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 3351 uint16_t record_count, out_pl_len, i; 3352 3353 if (in->start_rid >= ct3d->dc.num_regions) { 3354 return CXL_MBOX_INVALID_INPUT; 3355 } 3356 record_count = MIN(ct3d->dc.num_regions - in->start_rid, in->region_cnt); 3357 3358 out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]); 3359 extra_out = (void *)out + out_pl_len; 3360 out_pl_len += sizeof(*extra_out); 3361 3362 assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE); 3363 3364 stw_le_p(&out->host_id, 0); 3365 out->num_regions = ct3d->dc.num_regions; 3366 out->regions_returned = record_count; 3367 3368 for (i = 0; i < record_count; i++) { 3369 stq_le_p(&out->records[i].base, 3370 ct3d->dc.regions[in->start_rid + i].base); 3371 stq_le_p(&out->records[i].decode_len, 3372 ct3d->dc.regions[in->start_rid + i].decode_len / 3373 CXL_CAPACITY_MULTIPLIER); 3374 stq_le_p(&out->records[i].region_len, 3375 ct3d->dc.regions[in->start_rid + i].len); 3376 stq_le_p(&out->records[i].block_size, 3377 ct3d->dc.regions[in->start_rid + i].block_size); 3378 build_dsmas_flags(&out->records[i].flags, 3379 &ct3d->dc.regions[in->start_rid + i]); 3380 /* Sanitize is bit 0 of flags. */ 3381 out->records[i].sanitize = 3382 ct3d->dc.regions[in->start_rid + i].flags & BIT(0); 3383 } 3384 3385 stl_le_p(&extra_out->num_extents_supported, CXL_NUM_EXTENTS_SUPPORTED); 3386 stl_le_p(&extra_out->num_extents_available, CXL_NUM_EXTENTS_SUPPORTED - 3387 ct3d->dc.total_extent_count); 3388 stl_le_p(&extra_out->num_tags_supported, CXL_NUM_TAGS_SUPPORTED); 3389 stl_le_p(&extra_out->num_tags_available, CXL_NUM_TAGS_SUPPORTED); 3390 3391 *len_out = out_pl_len; 3392 return CXL_MBOX_SUCCESS; 3393 } 3394 3395 /* CXL r3.2 section 7.6.7.6.3: Set Host DC Region Configuration (Opcode 5602) */ 3396 static CXLRetCode cmd_fm_set_dc_region_config(const struct cxl_cmd *cmd, 3397 uint8_t *payload_in, 3398 size_t len_in, 3399 uint8_t *payload_out, 3400 size_t *len_out, 3401 CXLCCI *cci) 3402 { 3403 struct { 3404 uint8_t reg_id; 3405 uint8_t rsvd[3]; 3406 uint64_t block_sz; 3407 uint8_t flags; 3408 uint8_t rsvd2[3]; 3409 } QEMU_PACKED *in = (void *)payload_in; 3410 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 3411 CXLEventDynamicCapacity dcEvent = {}; 3412 CXLDCRegion *region = &ct3d->dc.regions[in->reg_id]; 3413 3414 /* 3415 * CXL r3.2 7.6.7.6.3: Set DC Region Configuration 3416 * This command shall fail with Unsupported when the Sanitize on Release 3417 * field does not match the region’s configuration... and the device 3418 * does not support reconfiguration of the Sanitize on Release setting. 3419 * 3420 * Currently not reconfigurable, so always fail if sanitize bit (bit 0) 3421 * doesn't match. 3422 */ 3423 if ((in->flags & 0x1) != (region->flags & 0x1)) { 3424 return CXL_MBOX_UNSUPPORTED; 3425 } 3426 3427 if (in->reg_id >= DCD_MAX_NUM_REGION) { 3428 return CXL_MBOX_UNSUPPORTED; 3429 } 3430 3431 /* Check that no extents are in the region being reconfigured */ 3432 if (!bitmap_empty(region->blk_bitmap, region->len / region->block_size)) { 3433 return CXL_MBOX_UNSUPPORTED; 3434 } 3435 3436 /* Check that new block size is supported */ 3437 if (!is_power_of_2(in->block_sz) || 3438 !(in->block_sz & region->supported_blk_size_bitmask)) { 3439 return CXL_MBOX_INVALID_INPUT; 3440 } 3441 3442 /* Return success if new block size == current block size */ 3443 if (in->block_sz == region->block_size) { 3444 return CXL_MBOX_SUCCESS; 3445 } 3446 3447 /* Free bitmap and create new one for new block size. */ 3448 qemu_mutex_lock(®ion->bitmap_lock); 3449 g_free(region->blk_bitmap); 3450 region->blk_bitmap = bitmap_new(region->len / in->block_sz); 3451 qemu_mutex_unlock(®ion->bitmap_lock); 3452 region->block_size = in->block_sz; 3453 3454 /* Create event record and insert into event log */ 3455 cxl_assign_event_header(&dcEvent.hdr, 3456 &dynamic_capacity_uuid, 3457 (1 << CXL_EVENT_TYPE_INFO), 3458 sizeof(dcEvent), 3459 cxl_device_get_timestamp(&ct3d->cxl_dstate)); 3460 dcEvent.type = DC_EVENT_REGION_CONFIG_UPDATED; 3461 dcEvent.validity_flags = 1; 3462 dcEvent.host_id = 0; 3463 dcEvent.updated_region_id = in->reg_id; 3464 3465 if (cxl_event_insert(&ct3d->cxl_dstate, 3466 CXL_EVENT_TYPE_DYNAMIC_CAP, 3467 (CXLEventRecordRaw *)&dcEvent)) { 3468 cxl_event_irq_assert(ct3d); 3469 } 3470 return CXL_MBOX_SUCCESS; 3471 } 3472 3473 static const struct cxl_cmd cxl_cmd_set[256][256] = { 3474 [INFOSTAT][BACKGROUND_OPERATION_ABORT] = { "BACKGROUND_OPERATION_ABORT", 3475 cmd_infostat_bg_op_abort, 0, 0 }, 3476 [EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS", 3477 cmd_events_get_records, 1, 0 }, 3478 [EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS", 3479 cmd_events_clear_records, ~0, CXL_MBOX_IMMEDIATE_LOG_CHANGE }, 3480 [EVENTS][GET_INTERRUPT_POLICY] = { "EVENTS_GET_INTERRUPT_POLICY", 3481 cmd_events_get_interrupt_policy, 0, 0 }, 3482 [EVENTS][SET_INTERRUPT_POLICY] = { "EVENTS_SET_INTERRUPT_POLICY", 3483 cmd_events_set_interrupt_policy, 3484 ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE }, 3485 [FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO", 3486 cmd_firmware_update_get_info, 0, 0 }, 3487 [FIRMWARE_UPDATE][TRANSFER] = { "FIRMWARE_UPDATE_TRANSFER", 3488 cmd_firmware_update_transfer, ~0, 3489 CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT }, 3490 [FIRMWARE_UPDATE][ACTIVATE] = { "FIRMWARE_UPDATE_ACTIVATE", 3491 cmd_firmware_update_activate, 2, 3492 CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT }, 3493 [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 }, 3494 [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 3495 8, CXL_MBOX_IMMEDIATE_POLICY_CHANGE }, 3496 [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 3497 0, 0 }, 3498 [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 }, 3499 [FEATURES][GET_SUPPORTED] = { "FEATURES_GET_SUPPORTED", 3500 cmd_features_get_supported, 0x8, 0 }, 3501 [FEATURES][GET_FEATURE] = { "FEATURES_GET_FEATURE", 3502 cmd_features_get_feature, 0x15, 0 }, 3503 [FEATURES][SET_FEATURE] = { "FEATURES_SET_FEATURE", 3504 cmd_features_set_feature, 3505 ~0, 3506 (CXL_MBOX_IMMEDIATE_CONFIG_CHANGE | 3507 CXL_MBOX_IMMEDIATE_DATA_CHANGE | 3508 CXL_MBOX_IMMEDIATE_POLICY_CHANGE | 3509 CXL_MBOX_IMMEDIATE_LOG_CHANGE | 3510 CXL_MBOX_SECURITY_STATE_CHANGE)}, 3511 [IDENTIFY][MEMORY_DEVICE] = { "IDENTIFY_MEMORY_DEVICE", 3512 cmd_identify_memory_device, 0, 0 }, 3513 [CCLS][GET_PARTITION_INFO] = { "CCLS_GET_PARTITION_INFO", 3514 cmd_ccls_get_partition_info, 0, 0 }, 3515 [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 }, 3516 [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa, 3517 ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE | CXL_MBOX_IMMEDIATE_DATA_CHANGE }, 3518 [HEALTH_INFO_ALERTS][GET_ALERT_CONFIG] = { 3519 "HEALTH_INFO_ALERTS_GET_ALERT_CONFIG", 3520 cmd_get_alert_config, 0, 0 }, 3521 [HEALTH_INFO_ALERTS][SET_ALERT_CONFIG] = { 3522 "HEALTH_INFO_ALERTS_SET_ALERT_CONFIG", 3523 cmd_set_alert_config, 12, CXL_MBOX_IMMEDIATE_POLICY_CHANGE }, 3524 [SANITIZE][OVERWRITE] = { "SANITIZE_OVERWRITE", cmd_sanitize_overwrite, 0, 3525 (CXL_MBOX_IMMEDIATE_DATA_CHANGE | 3526 CXL_MBOX_SECURITY_STATE_CHANGE | 3527 CXL_MBOX_BACKGROUND_OPERATION | 3528 CXL_MBOX_BACKGROUND_OPERATION_ABORT)}, 3529 [SANITIZE][MEDIA_OPERATIONS] = { "MEDIA_OPERATIONS", cmd_media_operations, 3530 ~0, 3531 (CXL_MBOX_IMMEDIATE_DATA_CHANGE | 3532 CXL_MBOX_BACKGROUND_OPERATION)}, 3533 [PERSISTENT_MEM][GET_SECURITY_STATE] = { "GET_SECURITY_STATE", 3534 cmd_get_security_state, 0, 0 }, 3535 [MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST", 3536 cmd_media_get_poison_list, 16, 0 }, 3537 [MEDIA_AND_POISON][INJECT_POISON] = { "MEDIA_AND_POISON_INJECT_POISON", 3538 cmd_media_inject_poison, 8, 0 }, 3539 [MEDIA_AND_POISON][CLEAR_POISON] = { "MEDIA_AND_POISON_CLEAR_POISON", 3540 cmd_media_clear_poison, 72, 0 }, 3541 [MEDIA_AND_POISON][GET_SCAN_MEDIA_CAPABILITIES] = { 3542 "MEDIA_AND_POISON_GET_SCAN_MEDIA_CAPABILITIES", 3543 cmd_media_get_scan_media_capabilities, 16, 0 }, 3544 [MEDIA_AND_POISON][SCAN_MEDIA] = { "MEDIA_AND_POISON_SCAN_MEDIA", 3545 cmd_media_scan_media, 17, 3546 (CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT)}, 3547 [MEDIA_AND_POISON][GET_SCAN_MEDIA_RESULTS] = { 3548 "MEDIA_AND_POISON_GET_SCAN_MEDIA_RESULTS", 3549 cmd_media_get_scan_media_results, 0, 0 }, 3550 }; 3551 3552 static const struct cxl_cmd cxl_cmd_set_dcd[256][256] = { 3553 [DCD_CONFIG][GET_DC_CONFIG] = { "DCD_GET_DC_CONFIG", 3554 cmd_dcd_get_dyn_cap_config, 2, 0 }, 3555 [DCD_CONFIG][GET_DYN_CAP_EXT_LIST] = { 3556 "DCD_GET_DYNAMIC_CAPACITY_EXTENT_LIST", cmd_dcd_get_dyn_cap_ext_list, 3557 8, 0 }, 3558 [DCD_CONFIG][ADD_DYN_CAP_RSP] = { 3559 "DCD_ADD_DYNAMIC_CAPACITY_RESPONSE", cmd_dcd_add_dyn_cap_rsp, 3560 ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE }, 3561 [DCD_CONFIG][RELEASE_DYN_CAP] = { 3562 "DCD_RELEASE_DYNAMIC_CAPACITY", cmd_dcd_release_dyn_cap, 3563 ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE }, 3564 }; 3565 3566 static const struct cxl_cmd cxl_cmd_set_sw[256][256] = { 3567 [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 }, 3568 [INFOSTAT][BACKGROUND_OPERATION_STATUS] = { "BACKGROUND_OPERATION_STATUS", 3569 cmd_infostat_bg_op_sts, 0, 0 }, 3570 [INFOSTAT][BACKGROUND_OPERATION_ABORT] = { "BACKGROUND_OPERATION_ABORT", 3571 cmd_infostat_bg_op_abort, 0, 0 }, 3572 [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 }, 3573 [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 8, 3574 CXL_MBOX_IMMEDIATE_POLICY_CHANGE }, 3575 [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0, 3576 0 }, 3577 [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 }, 3578 [PHYSICAL_SWITCH][IDENTIFY_SWITCH_DEVICE] = { "IDENTIFY_SWITCH_DEVICE", 3579 cmd_identify_switch_device, 0, 0 }, 3580 [PHYSICAL_SWITCH][GET_PHYSICAL_PORT_STATE] = { "SWITCH_PHYSICAL_PORT_STATS", 3581 cmd_get_physical_port_state, ~0, 0 }, 3582 [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND", 3583 cmd_tunnel_management_cmd, ~0, 0 }, 3584 }; 3585 3586 static const struct cxl_cmd cxl_cmd_set_fm_dcd[256][256] = { 3587 [FMAPI_DCD_MGMT][GET_DCD_INFO] = { "GET_DCD_INFO", 3588 cmd_fm_get_dcd_info, 0, 0 }, 3589 [FMAPI_DCD_MGMT][GET_HOST_DC_REGION_CONFIG] = { "GET_HOST_DC_REGION_CONFIG", 3590 cmd_fm_get_host_dc_region_config, 4, 0 }, 3591 [FMAPI_DCD_MGMT][SET_DC_REGION_CONFIG] = { "SET_DC_REGION_CONFIG", 3592 cmd_fm_set_dc_region_config, 16, 3593 (CXL_MBOX_CONFIG_CHANGE_COLD_RESET | 3594 CXL_MBOX_CONFIG_CHANGE_CONV_RESET | 3595 CXL_MBOX_CONFIG_CHANGE_CXL_RESET | 3596 CXL_MBOX_IMMEDIATE_CONFIG_CHANGE | 3597 CXL_MBOX_IMMEDIATE_DATA_CHANGE) }, 3598 }; 3599 3600 /* 3601 * While the command is executing in the background, the device should 3602 * update the percentage complete in the Background Command Status Register 3603 * at least once per second. 3604 */ 3605 3606 #define CXL_MBOX_BG_UPDATE_FREQ 1000UL 3607 3608 int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd, 3609 size_t len_in, uint8_t *pl_in, size_t *len_out, 3610 uint8_t *pl_out, bool *bg_started) 3611 { 3612 int ret; 3613 const struct cxl_cmd *cxl_cmd; 3614 opcode_handler h; 3615 CXLDeviceState *cxl_dstate; 3616 3617 *len_out = 0; 3618 cxl_cmd = &cci->cxl_cmd_set[set][cmd]; 3619 h = cxl_cmd->handler; 3620 if (!h) { 3621 qemu_log_mask(LOG_UNIMP, "Command %04xh not implemented\n", 3622 set << 8 | cmd); 3623 return CXL_MBOX_UNSUPPORTED; 3624 } 3625 3626 if (len_in != cxl_cmd->in && cxl_cmd->in != ~0) { 3627 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 3628 } 3629 3630 /* Only one bg command at a time */ 3631 if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) && 3632 cci->bg.runtime > 0) { 3633 return CXL_MBOX_BUSY; 3634 } 3635 3636 /* forbid any selected commands while the media is disabled */ 3637 if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) { 3638 cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate; 3639 3640 if (cxl_dev_media_disabled(cxl_dstate)) { 3641 if (h == cmd_events_get_records || 3642 h == cmd_ccls_get_partition_info || 3643 h == cmd_ccls_set_lsa || 3644 h == cmd_ccls_get_lsa || 3645 h == cmd_logs_get_log || 3646 h == cmd_media_get_poison_list || 3647 h == cmd_media_inject_poison || 3648 h == cmd_media_clear_poison || 3649 h == cmd_sanitize_overwrite || 3650 h == cmd_firmware_update_transfer || 3651 h == cmd_firmware_update_activate) { 3652 return CXL_MBOX_MEDIA_DISABLED; 3653 } 3654 } 3655 } 3656 3657 ret = (*h)(cxl_cmd, pl_in, len_in, pl_out, len_out, cci); 3658 if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) && 3659 ret == CXL_MBOX_BG_STARTED) { 3660 *bg_started = true; 3661 } else { 3662 *bg_started = false; 3663 } 3664 3665 /* Set bg and the return code */ 3666 if (*bg_started) { 3667 uint64_t now; 3668 3669 cci->bg.opcode = (set << 8) | cmd; 3670 3671 cci->bg.complete_pct = 0; 3672 cci->bg.aborted = false; 3673 cci->bg.ret_code = 0; 3674 3675 now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); 3676 cci->bg.starttime = now; 3677 timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ); 3678 } 3679 3680 return ret; 3681 } 3682 3683 static void bg_timercb(void *opaque) 3684 { 3685 CXLCCI *cci = opaque; 3686 uint64_t now, total_time; 3687 3688 qemu_mutex_lock(&cci->bg.lock); 3689 3690 now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); 3691 total_time = cci->bg.starttime + cci->bg.runtime; 3692 3693 if (now >= total_time) { /* we are done */ 3694 uint16_t ret = CXL_MBOX_SUCCESS; 3695 3696 cci->bg.complete_pct = 100; 3697 cci->bg.ret_code = ret; 3698 switch (cci->bg.opcode) { 3699 case 0x0201: /* fw transfer */ 3700 __do_firmware_xfer(cci); 3701 break; 3702 case 0x4400: /* sanitize */ 3703 { 3704 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 3705 3706 __do_sanitization(ct3d); 3707 cxl_dev_enable_media(&ct3d->cxl_dstate); 3708 } 3709 break; 3710 case 0x4402: /* Media Operations sanitize */ 3711 { 3712 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 3713 __do_sanitize(ct3d); 3714 } 3715 break; 3716 case 0x4304: /* scan media */ 3717 { 3718 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 3719 3720 __do_scan_media(ct3d); 3721 break; 3722 } 3723 default: 3724 __builtin_unreachable(); 3725 break; 3726 } 3727 } else { 3728 /* estimate only */ 3729 cci->bg.complete_pct = 3730 100 * (now - cci->bg.starttime) / cci->bg.runtime; 3731 timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ); 3732 } 3733 3734 if (cci->bg.complete_pct == 100) { 3735 /* TODO: generalize to switch CCI */ 3736 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 3737 CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; 3738 PCIDevice *pdev = PCI_DEVICE(cci->d); 3739 3740 cci->bg.starttime = 0; 3741 /* registers are updated, allow new bg-capable cmds */ 3742 cci->bg.runtime = 0; 3743 3744 if (msix_enabled(pdev)) { 3745 msix_notify(pdev, cxl_dstate->mbox_msi_n); 3746 } else if (msi_enabled(pdev)) { 3747 msi_notify(pdev, cxl_dstate->mbox_msi_n); 3748 } 3749 } 3750 3751 qemu_mutex_unlock(&cci->bg.lock); 3752 } 3753 3754 static void cxl_rebuild_cel(CXLCCI *cci) 3755 { 3756 cci->cel_size = 0; /* Reset for a fresh build */ 3757 for (int set = 0; set < 256; set++) { 3758 for (int cmd = 0; cmd < 256; cmd++) { 3759 if (cci->cxl_cmd_set[set][cmd].handler) { 3760 const struct cxl_cmd *c = &cci->cxl_cmd_set[set][cmd]; 3761 struct cel_log *log = 3762 &cci->cel_log[cci->cel_size]; 3763 3764 log->opcode = (set << 8) | cmd; 3765 log->effect = c->effect; 3766 cci->cel_size++; 3767 } 3768 } 3769 } 3770 } 3771 3772 void cxl_init_cci(CXLCCI *cci, size_t payload_max) 3773 { 3774 cci->payload_max = payload_max; 3775 cxl_rebuild_cel(cci); 3776 3777 cci->bg.complete_pct = 0; 3778 cci->bg.starttime = 0; 3779 cci->bg.runtime = 0; 3780 cci->bg.aborted = false; 3781 cci->bg.timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, 3782 bg_timercb, cci); 3783 qemu_mutex_init(&cci->bg.lock); 3784 3785 memset(&cci->fw, 0, sizeof(cci->fw)); 3786 cci->fw.active_slot = 1; 3787 cci->fw.slot[cci->fw.active_slot - 1] = true; 3788 cci->initialized = true; 3789 } 3790 3791 void cxl_destroy_cci(CXLCCI *cci) 3792 { 3793 qemu_mutex_destroy(&cci->bg.lock); 3794 cci->initialized = false; 3795 } 3796 3797 static void cxl_copy_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmds)[256]) 3798 { 3799 for (int set = 0; set < 256; set++) { 3800 for (int cmd = 0; cmd < 256; cmd++) { 3801 if (cxl_cmds[set][cmd].handler) { 3802 cci->cxl_cmd_set[set][cmd] = cxl_cmds[set][cmd]; 3803 } 3804 } 3805 } 3806 } 3807 3808 void cxl_add_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmd_set)[256], 3809 size_t payload_max) 3810 { 3811 cci->payload_max = MAX(payload_max, cci->payload_max); 3812 cxl_copy_cci_commands(cci, cxl_cmd_set); 3813 cxl_rebuild_cel(cci); 3814 } 3815 3816 void cxl_initialize_mailbox_swcci(CXLCCI *cci, DeviceState *intf, 3817 DeviceState *d, size_t payload_max) 3818 { 3819 cxl_copy_cci_commands(cci, cxl_cmd_set_sw); 3820 cci->d = d; 3821 cci->intf = intf; 3822 cxl_init_cci(cci, payload_max); 3823 } 3824 3825 void cxl_initialize_mailbox_t3(CXLCCI *cci, DeviceState *d, size_t payload_max) 3826 { 3827 CXLType3Dev *ct3d = CXL_TYPE3(d); 3828 3829 cxl_copy_cci_commands(cci, cxl_cmd_set); 3830 if (ct3d->dc.num_regions) { 3831 cxl_copy_cci_commands(cci, cxl_cmd_set_dcd); 3832 } 3833 cci->d = d; 3834 3835 /* No separation for PCI MB as protocol handled in PCI device */ 3836 cci->intf = d; 3837 cxl_init_cci(cci, payload_max); 3838 } 3839 3840 static const struct cxl_cmd cxl_cmd_set_t3_ld[256][256] = { 3841 [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 }, 3842 [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0, 3843 0 }, 3844 [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 }, 3845 }; 3846 3847 void cxl_initialize_t3_ld_cci(CXLCCI *cci, DeviceState *d, DeviceState *intf, 3848 size_t payload_max) 3849 { 3850 cxl_copy_cci_commands(cci, cxl_cmd_set_t3_ld); 3851 cci->d = d; 3852 cci->intf = intf; 3853 cxl_init_cci(cci, payload_max); 3854 } 3855 3856 static const struct cxl_cmd cxl_cmd_set_t3_fm_owned_ld_mctp[256][256] = { 3857 [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0}, 3858 [INFOSTAT][GET_RESPONSE_MSG_LIMIT] = { "GET_RESPONSE_MSG_LIMIT", 3859 cmd_get_response_msg_limit, 0, 0 }, 3860 [INFOSTAT][SET_RESPONSE_MSG_LIMIT] = { "SET_RESPONSE_MSG_LIMIT", 3861 cmd_set_response_msg_limit, 1, 0 }, 3862 [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0, 3863 0 }, 3864 [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 }, 3865 [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 }, 3866 [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND", 3867 cmd_tunnel_management_cmd, ~0, 0 }, 3868 }; 3869 3870 void cxl_initialize_t3_fm_owned_ld_mctpcci(CXLCCI *cci, DeviceState *d, 3871 DeviceState *intf, 3872 size_t payload_max) 3873 { 3874 CXLType3Dev *ct3d = CXL_TYPE3(d); 3875 3876 cxl_copy_cci_commands(cci, cxl_cmd_set_t3_fm_owned_ld_mctp); 3877 if (ct3d->dc.num_regions) { 3878 cxl_copy_cci_commands(cci, cxl_cmd_set_fm_dcd); 3879 } 3880 cci->d = d; 3881 cci->intf = intf; 3882 cxl_init_cci(cci, payload_max); 3883 } 3884