1 /* 2 * CXL Utility library for mailbox interface 3 * 4 * Copyright(C) 2020 Intel Corporation. 5 * 6 * This work is licensed under the terms of the GNU GPL, version 2. See the 7 * COPYING file in the top-level directory. 8 */ 9 10 #include <math.h> 11 12 #include "qemu/osdep.h" 13 #include "hw/pci/msi.h" 14 #include "hw/pci/msix.h" 15 #include "hw/cxl/cxl.h" 16 #include "hw/cxl/cxl_events.h" 17 #include "hw/cxl/cxl_mailbox.h" 18 #include "hw/pci/pci.h" 19 #include "hw/pci-bridge/cxl_upstream_port.h" 20 #include "qemu/cutils.h" 21 #include "qemu/log.h" 22 #include "qemu/units.h" 23 #include "qemu/uuid.h" 24 #include "system/hostmem.h" 25 #include "qemu/range.h" 26 #include "qapi/qapi-types-cxl.h" 27 28 #define CXL_CAPACITY_MULTIPLIER (256 * MiB) 29 #define CXL_DC_EVENT_LOG_SIZE 8 30 #define CXL_NUM_EXTENTS_SUPPORTED 512 31 #define CXL_NUM_TAGS_SUPPORTED 0 32 #define CXL_ALERTS_LIFE_USED_WARN_THRESH (1 << 0) 33 #define CXL_ALERTS_OVER_TEMP_WARN_THRESH (1 << 1) 34 #define CXL_ALERTS_UNDER_TEMP_WARN_THRESH (1 << 2) 35 #define CXL_ALERTS_COR_VMEM_ERR_WARN_THRESH (1 << 3) 36 #define CXL_ALERTS_COR_PMEM_ERR_WARN_THRESH (1 << 4) 37 38 /* 39 * How to add a new command, example. The command set FOO, with cmd BAR. 40 * 1. Add the command set and cmd to the enum. 41 * FOO = 0x7f, 42 * #define BAR 0 43 * 2. Implement the handler 44 * static CXLRetCode cmd_foo_bar(struct cxl_cmd *cmd, 45 * CXLDeviceState *cxl_dstate, uint16_t *len) 46 * 3. Add the command to the cxl_cmd_set[][] 47 * [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y }, 48 * 4. Implement your handler 49 * define_mailbox_handler(FOO_BAR) { ... return CXL_MBOX_SUCCESS; } 50 * 51 * 52 * Writing the handler: 53 * The handler will provide the &struct cxl_cmd, the &CXLDeviceState, and the 54 * in/out length of the payload. The handler is responsible for consuming the 55 * payload from cmd->payload and operating upon it as necessary. It must then 56 * fill the output data into cmd->payload (overwriting what was there), 57 * setting the length, and returning a valid return code. 58 * 59 * XXX: The handler need not worry about endianness. The payload is read out of 60 * a register interface that already deals with it. 61 */ 62 63 enum { 64 INFOSTAT = 0x00, 65 #define IS_IDENTIFY 0x1 66 #define BACKGROUND_OPERATION_STATUS 0x2 67 #define GET_RESPONSE_MSG_LIMIT 0x3 68 #define SET_RESPONSE_MSG_LIMIT 0x4 69 #define BACKGROUND_OPERATION_ABORT 0x5 70 EVENTS = 0x01, 71 #define GET_RECORDS 0x0 72 #define CLEAR_RECORDS 0x1 73 #define GET_INTERRUPT_POLICY 0x2 74 #define SET_INTERRUPT_POLICY 0x3 75 FIRMWARE_UPDATE = 0x02, 76 #define GET_INFO 0x0 77 #define TRANSFER 0x1 78 #define ACTIVATE 0x2 79 TIMESTAMP = 0x03, 80 #define GET 0x0 81 #define SET 0x1 82 LOGS = 0x04, 83 #define GET_SUPPORTED 0x0 84 #define GET_LOG 0x1 85 FEATURES = 0x05, 86 #define GET_SUPPORTED 0x0 87 #define GET_FEATURE 0x1 88 #define SET_FEATURE 0x2 89 IDENTIFY = 0x40, 90 #define MEMORY_DEVICE 0x0 91 CCLS = 0x41, 92 #define GET_PARTITION_INFO 0x0 93 #define GET_LSA 0x2 94 #define SET_LSA 0x3 95 HEALTH_INFO_ALERTS = 0x42, 96 #define GET_ALERT_CONFIG 0x1 97 #define SET_ALERT_CONFIG 0x2 98 SANITIZE = 0x44, 99 #define OVERWRITE 0x0 100 #define SECURE_ERASE 0x1 101 #define MEDIA_OPERATIONS 0x2 102 PERSISTENT_MEM = 0x45, 103 #define GET_SECURITY_STATE 0x0 104 MEDIA_AND_POISON = 0x43, 105 #define GET_POISON_LIST 0x0 106 #define INJECT_POISON 0x1 107 #define CLEAR_POISON 0x2 108 #define GET_SCAN_MEDIA_CAPABILITIES 0x3 109 #define SCAN_MEDIA 0x4 110 #define GET_SCAN_MEDIA_RESULTS 0x5 111 DCD_CONFIG = 0x48, 112 #define GET_DC_CONFIG 0x0 113 #define GET_DYN_CAP_EXT_LIST 0x1 114 #define ADD_DYN_CAP_RSP 0x2 115 #define RELEASE_DYN_CAP 0x3 116 PHYSICAL_SWITCH = 0x51, 117 #define IDENTIFY_SWITCH_DEVICE 0x0 118 #define GET_PHYSICAL_PORT_STATE 0x1 119 TUNNEL = 0x53, 120 #define MANAGEMENT_COMMAND 0x0 121 FMAPI_DCD_MGMT = 0x56, 122 #define GET_DCD_INFO 0x0 123 #define GET_HOST_DC_REGION_CONFIG 0x1 124 }; 125 126 /* CCI Message Format CXL r3.1 Figure 7-19 */ 127 typedef struct CXLCCIMessage { 128 uint8_t category; 129 #define CXL_CCI_CAT_REQ 0 130 #define CXL_CCI_CAT_RSP 1 131 uint8_t tag; 132 uint8_t resv1; 133 uint8_t command; 134 uint8_t command_set; 135 uint8_t pl_length[3]; 136 uint16_t rc; 137 uint16_t vendor_specific; 138 uint8_t payload[]; 139 } QEMU_PACKED CXLCCIMessage; 140 141 /* This command is only defined to an MLD FM Owned LD or an MHD */ 142 static CXLRetCode cmd_tunnel_management_cmd(const struct cxl_cmd *cmd, 143 uint8_t *payload_in, 144 size_t len_in, 145 uint8_t *payload_out, 146 size_t *len_out, 147 CXLCCI *cci) 148 { 149 PCIDevice *tunnel_target; 150 CXLCCI *target_cci; 151 struct { 152 uint8_t port_or_ld_id; 153 uint8_t target_type; 154 uint16_t size; 155 CXLCCIMessage ccimessage; 156 } QEMU_PACKED *in; 157 struct { 158 uint16_t resp_len; 159 uint8_t resv[2]; 160 CXLCCIMessage ccimessage; 161 } QEMU_PACKED *out; 162 size_t pl_length, length_out; 163 bool bg_started; 164 int rc; 165 166 if (cmd->in < sizeof(*in)) { 167 return CXL_MBOX_INVALID_INPUT; 168 } 169 in = (void *)payload_in; 170 out = (void *)payload_out; 171 172 if (len_in < sizeof(*in)) { 173 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 174 } 175 /* Enough room for minimum sized message - no payload */ 176 if (in->size < sizeof(in->ccimessage)) { 177 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 178 } 179 /* Length of input payload should be in->size + a wrapping tunnel header */ 180 if (in->size != len_in - offsetof(typeof(*out), ccimessage)) { 181 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 182 } 183 if (in->ccimessage.category != CXL_CCI_CAT_REQ) { 184 return CXL_MBOX_INVALID_INPUT; 185 } 186 187 if (in->target_type != 0) { 188 qemu_log_mask(LOG_UNIMP, 189 "Tunneled Command sent to non existent FM-LD"); 190 return CXL_MBOX_INVALID_INPUT; 191 } 192 193 /* 194 * Target of a tunnel unfortunately depends on type of CCI readint 195 * the message. 196 * If in a switch, then it's the port number. 197 * If in an MLD it is the ld number. 198 * If in an MHD target type indicate where we are going. 199 */ 200 if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) { 201 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 202 if (in->port_or_ld_id != 0) { 203 /* Only pretending to have one for now! */ 204 return CXL_MBOX_INVALID_INPUT; 205 } 206 target_cci = &ct3d->ld0_cci; 207 } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) { 208 CXLUpstreamPort *usp = CXL_USP(cci->d); 209 210 tunnel_target = pcie_find_port_by_pn(&PCI_BRIDGE(usp)->sec_bus, 211 in->port_or_ld_id); 212 if (!tunnel_target) { 213 return CXL_MBOX_INVALID_INPUT; 214 } 215 tunnel_target = 216 pci_bridge_get_sec_bus(PCI_BRIDGE(tunnel_target))->devices[0]; 217 if (!tunnel_target) { 218 return CXL_MBOX_INVALID_INPUT; 219 } 220 if (object_dynamic_cast(OBJECT(tunnel_target), TYPE_CXL_TYPE3)) { 221 CXLType3Dev *ct3d = CXL_TYPE3(tunnel_target); 222 /* Tunneled VDMs always land on FM Owned LD */ 223 target_cci = &ct3d->vdm_fm_owned_ld_mctp_cci; 224 } else { 225 return CXL_MBOX_INVALID_INPUT; 226 } 227 } else { 228 return CXL_MBOX_INVALID_INPUT; 229 } 230 231 pl_length = in->ccimessage.pl_length[2] << 16 | 232 in->ccimessage.pl_length[1] << 8 | in->ccimessage.pl_length[0]; 233 rc = cxl_process_cci_message(target_cci, 234 in->ccimessage.command_set, 235 in->ccimessage.command, 236 pl_length, in->ccimessage.payload, 237 &length_out, out->ccimessage.payload, 238 &bg_started); 239 /* Payload should be in place. Rest of CCI header and needs filling */ 240 out->resp_len = length_out + sizeof(CXLCCIMessage); 241 st24_le_p(out->ccimessage.pl_length, length_out); 242 out->ccimessage.rc = rc; 243 out->ccimessage.category = CXL_CCI_CAT_RSP; 244 out->ccimessage.command = in->ccimessage.command; 245 out->ccimessage.command_set = in->ccimessage.command_set; 246 out->ccimessage.tag = in->ccimessage.tag; 247 *len_out = length_out + sizeof(*out); 248 249 return CXL_MBOX_SUCCESS; 250 } 251 252 static CXLRetCode cmd_events_get_records(const struct cxl_cmd *cmd, 253 uint8_t *payload_in, size_t len_in, 254 uint8_t *payload_out, size_t *len_out, 255 CXLCCI *cci) 256 { 257 CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate; 258 CXLGetEventPayload *pl; 259 uint8_t log_type; 260 int max_recs; 261 262 if (cmd->in < sizeof(log_type)) { 263 return CXL_MBOX_INVALID_INPUT; 264 } 265 266 log_type = payload_in[0]; 267 268 pl = (CXLGetEventPayload *)payload_out; 269 270 max_recs = (cxlds->payload_size - CXL_EVENT_PAYLOAD_HDR_SIZE) / 271 CXL_EVENT_RECORD_SIZE; 272 if (max_recs > 0xFFFF) { 273 max_recs = 0xFFFF; 274 } 275 276 return cxl_event_get_records(cxlds, pl, log_type, max_recs, len_out); 277 } 278 279 static CXLRetCode cmd_events_clear_records(const struct cxl_cmd *cmd, 280 uint8_t *payload_in, 281 size_t len_in, 282 uint8_t *payload_out, 283 size_t *len_out, 284 CXLCCI *cci) 285 { 286 CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate; 287 CXLClearEventPayload *pl; 288 289 pl = (CXLClearEventPayload *)payload_in; 290 291 if (len_in < sizeof(*pl) || 292 len_in < sizeof(*pl) + sizeof(*pl->handle) * pl->nr_recs) { 293 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 294 } 295 296 *len_out = 0; 297 return cxl_event_clear_records(cxlds, pl); 298 } 299 300 static CXLRetCode cmd_events_get_interrupt_policy(const struct cxl_cmd *cmd, 301 uint8_t *payload_in, 302 size_t len_in, 303 uint8_t *payload_out, 304 size_t *len_out, 305 CXLCCI *cci) 306 { 307 CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate; 308 CXLEventInterruptPolicy *policy; 309 CXLEventLog *log; 310 311 policy = (CXLEventInterruptPolicy *)payload_out; 312 313 log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO]; 314 if (log->irq_enabled) { 315 policy->info_settings = CXL_EVENT_INT_SETTING(log->irq_vec); 316 } 317 318 log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN]; 319 if (log->irq_enabled) { 320 policy->warn_settings = CXL_EVENT_INT_SETTING(log->irq_vec); 321 } 322 323 log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL]; 324 if (log->irq_enabled) { 325 policy->failure_settings = CXL_EVENT_INT_SETTING(log->irq_vec); 326 } 327 328 log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL]; 329 if (log->irq_enabled) { 330 policy->fatal_settings = CXL_EVENT_INT_SETTING(log->irq_vec); 331 } 332 333 log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP]; 334 if (log->irq_enabled) { 335 /* Dynamic Capacity borrows the same vector as info */ 336 policy->dyn_cap_settings = CXL_INT_MSI_MSIX; 337 } 338 339 *len_out = sizeof(*policy); 340 return CXL_MBOX_SUCCESS; 341 } 342 343 static CXLRetCode cmd_events_set_interrupt_policy(const struct cxl_cmd *cmd, 344 uint8_t *payload_in, 345 size_t len_in, 346 uint8_t *payload_out, 347 size_t *len_out, 348 CXLCCI *cci) 349 { 350 CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate; 351 CXLEventInterruptPolicy *policy; 352 CXLEventLog *log; 353 354 if (len_in < CXL_EVENT_INT_SETTING_MIN_LEN) { 355 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 356 } 357 358 policy = (CXLEventInterruptPolicy *)payload_in; 359 360 log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO]; 361 log->irq_enabled = (policy->info_settings & CXL_EVENT_INT_MODE_MASK) == 362 CXL_INT_MSI_MSIX; 363 364 log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN]; 365 log->irq_enabled = (policy->warn_settings & CXL_EVENT_INT_MODE_MASK) == 366 CXL_INT_MSI_MSIX; 367 368 log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL]; 369 log->irq_enabled = (policy->failure_settings & CXL_EVENT_INT_MODE_MASK) == 370 CXL_INT_MSI_MSIX; 371 372 log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL]; 373 log->irq_enabled = (policy->fatal_settings & CXL_EVENT_INT_MODE_MASK) == 374 CXL_INT_MSI_MSIX; 375 376 /* DCD is optional */ 377 if (len_in < sizeof(*policy)) { 378 return CXL_MBOX_SUCCESS; 379 } 380 381 log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP]; 382 log->irq_enabled = (policy->dyn_cap_settings & CXL_EVENT_INT_MODE_MASK) == 383 CXL_INT_MSI_MSIX; 384 385 *len_out = 0; 386 return CXL_MBOX_SUCCESS; 387 } 388 389 /* CXL r3.1 section 8.2.9.1.1: Identify (Opcode 0001h) */ 390 static CXLRetCode cmd_infostat_identify(const struct cxl_cmd *cmd, 391 uint8_t *payload_in, 392 size_t len_in, 393 uint8_t *payload_out, 394 size_t *len_out, 395 CXLCCI *cci) 396 { 397 PCIDeviceClass *class = PCI_DEVICE_GET_CLASS(cci->d); 398 struct { 399 uint16_t pcie_vid; 400 uint16_t pcie_did; 401 uint16_t pcie_subsys_vid; 402 uint16_t pcie_subsys_id; 403 uint64_t sn; 404 uint8_t max_message_size; 405 uint8_t component_type; 406 } QEMU_PACKED *is_identify; 407 QEMU_BUILD_BUG_ON(sizeof(*is_identify) != 18); 408 409 is_identify = (void *)payload_out; 410 is_identify->pcie_vid = class->vendor_id; 411 is_identify->pcie_did = class->device_id; 412 if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) { 413 is_identify->sn = CXL_USP(cci->d)->sn; 414 /* Subsystem info not defined for a USP */ 415 is_identify->pcie_subsys_vid = 0; 416 is_identify->pcie_subsys_id = 0; 417 is_identify->component_type = 0x0; /* Switch */ 418 } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) { 419 PCIDevice *pci_dev = PCI_DEVICE(cci->d); 420 421 is_identify->sn = CXL_TYPE3(cci->d)->sn; 422 /* 423 * We can't always use class->subsystem_vendor_id as 424 * it is not set if the defaults are used. 425 */ 426 is_identify->pcie_subsys_vid = 427 pci_get_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID); 428 is_identify->pcie_subsys_id = 429 pci_get_word(pci_dev->config + PCI_SUBSYSTEM_ID); 430 is_identify->component_type = 0x3; /* Type 3 */ 431 } 432 433 is_identify->max_message_size = (uint8_t)log2(cci->payload_max); 434 *len_out = sizeof(*is_identify); 435 return CXL_MBOX_SUCCESS; 436 } 437 438 /* CXL r3.1 section 8.2.9.1.3: Get Response Message Limit (Opcode 0003h) */ 439 static CXLRetCode cmd_get_response_msg_limit(const struct cxl_cmd *cmd, 440 uint8_t *payload_in, 441 size_t len_in, 442 uint8_t *payload_out, 443 size_t *len_out, 444 CXLCCI *cci) 445 { 446 struct { 447 uint8_t rsp_limit; 448 } QEMU_PACKED *get_rsp_msg_limit = (void *)payload_out; 449 QEMU_BUILD_BUG_ON(sizeof(*get_rsp_msg_limit) != 1); 450 451 get_rsp_msg_limit->rsp_limit = (uint8_t)log2(cci->payload_max); 452 453 *len_out = sizeof(*get_rsp_msg_limit); 454 return CXL_MBOX_SUCCESS; 455 } 456 457 /* CXL r3.1 section 8.2.9.1.4: Set Response Message Limit (Opcode 0004h) */ 458 static CXLRetCode cmd_set_response_msg_limit(const struct cxl_cmd *cmd, 459 uint8_t *payload_in, 460 size_t len_in, 461 uint8_t *payload_out, 462 size_t *len_out, 463 CXLCCI *cci) 464 { 465 struct { 466 uint8_t rsp_limit; 467 } QEMU_PACKED *in = (void *)payload_in; 468 QEMU_BUILD_BUG_ON(sizeof(*in) != 1); 469 struct { 470 uint8_t rsp_limit; 471 } QEMU_PACKED *out = (void *)payload_out; 472 QEMU_BUILD_BUG_ON(sizeof(*out) != 1); 473 474 if (in->rsp_limit < 8 || in->rsp_limit > 10) { 475 return CXL_MBOX_INVALID_INPUT; 476 } 477 478 cci->payload_max = 1 << in->rsp_limit; 479 out->rsp_limit = in->rsp_limit; 480 481 *len_out = sizeof(*out); 482 return CXL_MBOX_SUCCESS; 483 } 484 485 static void cxl_set_dsp_active_bm(PCIBus *b, PCIDevice *d, 486 void *private) 487 { 488 uint8_t *bm = private; 489 if (object_dynamic_cast(OBJECT(d), TYPE_CXL_DSP)) { 490 uint8_t port = PCIE_PORT(d)->port; 491 bm[port / 8] |= 1 << (port % 8); 492 } 493 } 494 495 /* CXL r3.1 Section 7.6.7.1.1: Identify Switch Device (Opcode 5100h) */ 496 static CXLRetCode cmd_identify_switch_device(const struct cxl_cmd *cmd, 497 uint8_t *payload_in, 498 size_t len_in, 499 uint8_t *payload_out, 500 size_t *len_out, 501 CXLCCI *cci) 502 { 503 PCIEPort *usp = PCIE_PORT(cci->d); 504 PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus; 505 int num_phys_ports = pcie_count_ds_ports(bus); 506 507 struct cxl_fmapi_ident_switch_dev_resp_pl { 508 uint8_t ingress_port_id; 509 uint8_t rsvd; 510 uint8_t num_physical_ports; 511 uint8_t num_vcss; 512 uint8_t active_port_bitmask[0x20]; 513 uint8_t active_vcs_bitmask[0x20]; 514 uint16_t total_vppbs; 515 uint16_t bound_vppbs; 516 uint8_t num_hdm_decoders_per_usp; 517 } QEMU_PACKED *out; 518 QEMU_BUILD_BUG_ON(sizeof(*out) != 0x49); 519 520 out = (struct cxl_fmapi_ident_switch_dev_resp_pl *)payload_out; 521 *out = (struct cxl_fmapi_ident_switch_dev_resp_pl) { 522 .num_physical_ports = num_phys_ports + 1, /* 1 USP */ 523 .num_vcss = 1, /* Not yet support multiple VCS - potentially tricky */ 524 .active_vcs_bitmask[0] = 0x1, 525 .total_vppbs = num_phys_ports + 1, 526 .bound_vppbs = num_phys_ports + 1, 527 .num_hdm_decoders_per_usp = 4, 528 }; 529 530 /* Depends on the CCI type */ 531 if (object_dynamic_cast(OBJECT(cci->intf), TYPE_PCIE_PORT)) { 532 out->ingress_port_id = PCIE_PORT(cci->intf)->port; 533 } else { 534 /* MCTP? */ 535 out->ingress_port_id = 0; 536 } 537 538 pci_for_each_device_under_bus(bus, cxl_set_dsp_active_bm, 539 out->active_port_bitmask); 540 out->active_port_bitmask[usp->port / 8] |= (1 << usp->port % 8); 541 542 *len_out = sizeof(*out); 543 544 return CXL_MBOX_SUCCESS; 545 } 546 547 /* CXL r3.1 Section 7.6.7.1.2: Get Physical Port State (Opcode 5101h) */ 548 static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd, 549 uint8_t *payload_in, 550 size_t len_in, 551 uint8_t *payload_out, 552 size_t *len_out, 553 CXLCCI *cci) 554 { 555 /* CXL r3.1 Table 7-17: Get Physical Port State Request Payload */ 556 struct cxl_fmapi_get_phys_port_state_req_pl { 557 uint8_t num_ports; 558 uint8_t ports[]; 559 } QEMU_PACKED *in; 560 561 /* 562 * CXL r3.1 Table 7-19: Get Physical Port State Port Information Block 563 * Format 564 */ 565 struct cxl_fmapi_port_state_info_block { 566 uint8_t port_id; 567 uint8_t config_state; 568 uint8_t connected_device_cxl_version; 569 uint8_t rsv1; 570 uint8_t connected_device_type; 571 uint8_t port_cxl_version_bitmask; 572 uint8_t max_link_width; 573 uint8_t negotiated_link_width; 574 uint8_t supported_link_speeds_vector; 575 uint8_t max_link_speed; 576 uint8_t current_link_speed; 577 uint8_t ltssm_state; 578 uint8_t first_lane_num; 579 uint16_t link_state; 580 uint8_t supported_ld_count; 581 } QEMU_PACKED; 582 583 /* CXL r3.1 Table 7-18: Get Physical Port State Response Payload */ 584 struct cxl_fmapi_get_phys_port_state_resp_pl { 585 uint8_t num_ports; 586 uint8_t rsv1[3]; 587 struct cxl_fmapi_port_state_info_block ports[]; 588 } QEMU_PACKED *out; 589 PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus; 590 PCIEPort *usp = PCIE_PORT(cci->d); 591 size_t pl_size; 592 int i; 593 594 in = (struct cxl_fmapi_get_phys_port_state_req_pl *)payload_in; 595 out = (struct cxl_fmapi_get_phys_port_state_resp_pl *)payload_out; 596 597 if (len_in < sizeof(*in)) { 598 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 599 } 600 /* Check if what was requested can fit */ 601 if (sizeof(*out) + sizeof(*out->ports) * in->num_ports > cci->payload_max) { 602 return CXL_MBOX_INVALID_INPUT; 603 } 604 605 /* For success there should be a match for each requested */ 606 out->num_ports = in->num_ports; 607 608 for (i = 0; i < in->num_ports; i++) { 609 struct cxl_fmapi_port_state_info_block *port; 610 /* First try to match on downstream port */ 611 PCIDevice *port_dev; 612 uint16_t lnkcap, lnkcap2, lnksta; 613 614 port = &out->ports[i]; 615 616 port_dev = pcie_find_port_by_pn(bus, in->ports[i]); 617 if (port_dev) { /* DSP */ 618 PCIDevice *ds_dev = pci_bridge_get_sec_bus(PCI_BRIDGE(port_dev)) 619 ->devices[0]; 620 port->config_state = 3; 621 if (ds_dev) { 622 if (object_dynamic_cast(OBJECT(ds_dev), TYPE_CXL_TYPE3)) { 623 port->connected_device_type = 5; /* Assume MLD for now */ 624 } else { 625 port->connected_device_type = 1; 626 } 627 } else { 628 port->connected_device_type = 0; 629 } 630 port->supported_ld_count = 3; 631 } else if (usp->port == in->ports[i]) { /* USP */ 632 port_dev = PCI_DEVICE(usp); 633 port->config_state = 4; 634 port->connected_device_type = 0; 635 } else { 636 return CXL_MBOX_INVALID_INPUT; 637 } 638 639 port->port_id = in->ports[i]; 640 /* Information on status of this port in lnksta, lnkcap */ 641 if (!port_dev->exp.exp_cap) { 642 return CXL_MBOX_INTERNAL_ERROR; 643 } 644 lnksta = port_dev->config_read(port_dev, 645 port_dev->exp.exp_cap + PCI_EXP_LNKSTA, 646 sizeof(lnksta)); 647 lnkcap = port_dev->config_read(port_dev, 648 port_dev->exp.exp_cap + PCI_EXP_LNKCAP, 649 sizeof(lnkcap)); 650 lnkcap2 = port_dev->config_read(port_dev, 651 port_dev->exp.exp_cap + PCI_EXP_LNKCAP2, 652 sizeof(lnkcap2)); 653 654 port->max_link_width = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4; 655 port->negotiated_link_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> 4; 656 /* No definition for SLS field in linux/pci_regs.h */ 657 port->supported_link_speeds_vector = (lnkcap2 & 0xFE) >> 1; 658 port->max_link_speed = lnkcap & PCI_EXP_LNKCAP_SLS; 659 port->current_link_speed = lnksta & PCI_EXP_LNKSTA_CLS; 660 /* TODO: Track down if we can get the rest of the info */ 661 port->ltssm_state = 0x7; 662 port->first_lane_num = 0; 663 port->link_state = 0; 664 port->port_cxl_version_bitmask = 0x2; 665 port->connected_device_cxl_version = 0x2; 666 } 667 668 pl_size = sizeof(*out) + sizeof(*out->ports) * in->num_ports; 669 *len_out = pl_size; 670 671 return CXL_MBOX_SUCCESS; 672 } 673 674 /* CXL r3.1 Section 8.2.9.1.2: Background Operation Status (Opcode 0002h) */ 675 static CXLRetCode cmd_infostat_bg_op_sts(const struct cxl_cmd *cmd, 676 uint8_t *payload_in, 677 size_t len_in, 678 uint8_t *payload_out, 679 size_t *len_out, 680 CXLCCI *cci) 681 { 682 struct { 683 uint8_t status; 684 uint8_t rsvd; 685 uint16_t opcode; 686 uint16_t returncode; 687 uint16_t vendor_ext_status; 688 } QEMU_PACKED *bg_op_status; 689 QEMU_BUILD_BUG_ON(sizeof(*bg_op_status) != 8); 690 691 bg_op_status = (void *)payload_out; 692 bg_op_status->status = cci->bg.complete_pct << 1; 693 if (cci->bg.runtime > 0) { 694 bg_op_status->status |= 1U << 0; 695 } 696 bg_op_status->opcode = cci->bg.opcode; 697 bg_op_status->returncode = cci->bg.ret_code; 698 *len_out = sizeof(*bg_op_status); 699 700 return CXL_MBOX_SUCCESS; 701 } 702 703 /* 704 * CXL r3.1 Section 8.2.9.1.5: 705 * Request Abort Background Operation (Opcode 0005h) 706 */ 707 static CXLRetCode cmd_infostat_bg_op_abort(const struct cxl_cmd *cmd, 708 uint8_t *payload_in, 709 size_t len_in, 710 uint8_t *payload_out, 711 size_t *len_out, 712 CXLCCI *cci) 713 { 714 int bg_set = cci->bg.opcode >> 8; 715 int bg_cmd = cci->bg.opcode & 0xff; 716 const struct cxl_cmd *bg_c = &cci->cxl_cmd_set[bg_set][bg_cmd]; 717 718 if (!(bg_c->effect & CXL_MBOX_BACKGROUND_OPERATION_ABORT)) { 719 return CXL_MBOX_REQUEST_ABORT_NOTSUP; 720 } 721 722 qemu_mutex_lock(&cci->bg.lock); 723 if (cci->bg.runtime) { 724 /* operation is near complete, let it finish */ 725 if (cci->bg.complete_pct < 85) { 726 timer_del(cci->bg.timer); 727 cci->bg.ret_code = CXL_MBOX_ABORTED; 728 cci->bg.starttime = 0; 729 cci->bg.runtime = 0; 730 cci->bg.aborted = true; 731 } 732 } 733 qemu_mutex_unlock(&cci->bg.lock); 734 735 return CXL_MBOX_SUCCESS; 736 } 737 738 #define CXL_FW_SLOTS 2 739 #define CXL_FW_SIZE 0x02000000 /* 32 mb */ 740 741 /* CXL r3.1 Section 8.2.9.3.1: Get FW Info (Opcode 0200h) */ 742 static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd, 743 uint8_t *payload_in, 744 size_t len, 745 uint8_t *payload_out, 746 size_t *len_out, 747 CXLCCI *cci) 748 { 749 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 750 CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; 751 struct { 752 uint8_t slots_supported; 753 uint8_t slot_info; 754 uint8_t caps; 755 uint8_t rsvd[0xd]; 756 char fw_rev1[0x10]; 757 char fw_rev2[0x10]; 758 char fw_rev3[0x10]; 759 char fw_rev4[0x10]; 760 } QEMU_PACKED *fw_info; 761 QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50); 762 763 if (!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER) || 764 !QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER) || 765 !QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER)) { 766 return CXL_MBOX_INTERNAL_ERROR; 767 } 768 769 fw_info = (void *)payload_out; 770 771 fw_info->slots_supported = CXL_FW_SLOTS; 772 fw_info->slot_info = (cci->fw.active_slot & 0x7) | 773 ((cci->fw.staged_slot & 0x7) << 3); 774 fw_info->caps = BIT(0); /* online update supported */ 775 776 if (cci->fw.slot[0]) { 777 pstrcpy(fw_info->fw_rev1, sizeof(fw_info->fw_rev1), "BWFW VERSION 0"); 778 } 779 if (cci->fw.slot[1]) { 780 pstrcpy(fw_info->fw_rev2, sizeof(fw_info->fw_rev2), "BWFW VERSION 1"); 781 } 782 783 *len_out = sizeof(*fw_info); 784 return CXL_MBOX_SUCCESS; 785 } 786 787 /* CXL r3.1 section 8.2.9.3.2: Transfer FW (Opcode 0201h) */ 788 #define CXL_FW_XFER_ALIGNMENT 128 789 790 #define CXL_FW_XFER_ACTION_FULL 0x0 791 #define CXL_FW_XFER_ACTION_INIT 0x1 792 #define CXL_FW_XFER_ACTION_CONTINUE 0x2 793 #define CXL_FW_XFER_ACTION_END 0x3 794 #define CXL_FW_XFER_ACTION_ABORT 0x4 795 796 static CXLRetCode cmd_firmware_update_transfer(const struct cxl_cmd *cmd, 797 uint8_t *payload_in, 798 size_t len, 799 uint8_t *payload_out, 800 size_t *len_out, 801 CXLCCI *cci) 802 { 803 struct { 804 uint8_t action; 805 uint8_t slot; 806 uint8_t rsvd1[2]; 807 uint32_t offset; 808 uint8_t rsvd2[0x78]; 809 uint8_t data[]; 810 } QEMU_PACKED *fw_transfer = (void *)payload_in; 811 size_t offset, length; 812 813 if (len < sizeof(*fw_transfer)) { 814 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 815 } 816 817 if (fw_transfer->action == CXL_FW_XFER_ACTION_ABORT) { 818 /* 819 * At this point there aren't any on-going transfers 820 * running in the bg - this is serialized before this 821 * call altogether. Just mark the state machine and 822 * disregard any other input. 823 */ 824 cci->fw.transferring = false; 825 return CXL_MBOX_SUCCESS; 826 } 827 828 offset = fw_transfer->offset * CXL_FW_XFER_ALIGNMENT; 829 length = len - sizeof(*fw_transfer); 830 if (offset + length > CXL_FW_SIZE) { 831 return CXL_MBOX_INVALID_INPUT; 832 } 833 834 if (cci->fw.transferring) { 835 if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL || 836 fw_transfer->action == CXL_FW_XFER_ACTION_INIT) { 837 return CXL_MBOX_FW_XFER_IN_PROGRESS; 838 } 839 /* 840 * Abort partitioned package transfer if over 30 secs 841 * between parts. As opposed to the explicit ABORT action, 842 * semantically treat this condition as an error - as 843 * if a part action were passed without a previous INIT. 844 */ 845 if (difftime(time(NULL), cci->fw.last_partxfer) > 30.0) { 846 cci->fw.transferring = false; 847 return CXL_MBOX_INVALID_INPUT; 848 } 849 } else if (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE || 850 fw_transfer->action == CXL_FW_XFER_ACTION_END) { 851 return CXL_MBOX_INVALID_INPUT; 852 } 853 854 /* allow back-to-back retransmission */ 855 if ((offset != cci->fw.prev_offset || length != cci->fw.prev_len) && 856 (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE || 857 fw_transfer->action == CXL_FW_XFER_ACTION_END)) { 858 /* verify no overlaps */ 859 if (offset < cci->fw.prev_offset + cci->fw.prev_len) { 860 return CXL_MBOX_FW_XFER_OUT_OF_ORDER; 861 } 862 } 863 864 switch (fw_transfer->action) { 865 case CXL_FW_XFER_ACTION_FULL: /* ignores offset */ 866 case CXL_FW_XFER_ACTION_END: 867 if (fw_transfer->slot == 0 || 868 fw_transfer->slot == cci->fw.active_slot || 869 fw_transfer->slot > CXL_FW_SLOTS) { 870 return CXL_MBOX_FW_INVALID_SLOT; 871 } 872 873 /* mark the slot used upon bg completion */ 874 break; 875 case CXL_FW_XFER_ACTION_INIT: 876 if (offset != 0) { 877 return CXL_MBOX_INVALID_INPUT; 878 } 879 880 cci->fw.transferring = true; 881 cci->fw.prev_offset = offset; 882 cci->fw.prev_len = length; 883 break; 884 case CXL_FW_XFER_ACTION_CONTINUE: 885 cci->fw.prev_offset = offset; 886 cci->fw.prev_len = length; 887 break; 888 default: 889 return CXL_MBOX_INVALID_INPUT; 890 } 891 892 if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL) { 893 cci->bg.runtime = 10 * 1000UL; 894 } else { 895 cci->bg.runtime = 2 * 1000UL; 896 } 897 /* keep relevant context for bg completion */ 898 cci->fw.curr_action = fw_transfer->action; 899 cci->fw.curr_slot = fw_transfer->slot; 900 *len_out = 0; 901 902 return CXL_MBOX_BG_STARTED; 903 } 904 905 static void __do_firmware_xfer(CXLCCI *cci) 906 { 907 switch (cci->fw.curr_action) { 908 case CXL_FW_XFER_ACTION_FULL: 909 case CXL_FW_XFER_ACTION_END: 910 cci->fw.slot[cci->fw.curr_slot - 1] = true; 911 cci->fw.transferring = false; 912 break; 913 case CXL_FW_XFER_ACTION_INIT: 914 case CXL_FW_XFER_ACTION_CONTINUE: 915 time(&cci->fw.last_partxfer); 916 break; 917 default: 918 break; 919 } 920 } 921 922 /* CXL r3.1 section 8.2.9.3.3: Activate FW (Opcode 0202h) */ 923 static CXLRetCode cmd_firmware_update_activate(const struct cxl_cmd *cmd, 924 uint8_t *payload_in, 925 size_t len, 926 uint8_t *payload_out, 927 size_t *len_out, 928 CXLCCI *cci) 929 { 930 struct { 931 uint8_t action; 932 uint8_t slot; 933 } QEMU_PACKED *fw_activate = (void *)payload_in; 934 QEMU_BUILD_BUG_ON(sizeof(*fw_activate) != 0x2); 935 936 if (fw_activate->slot == 0 || 937 fw_activate->slot == cci->fw.active_slot || 938 fw_activate->slot > CXL_FW_SLOTS) { 939 return CXL_MBOX_FW_INVALID_SLOT; 940 } 941 942 /* ensure that an actual fw package is there */ 943 if (!cci->fw.slot[fw_activate->slot - 1]) { 944 return CXL_MBOX_FW_INVALID_SLOT; 945 } 946 947 switch (fw_activate->action) { 948 case 0: /* online */ 949 cci->fw.active_slot = fw_activate->slot; 950 break; 951 case 1: /* reset */ 952 cci->fw.staged_slot = fw_activate->slot; 953 break; 954 default: 955 return CXL_MBOX_INVALID_INPUT; 956 } 957 958 return CXL_MBOX_SUCCESS; 959 } 960 961 /* CXL r3.1 Section 8.2.9.4.1: Get Timestamp (Opcode 0300h) */ 962 static CXLRetCode cmd_timestamp_get(const struct cxl_cmd *cmd, 963 uint8_t *payload_in, 964 size_t len_in, 965 uint8_t *payload_out, 966 size_t *len_out, 967 CXLCCI *cci) 968 { 969 CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate; 970 uint64_t final_time = cxl_device_get_timestamp(cxl_dstate); 971 972 stq_le_p(payload_out, final_time); 973 *len_out = 8; 974 975 return CXL_MBOX_SUCCESS; 976 } 977 978 /* CXL r3.1 Section 8.2.9.4.2: Set Timestamp (Opcode 0301h) */ 979 static CXLRetCode cmd_timestamp_set(const struct cxl_cmd *cmd, 980 uint8_t *payload_in, 981 size_t len_in, 982 uint8_t *payload_out, 983 size_t *len_out, 984 CXLCCI *cci) 985 { 986 CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate; 987 988 cxl_dstate->timestamp.set = true; 989 cxl_dstate->timestamp.last_set = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); 990 991 cxl_dstate->timestamp.host_set = le64_to_cpu(*(uint64_t *)payload_in); 992 993 *len_out = 0; 994 return CXL_MBOX_SUCCESS; 995 } 996 997 /* CXL r3.1 Section 8.2.9.5.2.1: Command Effects Log (CEL) */ 998 static const QemuUUID cel_uuid = { 999 .data = UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79, 1000 0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17) 1001 }; 1002 1003 /* CXL r3.1 Section 8.2.9.5.1: Get Supported Logs (Opcode 0400h) */ 1004 static CXLRetCode cmd_logs_get_supported(const struct cxl_cmd *cmd, 1005 uint8_t *payload_in, 1006 size_t len_in, 1007 uint8_t *payload_out, 1008 size_t *len_out, 1009 CXLCCI *cci) 1010 { 1011 struct { 1012 uint16_t entries; 1013 uint8_t rsvd[6]; 1014 struct { 1015 QemuUUID uuid; 1016 uint32_t size; 1017 } log_entries[1]; 1018 } QEMU_PACKED *supported_logs = (void *)payload_out; 1019 QEMU_BUILD_BUG_ON(sizeof(*supported_logs) != 0x1c); 1020 1021 supported_logs->entries = 1; 1022 supported_logs->log_entries[0].uuid = cel_uuid; 1023 supported_logs->log_entries[0].size = 4 * cci->cel_size; 1024 1025 *len_out = sizeof(*supported_logs); 1026 return CXL_MBOX_SUCCESS; 1027 } 1028 1029 /* CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h) */ 1030 static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd, 1031 uint8_t *payload_in, 1032 size_t len_in, 1033 uint8_t *payload_out, 1034 size_t *len_out, 1035 CXLCCI *cci) 1036 { 1037 struct { 1038 QemuUUID uuid; 1039 uint32_t offset; 1040 uint32_t length; 1041 } QEMU_PACKED QEMU_ALIGNED(16) *get_log; 1042 1043 get_log = (void *)payload_in; 1044 1045 if (get_log->length > cci->payload_max) { 1046 return CXL_MBOX_INVALID_INPUT; 1047 } 1048 1049 if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) { 1050 return CXL_MBOX_INVALID_LOG; 1051 } 1052 1053 /* 1054 * CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h) 1055 * The device shall return Invalid Input if the Offset or Length 1056 * fields attempt to access beyond the size of the log as reported by Get 1057 * Supported Log. 1058 * 1059 * Only valid for there to be one entry per opcode, but the length + offset 1060 * may still be greater than that if the inputs are not valid and so access 1061 * beyond the end of cci->cel_log. 1062 */ 1063 if ((uint64_t)get_log->offset + get_log->length >= sizeof(cci->cel_log)) { 1064 return CXL_MBOX_INVALID_INPUT; 1065 } 1066 1067 /* Store off everything to local variables so we can wipe out the payload */ 1068 *len_out = get_log->length; 1069 1070 memmove(payload_out, cci->cel_log + get_log->offset, get_log->length); 1071 1072 return CXL_MBOX_SUCCESS; 1073 } 1074 1075 /* CXL r3.1 section 8.2.9.6: Features */ 1076 /* 1077 * Get Supported Features output payload 1078 * CXL r3.1 section 8.2.9.6.1 Table 8-96 1079 */ 1080 typedef struct CXLSupportedFeatureHeader { 1081 uint16_t entries; 1082 uint16_t nsuppfeats_dev; 1083 uint32_t reserved; 1084 } QEMU_PACKED CXLSupportedFeatureHeader; 1085 1086 /* 1087 * Get Supported Features Supported Feature Entry 1088 * CXL r3.1 section 8.2.9.6.1 Table 8-97 1089 */ 1090 typedef struct CXLSupportedFeatureEntry { 1091 QemuUUID uuid; 1092 uint16_t feat_index; 1093 uint16_t get_feat_size; 1094 uint16_t set_feat_size; 1095 uint32_t attr_flags; 1096 uint8_t get_feat_version; 1097 uint8_t set_feat_version; 1098 uint16_t set_feat_effects; 1099 uint8_t rsvd[18]; 1100 } QEMU_PACKED CXLSupportedFeatureEntry; 1101 1102 /* 1103 * Get Supported Features Supported Feature Entry 1104 * CXL rev 3.1 section 8.2.9.6.1 Table 8-97 1105 */ 1106 /* Supported Feature Entry : attribute flags */ 1107 #define CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE BIT(0) 1108 #define CXL_FEAT_ENTRY_ATTR_FLAG_DEEPEST_RESET_PERSISTENCE_MASK GENMASK(3, 1) 1109 #define CXL_FEAT_ENTRY_ATTR_FLAG_PERSIST_ACROSS_FIRMWARE_UPDATE BIT(4) 1110 #define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_DEFAULT_SELECTION BIT(5) 1111 #define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_SAVED_SELECTION BIT(6) 1112 1113 /* Supported Feature Entry : set feature effects */ 1114 #define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_COLD_RESET BIT(0) 1115 #define CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE BIT(1) 1116 #define CXL_FEAT_ENTRY_SFE_IMMEDIATE_DATA_CHANGE BIT(2) 1117 #define CXL_FEAT_ENTRY_SFE_IMMEDIATE_POLICY_CHANGE BIT(3) 1118 #define CXL_FEAT_ENTRY_SFE_IMMEDIATE_LOG_CHANGE BIT(4) 1119 #define CXL_FEAT_ENTRY_SFE_SECURITY_STATE_CHANGE BIT(5) 1120 #define CXL_FEAT_ENTRY_SFE_BACKGROUND_OPERATION BIT(6) 1121 #define CXL_FEAT_ENTRY_SFE_SUPPORT_SECONDARY_MAILBOX BIT(7) 1122 #define CXL_FEAT_ENTRY_SFE_SUPPORT_ABORT_BACKGROUND_OPERATION BIT(8) 1123 #define CXL_FEAT_ENTRY_SFE_CEL_VALID BIT(9) 1124 #define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CONV_RESET BIT(10) 1125 #define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CXL_RESET BIT(11) 1126 1127 enum CXL_SUPPORTED_FEATURES_LIST { 1128 CXL_FEATURE_PATROL_SCRUB = 0, 1129 CXL_FEATURE_ECS, 1130 CXL_FEATURE_MAX 1131 }; 1132 1133 /* Get Feature CXL 3.1 Spec 8.2.9.6.2 */ 1134 /* 1135 * Get Feature input payload 1136 * CXL r3.1 section 8.2.9.6.2 Table 8-99 1137 */ 1138 /* Get Feature : Payload in selection */ 1139 enum CXL_GET_FEATURE_SELECTION { 1140 CXL_GET_FEATURE_SEL_CURRENT_VALUE, 1141 CXL_GET_FEATURE_SEL_DEFAULT_VALUE, 1142 CXL_GET_FEATURE_SEL_SAVED_VALUE, 1143 CXL_GET_FEATURE_SEL_MAX 1144 }; 1145 1146 /* Set Feature CXL 3.1 Spec 8.2.9.6.3 */ 1147 /* 1148 * Set Feature input payload 1149 * CXL r3.1 section 8.2.9.6.3 Table 8-101 1150 */ 1151 typedef struct CXLSetFeatureInHeader { 1152 QemuUUID uuid; 1153 uint32_t flags; 1154 uint16_t offset; 1155 uint8_t version; 1156 uint8_t rsvd[9]; 1157 } QEMU_PACKED QEMU_ALIGNED(16) CXLSetFeatureInHeader; 1158 1159 /* Set Feature : Payload in flags */ 1160 #define CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK 0x7 1161 enum CXL_SET_FEATURE_FLAG_DATA_TRANSFER { 1162 CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER, 1163 CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER, 1164 CXL_SET_FEATURE_FLAG_CONTINUE_DATA_TRANSFER, 1165 CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER, 1166 CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER, 1167 CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MAX 1168 }; 1169 #define CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET BIT(3) 1170 1171 /* CXL r3.1 section 8.2.9.9.11.1: Device Patrol Scrub Control Feature */ 1172 static const QemuUUID patrol_scrub_uuid = { 1173 .data = UUID(0x96dad7d6, 0xfde8, 0x482b, 0xa7, 0x33, 1174 0x75, 0x77, 0x4e, 0x06, 0xdb, 0x8a) 1175 }; 1176 1177 typedef struct CXLMemPatrolScrubSetFeature { 1178 CXLSetFeatureInHeader hdr; 1179 CXLMemPatrolScrubWriteAttrs feat_data; 1180 } QEMU_PACKED QEMU_ALIGNED(16) CXLMemPatrolScrubSetFeature; 1181 1182 /* 1183 * CXL r3.1 section 8.2.9.9.11.2: 1184 * DDR5 Error Check Scrub (ECS) Control Feature 1185 */ 1186 static const QemuUUID ecs_uuid = { 1187 .data = UUID(0xe5b13f22, 0x2328, 0x4a14, 0xb8, 0xba, 1188 0xb9, 0x69, 0x1e, 0x89, 0x33, 0x86) 1189 }; 1190 1191 typedef struct CXLMemECSSetFeature { 1192 CXLSetFeatureInHeader hdr; 1193 CXLMemECSWriteAttrs feat_data[]; 1194 } QEMU_PACKED QEMU_ALIGNED(16) CXLMemECSSetFeature; 1195 1196 /* CXL r3.1 section 8.2.9.6.1: Get Supported Features (Opcode 0500h) */ 1197 static CXLRetCode cmd_features_get_supported(const struct cxl_cmd *cmd, 1198 uint8_t *payload_in, 1199 size_t len_in, 1200 uint8_t *payload_out, 1201 size_t *len_out, 1202 CXLCCI *cci) 1203 { 1204 struct { 1205 uint32_t count; 1206 uint16_t start_index; 1207 uint16_t reserved; 1208 } QEMU_PACKED QEMU_ALIGNED(16) * get_feats_in = (void *)payload_in; 1209 1210 struct { 1211 CXLSupportedFeatureHeader hdr; 1212 CXLSupportedFeatureEntry feat_entries[]; 1213 } QEMU_PACKED QEMU_ALIGNED(16) * get_feats_out = (void *)payload_out; 1214 uint16_t index, req_entries; 1215 uint16_t entry; 1216 1217 if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) { 1218 return CXL_MBOX_UNSUPPORTED; 1219 } 1220 if (get_feats_in->count < sizeof(CXLSupportedFeatureHeader) || 1221 get_feats_in->start_index >= CXL_FEATURE_MAX) { 1222 return CXL_MBOX_INVALID_INPUT; 1223 } 1224 1225 req_entries = (get_feats_in->count - 1226 sizeof(CXLSupportedFeatureHeader)) / 1227 sizeof(CXLSupportedFeatureEntry); 1228 req_entries = MIN(req_entries, 1229 (CXL_FEATURE_MAX - get_feats_in->start_index)); 1230 1231 for (entry = 0, index = get_feats_in->start_index; 1232 entry < req_entries; index++) { 1233 switch (index) { 1234 case CXL_FEATURE_PATROL_SCRUB: 1235 /* Fill supported feature entry for device patrol scrub control */ 1236 get_feats_out->feat_entries[entry++] = 1237 (struct CXLSupportedFeatureEntry) { 1238 .uuid = patrol_scrub_uuid, 1239 .feat_index = index, 1240 .get_feat_size = sizeof(CXLMemPatrolScrubReadAttrs), 1241 .set_feat_size = sizeof(CXLMemPatrolScrubWriteAttrs), 1242 .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE, 1243 .get_feat_version = CXL_MEMDEV_PS_GET_FEATURE_VERSION, 1244 .set_feat_version = CXL_MEMDEV_PS_SET_FEATURE_VERSION, 1245 .set_feat_effects = CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE | 1246 CXL_FEAT_ENTRY_SFE_CEL_VALID, 1247 }; 1248 break; 1249 case CXL_FEATURE_ECS: 1250 /* Fill supported feature entry for device DDR5 ECS control */ 1251 get_feats_out->feat_entries[entry++] = 1252 (struct CXLSupportedFeatureEntry) { 1253 .uuid = ecs_uuid, 1254 .feat_index = index, 1255 .get_feat_size = sizeof(CXLMemECSReadAttrs), 1256 .set_feat_size = sizeof(CXLMemECSWriteAttrs), 1257 .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE, 1258 .get_feat_version = CXL_ECS_GET_FEATURE_VERSION, 1259 .set_feat_version = CXL_ECS_SET_FEATURE_VERSION, 1260 .set_feat_effects = CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE | 1261 CXL_FEAT_ENTRY_SFE_CEL_VALID, 1262 }; 1263 break; 1264 default: 1265 __builtin_unreachable(); 1266 } 1267 } 1268 get_feats_out->hdr.nsuppfeats_dev = CXL_FEATURE_MAX; 1269 get_feats_out->hdr.entries = req_entries; 1270 *len_out = sizeof(CXLSupportedFeatureHeader) + 1271 req_entries * sizeof(CXLSupportedFeatureEntry); 1272 1273 return CXL_MBOX_SUCCESS; 1274 } 1275 1276 /* CXL r3.1 section 8.2.9.6.2: Get Feature (Opcode 0501h) */ 1277 static CXLRetCode cmd_features_get_feature(const struct cxl_cmd *cmd, 1278 uint8_t *payload_in, 1279 size_t len_in, 1280 uint8_t *payload_out, 1281 size_t *len_out, 1282 CXLCCI *cci) 1283 { 1284 struct { 1285 QemuUUID uuid; 1286 uint16_t offset; 1287 uint16_t count; 1288 uint8_t selection; 1289 } QEMU_PACKED QEMU_ALIGNED(16) * get_feature; 1290 uint16_t bytes_to_copy = 0; 1291 CXLType3Dev *ct3d; 1292 CXLSetFeatureInfo *set_feat_info; 1293 1294 if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) { 1295 return CXL_MBOX_UNSUPPORTED; 1296 } 1297 1298 ct3d = CXL_TYPE3(cci->d); 1299 get_feature = (void *)payload_in; 1300 1301 set_feat_info = &ct3d->set_feat_info; 1302 if (qemu_uuid_is_equal(&get_feature->uuid, &set_feat_info->uuid)) { 1303 return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS; 1304 } 1305 1306 if (get_feature->selection != CXL_GET_FEATURE_SEL_CURRENT_VALUE) { 1307 return CXL_MBOX_UNSUPPORTED; 1308 } 1309 if (get_feature->offset + get_feature->count > cci->payload_max) { 1310 return CXL_MBOX_INVALID_INPUT; 1311 } 1312 1313 if (qemu_uuid_is_equal(&get_feature->uuid, &patrol_scrub_uuid)) { 1314 if (get_feature->offset >= sizeof(CXLMemPatrolScrubReadAttrs)) { 1315 return CXL_MBOX_INVALID_INPUT; 1316 } 1317 bytes_to_copy = sizeof(CXLMemPatrolScrubReadAttrs) - 1318 get_feature->offset; 1319 bytes_to_copy = MIN(bytes_to_copy, get_feature->count); 1320 memcpy(payload_out, 1321 (uint8_t *)&ct3d->patrol_scrub_attrs + get_feature->offset, 1322 bytes_to_copy); 1323 } else if (qemu_uuid_is_equal(&get_feature->uuid, &ecs_uuid)) { 1324 if (get_feature->offset >= sizeof(CXLMemECSReadAttrs)) { 1325 return CXL_MBOX_INVALID_INPUT; 1326 } 1327 bytes_to_copy = sizeof(CXLMemECSReadAttrs) - get_feature->offset; 1328 bytes_to_copy = MIN(bytes_to_copy, get_feature->count); 1329 memcpy(payload_out, 1330 (uint8_t *)&ct3d->ecs_attrs + get_feature->offset, 1331 bytes_to_copy); 1332 } else { 1333 return CXL_MBOX_UNSUPPORTED; 1334 } 1335 1336 *len_out = bytes_to_copy; 1337 1338 return CXL_MBOX_SUCCESS; 1339 } 1340 1341 /* CXL r3.1 section 8.2.9.6.3: Set Feature (Opcode 0502h) */ 1342 static CXLRetCode cmd_features_set_feature(const struct cxl_cmd *cmd, 1343 uint8_t *payload_in, 1344 size_t len_in, 1345 uint8_t *payload_out, 1346 size_t *len_out, 1347 CXLCCI *cci) 1348 { 1349 CXLSetFeatureInHeader *hdr = (void *)payload_in; 1350 CXLMemPatrolScrubWriteAttrs *ps_write_attrs; 1351 CXLMemPatrolScrubSetFeature *ps_set_feature; 1352 CXLMemECSWriteAttrs *ecs_write_attrs; 1353 CXLMemECSSetFeature *ecs_set_feature; 1354 CXLSetFeatureInfo *set_feat_info; 1355 uint16_t bytes_to_copy = 0; 1356 uint8_t data_transfer_flag; 1357 CXLType3Dev *ct3d; 1358 uint16_t count; 1359 1360 if (len_in < sizeof(*hdr)) { 1361 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 1362 } 1363 1364 if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) { 1365 return CXL_MBOX_UNSUPPORTED; 1366 } 1367 ct3d = CXL_TYPE3(cci->d); 1368 set_feat_info = &ct3d->set_feat_info; 1369 1370 if (!qemu_uuid_is_null(&set_feat_info->uuid) && 1371 !qemu_uuid_is_equal(&hdr->uuid, &set_feat_info->uuid)) { 1372 return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS; 1373 } 1374 if (hdr->flags & CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET) { 1375 set_feat_info->data_saved_across_reset = true; 1376 } else { 1377 set_feat_info->data_saved_across_reset = false; 1378 } 1379 1380 data_transfer_flag = 1381 hdr->flags & CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK; 1382 if (data_transfer_flag == CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER) { 1383 set_feat_info->uuid = hdr->uuid; 1384 set_feat_info->data_size = 0; 1385 } 1386 set_feat_info->data_transfer_flag = data_transfer_flag; 1387 set_feat_info->data_offset = hdr->offset; 1388 bytes_to_copy = len_in - sizeof(CXLSetFeatureInHeader); 1389 1390 if (bytes_to_copy == 0) { 1391 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 1392 } 1393 1394 if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) { 1395 if (hdr->version != CXL_MEMDEV_PS_SET_FEATURE_VERSION) { 1396 return CXL_MBOX_UNSUPPORTED; 1397 } 1398 1399 ps_set_feature = (void *)payload_in; 1400 ps_write_attrs = &ps_set_feature->feat_data; 1401 1402 if ((uint32_t)hdr->offset + bytes_to_copy > 1403 sizeof(ct3d->patrol_scrub_wr_attrs)) { 1404 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 1405 } 1406 memcpy((uint8_t *)&ct3d->patrol_scrub_wr_attrs + hdr->offset, 1407 ps_write_attrs, 1408 bytes_to_copy); 1409 set_feat_info->data_size += bytes_to_copy; 1410 1411 if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER || 1412 data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) { 1413 ct3d->patrol_scrub_attrs.scrub_cycle &= ~0xFF; 1414 ct3d->patrol_scrub_attrs.scrub_cycle |= 1415 ct3d->patrol_scrub_wr_attrs.scrub_cycle_hr & 0xFF; 1416 ct3d->patrol_scrub_attrs.scrub_flags &= ~0x1; 1417 ct3d->patrol_scrub_attrs.scrub_flags |= 1418 ct3d->patrol_scrub_wr_attrs.scrub_flags & 0x1; 1419 } 1420 } else if (qemu_uuid_is_equal(&hdr->uuid, 1421 &ecs_uuid)) { 1422 if (hdr->version != CXL_ECS_SET_FEATURE_VERSION) { 1423 return CXL_MBOX_UNSUPPORTED; 1424 } 1425 1426 ecs_set_feature = (void *)payload_in; 1427 ecs_write_attrs = ecs_set_feature->feat_data; 1428 1429 if ((uint32_t)hdr->offset + bytes_to_copy > 1430 sizeof(ct3d->ecs_wr_attrs)) { 1431 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 1432 } 1433 memcpy((uint8_t *)&ct3d->ecs_wr_attrs + hdr->offset, 1434 ecs_write_attrs, 1435 bytes_to_copy); 1436 set_feat_info->data_size += bytes_to_copy; 1437 1438 if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER || 1439 data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) { 1440 ct3d->ecs_attrs.ecs_log_cap = ct3d->ecs_wr_attrs.ecs_log_cap; 1441 for (count = 0; count < CXL_ECS_NUM_MEDIA_FRUS; count++) { 1442 ct3d->ecs_attrs.fru_attrs[count].ecs_config = 1443 ct3d->ecs_wr_attrs.fru_attrs[count].ecs_config & 0x1F; 1444 } 1445 } 1446 } else { 1447 return CXL_MBOX_UNSUPPORTED; 1448 } 1449 1450 if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER || 1451 data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER || 1452 data_transfer_flag == CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER) { 1453 memset(&set_feat_info->uuid, 0, sizeof(QemuUUID)); 1454 if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) { 1455 memset(&ct3d->patrol_scrub_wr_attrs, 0, set_feat_info->data_size); 1456 } else if (qemu_uuid_is_equal(&hdr->uuid, &ecs_uuid)) { 1457 memset(&ct3d->ecs_wr_attrs, 0, set_feat_info->data_size); 1458 } 1459 set_feat_info->data_transfer_flag = 0; 1460 set_feat_info->data_saved_across_reset = false; 1461 set_feat_info->data_offset = 0; 1462 set_feat_info->data_size = 0; 1463 } 1464 1465 return CXL_MBOX_SUCCESS; 1466 } 1467 1468 /* CXL r3.1 Section 8.2.9.9.1.1: Identify Memory Device (Opcode 4000h) */ 1469 static CXLRetCode cmd_identify_memory_device(const struct cxl_cmd *cmd, 1470 uint8_t *payload_in, 1471 size_t len_in, 1472 uint8_t *payload_out, 1473 size_t *len_out, 1474 CXLCCI *cci) 1475 { 1476 struct { 1477 char fw_revision[0x10]; 1478 uint64_t total_capacity; 1479 uint64_t volatile_capacity; 1480 uint64_t persistent_capacity; 1481 uint64_t partition_align; 1482 uint16_t info_event_log_size; 1483 uint16_t warning_event_log_size; 1484 uint16_t failure_event_log_size; 1485 uint16_t fatal_event_log_size; 1486 uint32_t lsa_size; 1487 uint8_t poison_list_max_mer[3]; 1488 uint16_t inject_poison_limit; 1489 uint8_t poison_caps; 1490 uint8_t qos_telemetry_caps; 1491 uint16_t dc_event_log_size; 1492 } QEMU_PACKED *id; 1493 QEMU_BUILD_BUG_ON(sizeof(*id) != 0x45); 1494 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1495 CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); 1496 CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; 1497 1498 if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) || 1499 (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) || 1500 (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) { 1501 return CXL_MBOX_INTERNAL_ERROR; 1502 } 1503 1504 id = (void *)payload_out; 1505 1506 snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0); 1507 1508 stq_le_p(&id->total_capacity, 1509 cxl_dstate->static_mem_size / CXL_CAPACITY_MULTIPLIER); 1510 stq_le_p(&id->persistent_capacity, 1511 cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER); 1512 stq_le_p(&id->volatile_capacity, 1513 cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER); 1514 stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d)); 1515 /* 256 poison records */ 1516 st24_le_p(id->poison_list_max_mer, 256); 1517 /* No limit - so limited by main poison record limit */ 1518 stw_le_p(&id->inject_poison_limit, 0); 1519 stw_le_p(&id->dc_event_log_size, CXL_DC_EVENT_LOG_SIZE); 1520 1521 *len_out = sizeof(*id); 1522 return CXL_MBOX_SUCCESS; 1523 } 1524 1525 /* CXL r3.1 Section 8.2.9.9.2.1: Get Partition Info (Opcode 4100h) */ 1526 static CXLRetCode cmd_ccls_get_partition_info(const struct cxl_cmd *cmd, 1527 uint8_t *payload_in, 1528 size_t len_in, 1529 uint8_t *payload_out, 1530 size_t *len_out, 1531 CXLCCI *cci) 1532 { 1533 CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate; 1534 struct { 1535 uint64_t active_vmem; 1536 uint64_t active_pmem; 1537 uint64_t next_vmem; 1538 uint64_t next_pmem; 1539 } QEMU_PACKED *part_info = (void *)payload_out; 1540 QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20); 1541 CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate); 1542 1543 if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) || 1544 (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) || 1545 (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) { 1546 return CXL_MBOX_INTERNAL_ERROR; 1547 } 1548 1549 stq_le_p(&part_info->active_vmem, 1550 cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER); 1551 /* 1552 * When both next_vmem and next_pmem are 0, there is no pending change to 1553 * partitioning. 1554 */ 1555 stq_le_p(&part_info->next_vmem, 0); 1556 stq_le_p(&part_info->active_pmem, 1557 cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER); 1558 stq_le_p(&part_info->next_pmem, 0); 1559 1560 *len_out = sizeof(*part_info); 1561 return CXL_MBOX_SUCCESS; 1562 } 1563 1564 /* CXL r3.1 Section 8.2.9.9.2.3: Get LSA (Opcode 4102h) */ 1565 static CXLRetCode cmd_ccls_get_lsa(const struct cxl_cmd *cmd, 1566 uint8_t *payload_in, 1567 size_t len_in, 1568 uint8_t *payload_out, 1569 size_t *len_out, 1570 CXLCCI *cci) 1571 { 1572 struct { 1573 uint32_t offset; 1574 uint32_t length; 1575 } QEMU_PACKED *get_lsa; 1576 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1577 CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); 1578 uint64_t offset, length; 1579 1580 get_lsa = (void *)payload_in; 1581 offset = get_lsa->offset; 1582 length = get_lsa->length; 1583 1584 if (offset + length > cvc->get_lsa_size(ct3d)) { 1585 *len_out = 0; 1586 return CXL_MBOX_INVALID_INPUT; 1587 } 1588 1589 *len_out = cvc->get_lsa(ct3d, payload_out, length, offset); 1590 return CXL_MBOX_SUCCESS; 1591 } 1592 1593 /* CXL r3.1 Section 8.2.9.9.2.4: Set LSA (Opcode 4103h) */ 1594 static CXLRetCode cmd_ccls_set_lsa(const struct cxl_cmd *cmd, 1595 uint8_t *payload_in, 1596 size_t len_in, 1597 uint8_t *payload_out, 1598 size_t *len_out, 1599 CXLCCI *cci) 1600 { 1601 struct set_lsa_pl { 1602 uint32_t offset; 1603 uint32_t rsvd; 1604 uint8_t data[]; 1605 } QEMU_PACKED; 1606 struct set_lsa_pl *set_lsa_payload = (void *)payload_in; 1607 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1608 CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); 1609 const size_t hdr_len = offsetof(struct set_lsa_pl, data); 1610 1611 *len_out = 0; 1612 if (len_in < hdr_len) { 1613 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 1614 } 1615 1616 if (set_lsa_payload->offset + len_in > cvc->get_lsa_size(ct3d) + hdr_len) { 1617 return CXL_MBOX_INVALID_INPUT; 1618 } 1619 len_in -= hdr_len; 1620 1621 cvc->set_lsa(ct3d, set_lsa_payload->data, len_in, set_lsa_payload->offset); 1622 return CXL_MBOX_SUCCESS; 1623 } 1624 1625 /* CXL r3.2 Section 8.2.10.9.3.2 Get Alert Configuration (Opcode 4201h) */ 1626 static CXLRetCode cmd_get_alert_config(const struct cxl_cmd *cmd, 1627 uint8_t *payload_in, 1628 size_t len_in, 1629 uint8_t *payload_out, 1630 size_t *len_out, 1631 CXLCCI *cci) 1632 { 1633 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1634 CXLAlertConfig *out = (CXLAlertConfig *)payload_out; 1635 1636 memcpy(out, &ct3d->alert_config, sizeof(ct3d->alert_config)); 1637 *len_out = sizeof(ct3d->alert_config); 1638 1639 return CXL_MBOX_SUCCESS; 1640 } 1641 1642 /* CXL r3.2 Section 8.2.10.9.3.3 Set Alert Configuration (Opcode 4202h) */ 1643 static CXLRetCode cmd_set_alert_config(const struct cxl_cmd *cmd, 1644 uint8_t *payload_in, 1645 size_t len_in, 1646 uint8_t *payload_out, 1647 size_t *len_out, 1648 CXLCCI *cci) 1649 { 1650 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1651 CXLAlertConfig *alert_config = &ct3d->alert_config; 1652 struct { 1653 uint8_t valid_alert_actions; 1654 uint8_t enable_alert_actions; 1655 uint8_t life_used_warn_thresh; 1656 uint8_t rsvd; 1657 uint16_t over_temp_warn_thresh; 1658 uint16_t under_temp_warn_thresh; 1659 uint16_t cor_vmem_err_warn_thresh; 1660 uint16_t cor_pmem_err_warn_thresh; 1661 } QEMU_PACKED *in = (void *)payload_in; 1662 1663 if (in->valid_alert_actions & CXL_ALERTS_LIFE_USED_WARN_THRESH) { 1664 /* 1665 * CXL r3.2 Table 8-149 The life used warning threshold shall be 1666 * less than the life used critical alert value. 1667 */ 1668 if (in->life_used_warn_thresh >= 1669 alert_config->life_used_crit_alert_thresh) { 1670 return CXL_MBOX_INVALID_INPUT; 1671 } 1672 alert_config->life_used_warn_thresh = in->life_used_warn_thresh; 1673 alert_config->enable_alerts |= CXL_ALERTS_LIFE_USED_WARN_THRESH; 1674 } 1675 1676 if (in->valid_alert_actions & CXL_ALERTS_OVER_TEMP_WARN_THRESH) { 1677 /* 1678 * CXL r3.2 Table 8-149 The Device Over-Temperature Warning Threshold 1679 * shall be less than the the Device Over-Temperature Critical 1680 * Alert Threshold. 1681 */ 1682 if (in->over_temp_warn_thresh >= 1683 alert_config->over_temp_crit_alert_thresh) { 1684 return CXL_MBOX_INVALID_INPUT; 1685 } 1686 alert_config->over_temp_warn_thresh = in->over_temp_warn_thresh; 1687 alert_config->enable_alerts |= CXL_ALERTS_OVER_TEMP_WARN_THRESH; 1688 } 1689 1690 if (in->valid_alert_actions & CXL_ALERTS_UNDER_TEMP_WARN_THRESH) { 1691 /* 1692 * CXL r3.2 Table 8-149 The Device Under-Temperature Warning Threshold 1693 * shall be higher than the the Device Under-Temperature Critical 1694 * Alert Threshold. 1695 */ 1696 if (in->under_temp_warn_thresh <= 1697 alert_config->under_temp_crit_alert_thresh) { 1698 return CXL_MBOX_INVALID_INPUT; 1699 } 1700 alert_config->under_temp_warn_thresh = in->under_temp_warn_thresh; 1701 alert_config->enable_alerts |= CXL_ALERTS_UNDER_TEMP_WARN_THRESH; 1702 } 1703 1704 if (in->valid_alert_actions & CXL_ALERTS_COR_VMEM_ERR_WARN_THRESH) { 1705 alert_config->cor_vmem_err_warn_thresh = in->cor_vmem_err_warn_thresh; 1706 alert_config->enable_alerts |= CXL_ALERTS_COR_VMEM_ERR_WARN_THRESH; 1707 } 1708 1709 if (in->valid_alert_actions & CXL_ALERTS_COR_PMEM_ERR_WARN_THRESH) { 1710 alert_config->cor_pmem_err_warn_thresh = in->cor_pmem_err_warn_thresh; 1711 alert_config->enable_alerts |= CXL_ALERTS_COR_PMEM_ERR_WARN_THRESH; 1712 } 1713 return CXL_MBOX_SUCCESS; 1714 } 1715 1716 /* Perform the actual device zeroing */ 1717 static void __do_sanitization(CXLType3Dev *ct3d) 1718 { 1719 MemoryRegion *mr; 1720 1721 if (ct3d->hostvmem) { 1722 mr = host_memory_backend_get_memory(ct3d->hostvmem); 1723 if (mr) { 1724 void *hostmem = memory_region_get_ram_ptr(mr); 1725 memset(hostmem, 0, memory_region_size(mr)); 1726 } 1727 } 1728 1729 if (ct3d->hostpmem) { 1730 mr = host_memory_backend_get_memory(ct3d->hostpmem); 1731 if (mr) { 1732 void *hostmem = memory_region_get_ram_ptr(mr); 1733 memset(hostmem, 0, memory_region_size(mr)); 1734 } 1735 } 1736 if (ct3d->lsa) { 1737 mr = host_memory_backend_get_memory(ct3d->lsa); 1738 if (mr) { 1739 void *lsa = memory_region_get_ram_ptr(mr); 1740 memset(lsa, 0, memory_region_size(mr)); 1741 } 1742 } 1743 cxl_discard_all_event_records(&ct3d->cxl_dstate); 1744 } 1745 1746 static int get_sanitize_duration(uint64_t total_mem) 1747 { 1748 int secs = 0; 1749 1750 if (total_mem <= 512) { 1751 secs = 4; 1752 } else if (total_mem <= 1024) { 1753 secs = 8; 1754 } else if (total_mem <= 2 * 1024) { 1755 secs = 15; 1756 } else if (total_mem <= 4 * 1024) { 1757 secs = 30; 1758 } else if (total_mem <= 8 * 1024) { 1759 secs = 60; 1760 } else if (total_mem <= 16 * 1024) { 1761 secs = 2 * 60; 1762 } else if (total_mem <= 32 * 1024) { 1763 secs = 4 * 60; 1764 } else if (total_mem <= 64 * 1024) { 1765 secs = 8 * 60; 1766 } else if (total_mem <= 128 * 1024) { 1767 secs = 15 * 60; 1768 } else if (total_mem <= 256 * 1024) { 1769 secs = 30 * 60; 1770 } else if (total_mem <= 512 * 1024) { 1771 secs = 60 * 60; 1772 } else if (total_mem <= 1024 * 1024) { 1773 secs = 120 * 60; 1774 } else { 1775 secs = 240 * 60; /* max 4 hrs */ 1776 } 1777 1778 return secs; 1779 } 1780 1781 /* 1782 * CXL r3.1 Section 8.2.9.9.5.1: Sanitize (Opcode 4400h) 1783 * 1784 * Once the Sanitize command has started successfully, the device shall be 1785 * placed in the media disabled state. If the command fails or is interrupted 1786 * by a reset or power failure, it shall remain in the media disabled state 1787 * until a successful Sanitize command has been completed. During this state: 1788 * 1789 * 1. Memory writes to the device will have no effect, and all memory reads 1790 * will return random values (no user data returned, even for locations that 1791 * the failed Sanitize operation didn’t sanitize yet). 1792 * 1793 * 2. Mailbox commands shall still be processed in the disabled state, except 1794 * that commands that access Sanitized areas shall fail with the Media Disabled 1795 * error code. 1796 */ 1797 static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd, 1798 uint8_t *payload_in, 1799 size_t len_in, 1800 uint8_t *payload_out, 1801 size_t *len_out, 1802 CXLCCI *cci) 1803 { 1804 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1805 uint64_t total_mem; /* in Mb */ 1806 int secs; 1807 1808 total_mem = (ct3d->cxl_dstate.vmem_size + ct3d->cxl_dstate.pmem_size) >> 20; 1809 secs = get_sanitize_duration(total_mem); 1810 1811 /* EBUSY other bg cmds as of now */ 1812 cci->bg.runtime = secs * 1000UL; 1813 *len_out = 0; 1814 1815 cxl_dev_disable_media(&ct3d->cxl_dstate); 1816 1817 /* sanitize when done */ 1818 return CXL_MBOX_BG_STARTED; 1819 } 1820 1821 struct dpa_range_list_entry { 1822 uint64_t starting_dpa; 1823 uint64_t length; 1824 } QEMU_PACKED; 1825 1826 struct CXLSanitizeInfo { 1827 uint32_t dpa_range_count; 1828 uint8_t fill_value; 1829 struct dpa_range_list_entry dpa_range_list[]; 1830 } QEMU_PACKED; 1831 1832 static uint64_t get_vmr_size(CXLType3Dev *ct3d, MemoryRegion **vmr) 1833 { 1834 MemoryRegion *mr; 1835 if (ct3d->hostvmem) { 1836 mr = host_memory_backend_get_memory(ct3d->hostvmem); 1837 if (vmr) { 1838 *vmr = mr; 1839 } 1840 return memory_region_size(mr); 1841 } 1842 return 0; 1843 } 1844 1845 static uint64_t get_pmr_size(CXLType3Dev *ct3d, MemoryRegion **pmr) 1846 { 1847 MemoryRegion *mr; 1848 if (ct3d->hostpmem) { 1849 mr = host_memory_backend_get_memory(ct3d->hostpmem); 1850 if (pmr) { 1851 *pmr = mr; 1852 } 1853 return memory_region_size(mr); 1854 } 1855 return 0; 1856 } 1857 1858 static uint64_t get_dc_size(CXLType3Dev *ct3d, MemoryRegion **dc_mr) 1859 { 1860 MemoryRegion *mr; 1861 if (ct3d->dc.host_dc) { 1862 mr = host_memory_backend_get_memory(ct3d->dc.host_dc); 1863 if (dc_mr) { 1864 *dc_mr = mr; 1865 } 1866 return memory_region_size(mr); 1867 } 1868 return 0; 1869 } 1870 1871 static int validate_dpa_addr(CXLType3Dev *ct3d, uint64_t dpa_addr, 1872 size_t length) 1873 { 1874 uint64_t vmr_size, pmr_size, dc_size; 1875 1876 if ((dpa_addr % CXL_CACHE_LINE_SIZE) || 1877 (length % CXL_CACHE_LINE_SIZE) || 1878 (length <= 0)) { 1879 return -EINVAL; 1880 } 1881 1882 vmr_size = get_vmr_size(ct3d, NULL); 1883 pmr_size = get_pmr_size(ct3d, NULL); 1884 dc_size = get_dc_size(ct3d, NULL); 1885 1886 if (dpa_addr + length > vmr_size + pmr_size + dc_size) { 1887 return -EINVAL; 1888 } 1889 1890 if (dpa_addr > vmr_size + pmr_size) { 1891 if (!ct3_test_region_block_backed(ct3d, dpa_addr, length)) { 1892 return -ENODEV; 1893 } 1894 } 1895 1896 return 0; 1897 } 1898 1899 static int sanitize_range(CXLType3Dev *ct3d, uint64_t dpa_addr, size_t length, 1900 uint8_t fill_value) 1901 { 1902 1903 uint64_t vmr_size, pmr_size; 1904 AddressSpace *as = NULL; 1905 MemTxAttrs mem_attrs = {}; 1906 1907 vmr_size = get_vmr_size(ct3d, NULL); 1908 pmr_size = get_pmr_size(ct3d, NULL); 1909 1910 if (dpa_addr < vmr_size) { 1911 as = &ct3d->hostvmem_as; 1912 } else if (dpa_addr < vmr_size + pmr_size) { 1913 as = &ct3d->hostpmem_as; 1914 } else { 1915 if (!ct3_test_region_block_backed(ct3d, dpa_addr, length)) { 1916 return -ENODEV; 1917 } 1918 as = &ct3d->dc.host_dc_as; 1919 } 1920 1921 return address_space_set(as, dpa_addr, fill_value, length, mem_attrs); 1922 } 1923 1924 /* Perform the actual device zeroing */ 1925 static void __do_sanitize(CXLType3Dev *ct3d) 1926 { 1927 struct CXLSanitizeInfo *san_info = ct3d->media_op_sanitize; 1928 int dpa_range_count = san_info->dpa_range_count; 1929 int rc = 0; 1930 int i; 1931 1932 for (i = 0; i < dpa_range_count; i++) { 1933 rc = sanitize_range(ct3d, san_info->dpa_range_list[i].starting_dpa, 1934 san_info->dpa_range_list[i].length, 1935 san_info->fill_value); 1936 if (rc) { 1937 goto exit; 1938 } 1939 } 1940 exit: 1941 g_free(ct3d->media_op_sanitize); 1942 ct3d->media_op_sanitize = NULL; 1943 return; 1944 } 1945 1946 enum { 1947 MEDIA_OP_CLASS_GENERAL = 0x0, 1948 #define MEDIA_OP_GEN_SUBC_DISCOVERY 0x0 1949 MEDIA_OP_CLASS_SANITIZE = 0x1, 1950 #define MEDIA_OP_SAN_SUBC_SANITIZE 0x0 1951 #define MEDIA_OP_SAN_SUBC_ZERO 0x1 1952 }; 1953 1954 struct media_op_supported_list_entry { 1955 uint8_t media_op_class; 1956 uint8_t media_op_subclass; 1957 }; 1958 1959 struct media_op_discovery_out_pl { 1960 uint64_t dpa_range_granularity; 1961 uint16_t total_supported_operations; 1962 uint16_t num_of_supported_operations; 1963 struct media_op_supported_list_entry entry[]; 1964 } QEMU_PACKED; 1965 1966 static const struct media_op_supported_list_entry media_op_matrix[] = { 1967 { MEDIA_OP_CLASS_GENERAL, MEDIA_OP_GEN_SUBC_DISCOVERY }, 1968 { MEDIA_OP_CLASS_SANITIZE, MEDIA_OP_SAN_SUBC_SANITIZE }, 1969 { MEDIA_OP_CLASS_SANITIZE, MEDIA_OP_SAN_SUBC_ZERO }, 1970 }; 1971 1972 static CXLRetCode media_operations_discovery(uint8_t *payload_in, 1973 size_t len_in, 1974 uint8_t *payload_out, 1975 size_t *len_out) 1976 { 1977 struct { 1978 uint8_t media_operation_class; 1979 uint8_t media_operation_subclass; 1980 uint8_t rsvd[2]; 1981 uint32_t dpa_range_count; 1982 struct { 1983 uint16_t start_index; 1984 uint16_t num_ops; 1985 } discovery_osa; 1986 } QEMU_PACKED *media_op_in_disc_pl = (void *)payload_in; 1987 struct media_op_discovery_out_pl *media_out_pl = 1988 (struct media_op_discovery_out_pl *)payload_out; 1989 int num_ops, start_index, i; 1990 int count = 0; 1991 1992 if (len_in < sizeof(*media_op_in_disc_pl)) { 1993 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 1994 } 1995 1996 num_ops = media_op_in_disc_pl->discovery_osa.num_ops; 1997 start_index = media_op_in_disc_pl->discovery_osa.start_index; 1998 1999 /* 2000 * As per spec CXL r3.2 8.2.10.9.5.3 dpa_range_count should be zero and 2001 * start index should not exceed the total number of entries for discovery 2002 * sub class command. 2003 */ 2004 if (media_op_in_disc_pl->dpa_range_count || 2005 start_index > ARRAY_SIZE(media_op_matrix)) { 2006 return CXL_MBOX_INVALID_INPUT; 2007 } 2008 2009 media_out_pl->dpa_range_granularity = CXL_CACHE_LINE_SIZE; 2010 media_out_pl->total_supported_operations = 2011 ARRAY_SIZE(media_op_matrix); 2012 if (num_ops > 0) { 2013 for (i = start_index; i < start_index + num_ops; i++) { 2014 media_out_pl->entry[count].media_op_class = 2015 media_op_matrix[i].media_op_class; 2016 media_out_pl->entry[count].media_op_subclass = 2017 media_op_matrix[i].media_op_subclass; 2018 count++; 2019 if (count == num_ops) { 2020 break; 2021 } 2022 } 2023 } 2024 2025 media_out_pl->num_of_supported_operations = count; 2026 *len_out = sizeof(*media_out_pl) + count * sizeof(*media_out_pl->entry); 2027 return CXL_MBOX_SUCCESS; 2028 } 2029 2030 static CXLRetCode media_operations_sanitize(CXLType3Dev *ct3d, 2031 uint8_t *payload_in, 2032 size_t len_in, 2033 uint8_t *payload_out, 2034 size_t *len_out, 2035 uint8_t fill_value, 2036 CXLCCI *cci) 2037 { 2038 struct media_operations_sanitize { 2039 uint8_t media_operation_class; 2040 uint8_t media_operation_subclass; 2041 uint8_t rsvd[2]; 2042 uint32_t dpa_range_count; 2043 struct dpa_range_list_entry dpa_range_list[]; 2044 } QEMU_PACKED *media_op_in_sanitize_pl = (void *)payload_in; 2045 uint32_t dpa_range_count = media_op_in_sanitize_pl->dpa_range_count; 2046 uint64_t total_mem = 0; 2047 size_t dpa_range_list_size; 2048 int secs = 0, i; 2049 2050 if (dpa_range_count == 0) { 2051 return CXL_MBOX_SUCCESS; 2052 } 2053 2054 dpa_range_list_size = dpa_range_count * sizeof(struct dpa_range_list_entry); 2055 if (len_in < (sizeof(*media_op_in_sanitize_pl) + dpa_range_list_size)) { 2056 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 2057 } 2058 2059 for (i = 0; i < dpa_range_count; i++) { 2060 uint64_t start_dpa = 2061 media_op_in_sanitize_pl->dpa_range_list[i].starting_dpa; 2062 uint64_t length = media_op_in_sanitize_pl->dpa_range_list[i].length; 2063 2064 if (validate_dpa_addr(ct3d, start_dpa, length)) { 2065 return CXL_MBOX_INVALID_INPUT; 2066 } 2067 total_mem += length; 2068 } 2069 ct3d->media_op_sanitize = g_malloc0(sizeof(struct CXLSanitizeInfo) + 2070 dpa_range_list_size); 2071 2072 ct3d->media_op_sanitize->dpa_range_count = dpa_range_count; 2073 ct3d->media_op_sanitize->fill_value = fill_value; 2074 memcpy(ct3d->media_op_sanitize->dpa_range_list, 2075 media_op_in_sanitize_pl->dpa_range_list, 2076 dpa_range_list_size); 2077 secs = get_sanitize_duration(total_mem >> 20); 2078 2079 /* EBUSY other bg cmds as of now */ 2080 cci->bg.runtime = secs * 1000UL; 2081 *len_out = 0; 2082 /* 2083 * media op sanitize is targeted so no need to disable media or 2084 * clear event logs 2085 */ 2086 return CXL_MBOX_BG_STARTED; 2087 } 2088 2089 static CXLRetCode cmd_media_operations(const struct cxl_cmd *cmd, 2090 uint8_t *payload_in, 2091 size_t len_in, 2092 uint8_t *payload_out, 2093 size_t *len_out, 2094 CXLCCI *cci) 2095 { 2096 struct { 2097 uint8_t media_operation_class; 2098 uint8_t media_operation_subclass; 2099 uint8_t rsvd[2]; 2100 uint32_t dpa_range_count; 2101 } QEMU_PACKED *media_op_in_common_pl = (void *)payload_in; 2102 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2103 uint8_t media_op_cl = 0; 2104 uint8_t media_op_subclass = 0; 2105 2106 if (len_in < sizeof(*media_op_in_common_pl)) { 2107 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 2108 } 2109 2110 media_op_cl = media_op_in_common_pl->media_operation_class; 2111 media_op_subclass = media_op_in_common_pl->media_operation_subclass; 2112 2113 switch (media_op_cl) { 2114 case MEDIA_OP_CLASS_GENERAL: 2115 if (media_op_subclass != MEDIA_OP_GEN_SUBC_DISCOVERY) { 2116 return CXL_MBOX_UNSUPPORTED; 2117 } 2118 2119 return media_operations_discovery(payload_in, len_in, payload_out, 2120 len_out); 2121 case MEDIA_OP_CLASS_SANITIZE: 2122 switch (media_op_subclass) { 2123 case MEDIA_OP_SAN_SUBC_SANITIZE: 2124 return media_operations_sanitize(ct3d, payload_in, len_in, 2125 payload_out, len_out, 0xF, 2126 cci); 2127 case MEDIA_OP_SAN_SUBC_ZERO: 2128 return media_operations_sanitize(ct3d, payload_in, len_in, 2129 payload_out, len_out, 0, 2130 cci); 2131 default: 2132 return CXL_MBOX_UNSUPPORTED; 2133 } 2134 default: 2135 return CXL_MBOX_UNSUPPORTED; 2136 } 2137 } 2138 2139 static CXLRetCode cmd_get_security_state(const struct cxl_cmd *cmd, 2140 uint8_t *payload_in, 2141 size_t len_in, 2142 uint8_t *payload_out, 2143 size_t *len_out, 2144 CXLCCI *cci) 2145 { 2146 uint32_t *state = (uint32_t *)payload_out; 2147 2148 *state = 0; 2149 *len_out = 4; 2150 return CXL_MBOX_SUCCESS; 2151 } 2152 2153 /* 2154 * CXL r3.1 Section 8.2.9.9.4.1: Get Poison List (Opcode 4300h) 2155 * 2156 * This is very inefficient, but good enough for now! 2157 * Also the payload will always fit, so no need to handle the MORE flag and 2158 * make this stateful. We may want to allow longer poison lists to aid 2159 * testing that kernel functionality. 2160 */ 2161 static CXLRetCode cmd_media_get_poison_list(const struct cxl_cmd *cmd, 2162 uint8_t *payload_in, 2163 size_t len_in, 2164 uint8_t *payload_out, 2165 size_t *len_out, 2166 CXLCCI *cci) 2167 { 2168 struct get_poison_list_pl { 2169 uint64_t pa; 2170 uint64_t length; 2171 } QEMU_PACKED; 2172 2173 struct get_poison_list_out_pl { 2174 uint8_t flags; 2175 uint8_t rsvd1; 2176 uint64_t overflow_timestamp; 2177 uint16_t count; 2178 uint8_t rsvd2[0x14]; 2179 struct { 2180 uint64_t addr; 2181 uint32_t length; 2182 uint32_t resv; 2183 } QEMU_PACKED records[]; 2184 } QEMU_PACKED; 2185 2186 struct get_poison_list_pl *in = (void *)payload_in; 2187 struct get_poison_list_out_pl *out = (void *)payload_out; 2188 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2189 uint16_t record_count = 0, i = 0; 2190 uint64_t query_start, query_length; 2191 CXLPoisonList *poison_list = &ct3d->poison_list; 2192 CXLPoison *ent; 2193 uint16_t out_pl_len; 2194 2195 query_start = ldq_le_p(&in->pa); 2196 /* 64 byte alignment required */ 2197 if (query_start & 0x3f) { 2198 return CXL_MBOX_INVALID_INPUT; 2199 } 2200 query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE; 2201 2202 QLIST_FOREACH(ent, poison_list, node) { 2203 /* Check for no overlap */ 2204 if (!ranges_overlap(ent->start, ent->length, 2205 query_start, query_length)) { 2206 continue; 2207 } 2208 record_count++; 2209 } 2210 out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]); 2211 assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE); 2212 2213 QLIST_FOREACH(ent, poison_list, node) { 2214 uint64_t start, stop; 2215 2216 /* Check for no overlap */ 2217 if (!ranges_overlap(ent->start, ent->length, 2218 query_start, query_length)) { 2219 continue; 2220 } 2221 2222 /* Deal with overlap */ 2223 start = MAX(ROUND_DOWN(ent->start, 64ull), query_start); 2224 stop = MIN(ROUND_DOWN(ent->start, 64ull) + ent->length, 2225 query_start + query_length); 2226 stq_le_p(&out->records[i].addr, start | (ent->type & 0x7)); 2227 stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE); 2228 i++; 2229 } 2230 if (ct3d->poison_list_overflowed) { 2231 out->flags = (1 << 1); 2232 stq_le_p(&out->overflow_timestamp, ct3d->poison_list_overflow_ts); 2233 } 2234 if (scan_media_running(cci)) { 2235 out->flags |= (1 << 2); 2236 } 2237 2238 stw_le_p(&out->count, record_count); 2239 *len_out = out_pl_len; 2240 return CXL_MBOX_SUCCESS; 2241 } 2242 2243 /* CXL r3.1 Section 8.2.9.9.4.2: Inject Poison (Opcode 4301h) */ 2244 static CXLRetCode cmd_media_inject_poison(const struct cxl_cmd *cmd, 2245 uint8_t *payload_in, 2246 size_t len_in, 2247 uint8_t *payload_out, 2248 size_t *len_out, 2249 CXLCCI *cci) 2250 { 2251 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2252 CXLPoisonList *poison_list = &ct3d->poison_list; 2253 CXLPoison *ent; 2254 struct inject_poison_pl { 2255 uint64_t dpa; 2256 }; 2257 struct inject_poison_pl *in = (void *)payload_in; 2258 uint64_t dpa = ldq_le_p(&in->dpa); 2259 CXLPoison *p; 2260 2261 QLIST_FOREACH(ent, poison_list, node) { 2262 if (dpa >= ent->start && 2263 dpa + CXL_CACHE_LINE_SIZE <= ent->start + ent->length) { 2264 return CXL_MBOX_SUCCESS; 2265 } 2266 } 2267 /* 2268 * Freeze the list if there is an on-going scan media operation. 2269 */ 2270 if (scan_media_running(cci)) { 2271 /* 2272 * XXX: Spec is ambiguous - is this case considered 2273 * a successful return despite not adding to the list? 2274 */ 2275 goto success; 2276 } 2277 2278 if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) { 2279 return CXL_MBOX_INJECT_POISON_LIMIT; 2280 } 2281 p = g_new0(CXLPoison, 1); 2282 2283 p->length = CXL_CACHE_LINE_SIZE; 2284 p->start = dpa; 2285 p->type = CXL_POISON_TYPE_INJECTED; 2286 2287 /* 2288 * Possible todo: Merge with existing entry if next to it and if same type 2289 */ 2290 QLIST_INSERT_HEAD(poison_list, p, node); 2291 ct3d->poison_list_cnt++; 2292 success: 2293 *len_out = 0; 2294 2295 return CXL_MBOX_SUCCESS; 2296 } 2297 2298 /* CXL r3.1 Section 8.2.9.9.4.3: Clear Poison (Opcode 4302h */ 2299 static CXLRetCode cmd_media_clear_poison(const struct cxl_cmd *cmd, 2300 uint8_t *payload_in, 2301 size_t len_in, 2302 uint8_t *payload_out, 2303 size_t *len_out, 2304 CXLCCI *cci) 2305 { 2306 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2307 CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; 2308 CXLPoisonList *poison_list = &ct3d->poison_list; 2309 CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); 2310 struct clear_poison_pl { 2311 uint64_t dpa; 2312 uint8_t data[64]; 2313 }; 2314 CXLPoison *ent; 2315 uint64_t dpa; 2316 2317 struct clear_poison_pl *in = (void *)payload_in; 2318 2319 dpa = ldq_le_p(&in->dpa); 2320 if (dpa + CXL_CACHE_LINE_SIZE > cxl_dstate->static_mem_size + 2321 ct3d->dc.total_capacity) { 2322 return CXL_MBOX_INVALID_PA; 2323 } 2324 2325 /* Clearing a region with no poison is not an error so always do so */ 2326 if (cvc->set_cacheline) { 2327 if (!cvc->set_cacheline(ct3d, dpa, in->data)) { 2328 return CXL_MBOX_INTERNAL_ERROR; 2329 } 2330 } 2331 2332 /* 2333 * Freeze the list if there is an on-going scan media operation. 2334 */ 2335 if (scan_media_running(cci)) { 2336 /* 2337 * XXX: Spec is ambiguous - is this case considered 2338 * a successful return despite not removing from the list? 2339 */ 2340 goto success; 2341 } 2342 2343 QLIST_FOREACH(ent, poison_list, node) { 2344 /* 2345 * Test for contained in entry. Simpler than general case 2346 * as clearing 64 bytes and entries 64 byte aligned 2347 */ 2348 if ((dpa >= ent->start) && (dpa < ent->start + ent->length)) { 2349 break; 2350 } 2351 } 2352 if (!ent) { 2353 goto success; 2354 } 2355 2356 QLIST_REMOVE(ent, node); 2357 ct3d->poison_list_cnt--; 2358 2359 if (dpa > ent->start) { 2360 CXLPoison *frag; 2361 /* Cannot overflow as replacing existing entry */ 2362 2363 frag = g_new0(CXLPoison, 1); 2364 2365 frag->start = ent->start; 2366 frag->length = dpa - ent->start; 2367 frag->type = ent->type; 2368 2369 QLIST_INSERT_HEAD(poison_list, frag, node); 2370 ct3d->poison_list_cnt++; 2371 } 2372 2373 if (dpa + CXL_CACHE_LINE_SIZE < ent->start + ent->length) { 2374 CXLPoison *frag; 2375 2376 if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) { 2377 cxl_set_poison_list_overflowed(ct3d); 2378 } else { 2379 frag = g_new0(CXLPoison, 1); 2380 2381 frag->start = dpa + CXL_CACHE_LINE_SIZE; 2382 frag->length = ent->start + ent->length - frag->start; 2383 frag->type = ent->type; 2384 QLIST_INSERT_HEAD(poison_list, frag, node); 2385 ct3d->poison_list_cnt++; 2386 } 2387 } 2388 /* Any fragments have been added, free original entry */ 2389 g_free(ent); 2390 success: 2391 *len_out = 0; 2392 2393 return CXL_MBOX_SUCCESS; 2394 } 2395 2396 /* 2397 * CXL r3.1 section 8.2.9.9.4.4: Get Scan Media Capabilities 2398 */ 2399 static CXLRetCode 2400 cmd_media_get_scan_media_capabilities(const struct cxl_cmd *cmd, 2401 uint8_t *payload_in, 2402 size_t len_in, 2403 uint8_t *payload_out, 2404 size_t *len_out, 2405 CXLCCI *cci) 2406 { 2407 struct get_scan_media_capabilities_pl { 2408 uint64_t pa; 2409 uint64_t length; 2410 } QEMU_PACKED; 2411 2412 struct get_scan_media_capabilities_out_pl { 2413 uint32_t estimated_runtime_ms; 2414 }; 2415 2416 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2417 CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; 2418 struct get_scan_media_capabilities_pl *in = (void *)payload_in; 2419 struct get_scan_media_capabilities_out_pl *out = (void *)payload_out; 2420 uint64_t query_start; 2421 uint64_t query_length; 2422 2423 query_start = ldq_le_p(&in->pa); 2424 /* 64 byte alignment required */ 2425 if (query_start & 0x3f) { 2426 return CXL_MBOX_INVALID_INPUT; 2427 } 2428 query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE; 2429 2430 if (query_start + query_length > cxl_dstate->static_mem_size) { 2431 return CXL_MBOX_INVALID_PA; 2432 } 2433 2434 /* 2435 * Just use 400 nanosecond access/read latency + 100 ns for 2436 * the cost of updating the poison list. For small enough 2437 * chunks return at least 1 ms. 2438 */ 2439 stl_le_p(&out->estimated_runtime_ms, 2440 MAX(1, query_length * (0.0005L / 64))); 2441 2442 *len_out = sizeof(*out); 2443 return CXL_MBOX_SUCCESS; 2444 } 2445 2446 static void __do_scan_media(CXLType3Dev *ct3d) 2447 { 2448 CXLPoison *ent; 2449 unsigned int results_cnt = 0; 2450 2451 QLIST_FOREACH(ent, &ct3d->scan_media_results, node) { 2452 results_cnt++; 2453 } 2454 2455 /* only scan media may clear the overflow */ 2456 if (ct3d->poison_list_overflowed && 2457 ct3d->poison_list_cnt == results_cnt) { 2458 cxl_clear_poison_list_overflowed(ct3d); 2459 } 2460 /* scan media has run since last conventional reset */ 2461 ct3d->scan_media_hasrun = true; 2462 } 2463 2464 /* 2465 * CXL r3.1 section 8.2.9.9.4.5: Scan Media 2466 */ 2467 static CXLRetCode cmd_media_scan_media(const struct cxl_cmd *cmd, 2468 uint8_t *payload_in, 2469 size_t len_in, 2470 uint8_t *payload_out, 2471 size_t *len_out, 2472 CXLCCI *cci) 2473 { 2474 struct scan_media_pl { 2475 uint64_t pa; 2476 uint64_t length; 2477 uint8_t flags; 2478 } QEMU_PACKED; 2479 2480 struct scan_media_pl *in = (void *)payload_in; 2481 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2482 CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; 2483 uint64_t query_start; 2484 uint64_t query_length; 2485 CXLPoison *ent, *next; 2486 2487 query_start = ldq_le_p(&in->pa); 2488 /* 64 byte alignment required */ 2489 if (query_start & 0x3f) { 2490 return CXL_MBOX_INVALID_INPUT; 2491 } 2492 query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE; 2493 2494 if (query_start + query_length > cxl_dstate->static_mem_size) { 2495 return CXL_MBOX_INVALID_PA; 2496 } 2497 if (ct3d->dc.num_regions && query_start + query_length >= 2498 cxl_dstate->static_mem_size + ct3d->dc.total_capacity) { 2499 return CXL_MBOX_INVALID_PA; 2500 } 2501 2502 if (in->flags == 0) { /* TODO */ 2503 qemu_log_mask(LOG_UNIMP, 2504 "Scan Media Event Log is unsupported\n"); 2505 } 2506 2507 /* any previous results are discarded upon a new Scan Media */ 2508 QLIST_FOREACH_SAFE(ent, &ct3d->scan_media_results, node, next) { 2509 QLIST_REMOVE(ent, node); 2510 g_free(ent); 2511 } 2512 2513 /* kill the poison list - it will be recreated */ 2514 if (ct3d->poison_list_overflowed) { 2515 QLIST_FOREACH_SAFE(ent, &ct3d->poison_list, node, next) { 2516 QLIST_REMOVE(ent, node); 2517 g_free(ent); 2518 ct3d->poison_list_cnt--; 2519 } 2520 } 2521 2522 /* 2523 * Scan the backup list and move corresponding entries 2524 * into the results list, updating the poison list 2525 * when possible. 2526 */ 2527 QLIST_FOREACH_SAFE(ent, &ct3d->poison_list_bkp, node, next) { 2528 CXLPoison *res; 2529 2530 if (ent->start >= query_start + query_length || 2531 ent->start + ent->length <= query_start) { 2532 continue; 2533 } 2534 2535 /* 2536 * If a Get Poison List cmd comes in while this 2537 * scan is being done, it will see the new complete 2538 * list, while setting the respective flag. 2539 */ 2540 if (ct3d->poison_list_cnt < CXL_POISON_LIST_LIMIT) { 2541 CXLPoison *p = g_new0(CXLPoison, 1); 2542 2543 p->start = ent->start; 2544 p->length = ent->length; 2545 p->type = ent->type; 2546 QLIST_INSERT_HEAD(&ct3d->poison_list, p, node); 2547 ct3d->poison_list_cnt++; 2548 } 2549 2550 res = g_new0(CXLPoison, 1); 2551 res->start = ent->start; 2552 res->length = ent->length; 2553 res->type = ent->type; 2554 QLIST_INSERT_HEAD(&ct3d->scan_media_results, res, node); 2555 2556 QLIST_REMOVE(ent, node); 2557 g_free(ent); 2558 } 2559 2560 cci->bg.runtime = MAX(1, query_length * (0.0005L / 64)); 2561 *len_out = 0; 2562 2563 return CXL_MBOX_BG_STARTED; 2564 } 2565 2566 /* 2567 * CXL r3.1 section 8.2.9.9.4.6: Get Scan Media Results 2568 */ 2569 static CXLRetCode cmd_media_get_scan_media_results(const struct cxl_cmd *cmd, 2570 uint8_t *payload_in, 2571 size_t len_in, 2572 uint8_t *payload_out, 2573 size_t *len_out, 2574 CXLCCI *cci) 2575 { 2576 struct get_scan_media_results_out_pl { 2577 uint64_t dpa_restart; 2578 uint64_t length; 2579 uint8_t flags; 2580 uint8_t rsvd1; 2581 uint16_t count; 2582 uint8_t rsvd2[0xc]; 2583 struct { 2584 uint64_t addr; 2585 uint32_t length; 2586 uint32_t resv; 2587 } QEMU_PACKED records[]; 2588 } QEMU_PACKED; 2589 2590 struct get_scan_media_results_out_pl *out = (void *)payload_out; 2591 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2592 CXLPoisonList *scan_media_results = &ct3d->scan_media_results; 2593 CXLPoison *ent, *next; 2594 uint16_t total_count = 0, record_count = 0, i = 0; 2595 uint16_t out_pl_len; 2596 2597 if (!ct3d->scan_media_hasrun) { 2598 return CXL_MBOX_UNSUPPORTED; 2599 } 2600 2601 /* 2602 * Calculate limits, all entries are within the same address range of the 2603 * last scan media call. 2604 */ 2605 QLIST_FOREACH(ent, scan_media_results, node) { 2606 size_t rec_size = record_count * sizeof(out->records[0]); 2607 2608 if (sizeof(*out) + rec_size < CXL_MAILBOX_MAX_PAYLOAD_SIZE) { 2609 record_count++; 2610 } 2611 total_count++; 2612 } 2613 2614 out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]); 2615 assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE); 2616 2617 memset(out, 0, out_pl_len); 2618 QLIST_FOREACH_SAFE(ent, scan_media_results, node, next) { 2619 uint64_t start, stop; 2620 2621 if (i == record_count) { 2622 break; 2623 } 2624 2625 start = ROUND_DOWN(ent->start, 64ull); 2626 stop = ROUND_DOWN(ent->start, 64ull) + ent->length; 2627 stq_le_p(&out->records[i].addr, start); 2628 stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE); 2629 i++; 2630 2631 /* consume the returning entry */ 2632 QLIST_REMOVE(ent, node); 2633 g_free(ent); 2634 } 2635 2636 stw_le_p(&out->count, record_count); 2637 if (total_count > record_count) { 2638 out->flags = (1 << 0); /* More Media Error Records */ 2639 } 2640 2641 *len_out = out_pl_len; 2642 return CXL_MBOX_SUCCESS; 2643 } 2644 2645 /* 2646 * CXL r3.1 section 8.2.9.9.9.1: Get Dynamic Capacity Configuration 2647 * (Opcode: 4800h) 2648 */ 2649 static CXLRetCode cmd_dcd_get_dyn_cap_config(const struct cxl_cmd *cmd, 2650 uint8_t *payload_in, 2651 size_t len_in, 2652 uint8_t *payload_out, 2653 size_t *len_out, 2654 CXLCCI *cci) 2655 { 2656 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2657 struct { 2658 uint8_t region_cnt; 2659 uint8_t start_rid; 2660 } QEMU_PACKED *in = (void *)payload_in; 2661 struct { 2662 uint8_t num_regions; 2663 uint8_t regions_returned; 2664 uint8_t rsvd1[6]; 2665 struct { 2666 uint64_t base; 2667 uint64_t decode_len; 2668 uint64_t region_len; 2669 uint64_t block_size; 2670 uint32_t dsmadhandle; 2671 uint8_t flags; 2672 uint8_t rsvd2[3]; 2673 } QEMU_PACKED records[]; 2674 } QEMU_PACKED *out = (void *)payload_out; 2675 struct { 2676 uint32_t num_extents_supported; 2677 uint32_t num_extents_available; 2678 uint32_t num_tags_supported; 2679 uint32_t num_tags_available; 2680 } QEMU_PACKED *extra_out; 2681 uint16_t record_count; 2682 uint16_t i; 2683 uint16_t out_pl_len; 2684 uint8_t start_rid; 2685 2686 start_rid = in->start_rid; 2687 if (start_rid >= ct3d->dc.num_regions) { 2688 return CXL_MBOX_INVALID_INPUT; 2689 } 2690 2691 record_count = MIN(ct3d->dc.num_regions - in->start_rid, in->region_cnt); 2692 2693 out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]); 2694 extra_out = (void *)(payload_out + out_pl_len); 2695 out_pl_len += sizeof(*extra_out); 2696 assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE); 2697 2698 out->num_regions = ct3d->dc.num_regions; 2699 out->regions_returned = record_count; 2700 for (i = 0; i < record_count; i++) { 2701 stq_le_p(&out->records[i].base, 2702 ct3d->dc.regions[start_rid + i].base); 2703 stq_le_p(&out->records[i].decode_len, 2704 ct3d->dc.regions[start_rid + i].decode_len / 2705 CXL_CAPACITY_MULTIPLIER); 2706 stq_le_p(&out->records[i].region_len, 2707 ct3d->dc.regions[start_rid + i].len); 2708 stq_le_p(&out->records[i].block_size, 2709 ct3d->dc.regions[start_rid + i].block_size); 2710 stl_le_p(&out->records[i].dsmadhandle, 2711 ct3d->dc.regions[start_rid + i].dsmadhandle); 2712 out->records[i].flags = ct3d->dc.regions[start_rid + i].flags; 2713 } 2714 /* 2715 * TODO: Assign values once extents and tags are introduced 2716 * to use. 2717 */ 2718 stl_le_p(&extra_out->num_extents_supported, CXL_NUM_EXTENTS_SUPPORTED); 2719 stl_le_p(&extra_out->num_extents_available, CXL_NUM_EXTENTS_SUPPORTED - 2720 ct3d->dc.total_extent_count); 2721 stl_le_p(&extra_out->num_tags_supported, CXL_NUM_TAGS_SUPPORTED); 2722 stl_le_p(&extra_out->num_tags_available, CXL_NUM_TAGS_SUPPORTED); 2723 2724 *len_out = out_pl_len; 2725 return CXL_MBOX_SUCCESS; 2726 } 2727 2728 /* 2729 * CXL r3.1 section 8.2.9.9.9.2: 2730 * Get Dynamic Capacity Extent List (Opcode 4801h) 2731 */ 2732 static CXLRetCode cmd_dcd_get_dyn_cap_ext_list(const struct cxl_cmd *cmd, 2733 uint8_t *payload_in, 2734 size_t len_in, 2735 uint8_t *payload_out, 2736 size_t *len_out, 2737 CXLCCI *cci) 2738 { 2739 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2740 struct { 2741 uint32_t extent_cnt; 2742 uint32_t start_extent_id; 2743 } QEMU_PACKED *in = (void *)payload_in; 2744 struct { 2745 uint32_t count; 2746 uint32_t total_extents; 2747 uint32_t generation_num; 2748 uint8_t rsvd[4]; 2749 CXLDCExtentRaw records[]; 2750 } QEMU_PACKED *out = (void *)payload_out; 2751 uint32_t start_extent_id = in->start_extent_id; 2752 CXLDCExtentList *extent_list = &ct3d->dc.extents; 2753 uint16_t record_count = 0, i = 0, record_done = 0; 2754 uint16_t out_pl_len, size; 2755 CXLDCExtent *ent; 2756 2757 if (start_extent_id > ct3d->dc.nr_extents_accepted) { 2758 return CXL_MBOX_INVALID_INPUT; 2759 } 2760 2761 record_count = MIN(in->extent_cnt, 2762 ct3d->dc.total_extent_count - start_extent_id); 2763 size = CXL_MAILBOX_MAX_PAYLOAD_SIZE - sizeof(*out); 2764 record_count = MIN(record_count, size / sizeof(out->records[0])); 2765 out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]); 2766 2767 stl_le_p(&out->count, record_count); 2768 stl_le_p(&out->total_extents, ct3d->dc.nr_extents_accepted); 2769 stl_le_p(&out->generation_num, ct3d->dc.ext_list_gen_seq); 2770 2771 if (record_count > 0) { 2772 CXLDCExtentRaw *out_rec = &out->records[record_done]; 2773 2774 QTAILQ_FOREACH(ent, extent_list, node) { 2775 if (i++ < start_extent_id) { 2776 continue; 2777 } 2778 stq_le_p(&out_rec->start_dpa, ent->start_dpa); 2779 stq_le_p(&out_rec->len, ent->len); 2780 memcpy(&out_rec->tag, ent->tag, 0x10); 2781 stw_le_p(&out_rec->shared_seq, ent->shared_seq); 2782 2783 record_done++; 2784 out_rec++; 2785 if (record_done == record_count) { 2786 break; 2787 } 2788 } 2789 } 2790 2791 *len_out = out_pl_len; 2792 return CXL_MBOX_SUCCESS; 2793 } 2794 2795 /* 2796 * Check whether any bit between addr[nr, nr+size) is set, 2797 * return true if any bit is set, otherwise return false 2798 */ 2799 bool test_any_bits_set(const unsigned long *addr, unsigned long nr, 2800 unsigned long size) 2801 { 2802 unsigned long res = find_next_bit(addr, size + nr, nr); 2803 2804 return res < nr + size; 2805 } 2806 2807 CXLDCRegion *cxl_find_dc_region(CXLType3Dev *ct3d, uint64_t dpa, uint64_t len) 2808 { 2809 int i; 2810 CXLDCRegion *region = &ct3d->dc.regions[0]; 2811 2812 if (dpa < region->base || 2813 dpa >= region->base + ct3d->dc.total_capacity) { 2814 return NULL; 2815 } 2816 2817 /* 2818 * CXL r3.1 section 9.13.3: Dynamic Capacity Device (DCD) 2819 * 2820 * Regions are used in increasing-DPA order, with Region 0 being used for 2821 * the lowest DPA of Dynamic Capacity and Region 7 for the highest DPA. 2822 * So check from the last region to find where the dpa belongs. Extents that 2823 * cross multiple regions are not allowed. 2824 */ 2825 for (i = ct3d->dc.num_regions - 1; i >= 0; i--) { 2826 region = &ct3d->dc.regions[i]; 2827 if (dpa >= region->base) { 2828 if (dpa + len > region->base + region->len) { 2829 return NULL; 2830 } 2831 return region; 2832 } 2833 } 2834 2835 return NULL; 2836 } 2837 2838 void cxl_insert_extent_to_extent_list(CXLDCExtentList *list, 2839 uint64_t dpa, 2840 uint64_t len, 2841 uint8_t *tag, 2842 uint16_t shared_seq) 2843 { 2844 CXLDCExtent *extent; 2845 2846 extent = g_new0(CXLDCExtent, 1); 2847 extent->start_dpa = dpa; 2848 extent->len = len; 2849 if (tag) { 2850 memcpy(extent->tag, tag, 0x10); 2851 } 2852 extent->shared_seq = shared_seq; 2853 2854 QTAILQ_INSERT_TAIL(list, extent, node); 2855 } 2856 2857 void cxl_remove_extent_from_extent_list(CXLDCExtentList *list, 2858 CXLDCExtent *extent) 2859 { 2860 QTAILQ_REMOVE(list, extent, node); 2861 g_free(extent); 2862 } 2863 2864 /* 2865 * Add a new extent to the extent "group" if group exists; 2866 * otherwise, create a new group 2867 * Return value: the extent group where the extent is inserted. 2868 */ 2869 CXLDCExtentGroup *cxl_insert_extent_to_extent_group(CXLDCExtentGroup *group, 2870 uint64_t dpa, 2871 uint64_t len, 2872 uint8_t *tag, 2873 uint16_t shared_seq) 2874 { 2875 if (!group) { 2876 group = g_new0(CXLDCExtentGroup, 1); 2877 QTAILQ_INIT(&group->list); 2878 } 2879 cxl_insert_extent_to_extent_list(&group->list, dpa, len, 2880 tag, shared_seq); 2881 return group; 2882 } 2883 2884 void cxl_extent_group_list_insert_tail(CXLDCExtentGroupList *list, 2885 CXLDCExtentGroup *group) 2886 { 2887 QTAILQ_INSERT_TAIL(list, group, node); 2888 } 2889 2890 uint32_t cxl_extent_group_list_delete_front(CXLDCExtentGroupList *list) 2891 { 2892 CXLDCExtent *ent, *ent_next; 2893 CXLDCExtentGroup *group = QTAILQ_FIRST(list); 2894 uint32_t extents_deleted = 0; 2895 2896 QTAILQ_REMOVE(list, group, node); 2897 QTAILQ_FOREACH_SAFE(ent, &group->list, node, ent_next) { 2898 cxl_remove_extent_from_extent_list(&group->list, ent); 2899 extents_deleted++; 2900 } 2901 g_free(group); 2902 2903 return extents_deleted; 2904 } 2905 2906 /* 2907 * CXL r3.1 Table 8-168: Add Dynamic Capacity Response Input Payload 2908 * CXL r3.1 Table 8-170: Release Dynamic Capacity Input Payload 2909 */ 2910 typedef struct CXLUpdateDCExtentListInPl { 2911 uint32_t num_entries_updated; 2912 uint8_t flags; 2913 uint8_t rsvd[3]; 2914 /* CXL r3.1 Table 8-169: Updated Extent */ 2915 struct { 2916 uint64_t start_dpa; 2917 uint64_t len; 2918 uint8_t rsvd[8]; 2919 } QEMU_PACKED updated_entries[]; 2920 } QEMU_PACKED CXLUpdateDCExtentListInPl; 2921 2922 /* 2923 * For the extents in the extent list to operate, check whether they are valid 2924 * 1. The extent should be in the range of a valid DC region; 2925 * 2. The extent should not cross multiple regions; 2926 * 3. The start DPA and the length of the extent should align with the block 2927 * size of the region; 2928 * 4. The address range of multiple extents in the list should not overlap. 2929 */ 2930 static CXLRetCode cxl_detect_malformed_extent_list(CXLType3Dev *ct3d, 2931 const CXLUpdateDCExtentListInPl *in) 2932 { 2933 uint64_t min_block_size = UINT64_MAX; 2934 CXLDCRegion *region; 2935 CXLDCRegion *lastregion = &ct3d->dc.regions[ct3d->dc.num_regions - 1]; 2936 g_autofree unsigned long *blk_bitmap = NULL; 2937 uint64_t dpa, len; 2938 uint32_t i; 2939 2940 for (i = 0; i < ct3d->dc.num_regions; i++) { 2941 region = &ct3d->dc.regions[i]; 2942 min_block_size = MIN(min_block_size, region->block_size); 2943 } 2944 2945 blk_bitmap = bitmap_new((lastregion->base + lastregion->len - 2946 ct3d->dc.regions[0].base) / min_block_size); 2947 2948 for (i = 0; i < in->num_entries_updated; i++) { 2949 dpa = in->updated_entries[i].start_dpa; 2950 len = in->updated_entries[i].len; 2951 2952 region = cxl_find_dc_region(ct3d, dpa, len); 2953 if (!region) { 2954 return CXL_MBOX_INVALID_PA; 2955 } 2956 2957 dpa -= ct3d->dc.regions[0].base; 2958 if (dpa % region->block_size || len % region->block_size) { 2959 return CXL_MBOX_INVALID_EXTENT_LIST; 2960 } 2961 /* the dpa range already covered by some other extents in the list */ 2962 if (test_any_bits_set(blk_bitmap, dpa / min_block_size, 2963 len / min_block_size)) { 2964 return CXL_MBOX_INVALID_EXTENT_LIST; 2965 } 2966 bitmap_set(blk_bitmap, dpa / min_block_size, len / min_block_size); 2967 } 2968 2969 return CXL_MBOX_SUCCESS; 2970 } 2971 2972 static CXLRetCode cxl_dcd_add_dyn_cap_rsp_dry_run(CXLType3Dev *ct3d, 2973 const CXLUpdateDCExtentListInPl *in) 2974 { 2975 uint32_t i; 2976 CXLDCExtent *ent; 2977 CXLDCExtentGroup *ext_group; 2978 uint64_t dpa, len; 2979 Range range1, range2; 2980 2981 for (i = 0; i < in->num_entries_updated; i++) { 2982 dpa = in->updated_entries[i].start_dpa; 2983 len = in->updated_entries[i].len; 2984 2985 range_init_nofail(&range1, dpa, len); 2986 2987 /* 2988 * The host-accepted DPA range must be contained by the first extent 2989 * group in the pending list 2990 */ 2991 ext_group = QTAILQ_FIRST(&ct3d->dc.extents_pending); 2992 if (!cxl_extents_contains_dpa_range(&ext_group->list, dpa, len)) { 2993 return CXL_MBOX_INVALID_PA; 2994 } 2995 2996 /* to-be-added range should not overlap with range already accepted */ 2997 QTAILQ_FOREACH(ent, &ct3d->dc.extents, node) { 2998 range_init_nofail(&range2, ent->start_dpa, ent->len); 2999 if (range_overlaps_range(&range1, &range2)) { 3000 return CXL_MBOX_INVALID_PA; 3001 } 3002 } 3003 } 3004 return CXL_MBOX_SUCCESS; 3005 } 3006 3007 /* 3008 * CXL r3.1 section 8.2.9.9.9.3: Add Dynamic Capacity Response (Opcode 4802h) 3009 * An extent is added to the extent list and becomes usable only after the 3010 * response is processed successfully. 3011 */ 3012 static CXLRetCode cmd_dcd_add_dyn_cap_rsp(const struct cxl_cmd *cmd, 3013 uint8_t *payload_in, 3014 size_t len_in, 3015 uint8_t *payload_out, 3016 size_t *len_out, 3017 CXLCCI *cci) 3018 { 3019 CXLUpdateDCExtentListInPl *in = (void *)payload_in; 3020 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 3021 CXLDCExtentList *extent_list = &ct3d->dc.extents; 3022 uint32_t i, num; 3023 uint64_t dpa, len; 3024 CXLRetCode ret; 3025 3026 if (len_in < sizeof(*in)) { 3027 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 3028 } 3029 3030 if (in->num_entries_updated == 0) { 3031 num = cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending); 3032 ct3d->dc.total_extent_count -= num; 3033 return CXL_MBOX_SUCCESS; 3034 } 3035 3036 if (len_in < 3037 sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) { 3038 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 3039 } 3040 3041 /* Adding extents causes exceeding device's extent tracking ability. */ 3042 if (in->num_entries_updated + ct3d->dc.total_extent_count > 3043 CXL_NUM_EXTENTS_SUPPORTED) { 3044 return CXL_MBOX_RESOURCES_EXHAUSTED; 3045 } 3046 3047 ret = cxl_detect_malformed_extent_list(ct3d, in); 3048 if (ret != CXL_MBOX_SUCCESS) { 3049 return ret; 3050 } 3051 3052 ret = cxl_dcd_add_dyn_cap_rsp_dry_run(ct3d, in); 3053 if (ret != CXL_MBOX_SUCCESS) { 3054 return ret; 3055 } 3056 3057 for (i = 0; i < in->num_entries_updated; i++) { 3058 dpa = in->updated_entries[i].start_dpa; 3059 len = in->updated_entries[i].len; 3060 3061 cxl_insert_extent_to_extent_list(extent_list, dpa, len, NULL, 0); 3062 ct3d->dc.total_extent_count += 1; 3063 ct3d->dc.nr_extents_accepted += 1; 3064 ct3_set_region_block_backed(ct3d, dpa, len); 3065 } 3066 /* Remove the first extent group in the pending list */ 3067 num = cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending); 3068 ct3d->dc.total_extent_count -= num; 3069 3070 return CXL_MBOX_SUCCESS; 3071 } 3072 3073 /* 3074 * Copy extent list from src to dst 3075 * Return value: number of extents copied 3076 */ 3077 static uint32_t copy_extent_list(CXLDCExtentList *dst, 3078 const CXLDCExtentList *src) 3079 { 3080 uint32_t cnt = 0; 3081 CXLDCExtent *ent; 3082 3083 if (!dst || !src) { 3084 return 0; 3085 } 3086 3087 QTAILQ_FOREACH(ent, src, node) { 3088 cxl_insert_extent_to_extent_list(dst, ent->start_dpa, ent->len, 3089 ent->tag, ent->shared_seq); 3090 cnt++; 3091 } 3092 return cnt; 3093 } 3094 3095 static CXLRetCode cxl_dc_extent_release_dry_run(CXLType3Dev *ct3d, 3096 const CXLUpdateDCExtentListInPl *in, CXLDCExtentList *updated_list, 3097 uint32_t *updated_list_size) 3098 { 3099 CXLDCExtent *ent, *ent_next; 3100 uint64_t dpa, len; 3101 uint32_t i; 3102 int cnt_delta = 0; 3103 CXLRetCode ret = CXL_MBOX_SUCCESS; 3104 3105 QTAILQ_INIT(updated_list); 3106 copy_extent_list(updated_list, &ct3d->dc.extents); 3107 3108 for (i = 0; i < in->num_entries_updated; i++) { 3109 Range range; 3110 3111 dpa = in->updated_entries[i].start_dpa; 3112 len = in->updated_entries[i].len; 3113 3114 /* Check if the DPA range is not fully backed with valid extents */ 3115 if (!ct3_test_region_block_backed(ct3d, dpa, len)) { 3116 ret = CXL_MBOX_INVALID_PA; 3117 goto free_and_exit; 3118 } 3119 3120 /* After this point, extent overflow is the only error can happen */ 3121 while (len > 0) { 3122 QTAILQ_FOREACH(ent, updated_list, node) { 3123 range_init_nofail(&range, ent->start_dpa, ent->len); 3124 3125 if (range_contains(&range, dpa)) { 3126 uint64_t len1, len2 = 0, len_done = 0; 3127 uint64_t ent_start_dpa = ent->start_dpa; 3128 uint64_t ent_len = ent->len; 3129 3130 len1 = dpa - ent->start_dpa; 3131 /* Found the extent or the subset of an existing extent */ 3132 if (range_contains(&range, dpa + len - 1)) { 3133 len2 = ent_start_dpa + ent_len - dpa - len; 3134 } else { 3135 dpa = ent_start_dpa + ent_len; 3136 } 3137 len_done = ent_len - len1 - len2; 3138 3139 cxl_remove_extent_from_extent_list(updated_list, ent); 3140 cnt_delta--; 3141 3142 if (len1) { 3143 cxl_insert_extent_to_extent_list(updated_list, 3144 ent_start_dpa, 3145 len1, NULL, 0); 3146 cnt_delta++; 3147 } 3148 if (len2) { 3149 cxl_insert_extent_to_extent_list(updated_list, 3150 dpa + len, 3151 len2, NULL, 0); 3152 cnt_delta++; 3153 } 3154 3155 if (cnt_delta + ct3d->dc.total_extent_count > 3156 CXL_NUM_EXTENTS_SUPPORTED) { 3157 ret = CXL_MBOX_RESOURCES_EXHAUSTED; 3158 goto free_and_exit; 3159 } 3160 3161 len -= len_done; 3162 break; 3163 } 3164 } 3165 } 3166 } 3167 free_and_exit: 3168 if (ret != CXL_MBOX_SUCCESS) { 3169 QTAILQ_FOREACH_SAFE(ent, updated_list, node, ent_next) { 3170 cxl_remove_extent_from_extent_list(updated_list, ent); 3171 } 3172 *updated_list_size = 0; 3173 } else { 3174 *updated_list_size = ct3d->dc.nr_extents_accepted + cnt_delta; 3175 } 3176 3177 return ret; 3178 } 3179 3180 /* 3181 * CXL r3.1 section 8.2.9.9.9.4: Release Dynamic Capacity (Opcode 4803h) 3182 */ 3183 static CXLRetCode cmd_dcd_release_dyn_cap(const struct cxl_cmd *cmd, 3184 uint8_t *payload_in, 3185 size_t len_in, 3186 uint8_t *payload_out, 3187 size_t *len_out, 3188 CXLCCI *cci) 3189 { 3190 CXLUpdateDCExtentListInPl *in = (void *)payload_in; 3191 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 3192 CXLDCExtentList updated_list; 3193 CXLDCExtent *ent, *ent_next; 3194 uint32_t updated_list_size; 3195 CXLRetCode ret; 3196 3197 if (len_in < sizeof(*in)) { 3198 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 3199 } 3200 3201 if (in->num_entries_updated == 0) { 3202 return CXL_MBOX_INVALID_INPUT; 3203 } 3204 3205 if (len_in < 3206 sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) { 3207 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 3208 } 3209 3210 ret = cxl_detect_malformed_extent_list(ct3d, in); 3211 if (ret != CXL_MBOX_SUCCESS) { 3212 return ret; 3213 } 3214 3215 ret = cxl_dc_extent_release_dry_run(ct3d, in, &updated_list, 3216 &updated_list_size); 3217 if (ret != CXL_MBOX_SUCCESS) { 3218 return ret; 3219 } 3220 3221 /* 3222 * If the dry run release passes, the returned updated_list will 3223 * be the updated extent list and we just need to clear the extents 3224 * in the accepted list and copy extents in the updated_list to accepted 3225 * list and update the extent count; 3226 */ 3227 QTAILQ_FOREACH_SAFE(ent, &ct3d->dc.extents, node, ent_next) { 3228 ct3_clear_region_block_backed(ct3d, ent->start_dpa, ent->len); 3229 cxl_remove_extent_from_extent_list(&ct3d->dc.extents, ent); 3230 } 3231 copy_extent_list(&ct3d->dc.extents, &updated_list); 3232 QTAILQ_FOREACH_SAFE(ent, &updated_list, node, ent_next) { 3233 ct3_set_region_block_backed(ct3d, ent->start_dpa, ent->len); 3234 cxl_remove_extent_from_extent_list(&updated_list, ent); 3235 } 3236 ct3d->dc.total_extent_count += (updated_list_size - 3237 ct3d->dc.nr_extents_accepted); 3238 3239 ct3d->dc.nr_extents_accepted = updated_list_size; 3240 3241 return CXL_MBOX_SUCCESS; 3242 } 3243 3244 /* CXL r3.2 section 7.6.7.6.1: Get DCD Info (Opcode 5600h) */ 3245 static CXLRetCode cmd_fm_get_dcd_info(const struct cxl_cmd *cmd, 3246 uint8_t *payload_in, 3247 size_t len_in, 3248 uint8_t *payload_out, 3249 size_t *len_out, 3250 CXLCCI *cci) 3251 { 3252 struct { 3253 uint8_t num_hosts; 3254 uint8_t num_regions_supported; 3255 uint8_t rsvd1[2]; 3256 uint16_t supported_add_sel_policy_bitmask; 3257 uint8_t rsvd2[2]; 3258 uint16_t supported_removal_policy_bitmask; 3259 uint8_t sanitize_on_release_bitmask; 3260 uint8_t rsvd3; 3261 uint64_t total_dynamic_capacity; 3262 uint64_t region_blk_size_bitmasks[8]; 3263 } QEMU_PACKED *out = (void *)payload_out; 3264 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 3265 CXLDCRegion *region; 3266 int i; 3267 3268 out->num_hosts = 1; 3269 out->num_regions_supported = ct3d->dc.num_regions; 3270 stw_le_p(&out->supported_add_sel_policy_bitmask, 3271 BIT(CXL_EXTENT_SELECTION_POLICY_PRESCRIPTIVE)); 3272 stw_le_p(&out->supported_removal_policy_bitmask, 3273 BIT(CXL_EXTENT_REMOVAL_POLICY_PRESCRIPTIVE)); 3274 out->sanitize_on_release_bitmask = 0; 3275 3276 stq_le_p(&out->total_dynamic_capacity, 3277 ct3d->dc.total_capacity / CXL_CAPACITY_MULTIPLIER); 3278 3279 for (i = 0; i < ct3d->dc.num_regions; i++) { 3280 region = &ct3d->dc.regions[i]; 3281 memcpy(&out->region_blk_size_bitmasks[i], 3282 ®ion->supported_blk_size_bitmask, 3283 sizeof(out->region_blk_size_bitmasks[i])); 3284 } 3285 3286 *len_out = sizeof(*out); 3287 return CXL_MBOX_SUCCESS; 3288 } 3289 3290 static void build_dsmas_flags(uint8_t *flags, CXLDCRegion *region) 3291 { 3292 *flags = 0; 3293 3294 if (region->nonvolatile) { 3295 *flags |= BIT(CXL_DSMAS_FLAGS_NONVOLATILE); 3296 } 3297 if (region->sharable) { 3298 *flags |= BIT(CXL_DSMAS_FLAGS_SHARABLE); 3299 } 3300 if (region->hw_managed_coherency) { 3301 *flags |= BIT(CXL_DSMAS_FLAGS_HW_MANAGED_COHERENCY); 3302 } 3303 if (region->ic_specific_dc_management) { 3304 *flags |= BIT(CXL_DSMAS_FLAGS_IC_SPECIFIC_DC_MANAGEMENT); 3305 } 3306 if (region->rdonly) { 3307 *flags |= BIT(CXL_DSMAS_FLAGS_RDONLY); 3308 } 3309 } 3310 3311 /* 3312 * CXL r3.2 section 7.6.7.6.2: 3313 * Get Host DC Region Configuration (Opcode 5601h) 3314 */ 3315 static CXLRetCode cmd_fm_get_host_dc_region_config(const struct cxl_cmd *cmd, 3316 uint8_t *payload_in, 3317 size_t len_in, 3318 uint8_t *payload_out, 3319 size_t *len_out, 3320 CXLCCI *cci) 3321 { 3322 struct { 3323 uint16_t host_id; 3324 uint8_t region_cnt; 3325 uint8_t start_rid; 3326 } QEMU_PACKED *in = (void *)payload_in; 3327 struct { 3328 uint16_t host_id; 3329 uint8_t num_regions; 3330 uint8_t regions_returned; 3331 struct { 3332 uint64_t base; 3333 uint64_t decode_len; 3334 uint64_t region_len; 3335 uint64_t block_size; 3336 uint8_t flags; 3337 uint8_t rsvd1[3]; 3338 uint8_t sanitize; 3339 uint8_t rsvd2[3]; 3340 } QEMU_PACKED records[]; 3341 } QEMU_PACKED *out = (void *)payload_out; 3342 struct { 3343 uint32_t num_extents_supported; 3344 uint32_t num_extents_available; 3345 uint32_t num_tags_supported; 3346 uint32_t num_tags_available; 3347 } QEMU_PACKED *extra_out; 3348 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 3349 uint16_t record_count, out_pl_len, i; 3350 3351 if (in->start_rid >= ct3d->dc.num_regions) { 3352 return CXL_MBOX_INVALID_INPUT; 3353 } 3354 record_count = MIN(ct3d->dc.num_regions - in->start_rid, in->region_cnt); 3355 3356 out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]); 3357 extra_out = (void *)out + out_pl_len; 3358 out_pl_len += sizeof(*extra_out); 3359 3360 assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE); 3361 3362 stw_le_p(&out->host_id, 0); 3363 out->num_regions = ct3d->dc.num_regions; 3364 out->regions_returned = record_count; 3365 3366 for (i = 0; i < record_count; i++) { 3367 stq_le_p(&out->records[i].base, 3368 ct3d->dc.regions[in->start_rid + i].base); 3369 stq_le_p(&out->records[i].decode_len, 3370 ct3d->dc.regions[in->start_rid + i].decode_len / 3371 CXL_CAPACITY_MULTIPLIER); 3372 stq_le_p(&out->records[i].region_len, 3373 ct3d->dc.regions[in->start_rid + i].len); 3374 stq_le_p(&out->records[i].block_size, 3375 ct3d->dc.regions[in->start_rid + i].block_size); 3376 build_dsmas_flags(&out->records[i].flags, 3377 &ct3d->dc.regions[in->start_rid + i]); 3378 /* Sanitize is bit 0 of flags. */ 3379 out->records[i].sanitize = 3380 ct3d->dc.regions[in->start_rid + i].flags & BIT(0); 3381 } 3382 3383 stl_le_p(&extra_out->num_extents_supported, CXL_NUM_EXTENTS_SUPPORTED); 3384 stl_le_p(&extra_out->num_extents_available, CXL_NUM_EXTENTS_SUPPORTED - 3385 ct3d->dc.total_extent_count); 3386 stl_le_p(&extra_out->num_tags_supported, CXL_NUM_TAGS_SUPPORTED); 3387 stl_le_p(&extra_out->num_tags_available, CXL_NUM_TAGS_SUPPORTED); 3388 3389 *len_out = out_pl_len; 3390 return CXL_MBOX_SUCCESS; 3391 } 3392 3393 static const struct cxl_cmd cxl_cmd_set[256][256] = { 3394 [INFOSTAT][BACKGROUND_OPERATION_ABORT] = { "BACKGROUND_OPERATION_ABORT", 3395 cmd_infostat_bg_op_abort, 0, 0 }, 3396 [EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS", 3397 cmd_events_get_records, 1, 0 }, 3398 [EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS", 3399 cmd_events_clear_records, ~0, CXL_MBOX_IMMEDIATE_LOG_CHANGE }, 3400 [EVENTS][GET_INTERRUPT_POLICY] = { "EVENTS_GET_INTERRUPT_POLICY", 3401 cmd_events_get_interrupt_policy, 0, 0 }, 3402 [EVENTS][SET_INTERRUPT_POLICY] = { "EVENTS_SET_INTERRUPT_POLICY", 3403 cmd_events_set_interrupt_policy, 3404 ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE }, 3405 [FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO", 3406 cmd_firmware_update_get_info, 0, 0 }, 3407 [FIRMWARE_UPDATE][TRANSFER] = { "FIRMWARE_UPDATE_TRANSFER", 3408 cmd_firmware_update_transfer, ~0, 3409 CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT }, 3410 [FIRMWARE_UPDATE][ACTIVATE] = { "FIRMWARE_UPDATE_ACTIVATE", 3411 cmd_firmware_update_activate, 2, 3412 CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT }, 3413 [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 }, 3414 [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 3415 8, CXL_MBOX_IMMEDIATE_POLICY_CHANGE }, 3416 [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 3417 0, 0 }, 3418 [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 }, 3419 [FEATURES][GET_SUPPORTED] = { "FEATURES_GET_SUPPORTED", 3420 cmd_features_get_supported, 0x8, 0 }, 3421 [FEATURES][GET_FEATURE] = { "FEATURES_GET_FEATURE", 3422 cmd_features_get_feature, 0x15, 0 }, 3423 [FEATURES][SET_FEATURE] = { "FEATURES_SET_FEATURE", 3424 cmd_features_set_feature, 3425 ~0, 3426 (CXL_MBOX_IMMEDIATE_CONFIG_CHANGE | 3427 CXL_MBOX_IMMEDIATE_DATA_CHANGE | 3428 CXL_MBOX_IMMEDIATE_POLICY_CHANGE | 3429 CXL_MBOX_IMMEDIATE_LOG_CHANGE | 3430 CXL_MBOX_SECURITY_STATE_CHANGE)}, 3431 [IDENTIFY][MEMORY_DEVICE] = { "IDENTIFY_MEMORY_DEVICE", 3432 cmd_identify_memory_device, 0, 0 }, 3433 [CCLS][GET_PARTITION_INFO] = { "CCLS_GET_PARTITION_INFO", 3434 cmd_ccls_get_partition_info, 0, 0 }, 3435 [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 }, 3436 [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa, 3437 ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE | CXL_MBOX_IMMEDIATE_DATA_CHANGE }, 3438 [HEALTH_INFO_ALERTS][GET_ALERT_CONFIG] = { 3439 "HEALTH_INFO_ALERTS_GET_ALERT_CONFIG", 3440 cmd_get_alert_config, 0, 0 }, 3441 [HEALTH_INFO_ALERTS][SET_ALERT_CONFIG] = { 3442 "HEALTH_INFO_ALERTS_SET_ALERT_CONFIG", 3443 cmd_set_alert_config, 12, CXL_MBOX_IMMEDIATE_POLICY_CHANGE }, 3444 [SANITIZE][OVERWRITE] = { "SANITIZE_OVERWRITE", cmd_sanitize_overwrite, 0, 3445 (CXL_MBOX_IMMEDIATE_DATA_CHANGE | 3446 CXL_MBOX_SECURITY_STATE_CHANGE | 3447 CXL_MBOX_BACKGROUND_OPERATION | 3448 CXL_MBOX_BACKGROUND_OPERATION_ABORT)}, 3449 [SANITIZE][MEDIA_OPERATIONS] = { "MEDIA_OPERATIONS", cmd_media_operations, 3450 ~0, 3451 (CXL_MBOX_IMMEDIATE_DATA_CHANGE | 3452 CXL_MBOX_BACKGROUND_OPERATION)}, 3453 [PERSISTENT_MEM][GET_SECURITY_STATE] = { "GET_SECURITY_STATE", 3454 cmd_get_security_state, 0, 0 }, 3455 [MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST", 3456 cmd_media_get_poison_list, 16, 0 }, 3457 [MEDIA_AND_POISON][INJECT_POISON] = { "MEDIA_AND_POISON_INJECT_POISON", 3458 cmd_media_inject_poison, 8, 0 }, 3459 [MEDIA_AND_POISON][CLEAR_POISON] = { "MEDIA_AND_POISON_CLEAR_POISON", 3460 cmd_media_clear_poison, 72, 0 }, 3461 [MEDIA_AND_POISON][GET_SCAN_MEDIA_CAPABILITIES] = { 3462 "MEDIA_AND_POISON_GET_SCAN_MEDIA_CAPABILITIES", 3463 cmd_media_get_scan_media_capabilities, 16, 0 }, 3464 [MEDIA_AND_POISON][SCAN_MEDIA] = { "MEDIA_AND_POISON_SCAN_MEDIA", 3465 cmd_media_scan_media, 17, 3466 (CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT)}, 3467 [MEDIA_AND_POISON][GET_SCAN_MEDIA_RESULTS] = { 3468 "MEDIA_AND_POISON_GET_SCAN_MEDIA_RESULTS", 3469 cmd_media_get_scan_media_results, 0, 0 }, 3470 }; 3471 3472 static const struct cxl_cmd cxl_cmd_set_dcd[256][256] = { 3473 [DCD_CONFIG][GET_DC_CONFIG] = { "DCD_GET_DC_CONFIG", 3474 cmd_dcd_get_dyn_cap_config, 2, 0 }, 3475 [DCD_CONFIG][GET_DYN_CAP_EXT_LIST] = { 3476 "DCD_GET_DYNAMIC_CAPACITY_EXTENT_LIST", cmd_dcd_get_dyn_cap_ext_list, 3477 8, 0 }, 3478 [DCD_CONFIG][ADD_DYN_CAP_RSP] = { 3479 "DCD_ADD_DYNAMIC_CAPACITY_RESPONSE", cmd_dcd_add_dyn_cap_rsp, 3480 ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE }, 3481 [DCD_CONFIG][RELEASE_DYN_CAP] = { 3482 "DCD_RELEASE_DYNAMIC_CAPACITY", cmd_dcd_release_dyn_cap, 3483 ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE }, 3484 }; 3485 3486 static const struct cxl_cmd cxl_cmd_set_sw[256][256] = { 3487 [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 }, 3488 [INFOSTAT][BACKGROUND_OPERATION_STATUS] = { "BACKGROUND_OPERATION_STATUS", 3489 cmd_infostat_bg_op_sts, 0, 0 }, 3490 [INFOSTAT][BACKGROUND_OPERATION_ABORT] = { "BACKGROUND_OPERATION_ABORT", 3491 cmd_infostat_bg_op_abort, 0, 0 }, 3492 [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 }, 3493 [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 8, 3494 CXL_MBOX_IMMEDIATE_POLICY_CHANGE }, 3495 [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0, 3496 0 }, 3497 [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 }, 3498 [PHYSICAL_SWITCH][IDENTIFY_SWITCH_DEVICE] = { "IDENTIFY_SWITCH_DEVICE", 3499 cmd_identify_switch_device, 0, 0 }, 3500 [PHYSICAL_SWITCH][GET_PHYSICAL_PORT_STATE] = { "SWITCH_PHYSICAL_PORT_STATS", 3501 cmd_get_physical_port_state, ~0, 0 }, 3502 [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND", 3503 cmd_tunnel_management_cmd, ~0, 0 }, 3504 }; 3505 3506 static const struct cxl_cmd cxl_cmd_set_fm_dcd[256][256] = { 3507 [FMAPI_DCD_MGMT][GET_DCD_INFO] = { "GET_DCD_INFO", 3508 cmd_fm_get_dcd_info, 0, 0 }, 3509 [FMAPI_DCD_MGMT][GET_HOST_DC_REGION_CONFIG] = { "GET_HOST_DC_REGION_CONFIG", 3510 cmd_fm_get_host_dc_region_config, 4, 0 }, 3511 }; 3512 3513 /* 3514 * While the command is executing in the background, the device should 3515 * update the percentage complete in the Background Command Status Register 3516 * at least once per second. 3517 */ 3518 3519 #define CXL_MBOX_BG_UPDATE_FREQ 1000UL 3520 3521 int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd, 3522 size_t len_in, uint8_t *pl_in, size_t *len_out, 3523 uint8_t *pl_out, bool *bg_started) 3524 { 3525 int ret; 3526 const struct cxl_cmd *cxl_cmd; 3527 opcode_handler h; 3528 CXLDeviceState *cxl_dstate; 3529 3530 *len_out = 0; 3531 cxl_cmd = &cci->cxl_cmd_set[set][cmd]; 3532 h = cxl_cmd->handler; 3533 if (!h) { 3534 qemu_log_mask(LOG_UNIMP, "Command %04xh not implemented\n", 3535 set << 8 | cmd); 3536 return CXL_MBOX_UNSUPPORTED; 3537 } 3538 3539 if (len_in != cxl_cmd->in && cxl_cmd->in != ~0) { 3540 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 3541 } 3542 3543 /* Only one bg command at a time */ 3544 if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) && 3545 cci->bg.runtime > 0) { 3546 return CXL_MBOX_BUSY; 3547 } 3548 3549 /* forbid any selected commands while the media is disabled */ 3550 if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) { 3551 cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate; 3552 3553 if (cxl_dev_media_disabled(cxl_dstate)) { 3554 if (h == cmd_events_get_records || 3555 h == cmd_ccls_get_partition_info || 3556 h == cmd_ccls_set_lsa || 3557 h == cmd_ccls_get_lsa || 3558 h == cmd_logs_get_log || 3559 h == cmd_media_get_poison_list || 3560 h == cmd_media_inject_poison || 3561 h == cmd_media_clear_poison || 3562 h == cmd_sanitize_overwrite || 3563 h == cmd_firmware_update_transfer || 3564 h == cmd_firmware_update_activate) { 3565 return CXL_MBOX_MEDIA_DISABLED; 3566 } 3567 } 3568 } 3569 3570 ret = (*h)(cxl_cmd, pl_in, len_in, pl_out, len_out, cci); 3571 if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) && 3572 ret == CXL_MBOX_BG_STARTED) { 3573 *bg_started = true; 3574 } else { 3575 *bg_started = false; 3576 } 3577 3578 /* Set bg and the return code */ 3579 if (*bg_started) { 3580 uint64_t now; 3581 3582 cci->bg.opcode = (set << 8) | cmd; 3583 3584 cci->bg.complete_pct = 0; 3585 cci->bg.aborted = false; 3586 cci->bg.ret_code = 0; 3587 3588 now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); 3589 cci->bg.starttime = now; 3590 timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ); 3591 } 3592 3593 return ret; 3594 } 3595 3596 static void bg_timercb(void *opaque) 3597 { 3598 CXLCCI *cci = opaque; 3599 uint64_t now, total_time; 3600 3601 qemu_mutex_lock(&cci->bg.lock); 3602 3603 now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); 3604 total_time = cci->bg.starttime + cci->bg.runtime; 3605 3606 if (now >= total_time) { /* we are done */ 3607 uint16_t ret = CXL_MBOX_SUCCESS; 3608 3609 cci->bg.complete_pct = 100; 3610 cci->bg.ret_code = ret; 3611 switch (cci->bg.opcode) { 3612 case 0x0201: /* fw transfer */ 3613 __do_firmware_xfer(cci); 3614 break; 3615 case 0x4400: /* sanitize */ 3616 { 3617 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 3618 3619 __do_sanitization(ct3d); 3620 cxl_dev_enable_media(&ct3d->cxl_dstate); 3621 } 3622 break; 3623 case 0x4402: /* Media Operations sanitize */ 3624 { 3625 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 3626 __do_sanitize(ct3d); 3627 } 3628 break; 3629 case 0x4304: /* scan media */ 3630 { 3631 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 3632 3633 __do_scan_media(ct3d); 3634 break; 3635 } 3636 default: 3637 __builtin_unreachable(); 3638 break; 3639 } 3640 } else { 3641 /* estimate only */ 3642 cci->bg.complete_pct = 3643 100 * (now - cci->bg.starttime) / cci->bg.runtime; 3644 timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ); 3645 } 3646 3647 if (cci->bg.complete_pct == 100) { 3648 /* TODO: generalize to switch CCI */ 3649 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 3650 CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; 3651 PCIDevice *pdev = PCI_DEVICE(cci->d); 3652 3653 cci->bg.starttime = 0; 3654 /* registers are updated, allow new bg-capable cmds */ 3655 cci->bg.runtime = 0; 3656 3657 if (msix_enabled(pdev)) { 3658 msix_notify(pdev, cxl_dstate->mbox_msi_n); 3659 } else if (msi_enabled(pdev)) { 3660 msi_notify(pdev, cxl_dstate->mbox_msi_n); 3661 } 3662 } 3663 3664 qemu_mutex_unlock(&cci->bg.lock); 3665 } 3666 3667 static void cxl_rebuild_cel(CXLCCI *cci) 3668 { 3669 cci->cel_size = 0; /* Reset for a fresh build */ 3670 for (int set = 0; set < 256; set++) { 3671 for (int cmd = 0; cmd < 256; cmd++) { 3672 if (cci->cxl_cmd_set[set][cmd].handler) { 3673 const struct cxl_cmd *c = &cci->cxl_cmd_set[set][cmd]; 3674 struct cel_log *log = 3675 &cci->cel_log[cci->cel_size]; 3676 3677 log->opcode = (set << 8) | cmd; 3678 log->effect = c->effect; 3679 cci->cel_size++; 3680 } 3681 } 3682 } 3683 } 3684 3685 void cxl_init_cci(CXLCCI *cci, size_t payload_max) 3686 { 3687 cci->payload_max = payload_max; 3688 cxl_rebuild_cel(cci); 3689 3690 cci->bg.complete_pct = 0; 3691 cci->bg.starttime = 0; 3692 cci->bg.runtime = 0; 3693 cci->bg.aborted = false; 3694 cci->bg.timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, 3695 bg_timercb, cci); 3696 qemu_mutex_init(&cci->bg.lock); 3697 3698 memset(&cci->fw, 0, sizeof(cci->fw)); 3699 cci->fw.active_slot = 1; 3700 cci->fw.slot[cci->fw.active_slot - 1] = true; 3701 cci->initialized = true; 3702 } 3703 3704 void cxl_destroy_cci(CXLCCI *cci) 3705 { 3706 qemu_mutex_destroy(&cci->bg.lock); 3707 cci->initialized = false; 3708 } 3709 3710 static void cxl_copy_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmds)[256]) 3711 { 3712 for (int set = 0; set < 256; set++) { 3713 for (int cmd = 0; cmd < 256; cmd++) { 3714 if (cxl_cmds[set][cmd].handler) { 3715 cci->cxl_cmd_set[set][cmd] = cxl_cmds[set][cmd]; 3716 } 3717 } 3718 } 3719 } 3720 3721 void cxl_add_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmd_set)[256], 3722 size_t payload_max) 3723 { 3724 cci->payload_max = MAX(payload_max, cci->payload_max); 3725 cxl_copy_cci_commands(cci, cxl_cmd_set); 3726 cxl_rebuild_cel(cci); 3727 } 3728 3729 void cxl_initialize_mailbox_swcci(CXLCCI *cci, DeviceState *intf, 3730 DeviceState *d, size_t payload_max) 3731 { 3732 cxl_copy_cci_commands(cci, cxl_cmd_set_sw); 3733 cci->d = d; 3734 cci->intf = intf; 3735 cxl_init_cci(cci, payload_max); 3736 } 3737 3738 void cxl_initialize_mailbox_t3(CXLCCI *cci, DeviceState *d, size_t payload_max) 3739 { 3740 CXLType3Dev *ct3d = CXL_TYPE3(d); 3741 3742 cxl_copy_cci_commands(cci, cxl_cmd_set); 3743 if (ct3d->dc.num_regions) { 3744 cxl_copy_cci_commands(cci, cxl_cmd_set_dcd); 3745 } 3746 cci->d = d; 3747 3748 /* No separation for PCI MB as protocol handled in PCI device */ 3749 cci->intf = d; 3750 cxl_init_cci(cci, payload_max); 3751 } 3752 3753 static const struct cxl_cmd cxl_cmd_set_t3_ld[256][256] = { 3754 [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 }, 3755 [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0, 3756 0 }, 3757 [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 }, 3758 }; 3759 3760 void cxl_initialize_t3_ld_cci(CXLCCI *cci, DeviceState *d, DeviceState *intf, 3761 size_t payload_max) 3762 { 3763 cxl_copy_cci_commands(cci, cxl_cmd_set_t3_ld); 3764 cci->d = d; 3765 cci->intf = intf; 3766 cxl_init_cci(cci, payload_max); 3767 } 3768 3769 static const struct cxl_cmd cxl_cmd_set_t3_fm_owned_ld_mctp[256][256] = { 3770 [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0}, 3771 [INFOSTAT][GET_RESPONSE_MSG_LIMIT] = { "GET_RESPONSE_MSG_LIMIT", 3772 cmd_get_response_msg_limit, 0, 0 }, 3773 [INFOSTAT][SET_RESPONSE_MSG_LIMIT] = { "SET_RESPONSE_MSG_LIMIT", 3774 cmd_set_response_msg_limit, 1, 0 }, 3775 [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0, 3776 0 }, 3777 [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 }, 3778 [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 }, 3779 [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND", 3780 cmd_tunnel_management_cmd, ~0, 0 }, 3781 }; 3782 3783 void cxl_initialize_t3_fm_owned_ld_mctpcci(CXLCCI *cci, DeviceState *d, 3784 DeviceState *intf, 3785 size_t payload_max) 3786 { 3787 CXLType3Dev *ct3d = CXL_TYPE3(d); 3788 3789 cxl_copy_cci_commands(cci, cxl_cmd_set_t3_fm_owned_ld_mctp); 3790 if (ct3d->dc.num_regions) { 3791 cxl_copy_cci_commands(cci, cxl_cmd_set_fm_dcd); 3792 } 3793 cci->d = d; 3794 cci->intf = intf; 3795 cxl_init_cci(cci, payload_max); 3796 } 3797