/*
 * CXL Utility library for mailbox interface
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/cxl/cxl.h"
#include "hw/cxl/cxl_events.h"
#include "hw/cxl/cxl_mailbox.h"
#include "hw/pci/pci.h"
#include "hw/pci-bridge/cxl_upstream_port.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "qemu/units.h"
#include "qemu/uuid.h"
#include "system/hostmem.h"
#include "qemu/range.h"

#define CXL_CAPACITY_MULTIPLIER   (256 * MiB)
#define CXL_DC_EVENT_LOG_SIZE 8
#define CXL_NUM_EXTENTS_SUPPORTED 512
#define CXL_NUM_TAGS_SUPPORTED 0

/*
 * How to add a new command, by example: the command set FOO, with cmd BAR.
 *  1. Add the command set and cmd to the enum.
 *     FOO    = 0x7f,
 *        #define BAR 0
 *  2. Declare the handler
 *     static CXLRetCode cmd_foo_bar(struct cxl_cmd *cmd,
 *                                   CXLDeviceState *cxl_dstate, uint16_t *len)
 *  3. Add the command to the cxl_cmd_set[][]
 *     [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y },
 *  4. Implement your handler
 *     define_mailbox_handler(FOO_BAR) { ... return CXL_MBOX_SUCCESS; }
 *
 *
 *  Writing the handler:
 *    The handler will provide the &struct cxl_cmd, the &CXLDeviceState, and
 *    the in/out length of the payload. The handler is responsible for
 *    consuming the payload from cmd->payload and operating upon it as
 *    necessary. It must then fill the output data into cmd->payload
 *    (overwriting what was there), setting the length, and returning a valid
 *    return code.
 *
 *  XXX: The handler need not worry about endianness. The payload is read out
 *  of a register interface that already deals with it.
 */

enum {
    INFOSTAT    = 0x00,
        #define IS_IDENTIFY   0x1
        #define BACKGROUND_OPERATION_STATUS    0x2
        #define BACKGROUND_OPERATION_ABORT     0x5
    EVENTS      = 0x01,
        #define GET_RECORDS   0x0
        #define CLEAR_RECORDS    0x1
        #define GET_INTERRUPT_POLICY   0x2
        #define SET_INTERRUPT_POLICY   0x3
    FIRMWARE_UPDATE = 0x02,
        #define GET_INFO      0x0
        #define TRANSFER      0x1
        #define ACTIVATE      0x2
    TIMESTAMP   = 0x03,
        #define GET           0x0
        #define SET           0x1
    LOGS        = 0x04,
        #define GET_SUPPORTED 0x0
        #define GET_LOG       0x1
    FEATURES    = 0x05,
        #define GET_SUPPORTED 0x0
        #define GET_FEATURE   0x1
        #define SET_FEATURE   0x2
    IDENTIFY    = 0x40,
        #define MEMORY_DEVICE 0x0
    CCLS        = 0x41,
        #define GET_PARTITION_INFO     0x0
        #define GET_LSA       0x2
        #define SET_LSA       0x3
    SANITIZE    = 0x44,
        #define OVERWRITE     0x0
        #define SECURE_ERASE  0x1
    PERSISTENT_MEM = 0x45,
        #define GET_SECURITY_STATE     0x0
    MEDIA_AND_POISON = 0x43,
        #define GET_POISON_LIST        0x0
        #define INJECT_POISON          0x1
        #define CLEAR_POISON           0x2
        #define GET_SCAN_MEDIA_CAPABILITIES 0x3
        #define SCAN_MEDIA             0x4
        #define GET_SCAN_MEDIA_RESULTS 0x5
    DCD_CONFIG  = 0x48,
        #define GET_DC_CONFIG          0x0
        #define GET_DYN_CAP_EXT_LIST   0x1
        #define ADD_DYN_CAP_RSP        0x2
        #define RELEASE_DYN_CAP        0x3
    PHYSICAL_SWITCH = 0x51,
        #define IDENTIFY_SWITCH_DEVICE      0x0
        #define GET_PHYSICAL_PORT_STATE     0x1
    TUNNEL = 0x53,
        #define MANAGEMENT_COMMAND     0x0
};
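
/*
 * Illustrative only (not used by the dispatch code): the full 16-bit mailbox
 * opcode is the command set in the high byte and the command in the low
 * byte, which is why cmd_infostat_bg_op_abort() below splits cci->bg.opcode
 * with ">> 8" and "& 0xff". For example:
 *
 *     (PHYSICAL_SWITCH << 8) | IDENTIFY_SWITCH_DEVICE == 0x5100
 *
 * matching the "Opcode 5100h" spec references in the handler comments.
 */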

/* CCI Message Format CXL r3.1 Figure 7-19 */
typedef struct CXLCCIMessage {
    uint8_t category;
#define CXL_CCI_CAT_REQ 0
#define CXL_CCI_CAT_RSP 1
    uint8_t tag;
    uint8_t resv1;
    uint8_t command;
    uint8_t command_set;
    uint8_t pl_length[3];
    uint16_t rc;
    uint16_t vendor_specific;
    uint8_t payload[];
} QEMU_PACKED CXLCCIMessage;

/* This command is only defined to an MLD FM Owned LD or an MHD */
static CXLRetCode cmd_tunnel_management_cmd(const struct cxl_cmd *cmd,
                                            uint8_t *payload_in,
                                            size_t len_in,
                                            uint8_t *payload_out,
                                            size_t *len_out,
                                            CXLCCI *cci)
{
    PCIDevice *tunnel_target;
    CXLCCI *target_cci;
    struct {
        uint8_t port_or_ld_id;
        uint8_t target_type;
        uint16_t size;
        CXLCCIMessage ccimessage;
    } QEMU_PACKED *in;
    struct {
        uint16_t resp_len;
        uint8_t resv[2];
        CXLCCIMessage ccimessage;
    } QEMU_PACKED *out;
    size_t pl_length, length_out;
    bool bg_started;
    int rc;

    if (cmd->in < sizeof(*in)) {
        return CXL_MBOX_INVALID_INPUT;
    }
    in = (void *)payload_in;
    out = (void *)payload_out;

    if (len_in < sizeof(*in)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /* Enough room for minimum sized message - no payload */
    if (in->size < sizeof(in->ccimessage)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /* Length of input payload should be in->size + a wrapping tunnel header */
    if (in->size != len_in - offsetof(typeof(*out), ccimessage)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    if (in->ccimessage.category != CXL_CCI_CAT_REQ) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (in->target_type != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "Tunneled Command sent to non-existent FM-LD");
        return CXL_MBOX_INVALID_INPUT;
    }

    /*
     * Target of a tunnel unfortunately depends on the type of CCI reading
     * the message.
     * If in a switch, then it's the port number.
     * If in an MLD it is the LD number.
     * If in an MHD, the target type indicates where we are going.
     */
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

        if (in->port_or_ld_id != 0) {
            /* Only pretending to have one for now! */
            return CXL_MBOX_INVALID_INPUT;
        }
        target_cci = &ct3d->ld0_cci;
    } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
        CXLUpstreamPort *usp = CXL_USP(cci->d);

        tunnel_target = pcie_find_port_by_pn(&PCI_BRIDGE(usp)->sec_bus,
                                             in->port_or_ld_id);
        if (!tunnel_target) {
            return CXL_MBOX_INVALID_INPUT;
        }
        tunnel_target =
            pci_bridge_get_sec_bus(PCI_BRIDGE(tunnel_target))->devices[0];
        if (!tunnel_target) {
            return CXL_MBOX_INVALID_INPUT;
        }
        if (object_dynamic_cast(OBJECT(tunnel_target), TYPE_CXL_TYPE3)) {
            CXLType3Dev *ct3d = CXL_TYPE3(tunnel_target);

            /* Tunneled VDMs always land on FM Owned LD */
            target_cci = &ct3d->vdm_fm_owned_ld_mctp_cci;
        } else {
            return CXL_MBOX_INVALID_INPUT;
        }
    } else {
        return CXL_MBOX_INVALID_INPUT;
    }

    pl_length = in->ccimessage.pl_length[2] << 16 |
        in->ccimessage.pl_length[1] << 8 | in->ccimessage.pl_length[0];
    rc = cxl_process_cci_message(target_cci,
                                 in->ccimessage.command_set,
                                 in->ccimessage.command,
                                 pl_length, in->ccimessage.payload,
                                 &length_out, out->ccimessage.payload,
                                 &bg_started);
    /* Payload should be in place. Rest of the CCI header needs filling */
    out->resp_len = length_out + sizeof(CXLCCIMessage);
    st24_le_p(out->ccimessage.pl_length, length_out);
    out->ccimessage.rc = rc;
    out->ccimessage.category = CXL_CCI_CAT_RSP;
    out->ccimessage.command = in->ccimessage.command;
    out->ccimessage.command_set = in->ccimessage.command_set;
    out->ccimessage.tag = in->ccimessage.tag;
    *len_out = length_out + sizeof(*out);

    return CXL_MBOX_SUCCESS;
}
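
/*
 * Illustrative framing for the tunnel command above, assuming a Type 3 MLD
 * target (LD 0) and an inner request carrying an n-byte payload. The 4-byte
 * tunnel header wraps a complete CXLCCIMessage:
 *
 *     in->port_or_ld_id = 0;                    LD number within the MLD
 *     in->target_type   = 0;                    0: port or LD
 *     in->size          = sizeof(CXLCCIMessage) + n;
 *     len_in            = in->size + 4;         the check enforced above
 *
 * The inner message's 3-byte pl_length field holds n little-endian, which is
 * what the pl_length[] shifts above reassemble.
 */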

static CXLRetCode cmd_events_get_records(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in, size_t len_in,
                                         uint8_t *payload_out, size_t *len_out,
                                         CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLGetEventPayload *pl;
    uint8_t log_type;
    int max_recs;

    if (cmd->in < sizeof(log_type)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    log_type = payload_in[0];

    pl = (CXLGetEventPayload *)payload_out;

    max_recs = (cxlds->payload_size - CXL_EVENT_PAYLOAD_HDR_SIZE) /
                CXL_EVENT_RECORD_SIZE;
    if (max_recs > 0xFFFF) {
        max_recs = 0xFFFF;
    }

    return cxl_event_get_records(cxlds, pl, log_type, max_recs, len_out);
}
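
/*
 * Worked example for the record-count clamp above (the sizes are
 * illustrative assumptions, not definitions from this file): with a 2 KiB
 * mailbox payload, a 0x20-byte Get Event Records header and 0x80-byte event
 * records,
 *
 *     max_recs = (2048 - 0x20) / 0x80 = 15
 *
 * so at most 15 records come back per invocation, and the count can never
 * exceed the 16-bit limit of 0xFFFF.
 */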

static CXLRetCode cmd_events_clear_records(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLClearEventPayload *pl;

    pl = (CXLClearEventPayload *)payload_in;

    if (len_in < sizeof(*pl) ||
        len_in < sizeof(*pl) + sizeof(*pl->handle) * pl->nr_recs) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    *len_out = 0;
    return cxl_event_clear_records(cxlds, pl);
}

static CXLRetCode cmd_events_get_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    policy = (CXLEventInterruptPolicy *)payload_out;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    if (log->irq_enabled) {
        policy->info_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    if (log->irq_enabled) {
        policy->warn_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    if (log->irq_enabled) {
        policy->failure_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    if (log->irq_enabled) {
        policy->fatal_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    if (log->irq_enabled) {
        /* Dynamic Capacity borrows the same vector as info */
        policy->dyn_cap_settings = CXL_INT_MSI_MSIX;
    }

    *len_out = sizeof(*policy);
    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cmd_events_set_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    if (len_in < CXL_EVENT_INT_SETTING_MIN_LEN) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    policy = (CXLEventInterruptPolicy *)payload_in;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    log->irq_enabled = (policy->info_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    log->irq_enabled = (policy->warn_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    log->irq_enabled = (policy->failure_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    log->irq_enabled = (policy->fatal_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    /* DCD is optional */
    if (len_in < sizeof(*policy)) {
        return CXL_MBOX_SUCCESS;
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    log->irq_enabled = (policy->dyn_cap_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.1.1: Identify (Opcode 0001h) */
static CXLRetCode cmd_infostat_identify(const struct cxl_cmd *cmd,
                                        uint8_t *payload_in,
                                        size_t len_in,
                                        uint8_t *payload_out,
                                        size_t *len_out,
                                        CXLCCI *cci)
{
    PCIDeviceClass *class = PCI_DEVICE_GET_CLASS(cci->d);
    struct {
        uint16_t pcie_vid;
        uint16_t pcie_did;
        uint16_t pcie_subsys_vid;
        uint16_t pcie_subsys_id;
        uint64_t sn;
        uint8_t max_message_size;
        uint8_t component_type;
    } QEMU_PACKED *is_identify;
    QEMU_BUILD_BUG_ON(sizeof(*is_identify) != 18);

    is_identify = (void *)payload_out;
    is_identify->pcie_vid = class->vendor_id;
    is_identify->pcie_did = class->device_id;
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
        is_identify->sn = CXL_USP(cci->d)->sn;
        /* Subsystem info not defined for a USP */
        is_identify->pcie_subsys_vid = 0;
        is_identify->pcie_subsys_id = 0;
        is_identify->component_type = 0x0; /* Switch */
    } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        PCIDevice *pci_dev = PCI_DEVICE(cci->d);

        is_identify->sn = CXL_TYPE3(cci->d)->sn;
        /*
         * We can't always use class->subsystem_vendor_id as
         * it is not set if the defaults are used.
         */
        is_identify->pcie_subsys_vid =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID);
        is_identify->pcie_subsys_id =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_ID);
        is_identify->component_type = 0x3; /* Type 3 */
    }

    /* TODO: Allow this to vary across different CCIs */
    is_identify->max_message_size = 9; /* 512 bytes - MCTP_CXL_MAILBOX_BYTES */
    *len_out = sizeof(*is_identify);
    return CXL_MBOX_SUCCESS;
}
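
/*
 * Note on max_message_size above (a worked example, based on the payload
 * field being a power-of-two exponent rather than a byte count): the value 9
 * advertises
 *
 *     1 << 9 == 512 bytes  (MCTP_CXL_MAILBOX_BYTES)
 */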

static void cxl_set_dsp_active_bm(PCIBus *b, PCIDevice *d,
                                  void *private)
{
    uint8_t *bm = private;

    if (object_dynamic_cast(OBJECT(d), TYPE_CXL_DSP)) {
        uint8_t port = PCIE_PORT(d)->port;

        bm[port / 8] |= 1 << (port % 8);
    }
}

/* CXL r3.1 Section 7.6.7.1.1: Identify Switch Device (Opcode 5100h) */
static CXLRetCode cmd_identify_switch_device(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    PCIEPort *usp = PCIE_PORT(cci->d);
    PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
    int num_phys_ports = pcie_count_ds_ports(bus);

    struct cxl_fmapi_ident_switch_dev_resp_pl {
        uint8_t ingress_port_id;
        uint8_t rsvd;
        uint8_t num_physical_ports;
        uint8_t num_vcss;
        uint8_t active_port_bitmask[0x20];
        uint8_t active_vcs_bitmask[0x20];
        uint16_t total_vppbs;
        uint16_t bound_vppbs;
        uint8_t num_hdm_decoders_per_usp;
    } QEMU_PACKED *out;
    QEMU_BUILD_BUG_ON(sizeof(*out) != 0x49);

    out = (struct cxl_fmapi_ident_switch_dev_resp_pl *)payload_out;
    *out = (struct cxl_fmapi_ident_switch_dev_resp_pl) {
        .num_physical_ports = num_phys_ports + 1, /* 1 USP */
        .num_vcss = 1, /* No support yet for multiple VCSs - potentially tricky */
        .active_vcs_bitmask[0] = 0x1,
        .total_vppbs = num_phys_ports + 1,
        .bound_vppbs = num_phys_ports + 1,
        .num_hdm_decoders_per_usp = 4,
    };

    /* Depends on the CCI type */
    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_PCIE_PORT)) {
        out->ingress_port_id = PCIE_PORT(cci->intf)->port;
    } else {
        /* MCTP? */
        out->ingress_port_id = 0;
    }

    pci_for_each_device_under_bus(bus, cxl_set_dsp_active_bm,
                                  out->active_port_bitmask);
    out->active_port_bitmask[usp->port / 8] |= (1 << usp->port % 8);

    *len_out = sizeof(*out);

    return CXL_MBOX_SUCCESS;
}
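
/*
 * The active port bitmask filled in above by cxl_set_dsp_active_bm() is a
 * plain bit-per-port array. Worked example (the port numbers are
 * illustrative): port 5 sets bit 5 of byte 0, port 10 sets bit 2 of byte 1:
 *
 *     bm[5 / 8]  |= 1 << (5 % 8);     i.e. bm[0] |= 0x20
 *     bm[10 / 8] |= 1 << (10 % 8);    i.e. bm[1] |= 0x04
 */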

/* CXL r3.1 Section 7.6.7.1.2: Get Physical Port State (Opcode 5101h) */
static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    /* CXL r3.1 Table 7-17: Get Physical Port State Request Payload */
    struct cxl_fmapi_get_phys_port_state_req_pl {
        uint8_t num_ports;
        uint8_t ports[];
    } QEMU_PACKED *in;

    /*
     * CXL r3.1 Table 7-19: Get Physical Port State Port Information Block
     * Format
     */
    struct cxl_fmapi_port_state_info_block {
        uint8_t port_id;
        uint8_t config_state;
        uint8_t connected_device_cxl_version;
        uint8_t rsv1;
        uint8_t connected_device_type;
        uint8_t port_cxl_version_bitmask;
        uint8_t max_link_width;
        uint8_t negotiated_link_width;
        uint8_t supported_link_speeds_vector;
        uint8_t max_link_speed;
        uint8_t current_link_speed;
        uint8_t ltssm_state;
        uint8_t first_lane_num;
        uint16_t link_state;
        uint8_t supported_ld_count;
    } QEMU_PACKED;

    /* CXL r3.1 Table 7-18: Get Physical Port State Response Payload */
    struct cxl_fmapi_get_phys_port_state_resp_pl {
        uint8_t num_ports;
        uint8_t rsv1[3];
        struct cxl_fmapi_port_state_info_block ports[];
    } QEMU_PACKED *out;
    PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
    PCIEPort *usp = PCIE_PORT(cci->d);
    size_t pl_size;
    int i;

    in = (struct cxl_fmapi_get_phys_port_state_req_pl *)payload_in;
    out = (struct cxl_fmapi_get_phys_port_state_resp_pl *)payload_out;

    if (len_in < sizeof(*in)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /* Check if what was requested can fit */
    if (sizeof(*out) + sizeof(*out->ports) * in->num_ports > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* For success there should be a match for each requested */
    out->num_ports = in->num_ports;

    for (i = 0; i < in->num_ports; i++) {
        struct cxl_fmapi_port_state_info_block *port;
        /* First try to match on downstream port */
        PCIDevice *port_dev;
        uint16_t lnkcap, lnkcap2, lnksta;

        port = &out->ports[i];

        port_dev = pcie_find_port_by_pn(bus, in->ports[i]);
        if (port_dev) { /* DSP */
            PCIDevice *ds_dev = pci_bridge_get_sec_bus(PCI_BRIDGE(port_dev))
                ->devices[0];

            port->config_state = 3;
            if (ds_dev) {
                if (object_dynamic_cast(OBJECT(ds_dev), TYPE_CXL_TYPE3)) {
                    port->connected_device_type = 5; /* Assume MLD for now */
                } else {
                    port->connected_device_type = 1;
                }
            } else {
                port->connected_device_type = 0;
            }
            port->supported_ld_count = 3;
        } else if (usp->port == in->ports[i]) { /* USP */
            port_dev = PCI_DEVICE(usp);
            port->config_state = 4;
            port->connected_device_type = 0;
        } else {
            return CXL_MBOX_INVALID_INPUT;
        }

        port->port_id = in->ports[i];
        /* Information on status of this port in lnksta, lnkcap */
        if (!port_dev->exp.exp_cap) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
        lnksta = port_dev->config_read(port_dev,
                                       port_dev->exp.exp_cap + PCI_EXP_LNKSTA,
                                       sizeof(lnksta));
        lnkcap = port_dev->config_read(port_dev,
                                       port_dev->exp.exp_cap + PCI_EXP_LNKCAP,
                                       sizeof(lnkcap));
        lnkcap2 = port_dev->config_read(port_dev,
                                        port_dev->exp.exp_cap + PCI_EXP_LNKCAP2,
                                        sizeof(lnkcap2));

        port->max_link_width = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
        port->negotiated_link_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> 4;
        /* No definition for SLS field in linux/pci_regs.h */
        port->supported_link_speeds_vector = (lnkcap2 & 0xFE) >> 1;
        port->max_link_speed = lnkcap & PCI_EXP_LNKCAP_SLS;
        port->current_link_speed = lnksta & PCI_EXP_LNKSTA_CLS;
        /* TODO: Track down if we can get the rest of the info */
        port->ltssm_state = 0x7;
        port->first_lane_num = 0;
        port->link_state = 0;
        port->port_cxl_version_bitmask = 0x2;
        port->connected_device_cxl_version = 0x2;
    }

    pl_size = sizeof(*out) + sizeof(*out->ports) * in->num_ports;
    *len_out = pl_size;

    return CXL_MBOX_SUCCESS;
}
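
/*
 * Sketch of the link-register decode used above (a worked example with an
 * assumed register value, not part of the model): LNKCAP bits [9:4] carry
 * the maximum link width and bits [3:0] the maximum speed, so for a
 * hypothetical lnkcap of 0x0042,
 *
 *     (0x0042 & PCI_EXP_LNKCAP_MLW) >> 4  == 4   (x4 link)
 *     0x0042 & PCI_EXP_LNKCAP_SLS         == 2   (5 GT/s)
 */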

/* CXL r3.1 Section 8.2.9.1.2: Background Operation Status (Opcode 0002h) */
static CXLRetCode cmd_infostat_bg_op_sts(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint8_t status;
        uint8_t rsvd;
        uint16_t opcode;
        uint16_t returncode;
        uint16_t vendor_ext_status;
    } QEMU_PACKED *bg_op_status;
    QEMU_BUILD_BUG_ON(sizeof(*bg_op_status) != 8);

    bg_op_status = (void *)payload_out;
    bg_op_status->status = cci->bg.complete_pct << 1;
    if (cci->bg.runtime > 0) {
        bg_op_status->status |= 1U << 0;
    }
    bg_op_status->opcode = cci->bg.opcode;
    bg_op_status->returncode = cci->bg.ret_code;
    *len_out = sizeof(*bg_op_status);

    return CXL_MBOX_SUCCESS;
}
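
/*
 * Worked example of the status byte built above: bit 0 is the "operation in
 * progress" flag and bits [7:1] hold the percentage complete, so a running
 * operation at 30% reports
 *
 *     status = (30 << 1) | 1  ==  0x3d
 */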

/*
 * CXL r3.1 Section 8.2.9.1.5:
 * Request Abort Background Operation (Opcode 0005h)
 */
static CXLRetCode cmd_infostat_bg_op_abort(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    int bg_set = cci->bg.opcode >> 8;
    int bg_cmd = cci->bg.opcode & 0xff;
    const struct cxl_cmd *bg_c = &cci->cxl_cmd_set[bg_set][bg_cmd];

    if (!(bg_c->effect & CXL_MBOX_BACKGROUND_OPERATION_ABORT)) {
        return CXL_MBOX_REQUEST_ABORT_NOTSUP;
    }

    qemu_mutex_lock(&cci->bg.lock);
    if (cci->bg.runtime) {
        /* If the operation is near complete, let it finish */
        if (cci->bg.complete_pct < 85) {
            timer_del(cci->bg.timer);
            cci->bg.ret_code = CXL_MBOX_ABORTED;
            cci->bg.starttime = 0;
            cci->bg.runtime = 0;
            cci->bg.aborted = true;
        }
    }
    qemu_mutex_unlock(&cci->bg.lock);

    return CXL_MBOX_SUCCESS;
}

#define CXL_FW_SLOTS 2
#define CXL_FW_SIZE  0x02000000 /* 32 MiB */

/* CXL r3.1 Section 8.2.9.3.1: Get FW Info (Opcode 0200h) */
static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    struct {
        uint8_t slots_supported;
        uint8_t slot_info;
        uint8_t caps;
        uint8_t rsvd[0xd];
        char fw_rev1[0x10];
        char fw_rev2[0x10];
        char fw_rev3[0x10];
        char fw_rev4[0x10];
    } QEMU_PACKED *fw_info;
    QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50);

    if (!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER) ||
        !QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER) ||
        !QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER)) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    fw_info = (void *)payload_out;

    fw_info->slots_supported = CXL_FW_SLOTS;
    fw_info->slot_info = (cci->fw.active_slot & 0x7) |
                         ((cci->fw.staged_slot & 0x7) << 3);
    fw_info->caps = BIT(0); /* online update supported */

    if (cci->fw.slot[0]) {
        pstrcpy(fw_info->fw_rev1, sizeof(fw_info->fw_rev1), "BWFW VERSION 0");
    }
    if (cci->fw.slot[1]) {
        pstrcpy(fw_info->fw_rev2, sizeof(fw_info->fw_rev2), "BWFW VERSION 1");
    }

    *len_out = sizeof(*fw_info);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.3.2: Transfer FW (Opcode 0201h) */
#define CXL_FW_XFER_ALIGNMENT 128

#define CXL_FW_XFER_ACTION_FULL     0x0
#define CXL_FW_XFER_ACTION_INIT     0x1
#define CXL_FW_XFER_ACTION_CONTINUE 0x2
#define CXL_FW_XFER_ACTION_END      0x3
#define CXL_FW_XFER_ACTION_ABORT    0x4

static CXLRetCode cmd_firmware_update_transfer(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    struct {
        uint8_t action;
        uint8_t slot;
        uint8_t rsvd1[2];
        uint32_t offset;
        uint8_t rsvd2[0x78];
        uint8_t data[];
    } QEMU_PACKED *fw_transfer = (void *)payload_in;
    size_t offset, length;

    if (len < sizeof(*fw_transfer)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (fw_transfer->action == CXL_FW_XFER_ACTION_ABORT) {
        /*
         * At this point there aren't any on-going transfers
         * running in the bg - this is serialized before this
         * call altogether. Just mark the state machine and
         * disregard any other input.
         */
        cci->fw.transferring = false;
        return CXL_MBOX_SUCCESS;
    }

    offset = fw_transfer->offset * CXL_FW_XFER_ALIGNMENT;
    length = len - sizeof(*fw_transfer);
    if (offset + length > CXL_FW_SIZE) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (cci->fw.transferring) {
        if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL ||
            fw_transfer->action == CXL_FW_XFER_ACTION_INIT) {
            return CXL_MBOX_FW_XFER_IN_PROGRESS;
        }
        /*
         * Abort partitioned package transfer if over 30 secs
         * between parts. As opposed to the explicit ABORT action,
         * semantically treat this condition as an error - as
         * if a part action were passed without a previous INIT.
         */
        if (difftime(time(NULL), cci->fw.last_partxfer) > 30.0) {
            cci->fw.transferring = false;
            return CXL_MBOX_INVALID_INPUT;
        }
    } else if (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE ||
               fw_transfer->action == CXL_FW_XFER_ACTION_END) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* allow back-to-back retransmission */
    if ((offset != cci->fw.prev_offset || length != cci->fw.prev_len) &&
        (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE ||
         fw_transfer->action == CXL_FW_XFER_ACTION_END)) {
        /* verify no overlaps */
        if (offset < cci->fw.prev_offset + cci->fw.prev_len) {
            return CXL_MBOX_FW_XFER_OUT_OF_ORDER;
        }
    }

    switch (fw_transfer->action) {
    case CXL_FW_XFER_ACTION_FULL: /* ignores offset */
    case CXL_FW_XFER_ACTION_END:
        if (fw_transfer->slot == 0 ||
            fw_transfer->slot == cci->fw.active_slot ||
            fw_transfer->slot > CXL_FW_SLOTS) {
            return CXL_MBOX_FW_INVALID_SLOT;
        }

        /* mark the slot used upon bg completion */
        break;
    case CXL_FW_XFER_ACTION_INIT:
        if (offset != 0) {
            return CXL_MBOX_INVALID_INPUT;
        }

        cci->fw.transferring = true;
        cci->fw.prev_offset = offset;
        cci->fw.prev_len = length;
        break;
    case CXL_FW_XFER_ACTION_CONTINUE:
        cci->fw.prev_offset = offset;
        cci->fw.prev_len = length;
        break;
    default:
        return CXL_MBOX_INVALID_INPUT;
    }

    if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL) {
        cci->bg.runtime = 10 * 1000UL;
    } else {
        cci->bg.runtime = 2 * 1000UL;
    }
    /* keep relevant context for bg completion */
    cci->fw.curr_action = fw_transfer->action;
    cci->fw.curr_slot = fw_transfer->slot;
    *len_out = 0;

    return CXL_MBOX_BG_STARTED;
}
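
/*
 * Worked example for the offset handling in the transfer handler above: the
 * offset field is expressed in 128-byte (CXL_FW_XFER_ALIGNMENT) units, so a
 * part sent with fw_transfer->offset == 2 lands at byte 256 of the package,
 * and every part must satisfy
 *
 *     offset * CXL_FW_XFER_ALIGNMENT + length <= CXL_FW_SIZE
 *
 * or the command fails with Invalid Input.
 */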

static void __do_firmware_xfer(CXLCCI *cci)
{
    switch (cci->fw.curr_action) {
    case CXL_FW_XFER_ACTION_FULL:
    case CXL_FW_XFER_ACTION_END:
        cci->fw.slot[cci->fw.curr_slot - 1] = true;
        cci->fw.transferring = false;
        break;
    case CXL_FW_XFER_ACTION_INIT:
    case CXL_FW_XFER_ACTION_CONTINUE:
        time(&cci->fw.last_partxfer);
        break;
    default:
        break;
    }
}

/* CXL r3.1 section 8.2.9.3.3: Activate FW (Opcode 0202h) */
static CXLRetCode cmd_firmware_update_activate(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    struct {
        uint8_t action;
        uint8_t slot;
    } QEMU_PACKED *fw_activate = (void *)payload_in;
    QEMU_BUILD_BUG_ON(sizeof(*fw_activate) != 0x2);

    if (fw_activate->slot == 0 ||
        fw_activate->slot == cci->fw.active_slot ||
        fw_activate->slot > CXL_FW_SLOTS) {
        return CXL_MBOX_FW_INVALID_SLOT;
    }

    /* ensure that an actual fw package is there */
    if (!cci->fw.slot[fw_activate->slot - 1]) {
        return CXL_MBOX_FW_INVALID_SLOT;
    }

    switch (fw_activate->action) {
    case 0: /* online */
        cci->fw.active_slot = fw_activate->slot;
        break;
    case 1: /* reset */
        cci->fw.staged_slot = fw_activate->slot;
        break;
    default:
        return CXL_MBOX_INVALID_INPUT;
    }

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.4.1: Get Timestamp (Opcode 0300h) */
static CXLRetCode cmd_timestamp_get(const struct cxl_cmd *cmd,
                                    uint8_t *payload_in,
                                    size_t len_in,
                                    uint8_t *payload_out,
                                    size_t *len_out,
                                    CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    uint64_t final_time = cxl_device_get_timestamp(cxl_dstate);

    stq_le_p(payload_out, final_time);
    *len_out = 8;

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.4.2: Set Timestamp (Opcode 0301h) */
static CXLRetCode cmd_timestamp_set(const struct cxl_cmd *cmd,
                                    uint8_t *payload_in,
                                    size_t len_in,
                                    uint8_t *payload_out,
                                    size_t *len_out,
                                    CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;

    cxl_dstate->timestamp.set = true;
    cxl_dstate->timestamp.last_set = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    cxl_dstate->timestamp.host_set = le64_to_cpu(*(uint64_t *)payload_in);

    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.5.2.1: Command Effects Log (CEL) */
static const QemuUUID cel_uuid = {
    .data = UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79,
                 0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17)
};

/* CXL r3.1 Section 8.2.9.5.1: Get Supported Logs (Opcode 0400h) */
static CXLRetCode cmd_logs_get_supported(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint16_t entries;
        uint8_t rsvd[6];
        struct {
            QemuUUID uuid;
            uint32_t size;
        } log_entries[1];
    } QEMU_PACKED *supported_logs = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*supported_logs) != 0x1c);

    supported_logs->entries = 1;
    supported_logs->log_entries[0].uuid = cel_uuid;
    supported_logs->log_entries[0].size = 4 * cci->cel_size;

    *len_out = sizeof(*supported_logs);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h) */
static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct {
        QemuUUID uuid;
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_log;

    get_log = (void *)payload_in;

    if (get_log->length > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) {
        return CXL_MBOX_INVALID_LOG;
    }

    /*
     * CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h)
     *   The device shall return Invalid Input if the Offset or Length
     *   fields attempt to access beyond the size of the log as reported by
     *   Get Supported Logs.
     *
     * Only one entry per opcode is valid, but the requested length + offset
     * may still be greater than that, and so access beyond the end of
     * cci->cel_log, if the inputs are not valid.
     */
    if ((uint64_t)get_log->offset + get_log->length >= sizeof(cci->cel_log)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* Store off everything to local variables so we can wipe out the payload */
    *len_out = get_log->length;

    memmove(payload_out, cci->cel_log + get_log->offset, get_log->length);

    return CXL_MBOX_SUCCESS;
}
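
/*
 * Illustrative Get Log request for the handler above (the values are
 * assumptions for the example): reading the start of the CEL,
 *
 *     get_log->uuid   = cel_uuid;
 *     get_log->offset = 0;
 *     get_log->length = 64;
 *
 * keeping length within payload_max and offset + length within the log size
 * reported by Get Supported Logs, per the spec text quoted above.
 */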

/* CXL r3.1 section 8.2.9.6: Features */
/*
 * Get Supported Features output payload
 * CXL r3.1 section 8.2.9.6.1 Table 8-96
 */
typedef struct CXLSupportedFeatureHeader {
    uint16_t entries;
    uint16_t nsuppfeats_dev;
    uint32_t reserved;
} QEMU_PACKED CXLSupportedFeatureHeader;

/*
 * Get Supported Features Supported Feature Entry
 * CXL r3.1 section 8.2.9.6.1 Table 8-97
 */
typedef struct CXLSupportedFeatureEntry {
    QemuUUID uuid;
    uint16_t feat_index;
    uint16_t get_feat_size;
    uint16_t set_feat_size;
    uint32_t attr_flags;
    uint8_t get_feat_version;
    uint8_t set_feat_version;
    uint16_t set_feat_effects;
    uint8_t rsvd[18];
} QEMU_PACKED CXLSupportedFeatureEntry;

/* Supported Feature Entry : attribute flags */
#define CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE BIT(0)
#define CXL_FEAT_ENTRY_ATTR_FLAG_DEEPEST_RESET_PERSISTENCE_MASK GENMASK(3, 1)
#define CXL_FEAT_ENTRY_ATTR_FLAG_PERSIST_ACROSS_FIRMWARE_UPDATE BIT(4)
#define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_DEFAULT_SELECTION BIT(5)
#define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_SAVED_SELECTION BIT(6)

/* Supported Feature Entry : set feature effects */
#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_COLD_RESET BIT(0)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE BIT(1)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_DATA_CHANGE BIT(2)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_POLICY_CHANGE BIT(3)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_LOG_CHANGE BIT(4)
#define CXL_FEAT_ENTRY_SFE_SECURITY_STATE_CHANGE BIT(5)
#define CXL_FEAT_ENTRY_SFE_BACKGROUND_OPERATION BIT(6)
#define CXL_FEAT_ENTRY_SFE_SUPPORT_SECONDARY_MAILBOX BIT(7)
#define CXL_FEAT_ENTRY_SFE_SUPPORT_ABORT_BACKGROUND_OPERATION BIT(8)
#define CXL_FEAT_ENTRY_SFE_CEL_VALID BIT(9)
#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CONV_RESET BIT(10)
#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CXL_RESET BIT(11)

enum CXL_SUPPORTED_FEATURES_LIST {
    CXL_FEATURE_PATROL_SCRUB = 0,
    CXL_FEATURE_ECS,
    CXL_FEATURE_MAX
};

/*
 * Get Feature input payload
 * CXL r3.1 section 8.2.9.6.2 Table 8-99
 */
/* Get Feature : Payload in selection */
enum CXL_GET_FEATURE_SELECTION {
    CXL_GET_FEATURE_SEL_CURRENT_VALUE,
    CXL_GET_FEATURE_SEL_DEFAULT_VALUE,
    CXL_GET_FEATURE_SEL_SAVED_VALUE,
    CXL_GET_FEATURE_SEL_MAX
};

/*
 * Set Feature input payload
 * CXL r3.1 section 8.2.9.6.3 Table 8-101
 */
typedef struct CXLSetFeatureInHeader {
    QemuUUID uuid;
    uint32_t flags;
    uint16_t offset;
    uint8_t version;
    uint8_t rsvd[9];
} QEMU_PACKED QEMU_ALIGNED(16) CXLSetFeatureInHeader;

/* Set Feature : Payload in flags */
#define CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK 0x7
enum CXL_SET_FEATURE_FLAG_DATA_TRANSFER {
    CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_CONTINUE_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MAX
};
#define CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET BIT(3)

/* CXL r3.1 section 8.2.9.9.11.1: Device Patrol Scrub Control Feature */
static const QemuUUID patrol_scrub_uuid = {
    .data = UUID(0x96dad7d6, 0xfde8, 0x482b, 0xa7, 0x33,
                 0x75, 0x77, 0x4e, 0x06, 0xdb, 0x8a)
};

typedef struct CXLMemPatrolScrubSetFeature {
    CXLSetFeatureInHeader hdr;
    CXLMemPatrolScrubWriteAttrs feat_data;
} QEMU_PACKED QEMU_ALIGNED(16) CXLMemPatrolScrubSetFeature;

/*
 * CXL r3.1 section 8.2.9.9.11.2:
 * DDR5 Error Check Scrub (ECS) Control Feature
 */
static const QemuUUID ecs_uuid = {
    .data = UUID(0xe5b13f22, 0x2328, 0x4a14, 0xb8, 0xba,
                 0xb9, 0x69, 0x1e, 0x89, 0x33, 0x86)
};

typedef struct CXLMemECSSetFeature {
    CXLSetFeatureInHeader hdr;
    CXLMemECSWriteAttrs feat_data[];
} QEMU_PACKED QEMU_ALIGNED(16) CXLMemECSSetFeature;

/* CXL r3.1 section 8.2.9.6.1: Get Supported Features (Opcode 0500h) */
static CXLRetCode cmd_features_get_supported(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        uint32_t count;
        uint16_t start_index;
        uint16_t reserved;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_feats_in = (void *)payload_in;

    struct {
        CXLSupportedFeatureHeader hdr;
        CXLSupportedFeatureEntry feat_entries[];
    } QEMU_PACKED QEMU_ALIGNED(16) *get_feats_out = (void *)payload_out;
    uint16_t index, req_entries;
    uint16_t entry;

    if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        return CXL_MBOX_UNSUPPORTED;
    }
    if (get_feats_in->count < sizeof(CXLSupportedFeatureHeader) ||
        get_feats_in->start_index >= CXL_FEATURE_MAX) {
        return CXL_MBOX_INVALID_INPUT;
    }

    req_entries = (get_feats_in->count -
                   sizeof(CXLSupportedFeatureHeader)) /
                  sizeof(CXLSupportedFeatureEntry);
    req_entries = MIN(req_entries,
                      (CXL_FEATURE_MAX - get_feats_in->start_index));

    for (entry = 0, index = get_feats_in->start_index;
         entry < req_entries; index++) {
        switch (index) {
        case CXL_FEATURE_PATROL_SCRUB:
            /* Fill supported feature entry for device patrol scrub control */
            get_feats_out->feat_entries[entry++] =
                (struct CXLSupportedFeatureEntry) {
                    .uuid = patrol_scrub_uuid,
                    .feat_index = index,
                    .get_feat_size = sizeof(CXLMemPatrolScrubReadAttrs),
                    .set_feat_size = sizeof(CXLMemPatrolScrubWriteAttrs),
                    .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE,
                    .get_feat_version = CXL_MEMDEV_PS_GET_FEATURE_VERSION,
                    .set_feat_version = CXL_MEMDEV_PS_SET_FEATURE_VERSION,
                    .set_feat_effects =
                        CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE |
                        CXL_FEAT_ENTRY_SFE_CEL_VALID,
                };
            break;
        case CXL_FEATURE_ECS:
            /* Fill supported feature entry for device DDR5 ECS control */
            get_feats_out->feat_entries[entry++] =
                (struct CXLSupportedFeatureEntry) {
                    .uuid = ecs_uuid,
                    .feat_index = index,
                    .get_feat_size = sizeof(CXLMemECSReadAttrs),
                    .set_feat_size = sizeof(CXLMemECSWriteAttrs),
                    .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE,
                    .get_feat_version = CXL_ECS_GET_FEATURE_VERSION,
                    .set_feat_version = CXL_ECS_SET_FEATURE_VERSION,
                    .set_feat_effects =
                        CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE |
                        CXL_FEAT_ENTRY_SFE_CEL_VALID,
                };
            break;
        default:
            __builtin_unreachable();
        }
    }
    get_feats_out->hdr.nsuppfeats_dev = CXL_FEATURE_MAX;
    get_feats_out->hdr.entries = req_entries;
    *len_out = sizeof(CXLSupportedFeatureHeader) +
               req_entries * sizeof(CXLSupportedFeatureEntry);

    return CXL_MBOX_SUCCESS;
}
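
/*
 * Worked example for the request arithmetic above: the count field is in
 * bytes and covers the header plus whole entries, so with the 8-byte
 * CXLSupportedFeatureHeader and 48-byte CXLSupportedFeatureEntry defined
 * earlier,
 *
 *     count = 8 + 2 * 48 = 104  =>  req_entries = (104 - 8) / 48 = 2
 *
 * which is then clamped so start_index + req_entries never passes
 * CXL_FEATURE_MAX.
 */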

/* CXL r3.1 section 8.2.9.6.2: Get Feature (Opcode 0501h) */
static CXLRetCode cmd_features_get_feature(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    struct {
        QemuUUID uuid;
        uint16_t offset;
        uint16_t count;
        uint8_t selection;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_feature;
    uint16_t bytes_to_copy = 0;
    CXLType3Dev *ct3d;
    CXLSetFeatureInfo *set_feat_info;

    if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        return CXL_MBOX_UNSUPPORTED;
    }

    ct3d = CXL_TYPE3(cci->d);
    get_feature = (void *)payload_in;

    set_feat_info = &ct3d->set_feat_info;
    if (qemu_uuid_is_equal(&get_feature->uuid, &set_feat_info->uuid)) {
        return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS;
    }

    if (get_feature->selection != CXL_GET_FEATURE_SEL_CURRENT_VALUE) {
        return CXL_MBOX_UNSUPPORTED;
    }
    if (get_feature->offset + get_feature->count > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (qemu_uuid_is_equal(&get_feature->uuid, &patrol_scrub_uuid)) {
        if (get_feature->offset >= sizeof(CXLMemPatrolScrubReadAttrs)) {
            return CXL_MBOX_INVALID_INPUT;
        }
        bytes_to_copy = sizeof(CXLMemPatrolScrubReadAttrs) -
                        get_feature->offset;
        bytes_to_copy = MIN(bytes_to_copy, get_feature->count);
        memcpy(payload_out,
               (uint8_t *)&ct3d->patrol_scrub_attrs + get_feature->offset,
               bytes_to_copy);
    } else if (qemu_uuid_is_equal(&get_feature->uuid, &ecs_uuid)) {
        if (get_feature->offset >= sizeof(CXLMemECSReadAttrs)) {
            return CXL_MBOX_INVALID_INPUT;
        }
        bytes_to_copy = sizeof(CXLMemECSReadAttrs) - get_feature->offset;
        bytes_to_copy = MIN(bytes_to_copy, get_feature->count);
        memcpy(payload_out,
               (uint8_t *)&ct3d->ecs_attrs + get_feature->offset,
               bytes_to_copy);
    } else {
        return CXL_MBOX_UNSUPPORTED;
    }

    *len_out = bytes_to_copy;

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.6.3: Set Feature (Opcode 0502h) */
static CXLRetCode cmd_features_set_feature(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    CXLSetFeatureInHeader *hdr = (void *)payload_in;
    CXLMemPatrolScrubWriteAttrs *ps_write_attrs;
    CXLMemPatrolScrubSetFeature *ps_set_feature;
    CXLMemECSWriteAttrs *ecs_write_attrs;
    CXLMemECSSetFeature *ecs_set_feature;
    CXLSetFeatureInfo *set_feat_info;
    uint16_t bytes_to_copy = 0;
    uint8_t data_transfer_flag;
    CXLType3Dev *ct3d;
    uint16_t count;

    if (len_in < sizeof(*hdr)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        return CXL_MBOX_UNSUPPORTED;
    }
    ct3d = CXL_TYPE3(cci->d);
    set_feat_info = &ct3d->set_feat_info;

    if (!qemu_uuid_is_null(&set_feat_info->uuid) &&
        !qemu_uuid_is_equal(&hdr->uuid, &set_feat_info->uuid)) {
        return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS;
    }
    if (hdr->flags & CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET) {
        set_feat_info->data_saved_across_reset = true;
    } else {
        set_feat_info->data_saved_across_reset = false;
    }

    data_transfer_flag =
        hdr->flags & CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK;
    if (data_transfer_flag == CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER) {
        set_feat_info->uuid = hdr->uuid;
        set_feat_info->data_size = 0;
    }
    set_feat_info->data_transfer_flag = data_transfer_flag;
    set_feat_info->data_offset = hdr->offset;
    bytes_to_copy = len_in - sizeof(CXLSetFeatureInHeader);

    if (bytes_to_copy == 0) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) {
        if (hdr->version != CXL_MEMDEV_PS_SET_FEATURE_VERSION) {
            return CXL_MBOX_UNSUPPORTED;
        }

        ps_set_feature = (void *)payload_in;
        ps_write_attrs = &ps_set_feature->feat_data;

        if ((uint32_t)hdr->offset + bytes_to_copy >
            sizeof(ct3d->patrol_scrub_wr_attrs)) {
            return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
        }
        memcpy((uint8_t *)&ct3d->patrol_scrub_wr_attrs + hdr->offset,
               ps_write_attrs,
               bytes_to_copy);
        set_feat_info->data_size += bytes_to_copy;

        if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
            data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) {
            ct3d->patrol_scrub_attrs.scrub_cycle &= ~0xFF;
            ct3d->patrol_scrub_attrs.scrub_cycle |=
                ct3d->patrol_scrub_wr_attrs.scrub_cycle_hr & 0xFF;
            ct3d->patrol_scrub_attrs.scrub_flags &= ~0x1;
            ct3d->patrol_scrub_attrs.scrub_flags |=
                ct3d->patrol_scrub_wr_attrs.scrub_flags & 0x1;
        }
    } else if (qemu_uuid_is_equal(&hdr->uuid, &ecs_uuid)) {
        if (hdr->version != CXL_ECS_SET_FEATURE_VERSION) {
            return CXL_MBOX_UNSUPPORTED;
        }

        ecs_set_feature = (void *)payload_in;
        ecs_write_attrs = ecs_set_feature->feat_data;

        if ((uint32_t)hdr->offset + bytes_to_copy >
            sizeof(ct3d->ecs_wr_attrs)) {
            return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
        }
        memcpy((uint8_t *)&ct3d->ecs_wr_attrs + hdr->offset,
               ecs_write_attrs,
               bytes_to_copy);
        set_feat_info->data_size += bytes_to_copy;

        if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
            data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) {
            ct3d->ecs_attrs.ecs_log_cap = ct3d->ecs_wr_attrs.ecs_log_cap;
            for (count = 0; count < CXL_ECS_NUM_MEDIA_FRUS; count++) {
                ct3d->ecs_attrs.fru_attrs[count].ecs_config =
                    ct3d->ecs_wr_attrs.fru_attrs[count].ecs_config & 0x1F;
            }
        }
    } else {
        return CXL_MBOX_UNSUPPORTED;
    }

    if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
        data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER ||
        data_transfer_flag == CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER) {
        memset(&set_feat_info->uuid, 0, sizeof(QemuUUID));
        if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) {
            memset(&ct3d->patrol_scrub_wr_attrs, 0, set_feat_info->data_size);
        } else if (qemu_uuid_is_equal(&hdr->uuid, &ecs_uuid)) {
            memset(&ct3d->ecs_wr_attrs, 0, set_feat_info->data_size);
        }
        set_feat_info->data_transfer_flag = 0;
        set_feat_info->data_saved_across_reset = false;
        set_feat_info->data_offset = 0;
        set_feat_info->data_size = 0;
    }

    return CXL_MBOX_SUCCESS;
}
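
/*
 * Sketch of the multi-part Set Feature flow the handler above implements
 * (flag names come from enum CXL_SET_FEATURE_FLAG_DATA_TRANSFER; offsets
 * are illustrative):
 *
 *     1. INITIATE_DATA_TRANSFER, offset 0 - latches hdr->uuid, resets the
 *        accumulated data_size
 *     2. CONTINUE_DATA_TRANSFER, offset n - appends data at hdr->offset
 *     3. FINISH_DATA_TRANSFER             - applies the staged write
 *        attributes and clears set_feat_info
 *
 * FULL_DATA_TRANSFER performs all three steps in a single command, while
 * ABORT_DATA_TRANSFER only performs the cleanup of step 3.
 */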

/* CXL r3.1 Section 8.2.9.9.1.1: Identify Memory Device (Opcode 4000h) */
static CXLRetCode cmd_identify_memory_device(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        char fw_revision[0x10];
        uint64_t total_capacity;
        uint64_t volatile_capacity;
        uint64_t persistent_capacity;
        uint64_t partition_align;
        uint16_t info_event_log_size;
        uint16_t warning_event_log_size;
        uint16_t failure_event_log_size;
        uint16_t fatal_event_log_size;
        uint32_t lsa_size;
        uint8_t poison_list_max_mer[3];
        uint16_t inject_poison_limit;
        uint8_t poison_caps;
        uint8_t qos_telemetry_caps;
        uint16_t dc_event_log_size;
    } QEMU_PACKED *id;
    QEMU_BUILD_BUG_ON(sizeof(*id) != 0x45);
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    id = (void *)payload_out;

    snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0);

    stq_le_p(&id->total_capacity,
             cxl_dstate->static_mem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->persistent_capacity,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->volatile_capacity,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d));
    /* 256 poison records */
    st24_le_p(id->poison_list_max_mer, 256);
    /* No limit - so limited by main poison record limit */
    stw_le_p(&id->inject_poison_limit, 0);
    stw_le_p(&id->dc_event_log_size, CXL_DC_EVENT_LOG_SIZE);

    *len_out = sizeof(*id);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.2.1: Get Partition Info (Opcode 4100h) */
static CXLRetCode cmd_ccls_get_partition_info(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    struct {
        uint64_t active_vmem;
        uint64_t active_pmem;
        uint64_t next_vmem;
        uint64_t next_pmem;
    } QEMU_PACKED *part_info = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20);
    CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    stq_le_p(&part_info->active_vmem,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    /*
     * When both next_vmem and next_pmem are 0, there is no pending change to
     * partitioning.
     */
    stq_le_p(&part_info->next_vmem, 0);
    stq_le_p(&part_info->active_pmem,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&part_info->next_pmem, 0);

    *len_out = sizeof(*part_info);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.2.3: Get LSA (Opcode 4102h) */
static CXLRetCode cmd_ccls_get_lsa(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct {
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED *get_lsa;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    uint64_t offset, length;

    get_lsa = (void *)payload_in;
    offset = get_lsa->offset;
    length = get_lsa->length;

    if (offset + length > cvc->get_lsa_size(ct3d)) {
        *len_out = 0;
        return CXL_MBOX_INVALID_INPUT;
    }

    *len_out = cvc->get_lsa(ct3d, payload_out, length, offset);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.2.4: Set LSA (Opcode 4103h) */
static CXLRetCode cmd_ccls_set_lsa(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct set_lsa_pl {
        uint32_t offset;
        uint32_t rsvd;
        uint8_t data[];
    } QEMU_PACKED;
    struct set_lsa_pl *set_lsa_payload = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    const size_t hdr_len = offsetof(struct set_lsa_pl, data);

    *len_out = 0;
    if (len_in < hdr_len) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (set_lsa_payload->offset + len_in > cvc->get_lsa_size(ct3d) + hdr_len) {
        return CXL_MBOX_INVALID_INPUT;
    }
    len_in -= hdr_len;

    cvc->set_lsa(ct3d, set_lsa_payload->data, len_in, set_lsa_payload->offset);
    return CXL_MBOX_SUCCESS;
}

/* Perform the actual device zeroing */
static void __do_sanitization(CXLType3Dev *ct3d)
{
    MemoryRegion *mr;

    if (ct3d->hostvmem) {
        mr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (mr) {
            void *hostmem = memory_region_get_ram_ptr(mr);
            memset(hostmem, 0, memory_region_size(mr));
        }
    }

    if (ct3d->hostpmem) {
        mr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (mr) {
            void *hostmem = memory_region_get_ram_ptr(mr);
            memset(hostmem, 0, memory_region_size(mr));
        }
    }
    if (ct3d->lsa) {
        mr = host_memory_backend_get_memory(ct3d->lsa);
        if (mr) {
            void *lsa = memory_region_get_ram_ptr(mr);
            memset(lsa, 0, memory_region_size(mr));
        }
    }
    cxl_discard_all_event_records(&ct3d->cxl_dstate);
}

/*
 * CXL r3.1 Section 8.2.9.9.5.1: Sanitize (Opcode 4400h)
 *
 * Once the Sanitize command has started successfully, the device shall be
 * placed in the media disabled state. If the command fails or is interrupted
 * by a reset or power failure, it shall remain in the media disabled state
 * until a successful Sanitize command has been completed. During this state:
 *
 * 1. Memory writes to the device will have no effect, and all memory reads
 * will return random values (no user data returned, even for locations that
 * the failed Sanitize operation didn’t sanitize yet).
 *
 * 2. Mailbox commands shall still be processed in the disabled state, except
 * that commands that access Sanitized areas shall fail with the Media
 * Disabled error code.
 */
static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint64_t total_mem; /* in MiB */
    int secs;

    total_mem = (ct3d->cxl_dstate.vmem_size + ct3d->cxl_dstate.pmem_size) >> 20;
    if (total_mem <= 512) {
        secs = 4;
    } else if (total_mem <= 1024) {
        secs = 8;
    } else if (total_mem <= 2 * 1024) {
        secs = 15;
    } else if (total_mem <= 4 * 1024) {
        secs = 30;
    } else if (total_mem <= 8 * 1024) {
        secs = 60;
    } else if (total_mem <= 16 * 1024) {
        secs = 2 * 60;
    } else if (total_mem <= 32 * 1024) {
        secs = 4 * 60;
    } else if (total_mem <= 64 * 1024) {
        secs = 8 * 60;
    } else if (total_mem <= 128 * 1024) {
        secs = 15 * 60;
    } else if (total_mem <= 256 * 1024) {
        secs = 30 * 60;
    } else if (total_mem <= 512 * 1024) {
        secs = 60 * 60;
    } else if (total_mem <= 1024 * 1024) {
        secs = 120 * 60;
    } else {
        secs = 240 * 60; /* max 4 hrs */
    }

    /* Other background commands are busied out while this runs */
    cci->bg.runtime = secs * 1000UL;
    *len_out = 0;

    cxl_dev_disable_media(&ct3d->cxl_dstate);

    /* sanitize when done */
    return CXL_MBOX_BG_STARTED;
}

static CXLRetCode cmd_get_security_state(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    uint32_t *state = (uint32_t *)payload_out;

    *state = 0;
    *len_out = 4;
    return CXL_MBOX_SUCCESS;
}

/*
 * CXL r3.1 Section 8.2.9.9.4.1: Get Poison List (Opcode 4300h)
 *
 * This is very inefficient, but good enough for now!
 * Also the payload will always fit, so no need to handle the MORE flag and
 * make this stateful. We may want to allow longer poison lists to aid
 * testing of that kernel functionality.
 */
static CXLRetCode cmd_media_get_poison_list(const struct cxl_cmd *cmd,
                                            uint8_t *payload_in,
                                            size_t len_in,
                                            uint8_t *payload_out,
                                            size_t *len_out,
                                            CXLCCI *cci)
{
    struct get_poison_list_pl {
        uint64_t pa;
        uint64_t length;
    } QEMU_PACKED;

    struct get_poison_list_out_pl {
        uint8_t flags;
        uint8_t rsvd1;
        uint64_t overflow_timestamp;
        uint16_t count;
        uint8_t rsvd2[0x14];
        struct {
            uint64_t addr;
            uint32_t length;
            uint32_t resv;
        } QEMU_PACKED records[];
    } QEMU_PACKED;

    struct get_poison_list_pl *in = (void *)payload_in;
    struct get_poison_list_out_pl *out = (void *)payload_out;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint16_t record_count = 0, i = 0;
    uint64_t query_start, query_length;
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLPoison *ent;
    uint16_t out_pl_len;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    QLIST_FOREACH(ent, poison_list, node) {
        /* Check for no overlap */
        if (!ranges_overlap(ent->start, ent->length,
                            query_start, query_length)) {
            continue;
        }
        record_count++;
    }
    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    QLIST_FOREACH(ent, poison_list, node) {
        uint64_t start, stop;

        /* Check for no overlap */
        if (!ranges_overlap(ent->start, ent->length,
                            query_start, query_length)) {
            continue;
        }

        /* Deal with overlap */
        start = MAX(ROUND_DOWN(ent->start, 64ull), query_start);
        stop = MIN(ROUND_DOWN(ent->start, 64ull) + ent->length,
                   query_start + query_length);
        stq_le_p(&out->records[i].addr, start | (ent->type & 0x7));
        stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE);
        i++;
    }
    if (ct3d->poison_list_overflowed) {
        out->flags = (1 << 1);
        stq_le_p(&out->overflow_timestamp, ct3d->poison_list_overflow_ts);
    }
    if (scan_media_running(cci)) {
        out->flags |= (1 << 2);
    }

    stw_le_p(&out->count, record_count);
    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 Section 8.2.9.9.4.2: Inject Poison (Opcode 4301h) */
static CXLRetCode cmd_media_inject_poison(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLPoison *ent;
    struct inject_poison_pl {
        uint64_t dpa;
    };
    struct inject_poison_pl *in = (void *)payload_in;
    uint64_t dpa = ldq_le_p(&in->dpa);
    CXLPoison *p;

    QLIST_FOREACH(ent, poison_list, node) {
        if (dpa >= ent->start &&
            dpa + CXL_CACHE_LINE_SIZE <= ent->start + ent->length) {
            return CXL_MBOX_SUCCESS;
        }
    }
    /*
     * Freeze the list if there is an ongoing scan media operation.
     */
    if (scan_media_running(cci)) {
        /*
         * XXX: Spec is ambiguous - is this case considered
         * a successful return despite not adding to the list?
         */
        goto success;
    }

    if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
        return CXL_MBOX_INJECT_POISON_LIMIT;
    }
    p = g_new0(CXLPoison, 1);

    p->length = CXL_CACHE_LINE_SIZE;
    p->start = dpa;
    p->type = CXL_POISON_TYPE_INJECTED;

    /*
     * Possible todo: merge with an adjacent entry of the same type
     */
    QLIST_INSERT_HEAD(poison_list, p, node);
    ct3d->poison_list_cnt++;
success:
    *len_out = 0;

    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 Section 8.2.9.9.4.3: Clear Poison (Opcode 4302h) */
static CXLRetCode cmd_media_clear_poison(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    struct clear_poison_pl {
        uint64_t dpa;
        uint8_t data[64];
    };
    CXLPoison *ent;
    uint64_t dpa;

    struct clear_poison_pl *in = (void *)payload_in;

    dpa = ldq_le_p(&in->dpa);
    if (dpa + CXL_CACHE_LINE_SIZE > cxl_dstate->static_mem_size +
                                    ct3d->dc.total_capacity) {
        return CXL_MBOX_INVALID_PA;
    }

    /* Clearing a region with no poison is not an error so always do so */
    if (cvc->set_cacheline) {
        if (!cvc->set_cacheline(ct3d, dpa, in->data)) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
    }

    /*
     * Freeze the list if there is an ongoing scan media operation.
     */
    if (scan_media_running(cci)) {
        /*
         * XXX: Spec is ambiguous - is this case considered
         * a successful return despite not removing from the list?
         */
        goto success;
    }

    QLIST_FOREACH(ent, poison_list, node) {
        /*
         * Test for containment within an entry. Simpler than the general
         * case as we are clearing 64 bytes and entries are 64 byte aligned.
         */
        if ((dpa >= ent->start) && (dpa < ent->start + ent->length)) {
            break;
        }
    }
    if (!ent) {
        goto success;
    }

    QLIST_REMOVE(ent, node);
    ct3d->poison_list_cnt--;

    if (dpa > ent->start) {
        CXLPoison *frag;
        /* Cannot overflow as replacing existing entry */

        frag = g_new0(CXLPoison, 1);

        frag->start = ent->start;
        frag->length = dpa - ent->start;
        frag->type = ent->type;

        QLIST_INSERT_HEAD(poison_list, frag, node);
        ct3d->poison_list_cnt++;
    }

    if (dpa + CXL_CACHE_LINE_SIZE < ent->start + ent->length) {
        CXLPoison *frag;

        if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
            cxl_set_poison_list_overflowed(ct3d);
        } else {
            frag = g_new0(CXLPoison, 1);

            frag->start = dpa + CXL_CACHE_LINE_SIZE;
            frag->length = ent->start + ent->length - frag->start;
            frag->type = ent->type;
            QLIST_INSERT_HEAD(poison_list, frag, node);
            ct3d->poison_list_cnt++;
        }
    }
    /* Once any fragments have been added, free the original entry */
    g_free(ent);
success:
    *len_out = 0;

    return CXL_MBOX_SUCCESS;
}
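/*
 * Split example (illustrative only): clearing one cache line from the middle
 * of an entry replaces it with up to two fragments:
 *
 *     before:  [start ............................ start + length)
 *     clear:              [dpa, dpa + 64)
 *     after:   [start, dpa)             [dpa + 64, start + length)
 *
 * so the net change to poison_list_cnt per clear is +1, 0 or -1 depending on
 * whether the cleared line sat in the middle, at an edge, or covered the
 * whole entry.
 */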
/*
 * CXL r3.1 section 8.2.9.9.4.4: Get Scan Media Capabilities
 */
static CXLRetCode
cmd_media_get_scan_media_capabilities(const struct cxl_cmd *cmd,
                                      uint8_t *payload_in,
                                      size_t len_in,
                                      uint8_t *payload_out,
                                      size_t *len_out,
                                      CXLCCI *cci)
{
    struct get_scan_media_capabilities_pl {
        uint64_t pa;
        uint64_t length;
    } QEMU_PACKED;

    struct get_scan_media_capabilities_out_pl {
        uint32_t estimated_runtime_ms;
    };

    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    struct get_scan_media_capabilities_pl *in = (void *)payload_in;
    struct get_scan_media_capabilities_out_pl *out = (void *)payload_out;
    uint64_t query_start;
    uint64_t query_length;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    if (query_start + query_length > cxl_dstate->static_mem_size) {
        return CXL_MBOX_INVALID_PA;
    }

    /*
     * Just use 400 nanosecond access/read latency + 100 ns for
     * the cost of updating the poison list. For small enough
     * chunks return at least 1 ms.
     */
    stl_le_p(&out->estimated_runtime_ms,
             MAX(1, query_length * (0.0005L / 64)));

    *len_out = sizeof(*out);
    return CXL_MBOX_SUCCESS;
}

static void __do_scan_media(CXLType3Dev *ct3d)
{
    CXLPoison *ent;
    unsigned int results_cnt = 0;

    QLIST_FOREACH(ent, &ct3d->scan_media_results, node) {
        results_cnt++;
    }

    /* only scan media may clear the overflow */
    if (ct3d->poison_list_overflowed &&
        ct3d->poison_list_cnt == results_cnt) {
        cxl_clear_poison_list_overflowed(ct3d);
    }
    /* scan media has run since last conventional reset */
    ct3d->scan_media_hasrun = true;
}

/*
 * CXL r3.1 section 8.2.9.9.4.5: Scan Media
 */
static CXLRetCode cmd_media_scan_media(const struct cxl_cmd *cmd,
                                       uint8_t *payload_in,
                                       size_t len_in,
                                       uint8_t *payload_out,
                                       size_t *len_out,
                                       CXLCCI *cci)
{
    struct scan_media_pl {
        uint64_t pa;
        uint64_t length;
        uint8_t flags;
    } QEMU_PACKED;

    struct scan_media_pl *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    uint64_t query_start;
    uint64_t query_length;
    CXLPoison *ent, *next;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    /* Without DC regions, the range must lie within static memory alone */
    if (!ct3d->dc.num_regions &&
        query_start + query_length > cxl_dstate->static_mem_size) {
        return CXL_MBOX_INVALID_PA;
    }
    if (ct3d->dc.num_regions && query_start + query_length >=
        cxl_dstate->static_mem_size + ct3d->dc.total_capacity) {
        return CXL_MBOX_INVALID_PA;
    }

    if (in->flags == 0) { /* TODO */
        qemu_log_mask(LOG_UNIMP,
                      "Scan Media Event Log is unsupported\n");
    }

    /* any previous results are discarded upon a new Scan Media */
    QLIST_FOREACH_SAFE(ent, &ct3d->scan_media_results, node, next) {
        QLIST_REMOVE(ent, node);
        g_free(ent);
    }

    /* kill the poison list - it will be recreated */
    if (ct3d->poison_list_overflowed) {
        QLIST_FOREACH_SAFE(ent, &ct3d->poison_list, node, next) {
            QLIST_REMOVE(ent, node);
            g_free(ent);
            ct3d->poison_list_cnt--;
        }
    }

    /*
     * Scan the backup list and move corresponding entries
     * into the results list, updating the poison list
     * when possible.
     */
    QLIST_FOREACH_SAFE(ent, &ct3d->poison_list_bkp, node, next) {
        CXLPoison *res;

        if (ent->start >= query_start + query_length ||
            ent->start + ent->length <= query_start) {
            continue;
        }

        /*
         * If a Get Poison List cmd comes in while this
         * scan is being done, it will see the new complete
         * list, while setting the respective flag.
         */
        if (ct3d->poison_list_cnt < CXL_POISON_LIST_LIMIT) {
            CXLPoison *p = g_new0(CXLPoison, 1);

            p->start = ent->start;
            p->length = ent->length;
            p->type = ent->type;
            QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
            ct3d->poison_list_cnt++;
        }

        res = g_new0(CXLPoison, 1);
        res->start = ent->start;
        res->length = ent->length;
        res->type = ent->type;
        QLIST_INSERT_HEAD(&ct3d->scan_media_results, res, node);

        QLIST_REMOVE(ent, node);
        g_free(ent);
    }

    cci->bg.runtime = MAX(1, query_length * (0.0005L / 64));
    *len_out = 0;

    return CXL_MBOX_BG_STARTED;
}
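/*
 * Runtime arithmetic example (illustrative only): scanning 1 GiB covers
 * 1 GiB / 64 = 16777216 cache lines, at 0.0005 ms (500 ns) per line:
 *
 *     16777216 * 0.0005 ms ~= 8389 ms, i.e. roughly 8.4 seconds
 *
 * The same per-line figure feeds both cci->bg.runtime here and the
 * Get Scan Media Capabilities estimate above.
 */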
/*
 * CXL r3.1 section 8.2.9.9.4.6: Get Scan Media Results
 */
static CXLRetCode cmd_media_get_scan_media_results(const struct cxl_cmd *cmd,
                                                   uint8_t *payload_in,
                                                   size_t len_in,
                                                   uint8_t *payload_out,
                                                   size_t *len_out,
                                                   CXLCCI *cci)
{
    struct get_scan_media_results_out_pl {
        uint64_t dpa_restart;
        uint64_t length;
        uint8_t flags;
        uint8_t rsvd1;
        uint16_t count;
        uint8_t rsvd2[0xc];
        struct {
            uint64_t addr;
            uint32_t length;
            uint32_t resv;
        } QEMU_PACKED records[];
    } QEMU_PACKED;

    struct get_scan_media_results_out_pl *out = (void *)payload_out;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLPoisonList *scan_media_results = &ct3d->scan_media_results;
    CXLPoison *ent, *next;
    uint16_t total_count = 0, record_count = 0, i = 0;
    uint16_t out_pl_len;

    if (!ct3d->scan_media_hasrun) {
        return CXL_MBOX_UNSUPPORTED;
    }

    /*
     * Calculate limits; all entries are within the address range of the
     * last Scan Media call.
     */
    QLIST_FOREACH(ent, scan_media_results, node) {
        size_t rec_size = record_count * sizeof(out->records[0]);

        if (sizeof(*out) + rec_size < CXL_MAILBOX_MAX_PAYLOAD_SIZE) {
            record_count++;
        }
        total_count++;
    }

    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    memset(out, 0, out_pl_len);
    QLIST_FOREACH_SAFE(ent, scan_media_results, node, next) {
        uint64_t start, stop;

        if (i == record_count) {
            break;
        }

        start = ROUND_DOWN(ent->start, 64ull);
        stop = ROUND_DOWN(ent->start, 64ull) + ent->length;
        stq_le_p(&out->records[i].addr, start);
        stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE);
        i++;

        /* consume the returned entry */
        QLIST_REMOVE(ent, node);
        g_free(ent);
    }

    stw_le_p(&out->count, record_count);
    if (total_count > record_count) {
        out->flags = (1 << 0); /* More Media Error Records */
    }

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}
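/*
 * Retrieval loop sketch (illustrative only): results are consumed
 * destructively above, so a caller drains them by reissuing the command
 * while the More Media Error Records flag (bit 0) stays set:
 *
 *     do {
 *         send GET_SCAN_MEDIA_RESULTS;
 *         process out->records[0 .. count);
 *     } while (out->flags & (1 << 0));
 */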
/*
 * CXL r3.1 section 8.2.9.9.9.1: Get Dynamic Capacity Configuration
 * (Opcode: 4800h)
 */
static CXLRetCode cmd_dcd_get_dyn_cap_config(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    struct {
        uint8_t region_cnt;
        uint8_t start_rid;
    } QEMU_PACKED *in = (void *)payload_in;
    struct {
        uint8_t num_regions;
        uint8_t regions_returned;
        uint8_t rsvd1[6];
        struct {
            uint64_t base;
            uint64_t decode_len;
            uint64_t region_len;
            uint64_t block_size;
            uint32_t dsmadhandle;
            uint8_t flags;
            uint8_t rsvd2[3];
        } QEMU_PACKED records[];
    } QEMU_PACKED *out = (void *)payload_out;
    struct {
        uint32_t num_extents_supported;
        uint32_t num_extents_available;
        uint32_t num_tags_supported;
        uint32_t num_tags_available;
    } QEMU_PACKED *extra_out;
    uint16_t record_count;
    uint16_t i;
    uint16_t out_pl_len;
    uint8_t start_rid;

    start_rid = in->start_rid;
    if (start_rid >= ct3d->dc.num_regions) {
        return CXL_MBOX_INVALID_INPUT;
    }

    record_count = MIN(ct3d->dc.num_regions - in->start_rid, in->region_cnt);

    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    extra_out = (void *)(payload_out + out_pl_len);
    out_pl_len += sizeof(*extra_out);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    out->num_regions = ct3d->dc.num_regions;
    out->regions_returned = record_count;
    for (i = 0; i < record_count; i++) {
        stq_le_p(&out->records[i].base,
                 ct3d->dc.regions[start_rid + i].base);
        stq_le_p(&out->records[i].decode_len,
                 ct3d->dc.regions[start_rid + i].decode_len /
                 CXL_CAPACITY_MULTIPLIER);
        stq_le_p(&out->records[i].region_len,
                 ct3d->dc.regions[start_rid + i].len);
        stq_le_p(&out->records[i].block_size,
                 ct3d->dc.regions[start_rid + i].block_size);
        stl_le_p(&out->records[i].dsmadhandle,
                 ct3d->dc.regions[start_rid + i].dsmadhandle);
        out->records[i].flags = ct3d->dc.regions[start_rid + i].flags;
    }
    /*
     * TODO: Assign real values once extent and tag support is in use.
     */
    stl_le_p(&extra_out->num_extents_supported, CXL_NUM_EXTENTS_SUPPORTED);
    stl_le_p(&extra_out->num_extents_available, CXL_NUM_EXTENTS_SUPPORTED -
             ct3d->dc.total_extent_count);
    stl_le_p(&extra_out->num_tags_supported, CXL_NUM_TAGS_SUPPORTED);
    stl_le_p(&extra_out->num_tags_available, CXL_NUM_TAGS_SUPPORTED);

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}
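/*
 * Output layout sketch (illustrative only): the num_extents/num_tags block
 * is appended directly after however many region records were returned, so
 * for regions_returned == 2 the payload is
 *
 *     [header][record 0][record 1][extra_out]
 *
 * which is why extra_out is located at payload_out + out_pl_len before
 * out_pl_len is extended to cover it.
 */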
/*
 * CXL r3.1 section 8.2.9.9.9.2:
 * Get Dynamic Capacity Extent List (Opcode 4801h)
 */
static CXLRetCode cmd_dcd_get_dyn_cap_ext_list(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len_in,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    struct {
        uint32_t extent_cnt;
        uint32_t start_extent_id;
    } QEMU_PACKED *in = (void *)payload_in;
    struct {
        uint32_t count;
        uint32_t total_extents;
        uint32_t generation_num;
        uint8_t rsvd[4];
        CXLDCExtentRaw records[];
    } QEMU_PACKED *out = (void *)payload_out;
    uint32_t start_extent_id = in->start_extent_id;
    CXLDCExtentList *extent_list = &ct3d->dc.extents;
    uint16_t record_count = 0, i = 0, record_done = 0;
    uint16_t out_pl_len, size;
    CXLDCExtent *ent;

    if (start_extent_id > ct3d->dc.total_extent_count) {
        return CXL_MBOX_INVALID_INPUT;
    }

    record_count = MIN(in->extent_cnt,
                       ct3d->dc.total_extent_count - start_extent_id);
    size = CXL_MAILBOX_MAX_PAYLOAD_SIZE - sizeof(*out);
    record_count = MIN(record_count, size / sizeof(out->records[0]));
    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);

    stl_le_p(&out->count, record_count);
    stl_le_p(&out->total_extents, ct3d->dc.total_extent_count);
    stl_le_p(&out->generation_num, ct3d->dc.ext_list_gen_seq);

    if (record_count > 0) {
        CXLDCExtentRaw *out_rec = &out->records[record_done];

        QTAILQ_FOREACH(ent, extent_list, node) {
            if (i++ < start_extent_id) {
                continue;
            }
            stq_le_p(&out_rec->start_dpa, ent->start_dpa);
            stq_le_p(&out_rec->len, ent->len);
            memcpy(&out_rec->tag, ent->tag, 0x10);
            stw_le_p(&out_rec->shared_seq, ent->shared_seq);

            record_done++;
            out_rec++;
            if (record_done == record_count) {
                break;
            }
        }
    }

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}

/*
 * Check whether any bit in the range [nr, nr + size) of addr is set;
 * return true if any bit is set, otherwise return false.
 */
bool test_any_bits_set(const unsigned long *addr, unsigned long nr,
                       unsigned long size)
{
    unsigned long res = find_next_bit(addr, size + nr, nr);

    return res < nr + size;
}

CXLDCRegion *cxl_find_dc_region(CXLType3Dev *ct3d, uint64_t dpa, uint64_t len)
{
    int i;
    CXLDCRegion *region = &ct3d->dc.regions[0];

    if (dpa < region->base ||
        dpa >= region->base + ct3d->dc.total_capacity) {
        return NULL;
    }

    /*
     * CXL r3.1 section 9.13.3: Dynamic Capacity Device (DCD)
     *
     * Regions are used in increasing-DPA order, with Region 0 being used for
     * the lowest DPA of Dynamic Capacity and Region 7 for the highest DPA.
     * So check from the last region to find where the dpa belongs. Extents
     * that cross multiple regions are not allowed.
     */
    for (i = ct3d->dc.num_regions - 1; i >= 0; i--) {
        region = &ct3d->dc.regions[i];
        if (dpa >= region->base) {
            if (dpa + len > region->base + region->len) {
                return NULL;
            }
            return region;
        }
    }

    return NULL;
}
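/*
 * Lookup example (illustrative only): with region 0 at [0, 1G) and
 * region 1 at [1G, 3G), a dpa of 1.5G first satisfies "dpa >= region->base"
 * for region 1 when scanning downwards from the last region, so region 1 is
 * returned; an extent such as (dpa = 1.5G, len = 2G) is instead rejected
 * because it would spill past the region 1 boundary at 3G.
 */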
void cxl_insert_extent_to_extent_list(CXLDCExtentList *list,
                                      uint64_t dpa,
                                      uint64_t len,
                                      uint8_t *tag,
                                      uint16_t shared_seq)
{
    CXLDCExtent *extent;

    extent = g_new0(CXLDCExtent, 1);
    extent->start_dpa = dpa;
    extent->len = len;
    if (tag) {
        memcpy(extent->tag, tag, 0x10);
    }
    extent->shared_seq = shared_seq;

    QTAILQ_INSERT_TAIL(list, extent, node);
}

void cxl_remove_extent_from_extent_list(CXLDCExtentList *list,
                                        CXLDCExtent *extent)
{
    QTAILQ_REMOVE(list, extent, node);
    g_free(extent);
}

/*
 * Add a new extent to the extent group if one exists; otherwise, create
 * a new group.
 * Return value: the extent group into which the extent was inserted.
 */
CXLDCExtentGroup *cxl_insert_extent_to_extent_group(CXLDCExtentGroup *group,
                                                    uint64_t dpa,
                                                    uint64_t len,
                                                    uint8_t *tag,
                                                    uint16_t shared_seq)
{
    if (!group) {
        group = g_new0(CXLDCExtentGroup, 1);
        QTAILQ_INIT(&group->list);
    }
    cxl_insert_extent_to_extent_list(&group->list, dpa, len,
                                     tag, shared_seq);
    return group;
}

void cxl_extent_group_list_insert_tail(CXLDCExtentGroupList *list,
                                       CXLDCExtentGroup *group)
{
    QTAILQ_INSERT_TAIL(list, group, node);
}

void cxl_extent_group_list_delete_front(CXLDCExtentGroupList *list)
{
    CXLDCExtent *ent, *ent_next;
    CXLDCExtentGroup *group = QTAILQ_FIRST(list);

    QTAILQ_REMOVE(list, group, node);
    QTAILQ_FOREACH_SAFE(ent, &group->list, node, ent_next) {
        cxl_remove_extent_from_extent_list(&group->list, ent);
    }
    g_free(group);
}

/*
 * CXL r3.1 Table 8-168: Add Dynamic Capacity Response Input Payload
 * CXL r3.1 Table 8-170: Release Dynamic Capacity Input Payload
 */
typedef struct CXLUpdateDCExtentListInPl {
    uint32_t num_entries_updated;
    uint8_t flags;
    uint8_t rsvd[3];
    /* CXL r3.1 Table 8-169: Updated Extent */
    struct {
        uint64_t start_dpa;
        uint64_t len;
        uint8_t rsvd[8];
    } QEMU_PACKED updated_entries[];
} QEMU_PACKED CXLUpdateDCExtentListInPl;
/*
 * Check whether the extents in the extent list to operate on are valid:
 * 1. The extent should be in the range of a valid DC region;
 * 2. The extent should not cross multiple regions;
 * 3. The start DPA and the length of the extent should align with the block
 *    size of the region;
 * 4. The address ranges of the extents in the list should not overlap.
 */
static CXLRetCode cxl_detect_malformed_extent_list(CXLType3Dev *ct3d,
        const CXLUpdateDCExtentListInPl *in)
{
    uint64_t min_block_size = UINT64_MAX;
    CXLDCRegion *region;
    CXLDCRegion *lastregion = &ct3d->dc.regions[ct3d->dc.num_regions - 1];
    g_autofree unsigned long *blk_bitmap = NULL;
    uint64_t dpa, len;
    uint32_t i;

    for (i = 0; i < ct3d->dc.num_regions; i++) {
        region = &ct3d->dc.regions[i];
        min_block_size = MIN(min_block_size, region->block_size);
    }

    blk_bitmap = bitmap_new((lastregion->base + lastregion->len -
                             ct3d->dc.regions[0].base) / min_block_size);

    for (i = 0; i < in->num_entries_updated; i++) {
        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        region = cxl_find_dc_region(ct3d, dpa, len);
        if (!region) {
            return CXL_MBOX_INVALID_PA;
        }

        dpa -= ct3d->dc.regions[0].base;
        if (dpa % region->block_size || len % region->block_size) {
            return CXL_MBOX_INVALID_EXTENT_LIST;
        }
        /* the dpa range is already covered by another extent in the list */
        if (test_any_bits_set(blk_bitmap, dpa / min_block_size,
                              len / min_block_size)) {
            return CXL_MBOX_INVALID_EXTENT_LIST;
        }
        bitmap_set(blk_bitmap, dpa / min_block_size, len / min_block_size);
    }

    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cxl_dcd_add_dyn_cap_rsp_dry_run(CXLType3Dev *ct3d,
        const CXLUpdateDCExtentListInPl *in)
{
    uint32_t i;
    CXLDCExtent *ent;
    CXLDCExtentGroup *ext_group;
    uint64_t dpa, len;
    Range range1, range2;

    for (i = 0; i < in->num_entries_updated; i++) {
        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        range_init_nofail(&range1, dpa, len);

        /*
         * The host-accepted DPA range must be contained by the first extent
         * group in the pending list
         */
        ext_group = QTAILQ_FIRST(&ct3d->dc.extents_pending);
        if (!cxl_extents_contains_dpa_range(&ext_group->list, dpa, len)) {
            return CXL_MBOX_INVALID_PA;
        }

        /* to-be-added range should not overlap with range already accepted */
        QTAILQ_FOREACH(ent, &ct3d->dc.extents, node) {
            range_init_nofail(&range2, ent->start_dpa, ent->len);
            if (range_overlaps_range(&range1, &range2)) {
                return CXL_MBOX_INVALID_PA;
            }
        }
    }
    return CXL_MBOX_SUCCESS;
}
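/*
 * Overlap detection sketch (illustrative only): with min_block_size = 1 MiB,
 * an extent whose region-relative dpa is 2 MiB with len = 2 MiB sets bitmap
 * bits [2, 4). A later entry in the same payload at region-relative dpa
 * 3 MiB then fails test_any_bits_set() because bit 3 is already set, and
 * the whole input is rejected with CXL_MBOX_INVALID_EXTENT_LIST.
 */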
/*
 * CXL r3.1 section 8.2.9.9.9.3: Add Dynamic Capacity Response (Opcode 4802h)
 * An extent is added to the extent list and becomes usable only after the
 * response is processed successfully.
 */
static CXLRetCode cmd_dcd_add_dyn_cap_rsp(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLUpdateDCExtentListInPl *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDCExtentList *extent_list = &ct3d->dc.extents;
    uint32_t i;
    uint64_t dpa, len;
    CXLRetCode ret;

    if (len_in < sizeof(*in)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (in->num_entries_updated == 0) {
        cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);
        return CXL_MBOX_SUCCESS;
    }

    if (len_in <
        sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    /* Reject if adding the extents would exceed the device's tracking limit */
    if (in->num_entries_updated + ct3d->dc.total_extent_count >
        CXL_NUM_EXTENTS_SUPPORTED) {
        return CXL_MBOX_RESOURCES_EXHAUSTED;
    }

    ret = cxl_detect_malformed_extent_list(ct3d, in);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    ret = cxl_dcd_add_dyn_cap_rsp_dry_run(ct3d, in);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    for (i = 0; i < in->num_entries_updated; i++) {
        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        cxl_insert_extent_to_extent_list(extent_list, dpa, len, NULL, 0);
        ct3d->dc.total_extent_count += 1;
        ct3_set_region_block_backed(ct3d, dpa, len);
    }
    /* Remove the first extent group in the pending list */
    cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);

    return CXL_MBOX_SUCCESS;
}

/*
 * Copy extent list from src to dst
 * Return value: number of extents copied
 */
static uint32_t copy_extent_list(CXLDCExtentList *dst,
                                 const CXLDCExtentList *src)
{
    uint32_t cnt = 0;
    CXLDCExtent *ent;

    if (!dst || !src) {
        return 0;
    }

    QTAILQ_FOREACH(ent, src, node) {
        cxl_insert_extent_to_extent_list(dst, ent->start_dpa, ent->len,
                                         ent->tag, ent->shared_seq);
        cnt++;
    }
    return cnt;
}
static CXLRetCode cxl_dc_extent_release_dry_run(CXLType3Dev *ct3d,
        const CXLUpdateDCExtentListInPl *in, CXLDCExtentList *updated_list,
        uint32_t *updated_list_size)
{
    CXLDCExtent *ent, *ent_next;
    uint64_t dpa, len;
    uint32_t i;
    int cnt_delta = 0;
    CXLRetCode ret = CXL_MBOX_SUCCESS;

    QTAILQ_INIT(updated_list);
    copy_extent_list(updated_list, &ct3d->dc.extents);

    for (i = 0; i < in->num_entries_updated; i++) {
        Range range;

        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        /* Reject if the DPA range is not fully backed by valid extents */
        if (!ct3_test_region_block_backed(ct3d, dpa, len)) {
            ret = CXL_MBOX_INVALID_PA;
            goto free_and_exit;
        }

        /* After this point, extent overflow is the only error that can happen */
        while (len > 0) {
            QTAILQ_FOREACH(ent, updated_list, node) {
                range_init_nofail(&range, ent->start_dpa, ent->len);

                if (range_contains(&range, dpa)) {
                    uint64_t len1, len2 = 0, len_done = 0;
                    uint64_t ent_start_dpa = ent->start_dpa;
                    uint64_t ent_len = ent->len;

                    len1 = dpa - ent->start_dpa;
                    /* Found the extent or the subset of an existing extent */
                    if (range_contains(&range, dpa + len - 1)) {
                        len2 = ent_start_dpa + ent_len - dpa - len;
                    } else {
                        dpa = ent_start_dpa + ent_len;
                    }
                    len_done = ent_len - len1 - len2;

                    cxl_remove_extent_from_extent_list(updated_list, ent);
                    cnt_delta--;

                    if (len1) {
                        cxl_insert_extent_to_extent_list(updated_list,
                                                         ent_start_dpa,
                                                         len1, NULL, 0);
                        cnt_delta++;
                    }
                    if (len2) {
                        cxl_insert_extent_to_extent_list(updated_list,
                                                         dpa + len,
                                                         len2, NULL, 0);
                        cnt_delta++;
                    }

                    if (cnt_delta + ct3d->dc.total_extent_count >
                        CXL_NUM_EXTENTS_SUPPORTED) {
                        ret = CXL_MBOX_RESOURCES_EXHAUSTED;
                        goto free_and_exit;
                    }

                    len -= len_done;
                    break;
                }
            }
        }
    }
free_and_exit:
    if (ret != CXL_MBOX_SUCCESS) {
        QTAILQ_FOREACH_SAFE(ent, updated_list, node, ent_next) {
            cxl_remove_extent_from_extent_list(updated_list, ent);
        }
        *updated_list_size = 0;
    } else {
        *updated_list_size = ct3d->dc.total_extent_count + cnt_delta;
    }

    return ret;
}

/*
 * CXL r3.1 section 8.2.9.9.9.4: Release Dynamic Capacity (Opcode 4803h)
 */
static CXLRetCode cmd_dcd_release_dyn_cap(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLUpdateDCExtentListInPl *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDCExtentList updated_list;
    CXLDCExtent *ent, *ent_next;
    uint32_t updated_list_size;
    CXLRetCode ret;

    if (len_in < sizeof(*in)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (in->num_entries_updated == 0) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (len_in <
        sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    ret = cxl_detect_malformed_extent_list(ct3d, in);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    ret = cxl_dc_extent_release_dry_run(ct3d, in, &updated_list,
                                        &updated_list_size);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    /*
     * If the dry-run release passes, the returned updated_list is the new
     * extent list: clear the accepted list, copy the extents from
     * updated_list into it, and update the extent count.
     */
    QTAILQ_FOREACH_SAFE(ent, &ct3d->dc.extents, node, ent_next) {
        ct3_clear_region_block_backed(ct3d, ent->start_dpa, ent->len);
        cxl_remove_extent_from_extent_list(&ct3d->dc.extents, ent);
    }
    copy_extent_list(&ct3d->dc.extents, &updated_list);
    QTAILQ_FOREACH_SAFE(ent, &updated_list, node, ent_next) {
        ct3_set_region_block_backed(ct3d, ent->start_dpa, ent->len);
        cxl_remove_extent_from_extent_list(&updated_list, ent);
    }
    ct3d->dc.total_extent_count = updated_list_size;

    return CXL_MBOX_SUCCESS;
}
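/*
 * Partial release example (illustrative only): releasing [dpa, dpa + len)
 * from the middle of an accepted extent [start, start + ent_len) leaves two
 * fragments,
 *
 *     len1 = dpa - start;                      head fragment
 *     len2 = start + ent_len - dpa - len;      tail fragment
 *
 * so the dry run above swaps one extent for two (cnt_delta = +1) and the
 * result is only committed if the device can still track the new total.
 */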
static const struct cxl_cmd cxl_cmd_set[256][256] = {
    [INFOSTAT][BACKGROUND_OPERATION_ABORT] = { "BACKGROUND_OPERATION_ABORT",
        cmd_infostat_bg_op_abort, 0, 0 },
    [EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS",
        cmd_events_get_records, 1, 0 },
    [EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS",
        cmd_events_clear_records, ~0, CXL_MBOX_IMMEDIATE_LOG_CHANGE },
    [EVENTS][GET_INTERRUPT_POLICY] = { "EVENTS_GET_INTERRUPT_POLICY",
        cmd_events_get_interrupt_policy, 0, 0 },
    [EVENTS][SET_INTERRUPT_POLICY] = { "EVENTS_SET_INTERRUPT_POLICY",
        cmd_events_set_interrupt_policy,
        ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE },
    [FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO",
        cmd_firmware_update_get_info, 0, 0 },
    [FIRMWARE_UPDATE][TRANSFER] = { "FIRMWARE_UPDATE_TRANSFER",
        cmd_firmware_update_transfer, ~0,
        CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT },
    [FIRMWARE_UPDATE][ACTIVATE] = { "FIRMWARE_UPDATE_ACTIVATE",
        cmd_firmware_update_activate, 2,
        CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set,
        8, CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported,
        0, 0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [FEATURES][GET_SUPPORTED] = { "FEATURES_GET_SUPPORTED",
        cmd_features_get_supported, 0x8, 0 },
    [FEATURES][GET_FEATURE] = { "FEATURES_GET_FEATURE",
        cmd_features_get_feature, 0x15, 0 },
    [FEATURES][SET_FEATURE] = { "FEATURES_SET_FEATURE",
        cmd_features_set_feature,
        ~0,
        (CXL_MBOX_IMMEDIATE_CONFIG_CHANGE |
         CXL_MBOX_IMMEDIATE_DATA_CHANGE |
         CXL_MBOX_IMMEDIATE_POLICY_CHANGE |
         CXL_MBOX_IMMEDIATE_LOG_CHANGE |
         CXL_MBOX_SECURITY_STATE_CHANGE)},
    [IDENTIFY][MEMORY_DEVICE] = { "IDENTIFY_MEMORY_DEVICE",
        cmd_identify_memory_device, 0, 0 },
    [CCLS][GET_PARTITION_INFO] = { "CCLS_GET_PARTITION_INFO",
        cmd_ccls_get_partition_info, 0, 0 },
    [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 },
    [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa,
        ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE | CXL_MBOX_IMMEDIATE_DATA_CHANGE },
    [SANITIZE][OVERWRITE] = { "SANITIZE_OVERWRITE", cmd_sanitize_overwrite, 0,
        (CXL_MBOX_IMMEDIATE_DATA_CHANGE |
         CXL_MBOX_SECURITY_STATE_CHANGE |
         CXL_MBOX_BACKGROUND_OPERATION |
         CXL_MBOX_BACKGROUND_OPERATION_ABORT)},
    [PERSISTENT_MEM][GET_SECURITY_STATE] = { "GET_SECURITY_STATE",
        cmd_get_security_state, 0, 0 },
    [MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST",
        cmd_media_get_poison_list, 16, 0 },
    [MEDIA_AND_POISON][INJECT_POISON] = { "MEDIA_AND_POISON_INJECT_POISON",
        cmd_media_inject_poison, 8, 0 },
    [MEDIA_AND_POISON][CLEAR_POISON] = { "MEDIA_AND_POISON_CLEAR_POISON",
        cmd_media_clear_poison, 72, 0 },
    [MEDIA_AND_POISON][GET_SCAN_MEDIA_CAPABILITIES] = {
        "MEDIA_AND_POISON_GET_SCAN_MEDIA_CAPABILITIES",
        cmd_media_get_scan_media_capabilities, 16, 0 },
    [MEDIA_AND_POISON][SCAN_MEDIA] = { "MEDIA_AND_POISON_SCAN_MEDIA",
        cmd_media_scan_media, 17,
        (CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT)},
    [MEDIA_AND_POISON][GET_SCAN_MEDIA_RESULTS] = {
        "MEDIA_AND_POISON_GET_SCAN_MEDIA_RESULTS",
        cmd_media_get_scan_media_results, 0, 0 },
};
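/*
 * Reading a table entry (illustrative only): each entry is
 * { name, handler, expected input length, effects }, where ~0 marks a
 * variable-length input payload. For example,
 *
 *     [MEDIA_AND_POISON][CLEAR_POISON] = { "MEDIA_AND_POISON_CLEAR_POISON",
 *                                          cmd_media_clear_poison, 72, 0 },
 *
 * fixes the input at 72 bytes: an 8-byte DPA plus 64 bytes of replacement
 * data, matching struct clear_poison_pl above. cxl_process_cci_message()
 * rejects any other length with CXL_MBOX_INVALID_PAYLOAD_LENGTH.
 */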
static const struct cxl_cmd cxl_cmd_set_dcd[256][256] = {
    [DCD_CONFIG][GET_DC_CONFIG] = { "DCD_GET_DC_CONFIG",
        cmd_dcd_get_dyn_cap_config, 2, 0 },
    [DCD_CONFIG][GET_DYN_CAP_EXT_LIST] = {
        "DCD_GET_DYNAMIC_CAPACITY_EXTENT_LIST", cmd_dcd_get_dyn_cap_ext_list,
        8, 0 },
    [DCD_CONFIG][ADD_DYN_CAP_RSP] = {
        "DCD_ADD_DYNAMIC_CAPACITY_RESPONSE", cmd_dcd_add_dyn_cap_rsp,
        ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE },
    [DCD_CONFIG][RELEASE_DYN_CAP] = {
        "DCD_RELEASE_DYNAMIC_CAPACITY", cmd_dcd_release_dyn_cap,
        ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE },
};

static const struct cxl_cmd cxl_cmd_set_sw[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [INFOSTAT][BACKGROUND_OPERATION_STATUS] = { "BACKGROUND_OPERATION_STATUS",
        cmd_infostat_bg_op_sts, 0, 0 },
    [INFOSTAT][BACKGROUND_OPERATION_ABORT] = { "BACKGROUND_OPERATION_ABORT",
        cmd_infostat_bg_op_abort, 0, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 8,
        CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
        0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [PHYSICAL_SWITCH][IDENTIFY_SWITCH_DEVICE] = { "IDENTIFY_SWITCH_DEVICE",
        cmd_identify_switch_device, 0, 0 },
    [PHYSICAL_SWITCH][GET_PHYSICAL_PORT_STATE] = { "SWITCH_PHYSICAL_PORT_STATS",
        cmd_get_physical_port_state, ~0, 0 },
    [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
        cmd_tunnel_management_cmd, ~0, 0 },
};

/*
 * While the command is executing in the background, the device should
 * update the percentage complete in the Background Command Status Register
 * at least once per second.
 */

#define CXL_MBOX_BG_UPDATE_FREQ 1000UL

int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd,
                            size_t len_in, uint8_t *pl_in, size_t *len_out,
                            uint8_t *pl_out, bool *bg_started)
{
    int ret;
    const struct cxl_cmd *cxl_cmd;
    opcode_handler h;
    CXLDeviceState *cxl_dstate;

    *len_out = 0;
    cxl_cmd = &cci->cxl_cmd_set[set][cmd];
    h = cxl_cmd->handler;
    if (!h) {
        qemu_log_mask(LOG_UNIMP, "Command %04xh not implemented\n",
                      set << 8 | cmd);
        return CXL_MBOX_UNSUPPORTED;
    }

    if (len_in != cxl_cmd->in && cxl_cmd->in != ~0) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    /* Only one bg command at a time */
    if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) &&
        cci->bg.runtime > 0) {
        return CXL_MBOX_BUSY;
    }

    /* forbid any selected commands while the media is disabled */
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;

        if (cxl_dev_media_disabled(cxl_dstate)) {
            if (h == cmd_events_get_records ||
                h == cmd_ccls_get_partition_info ||
                h == cmd_ccls_set_lsa ||
                h == cmd_ccls_get_lsa ||
                h == cmd_logs_get_log ||
                h == cmd_media_get_poison_list ||
                h == cmd_media_inject_poison ||
                h == cmd_media_clear_poison ||
                h == cmd_sanitize_overwrite ||
                h == cmd_firmware_update_transfer ||
                h == cmd_firmware_update_activate) {
                return CXL_MBOX_MEDIA_DISABLED;
            }
        }
    }

    ret = (*h)(cxl_cmd, pl_in, len_in, pl_out, len_out, cci);
    if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) &&
        ret == CXL_MBOX_BG_STARTED) {
        *bg_started = true;
    } else {
        *bg_started = false;
    }

    /* Set bg and the return code */
    if (*bg_started) {
        uint64_t now;

        cci->bg.opcode = (set << 8) | cmd;

        cci->bg.complete_pct = 0;
        cci->bg.aborted = false;
        cci->bg.ret_code = 0;

        now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
        cci->bg.starttime = now;
        timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
    }

    return ret;
}
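/*
 * Dispatch sketch (illustrative only): the register-level mailbox code ends
 * up calling this as roughly
 *
 *     bool bg;
 *     size_t len_out = 0;
 *     ret = cxl_process_cci_message(cci, set, cmd, len_in, input,
 *                                   &len_out, output, &bg);
 *
 * A CXL_MBOX_BG_STARTED return (reported via bg == true) means the command
 * continues under bg_timercb() below rather than completing here.
 */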
static void bg_timercb(void *opaque)
{
    CXLCCI *cci = opaque;
    uint64_t now, total_time;

    qemu_mutex_lock(&cci->bg.lock);

    now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    total_time = cci->bg.starttime + cci->bg.runtime;

    if (now >= total_time) { /* we are done */
        uint16_t ret = CXL_MBOX_SUCCESS;

        cci->bg.complete_pct = 100;
        cci->bg.ret_code = ret;
        switch (cci->bg.opcode) {
        case 0x0201: /* fw transfer */
            __do_firmware_xfer(cci);
            break;
        case 0x4400: /* sanitize */
        {
            CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

            __do_sanitization(ct3d);
            cxl_dev_enable_media(&ct3d->cxl_dstate);
        }
            break;
        case 0x4304: /* scan media */
        {
            CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

            __do_scan_media(ct3d);
            break;
        }
        default:
            __builtin_unreachable();
            break;
        }
    } else {
        /* estimate only */
        cci->bg.complete_pct =
            100 * (now - cci->bg.starttime) / cci->bg.runtime;
        timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
    }

    if (cci->bg.complete_pct == 100) {
        /* TODO: generalize to switch CCI */
        CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
        CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
        PCIDevice *pdev = PCI_DEVICE(cci->d);

        cci->bg.starttime = 0;
        /* registers are updated, allow new bg-capable cmds */
        cci->bg.runtime = 0;

        if (msix_enabled(pdev)) {
            msix_notify(pdev, cxl_dstate->mbox_msi_n);
        } else if (msi_enabled(pdev)) {
            msi_notify(pdev, cxl_dstate->mbox_msi_n);
        }
    }

    qemu_mutex_unlock(&cci->bg.lock);
}

static void cxl_rebuild_cel(CXLCCI *cci)
{
    cci->cel_size = 0; /* Reset for a fresh build */
    for (int set = 0; set < 256; set++) {
        for (int cmd = 0; cmd < 256; cmd++) {
            if (cci->cxl_cmd_set[set][cmd].handler) {
                const struct cxl_cmd *c = &cci->cxl_cmd_set[set][cmd];
                struct cel_log *log =
                    &cci->cel_log[cci->cel_size];

                log->opcode = (set << 8) | cmd;
                log->effect = c->effect;
                cci->cel_size++;
            }
        }
    }
}

void cxl_init_cci(CXLCCI *cci, size_t payload_max)
{
    cci->payload_max = payload_max;
    cxl_rebuild_cel(cci);

    cci->bg.complete_pct = 0;
    cci->bg.starttime = 0;
    cci->bg.runtime = 0;
    cci->bg.aborted = false;
    cci->bg.timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                 bg_timercb, cci);
    qemu_mutex_init(&cci->bg.lock);

    memset(&cci->fw, 0, sizeof(cci->fw));
    cci->fw.active_slot = 1;
    cci->fw.slot[cci->fw.active_slot - 1] = true;
    cci->initialized = true;
}

void cxl_destroy_cci(CXLCCI *cci)
{
    qemu_mutex_destroy(&cci->bg.lock);
    cci->initialized = false;
}

static void cxl_copy_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmds)[256])
{
    for (int set = 0; set < 256; set++) {
        for (int cmd = 0; cmd < 256; cmd++) {
            if (cxl_cmds[set][cmd].handler) {
                cci->cxl_cmd_set[set][cmd] = cxl_cmds[set][cmd];
            }
        }
    }
}

void cxl_add_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmd_set)[256],
                          size_t payload_max)
{
    cci->payload_max = MAX(payload_max, cci->payload_max);
    cxl_copy_cci_commands(cci, cxl_cmd_set);
    cxl_rebuild_cel(cci);
}

void cxl_initialize_mailbox_swcci(CXLCCI *cci, DeviceState *intf,
                                  DeviceState *d, size_t payload_max)
{
    cxl_copy_cci_commands(cci, cxl_cmd_set_sw);
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}
void cxl_initialize_mailbox_t3(CXLCCI *cci, DeviceState *d, size_t payload_max)
{
    CXLType3Dev *ct3d = CXL_TYPE3(d);

    cxl_copy_cci_commands(cci, cxl_cmd_set);
    if (ct3d->dc.num_regions) {
        cxl_copy_cci_commands(cci, cxl_cmd_set_dcd);
    }
    cci->d = d;

    /* No separation for PCI MB as protocol handled in PCI device */
    cci->intf = d;
    cxl_init_cci(cci, payload_max);
}

static const struct cxl_cmd cxl_cmd_set_t3_ld[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
        0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
};

void cxl_initialize_t3_ld_cci(CXLCCI *cci, DeviceState *d, DeviceState *intf,
                              size_t payload_max)
{
    cxl_copy_cci_commands(cci, cxl_cmd_set_t3_ld);
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}

static const struct cxl_cmd cxl_cmd_set_t3_fm_owned_ld_mctp[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0},
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
        0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
        cmd_tunnel_management_cmd, ~0, 0 },
};

void cxl_initialize_t3_fm_owned_ld_mctpcci(CXLCCI *cci, DeviceState *d,
                                           DeviceState *intf,
                                           size_t payload_max)
{
    cxl_copy_cci_commands(cci, cxl_cmd_set_t3_fm_owned_ld_mctp);
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}
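/*
 * Initialization sketch (illustrative only; the exact call site lives in the
 * Type 3 device realize path, not in this file): a Type 3 device wires up
 * its primary mailbox with something like
 *
 *     cxl_initialize_mailbox_t3(&ct3d->cci, DEVICE(ct3d),
 *                               CXL_MAILBOX_MAX_PAYLOAD_SIZE);
 *
 * while switch CCIs and FM-owned LDs use the _swcci / _fm_owned_ld variants
 * above so that each CCI exposes only its permitted command set.
 */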