/*
 * CXL Utility library for mailbox interface
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/cxl/cxl.h"
#include "hw/cxl/cxl_events.h"
#include "hw/cxl/cxl_mailbox.h"
#include "hw/pci/pci.h"
#include "hw/pci-bridge/cxl_upstream_port.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "qemu/units.h"
#include "qemu/uuid.h"
#include "sysemu/hostmem.h"
#include "qemu/range.h"

#define CXL_CAPACITY_MULTIPLIER   (256 * MiB)
#define CXL_DC_EVENT_LOG_SIZE 8
#define CXL_NUM_EXTENTS_SUPPORTED 512
#define CXL_NUM_TAGS_SUPPORTED 0

/*
 * How to add a new command, example. The command set FOO, with cmd BAR.
 *  1. Add the command set and cmd to the enum.
 *     FOO    = 0x7f,
 *        #define BAR 0
 *  2. Implement the handler
 *     static CXLRetCode cmd_foo_bar(struct cxl_cmd *cmd,
 *                                   CXLDeviceState *cxl_dstate, uint16_t *len)
 *  3. Add the command to the cxl_cmd_set[][]
 *     [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y },
 *  4. Implement your handler
 *     define_mailbox_handler(FOO_BAR) { ... return CXL_MBOX_SUCCESS; }
 *
 *
 *  Writing the handler:
 *    The handler will provide the &struct cxl_cmd, the &CXLDeviceState, and
 *    the in/out length of the payload. The handler is responsible for
 *    consuming the payload from cmd->payload and operating upon it as
 *    necessary. It must then fill the output data into cmd->payload
 *    (overwriting what was there), setting the length, and returning a valid
 *    return code.
 *
 * XXX: The handler need not worry about endianness. The payload is read out of
 * a register interface that already deals with it.
 */
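
/*
 * Illustration only, not wired into any command table: a minimal "FOO/BAR"
 * handler sketch using the six-argument signature the handlers below actually
 * take (the step-by-step comment above predates it). FOO, BAR and cmd_foo_bar
 * are hypothetical names.
 *
 *     static CXLRetCode cmd_foo_bar(const struct cxl_cmd *cmd,
 *                                   uint8_t *payload_in, size_t len_in,
 *                                   uint8_t *payload_out, size_t *len_out,
 *                                   CXLCCI *cci)
 *     {
 *         if (len_in < 1) {
 *             return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
 *         }
 *         payload_out[0] = payload_in[0];  // echo one byte back
 *         *len_out = 1;
 *         return CXL_MBOX_SUCCESS;
 *     }
 */
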
enum {
    INFOSTAT    = 0x00,
        #define IS_IDENTIFY                 0x1
        #define BACKGROUND_OPERATION_STATUS 0x2
    EVENTS      = 0x01,
        #define GET_RECORDS            0x0
        #define CLEAR_RECORDS          0x1
        #define GET_INTERRUPT_POLICY   0x2
        #define SET_INTERRUPT_POLICY   0x3
    FIRMWARE_UPDATE = 0x02,
        #define GET_INFO      0x0
        #define TRANSFER      0x1
        #define ACTIVATE      0x2
    TIMESTAMP   = 0x03,
        #define GET           0x0
        #define SET           0x1
    LOGS        = 0x04,
        #define GET_SUPPORTED 0x0
        #define GET_LOG       0x1
    FEATURES    = 0x05,
        #define GET_SUPPORTED 0x0
        #define GET_FEATURE   0x1
        #define SET_FEATURE   0x2
    IDENTIFY    = 0x40,
        #define MEMORY_DEVICE 0x0
    CCLS        = 0x41,
        #define GET_PARTITION_INFO     0x0
        #define GET_LSA                0x2
        #define SET_LSA                0x3
    SANITIZE    = 0x44,
        #define OVERWRITE     0x0
        #define SECURE_ERASE  0x1
    PERSISTENT_MEM = 0x45,
        #define GET_SECURITY_STATE     0x0
    MEDIA_AND_POISON = 0x43,
        #define GET_POISON_LIST        0x0
        #define INJECT_POISON          0x1
        #define CLEAR_POISON           0x2
        #define GET_SCAN_MEDIA_CAPABILITIES 0x3
        #define SCAN_MEDIA             0x4
        #define GET_SCAN_MEDIA_RESULTS 0x5
    DCD_CONFIG  = 0x48,
        #define GET_DC_CONFIG          0x0
        #define GET_DYN_CAP_EXT_LIST   0x1
        #define ADD_DYN_CAP_RSP        0x2
        #define RELEASE_DYN_CAP        0x3
    PHYSICAL_SWITCH = 0x51,
        #define IDENTIFY_SWITCH_DEVICE      0x0
        #define GET_PHYSICAL_PORT_STATE     0x1
    TUNNEL = 0x53,
        #define MANAGEMENT_COMMAND     0x0
};

/* CCI Message Format CXL r3.1 Figure 7-19 */
typedef struct CXLCCIMessage {
    uint8_t category;
#define CXL_CCI_CAT_REQ 0
#define CXL_CCI_CAT_RSP 1
    uint8_t tag;
    uint8_t resv1;
    uint8_t command;
    uint8_t command_set;
    uint8_t pl_length[3];
    uint16_t rc;
    uint16_t vendor_specific;
    uint8_t payload[];
} QEMU_PACKED CXLCCIMessage;
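
/*
 * Worked size example (illustration only): the fixed CXLCCIMessage header is
 * 12 bytes (1 + 1 + 1 + 1 + 1 + 3 + 2 + 2), followed by the variable payload.
 * The tunnel command below wraps a CCIMessage behind a 4-byte header
 * (port_or_ld_id, target_type, size), so tunneling a command that carries an
 * 8-byte payload gives:
 *
 *     in->size = 12 + 8  = 20   (CCI message header + tunneled payload)
 *     len_in   = 4  + 20 = 24   (tunnel header + in->size)
 *
 * which is exactly the "in->size != len_in - offsetof(..., ccimessage)"
 * consistency check enforced in cmd_tunnel_management_cmd().
 */
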
/* This command is only defined to an MLD FM Owned LD or an MHD */
static CXLRetCode cmd_tunnel_management_cmd(const struct cxl_cmd *cmd,
                                            uint8_t *payload_in,
                                            size_t len_in,
                                            uint8_t *payload_out,
                                            size_t *len_out,
                                            CXLCCI *cci)
{
    PCIDevice *tunnel_target;
    CXLCCI *target_cci;
    struct {
        uint8_t port_or_ld_id;
        uint8_t target_type;
        uint16_t size;
        CXLCCIMessage ccimessage;
    } QEMU_PACKED *in;
    struct {
        uint16_t resp_len;
        uint8_t resv[2];
        CXLCCIMessage ccimessage;
    } QEMU_PACKED *out;
    size_t pl_length, length_out;
    bool bg_started;
    int rc;

    if (cmd->in < sizeof(*in)) {
        return CXL_MBOX_INVALID_INPUT;
    }
    in = (void *)payload_in;
    out = (void *)payload_out;

    if (len_in < sizeof(*in)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /* Enough room for minimum sized message - no payload */
    if (in->size < sizeof(in->ccimessage)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /* Length of input payload should be in->size + a wrapping tunnel header */
    if (in->size != len_in - offsetof(typeof(*out), ccimessage)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    if (in->ccimessage.category != CXL_CCI_CAT_REQ) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (in->target_type != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "Tunneled Command sent to non-existent FM-LD");
        return CXL_MBOX_INVALID_INPUT;
    }

    /*
     * The target of a tunnel unfortunately depends on the type of CCI
     * reading the message.
     * If in a switch, it is the port number.
     * If in an MLD, it is the LD number.
     * If in an MHD, the target type indicates where we are going.
     */
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

        if (in->port_or_ld_id != 0) {
            /* Only pretending to have one for now! */
            return CXL_MBOX_INVALID_INPUT;
        }
        target_cci = &ct3d->ld0_cci;
    } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
        CXLUpstreamPort *usp = CXL_USP(cci->d);

        tunnel_target = pcie_find_port_by_pn(&PCI_BRIDGE(usp)->sec_bus,
                                             in->port_or_ld_id);
        if (!tunnel_target) {
            return CXL_MBOX_INVALID_INPUT;
        }
        tunnel_target =
            pci_bridge_get_sec_bus(PCI_BRIDGE(tunnel_target))->devices[0];
        if (!tunnel_target) {
            return CXL_MBOX_INVALID_INPUT;
        }
        if (object_dynamic_cast(OBJECT(tunnel_target), TYPE_CXL_TYPE3)) {
            CXLType3Dev *ct3d = CXL_TYPE3(tunnel_target);

            /* Tunneled VDMs always land on FM Owned LD */
            target_cci = &ct3d->vdm_fm_owned_ld_mctp_cci;
        } else {
            return CXL_MBOX_INVALID_INPUT;
        }
    } else {
        return CXL_MBOX_INVALID_INPUT;
    }

    pl_length = in->ccimessage.pl_length[2] << 16 |
        in->ccimessage.pl_length[1] << 8 | in->ccimessage.pl_length[0];
    rc = cxl_process_cci_message(target_cci,
                                 in->ccimessage.command_set,
                                 in->ccimessage.command,
                                 pl_length, in->ccimessage.payload,
                                 &length_out, out->ccimessage.payload,
                                 &bg_started);
    /* Payload is in place; now fill in the rest of the CCI header */
    out->resp_len = length_out + sizeof(CXLCCIMessage);
    st24_le_p(out->ccimessage.pl_length, length_out);
    out->ccimessage.rc = rc;
    out->ccimessage.category = CXL_CCI_CAT_RSP;
    out->ccimessage.command = in->ccimessage.command;
    out->ccimessage.command_set = in->ccimessage.command_set;
    out->ccimessage.tag = in->ccimessage.tag;
    *len_out = length_out + sizeof(*out);

    return CXL_MBOX_SUCCESS;
}
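
/*
 * Illustration only: pl_length is a 3-byte little-endian field, assembled as
 * above. A 0x1234-byte payload is carried as pl_length[] = {0x34, 0x12, 0x00},
 * and st24_le_p() writes the response length back in the same byte order.
 */
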
static CXLRetCode cmd_events_get_records(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in, size_t len_in,
                                         uint8_t *payload_out, size_t *len_out,
                                         CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLGetEventPayload *pl;
    uint8_t log_type;
    int max_recs;

    if (cmd->in < sizeof(log_type)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    log_type = payload_in[0];

    pl = (CXLGetEventPayload *)payload_out;

    max_recs = (cxlds->payload_size - CXL_EVENT_PAYLOAD_HDR_SIZE) /
                CXL_EVENT_RECORD_SIZE;
    if (max_recs > 0xFFFF) {
        max_recs = 0xFFFF;
    }

    return cxl_event_get_records(cxlds, pl, log_type, max_recs, len_out);
}

static CXLRetCode cmd_events_clear_records(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLClearEventPayload *pl;

    pl = (CXLClearEventPayload *)payload_in;

    if (len_in < sizeof(*pl) ||
        len_in < sizeof(*pl) + sizeof(*pl->handle) * pl->nr_recs) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    *len_out = 0;
    return cxl_event_clear_records(cxlds, pl);
}

static CXLRetCode cmd_events_get_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    policy = (CXLEventInterruptPolicy *)payload_out;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    if (log->irq_enabled) {
        policy->info_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    if (log->irq_enabled) {
        policy->warn_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    if (log->irq_enabled) {
        policy->failure_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    if (log->irq_enabled) {
        policy->fatal_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    if (log->irq_enabled) {
        /* Dynamic Capacity borrows the same vector as info */
        policy->dyn_cap_settings = CXL_INT_MSI_MSIX;
    }

    *len_out = sizeof(*policy);
    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cmd_events_set_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    if (len_in < CXL_EVENT_INT_SETTING_MIN_LEN) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    policy = (CXLEventInterruptPolicy *)payload_in;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    log->irq_enabled = (policy->info_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    log->irq_enabled = (policy->warn_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    log->irq_enabled = (policy->failure_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    log->irq_enabled = (policy->fatal_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    /* DCD is optional */
    if (len_in < sizeof(*policy)) {
        return CXL_MBOX_SUCCESS;
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    log->irq_enabled = (policy->dyn_cap_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}
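
/*
 * Illustration only (the exact bit layout lives in CXL_EVENT_INT_SETTING()
 * in cxl_events.h): per the CXL r3.1 interrupt policy format, each per-log
 * settings byte carries the interrupt mode in its low bits (01b for
 * MSI/MSI-X) with the message number in the upper nibble, so a log wired to
 * MSI vector 2 would read back as 0x21. Set Interrupt Policy above only
 * honours the mode bits and ignores any requested vector.
 */
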
/* CXL r3.1 section 8.2.9.1.1: Identify (Opcode 0001h) */
static CXLRetCode cmd_infostat_identify(const struct cxl_cmd *cmd,
                                        uint8_t *payload_in,
                                        size_t len_in,
                                        uint8_t *payload_out,
                                        size_t *len_out,
                                        CXLCCI *cci)
{
    PCIDeviceClass *class = PCI_DEVICE_GET_CLASS(cci->d);
    struct {
        uint16_t pcie_vid;
        uint16_t pcie_did;
        uint16_t pcie_subsys_vid;
        uint16_t pcie_subsys_id;
        uint64_t sn;
        uint8_t max_message_size;
        uint8_t component_type;
    } QEMU_PACKED *is_identify;
    QEMU_BUILD_BUG_ON(sizeof(*is_identify) != 18);

    is_identify = (void *)payload_out;
    is_identify->pcie_vid = class->vendor_id;
    is_identify->pcie_did = class->device_id;
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
        is_identify->sn = CXL_USP(cci->d)->sn;
        /* Subsystem info not defined for a USP */
        is_identify->pcie_subsys_vid = 0;
        is_identify->pcie_subsys_id = 0;
        is_identify->component_type = 0x0; /* Switch */
    } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        PCIDevice *pci_dev = PCI_DEVICE(cci->d);

        is_identify->sn = CXL_TYPE3(cci->d)->sn;
        /*
         * We can't always use class->subsystem_vendor_id as
         * it is not set if the defaults are used.
         */
        is_identify->pcie_subsys_vid =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID);
        is_identify->pcie_subsys_id =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_ID);
        is_identify->component_type = 0x3; /* Type 3 */
    }

    /* TODO: Allow this to vary across different CCIs */
    is_identify->max_message_size = 9; /* 512 bytes - MCTP_CXL_MAILBOX_BYTES */
    *len_out = sizeof(*is_identify);
    return CXL_MBOX_SUCCESS;
}

static void cxl_set_dsp_active_bm(PCIBus *b, PCIDevice *d,
                                  void *private)
{
    uint8_t *bm = private;

    if (object_dynamic_cast(OBJECT(d), TYPE_CXL_DSP)) {
        uint8_t port = PCIE_PORT(d)->port;

        bm[port / 8] |= 1 << (port % 8);
    }
}
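
/*
 * Illustration only: the active port bitmap packs one bit per port number,
 * eight ports per byte, so a DSP on port 10 sets bit 2 of byte 1:
 * bm[10 / 8] |= 1 << (10 % 8), i.e. bm[1] |= 0x04.
 */
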
/* CXL r3.1 Section 7.6.7.1.1: Identify Switch Device (Opcode 5100h) */
static CXLRetCode cmd_identify_switch_device(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    PCIEPort *usp = PCIE_PORT(cci->d);
    PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
    int num_phys_ports = pcie_count_ds_ports(bus);

    struct cxl_fmapi_ident_switch_dev_resp_pl {
        uint8_t ingress_port_id;
        uint8_t rsvd;
        uint8_t num_physical_ports;
        uint8_t num_vcss;
        uint8_t active_port_bitmask[0x20];
        uint8_t active_vcs_bitmask[0x20];
        uint16_t total_vppbs;
        uint16_t bound_vppbs;
        uint8_t num_hdm_decoders_per_usp;
    } QEMU_PACKED *out;
    QEMU_BUILD_BUG_ON(sizeof(*out) != 0x49);

    out = (struct cxl_fmapi_ident_switch_dev_resp_pl *)payload_out;
    *out = (struct cxl_fmapi_ident_switch_dev_resp_pl) {
        .num_physical_ports = num_phys_ports + 1, /* 1 USP */
        .num_vcss = 1, /* Multiple VCSs not yet supported - potentially tricky */
        .active_vcs_bitmask[0] = 0x1,
        .total_vppbs = num_phys_ports + 1,
        .bound_vppbs = num_phys_ports + 1,
        .num_hdm_decoders_per_usp = 4,
    };

    /* Depends on the CCI type */
    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_PCIE_PORT)) {
        out->ingress_port_id = PCIE_PORT(cci->intf)->port;
    } else {
        /* MCTP? */
        out->ingress_port_id = 0;
    }

    pci_for_each_device_under_bus(bus, cxl_set_dsp_active_bm,
                                  out->active_port_bitmask);
    out->active_port_bitmask[usp->port / 8] |= (1 << usp->port % 8);

    *len_out = sizeof(*out);

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 7.6.7.1.2: Get Physical Port State (Opcode 5101h) */
static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    /* CXL r3.1 Table 7-17: Get Physical Port State Request Payload */
    struct cxl_fmapi_get_phys_port_state_req_pl {
        uint8_t num_ports;
        uint8_t ports[];
    } QEMU_PACKED *in;

    /*
     * CXL r3.1 Table 7-19: Get Physical Port State Port Information Block
     * Format
     */
    struct cxl_fmapi_port_state_info_block {
        uint8_t port_id;
        uint8_t config_state;
        uint8_t connected_device_cxl_version;
        uint8_t rsv1;
        uint8_t connected_device_type;
        uint8_t port_cxl_version_bitmask;
        uint8_t max_link_width;
        uint8_t negotiated_link_width;
        uint8_t supported_link_speeds_vector;
        uint8_t max_link_speed;
        uint8_t current_link_speed;
        uint8_t ltssm_state;
        uint8_t first_lane_num;
        uint16_t link_state;
        uint8_t supported_ld_count;
    } QEMU_PACKED;

    /* CXL r3.1 Table 7-18: Get Physical Port State Response Payload */
    struct cxl_fmapi_get_phys_port_state_resp_pl {
        uint8_t num_ports;
        uint8_t rsv1[3];
        struct cxl_fmapi_port_state_info_block ports[];
    } QEMU_PACKED *out;
    PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
    PCIEPort *usp = PCIE_PORT(cci->d);
    size_t pl_size;
    int i;

    in = (struct cxl_fmapi_get_phys_port_state_req_pl *)payload_in;
    out = (struct cxl_fmapi_get_phys_port_state_resp_pl *)payload_out;

    if (len_in < sizeof(*in)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /* Check if what was requested can fit */
    if (sizeof(*out) + sizeof(*out->ports) * in->num_ports > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* For success there should be a match for each requested port */
    out->num_ports = in->num_ports;

    for (i = 0; i < in->num_ports; i++) {
        struct cxl_fmapi_port_state_info_block *port;
        /* First try to match on downstream port */
        PCIDevice *port_dev;
        uint16_t lnkcap, lnkcap2, lnksta;

        port = &out->ports[i];

        port_dev = pcie_find_port_by_pn(bus, in->ports[i]);
        if (port_dev) { /* DSP */
            PCIDevice *ds_dev = pci_bridge_get_sec_bus(PCI_BRIDGE(port_dev))
                ->devices[0];

            port->config_state = 3;
            if (ds_dev) {
                if (object_dynamic_cast(OBJECT(ds_dev), TYPE_CXL_TYPE3)) {
                    port->connected_device_type = 5; /* Assume MLD for now */
                } else {
                    port->connected_device_type = 1;
                }
            } else {
                port->connected_device_type = 0;
            }
            port->supported_ld_count = 3;
        } else if (usp->port == in->ports[i]) { /* USP */
            port_dev = PCI_DEVICE(usp);
            port->config_state = 4;
            port->connected_device_type = 0;
        } else {
            return CXL_MBOX_INVALID_INPUT;
        }

        port->port_id = in->ports[i];
        /* Information on status of this port in lnksta, lnkcap */
        if (!port_dev->exp.exp_cap) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
        lnksta = port_dev->config_read(port_dev,
                                       port_dev->exp.exp_cap + PCI_EXP_LNKSTA,
                                       sizeof(lnksta));
        lnkcap = port_dev->config_read(port_dev,
                                       port_dev->exp.exp_cap + PCI_EXP_LNKCAP,
                                       sizeof(lnkcap));
        lnkcap2 = port_dev->config_read(port_dev,
                                        port_dev->exp.exp_cap + PCI_EXP_LNKCAP2,
                                        sizeof(lnkcap2));

        port->max_link_width = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
        port->negotiated_link_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> 4;
        /* No definition for SLS field in linux/pci_regs.h */
        port->supported_link_speeds_vector = (lnkcap2 & 0xFE) >> 1;
        port->max_link_speed = lnkcap & PCI_EXP_LNKCAP_SLS;
        port->current_link_speed = lnksta & PCI_EXP_LNKSTA_CLS;
        /* TODO: Track down if we can get the rest of the info */
        port->ltssm_state = 0x7;
        port->first_lane_num = 0;
        port->link_state = 0;
        port->port_cxl_version_bitmask = 0x2;
        port->connected_device_cxl_version = 0x2;
    }

    pl_size = sizeof(*out) + sizeof(*out->ports) * in->num_ports;
    *len_out = pl_size;

    return CXL_MBOX_SUCCESS;
}
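
/*
 * Illustration only: PCI_EXP_LNKCAP_MLW occupies bits[9:4] of the Link
 * Capabilities register, so the ">> 4" above yields the width in lanes,
 * e.g. (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4 == 16 for an x16 port. The speed
 * fields are encoded rather than literal: PCI_EXP_LNKSTA_CLS == 3 means
 * 8 GT/s.
 */
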
/* CXL r3.1 Section 8.2.9.1.2: Background Operation Status (Opcode 0002h) */
static CXLRetCode cmd_infostat_bg_op_sts(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint8_t status;
        uint8_t rsvd;
        uint16_t opcode;
        uint16_t returncode;
        uint16_t vendor_ext_status;
    } QEMU_PACKED *bg_op_status;
    QEMU_BUILD_BUG_ON(sizeof(*bg_op_status) != 8);

    bg_op_status = (void *)payload_out;
    bg_op_status->status = cci->bg.complete_pct << 1;
    if (cci->bg.runtime > 0) {
        bg_op_status->status |= 1U << 0;
    }
    bg_op_status->opcode = cci->bg.opcode;
    bg_op_status->returncode = cci->bg.ret_code;
    *len_out = sizeof(*bg_op_status);

    return CXL_MBOX_SUCCESS;
}

#define CXL_FW_SLOTS 2
#define CXL_FW_SIZE  0x02000000 /* 32 MiB */

/* CXL r3.1 Section 8.2.9.3.1: Get FW Info (Opcode 0200h) */
static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    struct {
        uint8_t slots_supported;
        uint8_t slot_info;
        uint8_t caps;
        uint8_t rsvd[0xd];
        char fw_rev1[0x10];
        char fw_rev2[0x10];
        char fw_rev3[0x10];
        char fw_rev4[0x10];
    } QEMU_PACKED *fw_info;
    QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50);

    if (!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER) ||
        !QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER) ||
        !QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER)) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    fw_info = (void *)payload_out;

    fw_info->slots_supported = CXL_FW_SLOTS;
    fw_info->slot_info = (cci->fw.active_slot & 0x7) |
                         ((cci->fw.staged_slot & 0x7) << 3);
    fw_info->caps = BIT(0); /* online update supported */

    if (cci->fw.slot[0]) {
        pstrcpy(fw_info->fw_rev1, sizeof(fw_info->fw_rev1), "BWFW VERSION 0");
    }
    if (cci->fw.slot[1]) {
        pstrcpy(fw_info->fw_rev2, sizeof(fw_info->fw_rev2), "BWFW VERSION 1");
    }

    *len_out = sizeof(*fw_info);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.3.2: Transfer FW (Opcode 0201h) */
#define CXL_FW_XFER_ALIGNMENT   128

#define CXL_FW_XFER_ACTION_FULL     0x0
#define CXL_FW_XFER_ACTION_INIT     0x1
#define CXL_FW_XFER_ACTION_CONTINUE 0x2
#define CXL_FW_XFER_ACTION_END      0x3
#define CXL_FW_XFER_ACTION_ABORT    0x4
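
/*
 * Illustration only: the Transfer FW offset field is in units of
 * CXL_FW_XFER_ALIGNMENT (128 bytes) rather than bytes, so
 * fw_transfer->offset == 2 addresses byte 256, and the offset plus the
 * part's data length must stay within CXL_FW_SIZE (32 MiB).
 */
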
static CXLRetCode cmd_firmware_update_transfer(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    struct {
        uint8_t action;
        uint8_t slot;
        uint8_t rsvd1[2];
        uint32_t offset;
        uint8_t rsvd2[0x78];
        uint8_t data[];
    } QEMU_PACKED *fw_transfer = (void *)payload_in;
    size_t offset, length;

    if (len < sizeof(*fw_transfer)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (fw_transfer->action == CXL_FW_XFER_ACTION_ABORT) {
        /*
         * At this point there aren't any on-going transfers
         * running in the bg - this is serialized before this
         * call altogether. Just mark the state machine and
         * disregard any other input.
         */
        cci->fw.transferring = false;
        return CXL_MBOX_SUCCESS;
    }

    offset = fw_transfer->offset * CXL_FW_XFER_ALIGNMENT;
    length = len - sizeof(*fw_transfer);
    if (offset + length > CXL_FW_SIZE) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (cci->fw.transferring) {
        if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL ||
            fw_transfer->action == CXL_FW_XFER_ACTION_INIT) {
            return CXL_MBOX_FW_XFER_IN_PROGRESS;
        }
        /*
         * Abort partitioned package transfer if over 30 secs
         * between parts. As opposed to the explicit ABORT action,
         * semantically treat this condition as an error - as
         * if a part action were passed without a previous INIT.
         */
        if (difftime(time(NULL), cci->fw.last_partxfer) > 30.0) {
            cci->fw.transferring = false;
            return CXL_MBOX_INVALID_INPUT;
        }
    } else if (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE ||
               fw_transfer->action == CXL_FW_XFER_ACTION_END) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* allow back-to-back retransmission */
    if ((offset != cci->fw.prev_offset || length != cci->fw.prev_len) &&
        (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE ||
         fw_transfer->action == CXL_FW_XFER_ACTION_END)) {
        /* verify no overlaps */
        if (offset < cci->fw.prev_offset + cci->fw.prev_len) {
            return CXL_MBOX_FW_XFER_OUT_OF_ORDER;
        }
    }

    switch (fw_transfer->action) {
    case CXL_FW_XFER_ACTION_FULL: /* ignores offset */
    case CXL_FW_XFER_ACTION_END:
        if (fw_transfer->slot == 0 ||
            fw_transfer->slot == cci->fw.active_slot ||
            fw_transfer->slot > CXL_FW_SLOTS) {
            return CXL_MBOX_FW_INVALID_SLOT;
        }

        /* mark the slot used upon bg completion */
        break;
    case CXL_FW_XFER_ACTION_INIT:
        if (offset != 0) {
            return CXL_MBOX_INVALID_INPUT;
        }

        cci->fw.transferring = true;
        cci->fw.prev_offset = offset;
        cci->fw.prev_len = length;
        break;
    case CXL_FW_XFER_ACTION_CONTINUE:
        cci->fw.prev_offset = offset;
        cci->fw.prev_len = length;
        break;
    default:
        return CXL_MBOX_INVALID_INPUT;
    }

    if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL) {
        cci->bg.runtime = 10 * 1000UL;
    } else {
        cci->bg.runtime = 2 * 1000UL;
    }
    /* keep relevant context for bg completion */
    cci->fw.curr_action = fw_transfer->action;
    cci->fw.curr_slot = fw_transfer->slot;
    *len_out = 0;

    return CXL_MBOX_BG_STARTED;
}
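
/*
 * Illustration only: a partitioned transfer into slot 2 would look like
 *
 *     INIT     offset 0                    -> cci->fw.transferring = true
 *     CONTINUE increasing, non-overlapping -> parts arriving < 30 s apart
 *              offsets
 *     END      final part, slot = 2        -> slot marked used on bg
 *                                             completion
 *
 * FULL instead sends the whole image at once, and ABORT resets the state
 * machine at any point.
 */
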
static void __do_firmware_xfer(CXLCCI *cci)
{
    switch (cci->fw.curr_action) {
    case CXL_FW_XFER_ACTION_FULL:
    case CXL_FW_XFER_ACTION_END:
        cci->fw.slot[cci->fw.curr_slot - 1] = true;
        cci->fw.transferring = false;
        break;
    case CXL_FW_XFER_ACTION_INIT:
    case CXL_FW_XFER_ACTION_CONTINUE:
        time(&cci->fw.last_partxfer);
        break;
    default:
        break;
    }
}

/* CXL r3.1 section 8.2.9.3.3: Activate FW (Opcode 0202h) */
static CXLRetCode cmd_firmware_update_activate(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    struct {
        uint8_t action;
        uint8_t slot;
    } QEMU_PACKED *fw_activate = (void *)payload_in;
    QEMU_BUILD_BUG_ON(sizeof(*fw_activate) != 0x2);

    if (fw_activate->slot == 0 ||
        fw_activate->slot == cci->fw.active_slot ||
        fw_activate->slot > CXL_FW_SLOTS) {
        return CXL_MBOX_FW_INVALID_SLOT;
    }

    /* ensure that an actual fw package is there */
    if (!cci->fw.slot[fw_activate->slot - 1]) {
        return CXL_MBOX_FW_INVALID_SLOT;
    }

    switch (fw_activate->action) {
    case 0: /* online */
        cci->fw.active_slot = fw_activate->slot;
        break;
    case 1: /* reset */
        cci->fw.staged_slot = fw_activate->slot;
        break;
    default:
        return CXL_MBOX_INVALID_INPUT;
    }

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.4.1: Get Timestamp (Opcode 0300h) */
static CXLRetCode cmd_timestamp_get(const struct cxl_cmd *cmd,
                                    uint8_t *payload_in,
                                    size_t len_in,
                                    uint8_t *payload_out,
                                    size_t *len_out,
                                    CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    uint64_t final_time = cxl_device_get_timestamp(cxl_dstate);

    stq_le_p(payload_out, final_time);
    *len_out = 8;

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.4.2: Set Timestamp (Opcode 0301h) */
static CXLRetCode cmd_timestamp_set(const struct cxl_cmd *cmd,
                                    uint8_t *payload_in,
                                    size_t len_in,
                                    uint8_t *payload_out,
                                    size_t *len_out,
                                    CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;

    cxl_dstate->timestamp.set = true;
    cxl_dstate->timestamp.last_set = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    cxl_dstate->timestamp.host_set = le64_to_cpu(*(uint64_t *)payload_in);

    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.5.2.1: Command Effects Log (CEL) */
static const QemuUUID cel_uuid = {
    .data = UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79,
                 0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17)
};

/* CXL r3.1 Section 8.2.9.5.1: Get Supported Logs (Opcode 0400h) */
static CXLRetCode cmd_logs_get_supported(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint16_t entries;
        uint8_t rsvd[6];
        struct {
            QemuUUID uuid;
            uint32_t size;
        } log_entries[1];
    } QEMU_PACKED *supported_logs = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*supported_logs) != 0x1c);

    supported_logs->entries = 1;
    supported_logs->log_entries[0].uuid = cel_uuid;
    supported_logs->log_entries[0].size = 4 * cci->cel_size;

    *len_out = sizeof(*supported_logs);
    return CXL_MBOX_SUCCESS;
}
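
/*
 * Illustration only: each Command Effects Log entry is 4 bytes (a 16-bit
 * opcode plus a 16-bit effects field in the spec), which is why the
 * supported-logs entry above reports 4 * cci->cel_size bytes - e.g. a CCI
 * exposing 30 commands advertises a 120-byte CEL.
 */
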
/* CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h) */
static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct {
        QemuUUID uuid;
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_log;

    get_log = (void *)payload_in;

    if (get_log->length > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) {
        return CXL_MBOX_INVALID_LOG;
    }

    /*
     * CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h)
     *   The device shall return Invalid Input if the Offset or Length
     *   fields attempt to access beyond the size of the log as reported by
     *   Get Supported Logs.
     *
     * Only one log entry is valid per opcode here, but offset + length may
     * still exceed that size if the inputs are invalid, and would then
     * access beyond the end of cci->cel_log.
     */
    if ((uint64_t)get_log->offset + get_log->length >= sizeof(cci->cel_log)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* Store off everything to local variables so we can wipe out the payload */
    *len_out = get_log->length;

    memmove(payload_out, cci->cel_log + get_log->offset, get_log->length);

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.6: Features */
/*
 * Get Supported Features output payload
 * CXL r3.1 section 8.2.9.6.1 Table 8-96
 */
typedef struct CXLSupportedFeatureHeader {
    uint16_t entries;
    uint16_t nsuppfeats_dev;
    uint32_t reserved;
} QEMU_PACKED CXLSupportedFeatureHeader;

/*
 * Get Supported Features Supported Feature Entry
 * CXL r3.1 section 8.2.9.6.1 Table 8-97
 */
typedef struct CXLSupportedFeatureEntry {
    QemuUUID uuid;
    uint16_t feat_index;
    uint16_t get_feat_size;
    uint16_t set_feat_size;
    uint32_t attr_flags;
    uint8_t get_feat_version;
    uint8_t set_feat_version;
    uint16_t set_feat_effects;
    uint8_t rsvd[18];
} QEMU_PACKED CXLSupportedFeatureEntry;

/* Supported Feature Entry : attribute flags */
#define CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE BIT(0)
#define CXL_FEAT_ENTRY_ATTR_FLAG_DEEPEST_RESET_PERSISTENCE_MASK GENMASK(3, 1)
#define CXL_FEAT_ENTRY_ATTR_FLAG_PERSIST_ACROSS_FIRMWARE_UPDATE BIT(4)
#define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_DEFAULT_SELECTION BIT(5)
#define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_SAVED_SELECTION BIT(6)

/* Supported Feature Entry : set feature effects */
#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_COLD_RESET BIT(0)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE BIT(1)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_DATA_CHANGE BIT(2)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_POLICY_CHANGE BIT(3)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_LOG_CHANGE BIT(4)
#define CXL_FEAT_ENTRY_SFE_SECURITY_STATE_CHANGE BIT(5)
#define CXL_FEAT_ENTRY_SFE_BACKGROUND_OPERATION BIT(6)
#define CXL_FEAT_ENTRY_SFE_SUPPORT_SECONDARY_MAILBOX BIT(7)
#define CXL_FEAT_ENTRY_SFE_SUPPORT_ABORT_BACKGROUND_OPERATION BIT(8)
#define CXL_FEAT_ENTRY_SFE_CEL_VALID BIT(9)
#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CONV_RESET BIT(10)
#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CXL_RESET BIT(11)

enum CXL_SUPPORTED_FEATURES_LIST {
    CXL_FEATURE_PATROL_SCRUB = 0,
    CXL_FEATURE_ECS,
    CXL_FEATURE_MAX
};

/*
 * Get Feature input payload
 * CXL r3.1 section 8.2.9.6.2 Table 8-99
 */
/* Get Feature : Payload in selection */
enum CXL_GET_FEATURE_SELECTION {
    CXL_GET_FEATURE_SEL_CURRENT_VALUE,
    CXL_GET_FEATURE_SEL_DEFAULT_VALUE,
    CXL_GET_FEATURE_SEL_SAVED_VALUE,
    CXL_GET_FEATURE_SEL_MAX
};

/*
 * Set Feature input payload
 * CXL r3.1 section 8.2.9.6.3 Table 8-101
 */
typedef struct CXLSetFeatureInHeader {
    QemuUUID uuid;
    uint32_t flags;
    uint16_t offset;
    uint8_t version;
    uint8_t rsvd[9];
} QEMU_PACKED QEMU_ALIGNED(16) CXLSetFeatureInHeader;

/* Set Feature : Payload in flags */
#define CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK   0x7
enum CXL_SET_FEATURE_FLAG_DATA_TRANSFER {
    CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_CONTINUE_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MAX
};
#define CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET BIT(3)

/* CXL r3.1 section 8.2.9.9.11.1: Device Patrol Scrub Control Feature */
static const QemuUUID patrol_scrub_uuid = {
    .data = UUID(0x96dad7d6, 0xfde8, 0x482b, 0xa7, 0x33,
                 0x75, 0x77, 0x4e, 0x06, 0xdb, 0x8a)
};

typedef struct CXLMemPatrolScrubSetFeature {
    CXLSetFeatureInHeader hdr;
    CXLMemPatrolScrubWriteAttrs feat_data;
} QEMU_PACKED QEMU_ALIGNED(16) CXLMemPatrolScrubSetFeature;

/*
 * CXL r3.1 section 8.2.9.9.11.2:
 * DDR5 Error Check Scrub (ECS) Control Feature
 */
static const QemuUUID ecs_uuid = {
    .data = UUID(0xe5b13f22, 0x2328, 0x4a14, 0xb8, 0xba,
                 0xb9, 0x69, 0x1e, 0x89, 0x33, 0x86)
};

typedef struct CXLMemECSSetFeature {
    CXLSetFeatureInHeader hdr;
    CXLMemECSWriteAttrs feat_data[];
} QEMU_PACKED QEMU_ALIGNED(16) CXLMemECSSetFeature;
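
/*
 * Illustration only, sizes taken from the structs above: the supported
 * features header is 8 bytes and each CXLSupportedFeatureEntry is 48, so a
 * caller wanting both entries (patrol scrub and ECS) passes
 * count = 8 + 2 * 48 = 104 and gets req_entries = (104 - 8) / 48 = 2 from
 * cmd_features_get_supported() below.
 */
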
/* CXL r3.1 section 8.2.9.6.1: Get Supported Features (Opcode 0500h) */
static CXLRetCode cmd_features_get_supported(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        uint32_t count;
        uint16_t start_index;
        uint16_t reserved;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_feats_in = (void *)payload_in;

    struct {
        CXLSupportedFeatureHeader hdr;
        CXLSupportedFeatureEntry feat_entries[];
    } QEMU_PACKED QEMU_ALIGNED(16) *get_feats_out = (void *)payload_out;
    uint16_t index, req_entries;
    uint16_t entry;

    if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        return CXL_MBOX_UNSUPPORTED;
    }
    if (get_feats_in->count < sizeof(CXLSupportedFeatureHeader) ||
        get_feats_in->start_index >= CXL_FEATURE_MAX) {
        return CXL_MBOX_INVALID_INPUT;
    }

    req_entries = (get_feats_in->count -
                   sizeof(CXLSupportedFeatureHeader)) /
                  sizeof(CXLSupportedFeatureEntry);
    req_entries = MIN(req_entries,
                      (CXL_FEATURE_MAX - get_feats_in->start_index));

    for (entry = 0, index = get_feats_in->start_index;
         entry < req_entries; index++) {
        switch (index) {
        case CXL_FEATURE_PATROL_SCRUB:
            /* Fill supported feature entry for device patrol scrub control */
            get_feats_out->feat_entries[entry++] =
                (struct CXLSupportedFeatureEntry) {
                    .uuid = patrol_scrub_uuid,
                    .feat_index = index,
                    .get_feat_size = sizeof(CXLMemPatrolScrubReadAttrs),
                    .set_feat_size = sizeof(CXLMemPatrolScrubWriteAttrs),
                    .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE,
                    .get_feat_version = CXL_MEMDEV_PS_GET_FEATURE_VERSION,
                    .set_feat_version = CXL_MEMDEV_PS_SET_FEATURE_VERSION,
                    .set_feat_effects =
                        CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE |
                        CXL_FEAT_ENTRY_SFE_CEL_VALID,
                };
            break;
        case CXL_FEATURE_ECS:
            /* Fill supported feature entry for device DDR5 ECS control */
            get_feats_out->feat_entries[entry++] =
                (struct CXLSupportedFeatureEntry) {
                    .uuid = ecs_uuid,
                    .feat_index = index,
                    .get_feat_size = sizeof(CXLMemECSReadAttrs),
                    .set_feat_size = sizeof(CXLMemECSWriteAttrs),
                    .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE,
                    .get_feat_version = CXL_ECS_GET_FEATURE_VERSION,
                    .set_feat_version = CXL_ECS_SET_FEATURE_VERSION,
                    .set_feat_effects =
                        CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE |
                        CXL_FEAT_ENTRY_SFE_CEL_VALID,
                };
            break;
        default:
            __builtin_unreachable();
        }
    }
    get_feats_out->hdr.nsuppfeats_dev = CXL_FEATURE_MAX;
    get_feats_out->hdr.entries = req_entries;
    *len_out = sizeof(CXLSupportedFeatureHeader) +
               req_entries * sizeof(CXLSupportedFeatureEntry);

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.6.2: Get Feature (Opcode 0501h) */
static CXLRetCode cmd_features_get_feature(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    struct {
        QemuUUID uuid;
        uint16_t offset;
        uint16_t count;
        uint8_t selection;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_feature;
    uint16_t bytes_to_copy = 0;
    CXLType3Dev *ct3d;
    CXLSetFeatureInfo *set_feat_info;

    if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        return CXL_MBOX_UNSUPPORTED;
    }

    ct3d = CXL_TYPE3(cci->d);
    get_feature = (void *)payload_in;

    set_feat_info = &ct3d->set_feat_info;
    if (qemu_uuid_is_equal(&get_feature->uuid, &set_feat_info->uuid)) {
        return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS;
    }

    if (get_feature->selection != CXL_GET_FEATURE_SEL_CURRENT_VALUE) {
        return CXL_MBOX_UNSUPPORTED;
    }
    if (get_feature->offset + get_feature->count > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (qemu_uuid_is_equal(&get_feature->uuid, &patrol_scrub_uuid)) {
        if (get_feature->offset >= sizeof(CXLMemPatrolScrubReadAttrs)) {
            return CXL_MBOX_INVALID_INPUT;
        }
        bytes_to_copy = sizeof(CXLMemPatrolScrubReadAttrs) -
                        get_feature->offset;
        bytes_to_copy = MIN(bytes_to_copy, get_feature->count);
        memcpy(payload_out,
               (uint8_t *)&ct3d->patrol_scrub_attrs + get_feature->offset,
               bytes_to_copy);
    } else if (qemu_uuid_is_equal(&get_feature->uuid, &ecs_uuid)) {
        if (get_feature->offset >= sizeof(CXLMemECSReadAttrs)) {
            return CXL_MBOX_INVALID_INPUT;
        }
        bytes_to_copy = sizeof(CXLMemECSReadAttrs) - get_feature->offset;
        bytes_to_copy = MIN(bytes_to_copy, get_feature->count);
        memcpy(payload_out,
               (uint8_t *)&ct3d->ecs_attrs + get_feature->offset,
               bytes_to_copy);
    } else {
        return CXL_MBOX_UNSUPPORTED;
    }

    *len_out = bytes_to_copy;

    return CXL_MBOX_SUCCESS;
}
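
/*
 * Illustration only: Get Feature reads are windowed by (offset, count), so a
 * caller may fetch the patrol scrub attributes either in one read with
 * offset = 0, count = sizeof(CXLMemPatrolScrubReadAttrs), or in several
 * smaller reads at increasing offsets; offsets at or beyond the attribute
 * size (or windows beyond cci->payload_max) are rejected as invalid input.
 */
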
/* CXL r3.1 section 8.2.9.6.3: Set Feature (Opcode 0502h) */
static CXLRetCode cmd_features_set_feature(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    CXLSetFeatureInHeader *hdr = (void *)payload_in;
    CXLMemPatrolScrubWriteAttrs *ps_write_attrs;
    CXLMemPatrolScrubSetFeature *ps_set_feature;
    CXLMemECSWriteAttrs *ecs_write_attrs;
    CXLMemECSSetFeature *ecs_set_feature;
    CXLSetFeatureInfo *set_feat_info;
    uint16_t bytes_to_copy = 0;
    uint8_t data_transfer_flag;
    CXLType3Dev *ct3d;
    uint16_t count;

    if (len_in < sizeof(*hdr)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        return CXL_MBOX_UNSUPPORTED;
    }
    ct3d = CXL_TYPE3(cci->d);
    set_feat_info = &ct3d->set_feat_info;

    if (!qemu_uuid_is_null(&set_feat_info->uuid) &&
        !qemu_uuid_is_equal(&hdr->uuid, &set_feat_info->uuid)) {
        return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS;
    }
    if (hdr->flags & CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET) {
        set_feat_info->data_saved_across_reset = true;
    } else {
        set_feat_info->data_saved_across_reset = false;
    }

    data_transfer_flag =
        hdr->flags & CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK;
    if (data_transfer_flag == CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER) {
        set_feat_info->uuid = hdr->uuid;
        set_feat_info->data_size = 0;
    }
    set_feat_info->data_transfer_flag = data_transfer_flag;
    set_feat_info->data_offset = hdr->offset;
    bytes_to_copy = len_in - sizeof(CXLSetFeatureInHeader);

    if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) {
        if (hdr->version != CXL_MEMDEV_PS_SET_FEATURE_VERSION) {
            return CXL_MBOX_UNSUPPORTED;
        }

        ps_set_feature = (void *)payload_in;
        ps_write_attrs = &ps_set_feature->feat_data;

        if ((uint32_t)hdr->offset + bytes_to_copy >
            sizeof(ct3d->patrol_scrub_wr_attrs)) {
            return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
        }
        memcpy((uint8_t *)&ct3d->patrol_scrub_wr_attrs + hdr->offset,
               ps_write_attrs,
               bytes_to_copy);
        set_feat_info->data_size += bytes_to_copy;

        if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
            data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) {
            ct3d->patrol_scrub_attrs.scrub_cycle &= ~0xFF;
            ct3d->patrol_scrub_attrs.scrub_cycle |=
                ct3d->patrol_scrub_wr_attrs.scrub_cycle_hr & 0xFF;
            ct3d->patrol_scrub_attrs.scrub_flags &= ~0x1;
            ct3d->patrol_scrub_attrs.scrub_flags |=
                ct3d->patrol_scrub_wr_attrs.scrub_flags & 0x1;
        }
    } else if (qemu_uuid_is_equal(&hdr->uuid, &ecs_uuid)) {
        if (hdr->version != CXL_ECS_SET_FEATURE_VERSION) {
            return CXL_MBOX_UNSUPPORTED;
        }

        ecs_set_feature = (void *)payload_in;
        ecs_write_attrs = ecs_set_feature->feat_data;

        if ((uint32_t)hdr->offset + bytes_to_copy >
            sizeof(ct3d->ecs_wr_attrs)) {
            return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
        }
        memcpy((uint8_t *)&ct3d->ecs_wr_attrs + hdr->offset,
               ecs_write_attrs,
               bytes_to_copy);
        set_feat_info->data_size += bytes_to_copy;

        if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
            data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) {
            ct3d->ecs_attrs.ecs_log_cap = ct3d->ecs_wr_attrs.ecs_log_cap;
            for (count = 0; count < CXL_ECS_NUM_MEDIA_FRUS; count++) {
                ct3d->ecs_attrs.fru_attrs[count].ecs_config =
                    ct3d->ecs_wr_attrs.fru_attrs[count].ecs_config & 0x1F;
            }
        }
    } else {
        return CXL_MBOX_UNSUPPORTED;
    }

    if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
        data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER ||
        data_transfer_flag == CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER) {
        memset(&set_feat_info->uuid, 0, sizeof(QemuUUID));
        if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) {
            memset(&ct3d->patrol_scrub_wr_attrs, 0, set_feat_info->data_size);
        } else if (qemu_uuid_is_equal(&hdr->uuid, &ecs_uuid)) {
            memset(&ct3d->ecs_wr_attrs, 0, set_feat_info->data_size);
        }
        set_feat_info->data_transfer_flag = 0;
        set_feat_info->data_saved_across_reset = false;
        set_feat_info->data_offset = 0;
        set_feat_info->data_size = 0;
    }

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.1.1: Identify Memory Device (Opcode 4000h) */
static CXLRetCode cmd_identify_memory_device(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        char fw_revision[0x10];
        uint64_t total_capacity;
        uint64_t volatile_capacity;
        uint64_t persistent_capacity;
        uint64_t partition_align;
        uint16_t info_event_log_size;
        uint16_t warning_event_log_size;
        uint16_t failure_event_log_size;
        uint16_t fatal_event_log_size;
        uint32_t lsa_size;
        uint8_t poison_list_max_mer[3];
        uint16_t inject_poison_limit;
        uint8_t poison_caps;
        uint8_t qos_telemetry_caps;
        uint16_t dc_event_log_size;
    } QEMU_PACKED *id;
    QEMU_BUILD_BUG_ON(sizeof(*id) != 0x45);
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    id = (void *)payload_out;

    snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0);

    stq_le_p(&id->total_capacity,
             cxl_dstate->static_mem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->persistent_capacity,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->volatile_capacity,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d));
    /* 256 poison records */
    st24_le_p(id->poison_list_max_mer, 256);
    /* No limit - so limited by main poison record limit */
    stw_le_p(&id->inject_poison_limit, 0);
    stw_le_p(&id->dc_event_log_size, CXL_DC_EVENT_LOG_SIZE);

    *len_out = sizeof(*id);
    return CXL_MBOX_SUCCESS;
}
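
/*
 * Illustration only: capacities above are returned in units of
 * CXL_CAPACITY_MULTIPLIER (256 MiB). A device with 1 GiB of volatile and no
 * persistent memory thus reports volatile_capacity = 4,
 * persistent_capacity = 0 and total_capacity = 4.
 */
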
/* CXL r3.1 Section 8.2.9.9.2.1: Get Partition Info (Opcode 4100h) */
static CXLRetCode cmd_ccls_get_partition_info(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    struct {
        uint64_t active_vmem;
        uint64_t active_pmem;
        uint64_t next_vmem;
        uint64_t next_pmem;
    } QEMU_PACKED *part_info = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20);
    CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    stq_le_p(&part_info->active_vmem,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    /*
     * When both next_vmem and next_pmem are 0, there is no pending change to
     * partitioning.
     */
    stq_le_p(&part_info->next_vmem, 0);
    stq_le_p(&part_info->active_pmem,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&part_info->next_pmem, 0);

    *len_out = sizeof(*part_info);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.2.3: Get LSA (Opcode 4102h) */
static CXLRetCode cmd_ccls_get_lsa(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct {
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED *get_lsa;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    uint64_t offset, length;

    get_lsa = (void *)payload_in;
    offset = get_lsa->offset;
    length = get_lsa->length;

    if (offset + length > cvc->get_lsa_size(ct3d)) {
        *len_out = 0;
        return CXL_MBOX_INVALID_INPUT;
    }

    *len_out = cvc->get_lsa(ct3d, payload_out, length, offset);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.2.4: Set LSA (Opcode 4103h) */
static CXLRetCode cmd_ccls_set_lsa(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct set_lsa_pl {
        uint32_t offset;
        uint32_t rsvd;
        uint8_t data[];
    } QEMU_PACKED;
    struct set_lsa_pl *set_lsa_payload = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    const size_t hdr_len = offsetof(struct set_lsa_pl, data);

    *len_out = 0;
    if (len_in < hdr_len) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (set_lsa_payload->offset + len_in > cvc->get_lsa_size(ct3d) + hdr_len) {
        return CXL_MBOX_INVALID_INPUT;
    }
    len_in -= hdr_len;

    cvc->set_lsa(ct3d, set_lsa_payload->data, len_in, set_lsa_payload->offset);
    return CXL_MBOX_SUCCESS;
}

/* Perform the actual device zeroing */
static void __do_sanitization(CXLType3Dev *ct3d)
{
    MemoryRegion *mr;

    if (ct3d->hostvmem) {
        mr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (mr) {
            void *hostmem = memory_region_get_ram_ptr(mr);
            memset(hostmem, 0, memory_region_size(mr));
        }
    }

    if (ct3d->hostpmem) {
        mr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (mr) {
            void *hostmem = memory_region_get_ram_ptr(mr);
            memset(hostmem, 0, memory_region_size(mr));
        }
    }
    if (ct3d->lsa) {
        mr = host_memory_backend_get_memory(ct3d->lsa);
        if (mr) {
            void *lsa = memory_region_get_ram_ptr(mr);
            memset(lsa, 0, memory_region_size(mr));
        }
    }
    cxl_discard_all_event_records(&ct3d->cxl_dstate);
}

/*
 * CXL r3.1 Section 8.2.9.9.5.1: Sanitize (Opcode 4400h)
 *
 * Once the Sanitize command has started successfully, the device shall be
 * placed in the media disabled state. If the command fails or is interrupted
 * by a reset or power failure, it shall remain in the media disabled state
 * until a successful Sanitize command has been completed. During this state:
 *
 * 1. Memory writes to the device will have no effect, and all memory reads
 * will return random values (no user data returned, even for locations that
 * the failed Sanitize operation didn't sanitize yet).
 *
 * 2. Mailbox commands shall still be processed in the disabled state, except
 * that commands that access Sanitized areas shall fail with the Media Disabled
 * error code.
 */
static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint64_t total_mem; /* in MiB */
    int secs;

    total_mem = (ct3d->cxl_dstate.vmem_size + ct3d->cxl_dstate.pmem_size) >> 20;
    if (total_mem <= 512) {
        secs = 4;
    } else if (total_mem <= 1024) {
        secs = 8;
    } else if (total_mem <= 2 * 1024) {
        secs = 15;
    } else if (total_mem <= 4 * 1024) {
        secs = 30;
    } else if (total_mem <= 8 * 1024) {
        secs = 60;
    } else if (total_mem <= 16 * 1024) {
        secs = 2 * 60;
    } else if (total_mem <= 32 * 1024) {
        secs = 4 * 60;
    } else if (total_mem <= 64 * 1024) {
        secs = 8 * 60;
    } else if (total_mem <= 128 * 1024) {
        secs = 15 * 60;
    } else if (total_mem <= 256 * 1024) {
        secs = 30 * 60;
    } else if (total_mem <= 512 * 1024) {
        secs = 60 * 60;
    } else if (total_mem <= 1024 * 1024) {
        secs = 120 * 60;
    } else {
        secs = 240 * 60; /* max 4 hrs */
    }

    /* Other background commands are rejected as busy for now */
    cci->bg.runtime = secs * 1000UL;
    *len_out = 0;

    cxl_dev_disable_media(&ct3d->cxl_dstate);

    /* sanitize when done */
    return CXL_MBOX_BG_STARTED;
}

static CXLRetCode cmd_get_security_state(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    uint32_t *state = (uint32_t *)payload_out;

    *state = 0;
    *len_out = 4;
    return CXL_MBOX_SUCCESS;
}
1639 */ 1640 static CXLRetCode cmd_media_get_poison_list(const struct cxl_cmd *cmd, 1641 uint8_t *payload_in, 1642 size_t len_in, 1643 uint8_t *payload_out, 1644 size_t *len_out, 1645 CXLCCI *cci) 1646 { 1647 struct get_poison_list_pl { 1648 uint64_t pa; 1649 uint64_t length; 1650 } QEMU_PACKED; 1651 1652 struct get_poison_list_out_pl { 1653 uint8_t flags; 1654 uint8_t rsvd1; 1655 uint64_t overflow_timestamp; 1656 uint16_t count; 1657 uint8_t rsvd2[0x14]; 1658 struct { 1659 uint64_t addr; 1660 uint32_t length; 1661 uint32_t resv; 1662 } QEMU_PACKED records[]; 1663 } QEMU_PACKED; 1664 1665 struct get_poison_list_pl *in = (void *)payload_in; 1666 struct get_poison_list_out_pl *out = (void *)payload_out; 1667 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1668 uint16_t record_count = 0, i = 0; 1669 uint64_t query_start, query_length; 1670 CXLPoisonList *poison_list = &ct3d->poison_list; 1671 CXLPoison *ent; 1672 uint16_t out_pl_len; 1673 1674 query_start = ldq_le_p(&in->pa); 1675 /* 64 byte alignment required */ 1676 if (query_start & 0x3f) { 1677 return CXL_MBOX_INVALID_INPUT; 1678 } 1679 query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE; 1680 1681 QLIST_FOREACH(ent, poison_list, node) { 1682 /* Check for no overlap */ 1683 if (!ranges_overlap(ent->start, ent->length, 1684 query_start, query_length)) { 1685 continue; 1686 } 1687 record_count++; 1688 } 1689 out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]); 1690 assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE); 1691 1692 QLIST_FOREACH(ent, poison_list, node) { 1693 uint64_t start, stop; 1694 1695 /* Check for no overlap */ 1696 if (!ranges_overlap(ent->start, ent->length, 1697 query_start, query_length)) { 1698 continue; 1699 } 1700 1701 /* Deal with overlap */ 1702 start = MAX(ROUND_DOWN(ent->start, 64ull), query_start); 1703 stop = MIN(ROUND_DOWN(ent->start, 64ull) + ent->length, 1704 query_start + query_length); 1705 stq_le_p(&out->records[i].addr, start | (ent->type & 0x7)); 1706 stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE); 1707 i++; 1708 } 1709 if (ct3d->poison_list_overflowed) { 1710 out->flags = (1 << 1); 1711 stq_le_p(&out->overflow_timestamp, ct3d->poison_list_overflow_ts); 1712 } 1713 if (scan_media_running(cci)) { 1714 out->flags |= (1 << 2); 1715 } 1716 1717 stw_le_p(&out->count, record_count); 1718 *len_out = out_pl_len; 1719 return CXL_MBOX_SUCCESS; 1720 } 1721 1722 /* CXL r3.1 Section 8.2.9.9.4.2: Inject Poison (Opcode 4301h) */ 1723 static CXLRetCode cmd_media_inject_poison(const struct cxl_cmd *cmd, 1724 uint8_t *payload_in, 1725 size_t len_in, 1726 uint8_t *payload_out, 1727 size_t *len_out, 1728 CXLCCI *cci) 1729 { 1730 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1731 CXLPoisonList *poison_list = &ct3d->poison_list; 1732 CXLPoison *ent; 1733 struct inject_poison_pl { 1734 uint64_t dpa; 1735 }; 1736 struct inject_poison_pl *in = (void *)payload_in; 1737 uint64_t dpa = ldq_le_p(&in->dpa); 1738 CXLPoison *p; 1739 1740 QLIST_FOREACH(ent, poison_list, node) { 1741 if (dpa >= ent->start && 1742 dpa + CXL_CACHE_LINE_SIZE <= ent->start + ent->length) { 1743 return CXL_MBOX_SUCCESS; 1744 } 1745 } 1746 /* 1747 * Freeze the list if there is an on-going scan media operation. 1748 */ 1749 if (scan_media_running(cci)) { 1750 /* 1751 * XXX: Spec is ambiguous - is this case considered 1752 * a successful return despite not adding to the list? 
1753 */ 1754 goto success; 1755 } 1756 1757 if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) { 1758 return CXL_MBOX_INJECT_POISON_LIMIT; 1759 } 1760 p = g_new0(CXLPoison, 1); 1761 1762 p->length = CXL_CACHE_LINE_SIZE; 1763 p->start = dpa; 1764 p->type = CXL_POISON_TYPE_INJECTED; 1765 1766 /* 1767 * Possible todo: Merge with existing entry if next to it and if same type 1768 */ 1769 QLIST_INSERT_HEAD(poison_list, p, node); 1770 ct3d->poison_list_cnt++; 1771 success: 1772 *len_out = 0; 1773 1774 return CXL_MBOX_SUCCESS; 1775 } 1776 1777 /* CXL r3.1 Section 8.2.9.9.4.3: Clear Poison (Opcode 4302h */ 1778 static CXLRetCode cmd_media_clear_poison(const struct cxl_cmd *cmd, 1779 uint8_t *payload_in, 1780 size_t len_in, 1781 uint8_t *payload_out, 1782 size_t *len_out, 1783 CXLCCI *cci) 1784 { 1785 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1786 CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; 1787 CXLPoisonList *poison_list = &ct3d->poison_list; 1788 CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); 1789 struct clear_poison_pl { 1790 uint64_t dpa; 1791 uint8_t data[64]; 1792 }; 1793 CXLPoison *ent; 1794 uint64_t dpa; 1795 1796 struct clear_poison_pl *in = (void *)payload_in; 1797 1798 dpa = ldq_le_p(&in->dpa); 1799 if (dpa + CXL_CACHE_LINE_SIZE > cxl_dstate->static_mem_size + 1800 ct3d->dc.total_capacity) { 1801 return CXL_MBOX_INVALID_PA; 1802 } 1803 1804 /* Clearing a region with no poison is not an error so always do so */ 1805 if (cvc->set_cacheline) { 1806 if (!cvc->set_cacheline(ct3d, dpa, in->data)) { 1807 return CXL_MBOX_INTERNAL_ERROR; 1808 } 1809 } 1810 1811 /* 1812 * Freeze the list if there is an on-going scan media operation. 1813 */ 1814 if (scan_media_running(cci)) { 1815 /* 1816 * XXX: Spec is ambiguous - is this case considered 1817 * a successful return despite not removing from the list? 1818 */ 1819 goto success; 1820 } 1821 1822 QLIST_FOREACH(ent, poison_list, node) { 1823 /* 1824 * Test for contained in entry. 
/* CXL r3.1 Section 8.2.9.9.4.3: Clear Poison (Opcode 4302h) */
static CXLRetCode cmd_media_clear_poison(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    struct clear_poison_pl {
        uint64_t dpa;
        uint8_t data[64];
    };
    CXLPoison *ent;
    uint64_t dpa;

    struct clear_poison_pl *in = (void *)payload_in;

    dpa = ldq_le_p(&in->dpa);
    if (dpa + CXL_CACHE_LINE_SIZE > cxl_dstate->static_mem_size +
                                    ct3d->dc.total_capacity) {
        return CXL_MBOX_INVALID_PA;
    }

    /* Clearing a region with no poison is not an error so always do so */
    if (cvc->set_cacheline) {
        if (!cvc->set_cacheline(ct3d, dpa, in->data)) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
    }

    /*
     * Freeze the list if there is an on-going scan media operation.
     */
    if (scan_media_running(cci)) {
        /*
         * XXX: Spec is ambiguous - is this case considered
         * a successful return despite not removing from the list?
         */
        goto success;
    }

    QLIST_FOREACH(ent, poison_list, node) {
        /*
         * Test for containment within an entry. Simpler than the general
         * case as we are clearing 64 bytes and entries are 64 byte aligned.
         */
        if ((dpa >= ent->start) && (dpa < ent->start + ent->length)) {
            break;
        }
    }
    if (!ent) {
        goto success;
    }

    QLIST_REMOVE(ent, node);
    ct3d->poison_list_cnt--;

    if (dpa > ent->start) {
        CXLPoison *frag;
        /* Cannot overflow as replacing existing entry */

        frag = g_new0(CXLPoison, 1);

        frag->start = ent->start;
        frag->length = dpa - ent->start;
        frag->type = ent->type;

        QLIST_INSERT_HEAD(poison_list, frag, node);
        ct3d->poison_list_cnt++;
    }

    if (dpa + CXL_CACHE_LINE_SIZE < ent->start + ent->length) {
        CXLPoison *frag;

        if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
            cxl_set_poison_list_overflowed(ct3d);
        } else {
            frag = g_new0(CXLPoison, 1);

            frag->start = dpa + CXL_CACHE_LINE_SIZE;
            frag->length = ent->start + ent->length - frag->start;
            frag->type = ent->type;
            QLIST_INSERT_HEAD(poison_list, frag, node);
            ct3d->poison_list_cnt++;
        }
    }
    /* Any fragments have been added; now free the original entry */
    g_free(ent);
success:
    *len_out = 0;

    return CXL_MBOX_SUCCESS;
}
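/*
 * Worked example of the splitting above (illustrative numbers only):
 * clearing the line at DPA 0x40 from an entry covering [0x0, 0x100)
 * removes the original entry and leaves two fragments of the same type,
 *
 *   [0x00, 0x40)  - the "before" fragment, length = dpa - ent->start
 *   [0x80, 0x100) - the "after" fragment, starting at dpa + 64
 *
 * for a net poison_list_cnt change of +1. Only the "after" fragment can
 * collide with CXL_POISON_LIST_LIMIT, which is why only that branch may
 * set the overflow state.
 */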
/*
 * CXL r3.1 section 8.2.9.9.4.4: Get Scan Media Capabilities
 */
static CXLRetCode
cmd_media_get_scan_media_capabilities(const struct cxl_cmd *cmd,
                                      uint8_t *payload_in,
                                      size_t len_in,
                                      uint8_t *payload_out,
                                      size_t *len_out,
                                      CXLCCI *cci)
{
    struct get_scan_media_capabilities_pl {
        uint64_t pa;
        uint64_t length;
    } QEMU_PACKED;

    struct get_scan_media_capabilities_out_pl {
        uint32_t estimated_runtime_ms;
    };

    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    struct get_scan_media_capabilities_pl *in = (void *)payload_in;
    struct get_scan_media_capabilities_out_pl *out = (void *)payload_out;
    uint64_t query_start;
    uint64_t query_length;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    if (query_start + query_length > cxl_dstate->static_mem_size) {
        return CXL_MBOX_INVALID_PA;
    }

    /*
     * Just use 400 nanosecond access/read latency + 100 ns for
     * the cost of updating the poison list. For small enough
     * chunks return at least 1 ms.
     */
    stl_le_p(&out->estimated_runtime_ms,
             MAX(1, query_length * (0.0005L / 64)));

    *len_out = sizeof(*out);
    return CXL_MBOX_SUCCESS;
}
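/*
 * Sanity check of the estimate above: 400 ns + 100 ns = 500 ns = 0.0005 ms
 * per 64 byte cache line, so query_length * (0.0005 / 64) is the runtime in
 * milliseconds. For example, scanning 1 GiB gives
 *
 *   (1 << 30) / 64 = 16777216 lines * 0.0005 ms ~= 8389 ms,
 *
 * while any range under 2000 lines (125 KiB) rounds up to the 1 ms floor
 * via MAX(1, ...).
 */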
static void __do_scan_media(CXLType3Dev *ct3d)
{
    CXLPoison *ent;
    unsigned int results_cnt = 0;

    QLIST_FOREACH(ent, &ct3d->scan_media_results, node) {
        results_cnt++;
    }

    /* only scan media may clear the overflow */
    if (ct3d->poison_list_overflowed &&
        ct3d->poison_list_cnt == results_cnt) {
        cxl_clear_poison_list_overflowed(ct3d);
    }
    /* scan media has run since last conventional reset */
    ct3d->scan_media_hasrun = true;
}

/*
 * CXL r3.1 section 8.2.9.9.4.5: Scan Media
 */
static CXLRetCode cmd_media_scan_media(const struct cxl_cmd *cmd,
                                       uint8_t *payload_in,
                                       size_t len_in,
                                       uint8_t *payload_out,
                                       size_t *len_out,
                                       CXLCCI *cci)
{
    struct scan_media_pl {
        uint64_t pa;
        uint64_t length;
        uint8_t flags;
    } QEMU_PACKED;

    struct scan_media_pl *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    uint64_t query_start;
    uint64_t query_length;
    CXLPoison *ent, *next;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    if (query_start + query_length > cxl_dstate->static_mem_size) {
        return CXL_MBOX_INVALID_PA;
    }
    if (ct3d->dc.num_regions && query_start + query_length >=
        cxl_dstate->static_mem_size + ct3d->dc.total_capacity) {
        return CXL_MBOX_INVALID_PA;
    }

    if (in->flags == 0) { /* TODO */
        qemu_log_mask(LOG_UNIMP,
                      "Scan Media Event Log is unsupported\n");
    }

    /* any previous results are discarded upon a new Scan Media */
    QLIST_FOREACH_SAFE(ent, &ct3d->scan_media_results, node, next) {
        QLIST_REMOVE(ent, node);
        g_free(ent);
    }

    /* kill the poison list - it will be recreated */
    if (ct3d->poison_list_overflowed) {
        QLIST_FOREACH_SAFE(ent, &ct3d->poison_list, node, next) {
            QLIST_REMOVE(ent, node);
            g_free(ent);
            ct3d->poison_list_cnt--;
        }
    }

    /*
     * Scan the backup list and move corresponding entries
     * into the results list, updating the poison list
     * when possible.
     */
    QLIST_FOREACH_SAFE(ent, &ct3d->poison_list_bkp, node, next) {
        CXLPoison *res;

        if (ent->start >= query_start + query_length ||
            ent->start + ent->length <= query_start) {
            continue;
        }

        /*
         * If a Get Poison List cmd comes in while this
         * scan is being done, it will see the new complete
         * list, while setting the respective flag.
         */
        if (ct3d->poison_list_cnt < CXL_POISON_LIST_LIMIT) {
            CXLPoison *p = g_new0(CXLPoison, 1);

            p->start = ent->start;
            p->length = ent->length;
            p->type = ent->type;
            QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
            ct3d->poison_list_cnt++;
        }

        res = g_new0(CXLPoison, 1);
        res->start = ent->start;
        res->length = ent->length;
        res->type = ent->type;
        QLIST_INSERT_HEAD(&ct3d->scan_media_results, res, node);

        QLIST_REMOVE(ent, node);
        g_free(ent);
    }

    cci->bg.runtime = MAX(1, query_length * (0.0005L / 64));
    *len_out = 0;

    return CXL_MBOX_BG_STARTED;
}

/*
 * CXL r3.1 section 8.2.9.9.4.6: Get Scan Media Results
 */
static CXLRetCode cmd_media_get_scan_media_results(const struct cxl_cmd *cmd,
                                                   uint8_t *payload_in,
                                                   size_t len_in,
                                                   uint8_t *payload_out,
                                                   size_t *len_out,
                                                   CXLCCI *cci)
{
    struct get_scan_media_results_out_pl {
        uint64_t dpa_restart;
        uint64_t length;
        uint8_t flags;
        uint8_t rsvd1;
        uint16_t count;
        uint8_t rsvd2[0xc];
        struct {
            uint64_t addr;
            uint32_t length;
            uint32_t resv;
        } QEMU_PACKED records[];
    } QEMU_PACKED;

    struct get_scan_media_results_out_pl *out = (void *)payload_out;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLPoisonList *scan_media_results = &ct3d->scan_media_results;
    CXLPoison *ent, *next;
    uint16_t total_count = 0, record_count = 0, i = 0;
    uint16_t out_pl_len;

    if (!ct3d->scan_media_hasrun) {
        return CXL_MBOX_UNSUPPORTED;
    }

    /*
     * Calculate limits; all entries are within the address range of the
     * last Scan Media call.
     */
    QLIST_FOREACH(ent, scan_media_results, node) {
        size_t rec_size = record_count * sizeof(out->records[0]);

        if (sizeof(*out) + rec_size < CXL_MAILBOX_MAX_PAYLOAD_SIZE) {
            record_count++;
        }
        total_count++;
    }

    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    memset(out, 0, out_pl_len);
    QLIST_FOREACH_SAFE(ent, scan_media_results, node, next) {
        uint64_t start, stop;

        if (i == record_count) {
            break;
        }

        start = ROUND_DOWN(ent->start, 64ull);
        stop = ROUND_DOWN(ent->start, 64ull) + ent->length;
        stq_le_p(&out->records[i].addr, start);
        stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE);
        i++;

        /* consume the returning entry */
        QLIST_REMOVE(ent, node);
        g_free(ent);
    }

    stw_le_p(&out->count, record_count);
    if (total_count > record_count) {
        out->flags = (1 << 0); /* More Media Error Records */
    }

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}
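/*
 * Typical caller flow against the three commands above (a sketch of what a
 * Fabric Manager or host driver is expected to do, not code in this model):
 *
 *   1. Get Scan Media Capabilities for the range -> estimated_runtime_ms
 *   2. Scan Media -> returns Background Operation Started; poll the
 *      Background Command Status Register for completion
 *   3. Get Scan Media Results, repeating while flags bit 0
 *      ("More Media Error Records") is set, since each call consumes the
 *      entries it returns
 */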
/*
 * CXL r3.1 section 8.2.9.9.9.1: Get Dynamic Capacity Configuration
 * (Opcode: 4800h)
 */
static CXLRetCode cmd_dcd_get_dyn_cap_config(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    struct {
        uint8_t region_cnt;
        uint8_t start_rid;
    } QEMU_PACKED *in = (void *)payload_in;
    struct {
        uint8_t num_regions;
        uint8_t regions_returned;
        uint8_t rsvd1[6];
        struct {
            uint64_t base;
            uint64_t decode_len;
            uint64_t region_len;
            uint64_t block_size;
            uint32_t dsmadhandle;
            uint8_t flags;
            uint8_t rsvd2[3];
        } QEMU_PACKED records[];
    } QEMU_PACKED *out = (void *)payload_out;
    struct {
        uint32_t num_extents_supported;
        uint32_t num_extents_available;
        uint32_t num_tags_supported;
        uint32_t num_tags_available;
    } QEMU_PACKED *extra_out;
    uint16_t record_count;
    uint16_t i;
    uint16_t out_pl_len;
    uint8_t start_rid;

    start_rid = in->start_rid;
    if (start_rid >= ct3d->dc.num_regions) {
        return CXL_MBOX_INVALID_INPUT;
    }

    record_count = MIN(ct3d->dc.num_regions - in->start_rid, in->region_cnt);

    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    extra_out = (void *)(payload_out + out_pl_len);
    out_pl_len += sizeof(*extra_out);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    out->num_regions = ct3d->dc.num_regions;
    out->regions_returned = record_count;
    for (i = 0; i < record_count; i++) {
        stq_le_p(&out->records[i].base,
                 ct3d->dc.regions[start_rid + i].base);
        stq_le_p(&out->records[i].decode_len,
                 ct3d->dc.regions[start_rid + i].decode_len /
                 CXL_CAPACITY_MULTIPLIER);
        stq_le_p(&out->records[i].region_len,
                 ct3d->dc.regions[start_rid + i].len);
        stq_le_p(&out->records[i].block_size,
                 ct3d->dc.regions[start_rid + i].block_size);
        stl_le_p(&out->records[i].dsmadhandle,
                 ct3d->dc.regions[start_rid + i].dsmadhandle);
        out->records[i].flags = ct3d->dc.regions[start_rid + i].flags;
    }
    /*
     * TODO: Assign values once extents and tags are introduced
     * to use.
     */
    stl_le_p(&extra_out->num_extents_supported, CXL_NUM_EXTENTS_SUPPORTED);
    stl_le_p(&extra_out->num_extents_available, CXL_NUM_EXTENTS_SUPPORTED -
             ct3d->dc.total_extent_count);
    stl_le_p(&extra_out->num_tags_supported, CXL_NUM_TAGS_SUPPORTED);
    stl_le_p(&extra_out->num_tags_available, CXL_NUM_TAGS_SUPPORTED);

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}
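/*
 * Layout note on the output above: the fixed trailer (extent/tag counts in
 * extra_out) sits immediately after the variable-length records array, so
 * its offset depends on how many regions were returned. For example, with
 * the 8 byte header and 40 byte records, a 2-region response places
 * extra_out at 8 + 2 * 40 = 88 bytes into the payload.
 */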
/*
 * CXL r3.1 section 8.2.9.9.9.2:
 * Get Dynamic Capacity Extent List (Opcode 4801h)
 */
static CXLRetCode cmd_dcd_get_dyn_cap_ext_list(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len_in,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    struct {
        uint32_t extent_cnt;
        uint32_t start_extent_id;
    } QEMU_PACKED *in = (void *)payload_in;
    struct {
        uint32_t count;
        uint32_t total_extents;
        uint32_t generation_num;
        uint8_t rsvd[4];
        CXLDCExtentRaw records[];
    } QEMU_PACKED *out = (void *)payload_out;
    uint32_t start_extent_id = in->start_extent_id;
    CXLDCExtentList *extent_list = &ct3d->dc.extents;
    uint16_t record_count = 0, i = 0, record_done = 0;
    uint16_t out_pl_len, size;
    CXLDCExtent *ent;

    if (start_extent_id > ct3d->dc.total_extent_count) {
        return CXL_MBOX_INVALID_INPUT;
    }

    record_count = MIN(in->extent_cnt,
                       ct3d->dc.total_extent_count - start_extent_id);
    size = CXL_MAILBOX_MAX_PAYLOAD_SIZE - sizeof(*out);
    record_count = MIN(record_count, size / sizeof(out->records[0]));
    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);

    stl_le_p(&out->count, record_count);
    stl_le_p(&out->total_extents, ct3d->dc.total_extent_count);
    stl_le_p(&out->generation_num, ct3d->dc.ext_list_gen_seq);

    if (record_count > 0) {
        CXLDCExtentRaw *out_rec = &out->records[record_done];

        QTAILQ_FOREACH(ent, extent_list, node) {
            if (i++ < start_extent_id) {
                continue;
            }
            stq_le_p(&out_rec->start_dpa, ent->start_dpa);
            stq_le_p(&out_rec->len, ent->len);
            memcpy(&out_rec->tag, ent->tag, 0x10);
            stw_le_p(&out_rec->shared_seq, ent->shared_seq);

            record_done++;
            out_rec++;
            if (record_done == record_count) {
                break;
            }
        }
    }

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}

/*
 * Check whether any bit in the range addr[nr, nr+size) is set;
 * return true if any bit is set, otherwise return false.
 */
bool test_any_bits_set(const unsigned long *addr, unsigned long nr,
                       unsigned long size)
{
    unsigned long res = find_next_bit(addr, size + nr, nr);

    return res < nr + size;
}

CXLDCRegion *cxl_find_dc_region(CXLType3Dev *ct3d, uint64_t dpa, uint64_t len)
{
    int i;
    CXLDCRegion *region = &ct3d->dc.regions[0];

    if (dpa < region->base ||
        dpa >= region->base + ct3d->dc.total_capacity) {
        return NULL;
    }

    /*
     * CXL r3.1 section 9.13.3: Dynamic Capacity Device (DCD)
     *
     * Regions are used in increasing-DPA order, with Region 0 being used for
     * the lowest DPA of Dynamic Capacity and Region 7 for the highest DPA.
     * So check from the last region to find where the dpa belongs. Extents
     * that cross multiple regions are not allowed.
     */
    for (i = ct3d->dc.num_regions - 1; i >= 0; i--) {
        region = &ct3d->dc.regions[i];
        if (dpa >= region->base) {
            if (dpa + len > region->base + region->len) {
                return NULL;
            }
            return region;
        }
    }

    return NULL;
}
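/*
 * Usage sketch for the helper above (hypothetical values): with a bitmap
 * tracking 128-byte blocks relative to the first region base, checking
 * whether any block of a 512-byte extent at offset 1024 is already covered
 * becomes
 *
 *   if (test_any_bits_set(blk_bitmap, 1024 / 128, 512 / 128)) {
 *       // overlap with a previously seen extent
 *   }
 *
 * which mirrors how cxl_detect_malformed_extent_list() rejects overlapping
 * entries further down.
 */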
void cxl_insert_extent_to_extent_list(CXLDCExtentList *list,
                                      uint64_t dpa,
                                      uint64_t len,
                                      uint8_t *tag,
                                      uint16_t shared_seq)
{
    CXLDCExtent *extent;

    extent = g_new0(CXLDCExtent, 1);
    extent->start_dpa = dpa;
    extent->len = len;
    if (tag) {
        memcpy(extent->tag, tag, 0x10);
    }
    extent->shared_seq = shared_seq;

    QTAILQ_INSERT_TAIL(list, extent, node);
}

void cxl_remove_extent_from_extent_list(CXLDCExtentList *list,
                                        CXLDCExtent *extent)
{
    QTAILQ_REMOVE(list, extent, node);
    g_free(extent);
}

/*
 * Add a new extent to the extent "group" if the group exists;
 * otherwise, create a new group.
 * Return value: the extent group where the extent is inserted.
 */
CXLDCExtentGroup *cxl_insert_extent_to_extent_group(CXLDCExtentGroup *group,
                                                    uint64_t dpa,
                                                    uint64_t len,
                                                    uint8_t *tag,
                                                    uint16_t shared_seq)
{
    if (!group) {
        group = g_new0(CXLDCExtentGroup, 1);
        QTAILQ_INIT(&group->list);
    }
    cxl_insert_extent_to_extent_list(&group->list, dpa, len,
                                     tag, shared_seq);
    return group;
}

void cxl_extent_group_list_insert_tail(CXLDCExtentGroupList *list,
                                       CXLDCExtentGroup *group)
{
    QTAILQ_INSERT_TAIL(list, group, node);
}

void cxl_extent_group_list_delete_front(CXLDCExtentGroupList *list)
{
    CXLDCExtent *ent, *ent_next;
    CXLDCExtentGroup *group = QTAILQ_FIRST(list);

    QTAILQ_REMOVE(list, group, node);
    QTAILQ_FOREACH_SAFE(ent, &group->list, node, ent_next) {
        cxl_remove_extent_from_extent_list(&group->list, ent);
    }
    g_free(group);
}
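/*
 * Lifecycle sketch for the group helpers above (hypothetical device-side
 * flow; 'group' and the dpa/len values are illustrative): extents offered
 * to the host in one event are batched into a single pending group,
 *
 *   CXLDCExtentGroup *group = NULL;
 *
 *   group = cxl_insert_extent_to_extent_group(group, dpa0, len0, tag, 0);
 *   group = cxl_insert_extent_to_extent_group(group, dpa1, len1, tag, 0);
 *   cxl_extent_group_list_insert_tail(&ct3d->dc.extents_pending, group);
 *
 * and the front group is torn down again via
 * cxl_extent_group_list_delete_front() once the host has responded with
 * Add Dynamic Capacity Response (see cmd_dcd_add_dyn_cap_rsp below).
 */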
/*
 * CXL r3.1 Table 8-168: Add Dynamic Capacity Response Input Payload
 * CXL r3.1 Table 8-170: Release Dynamic Capacity Input Payload
 */
typedef struct CXLUpdateDCExtentListInPl {
    uint32_t num_entries_updated;
    uint8_t flags;
    uint8_t rsvd[3];
    /* CXL r3.1 Table 8-169: Updated Extent */
    struct {
        uint64_t start_dpa;
        uint64_t len;
        uint8_t rsvd[8];
    } QEMU_PACKED updated_entries[];
} QEMU_PACKED CXLUpdateDCExtentListInPl;

/*
 * For the extents in the extent list to operate on, check whether they are
 * valid:
 * 1. The extent should be in the range of a valid DC region;
 * 2. The extent should not cross multiple regions;
 * 3. The start DPA and the length of the extent should align with the block
 *    size of the region;
 * 4. The address range of multiple extents in the list should not overlap.
 */
static CXLRetCode cxl_detect_malformed_extent_list(CXLType3Dev *ct3d,
        const CXLUpdateDCExtentListInPl *in)
{
    uint64_t min_block_size = UINT64_MAX;
    CXLDCRegion *region;
    CXLDCRegion *lastregion = &ct3d->dc.regions[ct3d->dc.num_regions - 1];
    g_autofree unsigned long *blk_bitmap = NULL;
    uint64_t dpa, len;
    uint32_t i;

    for (i = 0; i < ct3d->dc.num_regions; i++) {
        region = &ct3d->dc.regions[i];
        min_block_size = MIN(min_block_size, region->block_size);
    }

    blk_bitmap = bitmap_new((lastregion->base + lastregion->len -
                             ct3d->dc.regions[0].base) / min_block_size);

    for (i = 0; i < in->num_entries_updated; i++) {
        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        region = cxl_find_dc_region(ct3d, dpa, len);
        if (!region) {
            return CXL_MBOX_INVALID_PA;
        }

        dpa -= ct3d->dc.regions[0].base;
        if (dpa % region->block_size || len % region->block_size) {
            return CXL_MBOX_INVALID_EXTENT_LIST;
        }
        /* the dpa range already covered by some other extents in the list */
        if (test_any_bits_set(blk_bitmap, dpa / min_block_size,
                              len / min_block_size)) {
            return CXL_MBOX_INVALID_EXTENT_LIST;
        }
        bitmap_set(blk_bitmap, dpa / min_block_size, len / min_block_size);
    }

    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cxl_dcd_add_dyn_cap_rsp_dry_run(CXLType3Dev *ct3d,
        const CXLUpdateDCExtentListInPl *in)
{
    uint32_t i;
    CXLDCExtent *ent;
    CXLDCExtentGroup *ext_group;
    uint64_t dpa, len;
    Range range1, range2;

    for (i = 0; i < in->num_entries_updated; i++) {
        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        range_init_nofail(&range1, dpa, len);

        /*
         * The host-accepted DPA range must be contained by the first extent
         * group in the pending list
         */
        ext_group = QTAILQ_FIRST(&ct3d->dc.extents_pending);
        if (!cxl_extents_contains_dpa_range(&ext_group->list, dpa, len)) {
            return CXL_MBOX_INVALID_PA;
        }

        /* to-be-added range should not overlap with range already accepted */
        QTAILQ_FOREACH(ent, &ct3d->dc.extents, node) {
            range_init_nofail(&range2, ent->start_dpa, ent->len);
            if (range_overlaps_range(&range1, &range2)) {
                return CXL_MBOX_INVALID_PA;
            }
        }
    }
    return CXL_MBOX_SUCCESS;
}
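/*
 * Payload sizing example for CXLUpdateDCExtentListInPl (illustrative
 * numbers): the fixed header is 8 bytes and each updated entry is 24 bytes,
 * so a response accepting 3 extents must arrive with
 *
 *   len_in >= sizeof(CXLUpdateDCExtentListInPl)
 *             + 3 * sizeof(in->updated_entries[0])   (= 8 + 72 = 80 bytes)
 *
 * which is exactly the check cmd_dcd_add_dyn_cap_rsp() and
 * cmd_dcd_release_dyn_cap() perform below before touching the entries.
 */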
/*
 * CXL r3.1 section 8.2.9.9.9.3: Add Dynamic Capacity Response (Opcode 4802h)
 * An extent is added to the extent list and becomes usable only after the
 * response is processed successfully.
 */
static CXLRetCode cmd_dcd_add_dyn_cap_rsp(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLUpdateDCExtentListInPl *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDCExtentList *extent_list = &ct3d->dc.extents;
    uint32_t i;
    uint64_t dpa, len;
    CXLRetCode ret;

    if (len_in < sizeof(*in)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (in->num_entries_updated == 0) {
        cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);
        return CXL_MBOX_SUCCESS;
    }

    if (len_in <
        sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    /* Reject the request if it would exceed the device's extent tracking
     * ability */
    if (in->num_entries_updated + ct3d->dc.total_extent_count >
        CXL_NUM_EXTENTS_SUPPORTED) {
        return CXL_MBOX_RESOURCES_EXHAUSTED;
    }

    ret = cxl_detect_malformed_extent_list(ct3d, in);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    ret = cxl_dcd_add_dyn_cap_rsp_dry_run(ct3d, in);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    for (i = 0; i < in->num_entries_updated; i++) {
        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        cxl_insert_extent_to_extent_list(extent_list, dpa, len, NULL, 0);
        ct3d->dc.total_extent_count += 1;
        ct3_set_region_block_backed(ct3d, dpa, len);
    }
    /* Remove the first extent group in the pending list */
    cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);

    return CXL_MBOX_SUCCESS;
}

/*
 * Copy extent list from src to dst.
 * Return value: number of extents copied
 */
static uint32_t copy_extent_list(CXLDCExtentList *dst,
                                 const CXLDCExtentList *src)
{
    uint32_t cnt = 0;
    CXLDCExtent *ent;

    if (!dst || !src) {
        return 0;
    }

    QTAILQ_FOREACH(ent, src, node) {
        cxl_insert_extent_to_extent_list(dst, ent->start_dpa, ent->len,
                                         ent->tag, ent->shared_seq);
        cnt++;
    }
    return cnt;
}
static CXLRetCode cxl_dc_extent_release_dry_run(CXLType3Dev *ct3d,
        const CXLUpdateDCExtentListInPl *in, CXLDCExtentList *updated_list,
        uint32_t *updated_list_size)
{
    CXLDCExtent *ent, *ent_next;
    uint64_t dpa, len;
    uint32_t i;
    int cnt_delta = 0;
    CXLRetCode ret = CXL_MBOX_SUCCESS;

    QTAILQ_INIT(updated_list);
    copy_extent_list(updated_list, &ct3d->dc.extents);

    for (i = 0; i < in->num_entries_updated; i++) {
        Range range;

        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        /* Check if the DPA range is not fully backed with valid extents */
        if (!ct3_test_region_block_backed(ct3d, dpa, len)) {
            ret = CXL_MBOX_INVALID_PA;
            goto free_and_exit;
        }

        /* After this point, extent overflow is the only error that can happen */
        while (len > 0) {
            QTAILQ_FOREACH(ent, updated_list, node) {
                range_init_nofail(&range, ent->start_dpa, ent->len);

                if (range_contains(&range, dpa)) {
                    uint64_t len1, len2 = 0, len_done = 0;
                    uint64_t ent_start_dpa = ent->start_dpa;
                    uint64_t ent_len = ent->len;

                    len1 = dpa - ent->start_dpa;
                    /* Found the extent or the subset of an existing extent */
                    if (range_contains(&range, dpa + len - 1)) {
                        len2 = ent_start_dpa + ent_len - dpa - len;
                    } else {
                        dpa = ent_start_dpa + ent_len;
                    }
                    len_done = ent_len - len1 - len2;

                    cxl_remove_extent_from_extent_list(updated_list, ent);
                    cnt_delta--;

                    if (len1) {
                        cxl_insert_extent_to_extent_list(updated_list,
                                                         ent_start_dpa,
                                                         len1, NULL, 0);
                        cnt_delta++;
                    }
                    if (len2) {
                        cxl_insert_extent_to_extent_list(updated_list,
                                                         dpa + len,
                                                         len2, NULL, 0);
                        cnt_delta++;
                    }

                    if (cnt_delta + ct3d->dc.total_extent_count >
                        CXL_NUM_EXTENTS_SUPPORTED) {
                        ret = CXL_MBOX_RESOURCES_EXHAUSTED;
                        goto free_and_exit;
                    }

                    len -= len_done;
                    break;
                }
            }
        }
    }
free_and_exit:
    if (ret != CXL_MBOX_SUCCESS) {
        QTAILQ_FOREACH_SAFE(ent, updated_list, node, ent_next) {
            cxl_remove_extent_from_extent_list(updated_list, ent);
        }
        *updated_list_size = 0;
    } else {
        *updated_list_size = ct3d->dc.total_extent_count + cnt_delta;
    }

    return ret;
}

/*
 * CXL r3.1 section 8.2.9.9.9.4: Release Dynamic Capacity (Opcode 4803h)
 */
static CXLRetCode cmd_dcd_release_dyn_cap(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLUpdateDCExtentListInPl *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDCExtentList updated_list;
    CXLDCExtent *ent, *ent_next;
    uint32_t updated_list_size;
    CXLRetCode ret;

    if (len_in < sizeof(*in)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (in->num_entries_updated == 0) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (len_in <
        sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    ret = cxl_detect_malformed_extent_list(ct3d, in);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    ret = cxl_dc_extent_release_dry_run(ct3d, in, &updated_list,
                                        &updated_list_size);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    /*
     * If the dry run release passes, the returned updated_list is the new
     * extent list. Clear the extents in the accepted list, copy the extents
     * from updated_list into it, and update the extent count.
     */
    QTAILQ_FOREACH_SAFE(ent, &ct3d->dc.extents, node, ent_next) {
        ct3_clear_region_block_backed(ct3d, ent->start_dpa, ent->len);
        cxl_remove_extent_from_extent_list(&ct3d->dc.extents, ent);
    }
    copy_extent_list(&ct3d->dc.extents, &updated_list);
    QTAILQ_FOREACH_SAFE(ent, &updated_list, node, ent_next) {
        ct3_set_region_block_backed(ct3d, ent->start_dpa, ent->len);
        cxl_remove_extent_from_extent_list(&updated_list, ent);
    }
    ct3d->dc.total_extent_count = updated_list_size;

    return CXL_MBOX_SUCCESS;
}
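/*
 * Worked example for the dry run split above (illustrative numbers):
 * releasing [0x1000, 0x1800) from a single accepted extent covering
 * [0x0, 0x2000) removes that extent from updated_list and re-inserts
 *
 *   len1 fragment: [0x0, 0x1000)     (the part before the release)
 *   len2 fragment: [0x1800, 0x2000)  (the part after the release)
 *
 * so cnt_delta ends up at +1 and updated_list_size is reported as
 * total_extent_count + 1. Releasing an entire extent instead gives
 * len1 = len2 = 0 and a cnt_delta of -1.
 */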
static const struct cxl_cmd cxl_cmd_set[256][256] = {
    [EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS",
        cmd_events_get_records, 1, 0 },
    [EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS",
        cmd_events_clear_records, ~0, CXL_MBOX_IMMEDIATE_LOG_CHANGE },
    [EVENTS][GET_INTERRUPT_POLICY] = { "EVENTS_GET_INTERRUPT_POLICY",
        cmd_events_get_interrupt_policy, 0, 0 },
    [EVENTS][SET_INTERRUPT_POLICY] = { "EVENTS_SET_INTERRUPT_POLICY",
        cmd_events_set_interrupt_policy,
        ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE },
    [FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO",
        cmd_firmware_update_get_info, 0, 0 },
    [FIRMWARE_UPDATE][TRANSFER] = { "FIRMWARE_UPDATE_TRANSFER",
        cmd_firmware_update_transfer, ~0, CXL_MBOX_BACKGROUND_OPERATION },
    [FIRMWARE_UPDATE][ACTIVATE] = { "FIRMWARE_UPDATE_ACTIVATE",
        cmd_firmware_update_activate, 2, CXL_MBOX_BACKGROUND_OPERATION },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set,
        8, CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported,
        0, 0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [FEATURES][GET_SUPPORTED] = { "FEATURES_GET_SUPPORTED",
        cmd_features_get_supported, 0x8, 0 },
    [FEATURES][GET_FEATURE] = { "FEATURES_GET_FEATURE",
        cmd_features_get_feature, 0x15, 0 },
    [FEATURES][SET_FEATURE] = { "FEATURES_SET_FEATURE",
        cmd_features_set_feature,
        ~0,
        (CXL_MBOX_IMMEDIATE_CONFIG_CHANGE |
         CXL_MBOX_IMMEDIATE_DATA_CHANGE |
         CXL_MBOX_IMMEDIATE_POLICY_CHANGE |
         CXL_MBOX_IMMEDIATE_LOG_CHANGE |
         CXL_MBOX_SECURITY_STATE_CHANGE)},
    [IDENTIFY][MEMORY_DEVICE] = { "IDENTIFY_MEMORY_DEVICE",
        cmd_identify_memory_device, 0, 0 },
    [CCLS][GET_PARTITION_INFO] = { "CCLS_GET_PARTITION_INFO",
        cmd_ccls_get_partition_info, 0, 0 },
    [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 },
    [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa,
        ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE | CXL_MBOX_IMMEDIATE_DATA_CHANGE },
    [SANITIZE][OVERWRITE] = { "SANITIZE_OVERWRITE", cmd_sanitize_overwrite, 0,
        (CXL_MBOX_IMMEDIATE_DATA_CHANGE |
         CXL_MBOX_SECURITY_STATE_CHANGE |
         CXL_MBOX_BACKGROUND_OPERATION)},
    [PERSISTENT_MEM][GET_SECURITY_STATE] = { "GET_SECURITY_STATE",
        cmd_get_security_state, 0, 0 },
    [MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST",
        cmd_media_get_poison_list, 16, 0 },
    [MEDIA_AND_POISON][INJECT_POISON] = { "MEDIA_AND_POISON_INJECT_POISON",
        cmd_media_inject_poison, 8, 0 },
    [MEDIA_AND_POISON][CLEAR_POISON] = { "MEDIA_AND_POISON_CLEAR_POISON",
        cmd_media_clear_poison, 72, 0 },
    [MEDIA_AND_POISON][GET_SCAN_MEDIA_CAPABILITIES] = {
        "MEDIA_AND_POISON_GET_SCAN_MEDIA_CAPABILITIES",
        cmd_media_get_scan_media_capabilities, 16, 0 },
    [MEDIA_AND_POISON][SCAN_MEDIA] = { "MEDIA_AND_POISON_SCAN_MEDIA",
        cmd_media_scan_media, 17, CXL_MBOX_BACKGROUND_OPERATION },
    [MEDIA_AND_POISON][GET_SCAN_MEDIA_RESULTS] = {
        "MEDIA_AND_POISON_GET_SCAN_MEDIA_RESULTS",
        cmd_media_get_scan_media_results, 0, 0 },
};
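/*
 * Note on the command tables above and below: each initializer is
 * { name, handler, expected input payload length, effects }, where a length
 * of ~0 means "variable, validated by the handler itself" (see the len_in
 * check in cxl_process_cci_message()) and the effects flags feed both the
 * Command Effects Log and the background/immediate handling of the command.
 */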
static const struct cxl_cmd cxl_cmd_set_dcd[256][256] = {
    [DCD_CONFIG][GET_DC_CONFIG] = { "DCD_GET_DC_CONFIG",
        cmd_dcd_get_dyn_cap_config, 2, 0 },
    [DCD_CONFIG][GET_DYN_CAP_EXT_LIST] = {
        "DCD_GET_DYNAMIC_CAPACITY_EXTENT_LIST", cmd_dcd_get_dyn_cap_ext_list,
        8, 0 },
    [DCD_CONFIG][ADD_DYN_CAP_RSP] = {
        "DCD_ADD_DYNAMIC_CAPACITY_RESPONSE", cmd_dcd_add_dyn_cap_rsp,
        ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE },
    [DCD_CONFIG][RELEASE_DYN_CAP] = {
        "DCD_RELEASE_DYNAMIC_CAPACITY", cmd_dcd_release_dyn_cap,
        ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE },
};

static const struct cxl_cmd cxl_cmd_set_sw[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [INFOSTAT][BACKGROUND_OPERATION_STATUS] = { "BACKGROUND_OPERATION_STATUS",
        cmd_infostat_bg_op_sts, 0, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 8,
        CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
        0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [PHYSICAL_SWITCH][IDENTIFY_SWITCH_DEVICE] = { "IDENTIFY_SWITCH_DEVICE",
        cmd_identify_switch_device, 0, 0 },
    [PHYSICAL_SWITCH][GET_PHYSICAL_PORT_STATE] = { "SWITCH_PHYSICAL_PORT_STATS",
        cmd_get_physical_port_state, ~0, 0 },
    [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
        cmd_tunnel_management_cmd, ~0, 0 },
};

/*
 * While the command is executing in the background, the device should
 * update the percentage complete in the Background Command Status Register
 * at least once per second.
 */

#define CXL_MBOX_BG_UPDATE_FREQ 1000UL

int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd,
                            size_t len_in, uint8_t *pl_in, size_t *len_out,
                            uint8_t *pl_out, bool *bg_started)
{
    int ret;
    const struct cxl_cmd *cxl_cmd;
    opcode_handler h;
    CXLDeviceState *cxl_dstate;

    *len_out = 0;
    cxl_cmd = &cci->cxl_cmd_set[set][cmd];
    h = cxl_cmd->handler;
    if (!h) {
        qemu_log_mask(LOG_UNIMP, "Command %04xh not implemented\n",
                      set << 8 | cmd);
        return CXL_MBOX_UNSUPPORTED;
    }

    if (len_in != cxl_cmd->in && cxl_cmd->in != ~0) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    /* Only one bg command at a time */
    if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) &&
        cci->bg.runtime > 0) {
        return CXL_MBOX_BUSY;
    }

    /* forbid any selected commands while the media is disabled */
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;

        if (cxl_dev_media_disabled(cxl_dstate)) {
            if (h == cmd_events_get_records ||
                h == cmd_ccls_get_partition_info ||
                h == cmd_ccls_set_lsa ||
                h == cmd_ccls_get_lsa ||
                h == cmd_logs_get_log ||
                h == cmd_media_get_poison_list ||
                h == cmd_media_inject_poison ||
                h == cmd_media_clear_poison ||
                h == cmd_sanitize_overwrite ||
                h == cmd_firmware_update_transfer ||
                h == cmd_firmware_update_activate) {
                return CXL_MBOX_MEDIA_DISABLED;
            }
        }
    }

    ret = (*h)(cxl_cmd, pl_in, len_in, pl_out, len_out, cci);
    if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) &&
        ret == CXL_MBOX_BG_STARTED) {
        *bg_started = true;
    } else {
        *bg_started = false;
    }

    /* Set bg and the return code */
    if (*bg_started) {
        uint64_t now;

        cci->bg.opcode = (set << 8) | cmd;

        cci->bg.complete_pct = 0;
        cci->bg.ret_code = 0;

        now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
        cci->bg.starttime = now;
        timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
    }

    return ret;
}
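/*
 * Caller sketch for cxl_process_cci_message() (hypothetical transport glue;
 * the 'opcode', buffer and length names are illustrative): a register-level
 * mailbox or MCTP binding decodes the command register into set/cmd and does
 *
 *   size_t len_out = 0;
 *   bool bg_started;
 *   int rc = cxl_process_cci_message(cci, opcode >> 8, opcode & 0xff,
 *                                    len_in, input_pl, &len_out,
 *                                    output_pl, &bg_started);
 *
 * then writes rc and len_out back into the status/command registers and, if
 * bg_started is set, leaves completion reporting to the bg timer below.
 */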
static void bg_timercb(void *opaque)
{
    CXLCCI *cci = opaque;
    uint64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    uint64_t total_time = cci->bg.starttime + cci->bg.runtime;

    assert(cci->bg.runtime > 0);

    if (now >= total_time) { /* we are done */
        uint16_t ret = CXL_MBOX_SUCCESS;

        cci->bg.complete_pct = 100;
        cci->bg.ret_code = ret;
        switch (cci->bg.opcode) {
        case 0x0201: /* fw transfer */
            __do_firmware_xfer(cci);
            break;
        case 0x4400: /* sanitize */
        {
            CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

            __do_sanitization(ct3d);
            cxl_dev_enable_media(&ct3d->cxl_dstate);
        }
            break;
        case 0x4304: /* scan media */
        {
            CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

            __do_scan_media(ct3d);
            break;
        }
        default:
            __builtin_unreachable();
            break;
        }
    } else {
        /* estimate only */
        cci->bg.complete_pct =
            100 * (now - cci->bg.starttime) / cci->bg.runtime;
        timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
    }

    if (cci->bg.complete_pct == 100) {
        /* TODO: generalize to switch CCI */
        CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
        CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
        PCIDevice *pdev = PCI_DEVICE(cci->d);

        cci->bg.starttime = 0;
        /* registers are updated, allow new bg-capable cmds */
        cci->bg.runtime = 0;

        if (msix_enabled(pdev)) {
            msix_notify(pdev, cxl_dstate->mbox_msi_n);
        } else if (msi_enabled(pdev)) {
            msi_notify(pdev, cxl_dstate->mbox_msi_n);
        }
    }
}

static void cxl_rebuild_cel(CXLCCI *cci)
{
    cci->cel_size = 0; /* Reset for a fresh build */
    for (int set = 0; set < 256; set++) {
        for (int cmd = 0; cmd < 256; cmd++) {
            if (cci->cxl_cmd_set[set][cmd].handler) {
                const struct cxl_cmd *c = &cci->cxl_cmd_set[set][cmd];
                struct cel_log *log =
                    &cci->cel_log[cci->cel_size];

                log->opcode = (set << 8) | cmd;
                log->effect = c->effect;
                cci->cel_size++;
            }
        }
    }
}

void cxl_init_cci(CXLCCI *cci, size_t payload_max)
{
    cci->payload_max = payload_max;
    cxl_rebuild_cel(cci);

    cci->bg.complete_pct = 0;
    cci->bg.starttime = 0;
    cci->bg.runtime = 0;
    cci->bg.timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                 bg_timercb, cci);

    memset(&cci->fw, 0, sizeof(cci->fw));
    cci->fw.active_slot = 1;
    cci->fw.slot[cci->fw.active_slot - 1] = true;
}

static void cxl_copy_cci_commands(CXLCCI *cci,
                                  const struct cxl_cmd (*cxl_cmds)[256])
{
    for (int set = 0; set < 256; set++) {
        for (int cmd = 0; cmd < 256; cmd++) {
            if (cxl_cmds[set][cmd].handler) {
                cci->cxl_cmd_set[set][cmd] = cxl_cmds[set][cmd];
            }
        }
    }
}

void cxl_add_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmd_set)[256],
                          size_t payload_max)
{
    cci->payload_max = MAX(payload_max, cci->payload_max);
    cxl_copy_cci_commands(cci, cxl_cmd_set);
    cxl_rebuild_cel(cci);
}

void cxl_initialize_mailbox_swcci(CXLCCI *cci, DeviceState *intf,
                                  DeviceState *d, size_t payload_max)
{
    cxl_copy_cci_commands(cci, cxl_cmd_set_sw);
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}

void cxl_initialize_mailbox_t3(CXLCCI *cci, DeviceState *d, size_t payload_max)
{
    CXLType3Dev *ct3d = CXL_TYPE3(d);

    cxl_copy_cci_commands(cci, cxl_cmd_set);
    if (ct3d->dc.num_regions) {
        cxl_copy_cci_commands(cci, cxl_cmd_set_dcd);
    }
    cci->d = d;

    /* No separation for PCI MB as protocol handled in PCI device */
    cci->intf = d;
    cxl_init_cci(cci, payload_max);
}
static const struct cxl_cmd cxl_cmd_set_t3_ld[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
        0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
};

void cxl_initialize_t3_ld_cci(CXLCCI *cci, DeviceState *d, DeviceState *intf,
                              size_t payload_max)
{
    cxl_copy_cci_commands(cci, cxl_cmd_set_t3_ld);
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}

static const struct cxl_cmd cxl_cmd_set_t3_fm_owned_ld_mctp[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0},
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
        0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
        cmd_tunnel_management_cmd, ~0, 0 },
};

void cxl_initialize_t3_fm_owned_ld_mctpcci(CXLCCI *cci, DeviceState *d,
                                           DeviceState *intf,
                                           size_t payload_max)
{
    cxl_copy_cci_commands(cci, cxl_cmd_set_t3_fm_owned_ld_mctp);
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}