1 /* 2 * CXL Utility library for mailbox interface 3 * 4 * Copyright(C) 2020 Intel Corporation. 5 * 6 * This work is licensed under the terms of the GNU GPL, version 2. See the 7 * COPYING file in the top-level directory. 8 */ 9 10 #include "qemu/osdep.h" 11 #include "hw/pci/msi.h" 12 #include "hw/pci/msix.h" 13 #include "hw/cxl/cxl.h" 14 #include "hw/cxl/cxl_events.h" 15 #include "hw/cxl/cxl_mailbox.h" 16 #include "hw/pci/pci.h" 17 #include "hw/pci-bridge/cxl_upstream_port.h" 18 #include "qemu/cutils.h" 19 #include "qemu/log.h" 20 #include "qemu/units.h" 21 #include "qemu/uuid.h" 22 #include "sysemu/hostmem.h" 23 #include "qemu/range.h" 24 25 #define CXL_CAPACITY_MULTIPLIER (256 * MiB) 26 #define CXL_DC_EVENT_LOG_SIZE 8 27 #define CXL_NUM_EXTENTS_SUPPORTED 512 28 #define CXL_NUM_TAGS_SUPPORTED 0 29 30 /* 31 * How to add a new command, example. The command set FOO, with cmd BAR. 32 * 1. Add the command set and cmd to the enum. 33 * FOO = 0x7f, 34 * #define BAR 0 35 * 2. Implement the handler 36 * static CXLRetCode cmd_foo_bar(struct cxl_cmd *cmd, 37 * CXLDeviceState *cxl_dstate, uint16_t *len) 38 * 3. Add the command to the cxl_cmd_set[][] 39 * [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y }, 40 * 4. Implement your handler 41 * define_mailbox_handler(FOO_BAR) { ... return CXL_MBOX_SUCCESS; } 42 * 43 * 44 * Writing the handler: 45 * The handler will provide the &struct cxl_cmd, the &CXLDeviceState, and the 46 * in/out length of the payload. The handler is responsible for consuming the 47 * payload from cmd->payload and operating upon it as necessary. It must then 48 * fill the output data into cmd->payload (overwriting what was there), 49 * setting the length, and returning a valid return code. 50 * 51 * XXX: The handler need not worry about endianness. The payload is read out of 52 * a register interface that already deals with it. 53 */ 54 55 enum { 56 INFOSTAT = 0x00, 57 #define IS_IDENTIFY 0x1 58 #define BACKGROUND_OPERATION_STATUS 0x2 59 EVENTS = 0x01, 60 #define GET_RECORDS 0x0 61 #define CLEAR_RECORDS 0x1 62 #define GET_INTERRUPT_POLICY 0x2 63 #define SET_INTERRUPT_POLICY 0x3 64 FIRMWARE_UPDATE = 0x02, 65 #define GET_INFO 0x0 66 #define TRANSFER 0x1 67 #define ACTIVATE 0x2 68 TIMESTAMP = 0x03, 69 #define GET 0x0 70 #define SET 0x1 71 LOGS = 0x04, 72 #define GET_SUPPORTED 0x0 73 #define GET_LOG 0x1 74 FEATURES = 0x05, 75 #define GET_SUPPORTED 0x0 76 #define GET_FEATURE 0x1 77 #define SET_FEATURE 0x2 78 IDENTIFY = 0x40, 79 #define MEMORY_DEVICE 0x0 80 CCLS = 0x41, 81 #define GET_PARTITION_INFO 0x0 82 #define GET_LSA 0x2 83 #define SET_LSA 0x3 84 SANITIZE = 0x44, 85 #define OVERWRITE 0x0 86 #define SECURE_ERASE 0x1 87 PERSISTENT_MEM = 0x45, 88 #define GET_SECURITY_STATE 0x0 89 MEDIA_AND_POISON = 0x43, 90 #define GET_POISON_LIST 0x0 91 #define INJECT_POISON 0x1 92 #define CLEAR_POISON 0x2 93 #define GET_SCAN_MEDIA_CAPABILITIES 0x3 94 #define SCAN_MEDIA 0x4 95 #define GET_SCAN_MEDIA_RESULTS 0x5 96 DCD_CONFIG = 0x48, 97 #define GET_DC_CONFIG 0x0 98 #define GET_DYN_CAP_EXT_LIST 0x1 99 #define ADD_DYN_CAP_RSP 0x2 100 #define RELEASE_DYN_CAP 0x3 101 PHYSICAL_SWITCH = 0x51, 102 #define IDENTIFY_SWITCH_DEVICE 0x0 103 #define GET_PHYSICAL_PORT_STATE 0x1 104 TUNNEL = 0x53, 105 #define MANAGEMENT_COMMAND 0x0 106 }; 107 108 /* CCI Message Format CXL r3.1 Figure 7-19 */ 109 typedef struct CXLCCIMessage { 110 uint8_t category; 111 #define CXL_CCI_CAT_REQ 0 112 #define CXL_CCI_CAT_RSP 1 113 uint8_t tag; 114 uint8_t resv1; 115 uint8_t command; 116 uint8_t command_set; 117 uint8_t pl_length[3]; 118 uint16_t rc; 119 uint16_t vendor_specific; 120 uint8_t payload[]; 121 } QEMU_PACKED CXLCCIMessage; 122 123 /* This command is only defined to an MLD FM Owned LD or an MHD */ 124 static CXLRetCode cmd_tunnel_management_cmd(const struct cxl_cmd *cmd, 125 uint8_t *payload_in, 126 size_t len_in, 127 uint8_t *payload_out, 128 size_t *len_out, 129 CXLCCI *cci) 130 { 131 PCIDevice *tunnel_target; 132 CXLCCI *target_cci; 133 struct { 134 uint8_t port_or_ld_id; 135 uint8_t target_type; 136 uint16_t size; 137 CXLCCIMessage ccimessage; 138 } QEMU_PACKED *in; 139 struct { 140 uint16_t resp_len; 141 uint8_t resv[2]; 142 CXLCCIMessage ccimessage; 143 } QEMU_PACKED *out; 144 size_t pl_length, length_out; 145 bool bg_started; 146 int rc; 147 148 if (cmd->in < sizeof(*in)) { 149 return CXL_MBOX_INVALID_INPUT; 150 } 151 in = (void *)payload_in; 152 out = (void *)payload_out; 153 154 /* Enough room for minimum sized message - no payload */ 155 if (in->size < sizeof(in->ccimessage)) { 156 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 157 } 158 /* Length of input payload should be in->size + a wrapping tunnel header */ 159 if (in->size != len_in - offsetof(typeof(*out), ccimessage)) { 160 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 161 } 162 if (in->ccimessage.category != CXL_CCI_CAT_REQ) { 163 return CXL_MBOX_INVALID_INPUT; 164 } 165 166 if (in->target_type != 0) { 167 qemu_log_mask(LOG_UNIMP, 168 "Tunneled Command sent to non existent FM-LD"); 169 return CXL_MBOX_INVALID_INPUT; 170 } 171 172 /* 173 * Target of a tunnel unfortunately depends on type of CCI readint 174 * the message. 175 * If in a switch, then it's the port number. 176 * If in an MLD it is the ld number. 177 * If in an MHD target type indicate where we are going. 178 */ 179 if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) { 180 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 181 if (in->port_or_ld_id != 0) { 182 /* Only pretending to have one for now! */ 183 return CXL_MBOX_INVALID_INPUT; 184 } 185 target_cci = &ct3d->ld0_cci; 186 } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) { 187 CXLUpstreamPort *usp = CXL_USP(cci->d); 188 189 tunnel_target = pcie_find_port_by_pn(&PCI_BRIDGE(usp)->sec_bus, 190 in->port_or_ld_id); 191 if (!tunnel_target) { 192 return CXL_MBOX_INVALID_INPUT; 193 } 194 tunnel_target = 195 pci_bridge_get_sec_bus(PCI_BRIDGE(tunnel_target))->devices[0]; 196 if (!tunnel_target) { 197 return CXL_MBOX_INVALID_INPUT; 198 } 199 if (object_dynamic_cast(OBJECT(tunnel_target), TYPE_CXL_TYPE3)) { 200 CXLType3Dev *ct3d = CXL_TYPE3(tunnel_target); 201 /* Tunneled VDMs always land on FM Owned LD */ 202 target_cci = &ct3d->vdm_fm_owned_ld_mctp_cci; 203 } else { 204 return CXL_MBOX_INVALID_INPUT; 205 } 206 } else { 207 return CXL_MBOX_INVALID_INPUT; 208 } 209 210 pl_length = in->ccimessage.pl_length[2] << 16 | 211 in->ccimessage.pl_length[1] << 8 | in->ccimessage.pl_length[0]; 212 rc = cxl_process_cci_message(target_cci, 213 in->ccimessage.command_set, 214 in->ccimessage.command, 215 pl_length, in->ccimessage.payload, 216 &length_out, out->ccimessage.payload, 217 &bg_started); 218 /* Payload should be in place. Rest of CCI header and needs filling */ 219 out->resp_len = length_out + sizeof(CXLCCIMessage); 220 st24_le_p(out->ccimessage.pl_length, length_out); 221 out->ccimessage.rc = rc; 222 out->ccimessage.category = CXL_CCI_CAT_RSP; 223 out->ccimessage.command = in->ccimessage.command; 224 out->ccimessage.command_set = in->ccimessage.command_set; 225 out->ccimessage.tag = in->ccimessage.tag; 226 *len_out = length_out + sizeof(*out); 227 228 return CXL_MBOX_SUCCESS; 229 } 230 231 static CXLRetCode cmd_events_get_records(const struct cxl_cmd *cmd, 232 uint8_t *payload_in, size_t len_in, 233 uint8_t *payload_out, size_t *len_out, 234 CXLCCI *cci) 235 { 236 CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate; 237 CXLGetEventPayload *pl; 238 uint8_t log_type; 239 int max_recs; 240 241 if (cmd->in < sizeof(log_type)) { 242 return CXL_MBOX_INVALID_INPUT; 243 } 244 245 log_type = payload_in[0]; 246 247 pl = (CXLGetEventPayload *)payload_out; 248 249 max_recs = (cxlds->payload_size - CXL_EVENT_PAYLOAD_HDR_SIZE) / 250 CXL_EVENT_RECORD_SIZE; 251 if (max_recs > 0xFFFF) { 252 max_recs = 0xFFFF; 253 } 254 255 return cxl_event_get_records(cxlds, pl, log_type, max_recs, len_out); 256 } 257 258 static CXLRetCode cmd_events_clear_records(const struct cxl_cmd *cmd, 259 uint8_t *payload_in, 260 size_t len_in, 261 uint8_t *payload_out, 262 size_t *len_out, 263 CXLCCI *cci) 264 { 265 CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate; 266 CXLClearEventPayload *pl; 267 268 pl = (CXLClearEventPayload *)payload_in; 269 *len_out = 0; 270 return cxl_event_clear_records(cxlds, pl); 271 } 272 273 static CXLRetCode cmd_events_get_interrupt_policy(const struct cxl_cmd *cmd, 274 uint8_t *payload_in, 275 size_t len_in, 276 uint8_t *payload_out, 277 size_t *len_out, 278 CXLCCI *cci) 279 { 280 CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate; 281 CXLEventInterruptPolicy *policy; 282 CXLEventLog *log; 283 284 policy = (CXLEventInterruptPolicy *)payload_out; 285 286 log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO]; 287 if (log->irq_enabled) { 288 policy->info_settings = CXL_EVENT_INT_SETTING(log->irq_vec); 289 } 290 291 log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN]; 292 if (log->irq_enabled) { 293 policy->warn_settings = CXL_EVENT_INT_SETTING(log->irq_vec); 294 } 295 296 log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL]; 297 if (log->irq_enabled) { 298 policy->failure_settings = CXL_EVENT_INT_SETTING(log->irq_vec); 299 } 300 301 log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL]; 302 if (log->irq_enabled) { 303 policy->fatal_settings = CXL_EVENT_INT_SETTING(log->irq_vec); 304 } 305 306 log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP]; 307 if (log->irq_enabled) { 308 /* Dynamic Capacity borrows the same vector as info */ 309 policy->dyn_cap_settings = CXL_INT_MSI_MSIX; 310 } 311 312 *len_out = sizeof(*policy); 313 return CXL_MBOX_SUCCESS; 314 } 315 316 static CXLRetCode cmd_events_set_interrupt_policy(const struct cxl_cmd *cmd, 317 uint8_t *payload_in, 318 size_t len_in, 319 uint8_t *payload_out, 320 size_t *len_out, 321 CXLCCI *cci) 322 { 323 CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate; 324 CXLEventInterruptPolicy *policy; 325 CXLEventLog *log; 326 327 if (len_in < CXL_EVENT_INT_SETTING_MIN_LEN) { 328 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 329 } 330 331 policy = (CXLEventInterruptPolicy *)payload_in; 332 333 log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO]; 334 log->irq_enabled = (policy->info_settings & CXL_EVENT_INT_MODE_MASK) == 335 CXL_INT_MSI_MSIX; 336 337 log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN]; 338 log->irq_enabled = (policy->warn_settings & CXL_EVENT_INT_MODE_MASK) == 339 CXL_INT_MSI_MSIX; 340 341 log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL]; 342 log->irq_enabled = (policy->failure_settings & CXL_EVENT_INT_MODE_MASK) == 343 CXL_INT_MSI_MSIX; 344 345 log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL]; 346 log->irq_enabled = (policy->fatal_settings & CXL_EVENT_INT_MODE_MASK) == 347 CXL_INT_MSI_MSIX; 348 349 /* DCD is optional */ 350 if (len_in < sizeof(*policy)) { 351 return CXL_MBOX_SUCCESS; 352 } 353 354 log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP]; 355 log->irq_enabled = (policy->dyn_cap_settings & CXL_EVENT_INT_MODE_MASK) == 356 CXL_INT_MSI_MSIX; 357 358 *len_out = 0; 359 return CXL_MBOX_SUCCESS; 360 } 361 362 /* CXL r3.1 section 8.2.9.1.1: Identify (Opcode 0001h) */ 363 static CXLRetCode cmd_infostat_identify(const struct cxl_cmd *cmd, 364 uint8_t *payload_in, 365 size_t len_in, 366 uint8_t *payload_out, 367 size_t *len_out, 368 CXLCCI *cci) 369 { 370 PCIDeviceClass *class = PCI_DEVICE_GET_CLASS(cci->d); 371 struct { 372 uint16_t pcie_vid; 373 uint16_t pcie_did; 374 uint16_t pcie_subsys_vid; 375 uint16_t pcie_subsys_id; 376 uint64_t sn; 377 uint8_t max_message_size; 378 uint8_t component_type; 379 } QEMU_PACKED *is_identify; 380 QEMU_BUILD_BUG_ON(sizeof(*is_identify) != 18); 381 382 is_identify = (void *)payload_out; 383 is_identify->pcie_vid = class->vendor_id; 384 is_identify->pcie_did = class->device_id; 385 if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) { 386 is_identify->sn = CXL_USP(cci->d)->sn; 387 /* Subsystem info not defined for a USP */ 388 is_identify->pcie_subsys_vid = 0; 389 is_identify->pcie_subsys_id = 0; 390 is_identify->component_type = 0x0; /* Switch */ 391 } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) { 392 PCIDevice *pci_dev = PCI_DEVICE(cci->d); 393 394 is_identify->sn = CXL_TYPE3(cci->d)->sn; 395 /* 396 * We can't always use class->subsystem_vendor_id as 397 * it is not set if the defaults are used. 398 */ 399 is_identify->pcie_subsys_vid = 400 pci_get_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID); 401 is_identify->pcie_subsys_id = 402 pci_get_word(pci_dev->config + PCI_SUBSYSTEM_ID); 403 is_identify->component_type = 0x3; /* Type 3 */ 404 } 405 406 /* TODO: Allow this to vary across different CCIs */ 407 is_identify->max_message_size = 9; /* 512 bytes - MCTP_CXL_MAILBOX_BYTES */ 408 *len_out = sizeof(*is_identify); 409 return CXL_MBOX_SUCCESS; 410 } 411 412 static void cxl_set_dsp_active_bm(PCIBus *b, PCIDevice *d, 413 void *private) 414 { 415 uint8_t *bm = private; 416 if (object_dynamic_cast(OBJECT(d), TYPE_CXL_DSP)) { 417 uint8_t port = PCIE_PORT(d)->port; 418 bm[port / 8] |= 1 << (port % 8); 419 } 420 } 421 422 /* CXL r3.1 Section 7.6.7.1.1: Identify Switch Device (Opcode 5100h) */ 423 static CXLRetCode cmd_identify_switch_device(const struct cxl_cmd *cmd, 424 uint8_t *payload_in, 425 size_t len_in, 426 uint8_t *payload_out, 427 size_t *len_out, 428 CXLCCI *cci) 429 { 430 PCIEPort *usp = PCIE_PORT(cci->d); 431 PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus; 432 int num_phys_ports = pcie_count_ds_ports(bus); 433 434 struct cxl_fmapi_ident_switch_dev_resp_pl { 435 uint8_t ingress_port_id; 436 uint8_t rsvd; 437 uint8_t num_physical_ports; 438 uint8_t num_vcss; 439 uint8_t active_port_bitmask[0x20]; 440 uint8_t active_vcs_bitmask[0x20]; 441 uint16_t total_vppbs; 442 uint16_t bound_vppbs; 443 uint8_t num_hdm_decoders_per_usp; 444 } QEMU_PACKED *out; 445 QEMU_BUILD_BUG_ON(sizeof(*out) != 0x49); 446 447 out = (struct cxl_fmapi_ident_switch_dev_resp_pl *)payload_out; 448 *out = (struct cxl_fmapi_ident_switch_dev_resp_pl) { 449 .num_physical_ports = num_phys_ports + 1, /* 1 USP */ 450 .num_vcss = 1, /* Not yet support multiple VCS - potentially tricky */ 451 .active_vcs_bitmask[0] = 0x1, 452 .total_vppbs = num_phys_ports + 1, 453 .bound_vppbs = num_phys_ports + 1, 454 .num_hdm_decoders_per_usp = 4, 455 }; 456 457 /* Depends on the CCI type */ 458 if (object_dynamic_cast(OBJECT(cci->intf), TYPE_PCIE_PORT)) { 459 out->ingress_port_id = PCIE_PORT(cci->intf)->port; 460 } else { 461 /* MCTP? */ 462 out->ingress_port_id = 0; 463 } 464 465 pci_for_each_device_under_bus(bus, cxl_set_dsp_active_bm, 466 out->active_port_bitmask); 467 out->active_port_bitmask[usp->port / 8] |= (1 << usp->port % 8); 468 469 *len_out = sizeof(*out); 470 471 return CXL_MBOX_SUCCESS; 472 } 473 474 /* CXL r3.1 Section 7.6.7.1.2: Get Physical Port State (Opcode 5101h) */ 475 static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd, 476 uint8_t *payload_in, 477 size_t len_in, 478 uint8_t *payload_out, 479 size_t *len_out, 480 CXLCCI *cci) 481 { 482 /* CXL r3.1 Table 7-17: Get Physical Port State Request Payload */ 483 struct cxl_fmapi_get_phys_port_state_req_pl { 484 uint8_t num_ports; 485 uint8_t ports[]; 486 } QEMU_PACKED *in; 487 488 /* 489 * CXL r3.1 Table 7-19: Get Physical Port State Port Information Block 490 * Format 491 */ 492 struct cxl_fmapi_port_state_info_block { 493 uint8_t port_id; 494 uint8_t config_state; 495 uint8_t connected_device_cxl_version; 496 uint8_t rsv1; 497 uint8_t connected_device_type; 498 uint8_t port_cxl_version_bitmask; 499 uint8_t max_link_width; 500 uint8_t negotiated_link_width; 501 uint8_t supported_link_speeds_vector; 502 uint8_t max_link_speed; 503 uint8_t current_link_speed; 504 uint8_t ltssm_state; 505 uint8_t first_lane_num; 506 uint16_t link_state; 507 uint8_t supported_ld_count; 508 } QEMU_PACKED; 509 510 /* CXL r3.1 Table 7-18: Get Physical Port State Response Payload */ 511 struct cxl_fmapi_get_phys_port_state_resp_pl { 512 uint8_t num_ports; 513 uint8_t rsv1[3]; 514 struct cxl_fmapi_port_state_info_block ports[]; 515 } QEMU_PACKED *out; 516 PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus; 517 PCIEPort *usp = PCIE_PORT(cci->d); 518 size_t pl_size; 519 int i; 520 521 in = (struct cxl_fmapi_get_phys_port_state_req_pl *)payload_in; 522 out = (struct cxl_fmapi_get_phys_port_state_resp_pl *)payload_out; 523 524 /* Check if what was requested can fit */ 525 if (sizeof(*out) + sizeof(*out->ports) * in->num_ports > cci->payload_max) { 526 return CXL_MBOX_INVALID_INPUT; 527 } 528 529 /* For success there should be a match for each requested */ 530 out->num_ports = in->num_ports; 531 532 for (i = 0; i < in->num_ports; i++) { 533 struct cxl_fmapi_port_state_info_block *port; 534 /* First try to match on downstream port */ 535 PCIDevice *port_dev; 536 uint16_t lnkcap, lnkcap2, lnksta; 537 538 port = &out->ports[i]; 539 540 port_dev = pcie_find_port_by_pn(bus, in->ports[i]); 541 if (port_dev) { /* DSP */ 542 PCIDevice *ds_dev = pci_bridge_get_sec_bus(PCI_BRIDGE(port_dev)) 543 ->devices[0]; 544 port->config_state = 3; 545 if (ds_dev) { 546 if (object_dynamic_cast(OBJECT(ds_dev), TYPE_CXL_TYPE3)) { 547 port->connected_device_type = 5; /* Assume MLD for now */ 548 } else { 549 port->connected_device_type = 1; 550 } 551 } else { 552 port->connected_device_type = 0; 553 } 554 port->supported_ld_count = 3; 555 } else if (usp->port == in->ports[i]) { /* USP */ 556 port_dev = PCI_DEVICE(usp); 557 port->config_state = 4; 558 port->connected_device_type = 0; 559 } else { 560 return CXL_MBOX_INVALID_INPUT; 561 } 562 563 port->port_id = in->ports[i]; 564 /* Information on status of this port in lnksta, lnkcap */ 565 if (!port_dev->exp.exp_cap) { 566 return CXL_MBOX_INTERNAL_ERROR; 567 } 568 lnksta = port_dev->config_read(port_dev, 569 port_dev->exp.exp_cap + PCI_EXP_LNKSTA, 570 sizeof(lnksta)); 571 lnkcap = port_dev->config_read(port_dev, 572 port_dev->exp.exp_cap + PCI_EXP_LNKCAP, 573 sizeof(lnkcap)); 574 lnkcap2 = port_dev->config_read(port_dev, 575 port_dev->exp.exp_cap + PCI_EXP_LNKCAP2, 576 sizeof(lnkcap2)); 577 578 port->max_link_width = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4; 579 port->negotiated_link_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> 4; 580 /* No definition for SLS field in linux/pci_regs.h */ 581 port->supported_link_speeds_vector = (lnkcap2 & 0xFE) >> 1; 582 port->max_link_speed = lnkcap & PCI_EXP_LNKCAP_SLS; 583 port->current_link_speed = lnksta & PCI_EXP_LNKSTA_CLS; 584 /* TODO: Track down if we can get the rest of the info */ 585 port->ltssm_state = 0x7; 586 port->first_lane_num = 0; 587 port->link_state = 0; 588 port->port_cxl_version_bitmask = 0x2; 589 port->connected_device_cxl_version = 0x2; 590 } 591 592 pl_size = sizeof(*out) + sizeof(*out->ports) * in->num_ports; 593 *len_out = pl_size; 594 595 return CXL_MBOX_SUCCESS; 596 } 597 598 /* CXL r3.1 Section 8.2.9.1.2: Background Operation Status (Opcode 0002h) */ 599 static CXLRetCode cmd_infostat_bg_op_sts(const struct cxl_cmd *cmd, 600 uint8_t *payload_in, 601 size_t len_in, 602 uint8_t *payload_out, 603 size_t *len_out, 604 CXLCCI *cci) 605 { 606 struct { 607 uint8_t status; 608 uint8_t rsvd; 609 uint16_t opcode; 610 uint16_t returncode; 611 uint16_t vendor_ext_status; 612 } QEMU_PACKED *bg_op_status; 613 QEMU_BUILD_BUG_ON(sizeof(*bg_op_status) != 8); 614 615 bg_op_status = (void *)payload_out; 616 bg_op_status->status = cci->bg.complete_pct << 1; 617 if (cci->bg.runtime > 0) { 618 bg_op_status->status |= 1U << 0; 619 } 620 bg_op_status->opcode = cci->bg.opcode; 621 bg_op_status->returncode = cci->bg.ret_code; 622 *len_out = sizeof(*bg_op_status); 623 624 return CXL_MBOX_SUCCESS; 625 } 626 627 #define CXL_FW_SLOTS 2 628 #define CXL_FW_SIZE 0x02000000 /* 32 mb */ 629 630 /* CXL r3.1 Section 8.2.9.3.1: Get FW Info (Opcode 0200h) */ 631 static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd, 632 uint8_t *payload_in, 633 size_t len, 634 uint8_t *payload_out, 635 size_t *len_out, 636 CXLCCI *cci) 637 { 638 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 639 CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; 640 struct { 641 uint8_t slots_supported; 642 uint8_t slot_info; 643 uint8_t caps; 644 uint8_t rsvd[0xd]; 645 char fw_rev1[0x10]; 646 char fw_rev2[0x10]; 647 char fw_rev3[0x10]; 648 char fw_rev4[0x10]; 649 } QEMU_PACKED *fw_info; 650 QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50); 651 652 if ((cxl_dstate->vmem_size < CXL_CAPACITY_MULTIPLIER) || 653 (cxl_dstate->pmem_size < CXL_CAPACITY_MULTIPLIER) || 654 (ct3d->dc.total_capacity < CXL_CAPACITY_MULTIPLIER)) { 655 return CXL_MBOX_INTERNAL_ERROR; 656 } 657 658 fw_info = (void *)payload_out; 659 660 fw_info->slots_supported = CXL_FW_SLOTS; 661 fw_info->slot_info = (cci->fw.active_slot & 0x7) | 662 ((cci->fw.staged_slot & 0x7) << 3); 663 fw_info->caps = BIT(0); /* online update supported */ 664 665 if (cci->fw.slot[0]) { 666 pstrcpy(fw_info->fw_rev1, sizeof(fw_info->fw_rev1), "BWFW VERSION 0"); 667 } 668 if (cci->fw.slot[1]) { 669 pstrcpy(fw_info->fw_rev2, sizeof(fw_info->fw_rev2), "BWFW VERSION 1"); 670 } 671 672 *len_out = sizeof(*fw_info); 673 return CXL_MBOX_SUCCESS; 674 } 675 676 /* CXL r3.1 section 8.2.9.3.2: Transfer FW (Opcode 0201h) */ 677 #define CXL_FW_XFER_ALIGNMENT 128 678 679 #define CXL_FW_XFER_ACTION_FULL 0x0 680 #define CXL_FW_XFER_ACTION_INIT 0x1 681 #define CXL_FW_XFER_ACTION_CONTINUE 0x2 682 #define CXL_FW_XFER_ACTION_END 0x3 683 #define CXL_FW_XFER_ACTION_ABORT 0x4 684 685 static CXLRetCode cmd_firmware_update_transfer(const struct cxl_cmd *cmd, 686 uint8_t *payload_in, 687 size_t len, 688 uint8_t *payload_out, 689 size_t *len_out, 690 CXLCCI *cci) 691 { 692 struct { 693 uint8_t action; 694 uint8_t slot; 695 uint8_t rsvd1[2]; 696 uint32_t offset; 697 uint8_t rsvd2[0x78]; 698 uint8_t data[]; 699 } QEMU_PACKED *fw_transfer = (void *)payload_in; 700 size_t offset, length; 701 702 if (fw_transfer->action == CXL_FW_XFER_ACTION_ABORT) { 703 /* 704 * At this point there aren't any on-going transfers 705 * running in the bg - this is serialized before this 706 * call altogether. Just mark the state machine and 707 * disregard any other input. 708 */ 709 cci->fw.transferring = false; 710 return CXL_MBOX_SUCCESS; 711 } 712 713 offset = fw_transfer->offset * CXL_FW_XFER_ALIGNMENT; 714 length = len - sizeof(*fw_transfer); 715 if (offset + length > CXL_FW_SIZE) { 716 return CXL_MBOX_INVALID_INPUT; 717 } 718 719 if (cci->fw.transferring) { 720 if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL || 721 fw_transfer->action == CXL_FW_XFER_ACTION_INIT) { 722 return CXL_MBOX_FW_XFER_IN_PROGRESS; 723 } 724 /* 725 * Abort partitioned package transfer if over 30 secs 726 * between parts. As opposed to the explicit ABORT action, 727 * semantically treat this condition as an error - as 728 * if a part action were passed without a previous INIT. 729 */ 730 if (difftime(time(NULL), cci->fw.last_partxfer) > 30.0) { 731 cci->fw.transferring = false; 732 return CXL_MBOX_INVALID_INPUT; 733 } 734 } else if (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE || 735 fw_transfer->action == CXL_FW_XFER_ACTION_END) { 736 return CXL_MBOX_INVALID_INPUT; 737 } 738 739 /* allow back-to-back retransmission */ 740 if ((offset != cci->fw.prev_offset || length != cci->fw.prev_len) && 741 (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE || 742 fw_transfer->action == CXL_FW_XFER_ACTION_END)) { 743 /* verify no overlaps */ 744 if (offset < cci->fw.prev_offset + cci->fw.prev_len) { 745 return CXL_MBOX_FW_XFER_OUT_OF_ORDER; 746 } 747 } 748 749 switch (fw_transfer->action) { 750 case CXL_FW_XFER_ACTION_FULL: /* ignores offset */ 751 case CXL_FW_XFER_ACTION_END: 752 if (fw_transfer->slot == 0 || 753 fw_transfer->slot == cci->fw.active_slot || 754 fw_transfer->slot > CXL_FW_SLOTS) { 755 return CXL_MBOX_FW_INVALID_SLOT; 756 } 757 758 /* mark the slot used upon bg completion */ 759 break; 760 case CXL_FW_XFER_ACTION_INIT: 761 if (offset != 0) { 762 return CXL_MBOX_INVALID_INPUT; 763 } 764 765 cci->fw.transferring = true; 766 cci->fw.prev_offset = offset; 767 cci->fw.prev_len = length; 768 break; 769 case CXL_FW_XFER_ACTION_CONTINUE: 770 cci->fw.prev_offset = offset; 771 cci->fw.prev_len = length; 772 break; 773 default: 774 return CXL_MBOX_INVALID_INPUT; 775 } 776 777 if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL) { 778 cci->bg.runtime = 10 * 1000UL; 779 } else { 780 cci->bg.runtime = 2 * 1000UL; 781 } 782 /* keep relevant context for bg completion */ 783 cci->fw.curr_action = fw_transfer->action; 784 cci->fw.curr_slot = fw_transfer->slot; 785 *len_out = 0; 786 787 return CXL_MBOX_BG_STARTED; 788 } 789 790 static void __do_firmware_xfer(CXLCCI *cci) 791 { 792 switch (cci->fw.curr_action) { 793 case CXL_FW_XFER_ACTION_FULL: 794 case CXL_FW_XFER_ACTION_END: 795 cci->fw.slot[cci->fw.curr_slot - 1] = true; 796 cci->fw.transferring = false; 797 break; 798 case CXL_FW_XFER_ACTION_INIT: 799 case CXL_FW_XFER_ACTION_CONTINUE: 800 time(&cci->fw.last_partxfer); 801 break; 802 default: 803 break; 804 } 805 } 806 807 /* CXL r3.1 section 8.2.9.3.3: Activate FW (Opcode 0202h) */ 808 static CXLRetCode cmd_firmware_update_activate(const struct cxl_cmd *cmd, 809 uint8_t *payload_in, 810 size_t len, 811 uint8_t *payload_out, 812 size_t *len_out, 813 CXLCCI *cci) 814 { 815 struct { 816 uint8_t action; 817 uint8_t slot; 818 } QEMU_PACKED *fw_activate = (void *)payload_in; 819 QEMU_BUILD_BUG_ON(sizeof(*fw_activate) != 0x2); 820 821 if (fw_activate->slot == 0 || 822 fw_activate->slot == cci->fw.active_slot || 823 fw_activate->slot > CXL_FW_SLOTS) { 824 return CXL_MBOX_FW_INVALID_SLOT; 825 } 826 827 /* ensure that an actual fw package is there */ 828 if (!cci->fw.slot[fw_activate->slot - 1]) { 829 return CXL_MBOX_FW_INVALID_SLOT; 830 } 831 832 switch (fw_activate->action) { 833 case 0: /* online */ 834 cci->fw.active_slot = fw_activate->slot; 835 break; 836 case 1: /* reset */ 837 cci->fw.staged_slot = fw_activate->slot; 838 break; 839 default: 840 return CXL_MBOX_INVALID_INPUT; 841 } 842 843 return CXL_MBOX_SUCCESS; 844 } 845 846 /* CXL r3.1 Section 8.2.9.4.1: Get Timestamp (Opcode 0300h) */ 847 static CXLRetCode cmd_timestamp_get(const struct cxl_cmd *cmd, 848 uint8_t *payload_in, 849 size_t len_in, 850 uint8_t *payload_out, 851 size_t *len_out, 852 CXLCCI *cci) 853 { 854 CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate; 855 uint64_t final_time = cxl_device_get_timestamp(cxl_dstate); 856 857 stq_le_p(payload_out, final_time); 858 *len_out = 8; 859 860 return CXL_MBOX_SUCCESS; 861 } 862 863 /* CXL r3.1 Section 8.2.9.4.2: Set Timestamp (Opcode 0301h) */ 864 static CXLRetCode cmd_timestamp_set(const struct cxl_cmd *cmd, 865 uint8_t *payload_in, 866 size_t len_in, 867 uint8_t *payload_out, 868 size_t *len_out, 869 CXLCCI *cci) 870 { 871 CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate; 872 873 cxl_dstate->timestamp.set = true; 874 cxl_dstate->timestamp.last_set = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); 875 876 cxl_dstate->timestamp.host_set = le64_to_cpu(*(uint64_t *)payload_in); 877 878 *len_out = 0; 879 return CXL_MBOX_SUCCESS; 880 } 881 882 /* CXL r3.1 Section 8.2.9.5.2.1: Command Effects Log (CEL) */ 883 static const QemuUUID cel_uuid = { 884 .data = UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79, 885 0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17) 886 }; 887 888 /* CXL r3.1 Section 8.2.9.5.1: Get Supported Logs (Opcode 0400h) */ 889 static CXLRetCode cmd_logs_get_supported(const struct cxl_cmd *cmd, 890 uint8_t *payload_in, 891 size_t len_in, 892 uint8_t *payload_out, 893 size_t *len_out, 894 CXLCCI *cci) 895 { 896 struct { 897 uint16_t entries; 898 uint8_t rsvd[6]; 899 struct { 900 QemuUUID uuid; 901 uint32_t size; 902 } log_entries[1]; 903 } QEMU_PACKED *supported_logs = (void *)payload_out; 904 QEMU_BUILD_BUG_ON(sizeof(*supported_logs) != 0x1c); 905 906 supported_logs->entries = 1; 907 supported_logs->log_entries[0].uuid = cel_uuid; 908 supported_logs->log_entries[0].size = 4 * cci->cel_size; 909 910 *len_out = sizeof(*supported_logs); 911 return CXL_MBOX_SUCCESS; 912 } 913 914 /* CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h) */ 915 static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd, 916 uint8_t *payload_in, 917 size_t len_in, 918 uint8_t *payload_out, 919 size_t *len_out, 920 CXLCCI *cci) 921 { 922 struct { 923 QemuUUID uuid; 924 uint32_t offset; 925 uint32_t length; 926 } QEMU_PACKED QEMU_ALIGNED(16) *get_log; 927 928 get_log = (void *)payload_in; 929 930 /* 931 * CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h) 932 * The device shall return Invalid Input if the Offset or Length 933 * fields attempt to access beyond the size of the log as reported by Get 934 * Supported Logs. 935 * 936 * The CEL buffer is large enough to fit all commands in the emulation, so 937 * the only possible failure would be if the mailbox itself isn't big 938 * enough. 939 */ 940 if (get_log->offset + get_log->length > cci->payload_max) { 941 return CXL_MBOX_INVALID_INPUT; 942 } 943 944 if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) { 945 return CXL_MBOX_INVALID_LOG; 946 } 947 948 /* Store off everything to local variables so we can wipe out the payload */ 949 *len_out = get_log->length; 950 951 memmove(payload_out, cci->cel_log + get_log->offset, get_log->length); 952 953 return CXL_MBOX_SUCCESS; 954 } 955 956 /* CXL r3.1 section 8.2.9.6: Features */ 957 /* 958 * Get Supported Features output payload 959 * CXL r3.1 section 8.2.9.6.1 Table 8-96 960 */ 961 typedef struct CXLSupportedFeatureHeader { 962 uint16_t entries; 963 uint16_t nsuppfeats_dev; 964 uint32_t reserved; 965 } QEMU_PACKED CXLSupportedFeatureHeader; 966 967 /* 968 * Get Supported Features Supported Feature Entry 969 * CXL r3.1 section 8.2.9.6.1 Table 8-97 970 */ 971 typedef struct CXLSupportedFeatureEntry { 972 QemuUUID uuid; 973 uint16_t feat_index; 974 uint16_t get_feat_size; 975 uint16_t set_feat_size; 976 uint32_t attr_flags; 977 uint8_t get_feat_version; 978 uint8_t set_feat_version; 979 uint16_t set_feat_effects; 980 uint8_t rsvd[18]; 981 } QEMU_PACKED CXLSupportedFeatureEntry; 982 983 /* 984 * Get Supported Features Supported Feature Entry 985 * CXL rev 3.1 section 8.2.9.6.1 Table 8-97 986 */ 987 /* Supported Feature Entry : attribute flags */ 988 #define CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE BIT(0) 989 #define CXL_FEAT_ENTRY_ATTR_FLAG_DEEPEST_RESET_PERSISTENCE_MASK GENMASK(3, 1) 990 #define CXL_FEAT_ENTRY_ATTR_FLAG_PERSIST_ACROSS_FIRMWARE_UPDATE BIT(4) 991 #define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_DEFAULT_SELECTION BIT(5) 992 #define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_SAVED_SELECTION BIT(6) 993 994 /* Supported Feature Entry : set feature effects */ 995 #define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_COLD_RESET BIT(0) 996 #define CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE BIT(1) 997 #define CXL_FEAT_ENTRY_SFE_IMMEDIATE_DATA_CHANGE BIT(2) 998 #define CXL_FEAT_ENTRY_SFE_IMMEDIATE_POLICY_CHANGE BIT(3) 999 #define CXL_FEAT_ENTRY_SFE_IMMEDIATE_LOG_CHANGE BIT(4) 1000 #define CXL_FEAT_ENTRY_SFE_SECURITY_STATE_CHANGE BIT(5) 1001 #define CXL_FEAT_ENTRY_SFE_BACKGROUND_OPERATION BIT(6) 1002 #define CXL_FEAT_ENTRY_SFE_SUPPORT_SECONDARY_MAILBOX BIT(7) 1003 #define CXL_FEAT_ENTRY_SFE_SUPPORT_ABORT_BACKGROUND_OPERATION BIT(8) 1004 #define CXL_FEAT_ENTRY_SFE_CEL_VALID BIT(9) 1005 #define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CONV_RESET BIT(10) 1006 #define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CXL_RESET BIT(11) 1007 1008 enum CXL_SUPPORTED_FEATURES_LIST { 1009 CXL_FEATURE_PATROL_SCRUB = 0, 1010 CXL_FEATURE_ECS, 1011 CXL_FEATURE_MAX 1012 }; 1013 1014 /* Get Feature CXL 3.1 Spec 8.2.9.6.2 */ 1015 /* 1016 * Get Feature input payload 1017 * CXL r3.1 section 8.2.9.6.2 Table 8-99 1018 */ 1019 /* Get Feature : Payload in selection */ 1020 enum CXL_GET_FEATURE_SELECTION { 1021 CXL_GET_FEATURE_SEL_CURRENT_VALUE, 1022 CXL_GET_FEATURE_SEL_DEFAULT_VALUE, 1023 CXL_GET_FEATURE_SEL_SAVED_VALUE, 1024 CXL_GET_FEATURE_SEL_MAX 1025 }; 1026 1027 /* Set Feature CXL 3.1 Spec 8.2.9.6.3 */ 1028 /* 1029 * Set Feature input payload 1030 * CXL r3.1 section 8.2.9.6.3 Table 8-101 1031 */ 1032 typedef struct CXLSetFeatureInHeader { 1033 QemuUUID uuid; 1034 uint32_t flags; 1035 uint16_t offset; 1036 uint8_t version; 1037 uint8_t rsvd[9]; 1038 } QEMU_PACKED QEMU_ALIGNED(16) CXLSetFeatureInHeader; 1039 1040 /* Set Feature : Payload in flags */ 1041 #define CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK 0x7 1042 enum CXL_SET_FEATURE_FLAG_DATA_TRANSFER { 1043 CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER, 1044 CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER, 1045 CXL_SET_FEATURE_FLAG_CONTINUE_DATA_TRANSFER, 1046 CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER, 1047 CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER, 1048 CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MAX 1049 }; 1050 #define CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET BIT(3) 1051 1052 /* CXL r3.1 section 8.2.9.9.11.1: Device Patrol Scrub Control Feature */ 1053 static const QemuUUID patrol_scrub_uuid = { 1054 .data = UUID(0x96dad7d6, 0xfde8, 0x482b, 0xa7, 0x33, 1055 0x75, 0x77, 0x4e, 0x06, 0xdb, 0x8a) 1056 }; 1057 1058 typedef struct CXLMemPatrolScrubSetFeature { 1059 CXLSetFeatureInHeader hdr; 1060 CXLMemPatrolScrubWriteAttrs feat_data; 1061 } QEMU_PACKED QEMU_ALIGNED(16) CXLMemPatrolScrubSetFeature; 1062 1063 /* 1064 * CXL r3.1 section 8.2.9.9.11.2: 1065 * DDR5 Error Check Scrub (ECS) Control Feature 1066 */ 1067 static const QemuUUID ecs_uuid = { 1068 .data = UUID(0xe5b13f22, 0x2328, 0x4a14, 0xb8, 0xba, 1069 0xb9, 0x69, 0x1e, 0x89, 0x33, 0x86) 1070 }; 1071 1072 typedef struct CXLMemECSSetFeature { 1073 CXLSetFeatureInHeader hdr; 1074 CXLMemECSWriteAttrs feat_data[]; 1075 } QEMU_PACKED QEMU_ALIGNED(16) CXLMemECSSetFeature; 1076 1077 /* CXL r3.1 section 8.2.9.6.1: Get Supported Features (Opcode 0500h) */ 1078 static CXLRetCode cmd_features_get_supported(const struct cxl_cmd *cmd, 1079 uint8_t *payload_in, 1080 size_t len_in, 1081 uint8_t *payload_out, 1082 size_t *len_out, 1083 CXLCCI *cci) 1084 { 1085 struct { 1086 uint32_t count; 1087 uint16_t start_index; 1088 uint16_t reserved; 1089 } QEMU_PACKED QEMU_ALIGNED(16) * get_feats_in = (void *)payload_in; 1090 1091 struct { 1092 CXLSupportedFeatureHeader hdr; 1093 CXLSupportedFeatureEntry feat_entries[]; 1094 } QEMU_PACKED QEMU_ALIGNED(16) * get_feats_out = (void *)payload_out; 1095 uint16_t index, req_entries; 1096 uint16_t entry; 1097 1098 if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) { 1099 return CXL_MBOX_UNSUPPORTED; 1100 } 1101 if (get_feats_in->count < sizeof(CXLSupportedFeatureHeader) || 1102 get_feats_in->start_index >= CXL_FEATURE_MAX) { 1103 return CXL_MBOX_INVALID_INPUT; 1104 } 1105 1106 req_entries = (get_feats_in->count - 1107 sizeof(CXLSupportedFeatureHeader)) / 1108 sizeof(CXLSupportedFeatureEntry); 1109 req_entries = MIN(req_entries, 1110 (CXL_FEATURE_MAX - get_feats_in->start_index)); 1111 1112 for (entry = 0, index = get_feats_in->start_index; 1113 entry < req_entries; index++) { 1114 switch (index) { 1115 case CXL_FEATURE_PATROL_SCRUB: 1116 /* Fill supported feature entry for device patrol scrub control */ 1117 get_feats_out->feat_entries[entry++] = 1118 (struct CXLSupportedFeatureEntry) { 1119 .uuid = patrol_scrub_uuid, 1120 .feat_index = index, 1121 .get_feat_size = sizeof(CXLMemPatrolScrubReadAttrs), 1122 .set_feat_size = sizeof(CXLMemPatrolScrubWriteAttrs), 1123 .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE, 1124 .get_feat_version = CXL_MEMDEV_PS_GET_FEATURE_VERSION, 1125 .set_feat_version = CXL_MEMDEV_PS_SET_FEATURE_VERSION, 1126 .set_feat_effects = CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE | 1127 CXL_FEAT_ENTRY_SFE_CEL_VALID, 1128 }; 1129 break; 1130 case CXL_FEATURE_ECS: 1131 /* Fill supported feature entry for device DDR5 ECS control */ 1132 get_feats_out->feat_entries[entry++] = 1133 (struct CXLSupportedFeatureEntry) { 1134 .uuid = ecs_uuid, 1135 .feat_index = index, 1136 .get_feat_size = sizeof(CXLMemECSReadAttrs), 1137 .set_feat_size = sizeof(CXLMemECSWriteAttrs), 1138 .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE, 1139 .get_feat_version = CXL_ECS_GET_FEATURE_VERSION, 1140 .set_feat_version = CXL_ECS_SET_FEATURE_VERSION, 1141 .set_feat_effects = CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE | 1142 CXL_FEAT_ENTRY_SFE_CEL_VALID, 1143 }; 1144 break; 1145 default: 1146 __builtin_unreachable(); 1147 } 1148 } 1149 get_feats_out->hdr.nsuppfeats_dev = CXL_FEATURE_MAX; 1150 get_feats_out->hdr.entries = req_entries; 1151 *len_out = sizeof(CXLSupportedFeatureHeader) + 1152 req_entries * sizeof(CXLSupportedFeatureEntry); 1153 1154 return CXL_MBOX_SUCCESS; 1155 } 1156 1157 /* CXL r3.1 section 8.2.9.6.2: Get Feature (Opcode 0501h) */ 1158 static CXLRetCode cmd_features_get_feature(const struct cxl_cmd *cmd, 1159 uint8_t *payload_in, 1160 size_t len_in, 1161 uint8_t *payload_out, 1162 size_t *len_out, 1163 CXLCCI *cci) 1164 { 1165 struct { 1166 QemuUUID uuid; 1167 uint16_t offset; 1168 uint16_t count; 1169 uint8_t selection; 1170 } QEMU_PACKED QEMU_ALIGNED(16) * get_feature; 1171 uint16_t bytes_to_copy = 0; 1172 CXLType3Dev *ct3d; 1173 CXLSetFeatureInfo *set_feat_info; 1174 1175 if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) { 1176 return CXL_MBOX_UNSUPPORTED; 1177 } 1178 1179 ct3d = CXL_TYPE3(cci->d); 1180 get_feature = (void *)payload_in; 1181 1182 set_feat_info = &ct3d->set_feat_info; 1183 if (qemu_uuid_is_equal(&get_feature->uuid, &set_feat_info->uuid)) { 1184 return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS; 1185 } 1186 1187 if (get_feature->selection != CXL_GET_FEATURE_SEL_CURRENT_VALUE) { 1188 return CXL_MBOX_UNSUPPORTED; 1189 } 1190 if (get_feature->offset + get_feature->count > cci->payload_max) { 1191 return CXL_MBOX_INVALID_INPUT; 1192 } 1193 1194 if (qemu_uuid_is_equal(&get_feature->uuid, &patrol_scrub_uuid)) { 1195 if (get_feature->offset >= sizeof(CXLMemPatrolScrubReadAttrs)) { 1196 return CXL_MBOX_INVALID_INPUT; 1197 } 1198 bytes_to_copy = sizeof(CXLMemPatrolScrubReadAttrs) - 1199 get_feature->offset; 1200 bytes_to_copy = MIN(bytes_to_copy, get_feature->count); 1201 memcpy(payload_out, 1202 (uint8_t *)&ct3d->patrol_scrub_attrs + get_feature->offset, 1203 bytes_to_copy); 1204 } else if (qemu_uuid_is_equal(&get_feature->uuid, &ecs_uuid)) { 1205 if (get_feature->offset >= sizeof(CXLMemECSReadAttrs)) { 1206 return CXL_MBOX_INVALID_INPUT; 1207 } 1208 bytes_to_copy = sizeof(CXLMemECSReadAttrs) - get_feature->offset; 1209 bytes_to_copy = MIN(bytes_to_copy, get_feature->count); 1210 memcpy(payload_out, 1211 (uint8_t *)&ct3d->ecs_attrs + get_feature->offset, 1212 bytes_to_copy); 1213 } else { 1214 return CXL_MBOX_UNSUPPORTED; 1215 } 1216 1217 *len_out = bytes_to_copy; 1218 1219 return CXL_MBOX_SUCCESS; 1220 } 1221 1222 /* CXL r3.1 section 8.2.9.6.3: Set Feature (Opcode 0502h) */ 1223 static CXLRetCode cmd_features_set_feature(const struct cxl_cmd *cmd, 1224 uint8_t *payload_in, 1225 size_t len_in, 1226 uint8_t *payload_out, 1227 size_t *len_out, 1228 CXLCCI *cci) 1229 { 1230 CXLSetFeatureInHeader *hdr = (void *)payload_in; 1231 CXLMemPatrolScrubWriteAttrs *ps_write_attrs; 1232 CXLMemPatrolScrubSetFeature *ps_set_feature; 1233 CXLMemECSWriteAttrs *ecs_write_attrs; 1234 CXLMemECSSetFeature *ecs_set_feature; 1235 CXLSetFeatureInfo *set_feat_info; 1236 uint16_t bytes_to_copy = 0; 1237 uint8_t data_transfer_flag; 1238 CXLType3Dev *ct3d; 1239 uint16_t count; 1240 1241 1242 if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) { 1243 return CXL_MBOX_UNSUPPORTED; 1244 } 1245 ct3d = CXL_TYPE3(cci->d); 1246 set_feat_info = &ct3d->set_feat_info; 1247 1248 if (!qemu_uuid_is_null(&set_feat_info->uuid) && 1249 !qemu_uuid_is_equal(&hdr->uuid, &set_feat_info->uuid)) { 1250 return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS; 1251 } 1252 if (hdr->flags & CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET) { 1253 set_feat_info->data_saved_across_reset = true; 1254 } else { 1255 set_feat_info->data_saved_across_reset = false; 1256 } 1257 1258 data_transfer_flag = 1259 hdr->flags & CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK; 1260 if (data_transfer_flag == CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER) { 1261 set_feat_info->uuid = hdr->uuid; 1262 set_feat_info->data_size = 0; 1263 } 1264 set_feat_info->data_transfer_flag = data_transfer_flag; 1265 set_feat_info->data_offset = hdr->offset; 1266 bytes_to_copy = len_in - sizeof(CXLSetFeatureInHeader); 1267 1268 if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) { 1269 if (hdr->version != CXL_MEMDEV_PS_SET_FEATURE_VERSION) { 1270 return CXL_MBOX_UNSUPPORTED; 1271 } 1272 1273 ps_set_feature = (void *)payload_in; 1274 ps_write_attrs = &ps_set_feature->feat_data; 1275 memcpy((uint8_t *)&ct3d->patrol_scrub_wr_attrs + hdr->offset, 1276 ps_write_attrs, 1277 bytes_to_copy); 1278 set_feat_info->data_size += bytes_to_copy; 1279 1280 if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER || 1281 data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) { 1282 ct3d->patrol_scrub_attrs.scrub_cycle &= ~0xFF; 1283 ct3d->patrol_scrub_attrs.scrub_cycle |= 1284 ct3d->patrol_scrub_wr_attrs.scrub_cycle_hr & 0xFF; 1285 ct3d->patrol_scrub_attrs.scrub_flags &= ~0x1; 1286 ct3d->patrol_scrub_attrs.scrub_flags |= 1287 ct3d->patrol_scrub_wr_attrs.scrub_flags & 0x1; 1288 } 1289 } else if (qemu_uuid_is_equal(&hdr->uuid, 1290 &ecs_uuid)) { 1291 if (hdr->version != CXL_ECS_SET_FEATURE_VERSION) { 1292 return CXL_MBOX_UNSUPPORTED; 1293 } 1294 1295 ecs_set_feature = (void *)payload_in; 1296 ecs_write_attrs = ecs_set_feature->feat_data; 1297 memcpy((uint8_t *)&ct3d->ecs_wr_attrs + hdr->offset, 1298 ecs_write_attrs, 1299 bytes_to_copy); 1300 set_feat_info->data_size += bytes_to_copy; 1301 1302 if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER || 1303 data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) { 1304 ct3d->ecs_attrs.ecs_log_cap = ct3d->ecs_wr_attrs.ecs_log_cap; 1305 for (count = 0; count < CXL_ECS_NUM_MEDIA_FRUS; count++) { 1306 ct3d->ecs_attrs.fru_attrs[count].ecs_config = 1307 ct3d->ecs_wr_attrs.fru_attrs[count].ecs_config & 0x1F; 1308 } 1309 } 1310 } else { 1311 return CXL_MBOX_UNSUPPORTED; 1312 } 1313 1314 if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER || 1315 data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER || 1316 data_transfer_flag == CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER) { 1317 memset(&set_feat_info->uuid, 0, sizeof(QemuUUID)); 1318 if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) { 1319 memset(&ct3d->patrol_scrub_wr_attrs, 0, set_feat_info->data_size); 1320 } else if (qemu_uuid_is_equal(&hdr->uuid, &ecs_uuid)) { 1321 memset(&ct3d->ecs_wr_attrs, 0, set_feat_info->data_size); 1322 } 1323 set_feat_info->data_transfer_flag = 0; 1324 set_feat_info->data_saved_across_reset = false; 1325 set_feat_info->data_offset = 0; 1326 set_feat_info->data_size = 0; 1327 } 1328 1329 return CXL_MBOX_SUCCESS; 1330 } 1331 1332 /* CXL r3.1 Section 8.2.9.9.1.1: Identify Memory Device (Opcode 4000h) */ 1333 static CXLRetCode cmd_identify_memory_device(const struct cxl_cmd *cmd, 1334 uint8_t *payload_in, 1335 size_t len_in, 1336 uint8_t *payload_out, 1337 size_t *len_out, 1338 CXLCCI *cci) 1339 { 1340 struct { 1341 char fw_revision[0x10]; 1342 uint64_t total_capacity; 1343 uint64_t volatile_capacity; 1344 uint64_t persistent_capacity; 1345 uint64_t partition_align; 1346 uint16_t info_event_log_size; 1347 uint16_t warning_event_log_size; 1348 uint16_t failure_event_log_size; 1349 uint16_t fatal_event_log_size; 1350 uint32_t lsa_size; 1351 uint8_t poison_list_max_mer[3]; 1352 uint16_t inject_poison_limit; 1353 uint8_t poison_caps; 1354 uint8_t qos_telemetry_caps; 1355 uint16_t dc_event_log_size; 1356 } QEMU_PACKED *id; 1357 QEMU_BUILD_BUG_ON(sizeof(*id) != 0x45); 1358 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1359 CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); 1360 CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; 1361 1362 if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) || 1363 (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) || 1364 (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) { 1365 return CXL_MBOX_INTERNAL_ERROR; 1366 } 1367 1368 id = (void *)payload_out; 1369 1370 snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0); 1371 1372 stq_le_p(&id->total_capacity, 1373 cxl_dstate->static_mem_size / CXL_CAPACITY_MULTIPLIER); 1374 stq_le_p(&id->persistent_capacity, 1375 cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER); 1376 stq_le_p(&id->volatile_capacity, 1377 cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER); 1378 stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d)); 1379 /* 256 poison records */ 1380 st24_le_p(id->poison_list_max_mer, 256); 1381 /* No limit - so limited by main poison record limit */ 1382 stw_le_p(&id->inject_poison_limit, 0); 1383 stw_le_p(&id->dc_event_log_size, CXL_DC_EVENT_LOG_SIZE); 1384 1385 *len_out = sizeof(*id); 1386 return CXL_MBOX_SUCCESS; 1387 } 1388 1389 /* CXL r3.1 Section 8.2.9.9.2.1: Get Partition Info (Opcode 4100h) */ 1390 static CXLRetCode cmd_ccls_get_partition_info(const struct cxl_cmd *cmd, 1391 uint8_t *payload_in, 1392 size_t len_in, 1393 uint8_t *payload_out, 1394 size_t *len_out, 1395 CXLCCI *cci) 1396 { 1397 CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate; 1398 struct { 1399 uint64_t active_vmem; 1400 uint64_t active_pmem; 1401 uint64_t next_vmem; 1402 uint64_t next_pmem; 1403 } QEMU_PACKED *part_info = (void *)payload_out; 1404 QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20); 1405 CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate); 1406 1407 if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) || 1408 (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) || 1409 (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) { 1410 return CXL_MBOX_INTERNAL_ERROR; 1411 } 1412 1413 stq_le_p(&part_info->active_vmem, 1414 cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER); 1415 /* 1416 * When both next_vmem and next_pmem are 0, there is no pending change to 1417 * partitioning. 1418 */ 1419 stq_le_p(&part_info->next_vmem, 0); 1420 stq_le_p(&part_info->active_pmem, 1421 cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER); 1422 stq_le_p(&part_info->next_pmem, 0); 1423 1424 *len_out = sizeof(*part_info); 1425 return CXL_MBOX_SUCCESS; 1426 } 1427 1428 /* CXL r3.1 Section 8.2.9.9.2.3: Get LSA (Opcode 4102h) */ 1429 static CXLRetCode cmd_ccls_get_lsa(const struct cxl_cmd *cmd, 1430 uint8_t *payload_in, 1431 size_t len_in, 1432 uint8_t *payload_out, 1433 size_t *len_out, 1434 CXLCCI *cci) 1435 { 1436 struct { 1437 uint32_t offset; 1438 uint32_t length; 1439 } QEMU_PACKED *get_lsa; 1440 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1441 CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); 1442 uint64_t offset, length; 1443 1444 get_lsa = (void *)payload_in; 1445 offset = get_lsa->offset; 1446 length = get_lsa->length; 1447 1448 if (offset + length > cvc->get_lsa_size(ct3d)) { 1449 *len_out = 0; 1450 return CXL_MBOX_INVALID_INPUT; 1451 } 1452 1453 *len_out = cvc->get_lsa(ct3d, payload_out, length, offset); 1454 return CXL_MBOX_SUCCESS; 1455 } 1456 1457 /* CXL r3.1 Section 8.2.9.9.2.4: Set LSA (Opcode 4103h) */ 1458 static CXLRetCode cmd_ccls_set_lsa(const struct cxl_cmd *cmd, 1459 uint8_t *payload_in, 1460 size_t len_in, 1461 uint8_t *payload_out, 1462 size_t *len_out, 1463 CXLCCI *cci) 1464 { 1465 struct set_lsa_pl { 1466 uint32_t offset; 1467 uint32_t rsvd; 1468 uint8_t data[]; 1469 } QEMU_PACKED; 1470 struct set_lsa_pl *set_lsa_payload = (void *)payload_in; 1471 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1472 CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); 1473 const size_t hdr_len = offsetof(struct set_lsa_pl, data); 1474 1475 *len_out = 0; 1476 if (!len_in) { 1477 return CXL_MBOX_SUCCESS; 1478 } 1479 1480 if (set_lsa_payload->offset + len_in > cvc->get_lsa_size(ct3d) + hdr_len) { 1481 return CXL_MBOX_INVALID_INPUT; 1482 } 1483 len_in -= hdr_len; 1484 1485 cvc->set_lsa(ct3d, set_lsa_payload->data, len_in, set_lsa_payload->offset); 1486 return CXL_MBOX_SUCCESS; 1487 } 1488 1489 /* Perform the actual device zeroing */ 1490 static void __do_sanitization(CXLType3Dev *ct3d) 1491 { 1492 MemoryRegion *mr; 1493 1494 if (ct3d->hostvmem) { 1495 mr = host_memory_backend_get_memory(ct3d->hostvmem); 1496 if (mr) { 1497 void *hostmem = memory_region_get_ram_ptr(mr); 1498 memset(hostmem, 0, memory_region_size(mr)); 1499 } 1500 } 1501 1502 if (ct3d->hostpmem) { 1503 mr = host_memory_backend_get_memory(ct3d->hostpmem); 1504 if (mr) { 1505 void *hostmem = memory_region_get_ram_ptr(mr); 1506 memset(hostmem, 0, memory_region_size(mr)); 1507 } 1508 } 1509 if (ct3d->lsa) { 1510 mr = host_memory_backend_get_memory(ct3d->lsa); 1511 if (mr) { 1512 void *lsa = memory_region_get_ram_ptr(mr); 1513 memset(lsa, 0, memory_region_size(mr)); 1514 } 1515 } 1516 cxl_discard_all_event_records(&ct3d->cxl_dstate); 1517 } 1518 1519 /* 1520 * CXL r3.1 Section 8.2.9.9.5.1: Sanitize (Opcode 4400h) 1521 * 1522 * Once the Sanitize command has started successfully, the device shall be 1523 * placed in the media disabled state. If the command fails or is interrupted 1524 * by a reset or power failure, it shall remain in the media disabled state 1525 * until a successful Sanitize command has been completed. During this state: 1526 * 1527 * 1. Memory writes to the device will have no effect, and all memory reads 1528 * will return random values (no user data returned, even for locations that 1529 * the failed Sanitize operation didn’t sanitize yet). 1530 * 1531 * 2. Mailbox commands shall still be processed in the disabled state, except 1532 * that commands that access Sanitized areas shall fail with the Media Disabled 1533 * error code. 1534 */ 1535 static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd, 1536 uint8_t *payload_in, 1537 size_t len_in, 1538 uint8_t *payload_out, 1539 size_t *len_out, 1540 CXLCCI *cci) 1541 { 1542 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1543 uint64_t total_mem; /* in Mb */ 1544 int secs; 1545 1546 total_mem = (ct3d->cxl_dstate.vmem_size + ct3d->cxl_dstate.pmem_size) >> 20; 1547 if (total_mem <= 512) { 1548 secs = 4; 1549 } else if (total_mem <= 1024) { 1550 secs = 8; 1551 } else if (total_mem <= 2 * 1024) { 1552 secs = 15; 1553 } else if (total_mem <= 4 * 1024) { 1554 secs = 30; 1555 } else if (total_mem <= 8 * 1024) { 1556 secs = 60; 1557 } else if (total_mem <= 16 * 1024) { 1558 secs = 2 * 60; 1559 } else if (total_mem <= 32 * 1024) { 1560 secs = 4 * 60; 1561 } else if (total_mem <= 64 * 1024) { 1562 secs = 8 * 60; 1563 } else if (total_mem <= 128 * 1024) { 1564 secs = 15 * 60; 1565 } else if (total_mem <= 256 * 1024) { 1566 secs = 30 * 60; 1567 } else if (total_mem <= 512 * 1024) { 1568 secs = 60 * 60; 1569 } else if (total_mem <= 1024 * 1024) { 1570 secs = 120 * 60; 1571 } else { 1572 secs = 240 * 60; /* max 4 hrs */ 1573 } 1574 1575 /* EBUSY other bg cmds as of now */ 1576 cci->bg.runtime = secs * 1000UL; 1577 *len_out = 0; 1578 1579 cxl_dev_disable_media(&ct3d->cxl_dstate); 1580 1581 /* sanitize when done */ 1582 return CXL_MBOX_BG_STARTED; 1583 } 1584 1585 static CXLRetCode cmd_get_security_state(const struct cxl_cmd *cmd, 1586 uint8_t *payload_in, 1587 size_t len_in, 1588 uint8_t *payload_out, 1589 size_t *len_out, 1590 CXLCCI *cci) 1591 { 1592 uint32_t *state = (uint32_t *)payload_out; 1593 1594 *state = 0; 1595 *len_out = 4; 1596 return CXL_MBOX_SUCCESS; 1597 } 1598 1599 /* 1600 * CXL r3.1 Section 8.2.9.9.4.1: Get Poison List (Opcode 4300h) 1601 * 1602 * This is very inefficient, but good enough for now! 1603 * Also the payload will always fit, so no need to handle the MORE flag and 1604 * make this stateful. We may want to allow longer poison lists to aid 1605 * testing that kernel functionality. 1606 */ 1607 static CXLRetCode cmd_media_get_poison_list(const struct cxl_cmd *cmd, 1608 uint8_t *payload_in, 1609 size_t len_in, 1610 uint8_t *payload_out, 1611 size_t *len_out, 1612 CXLCCI *cci) 1613 { 1614 struct get_poison_list_pl { 1615 uint64_t pa; 1616 uint64_t length; 1617 } QEMU_PACKED; 1618 1619 struct get_poison_list_out_pl { 1620 uint8_t flags; 1621 uint8_t rsvd1; 1622 uint64_t overflow_timestamp; 1623 uint16_t count; 1624 uint8_t rsvd2[0x14]; 1625 struct { 1626 uint64_t addr; 1627 uint32_t length; 1628 uint32_t resv; 1629 } QEMU_PACKED records[]; 1630 } QEMU_PACKED; 1631 1632 struct get_poison_list_pl *in = (void *)payload_in; 1633 struct get_poison_list_out_pl *out = (void *)payload_out; 1634 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1635 uint16_t record_count = 0, i = 0; 1636 uint64_t query_start, query_length; 1637 CXLPoisonList *poison_list = &ct3d->poison_list; 1638 CXLPoison *ent; 1639 uint16_t out_pl_len; 1640 1641 query_start = ldq_le_p(&in->pa); 1642 /* 64 byte alignment required */ 1643 if (query_start & 0x3f) { 1644 return CXL_MBOX_INVALID_INPUT; 1645 } 1646 query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE; 1647 1648 QLIST_FOREACH(ent, poison_list, node) { 1649 /* Check for no overlap */ 1650 if (!ranges_overlap(ent->start, ent->length, 1651 query_start, query_length)) { 1652 continue; 1653 } 1654 record_count++; 1655 } 1656 out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]); 1657 assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE); 1658 1659 QLIST_FOREACH(ent, poison_list, node) { 1660 uint64_t start, stop; 1661 1662 /* Check for no overlap */ 1663 if (!ranges_overlap(ent->start, ent->length, 1664 query_start, query_length)) { 1665 continue; 1666 } 1667 1668 /* Deal with overlap */ 1669 start = MAX(ROUND_DOWN(ent->start, 64ull), query_start); 1670 stop = MIN(ROUND_DOWN(ent->start, 64ull) + ent->length, 1671 query_start + query_length); 1672 stq_le_p(&out->records[i].addr, start | (ent->type & 0x7)); 1673 stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE); 1674 i++; 1675 } 1676 if (ct3d->poison_list_overflowed) { 1677 out->flags = (1 << 1); 1678 stq_le_p(&out->overflow_timestamp, ct3d->poison_list_overflow_ts); 1679 } 1680 if (scan_media_running(cci)) { 1681 out->flags |= (1 << 2); 1682 } 1683 1684 stw_le_p(&out->count, record_count); 1685 *len_out = out_pl_len; 1686 return CXL_MBOX_SUCCESS; 1687 } 1688 1689 /* CXL r3.1 Section 8.2.9.9.4.2: Inject Poison (Opcode 4301h) */ 1690 static CXLRetCode cmd_media_inject_poison(const struct cxl_cmd *cmd, 1691 uint8_t *payload_in, 1692 size_t len_in, 1693 uint8_t *payload_out, 1694 size_t *len_out, 1695 CXLCCI *cci) 1696 { 1697 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1698 CXLPoisonList *poison_list = &ct3d->poison_list; 1699 CXLPoison *ent; 1700 struct inject_poison_pl { 1701 uint64_t dpa; 1702 }; 1703 struct inject_poison_pl *in = (void *)payload_in; 1704 uint64_t dpa = ldq_le_p(&in->dpa); 1705 CXLPoison *p; 1706 1707 QLIST_FOREACH(ent, poison_list, node) { 1708 if (dpa >= ent->start && 1709 dpa + CXL_CACHE_LINE_SIZE <= ent->start + ent->length) { 1710 return CXL_MBOX_SUCCESS; 1711 } 1712 } 1713 /* 1714 * Freeze the list if there is an on-going scan media operation. 1715 */ 1716 if (scan_media_running(cci)) { 1717 /* 1718 * XXX: Spec is ambiguous - is this case considered 1719 * a successful return despite not adding to the list? 1720 */ 1721 goto success; 1722 } 1723 1724 if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) { 1725 return CXL_MBOX_INJECT_POISON_LIMIT; 1726 } 1727 p = g_new0(CXLPoison, 1); 1728 1729 p->length = CXL_CACHE_LINE_SIZE; 1730 p->start = dpa; 1731 p->type = CXL_POISON_TYPE_INJECTED; 1732 1733 /* 1734 * Possible todo: Merge with existing entry if next to it and if same type 1735 */ 1736 QLIST_INSERT_HEAD(poison_list, p, node); 1737 ct3d->poison_list_cnt++; 1738 success: 1739 *len_out = 0; 1740 1741 return CXL_MBOX_SUCCESS; 1742 } 1743 1744 /* CXL r3.1 Section 8.2.9.9.4.3: Clear Poison (Opcode 4302h */ 1745 static CXLRetCode cmd_media_clear_poison(const struct cxl_cmd *cmd, 1746 uint8_t *payload_in, 1747 size_t len_in, 1748 uint8_t *payload_out, 1749 size_t *len_out, 1750 CXLCCI *cci) 1751 { 1752 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1753 CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; 1754 CXLPoisonList *poison_list = &ct3d->poison_list; 1755 CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); 1756 struct clear_poison_pl { 1757 uint64_t dpa; 1758 uint8_t data[64]; 1759 }; 1760 CXLPoison *ent; 1761 uint64_t dpa; 1762 1763 struct clear_poison_pl *in = (void *)payload_in; 1764 1765 dpa = ldq_le_p(&in->dpa); 1766 if (dpa + CXL_CACHE_LINE_SIZE > cxl_dstate->static_mem_size + 1767 ct3d->dc.total_capacity) { 1768 return CXL_MBOX_INVALID_PA; 1769 } 1770 1771 /* Clearing a region with no poison is not an error so always do so */ 1772 if (cvc->set_cacheline) { 1773 if (!cvc->set_cacheline(ct3d, dpa, in->data)) { 1774 return CXL_MBOX_INTERNAL_ERROR; 1775 } 1776 } 1777 1778 /* 1779 * Freeze the list if there is an on-going scan media operation. 1780 */ 1781 if (scan_media_running(cci)) { 1782 /* 1783 * XXX: Spec is ambiguous - is this case considered 1784 * a successful return despite not removing from the list? 1785 */ 1786 goto success; 1787 } 1788 1789 QLIST_FOREACH(ent, poison_list, node) { 1790 /* 1791 * Test for contained in entry. Simpler than general case 1792 * as clearing 64 bytes and entries 64 byte aligned 1793 */ 1794 if ((dpa >= ent->start) && (dpa < ent->start + ent->length)) { 1795 break; 1796 } 1797 } 1798 if (!ent) { 1799 goto success; 1800 } 1801 1802 QLIST_REMOVE(ent, node); 1803 ct3d->poison_list_cnt--; 1804 1805 if (dpa > ent->start) { 1806 CXLPoison *frag; 1807 /* Cannot overflow as replacing existing entry */ 1808 1809 frag = g_new0(CXLPoison, 1); 1810 1811 frag->start = ent->start; 1812 frag->length = dpa - ent->start; 1813 frag->type = ent->type; 1814 1815 QLIST_INSERT_HEAD(poison_list, frag, node); 1816 ct3d->poison_list_cnt++; 1817 } 1818 1819 if (dpa + CXL_CACHE_LINE_SIZE < ent->start + ent->length) { 1820 CXLPoison *frag; 1821 1822 if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) { 1823 cxl_set_poison_list_overflowed(ct3d); 1824 } else { 1825 frag = g_new0(CXLPoison, 1); 1826 1827 frag->start = dpa + CXL_CACHE_LINE_SIZE; 1828 frag->length = ent->start + ent->length - frag->start; 1829 frag->type = ent->type; 1830 QLIST_INSERT_HEAD(poison_list, frag, node); 1831 ct3d->poison_list_cnt++; 1832 } 1833 } 1834 /* Any fragments have been added, free original entry */ 1835 g_free(ent); 1836 success: 1837 *len_out = 0; 1838 1839 return CXL_MBOX_SUCCESS; 1840 } 1841 1842 /* 1843 * CXL r3.1 section 8.2.9.9.4.4: Get Scan Media Capabilities 1844 */ 1845 static CXLRetCode 1846 cmd_media_get_scan_media_capabilities(const struct cxl_cmd *cmd, 1847 uint8_t *payload_in, 1848 size_t len_in, 1849 uint8_t *payload_out, 1850 size_t *len_out, 1851 CXLCCI *cci) 1852 { 1853 struct get_scan_media_capabilities_pl { 1854 uint64_t pa; 1855 uint64_t length; 1856 } QEMU_PACKED; 1857 1858 struct get_scan_media_capabilities_out_pl { 1859 uint32_t estimated_runtime_ms; 1860 }; 1861 1862 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1863 CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; 1864 struct get_scan_media_capabilities_pl *in = (void *)payload_in; 1865 struct get_scan_media_capabilities_out_pl *out = (void *)payload_out; 1866 uint64_t query_start; 1867 uint64_t query_length; 1868 1869 query_start = ldq_le_p(&in->pa); 1870 /* 64 byte alignment required */ 1871 if (query_start & 0x3f) { 1872 return CXL_MBOX_INVALID_INPUT; 1873 } 1874 query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE; 1875 1876 if (query_start + query_length > cxl_dstate->static_mem_size) { 1877 return CXL_MBOX_INVALID_PA; 1878 } 1879 1880 /* 1881 * Just use 400 nanosecond access/read latency + 100 ns for 1882 * the cost of updating the poison list. For small enough 1883 * chunks return at least 1 ms. 1884 */ 1885 stl_le_p(&out->estimated_runtime_ms, 1886 MAX(1, query_length * (0.0005L / 64))); 1887 1888 *len_out = sizeof(*out); 1889 return CXL_MBOX_SUCCESS; 1890 } 1891 1892 static void __do_scan_media(CXLType3Dev *ct3d) 1893 { 1894 CXLPoison *ent; 1895 unsigned int results_cnt = 0; 1896 1897 QLIST_FOREACH(ent, &ct3d->scan_media_results, node) { 1898 results_cnt++; 1899 } 1900 1901 /* only scan media may clear the overflow */ 1902 if (ct3d->poison_list_overflowed && 1903 ct3d->poison_list_cnt == results_cnt) { 1904 cxl_clear_poison_list_overflowed(ct3d); 1905 } 1906 /* scan media has run since last conventional reset */ 1907 ct3d->scan_media_hasrun = true; 1908 } 1909 1910 /* 1911 * CXL r3.1 section 8.2.9.9.4.5: Scan Media 1912 */ 1913 static CXLRetCode cmd_media_scan_media(const struct cxl_cmd *cmd, 1914 uint8_t *payload_in, 1915 size_t len_in, 1916 uint8_t *payload_out, 1917 size_t *len_out, 1918 CXLCCI *cci) 1919 { 1920 struct scan_media_pl { 1921 uint64_t pa; 1922 uint64_t length; 1923 uint8_t flags; 1924 } QEMU_PACKED; 1925 1926 struct scan_media_pl *in = (void *)payload_in; 1927 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1928 CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; 1929 uint64_t query_start; 1930 uint64_t query_length; 1931 CXLPoison *ent, *next; 1932 1933 query_start = ldq_le_p(&in->pa); 1934 /* 64 byte alignment required */ 1935 if (query_start & 0x3f) { 1936 return CXL_MBOX_INVALID_INPUT; 1937 } 1938 query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE; 1939 1940 if (query_start + query_length > cxl_dstate->static_mem_size) { 1941 return CXL_MBOX_INVALID_PA; 1942 } 1943 if (ct3d->dc.num_regions && query_start + query_length >= 1944 cxl_dstate->static_mem_size + ct3d->dc.total_capacity) { 1945 return CXL_MBOX_INVALID_PA; 1946 } 1947 1948 if (in->flags == 0) { /* TODO */ 1949 qemu_log_mask(LOG_UNIMP, 1950 "Scan Media Event Log is unsupported\n"); 1951 } 1952 1953 /* any previous results are discarded upon a new Scan Media */ 1954 QLIST_FOREACH_SAFE(ent, &ct3d->scan_media_results, node, next) { 1955 QLIST_REMOVE(ent, node); 1956 g_free(ent); 1957 } 1958 1959 /* kill the poison list - it will be recreated */ 1960 if (ct3d->poison_list_overflowed) { 1961 QLIST_FOREACH_SAFE(ent, &ct3d->poison_list, node, next) { 1962 QLIST_REMOVE(ent, node); 1963 g_free(ent); 1964 ct3d->poison_list_cnt--; 1965 } 1966 } 1967 1968 /* 1969 * Scan the backup list and move corresponding entries 1970 * into the results list, updating the poison list 1971 * when possible. 1972 */ 1973 QLIST_FOREACH_SAFE(ent, &ct3d->poison_list_bkp, node, next) { 1974 CXLPoison *res; 1975 1976 if (ent->start >= query_start + query_length || 1977 ent->start + ent->length <= query_start) { 1978 continue; 1979 } 1980 1981 /* 1982 * If a Get Poison List cmd comes in while this 1983 * scan is being done, it will see the new complete 1984 * list, while setting the respective flag. 1985 */ 1986 if (ct3d->poison_list_cnt < CXL_POISON_LIST_LIMIT) { 1987 CXLPoison *p = g_new0(CXLPoison, 1); 1988 1989 p->start = ent->start; 1990 p->length = ent->length; 1991 p->type = ent->type; 1992 QLIST_INSERT_HEAD(&ct3d->poison_list, p, node); 1993 ct3d->poison_list_cnt++; 1994 } 1995 1996 res = g_new0(CXLPoison, 1); 1997 res->start = ent->start; 1998 res->length = ent->length; 1999 res->type = ent->type; 2000 QLIST_INSERT_HEAD(&ct3d->scan_media_results, res, node); 2001 2002 QLIST_REMOVE(ent, node); 2003 g_free(ent); 2004 } 2005 2006 cci->bg.runtime = MAX(1, query_length * (0.0005L / 64)); 2007 *len_out = 0; 2008 2009 return CXL_MBOX_BG_STARTED; 2010 } 2011 2012 /* 2013 * CXL r3.1 section 8.2.9.9.4.6: Get Scan Media Results 2014 */ 2015 static CXLRetCode cmd_media_get_scan_media_results(const struct cxl_cmd *cmd, 2016 uint8_t *payload_in, 2017 size_t len_in, 2018 uint8_t *payload_out, 2019 size_t *len_out, 2020 CXLCCI *cci) 2021 { 2022 struct get_scan_media_results_out_pl { 2023 uint64_t dpa_restart; 2024 uint64_t length; 2025 uint8_t flags; 2026 uint8_t rsvd1; 2027 uint16_t count; 2028 uint8_t rsvd2[0xc]; 2029 struct { 2030 uint64_t addr; 2031 uint32_t length; 2032 uint32_t resv; 2033 } QEMU_PACKED records[]; 2034 } QEMU_PACKED; 2035 2036 struct get_scan_media_results_out_pl *out = (void *)payload_out; 2037 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2038 CXLPoisonList *scan_media_results = &ct3d->scan_media_results; 2039 CXLPoison *ent, *next; 2040 uint16_t total_count = 0, record_count = 0, i = 0; 2041 uint16_t out_pl_len; 2042 2043 if (!ct3d->scan_media_hasrun) { 2044 return CXL_MBOX_UNSUPPORTED; 2045 } 2046 2047 /* 2048 * Calculate limits, all entries are within the same address range of the 2049 * last scan media call. 2050 */ 2051 QLIST_FOREACH(ent, scan_media_results, node) { 2052 size_t rec_size = record_count * sizeof(out->records[0]); 2053 2054 if (sizeof(*out) + rec_size < CXL_MAILBOX_MAX_PAYLOAD_SIZE) { 2055 record_count++; 2056 } 2057 total_count++; 2058 } 2059 2060 out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]); 2061 assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE); 2062 2063 memset(out, 0, out_pl_len); 2064 QLIST_FOREACH_SAFE(ent, scan_media_results, node, next) { 2065 uint64_t start, stop; 2066 2067 if (i == record_count) { 2068 break; 2069 } 2070 2071 start = ROUND_DOWN(ent->start, 64ull); 2072 stop = ROUND_DOWN(ent->start, 64ull) + ent->length; 2073 stq_le_p(&out->records[i].addr, start); 2074 stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE); 2075 i++; 2076 2077 /* consume the returning entry */ 2078 QLIST_REMOVE(ent, node); 2079 g_free(ent); 2080 } 2081 2082 stw_le_p(&out->count, record_count); 2083 if (total_count > record_count) { 2084 out->flags = (1 << 0); /* More Media Error Records */ 2085 } 2086 2087 *len_out = out_pl_len; 2088 return CXL_MBOX_SUCCESS; 2089 } 2090 2091 /* 2092 * CXL r3.1 section 8.2.9.9.9.1: Get Dynamic Capacity Configuration 2093 * (Opcode: 4800h) 2094 */ 2095 static CXLRetCode cmd_dcd_get_dyn_cap_config(const struct cxl_cmd *cmd, 2096 uint8_t *payload_in, 2097 size_t len_in, 2098 uint8_t *payload_out, 2099 size_t *len_out, 2100 CXLCCI *cci) 2101 { 2102 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2103 struct { 2104 uint8_t region_cnt; 2105 uint8_t start_rid; 2106 } QEMU_PACKED *in = (void *)payload_in; 2107 struct { 2108 uint8_t num_regions; 2109 uint8_t regions_returned; 2110 uint8_t rsvd1[6]; 2111 struct { 2112 uint64_t base; 2113 uint64_t decode_len; 2114 uint64_t region_len; 2115 uint64_t block_size; 2116 uint32_t dsmadhandle; 2117 uint8_t flags; 2118 uint8_t rsvd2[3]; 2119 } QEMU_PACKED records[]; 2120 } QEMU_PACKED *out = (void *)payload_out; 2121 struct { 2122 uint32_t num_extents_supported; 2123 uint32_t num_extents_available; 2124 uint32_t num_tags_supported; 2125 uint32_t num_tags_available; 2126 } QEMU_PACKED *extra_out; 2127 uint16_t record_count; 2128 uint16_t i; 2129 uint16_t out_pl_len; 2130 uint8_t start_rid; 2131 2132 start_rid = in->start_rid; 2133 if (start_rid >= ct3d->dc.num_regions) { 2134 return CXL_MBOX_INVALID_INPUT; 2135 } 2136 2137 record_count = MIN(ct3d->dc.num_regions - in->start_rid, in->region_cnt); 2138 2139 out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]); 2140 extra_out = (void *)(payload_out + out_pl_len); 2141 out_pl_len += sizeof(*extra_out); 2142 assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE); 2143 2144 out->num_regions = ct3d->dc.num_regions; 2145 out->regions_returned = record_count; 2146 for (i = 0; i < record_count; i++) { 2147 stq_le_p(&out->records[i].base, 2148 ct3d->dc.regions[start_rid + i].base); 2149 stq_le_p(&out->records[i].decode_len, 2150 ct3d->dc.regions[start_rid + i].decode_len / 2151 CXL_CAPACITY_MULTIPLIER); 2152 stq_le_p(&out->records[i].region_len, 2153 ct3d->dc.regions[start_rid + i].len); 2154 stq_le_p(&out->records[i].block_size, 2155 ct3d->dc.regions[start_rid + i].block_size); 2156 stl_le_p(&out->records[i].dsmadhandle, 2157 ct3d->dc.regions[start_rid + i].dsmadhandle); 2158 out->records[i].flags = ct3d->dc.regions[start_rid + i].flags; 2159 } 2160 /* 2161 * TODO: Assign values once extents and tags are introduced 2162 * to use. 2163 */ 2164 stl_le_p(&extra_out->num_extents_supported, CXL_NUM_EXTENTS_SUPPORTED); 2165 stl_le_p(&extra_out->num_extents_available, CXL_NUM_EXTENTS_SUPPORTED - 2166 ct3d->dc.total_extent_count); 2167 stl_le_p(&extra_out->num_tags_supported, CXL_NUM_TAGS_SUPPORTED); 2168 stl_le_p(&extra_out->num_tags_available, CXL_NUM_TAGS_SUPPORTED); 2169 2170 *len_out = out_pl_len; 2171 return CXL_MBOX_SUCCESS; 2172 } 2173 2174 /* 2175 * CXL r3.1 section 8.2.9.9.9.2: 2176 * Get Dynamic Capacity Extent List (Opcode 4801h) 2177 */ 2178 static CXLRetCode cmd_dcd_get_dyn_cap_ext_list(const struct cxl_cmd *cmd, 2179 uint8_t *payload_in, 2180 size_t len_in, 2181 uint8_t *payload_out, 2182 size_t *len_out, 2183 CXLCCI *cci) 2184 { 2185 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2186 struct { 2187 uint32_t extent_cnt; 2188 uint32_t start_extent_id; 2189 } QEMU_PACKED *in = (void *)payload_in; 2190 struct { 2191 uint32_t count; 2192 uint32_t total_extents; 2193 uint32_t generation_num; 2194 uint8_t rsvd[4]; 2195 CXLDCExtentRaw records[]; 2196 } QEMU_PACKED *out = (void *)payload_out; 2197 uint32_t start_extent_id = in->start_extent_id; 2198 CXLDCExtentList *extent_list = &ct3d->dc.extents; 2199 uint16_t record_count = 0, i = 0, record_done = 0; 2200 uint16_t out_pl_len, size; 2201 CXLDCExtent *ent; 2202 2203 if (start_extent_id > ct3d->dc.total_extent_count) { 2204 return CXL_MBOX_INVALID_INPUT; 2205 } 2206 2207 record_count = MIN(in->extent_cnt, 2208 ct3d->dc.total_extent_count - start_extent_id); 2209 size = CXL_MAILBOX_MAX_PAYLOAD_SIZE - sizeof(*out); 2210 record_count = MIN(record_count, size / sizeof(out->records[0])); 2211 out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]); 2212 2213 stl_le_p(&out->count, record_count); 2214 stl_le_p(&out->total_extents, ct3d->dc.total_extent_count); 2215 stl_le_p(&out->generation_num, ct3d->dc.ext_list_gen_seq); 2216 2217 if (record_count > 0) { 2218 CXLDCExtentRaw *out_rec = &out->records[record_done]; 2219 2220 QTAILQ_FOREACH(ent, extent_list, node) { 2221 if (i++ < start_extent_id) { 2222 continue; 2223 } 2224 stq_le_p(&out_rec->start_dpa, ent->start_dpa); 2225 stq_le_p(&out_rec->len, ent->len); 2226 memcpy(&out_rec->tag, ent->tag, 0x10); 2227 stw_le_p(&out_rec->shared_seq, ent->shared_seq); 2228 2229 record_done++; 2230 if (record_done == record_count) { 2231 break; 2232 } 2233 } 2234 } 2235 2236 *len_out = out_pl_len; 2237 return CXL_MBOX_SUCCESS; 2238 } 2239 2240 /* 2241 * Check whether any bit between addr[nr, nr+size) is set, 2242 * return true if any bit is set, otherwise return false 2243 */ 2244 bool test_any_bits_set(const unsigned long *addr, unsigned long nr, 2245 unsigned long size) 2246 { 2247 unsigned long res = find_next_bit(addr, size + nr, nr); 2248 2249 return res < nr + size; 2250 } 2251 2252 CXLDCRegion *cxl_find_dc_region(CXLType3Dev *ct3d, uint64_t dpa, uint64_t len) 2253 { 2254 int i; 2255 CXLDCRegion *region = &ct3d->dc.regions[0]; 2256 2257 if (dpa < region->base || 2258 dpa >= region->base + ct3d->dc.total_capacity) { 2259 return NULL; 2260 } 2261 2262 /* 2263 * CXL r3.1 section 9.13.3: Dynamic Capacity Device (DCD) 2264 * 2265 * Regions are used in increasing-DPA order, with Region 0 being used for 2266 * the lowest DPA of Dynamic Capacity and Region 7 for the highest DPA. 2267 * So check from the last region to find where the dpa belongs. Extents that 2268 * cross multiple regions are not allowed. 2269 */ 2270 for (i = ct3d->dc.num_regions - 1; i >= 0; i--) { 2271 region = &ct3d->dc.regions[i]; 2272 if (dpa >= region->base) { 2273 if (dpa + len > region->base + region->len) { 2274 return NULL; 2275 } 2276 return region; 2277 } 2278 } 2279 2280 return NULL; 2281 } 2282 2283 void cxl_insert_extent_to_extent_list(CXLDCExtentList *list, 2284 uint64_t dpa, 2285 uint64_t len, 2286 uint8_t *tag, 2287 uint16_t shared_seq) 2288 { 2289 CXLDCExtent *extent; 2290 2291 extent = g_new0(CXLDCExtent, 1); 2292 extent->start_dpa = dpa; 2293 extent->len = len; 2294 if (tag) { 2295 memcpy(extent->tag, tag, 0x10); 2296 } 2297 extent->shared_seq = shared_seq; 2298 2299 QTAILQ_INSERT_TAIL(list, extent, node); 2300 } 2301 2302 void cxl_remove_extent_from_extent_list(CXLDCExtentList *list, 2303 CXLDCExtent *extent) 2304 { 2305 QTAILQ_REMOVE(list, extent, node); 2306 g_free(extent); 2307 } 2308 2309 /* 2310 * Add a new extent to the extent "group" if group exists; 2311 * otherwise, create a new group 2312 * Return value: the extent group where the extent is inserted. 2313 */ 2314 CXLDCExtentGroup *cxl_insert_extent_to_extent_group(CXLDCExtentGroup *group, 2315 uint64_t dpa, 2316 uint64_t len, 2317 uint8_t *tag, 2318 uint16_t shared_seq) 2319 { 2320 if (!group) { 2321 group = g_new0(CXLDCExtentGroup, 1); 2322 QTAILQ_INIT(&group->list); 2323 } 2324 cxl_insert_extent_to_extent_list(&group->list, dpa, len, 2325 tag, shared_seq); 2326 return group; 2327 } 2328 2329 void cxl_extent_group_list_insert_tail(CXLDCExtentGroupList *list, 2330 CXLDCExtentGroup *group) 2331 { 2332 QTAILQ_INSERT_TAIL(list, group, node); 2333 } 2334 2335 void cxl_extent_group_list_delete_front(CXLDCExtentGroupList *list) 2336 { 2337 CXLDCExtent *ent, *ent_next; 2338 CXLDCExtentGroup *group = QTAILQ_FIRST(list); 2339 2340 QTAILQ_REMOVE(list, group, node); 2341 QTAILQ_FOREACH_SAFE(ent, &group->list, node, ent_next) { 2342 cxl_remove_extent_from_extent_list(&group->list, ent); 2343 } 2344 g_free(group); 2345 } 2346 2347 /* 2348 * CXL r3.1 Table 8-168: Add Dynamic Capacity Response Input Payload 2349 * CXL r3.1 Table 8-170: Release Dynamic Capacity Input Payload 2350 */ 2351 typedef struct CXLUpdateDCExtentListInPl { 2352 uint32_t num_entries_updated; 2353 uint8_t flags; 2354 uint8_t rsvd[3]; 2355 /* CXL r3.1 Table 8-169: Updated Extent */ 2356 struct { 2357 uint64_t start_dpa; 2358 uint64_t len; 2359 uint8_t rsvd[8]; 2360 } QEMU_PACKED updated_entries[]; 2361 } QEMU_PACKED CXLUpdateDCExtentListInPl; 2362 2363 /* 2364 * For the extents in the extent list to operate, check whether they are valid 2365 * 1. The extent should be in the range of a valid DC region; 2366 * 2. The extent should not cross multiple regions; 2367 * 3. The start DPA and the length of the extent should align with the block 2368 * size of the region; 2369 * 4. The address range of multiple extents in the list should not overlap. 2370 */ 2371 static CXLRetCode cxl_detect_malformed_extent_list(CXLType3Dev *ct3d, 2372 const CXLUpdateDCExtentListInPl *in) 2373 { 2374 uint64_t min_block_size = UINT64_MAX; 2375 CXLDCRegion *region; 2376 CXLDCRegion *lastregion = &ct3d->dc.regions[ct3d->dc.num_regions - 1]; 2377 g_autofree unsigned long *blk_bitmap = NULL; 2378 uint64_t dpa, len; 2379 uint32_t i; 2380 2381 for (i = 0; i < ct3d->dc.num_regions; i++) { 2382 region = &ct3d->dc.regions[i]; 2383 min_block_size = MIN(min_block_size, region->block_size); 2384 } 2385 2386 blk_bitmap = bitmap_new((lastregion->base + lastregion->len - 2387 ct3d->dc.regions[0].base) / min_block_size); 2388 2389 for (i = 0; i < in->num_entries_updated; i++) { 2390 dpa = in->updated_entries[i].start_dpa; 2391 len = in->updated_entries[i].len; 2392 2393 region = cxl_find_dc_region(ct3d, dpa, len); 2394 if (!region) { 2395 return CXL_MBOX_INVALID_PA; 2396 } 2397 2398 dpa -= ct3d->dc.regions[0].base; 2399 if (dpa % region->block_size || len % region->block_size) { 2400 return CXL_MBOX_INVALID_EXTENT_LIST; 2401 } 2402 /* the dpa range already covered by some other extents in the list */ 2403 if (test_any_bits_set(blk_bitmap, dpa / min_block_size, 2404 len / min_block_size)) { 2405 return CXL_MBOX_INVALID_EXTENT_LIST; 2406 } 2407 bitmap_set(blk_bitmap, dpa / min_block_size, len / min_block_size); 2408 } 2409 2410 return CXL_MBOX_SUCCESS; 2411 } 2412 2413 static CXLRetCode cxl_dcd_add_dyn_cap_rsp_dry_run(CXLType3Dev *ct3d, 2414 const CXLUpdateDCExtentListInPl *in) 2415 { 2416 uint32_t i; 2417 CXLDCExtent *ent; 2418 CXLDCExtentGroup *ext_group; 2419 uint64_t dpa, len; 2420 Range range1, range2; 2421 2422 for (i = 0; i < in->num_entries_updated; i++) { 2423 dpa = in->updated_entries[i].start_dpa; 2424 len = in->updated_entries[i].len; 2425 2426 range_init_nofail(&range1, dpa, len); 2427 2428 /* 2429 * The host-accepted DPA range must be contained by the first extent 2430 * group in the pending list 2431 */ 2432 ext_group = QTAILQ_FIRST(&ct3d->dc.extents_pending); 2433 if (!cxl_extents_contains_dpa_range(&ext_group->list, dpa, len)) { 2434 return CXL_MBOX_INVALID_PA; 2435 } 2436 2437 /* to-be-added range should not overlap with range already accepted */ 2438 QTAILQ_FOREACH(ent, &ct3d->dc.extents, node) { 2439 range_init_nofail(&range2, ent->start_dpa, ent->len); 2440 if (range_overlaps_range(&range1, &range2)) { 2441 return CXL_MBOX_INVALID_PA; 2442 } 2443 } 2444 } 2445 return CXL_MBOX_SUCCESS; 2446 } 2447 2448 /* 2449 * CXL r3.1 section 8.2.9.9.9.3: Add Dynamic Capacity Response (Opcode 4802h) 2450 * An extent is added to the extent list and becomes usable only after the 2451 * response is processed successfully. 2452 */ 2453 static CXLRetCode cmd_dcd_add_dyn_cap_rsp(const struct cxl_cmd *cmd, 2454 uint8_t *payload_in, 2455 size_t len_in, 2456 uint8_t *payload_out, 2457 size_t *len_out, 2458 CXLCCI *cci) 2459 { 2460 CXLUpdateDCExtentListInPl *in = (void *)payload_in; 2461 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2462 CXLDCExtentList *extent_list = &ct3d->dc.extents; 2463 uint32_t i; 2464 uint64_t dpa, len; 2465 CXLRetCode ret; 2466 2467 if (in->num_entries_updated == 0) { 2468 cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending); 2469 return CXL_MBOX_SUCCESS; 2470 } 2471 2472 /* Adding extents causes exceeding device's extent tracking ability. */ 2473 if (in->num_entries_updated + ct3d->dc.total_extent_count > 2474 CXL_NUM_EXTENTS_SUPPORTED) { 2475 return CXL_MBOX_RESOURCES_EXHAUSTED; 2476 } 2477 2478 ret = cxl_detect_malformed_extent_list(ct3d, in); 2479 if (ret != CXL_MBOX_SUCCESS) { 2480 return ret; 2481 } 2482 2483 ret = cxl_dcd_add_dyn_cap_rsp_dry_run(ct3d, in); 2484 if (ret != CXL_MBOX_SUCCESS) { 2485 return ret; 2486 } 2487 2488 for (i = 0; i < in->num_entries_updated; i++) { 2489 dpa = in->updated_entries[i].start_dpa; 2490 len = in->updated_entries[i].len; 2491 2492 cxl_insert_extent_to_extent_list(extent_list, dpa, len, NULL, 0); 2493 ct3d->dc.total_extent_count += 1; 2494 ct3_set_region_block_backed(ct3d, dpa, len); 2495 } 2496 /* Remove the first extent group in the pending list */ 2497 cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending); 2498 2499 return CXL_MBOX_SUCCESS; 2500 } 2501 2502 /* 2503 * Copy extent list from src to dst 2504 * Return value: number of extents copied 2505 */ 2506 static uint32_t copy_extent_list(CXLDCExtentList *dst, 2507 const CXLDCExtentList *src) 2508 { 2509 uint32_t cnt = 0; 2510 CXLDCExtent *ent; 2511 2512 if (!dst || !src) { 2513 return 0; 2514 } 2515 2516 QTAILQ_FOREACH(ent, src, node) { 2517 cxl_insert_extent_to_extent_list(dst, ent->start_dpa, ent->len, 2518 ent->tag, ent->shared_seq); 2519 cnt++; 2520 } 2521 return cnt; 2522 } 2523 2524 static CXLRetCode cxl_dc_extent_release_dry_run(CXLType3Dev *ct3d, 2525 const CXLUpdateDCExtentListInPl *in, CXLDCExtentList *updated_list, 2526 uint32_t *updated_list_size) 2527 { 2528 CXLDCExtent *ent, *ent_next; 2529 uint64_t dpa, len; 2530 uint32_t i; 2531 int cnt_delta = 0; 2532 CXLRetCode ret = CXL_MBOX_SUCCESS; 2533 2534 QTAILQ_INIT(updated_list); 2535 copy_extent_list(updated_list, &ct3d->dc.extents); 2536 2537 for (i = 0; i < in->num_entries_updated; i++) { 2538 Range range; 2539 2540 dpa = in->updated_entries[i].start_dpa; 2541 len = in->updated_entries[i].len; 2542 2543 /* Check if the DPA range is not fully backed with valid extents */ 2544 if (!ct3_test_region_block_backed(ct3d, dpa, len)) { 2545 ret = CXL_MBOX_INVALID_PA; 2546 goto free_and_exit; 2547 } 2548 2549 /* After this point, extent overflow is the only error can happen */ 2550 while (len > 0) { 2551 QTAILQ_FOREACH(ent, updated_list, node) { 2552 range_init_nofail(&range, ent->start_dpa, ent->len); 2553 2554 if (range_contains(&range, dpa)) { 2555 uint64_t len1, len2 = 0, len_done = 0; 2556 uint64_t ent_start_dpa = ent->start_dpa; 2557 uint64_t ent_len = ent->len; 2558 2559 len1 = dpa - ent->start_dpa; 2560 /* Found the extent or the subset of an existing extent */ 2561 if (range_contains(&range, dpa + len - 1)) { 2562 len2 = ent_start_dpa + ent_len - dpa - len; 2563 } else { 2564 dpa = ent_start_dpa + ent_len; 2565 } 2566 len_done = ent_len - len1 - len2; 2567 2568 cxl_remove_extent_from_extent_list(updated_list, ent); 2569 cnt_delta--; 2570 2571 if (len1) { 2572 cxl_insert_extent_to_extent_list(updated_list, 2573 ent_start_dpa, 2574 len1, NULL, 0); 2575 cnt_delta++; 2576 } 2577 if (len2) { 2578 cxl_insert_extent_to_extent_list(updated_list, 2579 dpa + len, 2580 len2, NULL, 0); 2581 cnt_delta++; 2582 } 2583 2584 if (cnt_delta + ct3d->dc.total_extent_count > 2585 CXL_NUM_EXTENTS_SUPPORTED) { 2586 ret = CXL_MBOX_RESOURCES_EXHAUSTED; 2587 goto free_and_exit; 2588 } 2589 2590 len -= len_done; 2591 break; 2592 } 2593 } 2594 } 2595 } 2596 free_and_exit: 2597 if (ret != CXL_MBOX_SUCCESS) { 2598 QTAILQ_FOREACH_SAFE(ent, updated_list, node, ent_next) { 2599 cxl_remove_extent_from_extent_list(updated_list, ent); 2600 } 2601 *updated_list_size = 0; 2602 } else { 2603 *updated_list_size = ct3d->dc.total_extent_count + cnt_delta; 2604 } 2605 2606 return ret; 2607 } 2608 2609 /* 2610 * CXL r3.1 section 8.2.9.9.9.4: Release Dynamic Capacity (Opcode 4803h) 2611 */ 2612 static CXLRetCode cmd_dcd_release_dyn_cap(const struct cxl_cmd *cmd, 2613 uint8_t *payload_in, 2614 size_t len_in, 2615 uint8_t *payload_out, 2616 size_t *len_out, 2617 CXLCCI *cci) 2618 { 2619 CXLUpdateDCExtentListInPl *in = (void *)payload_in; 2620 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2621 CXLDCExtentList updated_list; 2622 CXLDCExtent *ent, *ent_next; 2623 uint32_t updated_list_size; 2624 CXLRetCode ret; 2625 2626 if (in->num_entries_updated == 0) { 2627 return CXL_MBOX_INVALID_INPUT; 2628 } 2629 2630 ret = cxl_detect_malformed_extent_list(ct3d, in); 2631 if (ret != CXL_MBOX_SUCCESS) { 2632 return ret; 2633 } 2634 2635 ret = cxl_dc_extent_release_dry_run(ct3d, in, &updated_list, 2636 &updated_list_size); 2637 if (ret != CXL_MBOX_SUCCESS) { 2638 return ret; 2639 } 2640 2641 /* 2642 * If the dry run release passes, the returned updated_list will 2643 * be the updated extent list and we just need to clear the extents 2644 * in the accepted list and copy extents in the updated_list to accepted 2645 * list and update the extent count; 2646 */ 2647 QTAILQ_FOREACH_SAFE(ent, &ct3d->dc.extents, node, ent_next) { 2648 ct3_clear_region_block_backed(ct3d, ent->start_dpa, ent->len); 2649 cxl_remove_extent_from_extent_list(&ct3d->dc.extents, ent); 2650 } 2651 copy_extent_list(&ct3d->dc.extents, &updated_list); 2652 QTAILQ_FOREACH_SAFE(ent, &updated_list, node, ent_next) { 2653 ct3_set_region_block_backed(ct3d, ent->start_dpa, ent->len); 2654 cxl_remove_extent_from_extent_list(&updated_list, ent); 2655 } 2656 ct3d->dc.total_extent_count = updated_list_size; 2657 2658 return CXL_MBOX_SUCCESS; 2659 } 2660 2661 static const struct cxl_cmd cxl_cmd_set[256][256] = { 2662 [EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS", 2663 cmd_events_get_records, 1, 0 }, 2664 [EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS", 2665 cmd_events_clear_records, ~0, CXL_MBOX_IMMEDIATE_LOG_CHANGE }, 2666 [EVENTS][GET_INTERRUPT_POLICY] = { "EVENTS_GET_INTERRUPT_POLICY", 2667 cmd_events_get_interrupt_policy, 0, 0 }, 2668 [EVENTS][SET_INTERRUPT_POLICY] = { "EVENTS_SET_INTERRUPT_POLICY", 2669 cmd_events_set_interrupt_policy, 2670 ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE }, 2671 [FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO", 2672 cmd_firmware_update_get_info, 0, 0 }, 2673 [FIRMWARE_UPDATE][TRANSFER] = { "FIRMWARE_UPDATE_TRANSFER", 2674 cmd_firmware_update_transfer, ~0, CXL_MBOX_BACKGROUND_OPERATION }, 2675 [FIRMWARE_UPDATE][ACTIVATE] = { "FIRMWARE_UPDATE_ACTIVATE", 2676 cmd_firmware_update_activate, 2, CXL_MBOX_BACKGROUND_OPERATION }, 2677 [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 }, 2678 [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 2679 8, CXL_MBOX_IMMEDIATE_POLICY_CHANGE }, 2680 [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 2681 0, 0 }, 2682 [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 }, 2683 [FEATURES][GET_SUPPORTED] = { "FEATURES_GET_SUPPORTED", 2684 cmd_features_get_supported, 0x8, 0 }, 2685 [FEATURES][GET_FEATURE] = { "FEATURES_GET_FEATURE", 2686 cmd_features_get_feature, 0x15, 0 }, 2687 [FEATURES][SET_FEATURE] = { "FEATURES_SET_FEATURE", 2688 cmd_features_set_feature, 2689 ~0, 2690 (CXL_MBOX_IMMEDIATE_CONFIG_CHANGE | 2691 CXL_MBOX_IMMEDIATE_DATA_CHANGE | 2692 CXL_MBOX_IMMEDIATE_POLICY_CHANGE | 2693 CXL_MBOX_IMMEDIATE_LOG_CHANGE | 2694 CXL_MBOX_SECURITY_STATE_CHANGE)}, 2695 [IDENTIFY][MEMORY_DEVICE] = { "IDENTIFY_MEMORY_DEVICE", 2696 cmd_identify_memory_device, 0, 0 }, 2697 [CCLS][GET_PARTITION_INFO] = { "CCLS_GET_PARTITION_INFO", 2698 cmd_ccls_get_partition_info, 0, 0 }, 2699 [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 }, 2700 [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa, 2701 ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE | CXL_MBOX_IMMEDIATE_DATA_CHANGE }, 2702 [SANITIZE][OVERWRITE] = { "SANITIZE_OVERWRITE", cmd_sanitize_overwrite, 0, 2703 (CXL_MBOX_IMMEDIATE_DATA_CHANGE | 2704 CXL_MBOX_SECURITY_STATE_CHANGE | 2705 CXL_MBOX_BACKGROUND_OPERATION)}, 2706 [PERSISTENT_MEM][GET_SECURITY_STATE] = { "GET_SECURITY_STATE", 2707 cmd_get_security_state, 0, 0 }, 2708 [MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST", 2709 cmd_media_get_poison_list, 16, 0 }, 2710 [MEDIA_AND_POISON][INJECT_POISON] = { "MEDIA_AND_POISON_INJECT_POISON", 2711 cmd_media_inject_poison, 8, 0 }, 2712 [MEDIA_AND_POISON][CLEAR_POISON] = { "MEDIA_AND_POISON_CLEAR_POISON", 2713 cmd_media_clear_poison, 72, 0 }, 2714 [MEDIA_AND_POISON][GET_SCAN_MEDIA_CAPABILITIES] = { 2715 "MEDIA_AND_POISON_GET_SCAN_MEDIA_CAPABILITIES", 2716 cmd_media_get_scan_media_capabilities, 16, 0 }, 2717 [MEDIA_AND_POISON][SCAN_MEDIA] = { "MEDIA_AND_POISON_SCAN_MEDIA", 2718 cmd_media_scan_media, 17, CXL_MBOX_BACKGROUND_OPERATION }, 2719 [MEDIA_AND_POISON][GET_SCAN_MEDIA_RESULTS] = { 2720 "MEDIA_AND_POISON_GET_SCAN_MEDIA_RESULTS", 2721 cmd_media_get_scan_media_results, 0, 0 }, 2722 }; 2723 2724 static const struct cxl_cmd cxl_cmd_set_dcd[256][256] = { 2725 [DCD_CONFIG][GET_DC_CONFIG] = { "DCD_GET_DC_CONFIG", 2726 cmd_dcd_get_dyn_cap_config, 2, 0 }, 2727 [DCD_CONFIG][GET_DYN_CAP_EXT_LIST] = { 2728 "DCD_GET_DYNAMIC_CAPACITY_EXTENT_LIST", cmd_dcd_get_dyn_cap_ext_list, 2729 8, 0 }, 2730 [DCD_CONFIG][ADD_DYN_CAP_RSP] = { 2731 "DCD_ADD_DYNAMIC_CAPACITY_RESPONSE", cmd_dcd_add_dyn_cap_rsp, 2732 ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE }, 2733 [DCD_CONFIG][RELEASE_DYN_CAP] = { 2734 "DCD_RELEASE_DYNAMIC_CAPACITY", cmd_dcd_release_dyn_cap, 2735 ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE }, 2736 }; 2737 2738 static const struct cxl_cmd cxl_cmd_set_sw[256][256] = { 2739 [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 }, 2740 [INFOSTAT][BACKGROUND_OPERATION_STATUS] = { "BACKGROUND_OPERATION_STATUS", 2741 cmd_infostat_bg_op_sts, 0, 0 }, 2742 [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 }, 2743 [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 8, 2744 CXL_MBOX_IMMEDIATE_POLICY_CHANGE }, 2745 [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0, 2746 0 }, 2747 [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 }, 2748 [PHYSICAL_SWITCH][IDENTIFY_SWITCH_DEVICE] = { "IDENTIFY_SWITCH_DEVICE", 2749 cmd_identify_switch_device, 0, 0 }, 2750 [PHYSICAL_SWITCH][GET_PHYSICAL_PORT_STATE] = { "SWITCH_PHYSICAL_PORT_STATS", 2751 cmd_get_physical_port_state, ~0, 0 }, 2752 [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND", 2753 cmd_tunnel_management_cmd, ~0, 0 }, 2754 }; 2755 2756 /* 2757 * While the command is executing in the background, the device should 2758 * update the percentage complete in the Background Command Status Register 2759 * at least once per second. 2760 */ 2761 2762 #define CXL_MBOX_BG_UPDATE_FREQ 1000UL 2763 2764 int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd, 2765 size_t len_in, uint8_t *pl_in, size_t *len_out, 2766 uint8_t *pl_out, bool *bg_started) 2767 { 2768 int ret; 2769 const struct cxl_cmd *cxl_cmd; 2770 opcode_handler h; 2771 CXLDeviceState *cxl_dstate; 2772 2773 *len_out = 0; 2774 cxl_cmd = &cci->cxl_cmd_set[set][cmd]; 2775 h = cxl_cmd->handler; 2776 if (!h) { 2777 qemu_log_mask(LOG_UNIMP, "Command %04xh not implemented\n", 2778 set << 8 | cmd); 2779 return CXL_MBOX_UNSUPPORTED; 2780 } 2781 2782 if (len_in != cxl_cmd->in && cxl_cmd->in != ~0) { 2783 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 2784 } 2785 2786 /* Only one bg command at a time */ 2787 if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) && 2788 cci->bg.runtime > 0) { 2789 return CXL_MBOX_BUSY; 2790 } 2791 2792 /* forbid any selected commands while the media is disabled */ 2793 if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) { 2794 cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate; 2795 2796 if (cxl_dev_media_disabled(cxl_dstate)) { 2797 if (h == cmd_events_get_records || 2798 h == cmd_ccls_get_partition_info || 2799 h == cmd_ccls_set_lsa || 2800 h == cmd_ccls_get_lsa || 2801 h == cmd_logs_get_log || 2802 h == cmd_media_get_poison_list || 2803 h == cmd_media_inject_poison || 2804 h == cmd_media_clear_poison || 2805 h == cmd_sanitize_overwrite || 2806 h == cmd_firmware_update_transfer || 2807 h == cmd_firmware_update_activate) { 2808 return CXL_MBOX_MEDIA_DISABLED; 2809 } 2810 } 2811 } 2812 2813 ret = (*h)(cxl_cmd, pl_in, len_in, pl_out, len_out, cci); 2814 if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) && 2815 ret == CXL_MBOX_BG_STARTED) { 2816 *bg_started = true; 2817 } else { 2818 *bg_started = false; 2819 } 2820 2821 /* Set bg and the return code */ 2822 if (*bg_started) { 2823 uint64_t now; 2824 2825 cci->bg.opcode = (set << 8) | cmd; 2826 2827 cci->bg.complete_pct = 0; 2828 cci->bg.ret_code = 0; 2829 2830 now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); 2831 cci->bg.starttime = now; 2832 timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ); 2833 } 2834 2835 return ret; 2836 } 2837 2838 static void bg_timercb(void *opaque) 2839 { 2840 CXLCCI *cci = opaque; 2841 uint64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); 2842 uint64_t total_time = cci->bg.starttime + cci->bg.runtime; 2843 2844 assert(cci->bg.runtime > 0); 2845 2846 if (now >= total_time) { /* we are done */ 2847 uint16_t ret = CXL_MBOX_SUCCESS; 2848 2849 cci->bg.complete_pct = 100; 2850 cci->bg.ret_code = ret; 2851 switch (cci->bg.opcode) { 2852 case 0x0201: /* fw transfer */ 2853 __do_firmware_xfer(cci); 2854 break; 2855 case 0x4400: /* sanitize */ 2856 { 2857 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2858 2859 __do_sanitization(ct3d); 2860 cxl_dev_enable_media(&ct3d->cxl_dstate); 2861 } 2862 break; 2863 case 0x4304: /* scan media */ 2864 { 2865 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2866 2867 __do_scan_media(ct3d); 2868 break; 2869 } 2870 default: 2871 __builtin_unreachable(); 2872 break; 2873 } 2874 } else { 2875 /* estimate only */ 2876 cci->bg.complete_pct = 2877 100 * (now - cci->bg.starttime) / cci->bg.runtime; 2878 timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ); 2879 } 2880 2881 if (cci->bg.complete_pct == 100) { 2882 /* TODO: generalize to switch CCI */ 2883 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2884 CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; 2885 PCIDevice *pdev = PCI_DEVICE(cci->d); 2886 2887 cci->bg.starttime = 0; 2888 /* registers are updated, allow new bg-capable cmds */ 2889 cci->bg.runtime = 0; 2890 2891 if (msix_enabled(pdev)) { 2892 msix_notify(pdev, cxl_dstate->mbox_msi_n); 2893 } else if (msi_enabled(pdev)) { 2894 msi_notify(pdev, cxl_dstate->mbox_msi_n); 2895 } 2896 } 2897 } 2898 2899 static void cxl_rebuild_cel(CXLCCI *cci) 2900 { 2901 cci->cel_size = 0; /* Reset for a fresh build */ 2902 for (int set = 0; set < 256; set++) { 2903 for (int cmd = 0; cmd < 256; cmd++) { 2904 if (cci->cxl_cmd_set[set][cmd].handler) { 2905 const struct cxl_cmd *c = &cci->cxl_cmd_set[set][cmd]; 2906 struct cel_log *log = 2907 &cci->cel_log[cci->cel_size]; 2908 2909 log->opcode = (set << 8) | cmd; 2910 log->effect = c->effect; 2911 cci->cel_size++; 2912 } 2913 } 2914 } 2915 } 2916 2917 void cxl_init_cci(CXLCCI *cci, size_t payload_max) 2918 { 2919 cci->payload_max = payload_max; 2920 cxl_rebuild_cel(cci); 2921 2922 cci->bg.complete_pct = 0; 2923 cci->bg.starttime = 0; 2924 cci->bg.runtime = 0; 2925 cci->bg.timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, 2926 bg_timercb, cci); 2927 2928 memset(&cci->fw, 0, sizeof(cci->fw)); 2929 cci->fw.active_slot = 1; 2930 cci->fw.slot[cci->fw.active_slot - 1] = true; 2931 } 2932 2933 static void cxl_copy_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmds)[256]) 2934 { 2935 for (int set = 0; set < 256; set++) { 2936 for (int cmd = 0; cmd < 256; cmd++) { 2937 if (cxl_cmds[set][cmd].handler) { 2938 cci->cxl_cmd_set[set][cmd] = cxl_cmds[set][cmd]; 2939 } 2940 } 2941 } 2942 } 2943 2944 void cxl_add_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmd_set)[256], 2945 size_t payload_max) 2946 { 2947 cci->payload_max = MAX(payload_max, cci->payload_max); 2948 cxl_copy_cci_commands(cci, cxl_cmd_set); 2949 cxl_rebuild_cel(cci); 2950 } 2951 2952 void cxl_initialize_mailbox_swcci(CXLCCI *cci, DeviceState *intf, 2953 DeviceState *d, size_t payload_max) 2954 { 2955 cxl_copy_cci_commands(cci, cxl_cmd_set_sw); 2956 cci->d = d; 2957 cci->intf = intf; 2958 cxl_init_cci(cci, payload_max); 2959 } 2960 2961 void cxl_initialize_mailbox_t3(CXLCCI *cci, DeviceState *d, size_t payload_max) 2962 { 2963 CXLType3Dev *ct3d = CXL_TYPE3(d); 2964 2965 cxl_copy_cci_commands(cci, cxl_cmd_set); 2966 if (ct3d->dc.num_regions) { 2967 cxl_copy_cci_commands(cci, cxl_cmd_set_dcd); 2968 } 2969 cci->d = d; 2970 2971 /* No separation for PCI MB as protocol handled in PCI device */ 2972 cci->intf = d; 2973 cxl_init_cci(cci, payload_max); 2974 } 2975 2976 static const struct cxl_cmd cxl_cmd_set_t3_ld[256][256] = { 2977 [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 }, 2978 [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0, 2979 0 }, 2980 [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 }, 2981 }; 2982 2983 void cxl_initialize_t3_ld_cci(CXLCCI *cci, DeviceState *d, DeviceState *intf, 2984 size_t payload_max) 2985 { 2986 cxl_copy_cci_commands(cci, cxl_cmd_set_t3_ld); 2987 cci->d = d; 2988 cci->intf = intf; 2989 cxl_init_cci(cci, payload_max); 2990 } 2991 2992 static const struct cxl_cmd cxl_cmd_set_t3_fm_owned_ld_mctp[256][256] = { 2993 [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0}, 2994 [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0, 2995 0 }, 2996 [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 }, 2997 [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 }, 2998 [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND", 2999 cmd_tunnel_management_cmd, ~0, 0 }, 3000 }; 3001 3002 void cxl_initialize_t3_fm_owned_ld_mctpcci(CXLCCI *cci, DeviceState *d, 3003 DeviceState *intf, 3004 size_t payload_max) 3005 { 3006 cxl_copy_cci_commands(cci, cxl_cmd_set_t3_fm_owned_ld_mctp); 3007 cci->d = d; 3008 cci->intf = intf; 3009 cxl_init_cci(cci, payload_max); 3010 } 3011