/*
 * CXL Utility library for mailbox interface
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include <math.h>

#include "qemu/osdep.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/cxl/cxl.h"
#include "hw/cxl/cxl_events.h"
#include "hw/cxl/cxl_mailbox.h"
#include "hw/pci/pci.h"
#include "hw/pci-bridge/cxl_upstream_port.h"
#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/log.h"
#include "qemu/units.h"
#include "qemu/uuid.h"
#include "system/hostmem.h"
#include "qemu/range.h"
#include "qapi/qapi-types-cxl.h"

#define CXL_CAPACITY_MULTIPLIER   (256 * MiB)
#define CXL_DC_EVENT_LOG_SIZE 8
#define CXL_NUM_TAGS_SUPPORTED 0
#define CXL_ALERTS_LIFE_USED_WARN_THRESH (1 << 0)
#define CXL_ALERTS_OVER_TEMP_WARN_THRESH (1 << 1)
#define CXL_ALERTS_UNDER_TEMP_WARN_THRESH (1 << 2)
#define CXL_ALERTS_COR_VMEM_ERR_WARN_THRESH (1 << 3)
#define CXL_ALERTS_COR_PMEM_ERR_WARN_THRESH (1 << 4)

/*
 * How to add a new command, for example the command set FOO with command BAR.
 *  1. Add the command set and cmd to the enum.
 *     FOO    = 0x7f,
 *          #define BAR 0
 *  2. Implement the handler, matching the signature used throughout this file.
 *    static CXLRetCode cmd_foo_bar(const struct cxl_cmd *cmd,
 *                                  uint8_t *payload_in, size_t len_in,
 *                                  uint8_t *payload_out, size_t *len_out,
 *                                  CXLCCI *cci)
 *    { ... return CXL_MBOX_SUCCESS; }
 *  3. Add the command to the cxl_cmd_set[][]
 *    [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y },
 *
 *  Writing the handler:
 *    The handler is given the &struct cxl_cmd, the input payload and its
 *    length, the output payload buffer, and the &CXLCCI. It is responsible
 *    for consuming the payload from payload_in and operating upon it as
 *    necessary. It must then fill the output data into payload_out, set
 *    *len_out accordingly, and return a valid return code.
 *
 * XXX: The handler need not worry about endianness. The payload is read out of
 * a register interface that already deals with it.
 */
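/*
 * A minimal sketch of such a handler, for illustration only (FOO, BAR and
 * cmd_foo_bar are hypothetical names, not wired into any command set here):
 *
 *   static CXLRetCode cmd_foo_bar(const struct cxl_cmd *cmd,
 *                                 uint8_t *payload_in, size_t len_in,
 *                                 uint8_t *payload_out, size_t *len_out,
 *                                 CXLCCI *cci)
 *   {
 *       struct {
 *           uint32_t some_field;
 *       } QEMU_PACKED *out = (void *)payload_out;
 *
 *       if (len_in != 0) {
 *           return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
 *       }
 *       stl_le_p(&out->some_field, 42);
 *       *len_out = sizeof(*out);
 *       return CXL_MBOX_SUCCESS;
 *   }
 */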
enum {
    INFOSTAT    = 0x00,
        #define IS_IDENTIFY   0x1
        #define BACKGROUND_OPERATION_STATUS    0x2
        #define GET_RESPONSE_MSG_LIMIT         0x3
        #define SET_RESPONSE_MSG_LIMIT         0x4
        #define BACKGROUND_OPERATION_ABORT     0x5
    EVENTS      = 0x01,
        #define GET_RECORDS   0x0
        #define CLEAR_RECORDS 0x1
        #define GET_INTERRUPT_POLICY 0x2
        #define SET_INTERRUPT_POLICY 0x3
    FIRMWARE_UPDATE = 0x02,
        #define GET_INFO 0x0
        #define TRANSFER 0x1
        #define ACTIVATE 0x2
    TIMESTAMP   = 0x03,
        #define GET 0x0
        #define SET 0x1
    LOGS        = 0x04,
        #define GET_SUPPORTED 0x0
        #define GET_LOG       0x1
    FEATURES    = 0x05,
        #define GET_SUPPORTED 0x0
        #define GET_FEATURE   0x1
        #define SET_FEATURE   0x2
    IDENTIFY    = 0x40,
        #define MEMORY_DEVICE 0x0
    CCLS        = 0x41,
        #define GET_PARTITION_INFO 0x0
        #define GET_LSA       0x2
        #define SET_LSA       0x3
    HEALTH_INFO_ALERTS = 0x42,
        #define GET_ALERT_CONFIG 0x1
        #define SET_ALERT_CONFIG 0x2
    SANITIZE    = 0x44,
        #define OVERWRITE     0x0
        #define SECURE_ERASE  0x1
        #define MEDIA_OPERATIONS 0x2
    PERSISTENT_MEM = 0x45,
        #define GET_SECURITY_STATE 0x0
    MEDIA_AND_POISON = 0x43,
        #define GET_POISON_LIST        0x0
        #define INJECT_POISON          0x1
        #define CLEAR_POISON           0x2
        #define GET_SCAN_MEDIA_CAPABILITIES 0x3
        #define SCAN_MEDIA             0x4
        #define GET_SCAN_MEDIA_RESULTS 0x5
    DCD_CONFIG  = 0x48,
        #define GET_DC_CONFIG          0x0
        #define GET_DYN_CAP_EXT_LIST   0x1
        #define ADD_DYN_CAP_RSP        0x2
        #define RELEASE_DYN_CAP        0x3
    PHYSICAL_SWITCH = 0x51,
        #define IDENTIFY_SWITCH_DEVICE      0x0
        #define GET_PHYSICAL_PORT_STATE     0x1
    TUNNEL = 0x53,
        #define MANAGEMENT_COMMAND     0x0
    FMAPI_DCD_MGMT = 0x56,
        #define GET_DCD_INFO 0x0
        #define GET_HOST_DC_REGION_CONFIG 0x1
        #define SET_DC_REGION_CONFIG 0x2
        #define GET_DC_REGION_EXTENT_LIST 0x3
        #define INITIATE_DC_ADD 0x4
};
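/*
 * Note: each [set][cmd] pair above maps directly onto the 16-bit opcode as
 * (set << 8) | cmd (see how cci->bg.opcode is split apart in
 * cmd_infostat_bg_op_abort() below). For example, EVENTS/GET_RECORDS is
 * opcode 0x0100 and PHYSICAL_SWITCH/GET_PHYSICAL_PORT_STATE is 0x5101.
 */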
/* CCI Message Format CXL r3.1 Figure 7-19 */
typedef struct CXLCCIMessage {
    uint8_t category;
#define CXL_CCI_CAT_REQ 0
#define CXL_CCI_CAT_RSP 1
    uint8_t tag;
    uint8_t resv1;
    uint8_t command;
    uint8_t command_set;
    uint8_t pl_length[3];
    uint16_t rc;
    uint16_t vendor_specific;
    uint8_t payload[];
} QEMU_PACKED CXLCCIMessage;

/* This command is only defined to an MLD FM Owned LD or an MHD */
static CXLRetCode cmd_tunnel_management_cmd(const struct cxl_cmd *cmd,
                                            uint8_t *payload_in,
                                            size_t len_in,
                                            uint8_t *payload_out,
                                            size_t *len_out,
                                            CXLCCI *cci)
{
    PCIDevice *tunnel_target;
    CXLCCI *target_cci;
    struct {
        uint8_t port_or_ld_id;
        uint8_t target_type;
        uint16_t size;
        CXLCCIMessage ccimessage;
    } QEMU_PACKED *in;
    struct {
        uint16_t resp_len;
        uint8_t resv[2];
        CXLCCIMessage ccimessage;
    } QEMU_PACKED *out;
    size_t pl_length, length_out;
    bool bg_started;
    int rc;

    if (cmd->in < sizeof(*in)) {
        return CXL_MBOX_INVALID_INPUT;
    }
    in = (void *)payload_in;
    out = (void *)payload_out;

    if (len_in < sizeof(*in)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /* Enough room for minimum sized message - no payload */
    if (in->size < sizeof(in->ccimessage)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /* Length of input payload should be in->size + a wrapping tunnel header */
    if (in->size != len_in - offsetof(typeof(*out), ccimessage)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    if (in->ccimessage.category != CXL_CCI_CAT_REQ) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (in->target_type != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "Tunneled Command sent to non-existent FM-LD\n");
        return CXL_MBOX_INVALID_INPUT;
    }

    /*
     * The target of a tunnel unfortunately depends on the type of CCI
     * reading the message.
     * If in a switch, it is the port number.
     * If in an MLD, it is the LD number.
     * If in an MHD, the target type indicates where we are going.
     */
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

        if (in->port_or_ld_id != 0) {
            /* Only pretending to have one for now! */
            return CXL_MBOX_INVALID_INPUT;
        }
        target_cci = &ct3d->ld0_cci;
    } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
        CXLUpstreamPort *usp = CXL_USP(cci->d);

        tunnel_target = pcie_find_port_by_pn(&PCI_BRIDGE(usp)->sec_bus,
                                             in->port_or_ld_id);
        if (!tunnel_target) {
            return CXL_MBOX_INVALID_INPUT;
        }
        tunnel_target =
            pci_bridge_get_sec_bus(PCI_BRIDGE(tunnel_target))->devices[0];
        if (!tunnel_target) {
            return CXL_MBOX_INVALID_INPUT;
        }
        if (object_dynamic_cast(OBJECT(tunnel_target), TYPE_CXL_TYPE3)) {
            CXLType3Dev *ct3d = CXL_TYPE3(tunnel_target);

            /* Tunneled VDMs always land on FM Owned LD */
            target_cci = &ct3d->vdm_fm_owned_ld_mctp_cci;
        } else {
            return CXL_MBOX_INVALID_INPUT;
        }
    } else {
        return CXL_MBOX_INVALID_INPUT;
    }

    pl_length = in->ccimessage.pl_length[2] << 16 |
                in->ccimessage.pl_length[1] << 8 | in->ccimessage.pl_length[0];
    rc = cxl_process_cci_message(target_cci,
                                 in->ccimessage.command_set,
                                 in->ccimessage.command,
                                 pl_length, in->ccimessage.payload,
                                 &length_out, out->ccimessage.payload,
                                 &bg_started);
    /* Payload is in place; the rest of the CCI header still needs filling */
    out->resp_len = length_out + sizeof(CXLCCIMessage);
    st24_le_p(out->ccimessage.pl_length, length_out);
    out->ccimessage.rc = rc;
    out->ccimessage.category = CXL_CCI_CAT_RSP;
    out->ccimessage.command = in->ccimessage.command;
    out->ccimessage.command_set = in->ccimessage.command_set;
    out->ccimessage.tag = in->ccimessage.tag;
    *len_out = length_out + sizeof(*out);

    return CXL_MBOX_SUCCESS;
}
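/*
 * Illustrative wire-format arithmetic for the tunnel checks above (not
 * normative): the tunnel header is 4 bytes (port_or_ld_id, target_type,
 * size) and the packed CXLCCIMessage header is 12 bytes, so the smallest
 * valid request - a tunneled command with an empty payload - has
 * in->size == 12 and len_in == 16. A tunneled Events/GET_RECORDS request
 * carrying a 1-byte log type would have in->size == 13 and len_in == 17.
 */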
static CXLRetCode cmd_events_get_records(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in, size_t len_in,
                                         uint8_t *payload_out, size_t *len_out,
                                         CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLGetEventPayload *pl;
    uint8_t log_type;
    int max_recs;

    if (cmd->in < sizeof(log_type)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    log_type = payload_in[0];

    pl = (CXLGetEventPayload *)payload_out;

    max_recs = (cxlds->payload_size - CXL_EVENT_PAYLOAD_HDR_SIZE) /
               CXL_EVENT_RECORD_SIZE;
    if (max_recs > 0xFFFF) {
        max_recs = 0xFFFF;
    }

    return cxl_event_get_records(cxlds, pl, log_type, max_recs, len_out);
}
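/*
 * Worked example for max_recs (illustrative; assumes a 2 KiB mailbox
 * payload, a 0x20-byte Get Event Records header and the spec's 0x80-byte
 * event records): (2048 - 32) / 128 = 15 records per request, so the
 * 0xFFFF clamp only matters for very large payload sizes.
 */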
static CXLRetCode cmd_events_clear_records(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLClearEventPayload *pl;

    pl = (CXLClearEventPayload *)payload_in;

    if (len_in < sizeof(*pl) ||
        len_in < sizeof(*pl) + sizeof(*pl->handle) * pl->nr_recs) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    *len_out = 0;
    return cxl_event_clear_records(cxlds, pl);
}

static CXLRetCode cmd_events_get_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    policy = (CXLEventInterruptPolicy *)payload_out;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    if (log->irq_enabled) {
        policy->info_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    if (log->irq_enabled) {
        policy->warn_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    if (log->irq_enabled) {
        policy->failure_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    if (log->irq_enabled) {
        policy->fatal_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    if (log->irq_enabled) {
        /* Dynamic Capacity borrows the same vector as info */
        policy->dyn_cap_settings = CXL_INT_MSI_MSIX;
    }

    *len_out = sizeof(*policy);
    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cmd_events_set_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    if (len_in < CXL_EVENT_INT_SETTING_MIN_LEN) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    policy = (CXLEventInterruptPolicy *)payload_in;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    log->irq_enabled = (policy->info_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    log->irq_enabled = (policy->warn_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    log->irq_enabled = (policy->failure_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    log->irq_enabled = (policy->fatal_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    /* DCD is optional */
    if (len_in < sizeof(*policy)) {
        return CXL_MBOX_SUCCESS;
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    log->irq_enabled = (policy->dyn_cap_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.1.1: Identify (Opcode 0001h) */
static CXLRetCode cmd_infostat_identify(const struct cxl_cmd *cmd,
                                        uint8_t *payload_in,
                                        size_t len_in,
                                        uint8_t *payload_out,
                                        size_t *len_out,
                                        CXLCCI *cci)
{
    PCIDeviceClass *class = PCI_DEVICE_GET_CLASS(cci->d);
    struct {
        uint16_t pcie_vid;
        uint16_t pcie_did;
        uint16_t pcie_subsys_vid;
        uint16_t pcie_subsys_id;
        uint64_t sn;
        uint8_t max_message_size;
        uint8_t component_type;
    } QEMU_PACKED *is_identify;
    QEMU_BUILD_BUG_ON(sizeof(*is_identify) != 18);

    is_identify = (void *)payload_out;
    is_identify->pcie_vid = class->vendor_id;
    is_identify->pcie_did = class->device_id;
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
        is_identify->sn = CXL_USP(cci->d)->sn;
        /* Subsystem info not defined for a USP */
        is_identify->pcie_subsys_vid = 0;
        is_identify->pcie_subsys_id = 0;
        is_identify->component_type = 0x0; /* Switch */
    } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        PCIDevice *pci_dev = PCI_DEVICE(cci->d);

        is_identify->sn = CXL_TYPE3(cci->d)->sn;
        /*
         * We can't always use class->subsystem_vendor_id as
         * it is not set if the defaults are used.
         */
        is_identify->pcie_subsys_vid =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID);
        is_identify->pcie_subsys_id =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_ID);
        is_identify->component_type = 0x3; /* Type 3 */
    }

    is_identify->max_message_size = (uint8_t)log2(cci->payload_max);
    *len_out = sizeof(*is_identify);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.1.3: Get Response Message Limit (Opcode 0003h) */
static CXLRetCode cmd_get_response_msg_limit(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        uint8_t rsp_limit;
    } QEMU_PACKED *get_rsp_msg_limit = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*get_rsp_msg_limit) != 1);

    get_rsp_msg_limit->rsp_limit = (uint8_t)log2(cci->payload_max);

    *len_out = sizeof(*get_rsp_msg_limit);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.1.4: Set Response Message Limit (Opcode 0004h) */
static CXLRetCode cmd_set_response_msg_limit(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        uint8_t rsp_limit;
    } QEMU_PACKED *in = (void *)payload_in;
    QEMU_BUILD_BUG_ON(sizeof(*in) != 1);
    struct {
        uint8_t rsp_limit;
    } QEMU_PACKED *out = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*out) != 1);

    if (in->rsp_limit < 8 || in->rsp_limit > 10) {
        return CXL_MBOX_INVALID_INPUT;
    }

    cci->payload_max = 1 << in->rsp_limit;
    out->rsp_limit = in->rsp_limit;

    *len_out = sizeof(*out);
    return CXL_MBOX_SUCCESS;
}
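/*
 * The message-size limits above are exchanged as powers of two: a
 * rsp_limit of n means 2^n bytes, so the accepted range 8..10 corresponds
 * to payload_max values of 256, 512 or 1024 bytes, and Get Response
 * Message Limit reports log2(payload_max) back. Setting a limit of 10,
 * for instance, makes cci->payload_max = 1 << 10 = 1024.
 */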
static void cxl_set_dsp_active_bm(PCIBus *b, PCIDevice *d,
                                  void *private)
{
    uint8_t *bm = private;

    if (object_dynamic_cast(OBJECT(d), TYPE_CXL_DSP)) {
        uint8_t port = PCIE_PORT(d)->port;

        bm[port / 8] |= 1 << (port % 8);
    }
}

/* CXL r3.1 Section 7.6.7.1.1: Identify Switch Device (Opcode 5100h) */
static CXLRetCode cmd_identify_switch_device(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    PCIEPort *usp = PCIE_PORT(cci->d);
    PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
    int num_phys_ports = pcie_count_ds_ports(bus);

    struct cxl_fmapi_ident_switch_dev_resp_pl {
        uint8_t ingress_port_id;
        uint8_t rsvd;
        uint8_t num_physical_ports;
        uint8_t num_vcss;
        uint8_t active_port_bitmask[0x20];
        uint8_t active_vcs_bitmask[0x20];
        uint16_t total_vppbs;
        uint16_t bound_vppbs;
        uint8_t num_hdm_decoders_per_usp;
    } QEMU_PACKED *out;
    QEMU_BUILD_BUG_ON(sizeof(*out) != 0x49);

    out = (struct cxl_fmapi_ident_switch_dev_resp_pl *)payload_out;
    *out = (struct cxl_fmapi_ident_switch_dev_resp_pl) {
        .num_physical_ports = num_phys_ports + 1, /* 1 USP */
        .num_vcss = 1, /* Multiple VCSs not yet supported - potentially tricky */
        .active_vcs_bitmask[0] = 0x1,
        .total_vppbs = num_phys_ports + 1,
        .bound_vppbs = num_phys_ports + 1,
        .num_hdm_decoders_per_usp = 4,
    };

    /* Depends on the CCI type */
    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_PCIE_PORT)) {
        out->ingress_port_id = PCIE_PORT(cci->intf)->port;
    } else {
        /* MCTP? */
        out->ingress_port_id = 0;
    }

    pci_for_each_device_under_bus(bus, cxl_set_dsp_active_bm,
                                  out->active_port_bitmask);
    out->active_port_bitmask[usp->port / 8] |= (1 << usp->port % 8);

    *len_out = sizeof(*out);

    return CXL_MBOX_SUCCESS;
}
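/*
 * active_port_bitmask is a plain bitmap indexed by port number: port n
 * sets bit (n % 8) of byte (n / 8). For example, an active DSP with port
 * number 10 sets bit 2 of byte 1. The 0x20-byte array therefore covers
 * up to 256 ports.
 */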
/* CXL r3.1 Section 7.6.7.1.2: Get Physical Port State (Opcode 5101h) */
static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    /* CXL r3.1 Table 7-17: Get Physical Port State Request Payload */
    struct cxl_fmapi_get_phys_port_state_req_pl {
        uint8_t num_ports;
        uint8_t ports[];
    } QEMU_PACKED *in;

    /*
     * CXL r3.1 Table 7-19: Get Physical Port State Port Information Block
     * Format
     */
    struct cxl_fmapi_port_state_info_block {
        uint8_t port_id;
        uint8_t config_state;
        uint8_t connected_device_cxl_version;
        uint8_t rsv1;
        uint8_t connected_device_type;
        uint8_t port_cxl_version_bitmask;
        uint8_t max_link_width;
        uint8_t negotiated_link_width;
        uint8_t supported_link_speeds_vector;
        uint8_t max_link_speed;
        uint8_t current_link_speed;
        uint8_t ltssm_state;
        uint8_t first_lane_num;
        uint16_t link_state;
        uint8_t supported_ld_count;
    } QEMU_PACKED;

    /* CXL r3.1 Table 7-18: Get Physical Port State Response Payload */
    struct cxl_fmapi_get_phys_port_state_resp_pl {
        uint8_t num_ports;
        uint8_t rsv1[3];
        struct cxl_fmapi_port_state_info_block ports[];
    } QEMU_PACKED *out;
    PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
    PCIEPort *usp = PCIE_PORT(cci->d);
    size_t pl_size;
    int i;

    in = (struct cxl_fmapi_get_phys_port_state_req_pl *)payload_in;
    out = (struct cxl_fmapi_get_phys_port_state_resp_pl *)payload_out;

    if (len_in < sizeof(*in)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /* Check if what was requested can fit */
    if (sizeof(*out) + sizeof(*out->ports) * in->num_ports > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* For success there should be a match for each requested */
    out->num_ports = in->num_ports;

    for (i = 0; i < in->num_ports; i++) {
        struct cxl_fmapi_port_state_info_block *port;
        /* First try to match on downstream port */
        PCIDevice *port_dev;
        uint16_t lnkcap, lnkcap2, lnksta;

        port = &out->ports[i];

        port_dev = pcie_find_port_by_pn(bus, in->ports[i]);
        if (port_dev) { /* DSP */
            PCIDevice *ds_dev = pci_bridge_get_sec_bus(PCI_BRIDGE(port_dev))
                ->devices[0];

            port->config_state = 3;
            if (ds_dev) {
                if (object_dynamic_cast(OBJECT(ds_dev), TYPE_CXL_TYPE3)) {
                    port->connected_device_type = 5; /* Assume MLD for now */
                } else {
                    port->connected_device_type = 1;
                }
            } else {
                port->connected_device_type = 0;
            }
            port->supported_ld_count = 3;
        } else if (usp->port == in->ports[i]) { /* USP */
            port_dev = PCI_DEVICE(usp);
            port->config_state = 4;
            port->connected_device_type = 0;
        } else {
            return CXL_MBOX_INVALID_INPUT;
        }

        port->port_id = in->ports[i];
        /* Information on status of this port in lnksta, lnkcap */
        if (!port_dev->exp.exp_cap) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
        lnksta = port_dev->config_read(port_dev,
                                       port_dev->exp.exp_cap + PCI_EXP_LNKSTA,
                                       sizeof(lnksta));
        lnkcap = port_dev->config_read(port_dev,
                                       port_dev->exp.exp_cap + PCI_EXP_LNKCAP,
                                       sizeof(lnkcap));
        lnkcap2 = port_dev->config_read(port_dev,
                                        port_dev->exp.exp_cap + PCI_EXP_LNKCAP2,
                                        sizeof(lnkcap2));

        port->max_link_width = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
        port->negotiated_link_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> 4;
        /* No definition for SLS field in linux/pci_regs.h */
        port->supported_link_speeds_vector = (lnkcap2 & 0xFE) >> 1;
        port->max_link_speed = lnkcap & PCI_EXP_LNKCAP_SLS;
        port->current_link_speed = lnksta & PCI_EXP_LNKSTA_CLS;
        /* TODO: Track down if we can get the rest of the info */
        port->ltssm_state = 0x7;
        port->first_lane_num = 0;
        port->link_state = 0;
        port->port_cxl_version_bitmask = 0x2;
        port->connected_device_cxl_version = 0x2;
    }

    pl_size = sizeof(*out) + sizeof(*out->ports) * in->num_ports;
    *len_out = pl_size;

    return CXL_MBOX_SUCCESS;
}
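/*
 * Decode note for the link fields above (PCIe convention, not CXL
 * specific): the Supported Link Speeds Vector taken from LNKCAP2 bits 7:1
 * is one bit per generation (bit 0 = 2.5 GT/s, bit 1 = 5 GT/s, bit 2 =
 * 8 GT/s, and so on), while max/current link speed are small integers
 * where 1 = 2.5 GT/s, 2 = 5 GT/s, 3 = 8 GT/s, etc.
 */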
/* CXL r3.1 Section 8.2.9.1.2: Background Operation Status (Opcode 0002h) */
static CXLRetCode cmd_infostat_bg_op_sts(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint8_t status;
        uint8_t rsvd;
        uint16_t opcode;
        uint16_t returncode;
        uint16_t vendor_ext_status;
    } QEMU_PACKED *bg_op_status;
    QEMU_BUILD_BUG_ON(sizeof(*bg_op_status) != 8);

    bg_op_status = (void *)payload_out;
    bg_op_status->status = cci->bg.complete_pct << 1;
    if (cci->bg.runtime > 0) {
        bg_op_status->status |= 1U << 0;
    }
    bg_op_status->opcode = cci->bg.opcode;
    bg_op_status->returncode = cci->bg.ret_code;
    *len_out = sizeof(*bg_op_status);

    return CXL_MBOX_SUCCESS;
}

/*
 * CXL r3.1 Section 8.2.9.1.5:
 * Request Abort Background Operation (Opcode 0005h)
 */
static CXLRetCode cmd_infostat_bg_op_abort(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    int bg_set = cci->bg.opcode >> 8;
    int bg_cmd = cci->bg.opcode & 0xff;
    const struct cxl_cmd *bg_c = &cci->cxl_cmd_set[bg_set][bg_cmd];

    if (!(bg_c->effect & CXL_MBOX_BACKGROUND_OPERATION_ABORT)) {
        return CXL_MBOX_REQUEST_ABORT_NOTSUP;
    }

    qemu_mutex_lock(&cci->bg.lock);
    if (cci->bg.runtime) {
        /* Abort unless the operation is near complete - then let it finish */
        if (cci->bg.complete_pct < 85) {
            timer_del(cci->bg.timer);
            cci->bg.ret_code = CXL_MBOX_ABORTED;
            cci->bg.starttime = 0;
            cci->bg.runtime = 0;
            cci->bg.aborted = true;
        }
    }
    qemu_mutex_unlock(&cci->bg.lock);

    return CXL_MBOX_SUCCESS;
}

#define CXL_FW_SLOTS 2
#define CXL_FW_SIZE  0x02000000 /* 32 MiB */

/* CXL r3.1 Section 8.2.9.3.1: Get FW Info (Opcode 0200h) */
static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    struct {
        uint8_t slots_supported;
        uint8_t slot_info;
        uint8_t caps;
        uint8_t rsvd[0xd];
        char fw_rev1[0x10];
        char fw_rev2[0x10];
        char fw_rev3[0x10];
        char fw_rev4[0x10];
    } QEMU_PACKED *fw_info;
    QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50);

    if (!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER) ||
        !QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER) ||
        !QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER)) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    fw_info = (void *)payload_out;

    fw_info->slots_supported = CXL_FW_SLOTS;
    fw_info->slot_info = (cci->fw.active_slot & 0x7) |
                         ((cci->fw.staged_slot & 0x7) << 3);
    fw_info->caps = BIT(0); /* online update supported */

    if (cci->fw.slot[0]) {
        pstrcpy(fw_info->fw_rev1, sizeof(fw_info->fw_rev1), "BWFW VERSION 0");
    }
    if (cci->fw.slot[1]) {
        pstrcpy(fw_info->fw_rev2, sizeof(fw_info->fw_rev2), "BWFW VERSION 1");
    }

    *len_out = sizeof(*fw_info);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.3.2: Transfer FW (Opcode 0201h) */
#define CXL_FW_XFER_ALIGNMENT   128

#define CXL_FW_XFER_ACTION_FULL     0x0
#define CXL_FW_XFER_ACTION_INIT     0x1
#define CXL_FW_XFER_ACTION_CONTINUE 0x2
#define CXL_FW_XFER_ACTION_END      0x3
#define CXL_FW_XFER_ACTION_ABORT    0x4

static CXLRetCode cmd_firmware_update_transfer(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    struct {
        uint8_t action;
        uint8_t slot;
        uint8_t rsvd1[2];
        uint32_t offset;
        uint8_t rsvd2[0x78];
        uint8_t data[];
    } QEMU_PACKED *fw_transfer = (void *)payload_in;
    size_t offset, length;

    if (len < sizeof(*fw_transfer)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (fw_transfer->action == CXL_FW_XFER_ACTION_ABORT) {
        /*
         * At this point there aren't any on-going transfers
         * running in the bg - this is serialized before this
         * call altogether. Just mark the state machine and
         * disregard any other input.
         */
        cci->fw.transferring = false;
        return CXL_MBOX_SUCCESS;
    }

    offset = fw_transfer->offset * CXL_FW_XFER_ALIGNMENT;
    length = len - sizeof(*fw_transfer);
    if (offset + length > CXL_FW_SIZE) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (cci->fw.transferring) {
        if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL ||
            fw_transfer->action == CXL_FW_XFER_ACTION_INIT) {
            return CXL_MBOX_FW_XFER_IN_PROGRESS;
        }
        /*
         * Abort partitioned package transfer if over 30 secs
         * between parts. As opposed to the explicit ABORT action,
         * semantically treat this condition as an error - as
         * if a part action were passed without a previous INIT.
         */
        if (difftime(time(NULL), cci->fw.last_partxfer) > 30.0) {
            cci->fw.transferring = false;
            return CXL_MBOX_INVALID_INPUT;
        }
    } else if (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE ||
               fw_transfer->action == CXL_FW_XFER_ACTION_END) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* allow back-to-back retransmission */
    if ((offset != cci->fw.prev_offset || length != cci->fw.prev_len) &&
        (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE ||
         fw_transfer->action == CXL_FW_XFER_ACTION_END)) {
        /* verify no overlaps */
        if (offset < cci->fw.prev_offset + cci->fw.prev_len) {
            return CXL_MBOX_FW_XFER_OUT_OF_ORDER;
        }
    }

    switch (fw_transfer->action) {
    case CXL_FW_XFER_ACTION_FULL: /* ignores offset */
    case CXL_FW_XFER_ACTION_END:
        if (fw_transfer->slot == 0 ||
            fw_transfer->slot == cci->fw.active_slot ||
            fw_transfer->slot > CXL_FW_SLOTS) {
            return CXL_MBOX_FW_INVALID_SLOT;
        }

        /* mark the slot used upon bg completion */
        break;
    case CXL_FW_XFER_ACTION_INIT:
        if (offset != 0) {
            return CXL_MBOX_INVALID_INPUT;
        }

        cci->fw.transferring = true;
        cci->fw.prev_offset = offset;
        cci->fw.prev_len = length;
        break;
    case CXL_FW_XFER_ACTION_CONTINUE:
        cci->fw.prev_offset = offset;
        cci->fw.prev_len = length;
        break;
    default:
        return CXL_MBOX_INVALID_INPUT;
    }

    if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL) {
        cci->bg.runtime = 10 * 1000UL;
    } else {
        cci->bg.runtime = 2 * 1000UL;
    }
    /* keep relevant context for bg completion */
    cci->fw.curr_action = fw_transfer->action;
    cci->fw.curr_slot = fw_transfer->slot;
    *len_out = 0;

    return CXL_MBOX_BG_STARTED;
}

static void __do_firmware_xfer(CXLCCI *cci)
{
    switch (cci->fw.curr_action) {
    case CXL_FW_XFER_ACTION_FULL:
    case CXL_FW_XFER_ACTION_END:
        cci->fw.slot[cci->fw.curr_slot - 1] = true;
        cci->fw.transferring = false;
        break;
    case CXL_FW_XFER_ACTION_INIT:
    case CXL_FW_XFER_ACTION_CONTINUE:
        time(&cci->fw.last_partxfer);
        break;
    default:
        break;
    }
}

/* CXL r3.1 section 8.2.9.3.3: Activate FW (Opcode 0202h) */
static CXLRetCode cmd_firmware_update_activate(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    struct {
        uint8_t action;
        uint8_t slot;
    } QEMU_PACKED *fw_activate = (void *)payload_in;
    QEMU_BUILD_BUG_ON(sizeof(*fw_activate) != 0x2);

    if (fw_activate->slot == 0 ||
        fw_activate->slot == cci->fw.active_slot ||
        fw_activate->slot > CXL_FW_SLOTS) {
        return CXL_MBOX_FW_INVALID_SLOT;
    }

    /* ensure that an actual fw package is there */
    if (!cci->fw.slot[fw_activate->slot - 1]) {
        return CXL_MBOX_FW_INVALID_SLOT;
    }

    switch (fw_activate->action) {
    case 0: /* online */
        cci->fw.active_slot = fw_activate->slot;
        break;
    case 1: /* reset */
        cci->fw.staged_slot = fw_activate->slot;
        break;
    default:
        return CXL_MBOX_INVALID_INPUT;
    }

    return CXL_MBOX_SUCCESS;
}
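/*
 * Offsets in the Transfer FW payload are expressed in 128-byte units
 * (CXL_FW_XFER_ALIGNMENT), so a CONTINUE part that starts at byte 0x8000
 * of the package carries fw_transfer->offset == 0x100. With CXL_FW_SIZE
 * at 32 MiB, the largest valid offset field is 32 MiB / 128 = 0x40000.
 */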
/* CXL r3.1 Section 8.2.9.4.1: Get Timestamp (Opcode 0300h) */
static CXLRetCode cmd_timestamp_get(const struct cxl_cmd *cmd,
                                    uint8_t *payload_in,
                                    size_t len_in,
                                    uint8_t *payload_out,
                                    size_t *len_out,
                                    CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    uint64_t final_time = cxl_device_get_timestamp(cxl_dstate);

    stq_le_p(payload_out, final_time);
    *len_out = 8;

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.4.2: Set Timestamp (Opcode 0301h) */
static CXLRetCode cmd_timestamp_set(const struct cxl_cmd *cmd,
                                    uint8_t *payload_in,
                                    size_t len_in,
                                    uint8_t *payload_out,
                                    size_t *len_out,
                                    CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;

    cxl_dstate->timestamp.set = true;
    cxl_dstate->timestamp.last_set = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    cxl_dstate->timestamp.host_set = le64_to_cpu(*(uint64_t *)payload_in);

    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.5.2.1: Command Effects Log (CEL) */
static const QemuUUID cel_uuid = {
    .data = UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79,
                 0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17)
};

/* CXL r3.1 Section 8.2.9.5.1: Get Supported Logs (Opcode 0400h) */
static CXLRetCode cmd_logs_get_supported(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint16_t entries;
        uint8_t rsvd[6];
        struct {
            QemuUUID uuid;
            uint32_t size;
        } log_entries[1];
    } QEMU_PACKED *supported_logs = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*supported_logs) != 0x1c);

    supported_logs->entries = 1;
    supported_logs->log_entries[0].uuid = cel_uuid;
    supported_logs->log_entries[0].size = 4 * cci->cel_size;

    *len_out = sizeof(*supported_logs);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h) */
static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct {
        QemuUUID uuid;
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_log;

    get_log = (void *)payload_in;

    if (get_log->length > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) {
        return CXL_MBOX_INVALID_LOG;
    }

    /*
     * CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h)
     *   The device shall return Invalid Input if the Offset or Length
     *   fields attempt to access beyond the size of the log as reported by
     *   Get Supported Logs.
     *
     * It is only valid for there to be one CEL entry per opcode, but
     * length + offset may still exceed that if the inputs are not valid,
     * and so would access beyond the end of cci->cel_log.
     */
    if ((uint64_t)get_log->offset + get_log->length >= sizeof(cci->cel_log)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* Store off everything to local variables so we can wipe out the payload */
    *len_out = get_log->length;

    memmove(payload_out, cci->cel_log + get_log->offset, get_log->length);

    return CXL_MBOX_SUCCESS;
}
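/*
 * Sizing note: each CEL entry is 4 bytes (a 16-bit opcode plus a 16-bit
 * command effects field), which is why Get Supported Logs reports the CEL
 * size as 4 * cci->cel_size. A device exposing, say, 30 commands would
 * report a 120-byte log.
 */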
/* CXL r3.1 section 8.2.9.6: Features */
/*
 * Get Supported Features output payload
 * CXL r3.1 section 8.2.9.6.1 Table 8-96
 */
typedef struct CXLSupportedFeatureHeader {
    uint16_t entries;
    uint16_t nsuppfeats_dev;
    uint32_t reserved;
} QEMU_PACKED CXLSupportedFeatureHeader;

/*
 * Get Supported Features Supported Feature Entry
 * CXL r3.1 section 8.2.9.6.1 Table 8-97
 */
typedef struct CXLSupportedFeatureEntry {
    QemuUUID uuid;
    uint16_t feat_index;
    uint16_t get_feat_size;
    uint16_t set_feat_size;
    uint32_t attr_flags;
    uint8_t get_feat_version;
    uint8_t set_feat_version;
    uint16_t set_feat_effects;
    uint8_t rsvd[18];
} QEMU_PACKED CXLSupportedFeatureEntry;

/* Supported Feature Entry : attribute flags */
#define CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE BIT(0)
#define CXL_FEAT_ENTRY_ATTR_FLAG_DEEPEST_RESET_PERSISTENCE_MASK GENMASK(3, 1)
#define CXL_FEAT_ENTRY_ATTR_FLAG_PERSIST_ACROSS_FIRMWARE_UPDATE BIT(4)
#define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_DEFAULT_SELECTION BIT(5)
#define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_SAVED_SELECTION BIT(6)

/* Supported Feature Entry : set feature effects */
#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_COLD_RESET BIT(0)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE BIT(1)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_DATA_CHANGE BIT(2)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_POLICY_CHANGE BIT(3)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_LOG_CHANGE BIT(4)
#define CXL_FEAT_ENTRY_SFE_SECURITY_STATE_CHANGE BIT(5)
#define CXL_FEAT_ENTRY_SFE_BACKGROUND_OPERATION BIT(6)
#define CXL_FEAT_ENTRY_SFE_SUPPORT_SECONDARY_MAILBOX BIT(7)
#define CXL_FEAT_ENTRY_SFE_SUPPORT_ABORT_BACKGROUND_OPERATION BIT(8)
#define CXL_FEAT_ENTRY_SFE_CEL_VALID BIT(9)
#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CONV_RESET BIT(10)
#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CXL_RESET BIT(11)

enum CXL_SUPPORTED_FEATURES_LIST {
    CXL_FEATURE_PATROL_SCRUB = 0,
    CXL_FEATURE_ECS,
    CXL_FEATURE_MAX
};

/*
 * Get Feature input payload
 * CXL r3.1 section 8.2.9.6.2 Table 8-99
 */
/* Get Feature : Payload in selection */
enum CXL_GET_FEATURE_SELECTION {
    CXL_GET_FEATURE_SEL_CURRENT_VALUE,
    CXL_GET_FEATURE_SEL_DEFAULT_VALUE,
    CXL_GET_FEATURE_SEL_SAVED_VALUE,
    CXL_GET_FEATURE_SEL_MAX
};

/*
 * Set Feature input payload
 * CXL r3.1 section 8.2.9.6.3 Table 8-101
 */
typedef struct CXLSetFeatureInHeader {
    QemuUUID uuid;
    uint32_t flags;
    uint16_t offset;
    uint8_t version;
    uint8_t rsvd[9];
} QEMU_PACKED QEMU_ALIGNED(16) CXLSetFeatureInHeader;

/* Set Feature : Payload in flags */
#define CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK 0x7
enum CXL_SET_FEATURE_FLAG_DATA_TRANSFER {
    CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_CONTINUE_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MAX
};
#define CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET BIT(3)

/* CXL r3.1 section 8.2.9.9.11.1: Device Patrol Scrub Control Feature */
static const QemuUUID patrol_scrub_uuid = {
    .data = UUID(0x96dad7d6, 0xfde8, 0x482b, 0xa7, 0x33,
                 0x75, 0x77, 0x4e, 0x06, 0xdb, 0x8a)
};

typedef struct CXLMemPatrolScrubSetFeature {
    CXLSetFeatureInHeader hdr;
    CXLMemPatrolScrubWriteAttrs feat_data;
} QEMU_PACKED QEMU_ALIGNED(16) CXLMemPatrolScrubSetFeature;

/*
 * CXL r3.1 section 8.2.9.9.11.2:
 * DDR5 Error Check Scrub (ECS) Control Feature
 */
static const QemuUUID ecs_uuid = {
    .data = UUID(0xe5b13f22, 0x2328, 0x4a14, 0xb8, 0xba,
                 0xb9, 0x69, 0x1e, 0x89, 0x33, 0x86)
};

typedef struct CXLMemECSSetFeature {
    CXLSetFeatureInHeader hdr;
    CXLMemECSWriteAttrs feat_data[];
} QEMU_PACKED QEMU_ALIGNED(16) CXLMemECSSetFeature;
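/*
 * Sketch of the Set Feature data-transfer handshake implemented below
 * (illustrative): a caller either sends everything at once with
 * FULL_DATA_TRANSFER, or splits the data as INITIATE (offset 0) followed
 * by zero or more CONTINUE parts and a final FINISH, each part carrying
 * hdr->offset for the piece being written. ABORT cancels a partial
 * transfer. The in-progress UUID is parked in ct3d->set_feat_info and
 * cleared once a FULL, FINISH or ABORT completes.
 */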
/* CXL r3.1 section 8.2.9.6.1: Get Supported Features (Opcode 0500h) */
static CXLRetCode cmd_features_get_supported(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        uint32_t count;
        uint16_t start_index;
        uint16_t reserved;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_feats_in = (void *)payload_in;

    struct {
        CXLSupportedFeatureHeader hdr;
        CXLSupportedFeatureEntry feat_entries[];
    } QEMU_PACKED QEMU_ALIGNED(16) *get_feats_out = (void *)payload_out;
    uint16_t index, req_entries;
    uint16_t entry;

    if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        return CXL_MBOX_UNSUPPORTED;
    }
    if (get_feats_in->count < sizeof(CXLSupportedFeatureHeader) ||
        get_feats_in->start_index >= CXL_FEATURE_MAX) {
        return CXL_MBOX_INVALID_INPUT;
    }

    req_entries = (get_feats_in->count -
                   sizeof(CXLSupportedFeatureHeader)) /
                  sizeof(CXLSupportedFeatureEntry);
    req_entries = MIN(req_entries,
                      (CXL_FEATURE_MAX - get_feats_in->start_index));

    for (entry = 0, index = get_feats_in->start_index;
         entry < req_entries; index++) {
        switch (index) {
        case CXL_FEATURE_PATROL_SCRUB:
            /* Fill supported feature entry for device patrol scrub control */
            get_feats_out->feat_entries[entry++] =
                (struct CXLSupportedFeatureEntry) {
                    .uuid = patrol_scrub_uuid,
                    .feat_index = index,
                    .get_feat_size = sizeof(CXLMemPatrolScrubReadAttrs),
                    .set_feat_size = sizeof(CXLMemPatrolScrubWriteAttrs),
                    .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE,
                    .get_feat_version = CXL_MEMDEV_PS_GET_FEATURE_VERSION,
                    .set_feat_version = CXL_MEMDEV_PS_SET_FEATURE_VERSION,
                    .set_feat_effects = CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE |
                                        CXL_FEAT_ENTRY_SFE_CEL_VALID,
                };
            break;
        case CXL_FEATURE_ECS:
            /* Fill supported feature entry for device DDR5 ECS control */
            get_feats_out->feat_entries[entry++] =
                (struct CXLSupportedFeatureEntry) {
                    .uuid = ecs_uuid,
                    .feat_index = index,
                    .get_feat_size = sizeof(CXLMemECSReadAttrs),
                    .set_feat_size = sizeof(CXLMemECSWriteAttrs),
                    .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE,
                    .get_feat_version = CXL_ECS_GET_FEATURE_VERSION,
                    .set_feat_version = CXL_ECS_SET_FEATURE_VERSION,
                    .set_feat_effects = CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE |
                                        CXL_FEAT_ENTRY_SFE_CEL_VALID,
                };
            break;
        default:
            __builtin_unreachable();
        }
    }
    get_feats_out->hdr.nsuppfeats_dev = CXL_FEATURE_MAX;
    get_feats_out->hdr.entries = req_entries;
    *len_out = sizeof(CXLSupportedFeatureHeader) +
               req_entries * sizeof(CXLSupportedFeatureEntry);

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.6.2: Get Feature (Opcode 0501h) */
static CXLRetCode cmd_features_get_feature(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    struct {
        QemuUUID uuid;
        uint16_t offset;
        uint16_t count;
        uint8_t selection;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_feature;
    uint16_t bytes_to_copy = 0;
    CXLType3Dev *ct3d;
    CXLSetFeatureInfo *set_feat_info;

    if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        return CXL_MBOX_UNSUPPORTED;
    }

    ct3d = CXL_TYPE3(cci->d);
    get_feature = (void *)payload_in;

    set_feat_info = &ct3d->set_feat_info;
    if (qemu_uuid_is_equal(&get_feature->uuid, &set_feat_info->uuid)) {
        return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS;
    }

    if (get_feature->selection != CXL_GET_FEATURE_SEL_CURRENT_VALUE) {
        return CXL_MBOX_UNSUPPORTED;
    }
    if (get_feature->offset + get_feature->count > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (qemu_uuid_is_equal(&get_feature->uuid, &patrol_scrub_uuid)) {
        if (get_feature->offset >= sizeof(CXLMemPatrolScrubReadAttrs)) {
            return CXL_MBOX_INVALID_INPUT;
        }
        bytes_to_copy = sizeof(CXLMemPatrolScrubReadAttrs) -
                        get_feature->offset;
        bytes_to_copy = MIN(bytes_to_copy, get_feature->count);
        memcpy(payload_out,
               (uint8_t *)&ct3d->patrol_scrub_attrs + get_feature->offset,
               bytes_to_copy);
    } else if (qemu_uuid_is_equal(&get_feature->uuid, &ecs_uuid)) {
        if (get_feature->offset >= sizeof(CXLMemECSReadAttrs)) {
            return CXL_MBOX_INVALID_INPUT;
        }
        bytes_to_copy = sizeof(CXLMemECSReadAttrs) - get_feature->offset;
        bytes_to_copy = MIN(bytes_to_copy, get_feature->count);
        memcpy(payload_out,
               (uint8_t *)&ct3d->ecs_attrs + get_feature->offset,
               bytes_to_copy);
    } else {
        return CXL_MBOX_UNSUPPORTED;
    }

    *len_out = bytes_to_copy;

    return CXL_MBOX_SUCCESS;
}
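/*
 * Read-side clamping example (illustrative): a Get Feature for the patrol
 * scrub attributes with offset 2 and count 64 copies
 * MIN(sizeof(CXLMemPatrolScrubReadAttrs) - 2, 64) bytes, i.e. the tail of
 * the structure from byte 2 onwards, never more than the caller asked for
 * and never beyond the end of the attributes.
 */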
/* CXL r3.1 section 8.2.9.6.3: Set Feature (Opcode 0502h) */
static CXLRetCode cmd_features_set_feature(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    CXLSetFeatureInHeader *hdr = (void *)payload_in;
    CXLMemPatrolScrubWriteAttrs *ps_write_attrs;
    CXLMemPatrolScrubSetFeature *ps_set_feature;
    CXLMemECSWriteAttrs *ecs_write_attrs;
    CXLMemECSSetFeature *ecs_set_feature;
    CXLSetFeatureInfo *set_feat_info;
    uint16_t bytes_to_copy = 0;
    uint8_t data_transfer_flag;
    CXLType3Dev *ct3d;
    uint16_t count;

    if (len_in < sizeof(*hdr)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        return CXL_MBOX_UNSUPPORTED;
    }
    ct3d = CXL_TYPE3(cci->d);
    set_feat_info = &ct3d->set_feat_info;

    if (!qemu_uuid_is_null(&set_feat_info->uuid) &&
        !qemu_uuid_is_equal(&hdr->uuid, &set_feat_info->uuid)) {
        return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS;
    }
    if (hdr->flags & CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET) {
        set_feat_info->data_saved_across_reset = true;
    } else {
        set_feat_info->data_saved_across_reset = false;
    }

    data_transfer_flag =
        hdr->flags & CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK;
    if (data_transfer_flag == CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER) {
        set_feat_info->uuid = hdr->uuid;
        set_feat_info->data_size = 0;
    }
    set_feat_info->data_transfer_flag = data_transfer_flag;
    set_feat_info->data_offset = hdr->offset;
    bytes_to_copy = len_in - sizeof(CXLSetFeatureInHeader);

    if (bytes_to_copy == 0) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) {
        if (hdr->version != CXL_MEMDEV_PS_SET_FEATURE_VERSION) {
            return CXL_MBOX_UNSUPPORTED;
        }

        ps_set_feature = (void *)payload_in;
        ps_write_attrs = &ps_set_feature->feat_data;

        if ((uint32_t)hdr->offset + bytes_to_copy >
            sizeof(ct3d->patrol_scrub_wr_attrs)) {
            return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
        }
        memcpy((uint8_t *)&ct3d->patrol_scrub_wr_attrs + hdr->offset,
               ps_write_attrs,
               bytes_to_copy);
        set_feat_info->data_size += bytes_to_copy;

        if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
            data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) {
            ct3d->patrol_scrub_attrs.scrub_cycle &= ~0xFF;
            ct3d->patrol_scrub_attrs.scrub_cycle |=
                ct3d->patrol_scrub_wr_attrs.scrub_cycle_hr & 0xFF;
            ct3d->patrol_scrub_attrs.scrub_flags &= ~0x1;
            ct3d->patrol_scrub_attrs.scrub_flags |=
                ct3d->patrol_scrub_wr_attrs.scrub_flags & 0x1;
        }
    } else if (qemu_uuid_is_equal(&hdr->uuid, &ecs_uuid)) {
        if (hdr->version != CXL_ECS_SET_FEATURE_VERSION) {
            return CXL_MBOX_UNSUPPORTED;
        }

        ecs_set_feature = (void *)payload_in;
        ecs_write_attrs = ecs_set_feature->feat_data;

        if ((uint32_t)hdr->offset + bytes_to_copy >
            sizeof(ct3d->ecs_wr_attrs)) {
            return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
        }
        memcpy((uint8_t *)&ct3d->ecs_wr_attrs + hdr->offset,
               ecs_write_attrs,
               bytes_to_copy);
        set_feat_info->data_size += bytes_to_copy;

        if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
            data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) {
            ct3d->ecs_attrs.ecs_log_cap = ct3d->ecs_wr_attrs.ecs_log_cap;
            for (count = 0; count < CXL_ECS_NUM_MEDIA_FRUS; count++) {
                ct3d->ecs_attrs.fru_attrs[count].ecs_config =
                    ct3d->ecs_wr_attrs.fru_attrs[count].ecs_config & 0x1F;
            }
        }
    } else {
        return CXL_MBOX_UNSUPPORTED;
    }

    if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
        data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER ||
        data_transfer_flag == CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER) {
        memset(&set_feat_info->uuid, 0, sizeof(QemuUUID));
        if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) {
            memset(&ct3d->patrol_scrub_wr_attrs, 0, set_feat_info->data_size);
        } else if (qemu_uuid_is_equal(&hdr->uuid, &ecs_uuid)) {
            memset(&ct3d->ecs_wr_attrs, 0, set_feat_info->data_size);
        }
        set_feat_info->data_transfer_flag = 0;
        set_feat_info->data_saved_across_reset = false;
        set_feat_info->data_offset = 0;
        set_feat_info->data_size = 0;
    }

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.1.1: Identify Memory Device (Opcode 4000h) */
static CXLRetCode cmd_identify_memory_device(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        char fw_revision[0x10];
        uint64_t total_capacity;
        uint64_t volatile_capacity;
        uint64_t persistent_capacity;
        uint64_t partition_align;
        uint16_t info_event_log_size;
        uint16_t warning_event_log_size;
        uint16_t failure_event_log_size;
        uint16_t fatal_event_log_size;
        uint32_t lsa_size;
        uint8_t poison_list_max_mer[3];
        uint16_t inject_poison_limit;
        uint8_t poison_caps;
        uint8_t qos_telemetry_caps;
        uint16_t dc_event_log_size;
    } QEMU_PACKED *id;
    QEMU_BUILD_BUG_ON(sizeof(*id) != 0x45);
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    id = (void *)payload_out;

    snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0);

    stq_le_p(&id->total_capacity,
             cxl_dstate->static_mem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->persistent_capacity,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->volatile_capacity,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d));
    /* 256 poison records */
    st24_le_p(id->poison_list_max_mer, 256);
    /* No limit - so limited by main poison record limit */
    stw_le_p(&id->inject_poison_limit, 0);
    stw_le_p(&id->dc_event_log_size, CXL_DC_EVENT_LOG_SIZE);

    *len_out = sizeof(*id);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.2.1: Get Partition Info (Opcode 4100h) */
static CXLRetCode cmd_ccls_get_partition_info(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    struct {
        uint64_t active_vmem;
        uint64_t active_pmem;
        uint64_t next_vmem;
        uint64_t next_pmem;
    } QEMU_PACKED *part_info = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20);
    CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    stq_le_p(&part_info->active_vmem,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    /*
     * When both next_vmem and next_pmem are 0, there is no pending change to
     * partitioning.
     */
    stq_le_p(&part_info->next_vmem, 0);
    stq_le_p(&part_info->active_pmem,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&part_info->next_pmem, 0);

    *len_out = sizeof(*part_info);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.2.3: Get LSA (Opcode 4102h) */
static CXLRetCode cmd_ccls_get_lsa(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct {
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED *get_lsa;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    uint64_t offset, length;

    get_lsa = (void *)payload_in;
    offset = get_lsa->offset;
    length = get_lsa->length;

    if (offset + length > cvc->get_lsa_size(ct3d)) {
        *len_out = 0;
        return CXL_MBOX_INVALID_INPUT;
    }

    *len_out = cvc->get_lsa(ct3d, payload_out, length, offset);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.2.4: Set LSA (Opcode 4103h) */
static CXLRetCode cmd_ccls_set_lsa(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct set_lsa_pl {
        uint32_t offset;
        uint32_t rsvd;
        uint8_t data[];
    } QEMU_PACKED;
    struct set_lsa_pl *set_lsa_payload = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    const size_t hdr_len = offsetof(struct set_lsa_pl, data);

    *len_out = 0;
    if (len_in < hdr_len) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (set_lsa_payload->offset + len_in > cvc->get_lsa_size(ct3d) + hdr_len) {
        return CXL_MBOX_INVALID_INPUT;
    }
    len_in -= hdr_len;

    cvc->set_lsa(ct3d, set_lsa_payload->data, len_in, set_lsa_payload->offset);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.2 Section 8.2.10.9.3.2: Get Alert Configuration (Opcode 4201h) */
static CXLRetCode cmd_get_alert_config(const struct cxl_cmd *cmd,
                                       uint8_t *payload_in,
                                       size_t len_in,
                                       uint8_t *payload_out,
                                       size_t *len_out,
                                       CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLAlertConfig *out = (CXLAlertConfig *)payload_out;

    memcpy(out, &ct3d->alert_config, sizeof(ct3d->alert_config));
    *len_out = sizeof(ct3d->alert_config);

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.2 Section 8.2.10.9.3.3: Set Alert Configuration (Opcode 4202h) */
static CXLRetCode cmd_set_alert_config(const struct cxl_cmd *cmd,
                                       uint8_t *payload_in,
                                       size_t len_in,
                                       uint8_t *payload_out,
                                       size_t *len_out,
                                       CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLAlertConfig *alert_config = &ct3d->alert_config;
    struct {
        uint8_t valid_alert_actions;
        uint8_t enable_alert_actions;
        uint8_t life_used_warn_thresh;
        uint8_t rsvd;
        uint16_t over_temp_warn_thresh;
        uint16_t under_temp_warn_thresh;
        uint16_t cor_vmem_err_warn_thresh;
        uint16_t cor_pmem_err_warn_thresh;
    } QEMU_PACKED *in = (void *)payload_in;

    if (in->valid_alert_actions & CXL_ALERTS_LIFE_USED_WARN_THRESH) {
        /*
         * CXL r3.2 Table 8-149: The life used warning threshold shall be
         * less than the life used critical alert value.
         */
        if (in->life_used_warn_thresh >=
            alert_config->life_used_crit_alert_thresh) {
            return CXL_MBOX_INVALID_INPUT;
        }
        alert_config->life_used_warn_thresh = in->life_used_warn_thresh;
        alert_config->enable_alerts |= CXL_ALERTS_LIFE_USED_WARN_THRESH;
    }

    if (in->valid_alert_actions & CXL_ALERTS_OVER_TEMP_WARN_THRESH) {
        /*
         * CXL r3.2 Table 8-149: The Device Over-Temperature Warning
         * Threshold shall be less than the Device Over-Temperature
         * Critical Alert Threshold.
         */
        if (in->over_temp_warn_thresh >=
            alert_config->over_temp_crit_alert_thresh) {
            return CXL_MBOX_INVALID_INPUT;
        }
        alert_config->over_temp_warn_thresh = in->over_temp_warn_thresh;
        alert_config->enable_alerts |= CXL_ALERTS_OVER_TEMP_WARN_THRESH;
    }

    if (in->valid_alert_actions & CXL_ALERTS_UNDER_TEMP_WARN_THRESH) {
        /*
         * CXL r3.2 Table 8-149: The Device Under-Temperature Warning
         * Threshold shall be higher than the Device Under-Temperature
         * Critical Alert Threshold.
         */
        if (in->under_temp_warn_thresh <=
            alert_config->under_temp_crit_alert_thresh) {
            return CXL_MBOX_INVALID_INPUT;
        }
        alert_config->under_temp_warn_thresh = in->under_temp_warn_thresh;
        alert_config->enable_alerts |= CXL_ALERTS_UNDER_TEMP_WARN_THRESH;
    }

    if (in->valid_alert_actions & CXL_ALERTS_COR_VMEM_ERR_WARN_THRESH) {
        alert_config->cor_vmem_err_warn_thresh = in->cor_vmem_err_warn_thresh;
        alert_config->enable_alerts |= CXL_ALERTS_COR_VMEM_ERR_WARN_THRESH;
    }

    if (in->valid_alert_actions & CXL_ALERTS_COR_PMEM_ERR_WARN_THRESH) {
        alert_config->cor_pmem_err_warn_thresh = in->cor_pmem_err_warn_thresh;
        alert_config->enable_alerts |= CXL_ALERTS_COR_PMEM_ERR_WARN_THRESH;
    }
    return CXL_MBOX_SUCCESS;
}

/* Perform the actual device zeroing */
static void __do_sanitization(CXLType3Dev *ct3d)
{
    MemoryRegion *mr;

    if (ct3d->hostvmem) {
        mr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (mr) {
            void *hostmem = memory_region_get_ram_ptr(mr);
            memset(hostmem, 0, memory_region_size(mr));
        }
    }

    if (ct3d->hostpmem) {
        mr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (mr) {
            void *hostmem = memory_region_get_ram_ptr(mr);
            memset(hostmem, 0, memory_region_size(mr));
        }
    }
    if (ct3d->lsa) {
        mr = host_memory_backend_get_memory(ct3d->lsa);
        if (mr) {
            void *lsa = memory_region_get_ram_ptr(mr);
            memset(lsa, 0, memory_region_size(mr));
        }
    }
    cxl_discard_all_event_records(&ct3d->cxl_dstate);
}

static int get_sanitize_duration(uint64_t total_mem)
{
    int secs = 0;

    if (total_mem <= 512) {
        secs = 4;
    } else if (total_mem <= 1024) {
        secs = 8;
    } else if (total_mem <= 2 * 1024) {
        secs = 15;
    } else if (total_mem <= 4 * 1024) {
        secs = 30;
    } else if (total_mem <= 8 * 1024) {
        secs = 60;
    } else if (total_mem <= 16 * 1024) {
        secs = 2 * 60;
    } else if (total_mem <= 32 * 1024) {
        secs = 4 * 60;
    } else if (total_mem <= 64 * 1024) {
        secs = 8 * 60;
    } else if (total_mem <= 128 * 1024) {
        secs = 15 * 60;
    } else if (total_mem <= 256 * 1024) {
        secs = 30 * 60;
    } else if (total_mem <= 512 * 1024) {
        secs = 60 * 60;
    } else if (total_mem <= 1024 * 1024) {
        secs = 120 * 60;
    } else {
        secs = 240 * 60; /* max 4 hrs */
    }

    return secs;
}
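/*
 * get_sanitize_duration() takes the capacity in MiB and returns a loosely
 * size-proportional simulated runtime: e.g. a 1 GiB (1024 MiB) device
 * sanitizes in 8 s, a 64 GiB device in 8 minutes, and anything above
 * 1 TiB is capped at 4 hours.
 */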
/*
 * CXL r3.1 Section 8.2.9.9.5.1: Sanitize (Opcode 4400h)
 *
 * Once the Sanitize command has started successfully, the device shall be
 * placed in the media disabled state. If the command fails or is interrupted
 * by a reset or power failure, it shall remain in the media disabled state
 * until a successful Sanitize command has been completed. During this state:
 *
 * 1. Memory writes to the device will have no effect, and all memory reads
 * will return random values (no user data returned, even for locations that
 * the failed Sanitize operation didn't sanitize yet).
 *
 * 2. Mailbox commands shall still be processed in the disabled state, except
 * that commands that access Sanitized areas shall fail with the Media Disabled
 * error code.
 */
static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint64_t total_mem; /* in MiB */
    int secs;

    total_mem = (ct3d->cxl_dstate.vmem_size + ct3d->cxl_dstate.pmem_size) >> 20;
    secs = get_sanitize_duration(total_mem);

    /* EBUSY other bg cmds as of now */
    cci->bg.runtime = secs * 1000UL;
    *len_out = 0;

    cxl_dev_disable_media(&ct3d->cxl_dstate);

    /* sanitize when done */
    return CXL_MBOX_BG_STARTED;
}

struct dpa_range_list_entry {
    uint64_t starting_dpa;
    uint64_t length;
} QEMU_PACKED;

struct CXLSanitizeInfo {
    uint32_t dpa_range_count;
    uint8_t fill_value;
    struct dpa_range_list_entry dpa_range_list[];
} QEMU_PACKED;

static uint64_t get_vmr_size(CXLType3Dev *ct3d, MemoryRegion **vmr)
{
    MemoryRegion *mr;

    if (ct3d->hostvmem) {
        mr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (vmr) {
            *vmr = mr;
        }
        return memory_region_size(mr);
    }
    return 0;
}

static uint64_t get_pmr_size(CXLType3Dev *ct3d, MemoryRegion **pmr)
{
    MemoryRegion *mr;

    if (ct3d->hostpmem) {
        mr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (pmr) {
            *pmr = mr;
        }
        return memory_region_size(mr);
    }
    return 0;
}

static uint64_t get_dc_size(CXLType3Dev *ct3d, MemoryRegion **dc_mr)
{
    MemoryRegion *mr;

    if (ct3d->dc.host_dc) {
        mr = host_memory_backend_get_memory(ct3d->dc.host_dc);
        if (dc_mr) {
            *dc_mr = mr;
        }
        return memory_region_size(mr);
    }
    return 0;
}

static int validate_dpa_addr(CXLType3Dev *ct3d, uint64_t dpa_addr,
                             size_t length)
{
    uint64_t vmr_size, pmr_size, dc_size;

    if ((dpa_addr % CXL_CACHE_LINE_SIZE) ||
        (length % CXL_CACHE_LINE_SIZE) ||
        (length <= 0)) {
        return -EINVAL;
    }

    vmr_size = get_vmr_size(ct3d, NULL);
    pmr_size = get_pmr_size(ct3d, NULL);
    dc_size = get_dc_size(ct3d, NULL);

    if (dpa_addr + length > vmr_size + pmr_size + dc_size) {
        return -EINVAL;
    }

    if (dpa_addr > vmr_size + pmr_size) {
        if (!ct3_test_region_block_backed(ct3d, dpa_addr, length)) {
            return -ENODEV;
        }
    }

    return 0;
}

static int sanitize_range(CXLType3Dev *ct3d, uint64_t dpa_addr, size_t length,
                          uint8_t fill_value)
{
    uint64_t vmr_size, pmr_size;
    AddressSpace *as = NULL;
    MemTxAttrs mem_attrs = {};

    vmr_size = get_vmr_size(ct3d, NULL);
    pmr_size = get_pmr_size(ct3d, NULL);

    if (dpa_addr < vmr_size) {
        as = &ct3d->hostvmem_as;
    } else if (dpa_addr < vmr_size + pmr_size) {
        as = &ct3d->hostpmem_as;
    } else {
        if (!ct3_test_region_block_backed(ct3d, dpa_addr, length)) {
            return -ENODEV;
        }
        as = &ct3d->dc.host_dc_as;
    }

    return address_space_set(as, dpa_addr, fill_value, length, mem_attrs);
}

/* Perform the actual device fill (zero or pattern, per fill_value) */
static void __do_sanitize(CXLType3Dev *ct3d)
{
    struct CXLSanitizeInfo *san_info = ct3d->media_op_sanitize;
    int dpa_range_count = san_info->dpa_range_count;
    int rc = 0;
    int i;

    for (i = 0; i < dpa_range_count; i++) {
        rc = sanitize_range(ct3d, san_info->dpa_range_list[i].starting_dpa,
                            san_info->dpa_range_list[i].length,
                            san_info->fill_value);
        if (rc) {
            goto exit;
        }
    }
exit:
    g_free(ct3d->media_op_sanitize);
    ct3d->media_op_sanitize = NULL;
}

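/*
 * Worked example (illustrative only): validate_dpa_addr() enforces 64-byte
 * cache-line granularity before any range is queued for sanitization.
 * With CXL_CACHE_LINE_SIZE == 64:
 *
 *     validate_dpa_addr(ct3d, 0x1000, 0x200);   // 0, both 64-byte aligned
 *     validate_dpa_addr(ct3d, 0x1020, 0x200);   // -EINVAL, DPA misaligned
 *     validate_dpa_addr(ct3d, 0x1000, 0x1f0);   // -EINVAL, length misaligned
 */
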
enum {
    MEDIA_OP_CLASS_GENERAL = 0x0,
#define MEDIA_OP_GEN_SUBC_DISCOVERY 0x0
    MEDIA_OP_CLASS_SANITIZE = 0x1,
#define MEDIA_OP_SAN_SUBC_SANITIZE 0x0
#define MEDIA_OP_SAN_SUBC_ZERO 0x1
};

struct media_op_supported_list_entry {
    uint8_t media_op_class;
    uint8_t media_op_subclass;
};

struct media_op_discovery_out_pl {
    uint64_t dpa_range_granularity;
    uint16_t total_supported_operations;
    uint16_t num_of_supported_operations;
    struct media_op_supported_list_entry entry[];
} QEMU_PACKED;

static const struct media_op_supported_list_entry media_op_matrix[] = {
    { MEDIA_OP_CLASS_GENERAL, MEDIA_OP_GEN_SUBC_DISCOVERY },
    { MEDIA_OP_CLASS_SANITIZE, MEDIA_OP_SAN_SUBC_SANITIZE },
    { MEDIA_OP_CLASS_SANITIZE, MEDIA_OP_SAN_SUBC_ZERO },
};

static CXLRetCode media_operations_discovery(uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out)
{
    struct {
        uint8_t media_operation_class;
        uint8_t media_operation_subclass;
        uint8_t rsvd[2];
        uint32_t dpa_range_count;
        struct {
            uint16_t start_index;
            uint16_t num_ops;
        } discovery_osa;
    } QEMU_PACKED *media_op_in_disc_pl = (void *)payload_in;
    struct media_op_discovery_out_pl *media_out_pl =
        (struct media_op_discovery_out_pl *)payload_out;
    int num_ops, start_index, i;
    int count = 0;

    if (len_in < sizeof(*media_op_in_disc_pl)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    num_ops = media_op_in_disc_pl->discovery_osa.num_ops;
    start_index = media_op_in_disc_pl->discovery_osa.start_index;

    /*
     * Per CXL r3.2 section 8.2.10.9.5.3, dpa_range_count must be zero and
     * the start index must not exceed the total number of entries for the
     * discovery subclass command.
     */
    if (media_op_in_disc_pl->dpa_range_count ||
        start_index > ARRAY_SIZE(media_op_matrix)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    media_out_pl->dpa_range_granularity = CXL_CACHE_LINE_SIZE;
    media_out_pl->total_supported_operations =
        ARRAY_SIZE(media_op_matrix);
    if (num_ops > 0) {
        /* Clamp to the table size so a large num_ops cannot read past it */
        for (i = start_index; i < ARRAY_SIZE(media_op_matrix); i++) {
            media_out_pl->entry[count].media_op_class =
                media_op_matrix[i].media_op_class;
            media_out_pl->entry[count].media_op_subclass =
                media_op_matrix[i].media_op_subclass;
            count++;
            if (count == num_ops) {
                break;
            }
        }
    }

    media_out_pl->num_of_supported_operations = count;
    *len_out = sizeof(*media_out_pl) + count * sizeof(*media_out_pl->entry);
    return CXL_MBOX_SUCCESS;
}

static CXLRetCode media_operations_sanitize(CXLType3Dev *ct3d,
                                            uint8_t *payload_in,
                                            size_t len_in,
                                            uint8_t *payload_out,
                                            size_t *len_out,
                                            uint8_t fill_value,
                                            CXLCCI *cci)
{
    struct media_operations_sanitize {
        uint8_t media_operation_class;
        uint8_t media_operation_subclass;
        uint8_t rsvd[2];
        uint32_t dpa_range_count;
        struct dpa_range_list_entry dpa_range_list[];
    } QEMU_PACKED *media_op_in_sanitize_pl = (void *)payload_in;
    uint32_t dpa_range_count = media_op_in_sanitize_pl->dpa_range_count;
    uint64_t total_mem = 0;
    size_t dpa_range_list_size;
    int secs = 0, i;

    if (dpa_range_count == 0) {
        return CXL_MBOX_SUCCESS;
    }

    dpa_range_list_size = dpa_range_count * sizeof(struct dpa_range_list_entry);
    if (len_in < (sizeof(*media_op_in_sanitize_pl) + dpa_range_list_size)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    for (i = 0; i < dpa_range_count; i++) {
        uint64_t start_dpa =
            media_op_in_sanitize_pl->dpa_range_list[i].starting_dpa;
        uint64_t length = media_op_in_sanitize_pl->dpa_range_list[i].length;

        if (validate_dpa_addr(ct3d, start_dpa, length)) {
            return CXL_MBOX_INVALID_INPUT;
        }
        total_mem += length;
    }
    ct3d->media_op_sanitize = g_malloc0(sizeof(struct CXLSanitizeInfo) +
                                        dpa_range_list_size);

    ct3d->media_op_sanitize->dpa_range_count = dpa_range_count;
    ct3d->media_op_sanitize->fill_value = fill_value;
    memcpy(ct3d->media_op_sanitize->dpa_range_list,
           media_op_in_sanitize_pl->dpa_range_list,
           dpa_range_list_size);
    secs = get_sanitize_duration(total_mem >> 20);

    /* EBUSY other bg cmds as of now */
    cci->bg.runtime = secs * 1000UL;
    *len_out = 0;
    /*
     * media op sanitize is targeted so no need to disable media or
     * clear event logs
     */
    return CXL_MBOX_BG_STARTED;
}

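/*
 * Worked example (illustrative only): the sanitize subclass payload is the
 * 8-byte common header followed by one 16-byte dpa_range_list_entry per
 * range, so a request covering two ranges must arrive with:
 *
 *     len_in >= sizeof(struct media_operations_sanitize)
 *               + 2 * sizeof(struct dpa_range_list_entry);   // 8 + 32 bytes
 */
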
static CXLRetCode cmd_media_operations(const struct cxl_cmd *cmd,
                                       uint8_t *payload_in,
                                       size_t len_in,
                                       uint8_t *payload_out,
                                       size_t *len_out,
                                       CXLCCI *cci)
{
    struct {
        uint8_t media_operation_class;
        uint8_t media_operation_subclass;
        uint8_t rsvd[2];
        uint32_t dpa_range_count;
    } QEMU_PACKED *media_op_in_common_pl = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint8_t media_op_cl = 0;
    uint8_t media_op_subclass = 0;

    if (len_in < sizeof(*media_op_in_common_pl)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    media_op_cl = media_op_in_common_pl->media_operation_class;
    media_op_subclass = media_op_in_common_pl->media_operation_subclass;

    switch (media_op_cl) {
    case MEDIA_OP_CLASS_GENERAL:
        if (media_op_subclass != MEDIA_OP_GEN_SUBC_DISCOVERY) {
            return CXL_MBOX_UNSUPPORTED;
        }

        return media_operations_discovery(payload_in, len_in, payload_out,
                                          len_out);
    case MEDIA_OP_CLASS_SANITIZE:
        switch (media_op_subclass) {
        case MEDIA_OP_SAN_SUBC_SANITIZE:
            return media_operations_sanitize(ct3d, payload_in, len_in,
                                             payload_out, len_out, 0xF,
                                             cci);
        case MEDIA_OP_SAN_SUBC_ZERO:
            return media_operations_sanitize(ct3d, payload_in, len_in,
                                             payload_out, len_out, 0,
                                             cci);
        default:
            return CXL_MBOX_UNSUPPORTED;
        }
    default:
        return CXL_MBOX_UNSUPPORTED;
    }
}

static CXLRetCode cmd_get_security_state(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    uint32_t *state = (uint32_t *)payload_out;

    *state = 0;
    *len_out = 4;
    return CXL_MBOX_SUCCESS;
}

/*
 * CXL r3.1 Section 8.2.9.9.4.1: Get Poison List (Opcode 4300h)
 *
 * This is very inefficient, but good enough for now!
 * Also the payload will always fit, so no need to handle the MORE flag and
 * make this stateful. We may want to allow longer poison lists to aid
 * testing that kernel functionality.
 */
static CXLRetCode cmd_media_get_poison_list(const struct cxl_cmd *cmd,
                                            uint8_t *payload_in,
                                            size_t len_in,
                                            uint8_t *payload_out,
                                            size_t *len_out,
                                            CXLCCI *cci)
{
    struct get_poison_list_pl {
        uint64_t pa;
        uint64_t length;
    } QEMU_PACKED;

    struct get_poison_list_out_pl {
        uint8_t flags;
        uint8_t rsvd1;
        uint64_t overflow_timestamp;
        uint16_t count;
        uint8_t rsvd2[0x14];
        struct {
            uint64_t addr;
            uint32_t length;
            uint32_t resv;
        } QEMU_PACKED records[];
    } QEMU_PACKED;

    struct get_poison_list_pl *in = (void *)payload_in;
    struct get_poison_list_out_pl *out = (void *)payload_out;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint16_t record_count = 0, i = 0;
    uint64_t query_start, query_length;
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLPoison *ent;
    uint16_t out_pl_len;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    QLIST_FOREACH(ent, poison_list, node) {
        /* Check for no overlap */
        if (!ranges_overlap(ent->start, ent->length,
                            query_start, query_length)) {
            continue;
        }
        record_count++;
    }
    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    QLIST_FOREACH(ent, poison_list, node) {
        uint64_t start, stop;

        /* Check for no overlap */
        if (!ranges_overlap(ent->start, ent->length,
                            query_start, query_length)) {
            continue;
        }

        /* Deal with overlap */
        start = MAX(ROUND_DOWN(ent->start, 64ull), query_start);
        stop = MIN(ROUND_DOWN(ent->start, 64ull) + ent->length,
                   query_start + query_length);
        stq_le_p(&out->records[i].addr, start | (ent->type & 0x7));
        stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE);
        i++;
    }
    if (ct3d->poison_list_overflowed) {
        out->flags = (1 << 1);
        stq_le_p(&out->overflow_timestamp, ct3d->poison_list_overflow_ts);
    }
    if (scan_media_running(cci)) {
        out->flags |= (1 << 2);
    }

    stw_le_p(&out->count, record_count);
    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.4.2: Inject Poison (Opcode 4301h) */
static CXLRetCode cmd_media_inject_poison(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLPoison *ent;
    struct inject_poison_pl {
        uint64_t dpa;
    };
    struct inject_poison_pl *in = (void *)payload_in;
    uint64_t dpa = ldq_le_p(&in->dpa);
    CXLPoison *p;

    QLIST_FOREACH(ent, poison_list, node) {
        if (dpa >= ent->start &&
            dpa + CXL_CACHE_LINE_SIZE <= ent->start + ent->length) {
            return CXL_MBOX_SUCCESS;
        }
    }
    /*
     * Freeze the list if there is an ongoing scan media operation.
     */
    if (scan_media_running(cci)) {
        /*
         * XXX: Spec is ambiguous - is this case considered
         * a successful return despite not adding to the list?
         */
        goto success;
    }

    if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
        return CXL_MBOX_INJECT_POISON_LIMIT;
    }
    p = g_new0(CXLPoison, 1);

    p->length = CXL_CACHE_LINE_SIZE;
    p->start = dpa;
    p->type = CXL_POISON_TYPE_INJECTED;

    /*
     * Possible todo: Merge with existing entry if next to it and if same type
     */
    QLIST_INSERT_HEAD(poison_list, p, node);
    ct3d->poison_list_cnt++;
success:
    *len_out = 0;

    return CXL_MBOX_SUCCESS;
}

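/*
 * Worked example (illustrative only): each returned poison record packs the
 * 64-byte-aligned DPA and the source type into one 64-bit field, with the
 * length expressed in cache lines. Decoding a record therefore looks like:
 *
 *     uint64_t raw = ldq_le_p(&out->records[i].addr);
 *     uint64_t dpa = raw & ~0x3full;     // aligned address bits
 *     uint8_t type = raw & 0x7;          // e.g. CXL_POISON_TYPE_INJECTED
 *     uint64_t bytes = ldl_le_p(&out->records[i].length) * CXL_CACHE_LINE_SIZE;
 */
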
/* CXL r3.1 Section 8.2.9.9.4.3: Clear Poison (Opcode 4302h) */
static CXLRetCode cmd_media_clear_poison(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    struct clear_poison_pl {
        uint64_t dpa;
        uint8_t data[64];
    };
    CXLPoison *ent;
    uint64_t dpa;

    struct clear_poison_pl *in = (void *)payload_in;

    dpa = ldq_le_p(&in->dpa);
    if (dpa + CXL_CACHE_LINE_SIZE > cxl_dstate->static_mem_size +
        ct3d->dc.total_capacity) {
        return CXL_MBOX_INVALID_PA;
    }

    /* Clearing a region with no poison is not an error so always do so */
    if (cvc->set_cacheline) {
        if (!cvc->set_cacheline(ct3d, dpa, in->data)) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
    }

    /*
     * Freeze the list if there is an ongoing scan media operation.
     */
    if (scan_media_running(cci)) {
        /*
         * XXX: Spec is ambiguous - is this case considered
         * a successful return despite not removing from the list?
         */
        goto success;
    }

    QLIST_FOREACH(ent, poison_list, node) {
        /*
         * Test for contained in entry. Simpler than the general case
         * as we are clearing 64 bytes and entries are 64 byte aligned.
         */
        if ((dpa >= ent->start) && (dpa < ent->start + ent->length)) {
            break;
        }
    }
    if (!ent) {
        goto success;
    }

    QLIST_REMOVE(ent, node);
    ct3d->poison_list_cnt--;

    if (dpa > ent->start) {
        CXLPoison *frag;
        /* Cannot overflow as replacing existing entry */

        frag = g_new0(CXLPoison, 1);

        frag->start = ent->start;
        frag->length = dpa - ent->start;
        frag->type = ent->type;

        QLIST_INSERT_HEAD(poison_list, frag, node);
        ct3d->poison_list_cnt++;
    }

    if (dpa + CXL_CACHE_LINE_SIZE < ent->start + ent->length) {
        CXLPoison *frag;

        if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
            cxl_set_poison_list_overflowed(ct3d);
        } else {
            frag = g_new0(CXLPoison, 1);

            frag->start = dpa + CXL_CACHE_LINE_SIZE;
            frag->length = ent->start + ent->length - frag->start;
            frag->type = ent->type;
            QLIST_INSERT_HEAD(poison_list, frag, node);
            ct3d->poison_list_cnt++;
        }
    }
    /* Any fragments have been added; now free the original entry */
    g_free(ent);
success:
    *len_out = 0;

    return CXL_MBOX_SUCCESS;
}

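/*
 * Worked example (illustrative only): clearing one cache line from the
 * middle of a poison entry splits it in two. Given an entry covering
 * [0x1000, 0x1100) and a Clear Poison at dpa == 0x1040:
 *
 *     low fragment:  start = 0x1000, length = 0x40   (dpa - ent->start)
 *     high fragment: start = 0x1080, length = 0x80   (up to old end 0x1100)
 *
 * The original entry is freed, so the net change to poison_list_cnt is +1.
 */
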
/*
 * CXL r3.1 section 8.2.9.9.4.4: Get Scan Media Capabilities
 */
static CXLRetCode
cmd_media_get_scan_media_capabilities(const struct cxl_cmd *cmd,
                                      uint8_t *payload_in,
                                      size_t len_in,
                                      uint8_t *payload_out,
                                      size_t *len_out,
                                      CXLCCI *cci)
{
    struct get_scan_media_capabilities_pl {
        uint64_t pa;
        uint64_t length;
    } QEMU_PACKED;

    struct get_scan_media_capabilities_out_pl {
        uint32_t estimated_runtime_ms;
    };

    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    struct get_scan_media_capabilities_pl *in = (void *)payload_in;
    struct get_scan_media_capabilities_out_pl *out = (void *)payload_out;
    uint64_t query_start;
    uint64_t query_length;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    if (query_start + query_length > cxl_dstate->static_mem_size) {
        return CXL_MBOX_INVALID_PA;
    }

    /*
     * Just use 400 nanosecond access/read latency + 100 ns for
     * the cost of updating the poison list. For small enough
     * chunks return at least 1 ms.
     */
    stl_le_p(&out->estimated_runtime_ms,
             MAX(1, query_length * (0.0005L / 64)));

    *len_out = sizeof(*out);
    return CXL_MBOX_SUCCESS;
}

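/*
 * Worked example (illustrative only): at 500 ns per 64-byte line the
 * estimate works out to 0.0005 ms per line, so scanning 256 MiB gives
 *
 *     (256 * MiB / 64) * 0.0005 ms = 4194304 * 0.0005 ms ~= 2097 ms
 *
 * while queries below about 128 KiB clamp to the 1 ms floor.
 */
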
static void __do_scan_media(CXLType3Dev *ct3d)
{
    CXLPoison *ent;
    unsigned int results_cnt = 0;

    QLIST_FOREACH(ent, &ct3d->scan_media_results, node) {
        results_cnt++;
    }

    /* only scan media may clear the overflow */
    if (ct3d->poison_list_overflowed &&
        ct3d->poison_list_cnt == results_cnt) {
        cxl_clear_poison_list_overflowed(ct3d);
    }
    /* scan media has run since last conventional reset */
    ct3d->scan_media_hasrun = true;
}

/*
 * CXL r3.1 section 8.2.9.9.4.5: Scan Media
 */
static CXLRetCode cmd_media_scan_media(const struct cxl_cmd *cmd,
                                       uint8_t *payload_in,
                                       size_t len_in,
                                       uint8_t *payload_out,
                                       size_t *len_out,
                                       CXLCCI *cci)
{
    struct scan_media_pl {
        uint64_t pa;
        uint64_t length;
        uint8_t flags;
    } QEMU_PACKED;

    struct scan_media_pl *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    uint64_t query_start;
    uint64_t query_length;
    CXLPoison *ent, *next;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    if (query_start + query_length > cxl_dstate->static_mem_size) {
        return CXL_MBOX_INVALID_PA;
    }
    if (ct3d->dc.num_regions && query_start + query_length >=
        cxl_dstate->static_mem_size + ct3d->dc.total_capacity) {
        return CXL_MBOX_INVALID_PA;
    }

    if (in->flags == 0) { /* TODO */
        qemu_log_mask(LOG_UNIMP,
                      "Scan Media Event Log is unsupported\n");
    }

    /* any previous results are discarded upon a new Scan Media */
    QLIST_FOREACH_SAFE(ent, &ct3d->scan_media_results, node, next) {
        QLIST_REMOVE(ent, node);
        g_free(ent);
    }

    /* kill the poison list - it will be recreated */
    if (ct3d->poison_list_overflowed) {
        QLIST_FOREACH_SAFE(ent, &ct3d->poison_list, node, next) {
            QLIST_REMOVE(ent, node);
            g_free(ent);
            ct3d->poison_list_cnt--;
        }
    }

    /*
     * Scan the backup list and move corresponding entries
     * into the results list, updating the poison list
     * when possible.
     */
    QLIST_FOREACH_SAFE(ent, &ct3d->poison_list_bkp, node, next) {
        CXLPoison *res;

        if (ent->start >= query_start + query_length ||
            ent->start + ent->length <= query_start) {
            continue;
        }

        /*
         * If a Get Poison List cmd comes in while this
         * scan is being done, it will see the new complete
         * list, while setting the respective flag.
         */
        if (ct3d->poison_list_cnt < CXL_POISON_LIST_LIMIT) {
            CXLPoison *p = g_new0(CXLPoison, 1);

            p->start = ent->start;
            p->length = ent->length;
            p->type = ent->type;
            QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
            ct3d->poison_list_cnt++;
        }

        res = g_new0(CXLPoison, 1);
        res->start = ent->start;
        res->length = ent->length;
        res->type = ent->type;
        QLIST_INSERT_HEAD(&ct3d->scan_media_results, res, node);

        QLIST_REMOVE(ent, node);
        g_free(ent);
    }

    cci->bg.runtime = MAX(1, query_length * (0.0005L / 64));
    *len_out = 0;

    return CXL_MBOX_BG_STARTED;
}

/*
 * CXL r3.1 section 8.2.9.9.4.6: Get Scan Media Results
 */
static CXLRetCode cmd_media_get_scan_media_results(const struct cxl_cmd *cmd,
                                                   uint8_t *payload_in,
                                                   size_t len_in,
                                                   uint8_t *payload_out,
                                                   size_t *len_out,
                                                   CXLCCI *cci)
{
    struct get_scan_media_results_out_pl {
        uint64_t dpa_restart;
        uint64_t length;
        uint8_t flags;
        uint8_t rsvd1;
        uint16_t count;
        uint8_t rsvd2[0xc];
        struct {
            uint64_t addr;
            uint32_t length;
            uint32_t resv;
        } QEMU_PACKED records[];
    } QEMU_PACKED;

    struct get_scan_media_results_out_pl *out = (void *)payload_out;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLPoisonList *scan_media_results = &ct3d->scan_media_results;
    CXLPoison *ent, *next;
    uint16_t total_count = 0, record_count = 0, i = 0;
    uint16_t out_pl_len;

    if (!ct3d->scan_media_hasrun) {
        return CXL_MBOX_UNSUPPORTED;
    }

    /*
     * Calculate limits, all entries are within the same address range of the
     * last scan media call.
     */
    QLIST_FOREACH(ent, scan_media_results, node) {
        size_t rec_size = record_count * sizeof(out->records[0]);

        if (sizeof(*out) + rec_size < CXL_MAILBOX_MAX_PAYLOAD_SIZE) {
            record_count++;
        }
        total_count++;
    }

    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    memset(out, 0, out_pl_len);
    QLIST_FOREACH_SAFE(ent, scan_media_results, node, next) {
        uint64_t start, stop;

        if (i == record_count) {
            break;
        }

        start = ROUND_DOWN(ent->start, 64ull);
        stop = ROUND_DOWN(ent->start, 64ull) + ent->length;
        stq_le_p(&out->records[i].addr, start);
        stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE);
        i++;

        /* consume the returning entry */
        QLIST_REMOVE(ent, node);
        g_free(ent);
    }

    stw_le_p(&out->count, record_count);
    if (total_count > record_count) {
        out->flags = (1 << 0); /* More Media Error Records */
    }

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}

/*
 * CXL r3.1 section 8.2.9.9.9.1: Get Dynamic Capacity Configuration
 * (Opcode: 4800h)
 */
static CXLRetCode cmd_dcd_get_dyn_cap_config(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    struct {
        uint8_t region_cnt;
        uint8_t start_rid;
    } QEMU_PACKED *in = (void *)payload_in;
    struct {
        uint8_t num_regions;
        uint8_t regions_returned;
        uint8_t rsvd1[6];
        struct {
            uint64_t base;
            uint64_t decode_len;
            uint64_t region_len;
            uint64_t block_size;
            uint32_t dsmadhandle;
            uint8_t flags;
            uint8_t rsvd2[3];
        } QEMU_PACKED records[];
    } QEMU_PACKED *out = (void *)payload_out;
    struct {
        uint32_t num_extents_supported;
        uint32_t num_extents_available;
        uint32_t num_tags_supported;
        uint32_t num_tags_available;
    } QEMU_PACKED *extra_out;
    uint16_t record_count;
    uint16_t i;
    uint16_t out_pl_len;
    uint8_t start_rid;

    start_rid = in->start_rid;
    if (start_rid >= ct3d->dc.num_regions) {
        return CXL_MBOX_INVALID_INPUT;
    }

    record_count = MIN(ct3d->dc.num_regions - in->start_rid, in->region_cnt);

    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    extra_out = (void *)(payload_out + out_pl_len);
    out_pl_len += sizeof(*extra_out);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    out->num_regions = ct3d->dc.num_regions;
    out->regions_returned = record_count;
    for (i = 0; i < record_count; i++) {
        stq_le_p(&out->records[i].base,
                 ct3d->dc.regions[start_rid + i].base);
        stq_le_p(&out->records[i].decode_len,
                 ct3d->dc.regions[start_rid + i].decode_len /
                 CXL_CAPACITY_MULTIPLIER);
        stq_le_p(&out->records[i].region_len,
                 ct3d->dc.regions[start_rid + i].len);
        stq_le_p(&out->records[i].block_size,
                 ct3d->dc.regions[start_rid + i].block_size);
        stl_le_p(&out->records[i].dsmadhandle,
                 ct3d->dc.regions[start_rid + i].dsmadhandle);
        out->records[i].flags = ct3d->dc.regions[start_rid + i].flags;
    }
    /*
     * TODO: Assign values once extents and tags are introduced
     * to use.
     */
    stl_le_p(&extra_out->num_extents_supported, CXL_NUM_EXTENTS_SUPPORTED);
    stl_le_p(&extra_out->num_extents_available, CXL_NUM_EXTENTS_SUPPORTED -
             ct3d->dc.total_extent_count);
    stl_le_p(&extra_out->num_tags_supported, CXL_NUM_TAGS_SUPPORTED);
    stl_le_p(&extra_out->num_tags_available, CXL_NUM_TAGS_SUPPORTED);

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}

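/*
 * Worked example (illustrative only): the output payload above is the fixed
 * 8-byte header, one 40-byte record per returned region, then the trailing
 * extent/tag counters, so for two regions:
 *
 *     out_pl_len = 8 + 2 * sizeof(out->records[0]) + sizeof(*extra_out)
 *                = 8 + 2 * 40 + 16 = 104 bytes
 */
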
/*
 * CXL r3.1 section 8.2.9.9.9.2:
 * Get Dynamic Capacity Extent List (Opcode 4801h)
 */
static CXLRetCode cmd_dcd_get_dyn_cap_ext_list(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len_in,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    struct {
        uint32_t extent_cnt;
        uint32_t start_extent_id;
    } QEMU_PACKED *in = (void *)payload_in;
    struct {
        uint32_t count;
        uint32_t total_extents;
        uint32_t generation_num;
        uint8_t rsvd[4];
        CXLDCExtentRaw records[];
    } QEMU_PACKED *out = (void *)payload_out;
    uint32_t start_extent_id = in->start_extent_id;
    CXLDCExtentList *extent_list = &ct3d->dc.extents;
    uint16_t record_count = 0, i = 0, record_done = 0;
    uint16_t out_pl_len, size;
    CXLDCExtent *ent;

    if (start_extent_id > ct3d->dc.nr_extents_accepted) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* Page within the accepted extents, consistent with the check above */
    record_count = MIN(in->extent_cnt,
                       ct3d->dc.nr_extents_accepted - start_extent_id);
    size = CXL_MAILBOX_MAX_PAYLOAD_SIZE - sizeof(*out);
    record_count = MIN(record_count, size / sizeof(out->records[0]));
    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);

    stl_le_p(&out->count, record_count);
    stl_le_p(&out->total_extents, ct3d->dc.nr_extents_accepted);
    stl_le_p(&out->generation_num, ct3d->dc.ext_list_gen_seq);

    if (record_count > 0) {
        CXLDCExtentRaw *out_rec = &out->records[record_done];

        QTAILQ_FOREACH(ent, extent_list, node) {
            if (i++ < start_extent_id) {
                continue;
            }
            stq_le_p(&out_rec->start_dpa, ent->start_dpa);
            stq_le_p(&out_rec->len, ent->len);
            memcpy(&out_rec->tag, ent->tag, 0x10);
            stw_le_p(&out_rec->shared_seq, ent->shared_seq);

            record_done++;
            out_rec++;
            if (record_done == record_count) {
                break;
            }
        }
    }

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}

/*
 * Check whether any bit between addr[nr, nr+size) is set,
 * return true if any bit is set, otherwise return false
 */
bool test_any_bits_set(const unsigned long *addr, unsigned long nr,
                       unsigned long size)
{
    unsigned long res = find_next_bit(addr, size + nr, nr);

    return res < nr + size;
}

CXLDCRegion *cxl_find_dc_region(CXLType3Dev *ct3d, uint64_t dpa, uint64_t len)
{
    int i;
    CXLDCRegion *region = &ct3d->dc.regions[0];

    if (dpa < region->base ||
        dpa >= region->base + ct3d->dc.total_capacity) {
        return NULL;
    }

    /*
     * CXL r3.1 section 9.13.3: Dynamic Capacity Device (DCD)
     *
     * Regions are used in increasing-DPA order, with Region 0 being used for
     * the lowest DPA of Dynamic Capacity and Region 7 for the highest DPA.
     * So check from the last region to find where the dpa belongs. Extents
     * that cross multiple regions are not allowed.
     */
    for (i = ct3d->dc.num_regions - 1; i >= 0; i--) {
        region = &ct3d->dc.regions[i];
        if (dpa >= region->base) {
            if (dpa + len > region->base + region->len) {
                return NULL;
            }
            return region;
        }
    }

    return NULL;
}

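/*
 * Usage sketch (illustrative only): with an 8-block bitmap where only
 * block 4 is marked, probing different windows behaves as follows:
 *
 *     unsigned long map[BITS_TO_LONGS(8)] = { 0 };
 *     bitmap_set(map, 4, 1);
 *     test_any_bits_set(map, 0, 4);   // false - blocks [0, 4) are clear
 *     test_any_bits_set(map, 3, 2);   // true  - window [3, 5) covers bit 4
 */
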
void cxl_insert_extent_to_extent_list(CXLDCExtentList *list,
                                      uint64_t dpa,
                                      uint64_t len,
                                      uint8_t *tag,
                                      uint16_t shared_seq)
{
    CXLDCExtent *extent;

    extent = g_new0(CXLDCExtent, 1);
    extent->start_dpa = dpa;
    extent->len = len;
    if (tag) {
        memcpy(extent->tag, tag, 0x10);
    }
    extent->shared_seq = shared_seq;

    QTAILQ_INSERT_TAIL(list, extent, node);
}

void cxl_remove_extent_from_extent_list(CXLDCExtentList *list,
                                        CXLDCExtent *extent)
{
    QTAILQ_REMOVE(list, extent, node);
    g_free(extent);
}

/*
 * Add a new extent to the extent "group" if the group exists;
 * otherwise, create a new group.
 * Return value: the extent group where the extent is inserted.
 */
CXLDCExtentGroup *cxl_insert_extent_to_extent_group(CXLDCExtentGroup *group,
                                                    uint64_t dpa,
                                                    uint64_t len,
                                                    uint8_t *tag,
                                                    uint16_t shared_seq)
{
    if (!group) {
        group = g_new0(CXLDCExtentGroup, 1);
        QTAILQ_INIT(&group->list);
    }
    cxl_insert_extent_to_extent_list(&group->list, dpa, len,
                                     tag, shared_seq);
    return group;
}

void cxl_extent_group_list_insert_tail(CXLDCExtentGroupList *list,
                                       CXLDCExtentGroup *group)
{
    QTAILQ_INSERT_TAIL(list, group, node);
}

uint32_t cxl_extent_group_list_delete_front(CXLDCExtentGroupList *list)
{
    CXLDCExtent *ent, *ent_next;
    CXLDCExtentGroup *group = QTAILQ_FIRST(list);
    uint32_t extents_deleted = 0;

    QTAILQ_REMOVE(list, group, node);
    QTAILQ_FOREACH_SAFE(ent, &group->list, node, ent_next) {
        cxl_remove_extent_from_extent_list(&group->list, ent);
        extents_deleted++;
    }
    g_free(group);

    return extents_deleted;
}

/*
 * CXL r3.1 Table 8-168: Add Dynamic Capacity Response Input Payload
 * CXL r3.1 Table 8-170: Release Dynamic Capacity Input Payload
 */
typedef struct CXLUpdateDCExtentListInPl {
    uint32_t num_entries_updated;
    uint8_t flags;
    uint8_t rsvd[3];
    /* CXL r3.1 Table 8-169: Updated Extent */
    struct {
        uint64_t start_dpa;
        uint64_t len;
        uint8_t rsvd[8];
    } QEMU_PACKED updated_entries[];
} QEMU_PACKED CXLUpdateDCExtentListInPl;

/*
 * For the extents in the extent list to operate, check whether they are valid:
 * 1. The extent should be in the range of a valid DC region;
 * 2. The extent should not cross multiple regions;
 * 3. The start DPA and the length of the extent should align with the block
 *    size of the region;
 * 4. The address ranges of multiple extents in the list should not overlap.
 */
static CXLRetCode cxl_detect_malformed_extent_list(CXLType3Dev *ct3d,
        const CXLUpdateDCExtentListInPl *in)
{
    uint64_t min_block_size = UINT64_MAX;
    CXLDCRegion *region;
    CXLDCRegion *lastregion = &ct3d->dc.regions[ct3d->dc.num_regions - 1];
    g_autofree unsigned long *blk_bitmap = NULL;
    uint64_t dpa, len;
    uint32_t i;

    for (i = 0; i < ct3d->dc.num_regions; i++) {
        region = &ct3d->dc.regions[i];
        min_block_size = MIN(min_block_size, region->block_size);
    }

    blk_bitmap = bitmap_new((lastregion->base + lastregion->len -
                             ct3d->dc.regions[0].base) / min_block_size);

    for (i = 0; i < in->num_entries_updated; i++) {
        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        region = cxl_find_dc_region(ct3d, dpa, len);
        if (!region) {
            return CXL_MBOX_INVALID_PA;
        }

        dpa -= ct3d->dc.regions[0].base;
        if (dpa % region->block_size || len % region->block_size) {
            return CXL_MBOX_INVALID_EXTENT_LIST;
        }
        /* the dpa range already covered by some other extents in the list */
        if (test_any_bits_set(blk_bitmap, dpa / min_block_size,
                              len / min_block_size)) {
            return CXL_MBOX_INVALID_EXTENT_LIST;
        }
        bitmap_set(blk_bitmap, dpa / min_block_size, len / min_block_size);
    }

    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cxl_dcd_add_dyn_cap_rsp_dry_run(CXLType3Dev *ct3d,
        const CXLUpdateDCExtentListInPl *in)
{
    uint32_t i;
    CXLDCExtent *ent;
    CXLDCExtentGroup *ext_group;
    uint64_t dpa, len;
    Range range1, range2;

    for (i = 0; i < in->num_entries_updated; i++) {
        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        range_init_nofail(&range1, dpa, len);

        /*
         * The host-accepted DPA range must be contained by the first extent
         * group in the pending list
         */
        ext_group = QTAILQ_FIRST(&ct3d->dc.extents_pending);
        if (!cxl_extents_contains_dpa_range(&ext_group->list, dpa, len)) {
            return CXL_MBOX_INVALID_PA;
        }

        /* to-be-added range should not overlap with range already accepted */
        QTAILQ_FOREACH(ent, &ct3d->dc.extents, node) {
            range_init_nofail(&range2, ent->start_dpa, ent->len);
            if (range_overlaps_range(&range1, &range2)) {
                return CXL_MBOX_INVALID_PA;
            }
        }
    }
    return CXL_MBOX_SUCCESS;
}

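/*
 * Worked example (illustrative only): with a single region at base 0 and a
 * 2 MiB block size, an update list containing extents (0x200000, 0x400000)
 * and (0x400000, 0x200000) fails rule 4: the first extent marks bitmap
 * blocks [1, 3), so the second extent's block (0x400000 / 0x200000 == 2) is
 * already set and cxl_detect_malformed_extent_list() returns
 * CXL_MBOX_INVALID_EXTENT_LIST.
 */
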
/*
 * CXL r3.1 section 8.2.9.9.9.3: Add Dynamic Capacity Response (Opcode 4802h)
 * An extent is added to the extent list and becomes usable only after the
 * response is processed successfully.
 */
static CXLRetCode cmd_dcd_add_dyn_cap_rsp(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLUpdateDCExtentListInPl *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDCExtentList *extent_list = &ct3d->dc.extents;
    uint32_t i, num;
    uint64_t dpa, len;
    CXLRetCode ret;

    if (len_in < sizeof(*in)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (in->num_entries_updated == 0) {
        num = cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);
        ct3d->dc.total_extent_count -= num;
        return CXL_MBOX_SUCCESS;
    }

    if (len_in <
        sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    /* Adding the extents would exceed the device's extent tracking ability */
    if (in->num_entries_updated + ct3d->dc.total_extent_count >
        CXL_NUM_EXTENTS_SUPPORTED) {
        return CXL_MBOX_RESOURCES_EXHAUSTED;
    }

    ret = cxl_detect_malformed_extent_list(ct3d, in);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    ret = cxl_dcd_add_dyn_cap_rsp_dry_run(ct3d, in);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    for (i = 0; i < in->num_entries_updated; i++) {
        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        cxl_insert_extent_to_extent_list(extent_list, dpa, len, NULL, 0);
        ct3d->dc.total_extent_count += 1;
        ct3d->dc.nr_extents_accepted += 1;
        ct3_set_region_block_backed(ct3d, dpa, len);
    }
    /* Remove the first extent group in the pending list */
    num = cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);
    ct3d->dc.total_extent_count -= num;

    return CXL_MBOX_SUCCESS;
}

/*
 * Copy extent list from src to dst
 * Return value: number of extents copied
 */
static uint32_t copy_extent_list(CXLDCExtentList *dst,
                                 const CXLDCExtentList *src)
{
    uint32_t cnt = 0;
    CXLDCExtent *ent;

    if (!dst || !src) {
        return 0;
    }

    QTAILQ_FOREACH(ent, src, node) {
        cxl_insert_extent_to_extent_list(dst, ent->start_dpa, ent->len,
                                         ent->tag, ent->shared_seq);
        cnt++;
    }
    return cnt;
}

static CXLRetCode cxl_dc_extent_release_dry_run(CXLType3Dev *ct3d,
        const CXLUpdateDCExtentListInPl *in, CXLDCExtentList *updated_list,
        uint32_t *updated_list_size)
{
    CXLDCExtent *ent, *ent_next;
    uint64_t dpa, len;
    uint32_t i;
    int cnt_delta = 0;
    CXLRetCode ret = CXL_MBOX_SUCCESS;

    QTAILQ_INIT(updated_list);
    copy_extent_list(updated_list, &ct3d->dc.extents);

    for (i = 0; i < in->num_entries_updated; i++) {
        Range range;

        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        /* Check if the DPA range is not fully backed with valid extents */
        if (!ct3_test_region_block_backed(ct3d, dpa, len)) {
            ret = CXL_MBOX_INVALID_PA;
            goto free_and_exit;
        }

        /* After this point, extent overflow is the only error that can happen */
        while (len > 0) {
            QTAILQ_FOREACH(ent, updated_list, node) {
                range_init_nofail(&range, ent->start_dpa, ent->len);

                if (range_contains(&range, dpa)) {
                    uint64_t len1, len2 = 0, len_done = 0;
                    uint64_t ent_start_dpa = ent->start_dpa;
                    uint64_t ent_len = ent->len;

                    len1 = dpa - ent->start_dpa;
                    /* Found the extent or the subset of an existing extent */
                    if (range_contains(&range, dpa + len - 1)) {
                        len2 = ent_start_dpa + ent_len - dpa - len;
                    } else {
                        dpa = ent_start_dpa + ent_len;
                    }
                    len_done = ent_len - len1 - len2;

                    cxl_remove_extent_from_extent_list(updated_list, ent);
                    cnt_delta--;

                    if (len1) {
                        cxl_insert_extent_to_extent_list(updated_list,
                                                         ent_start_dpa,
                                                         len1, NULL, 0);
                        cnt_delta++;
                    }
                    if (len2) {
                        cxl_insert_extent_to_extent_list(updated_list,
                                                         dpa + len,
                                                         len2, NULL, 0);
                        cnt_delta++;
                    }

                    if (cnt_delta + ct3d->dc.total_extent_count >
                        CXL_NUM_EXTENTS_SUPPORTED) {
                        ret = CXL_MBOX_RESOURCES_EXHAUSTED;
                        goto free_and_exit;
                    }

                    len -= len_done;
                    break;
                }
            }
        }
    }
free_and_exit:
    if (ret != CXL_MBOX_SUCCESS) {
        QTAILQ_FOREACH_SAFE(ent, updated_list, node, ent_next) {
            cxl_remove_extent_from_extent_list(updated_list, ent);
        }
        *updated_list_size = 0;
    } else {
        *updated_list_size = ct3d->dc.nr_extents_accepted + cnt_delta;
    }

    return ret;
}

/*
 * CXL r3.1 section 8.2.9.9.9.4: Release Dynamic Capacity (Opcode 4803h)
 */
static CXLRetCode cmd_dcd_release_dyn_cap(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLUpdateDCExtentListInPl *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDCExtentList updated_list;
    CXLDCExtent *ent, *ent_next;
    uint32_t updated_list_size;
    CXLRetCode ret;

    if (len_in < sizeof(*in)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (in->num_entries_updated == 0) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (len_in <
        sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    ret = cxl_detect_malformed_extent_list(ct3d, in);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    ret = cxl_dc_extent_release_dry_run(ct3d, in, &updated_list,
                                        &updated_list_size);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    /*
     * If the dry run release passes, the returned updated_list is the
     * updated extent list. Clear the extents in the accepted list, copy
     * the extents from updated_list into it, and update the extent count.
     */
    QTAILQ_FOREACH_SAFE(ent, &ct3d->dc.extents, node, ent_next) {
        ct3_clear_region_block_backed(ct3d, ent->start_dpa, ent->len);
        cxl_remove_extent_from_extent_list(&ct3d->dc.extents, ent);
    }
    copy_extent_list(&ct3d->dc.extents, &updated_list);
    QTAILQ_FOREACH_SAFE(ent, &updated_list, node, ent_next) {
        ct3_set_region_block_backed(ct3d, ent->start_dpa, ent->len);
        cxl_remove_extent_from_extent_list(&updated_list, ent);
    }
    ct3d->dc.total_extent_count += (updated_list_size -
                                    ct3d->dc.nr_extents_accepted);

    ct3d->dc.nr_extents_accepted = updated_list_size;

    return CXL_MBOX_SUCCESS;
}

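/*
 * Worked example (illustrative only): releasing the middle of an accepted
 * extent splits it in the dry-run list. With an accepted extent
 * (start_dpa 0x0, len 0x30000) and a release request (0x10000, 0x10000):
 *
 *     len1 = 0x10000   -> keep head extent (0x0, 0x10000)
 *     len2 = 0x10000   -> keep tail extent (0x20000, 0x10000)
 *
 * cnt_delta ends at +1 (one extent removed, two inserted), so
 * updated_list_size is nr_extents_accepted + 1.
 */
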
/* CXL r3.2 section 7.6.7.6.1: Get DCD Info (Opcode 5600h) */
static CXLRetCode cmd_fm_get_dcd_info(const struct cxl_cmd *cmd,
                                      uint8_t *payload_in,
                                      size_t len_in,
                                      uint8_t *payload_out,
                                      size_t *len_out,
                                      CXLCCI *cci)
{
    struct {
        uint8_t num_hosts;
        uint8_t num_regions_supported;
        uint8_t rsvd1[2];
        uint16_t supported_add_sel_policy_bitmask;
        uint8_t rsvd2[2];
        uint16_t supported_removal_policy_bitmask;
        uint8_t sanitize_on_release_bitmask;
        uint8_t rsvd3;
        uint64_t total_dynamic_capacity;
        uint64_t region_blk_size_bitmasks[8];
    } QEMU_PACKED *out = (void *)payload_out;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDCRegion *region;
    int i;

    out->num_hosts = 1;
    out->num_regions_supported = ct3d->dc.num_regions;
    stw_le_p(&out->supported_add_sel_policy_bitmask,
             BIT(CXL_EXTENT_SELECTION_POLICY_PRESCRIPTIVE));
    stw_le_p(&out->supported_removal_policy_bitmask,
             BIT(CXL_EXTENT_REMOVAL_POLICY_PRESCRIPTIVE));
    out->sanitize_on_release_bitmask = 0;

    stq_le_p(&out->total_dynamic_capacity,
             ct3d->dc.total_capacity / CXL_CAPACITY_MULTIPLIER);

    for (i = 0; i < ct3d->dc.num_regions; i++) {
        region = &ct3d->dc.regions[i];
        memcpy(&out->region_blk_size_bitmasks[i],
               &region->supported_blk_size_bitmask,
               sizeof(out->region_blk_size_bitmasks[i]));
    }

    *len_out = sizeof(*out);
    return CXL_MBOX_SUCCESS;
}

static void build_dsmas_flags(uint8_t *flags, CXLDCRegion *region)
{
    *flags = 0;

    if (region->nonvolatile) {
        *flags |= BIT(CXL_DSMAS_FLAGS_NONVOLATILE);
    }
    if (region->sharable) {
        *flags |= BIT(CXL_DSMAS_FLAGS_SHARABLE);
    }
    if (region->hw_managed_coherency) {
        *flags |= BIT(CXL_DSMAS_FLAGS_HW_MANAGED_COHERENCY);
    }
    if (region->ic_specific_dc_management) {
        *flags |= BIT(CXL_DSMAS_FLAGS_IC_SPECIFIC_DC_MANAGEMENT);
    }
    if (region->rdonly) {
        *flags |= BIT(CXL_DSMAS_FLAGS_RDONLY);
    }
}

/*
 * CXL r3.2 section 7.6.7.6.2:
 * Get Host DC Region Configuration (Opcode 5601h)
 */
static CXLRetCode cmd_fm_get_host_dc_region_config(const struct cxl_cmd *cmd,
                                                   uint8_t *payload_in,
                                                   size_t len_in,
                                                   uint8_t *payload_out,
                                                   size_t *len_out,
                                                   CXLCCI *cci)
{
    struct {
        uint16_t host_id;
        uint8_t region_cnt;
        uint8_t start_rid;
    } QEMU_PACKED *in = (void *)payload_in;
    struct {
        uint16_t host_id;
        uint8_t num_regions;
        uint8_t regions_returned;
        struct {
            uint64_t base;
            uint64_t decode_len;
            uint64_t region_len;
            uint64_t block_size;
            uint8_t flags;
            uint8_t rsvd1[3];
            uint8_t sanitize;
            uint8_t rsvd2[3];
        } QEMU_PACKED records[];
    } QEMU_PACKED *out = (void *)payload_out;
    struct {
        uint32_t num_extents_supported;
        uint32_t num_extents_available;
        uint32_t num_tags_supported;
        uint32_t num_tags_available;
    } QEMU_PACKED *extra_out;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint16_t record_count, out_pl_len, i;

    if (in->start_rid >= ct3d->dc.num_regions) {
        return CXL_MBOX_INVALID_INPUT;
    }
    record_count = MIN(ct3d->dc.num_regions - in->start_rid, in->region_cnt);

    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    extra_out = (void *)out + out_pl_len;
    out_pl_len += sizeof(*extra_out);

    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    stw_le_p(&out->host_id, 0);
    out->num_regions = ct3d->dc.num_regions;
    out->regions_returned = record_count;

    for (i = 0; i < record_count; i++) {
        stq_le_p(&out->records[i].base,
                 ct3d->dc.regions[in->start_rid + i].base);
        stq_le_p(&out->records[i].decode_len,
                 ct3d->dc.regions[in->start_rid + i].decode_len /
                 CXL_CAPACITY_MULTIPLIER);
        stq_le_p(&out->records[i].region_len,
                 ct3d->dc.regions[in->start_rid + i].len);
        stq_le_p(&out->records[i].block_size,
                 ct3d->dc.regions[in->start_rid + i].block_size);
        build_dsmas_flags(&out->records[i].flags,
                          &ct3d->dc.regions[in->start_rid + i]);
        /* Sanitize is bit 0 of flags. */
        out->records[i].sanitize =
            ct3d->dc.regions[in->start_rid + i].flags & BIT(0);
    }

    stl_le_p(&extra_out->num_extents_supported, CXL_NUM_EXTENTS_SUPPORTED);
    stl_le_p(&extra_out->num_extents_available, CXL_NUM_EXTENTS_SUPPORTED -
             ct3d->dc.total_extent_count);
    stl_le_p(&extra_out->num_tags_supported, CXL_NUM_TAGS_SUPPORTED);
    stl_le_p(&extra_out->num_tags_available, CXL_NUM_TAGS_SUPPORTED);

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.2 section 7.6.7.6.3: Set Host DC Region Configuration (Opcode 5602h) */
static CXLRetCode cmd_fm_set_dc_region_config(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    struct {
        uint8_t reg_id;
        uint8_t rsvd[3];
        uint64_t block_sz;
        uint8_t flags;
        uint8_t rsvd2[3];
    } QEMU_PACKED *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLEventDynamicCapacity dcEvent = {};
    CXLDCRegion *region;

    /* Validate the region index before using it to index dc.regions[] */
    if (in->reg_id >= DCD_MAX_NUM_REGION) {
        return CXL_MBOX_UNSUPPORTED;
    }
    region = &ct3d->dc.regions[in->reg_id];

    /*
     * CXL r3.2 7.6.7.6.3: Set DC Region Configuration
     * This command shall fail with Unsupported when the Sanitize on Release
     * field does not match the region's configuration... and the device
     * does not support reconfiguration of the Sanitize on Release setting.
     *
     * Currently not reconfigurable, so always fail if the sanitize bit
     * (bit 0) doesn't match.
     */
    if ((in->flags & 0x1) != (region->flags & 0x1)) {
        return CXL_MBOX_UNSUPPORTED;
    }

    /* Check that no extents are in the region being reconfigured */
    if (!bitmap_empty(region->blk_bitmap, region->len / region->block_size)) {
        return CXL_MBOX_UNSUPPORTED;
    }

    /* Check that the new block size is supported */
    if (!is_power_of_2(in->block_sz) ||
        !(in->block_sz & region->supported_blk_size_bitmask)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* Return success if the new block size == current block size */
    if (in->block_sz == region->block_size) {
        return CXL_MBOX_SUCCESS;
    }

    /* Free the bitmap and create a new one for the new block size. */
    qemu_mutex_lock(&region->bitmap_lock);
    g_free(region->blk_bitmap);
    region->blk_bitmap = bitmap_new(region->len / in->block_sz);
    qemu_mutex_unlock(&region->bitmap_lock);
    region->block_size = in->block_sz;

    /* Create event record and insert into event log */
    cxl_assign_event_header(&dcEvent.hdr,
                            &dynamic_capacity_uuid,
                            (1 << CXL_EVENT_TYPE_INFO),
                            sizeof(dcEvent),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));
    dcEvent.type = DC_EVENT_REGION_CONFIG_UPDATED;
    dcEvent.validity_flags = 1;
    dcEvent.host_id = 0;
    dcEvent.updated_region_id = in->reg_id;

    if (cxl_event_insert(&ct3d->cxl_dstate,
                         CXL_EVENT_TYPE_DYNAMIC_CAP,
                         (CXLEventRecordRaw *)&dcEvent)) {
        cxl_event_irq_assert(ct3d);
    }
    return CXL_MBOX_SUCCESS;
}

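/*
 * Worked example (illustrative only; the mask value below is hypothetical):
 * supported_blk_size_bitmask carries one bit per power-of-2 size, so a
 * region advertising 64 B through 256 MiB block sizes accepts exactly the
 * sizes whose bit is present:
 *
 *     region->supported_blk_size_bitmask = 0x1FFFFFC0;   // bits 6..28 set
 *     is_power_of_2(0x10000) && (0x10000 & mask)         // 64 KiB: accepted
 *     is_power_of_2(0x18000)                             // false: rejected
 */
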
/* CXL r3.2 section 7.6.7.6.4: Get DC Region Extent Lists (Opcode 5603h) */
static CXLRetCode cmd_fm_get_dc_region_extent_list(const struct cxl_cmd *cmd,
                                                   uint8_t *payload_in,
                                                   size_t len_in,
                                                   uint8_t *payload_out,
                                                   size_t *len_out,
                                                   CXLCCI *cci)
{
    struct {
        uint16_t host_id;
        uint8_t rsvd[2];
        uint32_t extent_cnt;
        uint32_t start_extent_id;
    } QEMU_PACKED *in = (void *)payload_in;
    struct {
        uint16_t host_id;
        uint8_t rsvd[2];
        uint32_t start_extent_id;
        uint32_t extents_returned;
        uint32_t total_extents;
        uint32_t list_generation_num;
        uint8_t rsvd2[4];
        CXLDCExtentRaw records[];
    } QEMU_PACKED *out = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*in) != 0xc);
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDCExtent *ent;
    CXLDCExtentRaw *out_rec;
    uint16_t record_count = 0, record_done = 0, i = 0;
    uint16_t out_pl_len, max_size;

    if (in->host_id != 0) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (in->start_extent_id > ct3d->dc.nr_extents_accepted) {
        return CXL_MBOX_INVALID_INPUT;
    }

    record_count = MIN(in->extent_cnt,
                       ct3d->dc.nr_extents_accepted - in->start_extent_id);
    max_size = CXL_MAILBOX_MAX_PAYLOAD_SIZE - sizeof(*out);
    record_count = MIN(record_count, max_size / sizeof(out->records[0]));
    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);

    stw_le_p(&out->host_id, in->host_id);
    stl_le_p(&out->start_extent_id, in->start_extent_id);
    stl_le_p(&out->extents_returned, record_count);
    stl_le_p(&out->total_extents, ct3d->dc.nr_extents_accepted);
    stl_le_p(&out->list_generation_num, ct3d->dc.ext_list_gen_seq);

    if (record_count > 0) {
        QTAILQ_FOREACH(ent, &ct3d->dc.extents, node) {
            if (i++ < in->start_extent_id) {
                continue;
            }
            out_rec = &out->records[record_done];
            stq_le_p(&out_rec->start_dpa, ent->start_dpa);
            stq_le_p(&out_rec->len, ent->len);
            memcpy(&out_rec->tag, ent->tag, 0x10);
            stw_le_p(&out_rec->shared_seq, ent->shared_seq);

            record_done++;
            if (record_done == record_count) {
                break;
            }
        }
    }

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}

/*
 * Helper function to convert CXLDCExtentRaw to CXLUpdateDCExtentListInPl
 * in order to reuse the cxl_detect_malformed_extent_list() function, which
 * accepts CXLUpdateDCExtentListInPl as a parameter.
 */
static void convert_raw_extents(CXLDCExtentRaw raw_extents[],
                                CXLUpdateDCExtentListInPl *extent_list,
                                int count)
{
    int i;

    extent_list->num_entries_updated = count;

    for (i = 0; i < count; i++) {
        extent_list->updated_entries[i].start_dpa = raw_extents[i].start_dpa;
        extent_list->updated_entries[i].len = raw_extents[i].len;
    }
}

/* CXL r3.2 Section 7.6.7.6.5: Initiate Dynamic Capacity Add (Opcode 5604h) */
static CXLRetCode cmd_fm_initiate_dc_add(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint16_t host_id;
        uint8_t selection_policy;
        uint8_t reg_num;
        uint64_t length;
        uint8_t tag[0x10];
        uint32_t ext_count;
        CXLDCExtentRaw extents[];
    } QEMU_PACKED *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    int i, rc;

    switch (in->selection_policy) {
    case CXL_EXTENT_SELECTION_POLICY_PRESCRIPTIVE: {
        /* Adding the extents would exceed the device's extent tracking ability */
        if (in->ext_count + ct3d->dc.total_extent_count >
            CXL_NUM_EXTENTS_SUPPORTED) {
            return CXL_MBOX_RESOURCES_EXHAUSTED;
        }

        g_autofree CXLUpdateDCExtentListInPl *list =
            g_malloc0(sizeof(*list) +
                      in->ext_count * sizeof(*list->updated_entries));

        convert_raw_extents(in->extents, list, in->ext_count);
        rc = cxl_detect_malformed_extent_list(ct3d, list);

        for (i = 0; i < in->ext_count; i++) {
            CXLDCExtentRaw *ext = &in->extents[i];

            /* Check requested extents do not overlap with pending ones. */
            if (cxl_extent_groups_overlaps_dpa_range(&ct3d->dc.extents_pending,
                                                     ext->start_dpa,
                                                     ext->len)) {
                return CXL_MBOX_INVALID_EXTENT_LIST;
            }
            /* Check requested extents do not overlap with existing ones. */
            if (cxl_extents_overlaps_dpa_range(&ct3d->dc.extents,
                                               ext->start_dpa,
                                               ext->len)) {
                return CXL_MBOX_INVALID_EXTENT_LIST;
            }
        }

        if (rc) {
            return rc;
        }

        CXLDCExtentGroup *group = NULL;
        for (i = 0; i < in->ext_count; i++) {
            CXLDCExtentRaw *ext = &in->extents[i];

            group = cxl_insert_extent_to_extent_group(group, ext->start_dpa,
                                                      ext->len, ext->tag,
                                                      ext->shared_seq);
        }

        cxl_extent_group_list_insert_tail(&ct3d->dc.extents_pending, group);
        ct3d->dc.total_extent_count += in->ext_count;
        cxl_create_dc_event_records_for_extents(ct3d,
                                                DC_EVENT_ADD_CAPACITY,
                                                in->extents,
                                                in->ext_count);

        return CXL_MBOX_SUCCESS;
    }
    default: {
        qemu_log_mask(LOG_UNIMP,
                      "CXL extent selection policy not supported.\n");
        return CXL_MBOX_INVALID_INPUT;
    }
    }
}

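/*
 * The dispatch tables below are indexed by [command set][command]; each
 * entry carries the command name, its handler, the expected input payload
 * length (~0 accepts any length; the check happens in
 * cxl_process_cci_message()), and the command's effect flags. Reading an
 * existing entry as an example:
 *
 *     [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set,
 *                          8, CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
 *
 * requires exactly an 8-byte input payload and declares an immediate
 * policy change effect.
 */
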
static const struct cxl_cmd cxl_cmd_set[256][256] = {
    [INFOSTAT][BACKGROUND_OPERATION_ABORT] = { "BACKGROUND_OPERATION_ABORT",
        cmd_infostat_bg_op_abort, 0, 0 },
    [EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS",
        cmd_events_get_records, 1, 0 },
    [EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS",
        cmd_events_clear_records, ~0, CXL_MBOX_IMMEDIATE_LOG_CHANGE },
    [EVENTS][GET_INTERRUPT_POLICY] = { "EVENTS_GET_INTERRUPT_POLICY",
        cmd_events_get_interrupt_policy, 0, 0 },
    [EVENTS][SET_INTERRUPT_POLICY] = { "EVENTS_SET_INTERRUPT_POLICY",
        cmd_events_set_interrupt_policy,
        ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE },
    [FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO",
        cmd_firmware_update_get_info, 0, 0 },
    [FIRMWARE_UPDATE][TRANSFER] = { "FIRMWARE_UPDATE_TRANSFER",
        cmd_firmware_update_transfer, ~0,
        CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT },
    [FIRMWARE_UPDATE][ACTIVATE] = { "FIRMWARE_UPDATE_ACTIVATE",
        cmd_firmware_update_activate, 2,
        CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set,
        8, CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported,
        0, 0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [FEATURES][GET_SUPPORTED] = { "FEATURES_GET_SUPPORTED",
        cmd_features_get_supported, 0x8, 0 },
    [FEATURES][GET_FEATURE] = { "FEATURES_GET_FEATURE",
        cmd_features_get_feature, 0x15, 0 },
    [FEATURES][SET_FEATURE] = { "FEATURES_SET_FEATURE",
        cmd_features_set_feature,
        ~0,
        (CXL_MBOX_IMMEDIATE_CONFIG_CHANGE |
         CXL_MBOX_IMMEDIATE_DATA_CHANGE |
         CXL_MBOX_IMMEDIATE_POLICY_CHANGE |
         CXL_MBOX_IMMEDIATE_LOG_CHANGE |
         CXL_MBOX_SECURITY_STATE_CHANGE) },
    [IDENTIFY][MEMORY_DEVICE] = { "IDENTIFY_MEMORY_DEVICE",
        cmd_identify_memory_device, 0, 0 },
    [CCLS][GET_PARTITION_INFO] = { "CCLS_GET_PARTITION_INFO",
        cmd_ccls_get_partition_info, 0, 0 },
    [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 },
    [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa,
        ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE | CXL_MBOX_IMMEDIATE_DATA_CHANGE },
    [HEALTH_INFO_ALERTS][GET_ALERT_CONFIG] = {
        "HEALTH_INFO_ALERTS_GET_ALERT_CONFIG",
        cmd_get_alert_config, 0, 0 },
    [HEALTH_INFO_ALERTS][SET_ALERT_CONFIG] = {
        "HEALTH_INFO_ALERTS_SET_ALERT_CONFIG",
        cmd_set_alert_config, 12, CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
    [SANITIZE][OVERWRITE] = { "SANITIZE_OVERWRITE", cmd_sanitize_overwrite, 0,
        (CXL_MBOX_IMMEDIATE_DATA_CHANGE |
         CXL_MBOX_SECURITY_STATE_CHANGE |
         CXL_MBOX_BACKGROUND_OPERATION |
         CXL_MBOX_BACKGROUND_OPERATION_ABORT) },
    [SANITIZE][MEDIA_OPERATIONS] = { "MEDIA_OPERATIONS", cmd_media_operations,
        ~0,
        (CXL_MBOX_IMMEDIATE_DATA_CHANGE |
         CXL_MBOX_BACKGROUND_OPERATION) },
    [PERSISTENT_MEM][GET_SECURITY_STATE] = { "GET_SECURITY_STATE",
        cmd_get_security_state, 0, 0 },
    [MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST",
        cmd_media_get_poison_list, 16, 0 },
    [MEDIA_AND_POISON][INJECT_POISON] = { "MEDIA_AND_POISON_INJECT_POISON",
        cmd_media_inject_poison, 8, 0 },
    [MEDIA_AND_POISON][CLEAR_POISON] = { "MEDIA_AND_POISON_CLEAR_POISON",
        cmd_media_clear_poison, 72, 0 },
    [MEDIA_AND_POISON][GET_SCAN_MEDIA_CAPABILITIES] = {
        "MEDIA_AND_POISON_GET_SCAN_MEDIA_CAPABILITIES",
        cmd_media_get_scan_media_capabilities, 16, 0 },
    [MEDIA_AND_POISON][SCAN_MEDIA] = { "MEDIA_AND_POISON_SCAN_MEDIA",
        cmd_media_scan_media, 17,
        (CXL_MBOX_BACKGROUND_OPERATION |
         CXL_MBOX_BACKGROUND_OPERATION_ABORT) },
    [MEDIA_AND_POISON][GET_SCAN_MEDIA_RESULTS] = {
        "MEDIA_AND_POISON_GET_SCAN_MEDIA_RESULTS",
        cmd_media_get_scan_media_results, 0, 0 },
};

static const struct cxl_cmd cxl_cmd_set_dcd[256][256] = {
    [DCD_CONFIG][GET_DC_CONFIG] = { "DCD_GET_DC_CONFIG",
        cmd_dcd_get_dyn_cap_config, 2, 0 },
    [DCD_CONFIG][GET_DYN_CAP_EXT_LIST] = {
        "DCD_GET_DYNAMIC_CAPACITY_EXTENT_LIST", cmd_dcd_get_dyn_cap_ext_list,
        8, 0 },
    [DCD_CONFIG][ADD_DYN_CAP_RSP] = {
        "DCD_ADD_DYNAMIC_CAPACITY_RESPONSE", cmd_dcd_add_dyn_cap_rsp,
        ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE },
    [DCD_CONFIG][RELEASE_DYN_CAP] = {
        "DCD_RELEASE_DYNAMIC_CAPACITY", cmd_dcd_release_dyn_cap,
        ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE },
};

static const struct cxl_cmd cxl_cmd_set_sw[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [INFOSTAT][BACKGROUND_OPERATION_STATUS] = { "BACKGROUND_OPERATION_STATUS",
        cmd_infostat_bg_op_sts, 0, 0 },
    [INFOSTAT][BACKGROUND_OPERATION_ABORT] = { "BACKGROUND_OPERATION_ABORT",
        cmd_infostat_bg_op_abort, 0, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 8,
        CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
        0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [PHYSICAL_SWITCH][IDENTIFY_SWITCH_DEVICE] = { "IDENTIFY_SWITCH_DEVICE",
        cmd_identify_switch_device, 0, 0 },
    [PHYSICAL_SWITCH][GET_PHYSICAL_PORT_STATE] = { "GET_PHYSICAL_PORT_STATE",
        cmd_get_physical_port_state, ~0, 0 },
    [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
        cmd_tunnel_management_cmd, ~0, 0 },
};

static const struct cxl_cmd cxl_cmd_set_fm_dcd[256][256] = {
    [FMAPI_DCD_MGMT][GET_DCD_INFO] = { "GET_DCD_INFO",
        cmd_fm_get_dcd_info, 0, 0 },
    [FMAPI_DCD_MGMT][GET_HOST_DC_REGION_CONFIG] = { "GET_HOST_DC_REGION_CONFIG",
        cmd_fm_get_host_dc_region_config, 4, 0 },
    [FMAPI_DCD_MGMT][SET_DC_REGION_CONFIG] = { "SET_DC_REGION_CONFIG",
        cmd_fm_set_dc_region_config, 16,
        (CXL_MBOX_CONFIG_CHANGE_COLD_RESET |
         CXL_MBOX_CONFIG_CHANGE_CONV_RESET |
         CXL_MBOX_CONFIG_CHANGE_CXL_RESET |
         CXL_MBOX_IMMEDIATE_CONFIG_CHANGE |
         CXL_MBOX_IMMEDIATE_DATA_CHANGE) },
    [FMAPI_DCD_MGMT][GET_DC_REGION_EXTENT_LIST] = { "GET_DC_REGION_EXTENT_LIST",
        cmd_fm_get_dc_region_extent_list, 12, 0 },
    [FMAPI_DCD_MGMT][INITIATE_DC_ADD] = { "INIT_DC_ADD",
        cmd_fm_initiate_dc_add, ~0,
        (CXL_MBOX_CONFIG_CHANGE_COLD_RESET |
         CXL_MBOX_CONFIG_CHANGE_CONV_RESET |
         CXL_MBOX_CONFIG_CHANGE_CXL_RESET |
         CXL_MBOX_IMMEDIATE_CONFIG_CHANGE |
         CXL_MBOX_IMMEDIATE_DATA_CHANGE) },
};
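
/*
 * Each table entry above is { debug name, handler, expected input payload
 * length, CEL effect flags }. An input length of ~0 marks a variable-length
 * payload; cxl_process_cci_message() below rejects any other length
 * mismatch with CXL_MBOX_INVALID_PAYLOAD_LENGTH before the handler runs.
 */
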
/*
 * While the command is executing in the background, the device should
 * update the percentage complete in the Background Command Status Register
 * at least once per second.
 */

#define CXL_MBOX_BG_UPDATE_FREQ 1000UL

int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd,
                            size_t len_in, uint8_t *pl_in, size_t *len_out,
                            uint8_t *pl_out, bool *bg_started)
{
    int ret;
    const struct cxl_cmd *cxl_cmd;
    opcode_handler h;
    CXLDeviceState *cxl_dstate;

    *len_out = 0;
    cxl_cmd = &cci->cxl_cmd_set[set][cmd];
    h = cxl_cmd->handler;
    if (!h) {
        qemu_log_mask(LOG_UNIMP, "Command %04xh not implemented\n",
                      set << 8 | cmd);
        return CXL_MBOX_UNSUPPORTED;
    }

    if (len_in != cxl_cmd->in && cxl_cmd->in != ~0) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    /* Only one background command may run at a time. */
    if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) &&
        cci->bg.runtime > 0) {
        return CXL_MBOX_BUSY;
    }

    /* Forbid selected commands while the media is disabled. */
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;

        if (cxl_dev_media_disabled(cxl_dstate)) {
            if (h == cmd_events_get_records ||
                h == cmd_ccls_get_partition_info ||
                h == cmd_ccls_set_lsa ||
                h == cmd_ccls_get_lsa ||
                h == cmd_logs_get_log ||
                h == cmd_media_get_poison_list ||
                h == cmd_media_inject_poison ||
                h == cmd_media_clear_poison ||
                h == cmd_sanitize_overwrite ||
                h == cmd_firmware_update_transfer ||
                h == cmd_firmware_update_activate) {
                return CXL_MBOX_MEDIA_DISABLED;
            }
        }
    }

    ret = (*h)(cxl_cmd, pl_in, len_in, pl_out, len_out, cci);
    if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) &&
        ret == CXL_MBOX_BG_STARTED) {
        *bg_started = true;
    } else {
        *bg_started = false;
    }

    /* Set up the background state and arm the progress timer. */
    if (*bg_started) {
        uint64_t now;

        cci->bg.opcode = (set << 8) | cmd;

        cci->bg.complete_pct = 0;
        cci->bg.aborted = false;
        cci->bg.ret_code = 0;

        now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
        cci->bg.starttime = now;
        timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
    }

    return ret;
}
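
/*
 * Usage sketch (illustrative only): how a transport might dispatch a
 * Get Timestamp (opcode 0300h) through this entry point. Sizing the buffer
 * with CXL_MAILBOX_MAX_PAYLOAD_SIZE is an assumption for the example.
 *
 *    uint8_t out[CXL_MAILBOX_MAX_PAYLOAD_SIZE];
 *    size_t len_out = 0;
 *    bool bg = false;
 *    int rc = cxl_process_cci_message(cci, TIMESTAMP, GET, 0, NULL,
 *                                     &len_out, out, &bg);
 *    // On CXL_MBOX_SUCCESS, len_out bytes of response are in out[].
 */
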
static void bg_timercb(void *opaque)
{
    CXLCCI *cci = opaque;
    uint64_t now, total_time;

    qemu_mutex_lock(&cci->bg.lock);

    now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    total_time = cci->bg.starttime + cci->bg.runtime;

    if (now >= total_time) { /* we are done */
        uint16_t ret = CXL_MBOX_SUCCESS;

        cci->bg.complete_pct = 100;
        cci->bg.ret_code = ret;
        switch (cci->bg.opcode) {
        case 0x0201: /* fw transfer */
            __do_firmware_xfer(cci);
            break;
        case 0x4400: /* sanitize */
        {
            CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

            __do_sanitization(ct3d);
            cxl_dev_enable_media(&ct3d->cxl_dstate);
            break;
        }
        case 0x4402: /* Media Operations sanitize */
        {
            CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

            __do_sanitize(ct3d);
            break;
        }
        case 0x4304: /* scan media */
        {
            CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

            __do_scan_media(ct3d);
            break;
        }
        default:
            __builtin_unreachable();
            break;
        }
    } else {
        /* estimate only */
        cci->bg.complete_pct =
            100 * (now - cci->bg.starttime) / cci->bg.runtime;
        timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
    }

    if (cci->bg.complete_pct == 100) {
        /* TODO: generalize to switch CCI */
        CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
        CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
        PCIDevice *pdev = PCI_DEVICE(cci->d);

        cci->bg.starttime = 0;
        /* Registers are updated; allow new background-capable commands. */
        cci->bg.runtime = 0;

        if (msix_enabled(pdev)) {
            msix_notify(pdev, cxl_dstate->mbox_msi_n);
        } else if (msi_enabled(pdev)) {
            msi_notify(pdev, cxl_dstate->mbox_msi_n);
        }
    }

    qemu_mutex_unlock(&cci->bg.lock);
}

static void cxl_rebuild_cel(CXLCCI *cci)
{
    cci->cel_size = 0; /* Reset for a fresh build */
    for (int set = 0; set < 256; set++) {
        for (int cmd = 0; cmd < 256; cmd++) {
            if (cci->cxl_cmd_set[set][cmd].handler) {
                const struct cxl_cmd *c = &cci->cxl_cmd_set[set][cmd];
                struct cel_log *log = &cci->cel_log[cci->cel_size];

                log->opcode = (set << 8) | cmd;
                log->effect = c->effect;
                cci->cel_size++;
            }
        }
    }
}

void cxl_init_cci(CXLCCI *cci, size_t payload_max)
{
    cci->payload_max = payload_max;
    cxl_rebuild_cel(cci);

    cci->bg.complete_pct = 0;
    cci->bg.starttime = 0;
    cci->bg.runtime = 0;
    cci->bg.aborted = false;
    cci->bg.timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, bg_timercb, cci);
    qemu_mutex_init(&cci->bg.lock);

    memset(&cci->fw, 0, sizeof(cci->fw));
    cci->fw.active_slot = 1;
    cci->fw.slot[cci->fw.active_slot - 1] = true;
    cci->initialized = true;
}

void cxl_destroy_cci(CXLCCI *cci)
{
    qemu_mutex_destroy(&cci->bg.lock);
    cci->initialized = false;
}

static void cxl_copy_cci_commands(CXLCCI *cci,
                                  const struct cxl_cmd (*cxl_cmds)[256])
{
    for (int set = 0; set < 256; set++) {
        for (int cmd = 0; cmd < 256; cmd++) {
            if (cxl_cmds[set][cmd].handler) {
                cci->cxl_cmd_set[set][cmd] = cxl_cmds[set][cmd];
            }
        }
    }
}

void cxl_add_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmd_set)[256],
                          size_t payload_max)
{
    cci->payload_max = MAX(payload_max, cci->payload_max);
    cxl_copy_cci_commands(cci, cxl_cmd_set);
    cxl_rebuild_cel(cci);
}

void cxl_initialize_mailbox_swcci(CXLCCI *cci, DeviceState *intf,
                                  DeviceState *d, size_t payload_max)
{
    cxl_copy_cci_commands(cci, cxl_cmd_set_sw);
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}

void cxl_initialize_mailbox_t3(CXLCCI *cci, DeviceState *d, size_t payload_max)
{
    CXLType3Dev *ct3d = CXL_TYPE3(d);

    cxl_copy_cci_commands(cci, cxl_cmd_set);
    if (ct3d->dc.num_regions) {
        cxl_copy_cci_commands(cci, cxl_cmd_set_dcd);
    }
    cci->d = d;

    /* No separation for PCI MB as protocol handled in PCI device. */
    cci->intf = d;
    cxl_init_cci(cci, payload_max);
}
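
/*
 * Sketch (illustrative, hypothetical call site): additional command sets
 * can also be layered onto an already-initialized CCI, e.g.
 *
 *    cxl_initialize_mailbox_t3(&ct3d->cci, DEVICE(ct3d), payload_max);
 *    cxl_add_cci_commands(&ct3d->cci, cxl_cmd_set_dcd, payload_max);
 *
 * cxl_add_cci_commands() only ever grows payload_max and rebuilds the CEL,
 * so newly added opcodes are advertised to the host.
 */
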
static const struct cxl_cmd cxl_cmd_set_t3_ld[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
        0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
};

void cxl_initialize_t3_ld_cci(CXLCCI *cci, DeviceState *d, DeviceState *intf,
                              size_t payload_max)
{
    cxl_copy_cci_commands(cci, cxl_cmd_set_t3_ld);
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}

static const struct cxl_cmd cxl_cmd_set_t3_fm_owned_ld_mctp[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [INFOSTAT][GET_RESPONSE_MSG_LIMIT] = { "GET_RESPONSE_MSG_LIMIT",
        cmd_get_response_msg_limit, 0, 0 },
    [INFOSTAT][SET_RESPONSE_MSG_LIMIT] = { "SET_RESPONSE_MSG_LIMIT",
        cmd_set_response_msg_limit, 1, 0 },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
        0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
        cmd_tunnel_management_cmd, ~0, 0 },
};

void cxl_initialize_t3_fm_owned_ld_mctpcci(CXLCCI *cci, DeviceState *d,
                                           DeviceState *intf,
                                           size_t payload_max)
{
    CXLType3Dev *ct3d = CXL_TYPE3(d);

    cxl_copy_cci_commands(cci, cxl_cmd_set_t3_fm_owned_ld_mctp);
    if (ct3d->dc.num_regions) {
        cxl_copy_cci_commands(cci, cxl_cmd_set_fm_dcd);
    }
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}
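
/*
 * Note the asymmetry between the two sets above: the per-LD CCI exposes
 * only IDENTIFY and the LOGS commands, while the FM-owned LD MCTP CCI
 * additionally carries the FM-API DCD management set (when dc.num_regions
 * is non-zero) and can tunnel further commands via
 * TUNNEL_MANAGEMENT_COMMAND.
 */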