/*
 * CXL Utility library for mailbox interface
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <math.h>

#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/cxl/cxl.h"
#include "hw/cxl/cxl_events.h"
#include "hw/cxl/cxl_mailbox.h"
#include "hw/pci/pci.h"
#include "hw/pci-bridge/cxl_upstream_port.h"
#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/log.h"
#include "qemu/units.h"
#include "qemu/uuid.h"
#include "system/hostmem.h"
#include "qemu/range.h"
#include "qapi/qapi-types-cxl.h"

#define CXL_CAPACITY_MULTIPLIER (256 * MiB)
#define CXL_DC_EVENT_LOG_SIZE 8
#define CXL_NUM_TAGS_SUPPORTED 0
#define CXL_ALERTS_LIFE_USED_WARN_THRESH (1 << 0)
#define CXL_ALERTS_OVER_TEMP_WARN_THRESH (1 << 1)
#define CXL_ALERTS_UNDER_TEMP_WARN_THRESH (1 << 2)
#define CXL_ALERTS_COR_VMEM_ERR_WARN_THRESH (1 << 3)
#define CXL_ALERTS_COR_PMEM_ERR_WARN_THRESH (1 << 4)

/*
 * How to add a new command, example. The command set FOO, with cmd BAR.
 *  1. Add the command set and cmd to the enum.
 *     FOO = 0x7f,
 *        #define BAR 0
 *  2. Implement the handler
 *     static CXLRetCode cmd_foo_bar(const struct cxl_cmd *cmd,
 *                                   uint8_t *payload_in, size_t len_in,
 *                                   uint8_t *payload_out, size_t *len_out,
 *                                   CXLCCI *cci)
 *     { ... return CXL_MBOX_SUCCESS; }
 *  3. Add the command to the cxl_cmd_set[][]
 *     [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y },
 *
 *  Writing the handler:
 *    The handler is provided the &struct cxl_cmd, the input and output
 *    payload buffers, and the in/out lengths of the payload. It is
 *    responsible for consuming the payload from payload_in and operating
 *    upon it as necessary. It must then fill the output data into
 *    payload_out (overwriting what was there), set *len_out, and return a
 *    valid return code.
 *
 * XXX: The handler need not worry about endianness. The payload is read out
 * of a register interface that already deals with it.
 */
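
/*
 * For illustration only (not wired into the command tables below): a
 * minimal handler following the recipe above, for a hypothetical FOO/BAR
 * command that echoes one byte back, might look like:
 *
 *   static CXLRetCode cmd_foo_bar(const struct cxl_cmd *cmd,
 *                                 uint8_t *payload_in, size_t len_in,
 *                                 uint8_t *payload_out, size_t *len_out,
 *                                 CXLCCI *cci)
 *   {
 *       if (len_in < 1) {
 *           return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
 *       }
 *       payload_out[0] = payload_in[0];
 *       *len_out = 1;
 *       return CXL_MBOX_SUCCESS;
 *   }
 */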

enum {
    INFOSTAT    = 0x00,
        #define IS_IDENTIFY   0x1
        #define BACKGROUND_OPERATION_STATUS    0x2
        #define GET_RESPONSE_MSG_LIMIT         0x3
        #define SET_RESPONSE_MSG_LIMIT         0x4
        #define BACKGROUND_OPERATION_ABORT     0x5
    EVENTS      = 0x01,
        #define GET_RECORDS   0x0
        #define CLEAR_RECORDS 0x1
        #define GET_INTERRUPT_POLICY 0x2
        #define SET_INTERRUPT_POLICY 0x3
    FIRMWARE_UPDATE = 0x02,
        #define GET_INFO      0x0
        #define TRANSFER      0x1
        #define ACTIVATE      0x2
    TIMESTAMP   = 0x03,
        #define GET           0x0
        #define SET           0x1
    LOGS        = 0x04,
        #define GET_SUPPORTED 0x0
        #define GET_LOG       0x1
    FEATURES    = 0x05,
        #define GET_SUPPORTED 0x0
        #define GET_FEATURE   0x1
        #define SET_FEATURE   0x2
    IDENTIFY    = 0x40,
        #define MEMORY_DEVICE 0x0
    CCLS        = 0x41,
        #define GET_PARTITION_INFO     0x0
        #define GET_LSA       0x2
        #define SET_LSA       0x3
    HEALTH_INFO_ALERTS = 0x42,
        #define GET_ALERT_CONFIG 0x1
        #define SET_ALERT_CONFIG 0x2
    SANITIZE    = 0x44,
        #define OVERWRITE     0x0
        #define SECURE_ERASE  0x1
        #define MEDIA_OPERATIONS 0x2
    PERSISTENT_MEM = 0x45,
        #define GET_SECURITY_STATE     0x0
    MEDIA_AND_POISON = 0x43,
        #define GET_POISON_LIST        0x0
        #define INJECT_POISON          0x1
        #define CLEAR_POISON           0x2
        #define GET_SCAN_MEDIA_CAPABILITIES 0x3
        #define SCAN_MEDIA             0x4
        #define GET_SCAN_MEDIA_RESULTS 0x5
    DCD_CONFIG  = 0x48,
        #define GET_DC_CONFIG          0x0
        #define GET_DYN_CAP_EXT_LIST   0x1
        #define ADD_DYN_CAP_RSP        0x2
        #define RELEASE_DYN_CAP        0x3
    PHYSICAL_SWITCH = 0x51,
        #define IDENTIFY_SWITCH_DEVICE      0x0
        #define GET_PHYSICAL_PORT_STATE     0x1
    TUNNEL = 0x53,
        #define MANAGEMENT_COMMAND     0x0
    FMAPI_DCD_MGMT = 0x56,
        #define GET_DCD_INFO 0x0
        #define GET_HOST_DC_REGION_CONFIG 0x1
        #define SET_DC_REGION_CONFIG 0x2
        #define GET_DC_REGION_EXTENT_LIST 0x3
        #define INITIATE_DC_ADD 0x4
        #define INITIATE_DC_RELEASE 0x5
};

/* CCI Message Format CXL r3.1 Figure 7-19 */
typedef struct CXLCCIMessage {
    uint8_t category;
#define CXL_CCI_CAT_REQ 0
#define CXL_CCI_CAT_RSP 1
    uint8_t tag;
    uint8_t resv1;
    uint8_t command;
    uint8_t command_set;
    uint8_t pl_length[3];
    uint16_t rc;
    uint16_t vendor_specific;
    uint8_t payload[];
} QEMU_PACKED CXLCCIMessage;
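
/*
 * Illustrative note (not spec text): pl_length is a 24-bit little-endian
 * byte count covering payload[] only. For example, a 0x1234 byte payload
 * is encoded as pl_length[0] = 0x34, pl_length[1] = 0x12,
 * pl_length[2] = 0x00, matching the shift-based load and st24_le_p()
 * store used in the tunnel handler below.
 */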

/* This command is only defined for an MLD FM-Owned LD or an MHD */
static CXLRetCode cmd_tunnel_management_cmd(const struct cxl_cmd *cmd,
                                            uint8_t *payload_in,
                                            size_t len_in,
                                            uint8_t *payload_out,
                                            size_t *len_out,
                                            CXLCCI *cci)
{
    PCIDevice *tunnel_target;
    CXLCCI *target_cci;
    struct {
        uint8_t port_or_ld_id;
        uint8_t target_type;
        uint16_t size;
        CXLCCIMessage ccimessage;
    } QEMU_PACKED *in;
    struct {
        uint16_t resp_len;
        uint8_t resv[2];
        CXLCCIMessage ccimessage;
    } QEMU_PACKED *out;
    size_t pl_length, length_out;
    bool bg_started;
    int rc;

    if (cmd->in < sizeof(*in)) {
        return CXL_MBOX_INVALID_INPUT;
    }
    in = (void *)payload_in;
    out = (void *)payload_out;

    if (len_in < sizeof(*in)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /* Enough room for minimum sized message - no payload */
    if (in->size < sizeof(in->ccimessage)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /* Length of input payload should be in->size + a wrapping tunnel header */
    if (in->size != len_in - offsetof(typeof(*out), ccimessage)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    if (in->ccimessage.category != CXL_CCI_CAT_REQ) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (in->target_type != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "Tunneled Command sent to non-existent FM-LD\n");
        return CXL_MBOX_INVALID_INPUT;
    }

    /*
     * The target of a tunnel unfortunately depends on the type of CCI
     * reading the message.
     * If in a switch, it is the port number.
     * If in an MLD, it is the LD number.
     * If in an MHD, the target type indicates where we are going.
     */
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

        if (in->port_or_ld_id != 0) {
            /* Only pretending to have one for now! */
            return CXL_MBOX_INVALID_INPUT;
        }
        target_cci = &ct3d->ld0_cci;
    } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
        CXLUpstreamPort *usp = CXL_USP(cci->d);

        tunnel_target = pcie_find_port_by_pn(&PCI_BRIDGE(usp)->sec_bus,
                                             in->port_or_ld_id);
        if (!tunnel_target) {
            return CXL_MBOX_INVALID_INPUT;
        }
        tunnel_target =
            pci_bridge_get_sec_bus(PCI_BRIDGE(tunnel_target))->devices[0];
        if (!tunnel_target) {
            return CXL_MBOX_INVALID_INPUT;
        }
        if (object_dynamic_cast(OBJECT(tunnel_target), TYPE_CXL_TYPE3)) {
            CXLType3Dev *ct3d = CXL_TYPE3(tunnel_target);

            /* Tunneled VDMs always land on FM Owned LD */
            target_cci = &ct3d->vdm_fm_owned_ld_mctp_cci;
        } else {
            return CXL_MBOX_INVALID_INPUT;
        }
    } else {
        return CXL_MBOX_INVALID_INPUT;
    }

    pl_length = in->ccimessage.pl_length[2] << 16 |
        in->ccimessage.pl_length[1] << 8 | in->ccimessage.pl_length[0];
    rc = cxl_process_cci_message(target_cci,
                                 in->ccimessage.command_set,
                                 in->ccimessage.command,
                                 pl_length, in->ccimessage.payload,
                                 &length_out, out->ccimessage.payload,
                                 &bg_started);
    /* Payload is in place; the rest of the CCI header needs filling */
    out->resp_len = length_out + sizeof(CXLCCIMessage);
    st24_le_p(out->ccimessage.pl_length, length_out);
    out->ccimessage.rc = rc;
    out->ccimessage.category = CXL_CCI_CAT_RSP;
    out->ccimessage.command = in->ccimessage.command;
    out->ccimessage.command_set = in->ccimessage.command_set;
    out->ccimessage.tag = in->ccimessage.tag;
    *len_out = length_out + sizeof(*out);

    return CXL_MBOX_SUCCESS;
}
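
/*
 * Worked example (illustrative, not spec text): a Get FW Info (opcode
 * 0200h) request tunneled to LD0 of an MLD would be wrapped as
 *
 *   port_or_ld_id = 0, target_type = 0, size = sizeof(CXLCCIMessage),
 *   ccimessage = { .category = CXL_CCI_CAT_REQ, .tag = <caller chosen>,
 *                  .command_set = 0x02, .command = 0x00, .pl_length = 0 }
 *
 * and the response comes back with category CXL_CCI_CAT_RSP and the same
 * tag, with rc and pl_length filled in as above.
 */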

static CXLRetCode cmd_events_get_records(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in, size_t len_in,
                                         uint8_t *payload_out, size_t *len_out,
                                         CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLGetEventPayload *pl;
    uint8_t log_type;
    int max_recs;

    if (cmd->in < sizeof(log_type)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    log_type = payload_in[0];

    pl = (CXLGetEventPayload *)payload_out;

    max_recs = (cxlds->payload_size - CXL_EVENT_PAYLOAD_HDR_SIZE) /
               CXL_EVENT_RECORD_SIZE;
    if (max_recs > 0xFFFF) {
        max_recs = 0xFFFF;
    }

    return cxl_event_get_records(cxlds, pl, log_type, max_recs, len_out);
}

static CXLRetCode cmd_events_clear_records(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLClearEventPayload *pl;

    pl = (CXLClearEventPayload *)payload_in;

    if (len_in < sizeof(*pl) ||
        len_in < sizeof(*pl) + sizeof(*pl->handle) * pl->nr_recs) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    *len_out = 0;
    return cxl_event_clear_records(cxlds, pl);
}

static CXLRetCode cmd_events_get_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    policy = (CXLEventInterruptPolicy *)payload_out;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    if (log->irq_enabled) {
        policy->info_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    if (log->irq_enabled) {
        policy->warn_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    if (log->irq_enabled) {
        policy->failure_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    if (log->irq_enabled) {
        policy->fatal_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    if (log->irq_enabled) {
        /* Dynamic Capacity borrows the same vector as info */
        policy->dyn_cap_settings = CXL_INT_MSI_MSIX;
    }

    *len_out = sizeof(*policy);
    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cmd_events_set_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    if (len_in < CXL_EVENT_INT_SETTING_MIN_LEN) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    policy = (CXLEventInterruptPolicy *)payload_in;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    log->irq_enabled = (policy->info_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    log->irq_enabled = (policy->warn_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    log->irq_enabled = (policy->failure_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    log->irq_enabled = (policy->fatal_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    /* DCD is optional */
    if (len_in < sizeof(*policy)) {
        return CXL_MBOX_SUCCESS;
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    log->irq_enabled = (policy->dyn_cap_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}
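
/*
 * Illustrative note (assumed encoding, see CXL_EVENT_INT_SETTING): each
 * *_settings field carries the interrupt mode in its low bits, so a Set
 * Interrupt Policy payload with CXL_INT_MSI_MSIX in info_settings and
 * zero elsewhere enables interrupts for the Informational log only. The
 * dyn_cap_settings byte may be omitted entirely; the handler above
 * accepts the short, DCD-less payload.
 */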

/* CXL r3.1 section 8.2.9.1.1: Identify (Opcode 0001h) */
static CXLRetCode cmd_infostat_identify(const struct cxl_cmd *cmd,
                                        uint8_t *payload_in,
                                        size_t len_in,
                                        uint8_t *payload_out,
                                        size_t *len_out,
                                        CXLCCI *cci)
{
    PCIDeviceClass *class = PCI_DEVICE_GET_CLASS(cci->d);
    struct {
        uint16_t pcie_vid;
        uint16_t pcie_did;
        uint16_t pcie_subsys_vid;
        uint16_t pcie_subsys_id;
        uint64_t sn;
        uint8_t max_message_size;
        uint8_t component_type;
    } QEMU_PACKED *is_identify;
    QEMU_BUILD_BUG_ON(sizeof(*is_identify) != 18);

    is_identify = (void *)payload_out;
    is_identify->pcie_vid = class->vendor_id;
    is_identify->pcie_did = class->device_id;
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
        is_identify->sn = CXL_USP(cci->d)->sn;
        /* Subsystem info not defined for a USP */
        is_identify->pcie_subsys_vid = 0;
        is_identify->pcie_subsys_id = 0;
        is_identify->component_type = 0x0; /* Switch */
    } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        PCIDevice *pci_dev = PCI_DEVICE(cci->d);

        is_identify->sn = CXL_TYPE3(cci->d)->sn;
        /*
         * We can't always use class->subsystem_vendor_id as
         * it is not set if the defaults are used.
         */
        is_identify->pcie_subsys_vid =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID);
        is_identify->pcie_subsys_id =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_ID);
        is_identify->component_type = 0x3; /* Type 3 */
    }

    is_identify->max_message_size = (uint8_t)log2(cci->payload_max);
    *len_out = sizeof(*is_identify);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.1.3: Get Response Message Limit (Opcode 0003h) */
static CXLRetCode cmd_get_response_msg_limit(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        uint8_t rsp_limit;
    } QEMU_PACKED *get_rsp_msg_limit = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*get_rsp_msg_limit) != 1);

    get_rsp_msg_limit->rsp_limit = (uint8_t)log2(cci->payload_max);

    *len_out = sizeof(*get_rsp_msg_limit);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.1.4: Set Response Message Limit (Opcode 0004h) */
static CXLRetCode cmd_set_response_msg_limit(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        uint8_t rsp_limit;
    } QEMU_PACKED *in = (void *)payload_in;
    QEMU_BUILD_BUG_ON(sizeof(*in) != 1);
    struct {
        uint8_t rsp_limit;
    } QEMU_PACKED *out = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*out) != 1);

    if (in->rsp_limit < 8 || in->rsp_limit > 10) {
        return CXL_MBOX_INVALID_INPUT;
    }

    cci->payload_max = 1 << in->rsp_limit;
    out->rsp_limit = in->rsp_limit;

    *len_out = sizeof(*out);
    return CXL_MBOX_SUCCESS;
}

static void cxl_set_dsp_active_bm(PCIBus *b, PCIDevice *d,
                                  void *private)
{
    uint8_t *bm = private;

    if (object_dynamic_cast(OBJECT(d), TYPE_CXL_DSP)) {
        uint8_t port = PCIE_PORT(d)->port;

        bm[port / 8] |= 1 << (port % 8);
    }
}

/* CXL r3.1 Section 7.6.7.1.1: Identify Switch Device (Opcode 5100h) */
static CXLRetCode cmd_identify_switch_device(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    PCIEPort *usp = PCIE_PORT(cci->d);
    PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
    int num_phys_ports = pcie_count_ds_ports(bus);

    struct cxl_fmapi_ident_switch_dev_resp_pl {
        uint8_t ingress_port_id;
        uint8_t rsvd;
        uint8_t num_physical_ports;
        uint8_t num_vcss;
        uint8_t active_port_bitmask[0x20];
        uint8_t active_vcs_bitmask[0x20];
        uint16_t total_vppbs;
        uint16_t bound_vppbs;
        uint8_t num_hdm_decoders_per_usp;
    } QEMU_PACKED *out;
    QEMU_BUILD_BUG_ON(sizeof(*out) != 0x49);

    out = (struct cxl_fmapi_ident_switch_dev_resp_pl *)payload_out;
    *out = (struct cxl_fmapi_ident_switch_dev_resp_pl) {
        .num_physical_ports = num_phys_ports + 1, /* 1 USP */
        .num_vcss = 1, /* Multiple VCSs not yet supported - potentially tricky */
        .active_vcs_bitmask[0] = 0x1,
        .total_vppbs = num_phys_ports + 1,
        .bound_vppbs = num_phys_ports + 1,
        .num_hdm_decoders_per_usp = 4,
    };

    /* Depends on the CCI type */
    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_PCIE_PORT)) {
        out->ingress_port_id = PCIE_PORT(cci->intf)->port;
    } else {
        /* MCTP? */
        out->ingress_port_id = 0;
    }

    pci_for_each_device_under_bus(bus, cxl_set_dsp_active_bm,
                                  out->active_port_bitmask);
    out->active_port_bitmask[usp->port / 8] |= (1 << usp->port % 8);

    *len_out = sizeof(*out);

    return CXL_MBOX_SUCCESS;
}
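
/*
 * Illustrative note: the bitmaps above carry one bit per port, so e.g.
 * port 10 sets bit 2 of active_port_bitmask[1] (10 / 8 == 1, 10 % 8 == 2).
 * Similarly, in the response message limit handlers rsp_limit is log2 of
 * the payload size, so the accepted range of 8..10 corresponds to payload
 * sizes of 256 B to 1 KiB.
 */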

/* CXL r3.1 Section 7.6.7.1.2: Get Physical Port State (Opcode 5101h) */
static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    /* CXL r3.1 Table 7-17: Get Physical Port State Request Payload */
    struct cxl_fmapi_get_phys_port_state_req_pl {
        uint8_t num_ports;
        uint8_t ports[];
    } QEMU_PACKED *in;

    /*
     * CXL r3.1 Table 7-19: Get Physical Port State Port Information Block
     * Format
     */
    struct cxl_fmapi_port_state_info_block {
        uint8_t port_id;
        uint8_t config_state;
        uint8_t connected_device_cxl_version;
        uint8_t rsv1;
        uint8_t connected_device_type;
        uint8_t port_cxl_version_bitmask;
        uint8_t max_link_width;
        uint8_t negotiated_link_width;
        uint8_t supported_link_speeds_vector;
        uint8_t max_link_speed;
        uint8_t current_link_speed;
        uint8_t ltssm_state;
        uint8_t first_lane_num;
        uint16_t link_state;
        uint8_t supported_ld_count;
    } QEMU_PACKED;

    /* CXL r3.1 Table 7-18: Get Physical Port State Response Payload */
    struct cxl_fmapi_get_phys_port_state_resp_pl {
        uint8_t num_ports;
        uint8_t rsv1[3];
        struct cxl_fmapi_port_state_info_block ports[];
    } QEMU_PACKED *out;
    PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
    PCIEPort *usp = PCIE_PORT(cci->d);
    size_t pl_size;
    int i;

    in = (struct cxl_fmapi_get_phys_port_state_req_pl *)payload_in;
    out = (struct cxl_fmapi_get_phys_port_state_resp_pl *)payload_out;

    if (len_in < sizeof(*in)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /* Check if what was requested can fit */
    if (sizeof(*out) + sizeof(*out->ports) * in->num_ports > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* For success there should be a match for each requested */
    out->num_ports = in->num_ports;

    for (i = 0; i < in->num_ports; i++) {
        struct cxl_fmapi_port_state_info_block *port;
        /* First try to match on downstream port */
        PCIDevice *port_dev;
        uint16_t lnkcap, lnkcap2, lnksta;

        port = &out->ports[i];

        port_dev = pcie_find_port_by_pn(bus, in->ports[i]);
        if (port_dev) { /* DSP */
            PCIDevice *ds_dev = pci_bridge_get_sec_bus(PCI_BRIDGE(port_dev))
                ->devices[0];

            port->config_state = 3;
            if (ds_dev) {
                if (object_dynamic_cast(OBJECT(ds_dev), TYPE_CXL_TYPE3)) {
                    port->connected_device_type = 5; /* Assume MLD for now */
                } else {
                    port->connected_device_type = 1;
                }
            } else {
                port->connected_device_type = 0;
            }
            port->supported_ld_count = 3;
        } else if (usp->port == in->ports[i]) { /* USP */
            port_dev = PCI_DEVICE(usp);
            port->config_state = 4;
            port->connected_device_type = 0;
        } else {
            return CXL_MBOX_INVALID_INPUT;
        }

        port->port_id = in->ports[i];
        /* Information on status of this port in lnksta, lnkcap */
        if (!port_dev->exp.exp_cap) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
        lnksta = port_dev->config_read(port_dev,
                                       port_dev->exp.exp_cap + PCI_EXP_LNKSTA,
                                       sizeof(lnksta));
        lnkcap = port_dev->config_read(port_dev,
                                       port_dev->exp.exp_cap + PCI_EXP_LNKCAP,
                                       sizeof(lnkcap));
        lnkcap2 = port_dev->config_read(port_dev,
                                        port_dev->exp.exp_cap + PCI_EXP_LNKCAP2,
                                        sizeof(lnkcap2));

        port->max_link_width = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
        port->negotiated_link_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> 4;
        /* No definition for SLS field in linux/pci_regs.h */
        port->supported_link_speeds_vector = (lnkcap2 & 0xFE) >> 1;
        port->max_link_speed = lnkcap & PCI_EXP_LNKCAP_SLS;
        port->current_link_speed = lnksta & PCI_EXP_LNKSTA_CLS;
        /* TODO: Track down if we can get the rest of the info */
        port->ltssm_state = 0x7;
        port->first_lane_num = 0;
        port->link_state = 0;
        port->port_cxl_version_bitmask = 0x2;
        port->connected_device_cxl_version = 0x2;
    }

    pl_size = sizeof(*out) + sizeof(*out->ports) * in->num_ports;
    *len_out = pl_size;

    return CXL_MBOX_SUCCESS;
}
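
/*
 * Worked example (illustrative): with lnksta == 0x0041, the Negotiated
 * Link Width field (PCI_EXP_LNKSTA_NLW, bits 9:4) gives
 * (0x0041 & 0x3f0) >> 4 == 4, i.e. a x4 link, and the Current Link Speed
 * field (PCI_EXP_LNKSTA_CLS, bits 3:0) gives 1, i.e. 2.5 GT/s.
 */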

/* CXL r3.1 Section 8.2.9.1.2: Background Operation Status (Opcode 0002h) */
static CXLRetCode cmd_infostat_bg_op_sts(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint8_t status;
        uint8_t rsvd;
        uint16_t opcode;
        uint16_t returncode;
        uint16_t vendor_ext_status;
    } QEMU_PACKED *bg_op_status;
    QEMU_BUILD_BUG_ON(sizeof(*bg_op_status) != 8);

    bg_op_status = (void *)payload_out;
    bg_op_status->status = cci->bg.complete_pct << 1;
    if (cci->bg.runtime > 0) {
        bg_op_status->status |= 1U << 0;
    }
    bg_op_status->opcode = cci->bg.opcode;
    bg_op_status->returncode = cci->bg.ret_code;
    *len_out = sizeof(*bg_op_status);

    return CXL_MBOX_SUCCESS;
}

/*
 * CXL r3.1 Section 8.2.9.1.5:
 * Request Abort Background Operation (Opcode 0005h)
 */
static CXLRetCode cmd_infostat_bg_op_abort(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    int bg_set = cci->bg.opcode >> 8;
    int bg_cmd = cci->bg.opcode & 0xff;
    const struct cxl_cmd *bg_c = &cci->cxl_cmd_set[bg_set][bg_cmd];

    if (!(bg_c->effect & CXL_MBOX_BACKGROUND_OPERATION_ABORT)) {
        return CXL_MBOX_REQUEST_ABORT_NOTSUP;
    }

    qemu_mutex_lock(&cci->bg.lock);
    if (cci->bg.runtime) {
        /* Abort unless the operation is near complete; then let it finish */
        if (cci->bg.complete_pct < 85) {
            timer_del(cci->bg.timer);
            cci->bg.ret_code = CXL_MBOX_ABORTED;
            cci->bg.starttime = 0;
            cci->bg.runtime = 0;
            cci->bg.aborted = true;
        }
    }
    qemu_mutex_unlock(&cci->bg.lock);

    return CXL_MBOX_SUCCESS;
}

#define CXL_FW_SLOTS 2
#define CXL_FW_SIZE  0x02000000 /* 32 MiB */

/* CXL r3.1 Section 8.2.9.3.1: Get FW Info (Opcode 0200h) */
static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    struct {
        uint8_t slots_supported;
        uint8_t slot_info;
        uint8_t caps;
        uint8_t rsvd[0xd];
        char fw_rev1[0x10];
        char fw_rev2[0x10];
        char fw_rev3[0x10];
        char fw_rev4[0x10];
    } QEMU_PACKED *fw_info;
    QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50);

    if (!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER) ||
        !QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER) ||
        !QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER)) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    fw_info = (void *)payload_out;

    fw_info->slots_supported = CXL_FW_SLOTS;
    fw_info->slot_info = (cci->fw.active_slot & 0x7) |
                         ((cci->fw.staged_slot & 0x7) << 3);
    fw_info->caps = BIT(0); /* online update supported */

    if (cci->fw.slot[0]) {
        pstrcpy(fw_info->fw_rev1, sizeof(fw_info->fw_rev1), "BWFW VERSION 0");
    }
    if (cci->fw.slot[1]) {
        pstrcpy(fw_info->fw_rev2, sizeof(fw_info->fw_rev2), "BWFW VERSION 1");
    }

    *len_out = sizeof(*fw_info);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.3.2: Transfer FW (Opcode 0201h) */
#define CXL_FW_XFER_ALIGNMENT 128

#define CXL_FW_XFER_ACTION_FULL     0x0
#define CXL_FW_XFER_ACTION_INIT     0x1
#define CXL_FW_XFER_ACTION_CONTINUE 0x2
#define CXL_FW_XFER_ACTION_END      0x3
#define CXL_FW_XFER_ACTION_ABORT    0x4

static CXLRetCode cmd_firmware_update_transfer(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    struct {
        uint8_t action;
        uint8_t slot;
        uint8_t rsvd1[2];
        uint32_t offset;
        uint8_t rsvd2[0x78];
        uint8_t data[];
    } QEMU_PACKED *fw_transfer = (void *)payload_in;
    size_t offset, length;

    if (len < sizeof(*fw_transfer)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (fw_transfer->action == CXL_FW_XFER_ACTION_ABORT) {
        /*
         * At this point there aren't any on-going transfers
         * running in the bg - this is serialized before this
         * call altogether. Just mark the state machine and
         * disregard any other input.
         */
        cci->fw.transferring = false;
        return CXL_MBOX_SUCCESS;
    }

    offset = fw_transfer->offset * CXL_FW_XFER_ALIGNMENT;
    length = len - sizeof(*fw_transfer);
    if (offset + length > CXL_FW_SIZE) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (cci->fw.transferring) {
        if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL ||
            fw_transfer->action == CXL_FW_XFER_ACTION_INIT) {
            return CXL_MBOX_FW_XFER_IN_PROGRESS;
        }
        /*
         * Abort partitioned package transfer if over 30 secs
         * between parts. As opposed to the explicit ABORT action,
         * semantically treat this condition as an error - as
         * if a part action were passed without a previous INIT.
         */
        if (difftime(time(NULL), cci->fw.last_partxfer) > 30.0) {
            cci->fw.transferring = false;
            return CXL_MBOX_INVALID_INPUT;
        }
    } else if (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE ||
               fw_transfer->action == CXL_FW_XFER_ACTION_END) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* allow back-to-back retransmission */
    if ((offset != cci->fw.prev_offset || length != cci->fw.prev_len) &&
        (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE ||
         fw_transfer->action == CXL_FW_XFER_ACTION_END)) {
        /* verify no overlaps */
        if (offset < cci->fw.prev_offset + cci->fw.prev_len) {
            return CXL_MBOX_FW_XFER_OUT_OF_ORDER;
        }
    }

    switch (fw_transfer->action) {
    case CXL_FW_XFER_ACTION_FULL: /* ignores offset */
    case CXL_FW_XFER_ACTION_END:
        if (fw_transfer->slot == 0 ||
            fw_transfer->slot == cci->fw.active_slot ||
            fw_transfer->slot > CXL_FW_SLOTS) {
            return CXL_MBOX_FW_INVALID_SLOT;
        }

        /* mark the slot used upon bg completion */
        break;
    case CXL_FW_XFER_ACTION_INIT:
        if (offset != 0) {
            return CXL_MBOX_INVALID_INPUT;
        }

        cci->fw.transferring = true;
        cci->fw.prev_offset = offset;
        cci->fw.prev_len = length;
        break;
    case CXL_FW_XFER_ACTION_CONTINUE:
        cci->fw.prev_offset = offset;
        cci->fw.prev_len = length;
        break;
    default:
        return CXL_MBOX_INVALID_INPUT;
    }

    if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL) {
        cci->bg.runtime = 10 * 1000UL;
    } else {
        cci->bg.runtime = 2 * 1000UL;
    }
    /* keep relevant context for bg completion */
    cci->fw.curr_action = fw_transfer->action;
    cci->fw.curr_slot = fw_transfer->slot;
    *len_out = 0;

    return CXL_MBOX_BG_STARTED;
}
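
/*
 * Illustrative transfer sequence (not normative): a 300 KiB package sent
 * in three parts to slot 2 would be INIT at offset 0, CONTINUE at offset
 * 800 (offsets are in 128-byte units, so 800 * 128 == 100 KiB), then END
 * at offset 1600 with slot == 2. Each part must start where the previous
 * one ended, or retransmit the previous part exactly, per the overlap
 * check above.
 */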
848 */ 849 if (difftime(time(NULL), cci->fw.last_partxfer) > 30.0) { 850 cci->fw.transferring = false; 851 return CXL_MBOX_INVALID_INPUT; 852 } 853 } else if (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE || 854 fw_transfer->action == CXL_FW_XFER_ACTION_END) { 855 return CXL_MBOX_INVALID_INPUT; 856 } 857 858 /* allow back-to-back retransmission */ 859 if ((offset != cci->fw.prev_offset || length != cci->fw.prev_len) && 860 (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE || 861 fw_transfer->action == CXL_FW_XFER_ACTION_END)) { 862 /* verify no overlaps */ 863 if (offset < cci->fw.prev_offset + cci->fw.prev_len) { 864 return CXL_MBOX_FW_XFER_OUT_OF_ORDER; 865 } 866 } 867 868 switch (fw_transfer->action) { 869 case CXL_FW_XFER_ACTION_FULL: /* ignores offset */ 870 case CXL_FW_XFER_ACTION_END: 871 if (fw_transfer->slot == 0 || 872 fw_transfer->slot == cci->fw.active_slot || 873 fw_transfer->slot > CXL_FW_SLOTS) { 874 return CXL_MBOX_FW_INVALID_SLOT; 875 } 876 877 /* mark the slot used upon bg completion */ 878 break; 879 case CXL_FW_XFER_ACTION_INIT: 880 if (offset != 0) { 881 return CXL_MBOX_INVALID_INPUT; 882 } 883 884 cci->fw.transferring = true; 885 cci->fw.prev_offset = offset; 886 cci->fw.prev_len = length; 887 break; 888 case CXL_FW_XFER_ACTION_CONTINUE: 889 cci->fw.prev_offset = offset; 890 cci->fw.prev_len = length; 891 break; 892 default: 893 return CXL_MBOX_INVALID_INPUT; 894 } 895 896 if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL) { 897 cci->bg.runtime = 10 * 1000UL; 898 } else { 899 cci->bg.runtime = 2 * 1000UL; 900 } 901 /* keep relevant context for bg completion */ 902 cci->fw.curr_action = fw_transfer->action; 903 cci->fw.curr_slot = fw_transfer->slot; 904 *len_out = 0; 905 906 return CXL_MBOX_BG_STARTED; 907 } 908 909 static void __do_firmware_xfer(CXLCCI *cci) 910 { 911 switch (cci->fw.curr_action) { 912 case CXL_FW_XFER_ACTION_FULL: 913 case CXL_FW_XFER_ACTION_END: 914 cci->fw.slot[cci->fw.curr_slot - 1] = true; 915 cci->fw.transferring = false; 916 break; 917 case CXL_FW_XFER_ACTION_INIT: 918 case CXL_FW_XFER_ACTION_CONTINUE: 919 time(&cci->fw.last_partxfer); 920 break; 921 default: 922 break; 923 } 924 } 925 926 /* CXL r3.1 section 8.2.9.3.3: Activate FW (Opcode 0202h) */ 927 static CXLRetCode cmd_firmware_update_activate(const struct cxl_cmd *cmd, 928 uint8_t *payload_in, 929 size_t len, 930 uint8_t *payload_out, 931 size_t *len_out, 932 CXLCCI *cci) 933 { 934 struct { 935 uint8_t action; 936 uint8_t slot; 937 } QEMU_PACKED *fw_activate = (void *)payload_in; 938 QEMU_BUILD_BUG_ON(sizeof(*fw_activate) != 0x2); 939 940 if (fw_activate->slot == 0 || 941 fw_activate->slot == cci->fw.active_slot || 942 fw_activate->slot > CXL_FW_SLOTS) { 943 return CXL_MBOX_FW_INVALID_SLOT; 944 } 945 946 /* ensure that an actual fw package is there */ 947 if (!cci->fw.slot[fw_activate->slot - 1]) { 948 return CXL_MBOX_FW_INVALID_SLOT; 949 } 950 951 switch (fw_activate->action) { 952 case 0: /* online */ 953 cci->fw.active_slot = fw_activate->slot; 954 break; 955 case 1: /* reset */ 956 cci->fw.staged_slot = fw_activate->slot; 957 break; 958 default: 959 return CXL_MBOX_INVALID_INPUT; 960 } 961 962 return CXL_MBOX_SUCCESS; 963 } 964 965 /* CXL r3.1 Section 8.2.9.4.1: Get Timestamp (Opcode 0300h) */ 966 static CXLRetCode cmd_timestamp_get(const struct cxl_cmd *cmd, 967 uint8_t *payload_in, 968 size_t len_in, 969 uint8_t *payload_out, 970 size_t *len_out, 971 CXLCCI *cci) 972 { 973 CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate; 974 uint64_t final_time = 

/* CXL r3.1 Section 8.2.9.4.1: Get Timestamp (Opcode 0300h) */
static CXLRetCode cmd_timestamp_get(const struct cxl_cmd *cmd,
                                    uint8_t *payload_in,
                                    size_t len_in,
                                    uint8_t *payload_out,
                                    size_t *len_out,
                                    CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    uint64_t final_time = cxl_device_get_timestamp(cxl_dstate);

    stq_le_p(payload_out, final_time);
    *len_out = 8;

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.4.2: Set Timestamp (Opcode 0301h) */
static CXLRetCode cmd_timestamp_set(const struct cxl_cmd *cmd,
                                    uint8_t *payload_in,
                                    size_t len_in,
                                    uint8_t *payload_out,
                                    size_t *len_out,
                                    CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;

    cxl_dstate->timestamp.set = true;
    cxl_dstate->timestamp.last_set = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    cxl_dstate->timestamp.host_set = le64_to_cpu(*(uint64_t *)payload_in);

    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.5.2.1: Command Effects Log (CEL) */
static const QemuUUID cel_uuid = {
    .data = UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79,
                 0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17)
};

/* CXL r3.1 Section 8.2.9.5.1: Get Supported Logs (Opcode 0400h) */
static CXLRetCode cmd_logs_get_supported(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint16_t entries;
        uint8_t rsvd[6];
        struct {
            QemuUUID uuid;
            uint32_t size;
        } log_entries[1];
    } QEMU_PACKED *supported_logs = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*supported_logs) != 0x1c);

    supported_logs->entries = 1;
    supported_logs->log_entries[0].uuid = cel_uuid;
    supported_logs->log_entries[0].size = 4 * cci->cel_size;

    *len_out = sizeof(*supported_logs);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h) */
static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct {
        QemuUUID uuid;
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_log;

    get_log = (void *)payload_in;

    if (get_log->length > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) {
        return CXL_MBOX_INVALID_LOG;
    }

    /*
     * CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h)
     *   The device shall return Invalid Input if the Offset or Length
     *   fields attempt to access beyond the size of the log as reported by
     *   Get Supported Logs.
     *
     * The CEL only has one entry per opcode, but offset + length may still
     * exceed that total if the inputs are invalid, and would then access
     * beyond the end of cci->cel_log.
     */
    if ((uint64_t)get_log->offset + get_log->length >= sizeof(cci->cel_log)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    *len_out = get_log->length;
    /* Use memmove as payload_out may overlap payload_in */
    memmove(payload_out, cci->cel_log + get_log->offset, get_log->length);

    return CXL_MBOX_SUCCESS;
}
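
/*
 * Illustrative note: each CEL entry is 4 bytes (a 2-byte opcode plus a
 * 2-byte command effect field), which is why Get Supported Logs reports a
 * log size of 4 * cci->cel_size. A Get Log with offset 8 and length 4
 * would therefore read back the third entry.
 */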

/* CXL r3.1 section 8.2.9.6: Features */
/*
 * Get Supported Features output payload
 * CXL r3.1 section 8.2.9.6.1 Table 8-96
 */
typedef struct CXLSupportedFeatureHeader {
    uint16_t entries;
    uint16_t nsuppfeats_dev;
    uint32_t reserved;
} QEMU_PACKED CXLSupportedFeatureHeader;

/*
 * Get Supported Features Supported Feature Entry
 * CXL r3.1 section 8.2.9.6.1 Table 8-97
 */
typedef struct CXLSupportedFeatureEntry {
    QemuUUID uuid;
    uint16_t feat_index;
    uint16_t get_feat_size;
    uint16_t set_feat_size;
    uint32_t attr_flags;
    uint8_t get_feat_version;
    uint8_t set_feat_version;
    uint16_t set_feat_effects;
    uint8_t rsvd[18];
} QEMU_PACKED CXLSupportedFeatureEntry;

/*
 * Get Supported Features Supported Feature Entry
 * CXL r3.1 section 8.2.9.6.1 Table 8-97
 */
/* Supported Feature Entry : attribute flags */
#define CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE BIT(0)
#define CXL_FEAT_ENTRY_ATTR_FLAG_DEEPEST_RESET_PERSISTENCE_MASK GENMASK(3, 1)
#define CXL_FEAT_ENTRY_ATTR_FLAG_PERSIST_ACROSS_FIRMWARE_UPDATE BIT(4)
#define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_DEFAULT_SELECTION BIT(5)
#define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_SAVED_SELECTION BIT(6)

/* Supported Feature Entry : set feature effects */
#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_COLD_RESET BIT(0)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE BIT(1)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_DATA_CHANGE BIT(2)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_POLICY_CHANGE BIT(3)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_LOG_CHANGE BIT(4)
#define CXL_FEAT_ENTRY_SFE_SECURITY_STATE_CHANGE BIT(5)
#define CXL_FEAT_ENTRY_SFE_BACKGROUND_OPERATION BIT(6)
#define CXL_FEAT_ENTRY_SFE_SUPPORT_SECONDARY_MAILBOX BIT(7)
#define CXL_FEAT_ENTRY_SFE_SUPPORT_ABORT_BACKGROUND_OPERATION BIT(8)
#define CXL_FEAT_ENTRY_SFE_CEL_VALID BIT(9)
#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CONV_RESET BIT(10)
#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CXL_RESET BIT(11)

enum CXL_SUPPORTED_FEATURES_LIST {
    CXL_FEATURE_PATROL_SCRUB = 0,
    CXL_FEATURE_ECS,
    CXL_FEATURE_MAX
};

/* Get Feature CXL r3.1 Spec 8.2.9.6.2 */
/*
 * Get Feature input payload
 * CXL r3.1 section 8.2.9.6.2 Table 8-99
 */
/* Get Feature : Payload in selection */
enum CXL_GET_FEATURE_SELECTION {
    CXL_GET_FEATURE_SEL_CURRENT_VALUE,
    CXL_GET_FEATURE_SEL_DEFAULT_VALUE,
    CXL_GET_FEATURE_SEL_SAVED_VALUE,
    CXL_GET_FEATURE_SEL_MAX
};

/* Set Feature CXL r3.1 Spec 8.2.9.6.3 */
/*
 * Set Feature input payload
 * CXL r3.1 section 8.2.9.6.3 Table 8-101
 */
typedef struct CXLSetFeatureInHeader {
    QemuUUID uuid;
    uint32_t flags;
    uint16_t offset;
    uint8_t version;
    uint8_t rsvd[9];
} QEMU_PACKED QEMU_ALIGNED(16) CXLSetFeatureInHeader;

/* Set Feature : Payload in flags */
#define CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK 0x7
enum CXL_SET_FEATURE_FLAG_DATA_TRANSFER {
    CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_CONTINUE_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MAX
};
#define CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET BIT(3)

/* CXL r3.1 section 8.2.9.9.11.1: Device Patrol Scrub Control Feature */
static const QemuUUID patrol_scrub_uuid = {
    .data = UUID(0x96dad7d6, 0xfde8, 0x482b, 0xa7, 0x33,
                 0x75, 0x77, 0x4e, 0x06, 0xdb, 0x8a)
};

typedef struct CXLMemPatrolScrubSetFeature {
    CXLSetFeatureInHeader hdr;
    CXLMemPatrolScrubWriteAttrs feat_data;
} QEMU_PACKED QEMU_ALIGNED(16) CXLMemPatrolScrubSetFeature;

/*
 * CXL r3.1 section 8.2.9.9.11.2:
 * DDR5 Error Check Scrub (ECS) Control Feature
 */
static const QemuUUID ecs_uuid = {
    .data = UUID(0xe5b13f22, 0x2328, 0x4a14, 0xb8, 0xba,
                 0xb9, 0x69, 0x1e, 0x89, 0x33, 0x86)
};

typedef struct CXLMemECSSetFeature {
    CXLSetFeatureInHeader hdr;
    CXLMemECSWriteAttrs feat_data[];
} QEMU_PACKED QEMU_ALIGNED(16) CXLMemECSSetFeature;

/* CXL r3.1 section 8.2.9.6.1: Get Supported Features (Opcode 0500h) */
static CXLRetCode cmd_features_get_supported(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        uint32_t count;
        uint16_t start_index;
        uint16_t reserved;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_feats_in = (void *)payload_in;

    struct {
        CXLSupportedFeatureHeader hdr;
        CXLSupportedFeatureEntry feat_entries[];
    } QEMU_PACKED QEMU_ALIGNED(16) *get_feats_out = (void *)payload_out;
    uint16_t index, req_entries;
    uint16_t entry;

    if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        return CXL_MBOX_UNSUPPORTED;
    }
    if (get_feats_in->count < sizeof(CXLSupportedFeatureHeader) ||
        get_feats_in->start_index >= CXL_FEATURE_MAX) {
        return CXL_MBOX_INVALID_INPUT;
    }

    req_entries = (get_feats_in->count -
                   sizeof(CXLSupportedFeatureHeader)) /
                  sizeof(CXLSupportedFeatureEntry);
    req_entries = MIN(req_entries,
                      (CXL_FEATURE_MAX - get_feats_in->start_index));

    for (entry = 0, index = get_feats_in->start_index;
         entry < req_entries; index++) {
        switch (index) {
        case CXL_FEATURE_PATROL_SCRUB:
            /* Fill supported feature entry for device patrol scrub control */
            get_feats_out->feat_entries[entry++] =
                (struct CXLSupportedFeatureEntry) {
                    .uuid = patrol_scrub_uuid,
                    .feat_index = index,
                    .get_feat_size = sizeof(CXLMemPatrolScrubReadAttrs),
                    .set_feat_size = sizeof(CXLMemPatrolScrubWriteAttrs),
                    .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE,
                    .get_feat_version = CXL_MEMDEV_PS_GET_FEATURE_VERSION,
                    .set_feat_version = CXL_MEMDEV_PS_SET_FEATURE_VERSION,
                    .set_feat_effects = CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE |
                                        CXL_FEAT_ENTRY_SFE_CEL_VALID,
                };
            break;
        case CXL_FEATURE_ECS:
            /* Fill supported feature entry for device DDR5 ECS control */
            get_feats_out->feat_entries[entry++] =
                (struct CXLSupportedFeatureEntry) {
                    .uuid = ecs_uuid,
                    .feat_index = index,
                    .get_feat_size = sizeof(CXLMemECSReadAttrs),
                    .set_feat_size = sizeof(CXLMemECSWriteAttrs),
                    .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE,
                    .get_feat_version = CXL_ECS_GET_FEATURE_VERSION,
                    .set_feat_version = CXL_ECS_SET_FEATURE_VERSION,
                    .set_feat_effects = CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE |
                                        CXL_FEAT_ENTRY_SFE_CEL_VALID,
                };
            break;
        default:
            __builtin_unreachable();
        }
    }
    get_feats_out->hdr.nsuppfeats_dev = CXL_FEATURE_MAX;
    get_feats_out->hdr.entries = req_entries;
    *len_out = sizeof(CXLSupportedFeatureHeader) +
               req_entries * sizeof(CXLSupportedFeatureEntry);

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.6.2: Get Feature (Opcode 0501h) */
static CXLRetCode cmd_features_get_feature(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    struct {
        QemuUUID uuid;
        uint16_t offset;
        uint16_t count;
        uint8_t selection;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_feature;
    uint16_t bytes_to_copy = 0;
    CXLType3Dev *ct3d;
    CXLSetFeatureInfo *set_feat_info;

    if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        return CXL_MBOX_UNSUPPORTED;
    }

    ct3d = CXL_TYPE3(cci->d);
    get_feature = (void *)payload_in;

    set_feat_info = &ct3d->set_feat_info;
    if (qemu_uuid_is_equal(&get_feature->uuid, &set_feat_info->uuid)) {
        return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS;
    }

    if (get_feature->selection != CXL_GET_FEATURE_SEL_CURRENT_VALUE) {
        return CXL_MBOX_UNSUPPORTED;
    }
    if (get_feature->offset + get_feature->count > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (qemu_uuid_is_equal(&get_feature->uuid, &patrol_scrub_uuid)) {
        if (get_feature->offset >= sizeof(CXLMemPatrolScrubReadAttrs)) {
            return CXL_MBOX_INVALID_INPUT;
        }
        bytes_to_copy = sizeof(CXLMemPatrolScrubReadAttrs) -
                        get_feature->offset;
        bytes_to_copy = MIN(bytes_to_copy, get_feature->count);
        memcpy(payload_out,
               (uint8_t *)&ct3d->patrol_scrub_attrs + get_feature->offset,
               bytes_to_copy);
    } else if (qemu_uuid_is_equal(&get_feature->uuid, &ecs_uuid)) {
        if (get_feature->offset >= sizeof(CXLMemECSReadAttrs)) {
            return CXL_MBOX_INVALID_INPUT;
        }
        bytes_to_copy = sizeof(CXLMemECSReadAttrs) - get_feature->offset;
        bytes_to_copy = MIN(bytes_to_copy, get_feature->count);
        memcpy(payload_out,
               (uint8_t *)&ct3d->ecs_attrs + get_feature->offset,
               bytes_to_copy);
    } else {
        return CXL_MBOX_UNSUPPORTED;
    }

    *len_out = bytes_to_copy;

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.6.3: Set Feature (Opcode 0502h) */
static CXLRetCode cmd_features_set_feature(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    CXLSetFeatureInHeader *hdr = (void *)payload_in;
    CXLMemPatrolScrubWriteAttrs *ps_write_attrs;
    CXLMemPatrolScrubSetFeature *ps_set_feature;
    CXLMemECSWriteAttrs *ecs_write_attrs;
    CXLMemECSSetFeature *ecs_set_feature;
    CXLSetFeatureInfo *set_feat_info;
    uint16_t bytes_to_copy = 0;
    uint8_t data_transfer_flag;
    CXLType3Dev *ct3d;
    uint16_t count;

    if (len_in < sizeof(*hdr)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        return CXL_MBOX_UNSUPPORTED;
    }
    ct3d = CXL_TYPE3(cci->d);
    set_feat_info = &ct3d->set_feat_info;

    if (!qemu_uuid_is_null(&set_feat_info->uuid) &&
        !qemu_uuid_is_equal(&hdr->uuid, &set_feat_info->uuid)) {
        return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS;
    }
    if (hdr->flags & CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET) {
        set_feat_info->data_saved_across_reset = true;
    } else {
        set_feat_info->data_saved_across_reset = false;
    }

    data_transfer_flag =
        hdr->flags & CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK;
    if (data_transfer_flag == CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER) {
        set_feat_info->uuid = hdr->uuid;
        set_feat_info->data_size = 0;
    }
    set_feat_info->data_transfer_flag = data_transfer_flag;
    set_feat_info->data_offset = hdr->offset;
    bytes_to_copy = len_in - sizeof(CXLSetFeatureInHeader);

    if (bytes_to_copy == 0) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) {
        if (hdr->version != CXL_MEMDEV_PS_SET_FEATURE_VERSION) {
            return CXL_MBOX_UNSUPPORTED;
        }

        ps_set_feature = (void *)payload_in;
        ps_write_attrs = &ps_set_feature->feat_data;

        if ((uint32_t)hdr->offset + bytes_to_copy >
            sizeof(ct3d->patrol_scrub_wr_attrs)) {
            return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
        }
        memcpy((uint8_t *)&ct3d->patrol_scrub_wr_attrs + hdr->offset,
               ps_write_attrs,
               bytes_to_copy);
        set_feat_info->data_size += bytes_to_copy;

        if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
            data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) {
            ct3d->patrol_scrub_attrs.scrub_cycle &= ~0xFF;
            ct3d->patrol_scrub_attrs.scrub_cycle |=
                ct3d->patrol_scrub_wr_attrs.scrub_cycle_hr & 0xFF;
            ct3d->patrol_scrub_attrs.scrub_flags &= ~0x1;
            ct3d->patrol_scrub_attrs.scrub_flags |=
                ct3d->patrol_scrub_wr_attrs.scrub_flags & 0x1;
        }
    } else if (qemu_uuid_is_equal(&hdr->uuid, &ecs_uuid)) {
        if (hdr->version != CXL_ECS_SET_FEATURE_VERSION) {
            return CXL_MBOX_UNSUPPORTED;
        }

        ecs_set_feature = (void *)payload_in;
        ecs_write_attrs = ecs_set_feature->feat_data;

        if ((uint32_t)hdr->offset + bytes_to_copy >
            sizeof(ct3d->ecs_wr_attrs)) {
            return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
        }
        memcpy((uint8_t *)&ct3d->ecs_wr_attrs + hdr->offset,
               ecs_write_attrs,
               bytes_to_copy);
        set_feat_info->data_size += bytes_to_copy;

        if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
            data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) {
            ct3d->ecs_attrs.ecs_log_cap = ct3d->ecs_wr_attrs.ecs_log_cap;
            for (count = 0; count < CXL_ECS_NUM_MEDIA_FRUS; count++) {
                ct3d->ecs_attrs.fru_attrs[count].ecs_config =
                    ct3d->ecs_wr_attrs.fru_attrs[count].ecs_config & 0x1F;
            }
        }
    } else {
        return CXL_MBOX_UNSUPPORTED;
    }

    if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
        data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER ||
        data_transfer_flag == CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER) {
        memset(&set_feat_info->uuid, 0, sizeof(QemuUUID));
        if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) {
            memset(&ct3d->patrol_scrub_wr_attrs, 0, set_feat_info->data_size);
        } else if (qemu_uuid_is_equal(&hdr->uuid, &ecs_uuid)) {
            memset(&ct3d->ecs_wr_attrs, 0, set_feat_info->data_size);
        }
        set_feat_info->data_transfer_flag = 0;
        set_feat_info->data_saved_across_reset = false;
        set_feat_info->data_offset = 0;
        set_feat_info->data_size = 0;
    }

    return CXL_MBOX_SUCCESS;
}
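
/*
 * Illustrative multi-part flow (assumed usage, mirroring the flag checks
 * above): a feature write larger than one mailbox payload is sent as
 * INITIATE (offset 0), any number of CONTINUEs at increasing offsets,
 * then FINISH, all carrying the same feature UUID. A Set Feature for a
 * different UUID in between fails with Feature Transfer In Progress until
 * FINISH or ABORT clears the in-progress UUID.
 */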

/* CXL r3.1 Section 8.2.9.9.1.1: Identify Memory Device (Opcode 4000h) */
static CXLRetCode cmd_identify_memory_device(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        char fw_revision[0x10];
        uint64_t total_capacity;
        uint64_t volatile_capacity;
        uint64_t persistent_capacity;
        uint64_t partition_align;
        uint16_t info_event_log_size;
        uint16_t warning_event_log_size;
        uint16_t failure_event_log_size;
        uint16_t fatal_event_log_size;
        uint32_t lsa_size;
        uint8_t poison_list_max_mer[3];
        uint16_t inject_poison_limit;
        uint8_t poison_caps;
        uint8_t qos_telemetry_caps;
        uint16_t dc_event_log_size;
    } QEMU_PACKED *id;
    QEMU_BUILD_BUG_ON(sizeof(*id) != 0x45);
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    id = (void *)payload_out;

    snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0);

    stq_le_p(&id->total_capacity,
             cxl_dstate->static_mem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->persistent_capacity,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->volatile_capacity,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d));
    /* 256 poison records */
    st24_le_p(id->poison_list_max_mer, 256);
    /* No limit - so limited by main poison record limit */
    stw_le_p(&id->inject_poison_limit, 0);
    stw_le_p(&id->dc_event_log_size, CXL_DC_EVENT_LOG_SIZE);

    *len_out = sizeof(*id);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.2.1: Get Partition Info (Opcode 4100h) */
static CXLRetCode cmd_ccls_get_partition_info(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    struct {
        uint64_t active_vmem;
        uint64_t active_pmem;
        uint64_t next_vmem;
        uint64_t next_pmem;
    } QEMU_PACKED *part_info = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20);
    CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    stq_le_p(&part_info->active_vmem,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    /*
     * When both next_vmem and next_pmem are 0, there is no pending change
     * to partitioning.
     */
    stq_le_p(&part_info->next_vmem, 0);
    stq_le_p(&part_info->active_pmem,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&part_info->next_pmem, 0);

    *len_out = sizeof(*part_info);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.2.3: Get LSA (Opcode 4102h) */
static CXLRetCode cmd_ccls_get_lsa(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct {
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED *get_lsa;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    uint64_t offset, length;

    get_lsa = (void *)payload_in;
    offset = get_lsa->offset;
    length = get_lsa->length;

    if (offset + length > cvc->get_lsa_size(ct3d)) {
        *len_out = 0;
        return CXL_MBOX_INVALID_INPUT;
    }

    *len_out = cvc->get_lsa(ct3d, payload_out, length, offset);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.2.4: Set LSA (Opcode 4103h) */
static CXLRetCode cmd_ccls_set_lsa(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct set_lsa_pl {
        uint32_t offset;
        uint32_t rsvd;
        uint8_t data[];
    } QEMU_PACKED;
    struct set_lsa_pl *set_lsa_payload = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    const size_t hdr_len = offsetof(struct set_lsa_pl, data);

    *len_out = 0;
    if (len_in < hdr_len) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (set_lsa_payload->offset + len_in > cvc->get_lsa_size(ct3d) + hdr_len) {
        return CXL_MBOX_INVALID_INPUT;
    }
    len_in -= hdr_len;

    cvc->set_lsa(ct3d, set_lsa_payload->data, len_in, set_lsa_payload->offset);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.2 Section 8.2.10.9.3.2: Get Alert Configuration (Opcode 4201h) */
static CXLRetCode cmd_get_alert_config(const struct cxl_cmd *cmd,
                                       uint8_t *payload_in,
                                       size_t len_in,
                                       uint8_t *payload_out,
                                       size_t *len_out,
                                       CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLAlertConfig *out = (CXLAlertConfig *)payload_out;

    memcpy(out, &ct3d->alert_config, sizeof(ct3d->alert_config));
    *len_out = sizeof(ct3d->alert_config);

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.2 Section 8.2.10.9.3.3: Set Alert Configuration (Opcode 4202h) */
static CXLRetCode cmd_set_alert_config(const struct cxl_cmd *cmd,
                                       uint8_t *payload_in,
                                       size_t len_in,
                                       uint8_t *payload_out,
                                       size_t *len_out,
                                       CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLAlertConfig *alert_config = &ct3d->alert_config;
    struct {
        uint8_t valid_alert_actions;
        uint8_t enable_alert_actions;
        uint8_t life_used_warn_thresh;
        uint8_t rsvd;
        uint16_t over_temp_warn_thresh;
        uint16_t under_temp_warn_thresh;
        uint16_t cor_vmem_err_warn_thresh;
        uint16_t cor_pmem_err_warn_thresh;
    } QEMU_PACKED *in = (void *)payload_in;

    if (in->valid_alert_actions & CXL_ALERTS_LIFE_USED_WARN_THRESH) {
        /*
         * CXL r3.2 Table 8-149: The life used warning threshold shall be
         * less than the life used critical alert value.
         */
        if (in->life_used_warn_thresh >=
            alert_config->life_used_crit_alert_thresh) {
            return CXL_MBOX_INVALID_INPUT;
        }
        alert_config->life_used_warn_thresh = in->life_used_warn_thresh;
        alert_config->enable_alerts |= CXL_ALERTS_LIFE_USED_WARN_THRESH;
    }

    if (in->valid_alert_actions & CXL_ALERTS_OVER_TEMP_WARN_THRESH) {
        /*
         * CXL r3.2 Table 8-149: The Device Over-Temperature Warning
         * Threshold shall be less than the Device Over-Temperature
         * Critical Alert Threshold.
         */
        if (in->over_temp_warn_thresh >=
            alert_config->over_temp_crit_alert_thresh) {
            return CXL_MBOX_INVALID_INPUT;
        }
        alert_config->over_temp_warn_thresh = in->over_temp_warn_thresh;
        alert_config->enable_alerts |= CXL_ALERTS_OVER_TEMP_WARN_THRESH;
    }

    if (in->valid_alert_actions & CXL_ALERTS_UNDER_TEMP_WARN_THRESH) {
        /*
         * CXL r3.2 Table 8-149: The Device Under-Temperature Warning
         * Threshold shall be higher than the Device Under-Temperature
         * Critical Alert Threshold.
         */
        if (in->under_temp_warn_thresh <=
            alert_config->under_temp_crit_alert_thresh) {
            return CXL_MBOX_INVALID_INPUT;
        }
        alert_config->under_temp_warn_thresh = in->under_temp_warn_thresh;
        alert_config->enable_alerts |= CXL_ALERTS_UNDER_TEMP_WARN_THRESH;
    }

    if (in->valid_alert_actions & CXL_ALERTS_COR_VMEM_ERR_WARN_THRESH) {
        alert_config->cor_vmem_err_warn_thresh = in->cor_vmem_err_warn_thresh;
        alert_config->enable_alerts |= CXL_ALERTS_COR_VMEM_ERR_WARN_THRESH;
    }

    if (in->valid_alert_actions & CXL_ALERTS_COR_PMEM_ERR_WARN_THRESH) {
        alert_config->cor_pmem_err_warn_thresh = in->cor_pmem_err_warn_thresh;
        alert_config->enable_alerts |= CXL_ALERTS_COR_PMEM_ERR_WARN_THRESH;
    }
    return CXL_MBOX_SUCCESS;
}
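
/*
 * Illustrative example of the ordering rules enforced above: with a
 * critical over-temperature alert threshold of 85, a Set Alert
 * Configuration requesting over_temp_warn_thresh = 70 succeeds, while 85
 * or above fails with Invalid Input; an under-temperature warning
 * threshold must instead sit above its critical value.
 */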

/* Perform the actual device zeroing */
static void __do_sanitization(CXLType3Dev *ct3d)
{
    MemoryRegion *mr;

    if (ct3d->hostvmem) {
        mr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (mr) {
            void *hostmem = memory_region_get_ram_ptr(mr);
            memset(hostmem, 0, memory_region_size(mr));
        }
    }

    if (ct3d->hostpmem) {
        mr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (mr) {
            void *hostmem = memory_region_get_ram_ptr(mr);
            memset(hostmem, 0, memory_region_size(mr));
        }
    }
    if (ct3d->lsa) {
        mr = host_memory_backend_get_memory(ct3d->lsa);
        if (mr) {
            void *lsa = memory_region_get_ram_ptr(mr);
            memset(lsa, 0, memory_region_size(mr));
        }
    }
    cxl_discard_all_event_records(&ct3d->cxl_dstate);
}

static int get_sanitize_duration(uint64_t total_mem)
{
    int secs = 0;

    if (total_mem <= 512) {
        secs = 4;
    } else if (total_mem <= 1024) {
        secs = 8;
    } else if (total_mem <= 2 * 1024) {
        secs = 15;
    } else if (total_mem <= 4 * 1024) {
        secs = 30;
    } else if (total_mem <= 8 * 1024) {
        secs = 60;
    } else if (total_mem <= 16 * 1024) {
        secs = 2 * 60;
    } else if (total_mem <= 32 * 1024) {
        secs = 4 * 60;
    } else if (total_mem <= 64 * 1024) {
        secs = 8 * 60;
    } else if (total_mem <= 128 * 1024) {
        secs = 15 * 60;
    } else if (total_mem <= 256 * 1024) {
        secs = 30 * 60;
    } else if (total_mem <= 512 * 1024) {
        secs = 60 * 60;
    } else if (total_mem <= 1024 * 1024) {
        secs = 120 * 60;
    } else {
        secs = 240 * 60; /* max 4 hrs */
    }

    return secs;
}
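
/*
 * Worked example (illustrative): total_mem is in MiB, so a device with
 * 16 GiB of volatile plus 16 GiB of persistent capacity passes
 * total_mem == 32768 and gets a simulated sanitize time of 4 minutes;
 * anything over 1 TiB saturates at the 4 hour cap.
 */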

/*
 * CXL r3.1 Section 8.2.9.9.5.1: Sanitize (Opcode 4400h)
 *
 * Once the Sanitize command has started successfully, the device shall be
 * placed in the media disabled state. If the command fails or is interrupted
 * by a reset or power failure, it shall remain in the media disabled state
 * until a successful Sanitize command has been completed. During this state:
 *
 * 1. Memory writes to the device will have no effect, and all memory reads
 * will return random values (no user data returned, even for locations that
 * the failed Sanitize operation didn't sanitize yet).
 *
 * 2. Mailbox commands shall still be processed in the disabled state, except
 * that commands that access Sanitized areas shall fail with the Media Disabled
 * error code.
 */
static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint64_t total_mem; /* in MiB */
    int secs;

    total_mem = (ct3d->cxl_dstate.vmem_size + ct3d->cxl_dstate.pmem_size) >> 20;
    secs = get_sanitize_duration(total_mem);

    /* Other background commands are rejected as Busy while this runs */
    cci->bg.runtime = secs * 1000UL;
    *len_out = 0;

    cxl_dev_disable_media(&ct3d->cxl_dstate);

    /* The actual sanitization happens when the background op completes */
    return CXL_MBOX_BG_STARTED;
}

struct dpa_range_list_entry {
    uint64_t starting_dpa;
    uint64_t length;
} QEMU_PACKED;

struct CXLSanitizeInfo {
    uint32_t dpa_range_count;
    uint8_t fill_value;
    struct dpa_range_list_entry dpa_range_list[];
} QEMU_PACKED;

static uint64_t get_vmr_size(CXLType3Dev *ct3d, MemoryRegion **vmr)
{
    MemoryRegion *mr;
    if (ct3d->hostvmem) {
        mr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (vmr) {
            *vmr = mr;
        }
        return memory_region_size(mr);
    }
    return 0;
}

static uint64_t get_pmr_size(CXLType3Dev *ct3d, MemoryRegion **pmr)
{
    MemoryRegion *mr;
    if (ct3d->hostpmem) {
        mr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (pmr) {
            *pmr = mr;
        }
        return memory_region_size(mr);
    }
    return 0;
}

static uint64_t get_dc_size(CXLType3Dev *ct3d, MemoryRegion **dc_mr)
{
    MemoryRegion *mr;
    if (ct3d->dc.host_dc) {
        mr = host_memory_backend_get_memory(ct3d->dc.host_dc);
        if (dc_mr) {
            *dc_mr = mr;
        }
        return memory_region_size(mr);
    }
    return 0;
}

static int validate_dpa_addr(CXLType3Dev *ct3d, uint64_t dpa_addr,
                             size_t length)
{
    uint64_t vmr_size, pmr_size, dc_size;

    if ((dpa_addr % CXL_CACHE_LINE_SIZE) ||
        (length % CXL_CACHE_LINE_SIZE) ||
        (!length)) {
        return -EINVAL;
    }

    vmr_size = get_vmr_size(ct3d, NULL);
    pmr_size = get_pmr_size(ct3d, NULL);
    dc_size = get_dc_size(ct3d, NULL);

    if (dpa_addr + length > vmr_size + pmr_size + dc_size) {
        return -EINVAL;
    }

    /* Anything at or above the static capacity must be backed by extents */
    if (dpa_addr >= vmr_size + pmr_size) {
        if (!ct3_test_region_block_backed(ct3d, dpa_addr, length)) {
            return -ENODEV;
        }
    }

    return 0;
}
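
/*
 * Illustrative results only, assuming a hypothetical device with 1 GiB of
 * volatile plus 1 GiB of persistent capacity and no accepted DC extents:
 *   validate_dpa_addr(ct3d, 0x20, 0x40)    -> -EINVAL (DPA not 64-byte aligned)
 *   validate_dpa_addr(ct3d, 2 * GiB, 0x40) -> -EINVAL (beyond total capacity)
 *   validate_dpa_addr(ct3d, 1 * GiB, 0x40) -> 0 (falls in the persistent range)
 */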

static int sanitize_range(CXLType3Dev *ct3d, uint64_t dpa_addr, size_t length,
                          uint8_t fill_value)
{
    uint64_t vmr_size, pmr_size;
    AddressSpace *as = NULL;
    MemTxAttrs mem_attrs = {};

    vmr_size = get_vmr_size(ct3d, NULL);
    pmr_size = get_pmr_size(ct3d, NULL);

    if (dpa_addr < vmr_size) {
        as = &ct3d->hostvmem_as;
    } else if (dpa_addr < vmr_size + pmr_size) {
        as = &ct3d->hostpmem_as;
    } else {
        if (!ct3_test_region_block_backed(ct3d, dpa_addr, length)) {
            return -ENODEV;
        }
        as = &ct3d->dc.host_dc_as;
    }

    return address_space_set(as, dpa_addr, fill_value, length, mem_attrs);
}

/* Perform the actual fill or zero of the requested DPA ranges */
static void __do_sanitize(CXLType3Dev *ct3d)
{
    struct CXLSanitizeInfo *san_info = ct3d->media_op_sanitize;
    int dpa_range_count = san_info->dpa_range_count;
    int rc = 0;
    int i;

    for (i = 0; i < dpa_range_count; i++) {
        rc = sanitize_range(ct3d, san_info->dpa_range_list[i].starting_dpa,
                            san_info->dpa_range_list[i].length,
                            san_info->fill_value);
        if (rc) {
            break;
        }
    }
    g_free(ct3d->media_op_sanitize);
    ct3d->media_op_sanitize = NULL;
}

enum {
    MEDIA_OP_CLASS_GENERAL = 0x0,
        #define MEDIA_OP_GEN_SUBC_DISCOVERY 0x0
    MEDIA_OP_CLASS_SANITIZE = 0x1,
        #define MEDIA_OP_SAN_SUBC_SANITIZE 0x0
        #define MEDIA_OP_SAN_SUBC_ZERO 0x1
};

struct media_op_supported_list_entry {
    uint8_t media_op_class;
    uint8_t media_op_subclass;
};

struct media_op_discovery_out_pl {
    uint64_t dpa_range_granularity;
    uint16_t total_supported_operations;
    uint16_t num_of_supported_operations;
    struct media_op_supported_list_entry entry[];
} QEMU_PACKED;

static const struct media_op_supported_list_entry media_op_matrix[] = {
    { MEDIA_OP_CLASS_GENERAL, MEDIA_OP_GEN_SUBC_DISCOVERY },
    { MEDIA_OP_CLASS_SANITIZE, MEDIA_OP_SAN_SUBC_SANITIZE },
    { MEDIA_OP_CLASS_SANITIZE, MEDIA_OP_SAN_SUBC_ZERO },
};

static CXLRetCode media_operations_discovery(uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out)
{
    struct {
        uint8_t media_operation_class;
        uint8_t media_operation_subclass;
        uint8_t rsvd[2];
        uint32_t dpa_range_count;
        struct {
            uint16_t start_index;
            uint16_t num_ops;
        } discovery_osa;
    } QEMU_PACKED *media_op_in_disc_pl = (void *)payload_in;
    struct media_op_discovery_out_pl *media_out_pl =
        (struct media_op_discovery_out_pl *)payload_out;
    int num_ops, start_index, i;
    int count = 0;

    if (len_in < sizeof(*media_op_in_disc_pl)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    num_ops = media_op_in_disc_pl->discovery_osa.num_ops;
    start_index = media_op_in_disc_pl->discovery_osa.start_index;

    /*
     * As per CXL r3.2 section 8.2.10.9.5.3, dpa_range_count should be zero
     * and the start index should not exceed the total number of entries for
     * the discovery subclass command.
     */
    if (media_op_in_disc_pl->dpa_range_count ||
        start_index > ARRAY_SIZE(media_op_matrix)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    media_out_pl->dpa_range_granularity = CXL_CACHE_LINE_SIZE;
    media_out_pl->total_supported_operations =
        ARRAY_SIZE(media_op_matrix);
    if (num_ops > 0) {
        for (i = start_index; i < start_index + num_ops; i++) {
            /* Never walk past the end of the supported-operations matrix */
            if (i >= ARRAY_SIZE(media_op_matrix)) {
                break;
            }
            media_out_pl->entry[count].media_op_class =
                media_op_matrix[i].media_op_class;
            media_out_pl->entry[count].media_op_subclass =
                media_op_matrix[i].media_op_subclass;
            count++;
            if (count == num_ops) {
                break;
            }
        }
    }

    media_out_pl->num_of_supported_operations = count;
    *len_out = sizeof(*media_out_pl) + count * sizeof(*media_out_pl->entry);
    return CXL_MBOX_SUCCESS;
}
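
/*
 * Illustrative exchange only: with the three-entry media_op_matrix above, a
 * discovery request with start_index = 1 and num_ops = 8 returns the two
 * sanitize entries (num_of_supported_operations = 2) while
 * total_supported_operations remains 3; the in-loop bounds check stops the
 * walk at the end of the matrix.
 */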

static CXLRetCode media_operations_sanitize(CXLType3Dev *ct3d,
                                            uint8_t *payload_in,
                                            size_t len_in,
                                            uint8_t *payload_out,
                                            size_t *len_out,
                                            uint8_t fill_value,
                                            CXLCCI *cci)
{
    struct media_operations_sanitize {
        uint8_t media_operation_class;
        uint8_t media_operation_subclass;
        uint8_t rsvd[2];
        uint32_t dpa_range_count;
        struct dpa_range_list_entry dpa_range_list[];
    } QEMU_PACKED *media_op_in_sanitize_pl = (void *)payload_in;
    uint32_t dpa_range_count = media_op_in_sanitize_pl->dpa_range_count;
    uint64_t total_mem = 0;
    size_t dpa_range_list_size;
    int secs = 0, i;

    if (dpa_range_count == 0) {
        return CXL_MBOX_SUCCESS;
    }

    dpa_range_list_size = dpa_range_count * sizeof(struct dpa_range_list_entry);
    if (len_in < (sizeof(*media_op_in_sanitize_pl) + dpa_range_list_size)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    for (i = 0; i < dpa_range_count; i++) {
        uint64_t start_dpa =
            media_op_in_sanitize_pl->dpa_range_list[i].starting_dpa;
        uint64_t length = media_op_in_sanitize_pl->dpa_range_list[i].length;

        if (validate_dpa_addr(ct3d, start_dpa, length)) {
            return CXL_MBOX_INVALID_INPUT;
        }
        total_mem += length;
    }
    ct3d->media_op_sanitize = g_malloc0(sizeof(struct CXLSanitizeInfo) +
                                        dpa_range_list_size);

    ct3d->media_op_sanitize->dpa_range_count = dpa_range_count;
    ct3d->media_op_sanitize->fill_value = fill_value;
    memcpy(ct3d->media_op_sanitize->dpa_range_list,
           media_op_in_sanitize_pl->dpa_range_list,
           dpa_range_list_size);
    secs = get_sanitize_duration(total_mem >> 20);

    /* Other background commands are rejected as Busy while this runs */
    cci->bg.runtime = secs * 1000UL;
    *len_out = 0;
    /*
     * Media operation sanitize is targeted, so there is no need to disable
     * media or clear the event logs.
     */
    return CXL_MBOX_BG_STARTED;
}

static CXLRetCode cmd_media_operations(const struct cxl_cmd *cmd,
                                       uint8_t *payload_in,
                                       size_t len_in,
                                       uint8_t *payload_out,
                                       size_t *len_out,
                                       CXLCCI *cci)
{
    struct {
        uint8_t media_operation_class;
        uint8_t media_operation_subclass;
        uint8_t rsvd[2];
        uint32_t dpa_range_count;
    } QEMU_PACKED *media_op_in_common_pl = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint8_t media_op_cl = 0;
    uint8_t media_op_subclass = 0;

    if (len_in < sizeof(*media_op_in_common_pl)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    media_op_cl = media_op_in_common_pl->media_operation_class;
    media_op_subclass = media_op_in_common_pl->media_operation_subclass;

    switch (media_op_cl) {
    case MEDIA_OP_CLASS_GENERAL:
        if (media_op_subclass != MEDIA_OP_GEN_SUBC_DISCOVERY) {
            return CXL_MBOX_UNSUPPORTED;
        }

        return media_operations_discovery(payload_in, len_in, payload_out,
                                          len_out);
    case MEDIA_OP_CLASS_SANITIZE:
        switch (media_op_subclass) {
        case MEDIA_OP_SAN_SUBC_SANITIZE:
            return media_operations_sanitize(ct3d, payload_in, len_in,
                                             payload_out, len_out, 0xF,
                                             cci);
        case MEDIA_OP_SAN_SUBC_ZERO:
            return media_operations_sanitize(ct3d, payload_in, len_in,
                                             payload_out, len_out, 0,
                                             cci);
        default:
            return CXL_MBOX_UNSUPPORTED;
        }
    default:
        return CXL_MBOX_UNSUPPORTED;
    }
}

static CXLRetCode cmd_get_security_state(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    uint32_t *state = (uint32_t *)payload_out;

    *state = 0;
    *len_out = 4;
    return CXL_MBOX_SUCCESS;
}

/*
 * CXL r3.1 Section 8.2.9.9.4.1: Get Poison List (Opcode 4300h)
 *
 * This is very inefficient, but good enough for now!
 * Also the payload will always fit, so no need to handle the MORE flag and
 * make this stateful. We may want to allow longer poison lists to aid
 * testing of that kernel functionality.
 */
static CXLRetCode cmd_media_get_poison_list(const struct cxl_cmd *cmd,
                                            uint8_t *payload_in,
                                            size_t len_in,
                                            uint8_t *payload_out,
                                            size_t *len_out,
                                            CXLCCI *cci)
{
    struct get_poison_list_pl {
        uint64_t pa;
        uint64_t length;
    } QEMU_PACKED;

    struct get_poison_list_out_pl {
        uint8_t flags;
        uint8_t rsvd1;
        uint64_t overflow_timestamp;
        uint16_t count;
        uint8_t rsvd2[0x14];
        struct {
            uint64_t addr;
            uint32_t length;
            uint32_t resv;
        } QEMU_PACKED records[];
    } QEMU_PACKED;

    struct get_poison_list_pl *in = (void *)payload_in;
    struct get_poison_list_out_pl *out = (void *)payload_out;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint16_t record_count = 0, i = 0;
    uint64_t query_start, query_length;
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLPoison *ent;
    uint16_t out_pl_len;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    QLIST_FOREACH(ent, poison_list, node) {
        /* Check for no overlap */
        if (!ranges_overlap(ent->start, ent->length,
                            query_start, query_length)) {
            continue;
        }
        record_count++;
    }
    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    QLIST_FOREACH(ent, poison_list, node) {
        uint64_t start, stop;

        /* Check for no overlap */
        if (!ranges_overlap(ent->start, ent->length,
                            query_start, query_length)) {
            continue;
        }

        /* Deal with overlap */
        start = MAX(ROUND_DOWN(ent->start, 64ull), query_start);
        stop = MIN(ROUND_DOWN(ent->start, 64ull) + ent->length,
                   query_start + query_length);
        stq_le_p(&out->records[i].addr, start | (ent->type & 0x7));
        stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE);
        i++;
    }
    if (ct3d->poison_list_overflowed) {
        out->flags = (1 << 1);
        stq_le_p(&out->overflow_timestamp, ct3d->poison_list_overflow_ts);
    }
    if (scan_media_running(cci)) {
        out->flags |= (1 << 2);
    }

    stw_le_p(&out->count, record_count);
    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.4.2: Inject Poison (Opcode 4301h) */
static CXLRetCode cmd_media_inject_poison(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLPoison *ent;
    struct inject_poison_pl {
        uint64_t dpa;
    };
    struct inject_poison_pl *in = (void *)payload_in;
    uint64_t dpa = ldq_le_p(&in->dpa);
    CXLPoison *p;

    QLIST_FOREACH(ent, poison_list, node) {
        if (dpa >= ent->start &&
            dpa + CXL_CACHE_LINE_SIZE <= ent->start + ent->length) {
            return CXL_MBOX_SUCCESS;
        }
    }
    /*
     * Freeze the list if there is an ongoing scan media operation.
     */
    if (scan_media_running(cci)) {
        /*
         * XXX: Spec is ambiguous - is this case considered
         * a successful return despite not adding to the list?
         */
        goto success;
    }

    if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
        return CXL_MBOX_INJECT_POISON_LIMIT;
    }
    p = g_new0(CXLPoison, 1);

    p->length = CXL_CACHE_LINE_SIZE;
    p->start = dpa;
    p->type = CXL_POISON_TYPE_INJECTED;

    /*
     * Possible todo: Merge with existing entry if next to it and if same type
     */
    QLIST_INSERT_HEAD(poison_list, p, node);
    ct3d->poison_list_cnt++;
success:
    *len_out = 0;

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.4.3: Clear Poison (Opcode 4302h) */
static CXLRetCode cmd_media_clear_poison(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    struct clear_poison_pl {
        uint64_t dpa;
        uint8_t data[64];
    };
    CXLPoison *ent;
    uint64_t dpa;

    struct clear_poison_pl *in = (void *)payload_in;

    dpa = ldq_le_p(&in->dpa);
    if (dpa + CXL_CACHE_LINE_SIZE > cxl_dstate->static_mem_size +
        ct3d->dc.total_capacity) {
        return CXL_MBOX_INVALID_PA;
    }

    /* Clearing a region with no poison is not an error so always do so */
    if (cvc->set_cacheline) {
        if (!cvc->set_cacheline(ct3d, dpa, in->data)) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
    }

    /*
     * Freeze the list if there is an ongoing scan media operation.
     */
    if (scan_media_running(cci)) {
        /*
         * XXX: Spec is ambiguous - is this case considered
         * a successful return despite not removing from the list?
         */
        goto success;
    }

    QLIST_FOREACH(ent, poison_list, node) {
        /*
         * Test for containment within an entry. This is simpler than the
         * general case because we clear exactly 64 bytes and entries are
         * 64-byte aligned.
         */
        if ((dpa >= ent->start) && (dpa < ent->start + ent->length)) {
            break;
        }
    }
    if (!ent) {
        goto success;
    }

    QLIST_REMOVE(ent, node);
    ct3d->poison_list_cnt--;

    if (dpa > ent->start) {
        CXLPoison *frag;
        /* Cannot overflow as replacing existing entry */

        frag = g_new0(CXLPoison, 1);

        frag->start = ent->start;
        frag->length = dpa - ent->start;
        frag->type = ent->type;

        QLIST_INSERT_HEAD(poison_list, frag, node);
        ct3d->poison_list_cnt++;
    }

    if (dpa + CXL_CACHE_LINE_SIZE < ent->start + ent->length) {
        CXLPoison *frag;

        if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
            cxl_set_poison_list_overflowed(ct3d);
        } else {
            frag = g_new0(CXLPoison, 1);

            frag->start = dpa + CXL_CACHE_LINE_SIZE;
            frag->length = ent->start + ent->length - frag->start;
            frag->type = ent->type;
            QLIST_INSERT_HEAD(poison_list, frag, node);
            ct3d->poison_list_cnt++;
        }
    }
    /* Any fragments have been added; free the original entry */
    g_free(ent);
success:
    *len_out = 0;

    return CXL_MBOX_SUCCESS;
}

/*
 * CXL r3.1 section 8.2.9.9.4.4: Get Scan Media Capabilities
 */
static CXLRetCode
cmd_media_get_scan_media_capabilities(const struct cxl_cmd *cmd,
                                      uint8_t *payload_in,
                                      size_t len_in,
                                      uint8_t *payload_out,
                                      size_t *len_out,
                                      CXLCCI *cci)
{
    struct get_scan_media_capabilities_pl {
        uint64_t pa;
        uint64_t length;
    } QEMU_PACKED;

    struct get_scan_media_capabilities_out_pl {
        uint32_t estimated_runtime_ms;
    };

    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    struct get_scan_media_capabilities_pl *in = (void *)payload_in;
    struct get_scan_media_capabilities_out_pl *out = (void *)payload_out;
    uint64_t query_start;
    uint64_t query_length;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    if (query_start + query_length > cxl_dstate->static_mem_size) {
        return CXL_MBOX_INVALID_PA;
    }

    /*
     * Just use 400 nanosecond access/read latency + 100 ns for
     * the cost of updating the poison list. For small enough
     * chunks return at least 1 ms.
     */
    stl_le_p(&out->estimated_runtime_ms,
             MAX(1, query_length * (0.0005L / 64)));

    *len_out = sizeof(*out);
    return CXL_MBOX_SUCCESS;
}
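
/*
 * Illustrative arithmetic only: 0.0005 ms per 64-byte cacheline is the
 * 400 ns + 100 ns figure from the comment above. A 128 MiB query covers
 * 2097152 cachelines -> ~1049 ms, while queries below ~128 KiB (2000
 * cachelines) clamp to the 1 ms minimum.
 */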

static void __do_scan_media(CXLType3Dev *ct3d)
{
    CXLPoison *ent;
    unsigned int results_cnt = 0;

    QLIST_FOREACH(ent, &ct3d->scan_media_results, node) {
        results_cnt++;
    }

    /* only scan media may clear the overflow */
    if (ct3d->poison_list_overflowed &&
        ct3d->poison_list_cnt == results_cnt) {
        cxl_clear_poison_list_overflowed(ct3d);
    }
    /* scan media has run since last conventional reset */
    ct3d->scan_media_hasrun = true;
}

/*
 * CXL r3.1 section 8.2.9.9.4.5: Scan Media
 */
static CXLRetCode cmd_media_scan_media(const struct cxl_cmd *cmd,
                                       uint8_t *payload_in,
                                       size_t len_in,
                                       uint8_t *payload_out,
                                       size_t *len_out,
                                       CXLCCI *cci)
{
    struct scan_media_pl {
        uint64_t pa;
        uint64_t length;
        uint8_t flags;
    } QEMU_PACKED;

    struct scan_media_pl *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    uint64_t query_start;
    uint64_t query_length;
    CXLPoison *ent, *next;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    /*
     * Without DC regions the query must fit within the static capacity;
     * with DC regions it may extend into the dynamic capacity range.
     */
    if (!ct3d->dc.num_regions &&
        query_start + query_length > cxl_dstate->static_mem_size) {
        return CXL_MBOX_INVALID_PA;
    }
    if (ct3d->dc.num_regions && query_start + query_length >
        cxl_dstate->static_mem_size + ct3d->dc.total_capacity) {
        return CXL_MBOX_INVALID_PA;
    }

    if (in->flags == 0) { /* TODO */
        qemu_log_mask(LOG_UNIMP,
                      "Scan Media Event Log is unsupported\n");
    }

    /* any previous results are discarded upon a new Scan Media */
    QLIST_FOREACH_SAFE(ent, &ct3d->scan_media_results, node, next) {
        QLIST_REMOVE(ent, node);
        g_free(ent);
    }

    /* kill the poison list - it will be recreated */
    if (ct3d->poison_list_overflowed) {
        QLIST_FOREACH_SAFE(ent, &ct3d->poison_list, node, next) {
            QLIST_REMOVE(ent, node);
            g_free(ent);
            ct3d->poison_list_cnt--;
        }
    }

    /*
     * Scan the backup list and move corresponding entries
     * into the results list, updating the poison list
     * when possible.
     */
    QLIST_FOREACH_SAFE(ent, &ct3d->poison_list_bkp, node, next) {
        CXLPoison *res;

        if (ent->start >= query_start + query_length ||
            ent->start + ent->length <= query_start) {
            continue;
        }

        /*
         * If a Get Poison List cmd comes in while this
         * scan is being done, it will see the new complete
         * list, while setting the respective flag.
         */
        if (ct3d->poison_list_cnt < CXL_POISON_LIST_LIMIT) {
            CXLPoison *p = g_new0(CXLPoison, 1);

            p->start = ent->start;
            p->length = ent->length;
            p->type = ent->type;
            QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
            ct3d->poison_list_cnt++;
        }

        res = g_new0(CXLPoison, 1);
        res->start = ent->start;
        res->length = ent->length;
        res->type = ent->type;
        QLIST_INSERT_HEAD(&ct3d->scan_media_results, res, node);

        QLIST_REMOVE(ent, node);
        g_free(ent);
    }

    cci->bg.runtime = MAX(1, query_length * (0.0005L / 64));
    *len_out = 0;

    return CXL_MBOX_BG_STARTED;
}

/*
 * CXL r3.1 section 8.2.9.9.4.6: Get Scan Media Results
 */
static CXLRetCode cmd_media_get_scan_media_results(const struct cxl_cmd *cmd,
                                                   uint8_t *payload_in,
                                                   size_t len_in,
                                                   uint8_t *payload_out,
                                                   size_t *len_out,
                                                   CXLCCI *cci)
{
    struct get_scan_media_results_out_pl {
        uint64_t dpa_restart;
        uint64_t length;
        uint8_t flags;
        uint8_t rsvd1;
        uint16_t count;
        uint8_t rsvd2[0xc];
        struct {
            uint64_t addr;
            uint32_t length;
            uint32_t resv;
        } QEMU_PACKED records[];
    } QEMU_PACKED;

    struct get_scan_media_results_out_pl *out = (void *)payload_out;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLPoisonList *scan_media_results = &ct3d->scan_media_results;
    CXLPoison *ent, *next;
    uint16_t total_count = 0, record_count = 0, i = 0;
    uint16_t out_pl_len;

    if (!ct3d->scan_media_hasrun) {
        return CXL_MBOX_UNSUPPORTED;
    }

    /*
     * Calculate limits; all entries are within the address range of the
     * last Scan Media call.
     */
    QLIST_FOREACH(ent, scan_media_results, node) {
        /* Only count as many records as still fit in the output payload */
        if (sizeof(*out) + (record_count + 1) * sizeof(out->records[0]) <=
            CXL_MAILBOX_MAX_PAYLOAD_SIZE) {
            record_count++;
        }
        total_count++;
    }

    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    memset(out, 0, out_pl_len);
    QLIST_FOREACH_SAFE(ent, scan_media_results, node, next) {
        uint64_t start, stop;

        if (i == record_count) {
            break;
        }

        start = ROUND_DOWN(ent->start, 64ull);
        stop = ROUND_DOWN(ent->start, 64ull) + ent->length;
        stq_le_p(&out->records[i].addr, start);
        stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE);
        i++;

        /* consume the returning entry */
        QLIST_REMOVE(ent, node);
        g_free(ent);
    }

    stw_le_p(&out->count, record_count);
    if (total_count > record_count) {
        out->flags = (1 << 0); /* More Media Error Records */
    }

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}
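
/*
 * Illustrative paging only: assuming a 2 KiB mailbox payload, at most
 * (2048 - 32) / 16 = 126 records fit per call. Returned entries are
 * consumed from the results list, so when "More Media Error Records"
 * (bit 0 of flags) is set the caller simply reissues the command for the
 * next batch.
 */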

/*
 * CXL r3.1 section 8.2.9.9.9.1: Get Dynamic Capacity Configuration
 * (Opcode: 4800h)
 */
static CXLRetCode cmd_dcd_get_dyn_cap_config(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    struct {
        uint8_t region_cnt;
        uint8_t start_rid;
    } QEMU_PACKED *in = (void *)payload_in;
    struct {
        uint8_t num_regions;
        uint8_t regions_returned;
        uint8_t rsvd1[6];
        struct {
            uint64_t base;
            uint64_t decode_len;
            uint64_t region_len;
            uint64_t block_size;
            uint32_t dsmadhandle;
            uint8_t flags;
            uint8_t rsvd2[3];
        } QEMU_PACKED records[];
    } QEMU_PACKED *out = (void *)payload_out;
    struct {
        uint32_t num_extents_supported;
        uint32_t num_extents_available;
        uint32_t num_tags_supported;
        uint32_t num_tags_available;
    } QEMU_PACKED *extra_out;
    uint16_t record_count;
    uint16_t i;
    uint16_t out_pl_len;
    uint8_t start_rid;

    start_rid = in->start_rid;
    if (start_rid >= ct3d->dc.num_regions) {
        return CXL_MBOX_INVALID_INPUT;
    }

    record_count = MIN(ct3d->dc.num_regions - start_rid, in->region_cnt);

    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    extra_out = (void *)(payload_out + out_pl_len);
    out_pl_len += sizeof(*extra_out);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    out->num_regions = ct3d->dc.num_regions;
    out->regions_returned = record_count;
    for (i = 0; i < record_count; i++) {
        stq_le_p(&out->records[i].base,
                 ct3d->dc.regions[start_rid + i].base);
        stq_le_p(&out->records[i].decode_len,
                 ct3d->dc.regions[start_rid + i].decode_len /
                 CXL_CAPACITY_MULTIPLIER);
        stq_le_p(&out->records[i].region_len,
                 ct3d->dc.regions[start_rid + i].len);
        stq_le_p(&out->records[i].block_size,
                 ct3d->dc.regions[start_rid + i].block_size);
        stl_le_p(&out->records[i].dsmadhandle,
                 ct3d->dc.regions[start_rid + i].dsmadhandle);
        out->records[i].flags = ct3d->dc.regions[start_rid + i].flags;
    }
    /*
     * TODO: Assign the values once extents and tags are in use.
     */
    stl_le_p(&extra_out->num_extents_supported, CXL_NUM_EXTENTS_SUPPORTED);
    stl_le_p(&extra_out->num_extents_available, CXL_NUM_EXTENTS_SUPPORTED -
             ct3d->dc.total_extent_count);
    stl_le_p(&extra_out->num_tags_supported, CXL_NUM_TAGS_SUPPORTED);
    stl_le_p(&extra_out->num_tags_available, CXL_NUM_TAGS_SUPPORTED);

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}

/*
 * CXL r3.1 section 8.2.9.9.9.2:
 * Get Dynamic Capacity Extent List (Opcode 4801h)
 */
static CXLRetCode cmd_dcd_get_dyn_cap_ext_list(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len_in,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    struct {
        uint32_t extent_cnt;
        uint32_t start_extent_id;
    } QEMU_PACKED *in = (void *)payload_in;
    struct {
        uint32_t count;
        uint32_t total_extents;
        uint32_t generation_num;
        uint8_t rsvd[4];
        CXLDCExtentRaw records[];
    } QEMU_PACKED *out = (void *)payload_out;
    uint32_t start_extent_id = in->start_extent_id;
    CXLDCExtentList *extent_list = &ct3d->dc.extents;
    uint16_t record_count = 0, i = 0, record_done = 0;
    uint16_t out_pl_len, size;
    CXLDCExtent *ent;

    if (start_extent_id > ct3d->dc.nr_extents_accepted) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* The walked list holds accepted extents only, so clamp against those */
    record_count = MIN(in->extent_cnt,
                       ct3d->dc.nr_extents_accepted - start_extent_id);
    size = CXL_MAILBOX_MAX_PAYLOAD_SIZE - sizeof(*out);
    record_count = MIN(record_count, size / sizeof(out->records[0]));
    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);

    stl_le_p(&out->count, record_count);
    stl_le_p(&out->total_extents, ct3d->dc.nr_extents_accepted);
    stl_le_p(&out->generation_num, ct3d->dc.ext_list_gen_seq);

    if (record_count > 0) {
        CXLDCExtentRaw *out_rec = &out->records[record_done];

        QTAILQ_FOREACH(ent, extent_list, node) {
            if (i++ < start_extent_id) {
                continue;
            }
            stq_le_p(&out_rec->start_dpa, ent->start_dpa);
            stq_le_p(&out_rec->len, ent->len);
            memcpy(&out_rec->tag, ent->tag, 0x10);
            stw_le_p(&out_rec->shared_seq, ent->shared_seq);

            record_done++;
            out_rec++;
            if (record_done == record_count) {
                break;
            }
        }
    }

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}

/*
 * Check whether any bit in the range [nr, nr + size) of addr is set;
 * return true if any bit is set, otherwise return false.
 */
bool test_any_bits_set(const unsigned long *addr, unsigned long nr,
                       unsigned long size)
{
    unsigned long res = find_next_bit(addr, size + nr, nr);

    return res < nr + size;
}

CXLDCRegion *cxl_find_dc_region(CXLType3Dev *ct3d, uint64_t dpa, uint64_t len)
{
    int i;
    CXLDCRegion *region = &ct3d->dc.regions[0];

    if (dpa < region->base ||
        dpa >= region->base + ct3d->dc.total_capacity) {
        return NULL;
    }

    /*
     * CXL r3.1 section 9.13.3: Dynamic Capacity Device (DCD)
     *
     * Regions are used in increasing-DPA order, with Region 0 being used for
     * the lowest DPA of Dynamic Capacity and Region 7 for the highest DPA.
     * So check from the last region to find where the dpa belongs. Extents
     * that cross multiple regions are not allowed.
     */
    for (i = ct3d->dc.num_regions - 1; i >= 0; i--) {
        region = &ct3d->dc.regions[i];
        if (dpa >= region->base) {
            if (dpa + len > region->base + region->len) {
                return NULL;
            }
            return region;
        }
    }

    return NULL;
}

void cxl_insert_extent_to_extent_list(CXLDCExtentList *list,
                                      uint64_t dpa,
                                      uint64_t len,
                                      uint8_t *tag,
                                      uint16_t shared_seq)
{
    CXLDCExtent *extent;

    extent = g_new0(CXLDCExtent, 1);
    extent->start_dpa = dpa;
    extent->len = len;
    if (tag) {
        memcpy(extent->tag, tag, 0x10);
    }
    extent->shared_seq = shared_seq;

    QTAILQ_INSERT_TAIL(list, extent, node);
}

void cxl_remove_extent_from_extent_list(CXLDCExtentList *list,
                                        CXLDCExtent *extent)
{
    QTAILQ_REMOVE(list, extent, node);
    g_free(extent);
}
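
/*
 * Illustrative lifecycle only: extents offered to the host (e.g. by an
 * FM-initiated add) are staged as one CXLDCExtentGroup on
 * dc.extents_pending and the head group is consumed as a whole:
 *
 *   group = cxl_insert_extent_to_extent_group(NULL, dpa, len, tag, 0);
 *   cxl_extent_group_list_insert_tail(&ct3d->dc.extents_pending, group);
 *   ...
 *   cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);
 */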

/*
 * Add a new extent to the extent "group" if the group exists; otherwise,
 * create a new group.
 * Return value: the extent group where the extent is inserted.
 */
CXLDCExtentGroup *cxl_insert_extent_to_extent_group(CXLDCExtentGroup *group,
                                                    uint64_t dpa,
                                                    uint64_t len,
                                                    uint8_t *tag,
                                                    uint16_t shared_seq)
{
    if (!group) {
        group = g_new0(CXLDCExtentGroup, 1);
        QTAILQ_INIT(&group->list);
    }
    cxl_insert_extent_to_extent_list(&group->list, dpa, len,
                                     tag, shared_seq);
    return group;
}

void cxl_extent_group_list_insert_tail(CXLDCExtentGroupList *list,
                                       CXLDCExtentGroup *group)
{
    QTAILQ_INSERT_TAIL(list, group, node);
}

uint32_t cxl_extent_group_list_delete_front(CXLDCExtentGroupList *list)
{
    CXLDCExtent *ent, *ent_next;
    CXLDCExtentGroup *group = QTAILQ_FIRST(list);
    uint32_t extents_deleted = 0;

    QTAILQ_REMOVE(list, group, node);
    QTAILQ_FOREACH_SAFE(ent, &group->list, node, ent_next) {
        cxl_remove_extent_from_extent_list(&group->list, ent);
        extents_deleted++;
    }
    g_free(group);

    return extents_deleted;
}

/*
 * CXL r3.1 Table 8-168: Add Dynamic Capacity Response Input Payload
 * CXL r3.1 Table 8-170: Release Dynamic Capacity Input Payload
 */
typedef struct CXLUpdateDCExtentListInPl {
    uint32_t num_entries_updated;
    uint8_t flags;
    uint8_t rsvd[3];
    /* CXL r3.1 Table 8-169: Updated Extent */
    struct {
        uint64_t start_dpa;
        uint64_t len;
        uint8_t rsvd[8];
    } QEMU_PACKED updated_entries[];
} QEMU_PACKED CXLUpdateDCExtentListInPl;

/*
 * Check that the extents in the extent list to be operated on are valid:
 * 1. The extent should be in the range of a valid DC region;
 * 2. The extent should not cross multiple regions;
 * 3. The start DPA and the length of the extent should align with the block
 *    size of the region;
 * 4. The address range of multiple extents in the list should not overlap.
 */
static CXLRetCode cxl_detect_malformed_extent_list(CXLType3Dev *ct3d,
        const CXLUpdateDCExtentListInPl *in)
{
    uint64_t min_block_size = UINT64_MAX;
    CXLDCRegion *region;
    CXLDCRegion *lastregion = &ct3d->dc.regions[ct3d->dc.num_regions - 1];
    g_autofree unsigned long *blk_bitmap = NULL;
    uint64_t dpa, len;
    uint32_t i;

    for (i = 0; i < ct3d->dc.num_regions; i++) {
        region = &ct3d->dc.regions[i];
        min_block_size = MIN(min_block_size, region->block_size);
    }

    blk_bitmap = bitmap_new((lastregion->base + lastregion->len -
                             ct3d->dc.regions[0].base) / min_block_size);

    for (i = 0; i < in->num_entries_updated; i++) {
        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        region = cxl_find_dc_region(ct3d, dpa, len);
        if (!region) {
            return CXL_MBOX_INVALID_PA;
        }

        dpa -= ct3d->dc.regions[0].base;
        if (dpa % region->block_size || len % region->block_size) {
            return CXL_MBOX_INVALID_EXTENT_LIST;
        }
        /* The DPA range is already covered by another extent in the list */
        if (test_any_bits_set(blk_bitmap, dpa / min_block_size,
                              len / min_block_size)) {
            return CXL_MBOX_INVALID_EXTENT_LIST;
        }
        bitmap_set(blk_bitmap, dpa / min_block_size, len / min_block_size);
    }

    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cxl_dcd_add_dyn_cap_rsp_dry_run(CXLType3Dev *ct3d,
        const CXLUpdateDCExtentListInPl *in)
{
    uint32_t i;
    CXLDCExtent *ent;
    CXLDCExtentGroup *ext_group;
    uint64_t dpa, len;
    Range range1, range2;

    for (i = 0; i < in->num_entries_updated; i++) {
        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        range_init_nofail(&range1, dpa, len);

        /*
         * The host-accepted DPA range must be contained by the first extent
         * group in the pending list
         */
        ext_group = QTAILQ_FIRST(&ct3d->dc.extents_pending);
        if (!cxl_extents_contains_dpa_range(&ext_group->list, dpa, len)) {
            return CXL_MBOX_INVALID_PA;
        }

        /* to-be-added range should not overlap with range already accepted */
        QTAILQ_FOREACH(ent, &ct3d->dc.extents, node) {
            range_init_nofail(&range2, ent->start_dpa, ent->len);
            if (range_overlaps_range(&range1, &range2)) {
                return CXL_MBOX_INVALID_PA;
            }
        }
    }
    return CXL_MBOX_SUCCESS;
}
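
/*
 * Illustrative outcome only: if the head pending group holds the single
 * extent {0x10000, 0x40000}, a host response accepting {0x10000, 0x20000}
 * passes the containment test in the dry run above, whereas accepting
 * {0x0, 0x20000} fails with Invalid PA since it is not contained in that
 * group.
 */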

/*
 * CXL r3.1 section 8.2.9.9.9.3: Add Dynamic Capacity Response (Opcode 4802h)
 * An extent is added to the extent list and becomes usable only after the
 * response is processed successfully.
 */
static CXLRetCode cmd_dcd_add_dyn_cap_rsp(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLUpdateDCExtentListInPl *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDCExtentList *extent_list = &ct3d->dc.extents;
    uint32_t i, num;
    uint64_t dpa, len;
    CXLRetCode ret;

    if (len_in < sizeof(*in)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (in->num_entries_updated == 0) {
        num = cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);
        ct3d->dc.total_extent_count -= num;
        return CXL_MBOX_SUCCESS;
    }

    if (len_in <
        sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    /* Reject if adding the extents would exceed the device's tracking limit */
    if (in->num_entries_updated + ct3d->dc.total_extent_count >
        CXL_NUM_EXTENTS_SUPPORTED) {
        return CXL_MBOX_RESOURCES_EXHAUSTED;
    }

    ret = cxl_detect_malformed_extent_list(ct3d, in);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    ret = cxl_dcd_add_dyn_cap_rsp_dry_run(ct3d, in);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    for (i = 0; i < in->num_entries_updated; i++) {
        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        cxl_insert_extent_to_extent_list(extent_list, dpa, len, NULL, 0);
        ct3d->dc.total_extent_count += 1;
        ct3d->dc.nr_extents_accepted += 1;
        ct3_set_region_block_backed(ct3d, dpa, len);
    }
    /* Remove the first extent group in the pending list */
    num = cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);
    ct3d->dc.total_extent_count -= num;

    return CXL_MBOX_SUCCESS;
}

/*
 * Copy extent list from src to dst
 * Return value: number of extents copied
 */
static uint32_t copy_extent_list(CXLDCExtentList *dst,
                                 const CXLDCExtentList *src)
{
    uint32_t cnt = 0;
    CXLDCExtent *ent;

    if (!dst || !src) {
        return 0;
    }

    QTAILQ_FOREACH(ent, src, node) {
        cxl_insert_extent_to_extent_list(dst, ent->start_dpa, ent->len,
                                         ent->tag, ent->shared_seq);
        cnt++;
    }
    return cnt;
}

static CXLRetCode cxl_dc_extent_release_dry_run(CXLType3Dev *ct3d,
        const CXLUpdateDCExtentListInPl *in, CXLDCExtentList *updated_list,
        uint32_t *updated_list_size)
{
    CXLDCExtent *ent, *ent_next;
    uint64_t dpa, len;
    uint32_t i;
    int cnt_delta = 0;
    CXLRetCode ret = CXL_MBOX_SUCCESS;

    QTAILQ_INIT(updated_list);
    copy_extent_list(updated_list, &ct3d->dc.extents);

    for (i = 0; i < in->num_entries_updated; i++) {
        Range range;

        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        /* Check if the DPA range is not fully backed with valid extents */
        if (!ct3_test_region_block_backed(ct3d, dpa, len)) {
            ret = CXL_MBOX_INVALID_PA;
            goto free_and_exit;
        }

        /* After this point, extent overflow is the only error that can occur */
        while (len > 0) {
            QTAILQ_FOREACH(ent, updated_list, node) {
                range_init_nofail(&range, ent->start_dpa, ent->len);

                if (range_contains(&range, dpa)) {
                    uint64_t len1, len2 = 0, len_done = 0;
                    uint64_t ent_start_dpa = ent->start_dpa;
                    uint64_t ent_len = ent->len;

                    len1 = dpa - ent->start_dpa;
                    /* Found the extent or the subset of an existing extent */
                    if (range_contains(&range, dpa + len - 1)) {
                        len2 = ent_start_dpa + ent_len - dpa - len;
                    } else {
                        dpa = ent_start_dpa + ent_len;
                    }
                    len_done = ent_len - len1 - len2;

                    cxl_remove_extent_from_extent_list(updated_list, ent);
                    cnt_delta--;

                    if (len1) {
                        cxl_insert_extent_to_extent_list(updated_list,
                                                         ent_start_dpa,
                                                         len1, NULL, 0);
                        cnt_delta++;
                    }
                    if (len2) {
                        cxl_insert_extent_to_extent_list(updated_list,
                                                         dpa + len,
                                                         len2, NULL, 0);
                        cnt_delta++;
                    }

                    if (cnt_delta + ct3d->dc.total_extent_count >
                        CXL_NUM_EXTENTS_SUPPORTED) {
                        ret = CXL_MBOX_RESOURCES_EXHAUSTED;
                        goto free_and_exit;
                    }

                    len -= len_done;
                    break;
                }
            }
        }
    }
free_and_exit:
    if (ret != CXL_MBOX_SUCCESS) {
        QTAILQ_FOREACH_SAFE(ent, updated_list, node, ent_next) {
            cxl_remove_extent_from_extent_list(updated_list, ent);
        }
        *updated_list_size = 0;
    } else {
        *updated_list_size = ct3d->dc.nr_extents_accepted + cnt_delta;
    }

    return ret;
}

/*
 * CXL r3.1 section 8.2.9.9.9.4: Release Dynamic Capacity (Opcode 4803h)
 */
static CXLRetCode cmd_dcd_release_dyn_cap(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLUpdateDCExtentListInPl *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDCExtentList updated_list;
    CXLDCExtent *ent, *ent_next;
    uint32_t updated_list_size;
    CXLRetCode ret;

    if (len_in < sizeof(*in)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (in->num_entries_updated == 0) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (len_in <
        sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    ret = cxl_detect_malformed_extent_list(ct3d, in);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    ret = cxl_dc_extent_release_dry_run(ct3d, in, &updated_list,
                                        &updated_list_size);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    /*
     * The dry run passed, so updated_list is the new extent list: clear the
     * extents in the accepted list, copy the extents from updated_list into
     * it, and update the extent counts.
     */
    QTAILQ_FOREACH_SAFE(ent, &ct3d->dc.extents, node, ent_next) {
        ct3_clear_region_block_backed(ct3d, ent->start_dpa, ent->len);
        cxl_remove_extent_from_extent_list(&ct3d->dc.extents, ent);
    }
    copy_extent_list(&ct3d->dc.extents, &updated_list);
    QTAILQ_FOREACH_SAFE(ent, &updated_list, node, ent_next) {
        ct3_set_region_block_backed(ct3d, ent->start_dpa, ent->len);
        cxl_remove_extent_from_extent_list(&updated_list, ent);
    }
    ct3d->dc.total_extent_count += (updated_list_size -
                                    ct3d->dc.nr_extents_accepted);

    ct3d->dc.nr_extents_accepted = updated_list_size;

    return CXL_MBOX_SUCCESS;
}
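
/*
 * Illustrative case only: releasing 128 KiB from the middle of a single
 * accepted 1 MiB extent leaves head and tail pieces in updated_list, so a
 * release can temporarily increase the number of tracked extents; the dry
 * run fails with Resources Exhausted if that would exceed
 * CXL_NUM_EXTENTS_SUPPORTED.
 */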

/* CXL r3.2 section 7.6.7.6.1: Get DCD Info (Opcode 5600h) */
static CXLRetCode cmd_fm_get_dcd_info(const struct cxl_cmd *cmd,
                                      uint8_t *payload_in,
                                      size_t len_in,
                                      uint8_t *payload_out,
                                      size_t *len_out,
                                      CXLCCI *cci)
{
    struct {
        uint8_t num_hosts;
        uint8_t num_regions_supported;
        uint8_t rsvd1[2];
        uint16_t supported_add_sel_policy_bitmask;
        uint8_t rsvd2[2];
        uint16_t supported_removal_policy_bitmask;
        uint8_t sanitize_on_release_bitmask;
        uint8_t rsvd3;
        uint64_t total_dynamic_capacity;
        uint64_t region_blk_size_bitmasks[8];
    } QEMU_PACKED *out = (void *)payload_out;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDCRegion *region;
    int i;

    out->num_hosts = 1;
    out->num_regions_supported = ct3d->dc.num_regions;
    stw_le_p(&out->supported_add_sel_policy_bitmask,
             BIT(CXL_EXTENT_SELECTION_POLICY_PRESCRIPTIVE));
    stw_le_p(&out->supported_removal_policy_bitmask,
             BIT(CXL_EXTENT_REMOVAL_POLICY_PRESCRIPTIVE));
    out->sanitize_on_release_bitmask = 0;

    stq_le_p(&out->total_dynamic_capacity,
             ct3d->dc.total_capacity / CXL_CAPACITY_MULTIPLIER);

    for (i = 0; i < ct3d->dc.num_regions; i++) {
        region = &ct3d->dc.regions[i];
        memcpy(&out->region_blk_size_bitmasks[i],
               &region->supported_blk_size_bitmask,
               sizeof(out->region_blk_size_bitmasks[i]));
    }

    *len_out = sizeof(*out);
    return CXL_MBOX_SUCCESS;
}

static void build_dsmas_flags(uint8_t *flags, CXLDCRegion *region)
{
    *flags = 0;

    if (region->nonvolatile) {
        *flags |= BIT(CXL_DSMAS_FLAGS_NONVOLATILE);
    }
    if (region->sharable) {
        *flags |= BIT(CXL_DSMAS_FLAGS_SHARABLE);
    }
    if (region->hw_managed_coherency) {
        *flags |= BIT(CXL_DSMAS_FLAGS_HW_MANAGED_COHERENCY);
    }
    if (region->ic_specific_dc_management) {
        *flags |= BIT(CXL_DSMAS_FLAGS_IC_SPECIFIC_DC_MANAGEMENT);
    }
    if (region->rdonly) {
        *flags |= BIT(CXL_DSMAS_FLAGS_RDONLY);
    }
}

/*
 * CXL r3.2 section 7.6.7.6.2:
 * Get Host DC Region Configuration (Opcode 5601h)
 */
static CXLRetCode cmd_fm_get_host_dc_region_config(const struct cxl_cmd *cmd,
                                                   uint8_t *payload_in,
                                                   size_t len_in,
                                                   uint8_t *payload_out,
                                                   size_t *len_out,
                                                   CXLCCI *cci)
{
    struct {
        uint16_t host_id;
        uint8_t region_cnt;
        uint8_t start_rid;
    } QEMU_PACKED *in = (void *)payload_in;
    struct {
        uint16_t host_id;
        uint8_t num_regions;
        uint8_t regions_returned;
        struct {
            uint64_t base;
            uint64_t decode_len;
            uint64_t region_len;
            uint64_t block_size;
            uint8_t flags;
            uint8_t rsvd1[3];
            uint8_t sanitize;
            uint8_t rsvd2[3];
        } QEMU_PACKED records[];
    } QEMU_PACKED *out = (void *)payload_out;
    struct {
        uint32_t num_extents_supported;
        uint32_t num_extents_available;
        uint32_t num_tags_supported;
        uint32_t num_tags_available;
    } QEMU_PACKED *extra_out;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint16_t record_count, out_pl_len, i;

    if (in->start_rid >= ct3d->dc.num_regions) {
        return CXL_MBOX_INVALID_INPUT;
    }
    record_count = MIN(ct3d->dc.num_regions - in->start_rid, in->region_cnt);

    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    extra_out = (void *)out + out_pl_len;
    out_pl_len += sizeof(*extra_out);

    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    stw_le_p(&out->host_id, 0);
    out->num_regions = ct3d->dc.num_regions;
    out->regions_returned = record_count;

    for (i = 0; i < record_count; i++) {
        stq_le_p(&out->records[i].base,
                 ct3d->dc.regions[in->start_rid + i].base);
        stq_le_p(&out->records[i].decode_len,
                 ct3d->dc.regions[in->start_rid + i].decode_len /
                 CXL_CAPACITY_MULTIPLIER);
        stq_le_p(&out->records[i].region_len,
                 ct3d->dc.regions[in->start_rid + i].len);
        stq_le_p(&out->records[i].block_size,
                 ct3d->dc.regions[in->start_rid + i].block_size);
        build_dsmas_flags(&out->records[i].flags,
                          &ct3d->dc.regions[in->start_rid + i]);
        /* Sanitize is bit 0 of flags. */
        out->records[i].sanitize =
            ct3d->dc.regions[in->start_rid + i].flags & BIT(0);
    }

    stl_le_p(&extra_out->num_extents_supported, CXL_NUM_EXTENTS_SUPPORTED);
    stl_le_p(&extra_out->num_extents_available, CXL_NUM_EXTENTS_SUPPORTED -
             ct3d->dc.total_extent_count);
    stl_le_p(&extra_out->num_tags_supported, CXL_NUM_TAGS_SUPPORTED);
    stl_le_p(&extra_out->num_tags_available, CXL_NUM_TAGS_SUPPORTED);

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.2 section 7.6.7.6.3: Set Host DC Region Configuration (Opcode 5602h) */
static CXLRetCode cmd_fm_set_dc_region_config(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    struct {
        uint8_t reg_id;
        uint8_t rsvd[3];
        uint64_t block_sz;
        uint8_t flags;
        uint8_t rsvd2[3];
    } QEMU_PACKED *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLEventDynamicCapacity dcEvent = {};
    CXLDCRegion *region;

    /* Validate the region id before it is used to index dc.regions[] */
    if (in->reg_id >= DCD_MAX_NUM_REGION) {
        return CXL_MBOX_UNSUPPORTED;
    }
    region = &ct3d->dc.regions[in->reg_id];

    /*
     * CXL r3.2 7.6.7.6.3: Set DC Region Configuration
     * This command shall fail with Unsupported when the Sanitize on Release
     * field does not match the region's configuration... and the device
     * does not support reconfiguration of the Sanitize on Release setting.
     *
     * Currently not reconfigurable, so always fail if sanitize bit (bit 0)
     * doesn't match.
     */
    if ((in->flags & 0x1) != (region->flags & 0x1)) {
        return CXL_MBOX_UNSUPPORTED;
    }

    /* Check that no extents are in the region being reconfigured */
    if (!bitmap_empty(region->blk_bitmap, region->len / region->block_size)) {
        return CXL_MBOX_UNSUPPORTED;
    }

    /* Check that new block size is supported */
    if (!is_power_of_2(in->block_sz) ||
        !(in->block_sz & region->supported_blk_size_bitmask)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* Return success if new block size == current block size */
    if (in->block_sz == region->block_size) {
        return CXL_MBOX_SUCCESS;
    }

    /* Free the bitmap and create a new one for the new block size. */
    qemu_mutex_lock(&region->bitmap_lock);
    g_free(region->blk_bitmap);
    region->blk_bitmap = bitmap_new(region->len / in->block_sz);
    qemu_mutex_unlock(&region->bitmap_lock);
    region->block_size = in->block_sz;

    /* Create event record and insert into event log */
    cxl_assign_event_header(&dcEvent.hdr,
                            &dynamic_capacity_uuid,
                            (1 << CXL_EVENT_TYPE_INFO),
                            sizeof(dcEvent),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));
    dcEvent.type = DC_EVENT_REGION_CONFIG_UPDATED;
    dcEvent.validity_flags = 1;
    dcEvent.host_id = 0;
    dcEvent.updated_region_id = in->reg_id;

    if (cxl_event_insert(&ct3d->cxl_dstate,
                         CXL_EVENT_TYPE_DYNAMIC_CAP,
                         (CXLEventRecordRaw *)&dcEvent)) {
        cxl_event_irq_assert(ct3d);
    }
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.2 section 7.6.7.6.4: Get DC Region Extent Lists (Opcode 5603h) */
static CXLRetCode cmd_fm_get_dc_region_extent_list(const struct cxl_cmd *cmd,
                                                   uint8_t *payload_in,
                                                   size_t len_in,
                                                   uint8_t *payload_out,
                                                   size_t *len_out,
                                                   CXLCCI *cci)
{
    struct {
        uint16_t host_id;
        uint8_t rsvd[2];
        uint32_t extent_cnt;
        uint32_t start_extent_id;
    } QEMU_PACKED *in = (void *)payload_in;
    struct {
        uint16_t host_id;
        uint8_t rsvd[2];
        uint32_t start_extent_id;
        uint32_t extents_returned;
        uint32_t total_extents;
        uint32_t list_generation_num;
        uint8_t rsvd2[4];
        CXLDCExtentRaw records[];
    } QEMU_PACKED *out = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*in) != 0xc);
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDCExtent *ent;
    CXLDCExtentRaw *out_rec;
    uint16_t record_count = 0, record_done = 0, i = 0;
    uint16_t out_pl_len, max_size;

    if (in->host_id != 0) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (in->start_extent_id > ct3d->dc.nr_extents_accepted) {
        return CXL_MBOX_INVALID_INPUT;
    }

    record_count = MIN(in->extent_cnt,
                       ct3d->dc.nr_extents_accepted - in->start_extent_id);
    max_size = CXL_MAILBOX_MAX_PAYLOAD_SIZE - sizeof(*out);
    record_count = MIN(record_count, max_size / sizeof(out->records[0]));
    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);

    stw_le_p(&out->host_id, in->host_id);
    stl_le_p(&out->start_extent_id, in->start_extent_id);
    stl_le_p(&out->extents_returned, record_count);
    stl_le_p(&out->total_extents, ct3d->dc.nr_extents_accepted);
    stl_le_p(&out->list_generation_num, ct3d->dc.ext_list_gen_seq);

    if (record_count > 0) {
        QTAILQ_FOREACH(ent, &ct3d->dc.extents, node) {
            if (i++ < in->start_extent_id) {
                continue;
            }
            out_rec = &out->records[record_done];
            stq_le_p(&out_rec->start_dpa, ent->start_dpa);
            stq_le_p(&out_rec->len, ent->len);
            memcpy(&out_rec->tag, ent->tag, 0x10);
            stw_le_p(&out_rec->shared_seq, ent->shared_seq);

            record_done++;
            if (record_done == record_count) {
                break;
            }
        }
    }

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}
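
/*
 * Illustrative paging only: the FM walks the accepted list in batches,
 * reissuing the command with start_extent_id advanced by extents_returned
 * until total_extents entries have been fetched; records per call are
 * limited by how many CXLDCExtentRaw entries fit after the 24-byte header.
 */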

/*
 * Helper function to convert CXLDCExtentRaw to CXLUpdateDCExtentListInPl
 * in order to reuse the cxl_detect_malformed_extent_list() function, which
 * accepts CXLUpdateDCExtentListInPl as a parameter.
 */
static void convert_raw_extents(CXLDCExtentRaw raw_extents[],
                                CXLUpdateDCExtentListInPl *extent_list,
                                int count)
{
    int i;

    extent_list->num_entries_updated = count;

    for (i = 0; i < count; i++) {
        extent_list->updated_entries[i].start_dpa = raw_extents[i].start_dpa;
        extent_list->updated_entries[i].len = raw_extents[i].len;
    }
}

/* CXL r3.2 Section 7.6.7.6.5: Initiate Dynamic Capacity Add (Opcode 5604h) */
static CXLRetCode cmd_fm_initiate_dc_add(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint16_t host_id;
        uint8_t selection_policy;
        uint8_t reg_num;
        uint64_t length;
        uint8_t tag[0x10];
        uint32_t ext_count;
        CXLDCExtentRaw extents[];
    } QEMU_PACKED *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    int i, rc;

    switch (in->selection_policy) {
    case CXL_EXTENT_SELECTION_POLICY_PRESCRIPTIVE: {
        /* Reject if adding the extents would exceed the tracking limit. */
        if (in->ext_count + ct3d->dc.total_extent_count >
            CXL_NUM_EXTENTS_SUPPORTED) {
            return CXL_MBOX_RESOURCES_EXHAUSTED;
        }

        g_autofree CXLUpdateDCExtentListInPl *list =
            g_malloc0(sizeof(*list) +
                      in->ext_count * sizeof(*list->updated_entries));

        convert_raw_extents(in->extents, list, in->ext_count);
        rc = cxl_detect_malformed_extent_list(ct3d, list);

        for (i = 0; i < in->ext_count; i++) {
            CXLDCExtentRaw *ext = &in->extents[i];

            /* Check requested extents do not overlap with pending ones. */
            if (cxl_extent_groups_overlaps_dpa_range(&ct3d->dc.extents_pending,
                                                     ext->start_dpa,
                                                     ext->len)) {
                return CXL_MBOX_INVALID_EXTENT_LIST;
            }
            /* Check requested extents do not overlap with existing ones. */
            if (cxl_extents_overlaps_dpa_range(&ct3d->dc.extents,
                                               ext->start_dpa,
                                               ext->len)) {
                return CXL_MBOX_INVALID_EXTENT_LIST;
            }
        }

        if (rc) {
            return rc;
        }

        CXLDCExtentGroup *group = NULL;
        for (i = 0; i < in->ext_count; i++) {
            CXLDCExtentRaw *ext = &in->extents[i];

            group = cxl_insert_extent_to_extent_group(group, ext->start_dpa,
                                                      ext->len, ext->tag,
                                                      ext->shared_seq);
        }

        cxl_extent_group_list_insert_tail(&ct3d->dc.extents_pending, group);
        ct3d->dc.total_extent_count += in->ext_count;
        cxl_create_dc_event_records_for_extents(ct3d,
                                                DC_EVENT_ADD_CAPACITY,
                                                in->extents,
                                                in->ext_count);

        return CXL_MBOX_SUCCESS;
    }
    default: {
        qemu_log_mask(LOG_UNIMP,
                      "CXL extent selection policy not supported.\n");
        return CXL_MBOX_INVALID_INPUT;
    }
    }
}

#define CXL_EXTENT_REMOVAL_POLICY_MASK 0x0F
#define CXL_FORCED_REMOVAL_MASK (1 << 4)
/*
 * CXL r3.2 Section 7.6.7.6.6:
 * Initiate Dynamic Capacity Release (Opcode 5605h)
 */
static CXLRetCode cmd_fm_initiate_dc_release(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        uint16_t host_id;
        uint8_t flags;
        uint8_t reg_num;
        uint64_t length;
        uint8_t tag[0x10];
        uint32_t ext_count;
        CXLDCExtentRaw extents[];
    } QEMU_PACKED *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    int i, rc;

    switch (in->flags & CXL_EXTENT_REMOVAL_POLICY_MASK) {
    case CXL_EXTENT_REMOVAL_POLICY_PRESCRIPTIVE: {
        CXLDCExtentList updated_list;
        CXLDCExtent *ent, *ent_next;
        uint32_t updated_list_size;
        g_autofree CXLUpdateDCExtentListInPl *list =
            g_malloc0(sizeof(*list) +
                      in->ext_count * sizeof(*list->updated_entries));

        convert_raw_extents(in->extents, list, in->ext_count);
        rc = cxl_detect_malformed_extent_list(ct3d, list);
        if (rc) {
            return rc;
        }

        /*
         * Fail with Invalid PA if an extent is pending and the Forced
         * Removal flag is not set.
         */
        if (!(in->flags & CXL_FORCED_REMOVAL_MASK)) {
            for (i = 0; i < in->ext_count; i++) {
                CXLDCExtentRaw ext = in->extents[i];
                /*
                 * Check requested extents don't overlap with pending
                 * extents.
                 */
#define CXL_EXTENT_REMOVAL_POLICY_MASK 0x0F
#define CXL_FORCED_REMOVAL_MASK (1 << 4)
/*
 * CXL r3.2 Section 7.6.7.6.6:
 * Initiate Dynamic Capacity Release (Opcode 5605h)
 */
static CXLRetCode cmd_fm_initiate_dc_release(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        uint16_t host_id;
        uint8_t flags;
        uint8_t reg_num;
        uint64_t length;
        uint8_t tag[0x10];
        uint32_t ext_count;
        CXLDCExtentRaw extents[];
    } QEMU_PACKED *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    int i, rc;

    switch (in->flags & CXL_EXTENT_REMOVAL_POLICY_MASK) {
    case CXL_EXTENT_REMOVAL_POLICY_PRESCRIPTIVE: {
        CXLDCExtentList updated_list;
        uint32_t updated_list_size;
        g_autofree CXLUpdateDCExtentListInPl *list =
            g_malloc0(sizeof(*list) +
                      in->ext_count * sizeof(*list->updated_entries));

        convert_raw_extents(in->extents, list, in->ext_count);
        rc = cxl_detect_malformed_extent_list(ct3d, list);
        if (rc) {
            return rc;
        }

        /*
         * Fail with Invalid PA if an extent is pending and the Forced
         * Removal flag is not set.
         */
        if (!(in->flags & CXL_FORCED_REMOVAL_MASK)) {
            for (i = 0; i < in->ext_count; i++) {
                CXLDCExtentRaw ext = in->extents[i];

                /*
                 * Check requested extents don't overlap with pending
                 * extents.
                 */
                if (cxl_extent_groups_overlaps_dpa_range(
                        &ct3d->dc.extents_pending,
                        ext.start_dpa,
                        ext.len)) {
                    return CXL_MBOX_INVALID_PA;
                }
            }
        }

        rc = cxl_dc_extent_release_dry_run(ct3d,
                                           list,
                                           &updated_list,
                                           &updated_list_size);
        if (rc) {
            return rc;
        }
        cxl_create_dc_event_records_for_extents(ct3d,
                                                DC_EVENT_RELEASE_CAPACITY,
                                                in->extents,
                                                in->ext_count);
        return CXL_MBOX_SUCCESS;
    }
    default: {
        qemu_log_mask(LOG_UNIMP,
                      "CXL extent removal policy not supported.\n");
        return CXL_MBOX_INVALID_INPUT;
    }
    }
}
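/*
 * Each entry in the command tables below is { name, handler, expected
 * input payload length, command effects }. An input length of ~0 means
 * the payload is variable sized and the handler validates it itself
 * (cxl_process_cci_message() only enforces exact lengths otherwise); the
 * effects mask is what cxl_rebuild_cel() reports for the opcode in the
 * Command Effects Log.
 */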
static const struct cxl_cmd cxl_cmd_set[256][256] = {
    [INFOSTAT][BACKGROUND_OPERATION_ABORT] = { "BACKGROUND_OPERATION_ABORT",
        cmd_infostat_bg_op_abort, 0, 0 },
    [EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS",
        cmd_events_get_records, 1, 0 },
    [EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS",
        cmd_events_clear_records, ~0, CXL_MBOX_IMMEDIATE_LOG_CHANGE },
    [EVENTS][GET_INTERRUPT_POLICY] = { "EVENTS_GET_INTERRUPT_POLICY",
        cmd_events_get_interrupt_policy, 0, 0 },
    [EVENTS][SET_INTERRUPT_POLICY] = { "EVENTS_SET_INTERRUPT_POLICY",
        cmd_events_set_interrupt_policy,
        ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE },
    [FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO",
        cmd_firmware_update_get_info, 0, 0 },
    [FIRMWARE_UPDATE][TRANSFER] = { "FIRMWARE_UPDATE_TRANSFER",
        cmd_firmware_update_transfer, ~0,
        CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT },
    [FIRMWARE_UPDATE][ACTIVATE] = { "FIRMWARE_UPDATE_ACTIVATE",
        cmd_firmware_update_activate, 2,
        CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set,
        8, CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported,
        0, 0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [FEATURES][GET_SUPPORTED] = { "FEATURES_GET_SUPPORTED",
        cmd_features_get_supported, 0x8, 0 },
    [FEATURES][GET_FEATURE] = { "FEATURES_GET_FEATURE",
        cmd_features_get_feature, 0x15, 0 },
    [FEATURES][SET_FEATURE] = { "FEATURES_SET_FEATURE",
        cmd_features_set_feature,
        ~0,
        (CXL_MBOX_IMMEDIATE_CONFIG_CHANGE |
         CXL_MBOX_IMMEDIATE_DATA_CHANGE |
         CXL_MBOX_IMMEDIATE_POLICY_CHANGE |
         CXL_MBOX_IMMEDIATE_LOG_CHANGE |
         CXL_MBOX_SECURITY_STATE_CHANGE)},
    [IDENTIFY][MEMORY_DEVICE] = { "IDENTIFY_MEMORY_DEVICE",
        cmd_identify_memory_device, 0, 0 },
    [CCLS][GET_PARTITION_INFO] = { "CCLS_GET_PARTITION_INFO",
        cmd_ccls_get_partition_info, 0, 0 },
    [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 },
    [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa,
        ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE | CXL_MBOX_IMMEDIATE_DATA_CHANGE },
    [HEALTH_INFO_ALERTS][GET_ALERT_CONFIG] = {
        "HEALTH_INFO_ALERTS_GET_ALERT_CONFIG",
        cmd_get_alert_config, 0, 0 },
    [HEALTH_INFO_ALERTS][SET_ALERT_CONFIG] = {
        "HEALTH_INFO_ALERTS_SET_ALERT_CONFIG",
        cmd_set_alert_config, 12, CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
    [SANITIZE][OVERWRITE] = { "SANITIZE_OVERWRITE", cmd_sanitize_overwrite, 0,
        (CXL_MBOX_IMMEDIATE_DATA_CHANGE |
         CXL_MBOX_SECURITY_STATE_CHANGE |
         CXL_MBOX_BACKGROUND_OPERATION |
         CXL_MBOX_BACKGROUND_OPERATION_ABORT)},
    [SANITIZE][MEDIA_OPERATIONS] = { "MEDIA_OPERATIONS", cmd_media_operations,
        ~0,
        (CXL_MBOX_IMMEDIATE_DATA_CHANGE |
         CXL_MBOX_BACKGROUND_OPERATION)},
    [PERSISTENT_MEM][GET_SECURITY_STATE] = { "GET_SECURITY_STATE",
        cmd_get_security_state, 0, 0 },
    [MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST",
        cmd_media_get_poison_list, 16, 0 },
    [MEDIA_AND_POISON][INJECT_POISON] = { "MEDIA_AND_POISON_INJECT_POISON",
        cmd_media_inject_poison, 8, 0 },
    [MEDIA_AND_POISON][CLEAR_POISON] = { "MEDIA_AND_POISON_CLEAR_POISON",
        cmd_media_clear_poison, 72, 0 },
    [MEDIA_AND_POISON][GET_SCAN_MEDIA_CAPABILITIES] = {
        "MEDIA_AND_POISON_GET_SCAN_MEDIA_CAPABILITIES",
        cmd_media_get_scan_media_capabilities, 16, 0 },
    [MEDIA_AND_POISON][SCAN_MEDIA] = { "MEDIA_AND_POISON_SCAN_MEDIA",
        cmd_media_scan_media, 17,
        (CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT)},
    [MEDIA_AND_POISON][GET_SCAN_MEDIA_RESULTS] = {
        "MEDIA_AND_POISON_GET_SCAN_MEDIA_RESULTS",
        cmd_media_get_scan_media_results, 0, 0 },
};

static const struct cxl_cmd cxl_cmd_set_dcd[256][256] = {
    [DCD_CONFIG][GET_DC_CONFIG] = { "DCD_GET_DC_CONFIG",
        cmd_dcd_get_dyn_cap_config, 2, 0 },
    [DCD_CONFIG][GET_DYN_CAP_EXT_LIST] = {
        "DCD_GET_DYNAMIC_CAPACITY_EXTENT_LIST", cmd_dcd_get_dyn_cap_ext_list,
        8, 0 },
    [DCD_CONFIG][ADD_DYN_CAP_RSP] = {
        "DCD_ADD_DYNAMIC_CAPACITY_RESPONSE", cmd_dcd_add_dyn_cap_rsp,
        ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE },
    [DCD_CONFIG][RELEASE_DYN_CAP] = {
        "DCD_RELEASE_DYNAMIC_CAPACITY", cmd_dcd_release_dyn_cap,
        ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE },
};

static const struct cxl_cmd cxl_cmd_set_sw[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [INFOSTAT][BACKGROUND_OPERATION_STATUS] = { "BACKGROUND_OPERATION_STATUS",
        cmd_infostat_bg_op_sts, 0, 0 },
    [INFOSTAT][BACKGROUND_OPERATION_ABORT] = { "BACKGROUND_OPERATION_ABORT",
        cmd_infostat_bg_op_abort, 0, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 8,
        CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
        0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [PHYSICAL_SWITCH][IDENTIFY_SWITCH_DEVICE] = { "IDENTIFY_SWITCH_DEVICE",
        cmd_identify_switch_device, 0, 0 },
    [PHYSICAL_SWITCH][GET_PHYSICAL_PORT_STATE] = { "SWITCH_PHYSICAL_PORT_STATS",
        cmd_get_physical_port_state, ~0, 0 },
    [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
        cmd_tunnel_management_cmd, ~0, 0 },
};
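/*
 * FM-API DCD management commands. This set is only merged into a CCI's
 * dispatch table when the type 3 device actually has DC regions
 * configured; see cxl_initialize_t3_fm_owned_ld_mctpcci() below.
 */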
static const struct cxl_cmd cxl_cmd_set_fm_dcd[256][256] = {
    [FMAPI_DCD_MGMT][GET_DCD_INFO] = { "GET_DCD_INFO",
        cmd_fm_get_dcd_info, 0, 0 },
    [FMAPI_DCD_MGMT][GET_HOST_DC_REGION_CONFIG] = { "GET_HOST_DC_REGION_CONFIG",
        cmd_fm_get_host_dc_region_config, 4, 0 },
    [FMAPI_DCD_MGMT][SET_DC_REGION_CONFIG] = { "SET_DC_REGION_CONFIG",
        cmd_fm_set_dc_region_config, 16,
        (CXL_MBOX_CONFIG_CHANGE_COLD_RESET |
         CXL_MBOX_CONFIG_CHANGE_CONV_RESET |
         CXL_MBOX_CONFIG_CHANGE_CXL_RESET |
         CXL_MBOX_IMMEDIATE_CONFIG_CHANGE |
         CXL_MBOX_IMMEDIATE_DATA_CHANGE) },
    [FMAPI_DCD_MGMT][GET_DC_REGION_EXTENT_LIST] = { "GET_DC_REGION_EXTENT_LIST",
        cmd_fm_get_dc_region_extent_list, 12, 0 },
    [FMAPI_DCD_MGMT][INITIATE_DC_ADD] = { "INIT_DC_ADD",
        cmd_fm_initiate_dc_add, ~0,
        (CXL_MBOX_CONFIG_CHANGE_COLD_RESET |
         CXL_MBOX_CONFIG_CHANGE_CONV_RESET |
         CXL_MBOX_CONFIG_CHANGE_CXL_RESET |
         CXL_MBOX_IMMEDIATE_CONFIG_CHANGE |
         CXL_MBOX_IMMEDIATE_DATA_CHANGE) },
    [FMAPI_DCD_MGMT][INITIATE_DC_RELEASE] = { "INIT_DC_RELEASE",
        cmd_fm_initiate_dc_release, ~0,
        (CXL_MBOX_CONFIG_CHANGE_COLD_RESET |
         CXL_MBOX_CONFIG_CHANGE_CONV_RESET |
         CXL_MBOX_CONFIG_CHANGE_CXL_RESET |
         CXL_MBOX_IMMEDIATE_CONFIG_CHANGE |
         CXL_MBOX_IMMEDIATE_DATA_CHANGE) },
};

/*
 * While a command is executing in the background, the device should
 * update the percentage complete in the Background Command Status Register
 * at least once per second.
 */

#define CXL_MBOX_BG_UPDATE_FREQ 1000UL

int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd,
                            size_t len_in, uint8_t *pl_in, size_t *len_out,
                            uint8_t *pl_out, bool *bg_started)
{
    int ret;
    const struct cxl_cmd *cxl_cmd;
    opcode_handler h;
    CXLDeviceState *cxl_dstate;

    *len_out = 0;
    cxl_cmd = &cci->cxl_cmd_set[set][cmd];
    h = cxl_cmd->handler;
    if (!h) {
        qemu_log_mask(LOG_UNIMP, "Command %04xh not implemented\n",
                      set << 8 | cmd);
        return CXL_MBOX_UNSUPPORTED;
    }

    if (len_in != cxl_cmd->in && cxl_cmd->in != ~0) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    /* Only one bg command at a time */
    if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) &&
        cci->bg.runtime > 0) {
        return CXL_MBOX_BUSY;
    }

    /* forbid any selected commands while the media is disabled */
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;

        if (cxl_dev_media_disabled(cxl_dstate)) {
            if (h == cmd_events_get_records ||
                h == cmd_ccls_get_partition_info ||
                h == cmd_ccls_set_lsa ||
                h == cmd_ccls_get_lsa ||
                h == cmd_logs_get_log ||
                h == cmd_media_get_poison_list ||
                h == cmd_media_inject_poison ||
                h == cmd_media_clear_poison ||
                h == cmd_sanitize_overwrite ||
                h == cmd_firmware_update_transfer ||
                h == cmd_firmware_update_activate) {
                return CXL_MBOX_MEDIA_DISABLED;
            }
        }
    }

    ret = (*h)(cxl_cmd, pl_in, len_in, pl_out, len_out, cci);
    if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) &&
        ret == CXL_MBOX_BG_STARTED) {
        *bg_started = true;
    } else {
        *bg_started = false;
    }

    /* Set bg and the return code */
    if (*bg_started) {
        uint64_t now;

        cci->bg.opcode = (set << 8) | cmd;

        cci->bg.complete_pct = 0;
        cci->bg.aborted = false;
        cci->bg.ret_code = 0;

        now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
        cci->bg.starttime = now;
        timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
    }

    return ret;
}
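/*
 * Worked example of the progress estimate in the timer callback below
 * (values illustrative): with bg.runtime = 8000 ms and the timer firing
 * 2000 ms after bg.starttime, complete_pct becomes 100 * 2000 / 8000 = 25,
 * and the timer is re-armed CXL_MBOX_BG_UPDATE_FREQ (1000) ms later,
 * satisfying the once-per-second update requirement noted above.
 */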
static void bg_timercb(void *opaque)
{
    CXLCCI *cci = opaque;
    uint64_t now, total_time;

    qemu_mutex_lock(&cci->bg.lock);

    now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    total_time = cci->bg.starttime + cci->bg.runtime;

    if (now >= total_time) { /* we are done */
        uint16_t ret = CXL_MBOX_SUCCESS;

        cci->bg.complete_pct = 100;
        cci->bg.ret_code = ret;
        switch (cci->bg.opcode) {
        case 0x0201: /* fw transfer */
            __do_firmware_xfer(cci);
            break;
        case 0x4400: /* sanitize */
        {
            CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

            __do_sanitization(ct3d);
            cxl_dev_enable_media(&ct3d->cxl_dstate);
        }
            break;
        case 0x4402: /* Media Operations sanitize */
        {
            CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

            __do_sanitize(ct3d);
        }
            break;
        case 0x4304: /* scan media */
        {
            CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

            __do_scan_media(ct3d);
            break;
        }
        default:
            __builtin_unreachable();
            break;
        }
    } else {
        /* estimate only */
        cci->bg.complete_pct =
            100 * (now - cci->bg.starttime) / cci->bg.runtime;
        timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
    }

    if (cci->bg.complete_pct == 100) {
        /* TODO: generalize to switch CCI */
        CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
        CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
        PCIDevice *pdev = PCI_DEVICE(cci->d);

        cci->bg.starttime = 0;
        /* registers are updated, allow new bg-capable cmds */
        cci->bg.runtime = 0;

        if (msix_enabled(pdev)) {
            msix_notify(pdev, cxl_dstate->mbox_msi_n);
        } else if (msi_enabled(pdev)) {
            msi_notify(pdev, cxl_dstate->mbox_msi_n);
        }
    }

    qemu_mutex_unlock(&cci->bg.lock);
}

static void cxl_rebuild_cel(CXLCCI *cci)
{
    cci->cel_size = 0; /* Reset for a fresh build */
    for (int set = 0; set < 256; set++) {
        for (int cmd = 0; cmd < 256; cmd++) {
            if (cci->cxl_cmd_set[set][cmd].handler) {
                const struct cxl_cmd *c = &cci->cxl_cmd_set[set][cmd];
                struct cel_log *log =
                    &cci->cel_log[cci->cel_size];

                log->opcode = (set << 8) | cmd;
                log->effect = c->effect;
                cci->cel_size++;
            }
        }
    }
}

void cxl_init_cci(CXLCCI *cci, size_t payload_max)
{
    cci->payload_max = payload_max;
    cxl_rebuild_cel(cci);

    cci->bg.complete_pct = 0;
    cci->bg.starttime = 0;
    cci->bg.runtime = 0;
    cci->bg.aborted = false;
    cci->bg.timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                 bg_timercb, cci);
    qemu_mutex_init(&cci->bg.lock);

    memset(&cci->fw, 0, sizeof(cci->fw));
    cci->fw.active_slot = 1;
    cci->fw.slot[cci->fw.active_slot - 1] = true;
    cci->initialized = true;
}

void cxl_destroy_cci(CXLCCI *cci)
{
    qemu_mutex_destroy(&cci->bg.lock);
    cci->initialized = false;
}

static void cxl_copy_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmds)[256])
{
    for (int set = 0; set < 256; set++) {
        for (int cmd = 0; cmd < 256; cmd++) {
            if (cxl_cmds[set][cmd].handler) {
                cci->cxl_cmd_set[set][cmd] = cxl_cmds[set][cmd];
            }
        }
    }
}

void cxl_add_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmd_set)[256],
                          size_t payload_max)
{
    cci->payload_max = MAX(payload_max, cci->payload_max);
    cxl_copy_cci_commands(cci, cxl_cmd_set);
    cxl_rebuild_cel(cci);
}

void cxl_initialize_mailbox_swcci(CXLCCI *cci, DeviceState *intf,
                                  DeviceState *d, size_t payload_max)
{
    cxl_copy_cci_commands(cci, cxl_cmd_set_sw);
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}
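/*
 * In the initializers below, cci->d is the device the commands operate on
 * and cci->intf is the device exposing the transport. For the type 3
 * primary mailbox the two are the same PCI device, so no separation is
 * needed.
 */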
void cxl_initialize_mailbox_t3(CXLCCI *cci, DeviceState *d, size_t payload_max)
{
    CXLType3Dev *ct3d = CXL_TYPE3(d);

    cxl_copy_cci_commands(cci, cxl_cmd_set);
    if (ct3d->dc.num_regions) {
        cxl_copy_cci_commands(cci, cxl_cmd_set_dcd);
    }
    cci->d = d;

    /* No separation for PCI MB as protocol handled in PCI device */
    cci->intf = d;
    cxl_init_cci(cci, payload_max);
}

static const struct cxl_cmd cxl_cmd_set_t3_ld[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
        0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
};

void cxl_initialize_t3_ld_cci(CXLCCI *cci, DeviceState *d, DeviceState *intf,
                              size_t payload_max)
{
    cxl_copy_cci_commands(cci, cxl_cmd_set_t3_ld);
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}

static const struct cxl_cmd cxl_cmd_set_t3_fm_owned_ld_mctp[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0},
    [INFOSTAT][GET_RESPONSE_MSG_LIMIT] = { "GET_RESPONSE_MSG_LIMIT",
        cmd_get_response_msg_limit, 0, 0 },
    [INFOSTAT][SET_RESPONSE_MSG_LIMIT] = { "SET_RESPONSE_MSG_LIMIT",
        cmd_set_response_msg_limit, 1, 0 },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
        0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
        cmd_tunnel_management_cmd, ~0, 0 },
};

void cxl_initialize_t3_fm_owned_ld_mctpcci(CXLCCI *cci, DeviceState *d,
                                           DeviceState *intf,
                                           size_t payload_max)
{
    CXLType3Dev *ct3d = CXL_TYPE3(d);

    cxl_copy_cci_commands(cci, cxl_cmd_set_t3_fm_owned_ld_mctp);
    if (ct3d->dc.num_regions) {
        cxl_copy_cci_commands(cci, cxl_cmd_set_fm_dcd);
    }
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}
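/*
 * For illustration only: wiring up a further CCI flavour follows the same
 * pattern as the initializers above. The table and function names in this
 * sketch are hypothetical, not part of this file.
 *
 *     static const struct cxl_cmd cxl_cmd_set_example[256][256] = {
 *         [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify,
 *                                     0, 0 },
 *     };
 *
 *     void cxl_initialize_example_cci(CXLCCI *cci, DeviceState *d,
 *                                     DeviceState *intf, size_t payload_max)
 *     {
 *         cxl_copy_cci_commands(cci, cxl_cmd_set_example);
 *         cci->d = d;
 *         cci->intf = intf;
 *         cxl_init_cci(cci, payload_max);
 *     }
 */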