/*
 * CXL Utility library for mailbox interface
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <math.h>

#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/cxl/cxl.h"
#include "hw/cxl/cxl_events.h"
#include "hw/cxl/cxl_mailbox.h"
#include "hw/pci/pci.h"
#include "hw/pci-bridge/cxl_upstream_port.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "qemu/units.h"
#include "qemu/uuid.h"
#include "system/hostmem.h"
#include "qemu/range.h"
#include "qapi/qapi-types-cxl.h"

#define CXL_CAPACITY_MULTIPLIER   (256 * MiB)
#define CXL_DC_EVENT_LOG_SIZE 8
#define CXL_NUM_EXTENTS_SUPPORTED 512
#define CXL_NUM_TAGS_SUPPORTED 0
#define CXL_ALERTS_LIFE_USED_WARN_THRESH (1 << 0)
#define CXL_ALERTS_OVER_TEMP_WARN_THRESH (1 << 1)
#define CXL_ALERTS_UNDER_TEMP_WARN_THRESH (1 << 2)
#define CXL_ALERTS_COR_VMEM_ERR_WARN_THRESH (1 << 3)
#define CXL_ALERTS_COR_PMEM_ERR_WARN_THRESH (1 << 4)

/*
 * How to add a new command: an example using command set FOO with cmd BAR.
 *  1. Add the command set and cmd to the enum.
 *     FOO    = 0x7f,
 *      #define BAR 0
 *  2. Declare the handler
 *     static CXLRetCode cmd_foo_bar(struct cxl_cmd *cmd,
 *                                   CXLDeviceState *cxl_dstate, uint16_t *len)
 *  3. Add the command to the cxl_cmd_set[][]
 *     [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y },
 *  4. Implement your handler
 *     define_mailbox_handler(FOO_BAR) { ... return CXL_MBOX_SUCCESS; }
 *
 *
 * Writing the handler:
 * The handler will provide the &struct cxl_cmd, the &CXLDeviceState, and the
 * in/out length of the payload. The handler is responsible for consuming the
 * payload from cmd->payload and operating upon it as necessary. It must then
 * fill the output data into cmd->payload (overwriting what was there),
 * setting the length, and returning a valid return code.
 *
 * XXX: The handler need not worry about endianness. The payload is read out of
 * a register interface that already deals with it.
 */
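/*
 * A hypothetical worked example of the four steps above (FOO, BAR and
 * cmd_foo_bar are illustrative names only, not part of the CXL spec or of
 * this file):
 *
 *    FOO = 0x7f,
 *        #define BAR 0x0
 *
 *    static CXLRetCode cmd_foo_bar(const struct cxl_cmd *cmd,
 *                                  uint8_t *payload_in, size_t len_in,
 *                                  uint8_t *payload_out, size_t *len_out,
 *                                  CXLCCI *cci)
 *    {
 *        payload_out[0] = 42;
 *        *len_out = 1;
 *        return CXL_MBOX_SUCCESS;
 *    }
 *
 *    [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, 0, 0 },
 *
 * In the table entry the third field is the expected input payload length
 * (0 here, as this BAR takes no input) and the fourth is the command
 * effects mask (0 for no effects).
 */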
enum {
    INFOSTAT    = 0x00,
        #define IS_IDENTIFY   0x1
        #define BACKGROUND_OPERATION_STATUS   0x2
        #define GET_RESPONSE_MSG_LIMIT  0x3
        #define SET_RESPONSE_MSG_LIMIT  0x4
        #define BACKGROUND_OPERATION_ABORT  0x5
    EVENTS      = 0x01,
        #define GET_RECORDS   0x0
        #define CLEAR_RECORDS 0x1
        #define GET_INTERRUPT_POLICY 0x2
        #define SET_INTERRUPT_POLICY 0x3
    FIRMWARE_UPDATE = 0x02,
        #define GET_INFO      0x0
        #define TRANSFER      0x1
        #define ACTIVATE      0x2
    TIMESTAMP   = 0x03,
        #define GET           0x0
        #define SET           0x1
    LOGS        = 0x04,
        #define GET_SUPPORTED 0x0
        #define GET_LOG       0x1
    FEATURES    = 0x05,
        #define GET_SUPPORTED 0x0
        #define GET_FEATURE   0x1
        #define SET_FEATURE   0x2
    IDENTIFY    = 0x40,
        #define MEMORY_DEVICE 0x0
    CCLS        = 0x41,
        #define GET_PARTITION_INFO 0x0
        #define GET_LSA       0x2
        #define SET_LSA       0x3
    HEALTH_INFO_ALERTS = 0x42,
        #define GET_ALERT_CONFIG 0x1
        #define SET_ALERT_CONFIG 0x2
    SANITIZE    = 0x44,
        #define OVERWRITE     0x0
        #define SECURE_ERASE  0x1
        #define MEDIA_OPERATIONS 0x2
    PERSISTENT_MEM = 0x45,
        #define GET_SECURITY_STATE 0x0
    MEDIA_AND_POISON = 0x43,
        #define GET_POISON_LIST        0x0
        #define INJECT_POISON          0x1
        #define CLEAR_POISON           0x2
        #define GET_SCAN_MEDIA_CAPABILITIES 0x3
        #define SCAN_MEDIA             0x4
        #define GET_SCAN_MEDIA_RESULTS 0x5
    DCD_CONFIG  = 0x48,
        #define GET_DC_CONFIG          0x0
        #define GET_DYN_CAP_EXT_LIST   0x1
        #define ADD_DYN_CAP_RSP        0x2
        #define RELEASE_DYN_CAP        0x3
    PHYSICAL_SWITCH = 0x51,
        #define IDENTIFY_SWITCH_DEVICE      0x0
        #define GET_PHYSICAL_PORT_STATE     0x1
    TUNNEL = 0x53,
        #define MANAGEMENT_COMMAND     0x0
    FMAPI_DCD_MGMT = 0x56,
        #define GET_DCD_INFO 0x0
};

/* CCI Message Format CXL r3.1 Figure 7-19 */
typedef struct CXLCCIMessage {
    uint8_t category;
#define CXL_CCI_CAT_REQ 0
#define CXL_CCI_CAT_RSP 1
    uint8_t tag;
    uint8_t resv1;
    uint8_t command;
    uint8_t command_set;
    uint8_t pl_length[3];
    uint16_t rc;
    uint16_t vendor_specific;
    uint8_t payload[];
} QEMU_PACKED CXLCCIMessage;

/* This command is only defined to an MLD FM Owned LD or an MHD */
static CXLRetCode cmd_tunnel_management_cmd(const struct cxl_cmd *cmd,
                                            uint8_t *payload_in,
                                            size_t len_in,
                                            uint8_t *payload_out,
                                            size_t *len_out,
                                            CXLCCI *cci)
{
    PCIDevice *tunnel_target;
    CXLCCI *target_cci;
    struct {
        uint8_t port_or_ld_id;
        uint8_t target_type;
        uint16_t size;
        CXLCCIMessage ccimessage;
    } QEMU_PACKED *in;
    struct {
        uint16_t resp_len;
        uint8_t resv[2];
        CXLCCIMessage ccimessage;
    } QEMU_PACKED *out;
    size_t pl_length, length_out;
    bool bg_started;
    int rc;

    if (cmd->in < sizeof(*in)) {
        return CXL_MBOX_INVALID_INPUT;
    }
    in = (void *)payload_in;
    out = (void *)payload_out;

    if (len_in < sizeof(*in)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /* Enough room for minimum sized message - no payload */
    if (in->size < sizeof(in->ccimessage)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /* Length of input payload should be in->size + a wrapping tunnel header */
    if (in->size != len_in - offsetof(typeof(*out), ccimessage)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    if (in->ccimessage.category != CXL_CCI_CAT_REQ) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (in->target_type != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "Tunneled Command sent to non-existent FM-LD");
        return CXL_MBOX_INVALID_INPUT;
    }

    /*
     * The target of a tunnel unfortunately depends on the type of CCI
     * reading the message.
     * If in a switch, it is the port number.
     * If in an MLD, it is the LD number.
     * If in an MHD, target type indicates where we are going.
     */
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

        if (in->port_or_ld_id != 0) {
            /* Only pretending to have one for now! */
            return CXL_MBOX_INVALID_INPUT;
        }
        target_cci = &ct3d->ld0_cci;
    } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
        CXLUpstreamPort *usp = CXL_USP(cci->d);

        tunnel_target = pcie_find_port_by_pn(&PCI_BRIDGE(usp)->sec_bus,
                                             in->port_or_ld_id);
        if (!tunnel_target) {
            return CXL_MBOX_INVALID_INPUT;
        }
        tunnel_target =
            pci_bridge_get_sec_bus(PCI_BRIDGE(tunnel_target))->devices[0];
        if (!tunnel_target) {
            return CXL_MBOX_INVALID_INPUT;
        }
        if (object_dynamic_cast(OBJECT(tunnel_target), TYPE_CXL_TYPE3)) {
            CXLType3Dev *ct3d = CXL_TYPE3(tunnel_target);

            /* Tunneled VDMs always land on FM Owned LD */
            target_cci = &ct3d->vdm_fm_owned_ld_mctp_cci;
        } else {
            return CXL_MBOX_INVALID_INPUT;
        }
    } else {
        return CXL_MBOX_INVALID_INPUT;
    }

    pl_length = in->ccimessage.pl_length[2] << 16 |
        in->ccimessage.pl_length[1] << 8 | in->ccimessage.pl_length[0];
    rc = cxl_process_cci_message(target_cci,
                                 in->ccimessage.command_set,
                                 in->ccimessage.command,
                                 pl_length, in->ccimessage.payload,
                                 &length_out, out->ccimessage.payload,
                                 &bg_started);
    /* Payload should be in place. The rest of the CCI header needs filling. */
    out->resp_len = length_out + sizeof(CXLCCIMessage);
    st24_le_p(out->ccimessage.pl_length, length_out);
    out->ccimessage.rc = rc;
    out->ccimessage.category = CXL_CCI_CAT_RSP;
    out->ccimessage.command = in->ccimessage.command;
    out->ccimessage.command_set = in->ccimessage.command_set;
    out->ccimessage.tag = in->ccimessage.tag;
    *len_out = length_out + sizeof(*out);

    return CXL_MBOX_SUCCESS;
}
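/*
 * A sketch of the 24-bit little-endian payload length field used above
 * (value illustrative): for pl_length == 0x000123,
 *
 *    pl_length[0] = 0x23, pl_length[1] = 0x01, pl_length[2] = 0x00
 *
 * st24_le_p() performs the store; the manual shift/OR in
 * cmd_tunnel_management_cmd() is the matching load.
 */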
static CXLRetCode cmd_events_get_records(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in, size_t len_in,
                                         uint8_t *payload_out, size_t *len_out,
                                         CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLGetEventPayload *pl;
    uint8_t log_type;
    int max_recs;

    if (cmd->in < sizeof(log_type)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    log_type = payload_in[0];

    pl = (CXLGetEventPayload *)payload_out;

    max_recs = (cxlds->payload_size - CXL_EVENT_PAYLOAD_HDR_SIZE) /
               CXL_EVENT_RECORD_SIZE;
    if (max_recs > 0xFFFF) {
        max_recs = 0xFFFF;
    }

    return cxl_event_get_records(cxlds, pl, log_type, max_recs, len_out);
}

static CXLRetCode cmd_events_clear_records(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLClearEventPayload *pl;

    pl = (CXLClearEventPayload *)payload_in;

    if (len_in < sizeof(*pl) ||
        len_in < sizeof(*pl) + sizeof(*pl->handle) * pl->nr_recs) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    *len_out = 0;
    return cxl_event_clear_records(cxlds, pl);
}
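/*
 * A sketch of the Clear Event Records length check above, assuming the
 * CXLClearEventPayload layout in cxl_events.h (a fixed header followed by
 * nr_recs 16-bit record handles): clearing three records requires
 * len_in >= sizeof(CXLClearEventPayload) + 3 * sizeof(uint16_t). A payload
 * whose nr_recs claims more handles than it actually carries fails with
 * Invalid Payload Length.
 */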
static CXLRetCode cmd_events_get_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    policy = (CXLEventInterruptPolicy *)payload_out;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    if (log->irq_enabled) {
        policy->info_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    if (log->irq_enabled) {
        policy->warn_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    if (log->irq_enabled) {
        policy->failure_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    if (log->irq_enabled) {
        policy->fatal_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    if (log->irq_enabled) {
        /* Dynamic Capacity borrows the same vector as info */
        policy->dyn_cap_settings = CXL_INT_MSI_MSIX;
    }

    *len_out = sizeof(*policy);
    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cmd_events_set_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    if (len_in < CXL_EVENT_INT_SETTING_MIN_LEN) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    policy = (CXLEventInterruptPolicy *)payload_in;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    log->irq_enabled = (policy->info_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    log->irq_enabled = (policy->warn_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    log->irq_enabled = (policy->failure_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    log->irq_enabled = (policy->fatal_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    /* DCD is optional */
    if (len_in < sizeof(*policy)) {
        return CXL_MBOX_SUCCESS;
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    log->irq_enabled = (policy->dyn_cap_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.1.1: Identify (Opcode 0001h) */
static CXLRetCode cmd_infostat_identify(const struct cxl_cmd *cmd,
                                        uint8_t *payload_in,
                                        size_t len_in,
                                        uint8_t *payload_out,
                                        size_t *len_out,
                                        CXLCCI *cci)
{
    PCIDeviceClass *class = PCI_DEVICE_GET_CLASS(cci->d);
    struct {
        uint16_t pcie_vid;
        uint16_t pcie_did;
        uint16_t pcie_subsys_vid;
        uint16_t pcie_subsys_id;
        uint64_t sn;
        uint8_t max_message_size;
        uint8_t component_type;
    } QEMU_PACKED *is_identify;
    QEMU_BUILD_BUG_ON(sizeof(*is_identify) != 18);

    is_identify = (void *)payload_out;
    is_identify->pcie_vid = class->vendor_id;
    is_identify->pcie_did = class->device_id;
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
        is_identify->sn = CXL_USP(cci->d)->sn;
        /* Subsystem info not defined for a USP */
        is_identify->pcie_subsys_vid = 0;
        is_identify->pcie_subsys_id = 0;
        is_identify->component_type = 0x0; /* Switch */
    } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        PCIDevice *pci_dev = PCI_DEVICE(cci->d);

        is_identify->sn = CXL_TYPE3(cci->d)->sn;
        /*
         * We can't always use class->subsystem_vendor_id as
         * it is not set if the defaults are used.
         */
        is_identify->pcie_subsys_vid =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID);
        is_identify->pcie_subsys_id =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_ID);
        is_identify->component_type = 0x3; /* Type 3 */
    }

    is_identify->max_message_size = (uint8_t)log2(cci->payload_max);
    *len_out = sizeof(*is_identify);
    return CXL_MBOX_SUCCESS;
}
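/*
 * Message sizes in Identify and in the Get/Set Response Message Limit pair
 * below are encoded as log2 of the byte count (values illustrative):
 *
 *    payload_max = 256  -> reported limit = 8   (1 << 8  == 256)
 *    payload_max = 1024 -> reported limit = 10  (1 << 10 == 1024)
 *
 * Set Response Message Limit therefore only accepts 8..10, i.e. 256 B to
 * 1 KiB response payloads.
 */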
/* CXL r3.1 section 8.2.9.1.3: Get Response Message Limit (Opcode 0003h) */
static CXLRetCode cmd_get_response_msg_limit(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        uint8_t rsp_limit;
    } QEMU_PACKED *get_rsp_msg_limit = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*get_rsp_msg_limit) != 1);

    get_rsp_msg_limit->rsp_limit = (uint8_t)log2(cci->payload_max);

    *len_out = sizeof(*get_rsp_msg_limit);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.1.4: Set Response Message Limit (Opcode 0004h) */
static CXLRetCode cmd_set_response_msg_limit(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        uint8_t rsp_limit;
    } QEMU_PACKED *in = (void *)payload_in;
    QEMU_BUILD_BUG_ON(sizeof(*in) != 1);
    struct {
        uint8_t rsp_limit;
    } QEMU_PACKED *out = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*out) != 1);

    if (in->rsp_limit < 8 || in->rsp_limit > 10) {
        return CXL_MBOX_INVALID_INPUT;
    }

    cci->payload_max = 1 << in->rsp_limit;
    out->rsp_limit = in->rsp_limit;

    *len_out = sizeof(*out);
    return CXL_MBOX_SUCCESS;
}

static void cxl_set_dsp_active_bm(PCIBus *b, PCIDevice *d,
                                  void *private)
{
    uint8_t *bm = private;

    if (object_dynamic_cast(OBJECT(d), TYPE_CXL_DSP)) {
        uint8_t port = PCIE_PORT(d)->port;

        bm[port / 8] |= 1 << (port % 8);
    }
}

/* CXL r3.1 Section 7.6.7.1.1: Identify Switch Device (Opcode 5100h) */
static CXLRetCode cmd_identify_switch_device(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    PCIEPort *usp = PCIE_PORT(cci->d);
    PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
    int num_phys_ports = pcie_count_ds_ports(bus);

    struct cxl_fmapi_ident_switch_dev_resp_pl {
        uint8_t ingress_port_id;
        uint8_t rsvd;
        uint8_t num_physical_ports;
        uint8_t num_vcss;
        uint8_t active_port_bitmask[0x20];
        uint8_t active_vcs_bitmask[0x20];
        uint16_t total_vppbs;
        uint16_t bound_vppbs;
        uint8_t num_hdm_decoders_per_usp;
    } QEMU_PACKED *out;
    QEMU_BUILD_BUG_ON(sizeof(*out) != 0x49);

    out = (struct cxl_fmapi_ident_switch_dev_resp_pl *)payload_out;
    *out = (struct cxl_fmapi_ident_switch_dev_resp_pl) {
        .num_physical_ports = num_phys_ports + 1, /* 1 USP */
        .num_vcss = 1, /* Multiple VCSs not yet supported - tricky */
        .active_vcs_bitmask[0] = 0x1,
        .total_vppbs = num_phys_ports + 1,
        .bound_vppbs = num_phys_ports + 1,
        .num_hdm_decoders_per_usp = 4,
    };

    /* Depends on the CCI type */
    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_PCIE_PORT)) {
        out->ingress_port_id = PCIE_PORT(cci->intf)->port;
    } else {
        /* MCTP? */
        out->ingress_port_id = 0;
    }

    pci_for_each_device_under_bus(bus, cxl_set_dsp_active_bm,
                                  out->active_port_bitmask);
    out->active_port_bitmask[usp->port / 8] |= (1 << usp->port % 8);

    *len_out = sizeof(*out);

    return CXL_MBOX_SUCCESS;
}
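/*
 * A sketch of the active port bitmask indexing used above, one bit per
 * port number (port numbers illustrative):
 *
 *    port 0  -> bm[0] |= 0x01
 *    port 5  -> bm[0] |= 0x20    (5 / 8 == 0,  1 << (5 % 8)  == 0x20)
 *    port 11 -> bm[1] |= 0x08    (11 / 8 == 1, 1 << (11 % 8) == 0x08)
 */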
/* CXL r3.1 Section 7.6.7.1.2: Get Physical Port State (Opcode 5101h) */
static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    /* CXL r3.1 Table 7-17: Get Physical Port State Request Payload */
    struct cxl_fmapi_get_phys_port_state_req_pl {
        uint8_t num_ports;
        uint8_t ports[];
    } QEMU_PACKED *in;

    /*
     * CXL r3.1 Table 7-19: Get Physical Port State Port Information Block
     * Format
     */
    struct cxl_fmapi_port_state_info_block {
        uint8_t port_id;
        uint8_t config_state;
        uint8_t connected_device_cxl_version;
        uint8_t rsv1;
        uint8_t connected_device_type;
        uint8_t port_cxl_version_bitmask;
        uint8_t max_link_width;
        uint8_t negotiated_link_width;
        uint8_t supported_link_speeds_vector;
        uint8_t max_link_speed;
        uint8_t current_link_speed;
        uint8_t ltssm_state;
        uint8_t first_lane_num;
        uint16_t link_state;
        uint8_t supported_ld_count;
    } QEMU_PACKED;

    /* CXL r3.1 Table 7-18: Get Physical Port State Response Payload */
    struct cxl_fmapi_get_phys_port_state_resp_pl {
        uint8_t num_ports;
        uint8_t rsv1[3];
        struct cxl_fmapi_port_state_info_block ports[];
    } QEMU_PACKED *out;
    PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
    PCIEPort *usp = PCIE_PORT(cci->d);
    size_t pl_size;
    int i;

    in = (struct cxl_fmapi_get_phys_port_state_req_pl *)payload_in;
    out = (struct cxl_fmapi_get_phys_port_state_resp_pl *)payload_out;

    if (len_in < sizeof(*in)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /* Check if what was requested can fit */
    if (sizeof(*out) + sizeof(*out->ports) * in->num_ports > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* For success there should be a match for each requested port */
    out->num_ports = in->num_ports;

    for (i = 0; i < in->num_ports; i++) {
        struct cxl_fmapi_port_state_info_block *port;
        /* First try to match on downstream port */
        PCIDevice *port_dev;
        uint16_t lnkcap, lnkcap2, lnksta;

        port = &out->ports[i];

        port_dev = pcie_find_port_by_pn(bus, in->ports[i]);
        if (port_dev) { /* DSP */
            PCIDevice *ds_dev = pci_bridge_get_sec_bus(PCI_BRIDGE(port_dev))
                ->devices[0];

            port->config_state = 3;
            if (ds_dev) {
                if (object_dynamic_cast(OBJECT(ds_dev), TYPE_CXL_TYPE3)) {
                    port->connected_device_type = 5; /* Assume MLD for now */
                } else {
                    port->connected_device_type = 1;
                }
            } else {
                port->connected_device_type = 0;
            }
            port->supported_ld_count = 3;
        } else if (usp->port == in->ports[i]) { /* USP */
            port_dev = PCI_DEVICE(usp);
            port->config_state = 4;
            port->connected_device_type = 0;
        } else {
            return CXL_MBOX_INVALID_INPUT;
        }

        port->port_id = in->ports[i];
        /* Information on status of this port in lnksta, lnkcap */
        if (!port_dev->exp.exp_cap) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
        lnksta = port_dev->config_read(port_dev,
                                       port_dev->exp.exp_cap + PCI_EXP_LNKSTA,
                                       sizeof(lnksta));
        lnkcap = port_dev->config_read(port_dev,
                                       port_dev->exp.exp_cap + PCI_EXP_LNKCAP,
                                       sizeof(lnkcap));
        lnkcap2 = port_dev->config_read(port_dev,
                                        port_dev->exp.exp_cap + PCI_EXP_LNKCAP2,
                                        sizeof(lnkcap2));

        port->max_link_width = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
        port->negotiated_link_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> 4;
        /* No definition for SLS field in linux/pci_regs.h */
        port->supported_link_speeds_vector = (lnkcap2 & 0xFE) >> 1;
        port->max_link_speed = lnkcap & PCI_EXP_LNKCAP_SLS;
        port->current_link_speed = lnksta & PCI_EXP_LNKSTA_CLS;
        /* TODO: Track down if we can get the rest of the info */
        port->ltssm_state = 0x7;
        port->first_lane_num = 0;
        port->link_state = 0;
        port->port_cxl_version_bitmask = 0x2;
        port->connected_device_cxl_version = 0x2;
    }

    pl_size = sizeof(*out) + sizeof(*out->ports) * in->num_ports;
    *len_out = pl_size;

    return CXL_MBOX_SUCCESS;
}
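/*
 * A sketch of the link register decoding above (values illustrative): for
 * a x4 link trained to 16 GT/s,
 *
 *    (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4  -> max_link_width = 4
 *    (lnksta & PCI_EXP_LNKSTA_NLW) >> 4  -> negotiated_link_width = 4
 *    lnksta & PCI_EXP_LNKSTA_CLS         -> current_link_speed = 4
 *
 * where a Current Link Speed encoding of 4 means 16 GT/s per PCIe.
 */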
/* CXL r3.1 Section 8.2.9.1.2: Background Operation Status (Opcode 0002h) */
static CXLRetCode cmd_infostat_bg_op_sts(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint8_t status;
        uint8_t rsvd;
        uint16_t opcode;
        uint16_t returncode;
        uint16_t vendor_ext_status;
    } QEMU_PACKED *bg_op_status;
    QEMU_BUILD_BUG_ON(sizeof(*bg_op_status) != 8);

    bg_op_status = (void *)payload_out;
    bg_op_status->status = cci->bg.complete_pct << 1;
    if (cci->bg.runtime > 0) {
        bg_op_status->status |= 1U << 0;
    }
    bg_op_status->opcode = cci->bg.opcode;
    bg_op_status->returncode = cci->bg.ret_code;
    *len_out = sizeof(*bg_op_status);

    return CXL_MBOX_SUCCESS;
}

/*
 * CXL r3.1 Section 8.2.9.1.5:
 * Request Abort Background Operation (Opcode 0005h)
 */
static CXLRetCode cmd_infostat_bg_op_abort(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    int bg_set = cci->bg.opcode >> 8;
    int bg_cmd = cci->bg.opcode & 0xff;
    const struct cxl_cmd *bg_c = &cci->cxl_cmd_set[bg_set][bg_cmd];

    if (!(bg_c->effect & CXL_MBOX_BACKGROUND_OPERATION_ABORT)) {
        return CXL_MBOX_REQUEST_ABORT_NOTSUP;
    }

    qemu_mutex_lock(&cci->bg.lock);
    if (cci->bg.runtime) {
        /* If the operation is near complete, let it finish */
        if (cci->bg.complete_pct < 85) {
            timer_del(cci->bg.timer);
            cci->bg.ret_code = CXL_MBOX_ABORTED;
            cci->bg.starttime = 0;
            cci->bg.runtime = 0;
            cci->bg.aborted = true;
        }
    }
    qemu_mutex_unlock(&cci->bg.lock);

    return CXL_MBOX_SUCCESS;
}

#define CXL_FW_SLOTS 2
#define CXL_FW_SIZE  0x02000000 /* 32 MiB */

/* CXL r3.1 Section 8.2.9.3.1: Get FW Info (Opcode 0200h) */
static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    struct {
        uint8_t slots_supported;
        uint8_t slot_info;
        uint8_t caps;
        uint8_t rsvd[0xd];
        char fw_rev1[0x10];
        char fw_rev2[0x10];
        char fw_rev3[0x10];
        char fw_rev4[0x10];
    } QEMU_PACKED *fw_info;
    QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50);

    if (!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER) ||
        !QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER) ||
        !QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER)) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    fw_info = (void *)payload_out;

    fw_info->slots_supported = CXL_FW_SLOTS;
    fw_info->slot_info = (cci->fw.active_slot & 0x7) |
        ((cci->fw.staged_slot & 0x7) << 3);
    fw_info->caps = BIT(0); /* online update supported */

    if (cci->fw.slot[0]) {
        pstrcpy(fw_info->fw_rev1, sizeof(fw_info->fw_rev1), "BWFW VERSION 0");
    }
    if (cci->fw.slot[1]) {
        pstrcpy(fw_info->fw_rev2, sizeof(fw_info->fw_rev2), "BWFW VERSION 1");
    }

    *len_out = sizeof(*fw_info);
    return CXL_MBOX_SUCCESS;
}
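/*
 * A worked example of the slot_info encoding above: bits[2:0] carry the
 * active slot and bits[5:3] the staged slot, so with slot 1 active and
 * slot 2 staged,
 *
 *    slot_info = (1 & 0x7) | ((2 & 0x7) << 3) = 0x11
 */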
/* CXL r3.1 section 8.2.9.3.2: Transfer FW (Opcode 0201h) */
#define CXL_FW_XFER_ALIGNMENT   128

#define CXL_FW_XFER_ACTION_FULL     0x0
#define CXL_FW_XFER_ACTION_INIT     0x1
#define CXL_FW_XFER_ACTION_CONTINUE 0x2
#define CXL_FW_XFER_ACTION_END      0x3
#define CXL_FW_XFER_ACTION_ABORT    0x4

static CXLRetCode cmd_firmware_update_transfer(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    struct {
        uint8_t action;
        uint8_t slot;
        uint8_t rsvd1[2];
        uint32_t offset;
        uint8_t rsvd2[0x78];
        uint8_t data[];
    } QEMU_PACKED *fw_transfer = (void *)payload_in;
    size_t offset, length;

    if (len < sizeof(*fw_transfer)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (fw_transfer->action == CXL_FW_XFER_ACTION_ABORT) {
        /*
         * At this point there aren't any ongoing transfers
         * running in the bg - this is serialized before this
         * call altogether. Just mark the state machine and
         * disregard any other input.
         */
        cci->fw.transferring = false;
        return CXL_MBOX_SUCCESS;
    }

    offset = fw_transfer->offset * CXL_FW_XFER_ALIGNMENT;
    length = len - sizeof(*fw_transfer);
    if (offset + length > CXL_FW_SIZE) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (cci->fw.transferring) {
        if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL ||
            fw_transfer->action == CXL_FW_XFER_ACTION_INIT) {
            return CXL_MBOX_FW_XFER_IN_PROGRESS;
        }
        /*
         * Abort partitioned package transfer if over 30 secs
         * between parts. As opposed to the explicit ABORT action,
         * semantically treat this condition as an error - as
         * if a part action were passed without a previous INIT.
         */
        if (difftime(time(NULL), cci->fw.last_partxfer) > 30.0) {
            cci->fw.transferring = false;
            return CXL_MBOX_INVALID_INPUT;
        }
    } else if (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE ||
               fw_transfer->action == CXL_FW_XFER_ACTION_END) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* Allow back-to-back retransmission */
    if ((offset != cci->fw.prev_offset || length != cci->fw.prev_len) &&
        (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE ||
         fw_transfer->action == CXL_FW_XFER_ACTION_END)) {
        /* Verify no overlaps */
        if (offset < cci->fw.prev_offset + cci->fw.prev_len) {
            return CXL_MBOX_FW_XFER_OUT_OF_ORDER;
        }
    }

    switch (fw_transfer->action) {
    case CXL_FW_XFER_ACTION_FULL: /* ignores offset */
    case CXL_FW_XFER_ACTION_END:
        if (fw_transfer->slot == 0 ||
            fw_transfer->slot == cci->fw.active_slot ||
            fw_transfer->slot > CXL_FW_SLOTS) {
            return CXL_MBOX_FW_INVALID_SLOT;
        }

        /* Mark the slot used upon bg completion */
        break;
    case CXL_FW_XFER_ACTION_INIT:
        if (offset != 0) {
            return CXL_MBOX_INVALID_INPUT;
        }

        cci->fw.transferring = true;
        cci->fw.prev_offset = offset;
        cci->fw.prev_len = length;
        break;
    case CXL_FW_XFER_ACTION_CONTINUE:
        cci->fw.prev_offset = offset;
        cci->fw.prev_len = length;
        break;
    default:
        return CXL_MBOX_INVALID_INPUT;
    }

    if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL) {
        cci->bg.runtime = 10 * 1000UL;
    } else {
        cci->bg.runtime = 2 * 1000UL;
    }
    /* Keep relevant context for bg completion */
    cci->fw.curr_action = fw_transfer->action;
    cci->fw.curr_slot = fw_transfer->slot;
    *len_out = 0;

    return CXL_MBOX_BG_STARTED;
}
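/*
 * A sketch of the part bookkeeping above for a hypothetical two-part
 * transfer of 256 KiB (the offset field is in units of
 * CXL_FW_XFER_ALIGNMENT, i.e. 128 bytes):
 *
 *    INIT:     fw_transfer->offset = 0,    128 KiB of data
 *              -> offset = 0
 *    CONTINUE: fw_transfer->offset = 1024, 128 KiB of data
 *              -> offset = 1024 * 128 = 128 KiB == prev_offset + prev_len
 *    END:      slot is validated now; cci->fw.slot[slot - 1] is marked
 *              used when the background operation completes
 */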
static void __do_firmware_xfer(CXLCCI *cci)
{
    switch (cci->fw.curr_action) {
    case CXL_FW_XFER_ACTION_FULL:
    case CXL_FW_XFER_ACTION_END:
        cci->fw.slot[cci->fw.curr_slot - 1] = true;
        cci->fw.transferring = false;
        break;
    case CXL_FW_XFER_ACTION_INIT:
    case CXL_FW_XFER_ACTION_CONTINUE:
        time(&cci->fw.last_partxfer);
        break;
    default:
        break;
    }
}

/* CXL r3.1 section 8.2.9.3.3: Activate FW (Opcode 0202h) */
static CXLRetCode cmd_firmware_update_activate(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    struct {
        uint8_t action;
        uint8_t slot;
    } QEMU_PACKED *fw_activate = (void *)payload_in;
    QEMU_BUILD_BUG_ON(sizeof(*fw_activate) != 0x2);

    if (fw_activate->slot == 0 ||
        fw_activate->slot == cci->fw.active_slot ||
        fw_activate->slot > CXL_FW_SLOTS) {
        return CXL_MBOX_FW_INVALID_SLOT;
    }

    /* Ensure that an actual fw package is there */
    if (!cci->fw.slot[fw_activate->slot - 1]) {
        return CXL_MBOX_FW_INVALID_SLOT;
    }

    switch (fw_activate->action) {
    case 0: /* online */
        cci->fw.active_slot = fw_activate->slot;
        break;
    case 1: /* reset */
        cci->fw.staged_slot = fw_activate->slot;
        break;
    default:
        return CXL_MBOX_INVALID_INPUT;
    }

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.4.1: Get Timestamp (Opcode 0300h) */
static CXLRetCode cmd_timestamp_get(const struct cxl_cmd *cmd,
                                    uint8_t *payload_in,
                                    size_t len_in,
                                    uint8_t *payload_out,
                                    size_t *len_out,
                                    CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    uint64_t final_time = cxl_device_get_timestamp(cxl_dstate);

    stq_le_p(payload_out, final_time);
    *len_out = 8;

    return CXL_MBOX_SUCCESS;
}
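/*
 * cxl_device_get_timestamp() is implemented elsewhere (cxl-device-utils.c);
 * roughly - a sketch, not the exact implementation - it returns the last
 * host-set value advanced by the virtual time elapsed since Set Timestamp:
 *
 *    delta = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - timestamp.last_set;
 *    return timestamp.host_set + delta;
 */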
/* CXL r3.1 Section 8.2.9.4.2: Set Timestamp (Opcode 0301h) */
static CXLRetCode cmd_timestamp_set(const struct cxl_cmd *cmd,
                                    uint8_t *payload_in,
                                    size_t len_in,
                                    uint8_t *payload_out,
                                    size_t *len_out,
                                    CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;

    cxl_dstate->timestamp.set = true;
    cxl_dstate->timestamp.last_set = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    cxl_dstate->timestamp.host_set = le64_to_cpu(*(uint64_t *)payload_in);

    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.5.2.1: Command Effects Log (CEL) */
static const QemuUUID cel_uuid = {
    .data = UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79,
                 0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17)
};

/* CXL r3.1 Section 8.2.9.5.1: Get Supported Logs (Opcode 0400h) */
static CXLRetCode cmd_logs_get_supported(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint16_t entries;
        uint8_t rsvd[6];
        struct {
            QemuUUID uuid;
            uint32_t size;
        } log_entries[1];
    } QEMU_PACKED *supported_logs = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*supported_logs) != 0x1c);

    supported_logs->entries = 1;
    supported_logs->log_entries[0].uuid = cel_uuid;
    supported_logs->log_entries[0].size = 4 * cci->cel_size;

    *len_out = sizeof(*supported_logs);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h) */
static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct {
        QemuUUID uuid;
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_log;

    get_log = (void *)payload_in;

    if (get_log->length > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) {
        return CXL_MBOX_INVALID_LOG;
    }

    /*
     * CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h)
     * The device shall return Invalid Input if the Offset or Length
     * fields attempt to access beyond the size of the log as reported by
     * Get Supported Logs.
     *
     * Only valid for there to be one entry per opcode, but the length +
     * offset may still be greater than that if the inputs are not valid
     * and so access beyond the end of cci->cel_log.
     */
    if ((uint64_t)get_log->offset + get_log->length > sizeof(cci->cel_log)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* Store off everything to local variables so we can wipe out the payload */
    *len_out = get_log->length;

    memmove(payload_out, cci->cel_log + get_log->offset, get_log->length);

    return CXL_MBOX_SUCCESS;
}
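/*
 * A worked example of the Get Log bounds above (entry count illustrative):
 * each CEL entry is 4 bytes (a 16-bit opcode plus a 16-bit command effects
 * field), so cel_size == 35 entries reports a 4 * 35 == 140 byte log via
 * Get Supported Logs. offset == 136 with length == 4 reads the final
 * entry; anything reaching past the backing cel_log array fails with
 * Invalid Input.
 */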
/* CXL r3.1 section 8.2.9.6: Features */
/*
 * Get Supported Features output payload
 * CXL r3.1 section 8.2.9.6.1 Table 8-96
 */
typedef struct CXLSupportedFeatureHeader {
    uint16_t entries;
    uint16_t nsuppfeats_dev;
    uint32_t reserved;
} QEMU_PACKED CXLSupportedFeatureHeader;

/*
 * Get Supported Features Supported Feature Entry
 * CXL r3.1 section 8.2.9.6.1 Table 8-97
 */
typedef struct CXLSupportedFeatureEntry {
    QemuUUID uuid;
    uint16_t feat_index;
    uint16_t get_feat_size;
    uint16_t set_feat_size;
    uint32_t attr_flags;
    uint8_t get_feat_version;
    uint8_t set_feat_version;
    uint16_t set_feat_effects;
    uint8_t rsvd[18];
} QEMU_PACKED CXLSupportedFeatureEntry;

/* Supported Feature Entry : attribute flags */
#define CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE BIT(0)
#define CXL_FEAT_ENTRY_ATTR_FLAG_DEEPEST_RESET_PERSISTENCE_MASK GENMASK(3, 1)
#define CXL_FEAT_ENTRY_ATTR_FLAG_PERSIST_ACROSS_FIRMWARE_UPDATE BIT(4)
#define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_DEFAULT_SELECTION BIT(5)
#define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_SAVED_SELECTION BIT(6)

/* Supported Feature Entry : set feature effects */
#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_COLD_RESET BIT(0)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE BIT(1)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_DATA_CHANGE BIT(2)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_POLICY_CHANGE BIT(3)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_LOG_CHANGE BIT(4)
#define CXL_FEAT_ENTRY_SFE_SECURITY_STATE_CHANGE BIT(5)
#define CXL_FEAT_ENTRY_SFE_BACKGROUND_OPERATION BIT(6)
#define CXL_FEAT_ENTRY_SFE_SUPPORT_SECONDARY_MAILBOX BIT(7)
#define CXL_FEAT_ENTRY_SFE_SUPPORT_ABORT_BACKGROUND_OPERATION BIT(8)
#define CXL_FEAT_ENTRY_SFE_CEL_VALID BIT(9)
#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CONV_RESET BIT(10)
#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CXL_RESET BIT(11)

enum CXL_SUPPORTED_FEATURES_LIST {
    CXL_FEATURE_PATROL_SCRUB = 0,
    CXL_FEATURE_ECS,
    CXL_FEATURE_MAX
};

/*
 * Get Feature input payload
 * CXL r3.1 section 8.2.9.6.2 Table 8-99
 */
/* Get Feature : Payload in selection */
enum CXL_GET_FEATURE_SELECTION {
    CXL_GET_FEATURE_SEL_CURRENT_VALUE,
    CXL_GET_FEATURE_SEL_DEFAULT_VALUE,
    CXL_GET_FEATURE_SEL_SAVED_VALUE,
    CXL_GET_FEATURE_SEL_MAX
};

/*
 * Set Feature input payload
 * CXL r3.1 section 8.2.9.6.3 Table 8-101
 */
typedef struct CXLSetFeatureInHeader {
    QemuUUID uuid;
    uint32_t flags;
    uint16_t offset;
    uint8_t version;
    uint8_t rsvd[9];
} QEMU_PACKED QEMU_ALIGNED(16) CXLSetFeatureInHeader;

/* Set Feature : Payload in flags */
#define CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK   0x7
enum CXL_SET_FEATURE_FLAG_DATA_TRANSFER {
    CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_CONTINUE_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MAX
};
#define CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET BIT(3)

/* CXL r3.1 section 8.2.9.9.11.1: Device Patrol Scrub Control Feature */
static const QemuUUID patrol_scrub_uuid = {
    .data = UUID(0x96dad7d6, 0xfde8, 0x482b, 0xa7, 0x33,
                 0x75, 0x77, 0x4e, 0x06, 0xdb, 0x8a)
};

typedef struct CXLMemPatrolScrubSetFeature {
    CXLSetFeatureInHeader hdr;
    CXLMemPatrolScrubWriteAttrs feat_data;
} QEMU_PACKED QEMU_ALIGNED(16) CXLMemPatrolScrubSetFeature;

/*
 * CXL r3.1 section 8.2.9.9.11.2:
 * DDR5 Error Check Scrub (ECS) Control Feature
 */
static const QemuUUID ecs_uuid = {
    .data = UUID(0xe5b13f22, 0x2328, 0x4a14, 0xb8, 0xba,
                 0xb9, 0x69, 0x1e, 0x89, 0x33, 0x86)
};

typedef struct CXLMemECSSetFeature {
    CXLSetFeatureInHeader hdr;
    CXLMemECSWriteAttrs feat_data[];
} QEMU_PACKED QEMU_ALIGNED(16) CXLMemECSSetFeature;
/* CXL r3.1 section 8.2.9.6.1: Get Supported Features (Opcode 0500h) */
static CXLRetCode cmd_features_get_supported(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        uint32_t count;
        uint16_t start_index;
        uint16_t reserved;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_feats_in = (void *)payload_in;

    struct {
        CXLSupportedFeatureHeader hdr;
        CXLSupportedFeatureEntry feat_entries[];
    } QEMU_PACKED QEMU_ALIGNED(16) *get_feats_out = (void *)payload_out;
    uint16_t index, req_entries;
    uint16_t entry;

    if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        return CXL_MBOX_UNSUPPORTED;
    }
    if (get_feats_in->count < sizeof(CXLSupportedFeatureHeader) ||
        get_feats_in->start_index >= CXL_FEATURE_MAX) {
        return CXL_MBOX_INVALID_INPUT;
    }

    req_entries = (get_feats_in->count -
                   sizeof(CXLSupportedFeatureHeader)) /
                  sizeof(CXLSupportedFeatureEntry);
    req_entries = MIN(req_entries,
                      (CXL_FEATURE_MAX - get_feats_in->start_index));

    for (entry = 0, index = get_feats_in->start_index;
         entry < req_entries; index++) {
        switch (index) {
        case CXL_FEATURE_PATROL_SCRUB:
            /* Fill supported feature entry for device patrol scrub control */
            get_feats_out->feat_entries[entry++] =
                (struct CXLSupportedFeatureEntry) {
                    .uuid = patrol_scrub_uuid,
                    .feat_index = index,
                    .get_feat_size = sizeof(CXLMemPatrolScrubReadAttrs),
                    .set_feat_size = sizeof(CXLMemPatrolScrubWriteAttrs),
                    .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE,
                    .get_feat_version = CXL_MEMDEV_PS_GET_FEATURE_VERSION,
                    .set_feat_version = CXL_MEMDEV_PS_SET_FEATURE_VERSION,
                    .set_feat_effects =
                        CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE |
                        CXL_FEAT_ENTRY_SFE_CEL_VALID,
                };
            break;
        case CXL_FEATURE_ECS:
            /* Fill supported feature entry for device DDR5 ECS control */
            get_feats_out->feat_entries[entry++] =
                (struct CXLSupportedFeatureEntry) {
                    .uuid = ecs_uuid,
                    .feat_index = index,
                    .get_feat_size = sizeof(CXLMemECSReadAttrs),
                    .set_feat_size = sizeof(CXLMemECSWriteAttrs),
                    .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE,
                    .get_feat_version = CXL_ECS_GET_FEATURE_VERSION,
                    .set_feat_version = CXL_ECS_SET_FEATURE_VERSION,
                    .set_feat_effects =
                        CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE |
                        CXL_FEAT_ENTRY_SFE_CEL_VALID,
                };
            break;
        default:
            __builtin_unreachable();
        }
    }
    get_feats_out->hdr.nsuppfeats_dev = CXL_FEATURE_MAX;
    get_feats_out->hdr.entries = req_entries;
    *len_out = sizeof(CXLSupportedFeatureHeader) +
               req_entries * sizeof(CXLSupportedFeatureEntry);

    return CXL_MBOX_SUCCESS;
}
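/*
 * A worked example of the sizing logic above: with an 8 byte
 * CXLSupportedFeatureHeader and 48 byte CXLSupportedFeatureEntry, a
 * request of count = 8 + 2 * 48 = 104 and start_index = 0 returns both
 * feature entries, hdr.entries == 2 and hdr.nsuppfeats_dev ==
 * CXL_FEATURE_MAX; with start_index = 1 only the ECS entry is returned.
 */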
/* CXL r3.1 section 8.2.9.6.2: Get Feature (Opcode 0501h) */
static CXLRetCode cmd_features_get_feature(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    struct {
        QemuUUID uuid;
        uint16_t offset;
        uint16_t count;
        uint8_t selection;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_feature;
    uint16_t bytes_to_copy = 0;
    CXLType3Dev *ct3d;
    CXLSetFeatureInfo *set_feat_info;

    if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        return CXL_MBOX_UNSUPPORTED;
    }

    ct3d = CXL_TYPE3(cci->d);
    get_feature = (void *)payload_in;

    set_feat_info = &ct3d->set_feat_info;
    if (qemu_uuid_is_equal(&get_feature->uuid, &set_feat_info->uuid)) {
        return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS;
    }

    if (get_feature->selection != CXL_GET_FEATURE_SEL_CURRENT_VALUE) {
        return CXL_MBOX_UNSUPPORTED;
    }
    if (get_feature->offset + get_feature->count > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (qemu_uuid_is_equal(&get_feature->uuid, &patrol_scrub_uuid)) {
        if (get_feature->offset >= sizeof(CXLMemPatrolScrubReadAttrs)) {
            return CXL_MBOX_INVALID_INPUT;
        }
        bytes_to_copy = sizeof(CXLMemPatrolScrubReadAttrs) -
                        get_feature->offset;
        bytes_to_copy = MIN(bytes_to_copy, get_feature->count);
        memcpy(payload_out,
               (uint8_t *)&ct3d->patrol_scrub_attrs + get_feature->offset,
               bytes_to_copy);
    } else if (qemu_uuid_is_equal(&get_feature->uuid, &ecs_uuid)) {
        if (get_feature->offset >= sizeof(CXLMemECSReadAttrs)) {
            return CXL_MBOX_INVALID_INPUT;
        }
        bytes_to_copy = sizeof(CXLMemECSReadAttrs) - get_feature->offset;
        bytes_to_copy = MIN(bytes_to_copy, get_feature->count);
        memcpy(payload_out,
               (uint8_t *)&ct3d->ecs_attrs + get_feature->offset,
               bytes_to_copy);
    } else {
        return CXL_MBOX_UNSUPPORTED;
    }

    *len_out = bytes_to_copy;

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.6.3: Set Feature (Opcode 0502h) */
static CXLRetCode cmd_features_set_feature(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    CXLSetFeatureInHeader *hdr = (void *)payload_in;
    CXLMemPatrolScrubWriteAttrs *ps_write_attrs;
    CXLMemPatrolScrubSetFeature *ps_set_feature;
    CXLMemECSWriteAttrs *ecs_write_attrs;
    CXLMemECSSetFeature *ecs_set_feature;
    CXLSetFeatureInfo *set_feat_info;
    uint16_t bytes_to_copy = 0;
    uint8_t data_transfer_flag;
    CXLType3Dev *ct3d;
    uint16_t count;

    if (len_in < sizeof(*hdr)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        return CXL_MBOX_UNSUPPORTED;
    }
    ct3d = CXL_TYPE3(cci->d);
    set_feat_info = &ct3d->set_feat_info;
    if (!qemu_uuid_is_null(&set_feat_info->uuid) &&
        !qemu_uuid_is_equal(&hdr->uuid, &set_feat_info->uuid)) {
        return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS;
    }
    if (hdr->flags & CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET) {
        set_feat_info->data_saved_across_reset = true;
    } else {
        set_feat_info->data_saved_across_reset = false;
    }

    data_transfer_flag =
        hdr->flags & CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK;
    if (data_transfer_flag == CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER) {
        set_feat_info->uuid = hdr->uuid;
        set_feat_info->data_size = 0;
    }
    set_feat_info->data_transfer_flag = data_transfer_flag;
    set_feat_info->data_offset = hdr->offset;
    bytes_to_copy = len_in - sizeof(CXLSetFeatureInHeader);

    if (bytes_to_copy == 0) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) {
        if (hdr->version != CXL_MEMDEV_PS_SET_FEATURE_VERSION) {
            return CXL_MBOX_UNSUPPORTED;
        }

        ps_set_feature = (void *)payload_in;
        ps_write_attrs = &ps_set_feature->feat_data;

        if ((uint32_t)hdr->offset + bytes_to_copy >
            sizeof(ct3d->patrol_scrub_wr_attrs)) {
            return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
        }
        memcpy((uint8_t *)&ct3d->patrol_scrub_wr_attrs + hdr->offset,
               ps_write_attrs,
               bytes_to_copy);
        set_feat_info->data_size += bytes_to_copy;

        if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
            data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) {
            ct3d->patrol_scrub_attrs.scrub_cycle &= ~0xFF;
            ct3d->patrol_scrub_attrs.scrub_cycle |=
                ct3d->patrol_scrub_wr_attrs.scrub_cycle_hr & 0xFF;
            ct3d->patrol_scrub_attrs.scrub_flags &= ~0x1;
            ct3d->patrol_scrub_attrs.scrub_flags |=
                ct3d->patrol_scrub_wr_attrs.scrub_flags & 0x1;
        }
    } else if (qemu_uuid_is_equal(&hdr->uuid, &ecs_uuid)) {
        if (hdr->version != CXL_ECS_SET_FEATURE_VERSION) {
            return CXL_MBOX_UNSUPPORTED;
        }

        ecs_set_feature = (void *)payload_in;
        ecs_write_attrs = ecs_set_feature->feat_data;

        if ((uint32_t)hdr->offset + bytes_to_copy >
            sizeof(ct3d->ecs_wr_attrs)) {
            return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
        }
        memcpy((uint8_t *)&ct3d->ecs_wr_attrs + hdr->offset,
               ecs_write_attrs,
               bytes_to_copy);
        set_feat_info->data_size += bytes_to_copy;

        if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
            data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) {
            ct3d->ecs_attrs.ecs_log_cap = ct3d->ecs_wr_attrs.ecs_log_cap;
            for (count = 0; count < CXL_ECS_NUM_MEDIA_FRUS; count++) {
                ct3d->ecs_attrs.fru_attrs[count].ecs_config =
                    ct3d->ecs_wr_attrs.fru_attrs[count].ecs_config & 0x1F;
            }
        }
    } else {
        return CXL_MBOX_UNSUPPORTED;
    }

    if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
        data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER ||
        data_transfer_flag == CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER) {
        memset(&set_feat_info->uuid, 0, sizeof(QemuUUID));
        if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) {
            memset(&ct3d->patrol_scrub_wr_attrs, 0, set_feat_info->data_size);
        } else if (qemu_uuid_is_equal(&hdr->uuid, &ecs_uuid)) {
            memset(&ct3d->ecs_wr_attrs, 0, set_feat_info->data_size);
        }
        set_feat_info->data_transfer_flag = 0;
        set_feat_info->data_saved_across_reset = false;
        set_feat_info->data_offset = 0;
        set_feat_info->data_size = 0;
    }

    return CXL_MBOX_SUCCESS;
}
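/*
 * A sketch of the multi-part Set Feature flow handled above (offsets
 * illustrative). Every part carries the same UUID; data is staged in the
 * *_wr_attrs shadow copy and only committed on FULL or FINISH:
 *
 *    part 1: flags = INITIATE, offset = 0  -> stage bytes [0, n)
 *    part 2: flags = CONTINUE, offset = n  -> stage bytes [n, m)
 *    part 3: flags = FINISH,   offset = m  -> stage, commit to the live
 *                                             attributes, clear the
 *                                             transfer state
 *
 * An ABORT part discards the staged data and clears the in-progress UUID
 * so a different feature can then be set.
 */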
/* CXL r3.1 Section 8.2.9.9.1.1: Identify Memory Device (Opcode 4000h) */
static CXLRetCode cmd_identify_memory_device(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        char fw_revision[0x10];
        uint64_t total_capacity;
        uint64_t volatile_capacity;
        uint64_t persistent_capacity;
        uint64_t partition_align;
        uint16_t info_event_log_size;
        uint16_t warning_event_log_size;
        uint16_t failure_event_log_size;
        uint16_t fatal_event_log_size;
        uint32_t lsa_size;
        uint8_t poison_list_max_mer[3];
        uint16_t inject_poison_limit;
        uint8_t poison_caps;
        uint8_t qos_telemetry_caps;
        uint16_t dc_event_log_size;
    } QEMU_PACKED *id;
    QEMU_BUILD_BUG_ON(sizeof(*id) != 0x45);
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    id = (void *)payload_out;

    snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0);

    stq_le_p(&id->total_capacity,
             cxl_dstate->static_mem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->persistent_capacity,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->volatile_capacity,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d));
    /* 256 poison records */
    st24_le_p(id->poison_list_max_mer, 256);
    /* No limit - so limited by main poison record limit */
    stw_le_p(&id->inject_poison_limit, 0);
    stw_le_p(&id->dc_event_log_size, CXL_DC_EVENT_LOG_SIZE);

    *len_out = sizeof(*id);
    return CXL_MBOX_SUCCESS;
}
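/*
 * Capacity fields above are in units of CXL_CAPACITY_MULTIPLIER (256 MiB),
 * hence the divisions. For example, a device with 512 MiB of volatile and
 * 256 MiB of persistent capacity reports volatile_capacity == 2 and
 * persistent_capacity == 1.
 */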
/* CXL r3.1 Section 8.2.9.9.2.1: Get Partition Info (Opcode 4100h) */
static CXLRetCode cmd_ccls_get_partition_info(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    struct {
        uint64_t active_vmem;
        uint64_t active_pmem;
        uint64_t next_vmem;
        uint64_t next_pmem;
    } QEMU_PACKED *part_info = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20);
    CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    stq_le_p(&part_info->active_vmem,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    /*
     * When both next_vmem and next_pmem are 0, there is no pending change
     * to partitioning.
     */
    stq_le_p(&part_info->next_vmem, 0);
    stq_le_p(&part_info->active_pmem,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&part_info->next_pmem, 0);

    *len_out = sizeof(*part_info);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.2.3: Get LSA (Opcode 4102h) */
static CXLRetCode cmd_ccls_get_lsa(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct {
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED *get_lsa;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    uint64_t offset, length;

    get_lsa = (void *)payload_in;
    offset = get_lsa->offset;
    length = get_lsa->length;

    if (offset + length > cvc->get_lsa_size(ct3d)) {
        *len_out = 0;
        return CXL_MBOX_INVALID_INPUT;
    }

    *len_out = cvc->get_lsa(ct3d, payload_out, length, offset);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.2.4: Set LSA (Opcode 4103h) */
static CXLRetCode cmd_ccls_set_lsa(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct set_lsa_pl {
        uint32_t offset;
        uint32_t rsvd;
        uint8_t data[];
    } QEMU_PACKED;
    struct set_lsa_pl *set_lsa_payload = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    const size_t hdr_len = offsetof(struct set_lsa_pl, data);

    *len_out = 0;
    if (len_in < hdr_len) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (set_lsa_payload->offset + len_in > cvc->get_lsa_size(ct3d) + hdr_len) {
        return CXL_MBOX_INVALID_INPUT;
    }
    len_in -= hdr_len;

    cvc->set_lsa(ct3d, set_lsa_payload->data, len_in, set_lsa_payload->offset);
    return CXL_MBOX_SUCCESS;
}
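/*
 * A worked example of the Set LSA bounds check above (LSA size
 * illustrative): with get_lsa_size() == 64 and hdr_len == 8, a payload
 * with offset == 56 and len_in == 16 writes the last 8 bytes of the LSA
 * (56 + 16 <= 64 + 8), while offset == 60 with the same length would run
 * past the end and fail with Invalid Input.
 */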
/* CXL r3.2 Section 8.2.10.9.3.2: Get Alert Configuration (Opcode 4201h) */
static CXLRetCode cmd_get_alert_config(const struct cxl_cmd *cmd,
                                       uint8_t *payload_in,
                                       size_t len_in,
                                       uint8_t *payload_out,
                                       size_t *len_out,
                                       CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLAlertConfig *out = (CXLAlertConfig *)payload_out;

    memcpy(out, &ct3d->alert_config, sizeof(ct3d->alert_config));
    *len_out = sizeof(ct3d->alert_config);

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.2 Section 8.2.10.9.3.3: Set Alert Configuration (Opcode 4202h) */
static CXLRetCode cmd_set_alert_config(const struct cxl_cmd *cmd,
                                       uint8_t *payload_in,
                                       size_t len_in,
                                       uint8_t *payload_out,
                                       size_t *len_out,
                                       CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLAlertConfig *alert_config = &ct3d->alert_config;
    struct {
        uint8_t valid_alert_actions;
        uint8_t enable_alert_actions;
        uint8_t life_used_warn_thresh;
        uint8_t rsvd;
        uint16_t over_temp_warn_thresh;
        uint16_t under_temp_warn_thresh;
        uint16_t cor_vmem_err_warn_thresh;
        uint16_t cor_pmem_err_warn_thresh;
    } QEMU_PACKED *in = (void *)payload_in;

    if (in->valid_alert_actions & CXL_ALERTS_LIFE_USED_WARN_THRESH) {
        /*
         * CXL r3.2 Table 8-149: The life used warning threshold shall be
         * less than the life used critical alert value.
         */
        if (in->life_used_warn_thresh >=
            alert_config->life_used_crit_alert_thresh) {
            return CXL_MBOX_INVALID_INPUT;
        }
        alert_config->life_used_warn_thresh = in->life_used_warn_thresh;
        alert_config->enable_alerts |= CXL_ALERTS_LIFE_USED_WARN_THRESH;
    }

    if (in->valid_alert_actions & CXL_ALERTS_OVER_TEMP_WARN_THRESH) {
        /*
         * CXL r3.2 Table 8-149: The Device Over-Temperature Warning
         * Threshold shall be less than the Device Over-Temperature
         * Critical Alert Threshold.
         */
        if (in->over_temp_warn_thresh >=
            alert_config->over_temp_crit_alert_thresh) {
            return CXL_MBOX_INVALID_INPUT;
        }
        alert_config->over_temp_warn_thresh = in->over_temp_warn_thresh;
        alert_config->enable_alerts |= CXL_ALERTS_OVER_TEMP_WARN_THRESH;
    }

    if (in->valid_alert_actions & CXL_ALERTS_UNDER_TEMP_WARN_THRESH) {
        /*
         * CXL r3.2 Table 8-149: The Device Under-Temperature Warning
         * Threshold shall be higher than the Device Under-Temperature
         * Critical Alert Threshold.
         */
        if (in->under_temp_warn_thresh <=
            alert_config->under_temp_crit_alert_thresh) {
            return CXL_MBOX_INVALID_INPUT;
        }
        alert_config->under_temp_warn_thresh = in->under_temp_warn_thresh;
        alert_config->enable_alerts |= CXL_ALERTS_UNDER_TEMP_WARN_THRESH;
    }

    if (in->valid_alert_actions & CXL_ALERTS_COR_VMEM_ERR_WARN_THRESH) {
        alert_config->cor_vmem_err_warn_thresh = in->cor_vmem_err_warn_thresh;
        alert_config->enable_alerts |= CXL_ALERTS_COR_VMEM_ERR_WARN_THRESH;
    }

    if (in->valid_alert_actions & CXL_ALERTS_COR_PMEM_ERR_WARN_THRESH) {
        alert_config->cor_pmem_err_warn_thresh = in->cor_pmem_err_warn_thresh;
        alert_config->enable_alerts |= CXL_ALERTS_COR_PMEM_ERR_WARN_THRESH;
    }
    return CXL_MBOX_SUCCESS;
}

/* Perform the actual device zeroing */
static void __do_sanitization(CXLType3Dev *ct3d)
{
    MemoryRegion *mr;

    if (ct3d->hostvmem) {
        mr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (mr) {
            void *hostmem = memory_region_get_ram_ptr(mr);
            memset(hostmem, 0, memory_region_size(mr));
        }
    }

    if (ct3d->hostpmem) {
        mr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (mr) {
            void *hostmem = memory_region_get_ram_ptr(mr);
            memset(hostmem, 0, memory_region_size(mr));
        }
    }
    if (ct3d->lsa) {
        mr = host_memory_backend_get_memory(ct3d->lsa);
        if (mr) {
            void *lsa = memory_region_get_ram_ptr(mr);
            memset(lsa, 0, memory_region_size(mr));
        }
    }
    cxl_discard_all_event_records(&ct3d->cxl_dstate);
}

static int get_sanitize_duration(uint64_t total_mem)
{
    int secs = 0;

    if (total_mem <= 512) {
        secs = 4;
    } else if (total_mem <= 1024) {
        secs = 8;
    } else if (total_mem <= 2 * 1024) {
        secs = 15;
    } else if (total_mem <= 4 * 1024) {
        secs = 30;
    } else if (total_mem <= 8 * 1024) {
        secs = 60;
    } else if (total_mem <= 16 * 1024) {
        secs = 2 * 60;
    } else if (total_mem <= 32 * 1024) {
        secs = 4 * 60;
    } else if (total_mem <= 64 * 1024) {
        secs = 8 * 60;
    } else if (total_mem <= 128 * 1024) {
        secs = 15 * 60;
    } else if (total_mem <= 256 * 1024) {
        secs = 30 * 60;
    } else if (total_mem <= 512 * 1024) {
        secs = 60 * 60;
    } else if (total_mem <= 1024 * 1024) {
        secs = 120 * 60;
    } else {
        secs = 240 * 60; /* max 4 hrs */
    }

    return secs;
}
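/*
 * A few illustrative points from the table above (total_mem is in MiB):
 * 1 GiB sanitizes in 8 s, 4 GiB in 30 s, 64 GiB in 8 min, and anything
 * over 1 TiB is capped at the 4 hour maximum.
 */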
/*
 * CXL r3.1 Section 8.2.9.9.5.1: Sanitize (Opcode 4400h)
 *
 * Once the Sanitize command has started successfully, the device shall be
 * placed in the media disabled state. If the command fails or is interrupted
 * by a reset or power failure, it shall remain in the media disabled state
 * until a successful Sanitize command has been completed. During this state:
 *
 * 1. Memory writes to the device will have no effect, and all memory reads
 * will return random values (no user data returned, even for locations that
 * the failed Sanitize operation didn't sanitize yet).
 *
 * 2. Mailbox commands shall still be processed in the disabled state, except
 * that commands that access Sanitized areas shall fail with the Media
 * Disabled error code.
 */
static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint64_t total_mem; /* in MiB */
    int secs;

    total_mem = (ct3d->cxl_dstate.vmem_size + ct3d->cxl_dstate.pmem_size) >> 20;
    secs = get_sanitize_duration(total_mem);

    /* EBUSY other bg cmds as of now */
    cci->bg.runtime = secs * 1000UL;
    *len_out = 0;

    cxl_dev_disable_media(&ct3d->cxl_dstate);

    /* sanitize when done */
    return CXL_MBOX_BG_STARTED;
}

struct dpa_range_list_entry {
    uint64_t starting_dpa;
    uint64_t length;
} QEMU_PACKED;

struct CXLSanitizeInfo {
    uint32_t dpa_range_count;
    uint8_t fill_value;
    struct dpa_range_list_entry dpa_range_list[];
} QEMU_PACKED;

static uint64_t get_vmr_size(CXLType3Dev *ct3d, MemoryRegion **vmr)
{
    MemoryRegion *mr;

    if (ct3d->hostvmem) {
        mr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (vmr) {
            *vmr = mr;
        }
        return memory_region_size(mr);
    }
    return 0;
}

static uint64_t get_pmr_size(CXLType3Dev *ct3d, MemoryRegion **pmr)
{
    MemoryRegion *mr;

    if (ct3d->hostpmem) {
        mr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (pmr) {
            *pmr = mr;
        }
        return memory_region_size(mr);
    }
    return 0;
}

static uint64_t get_dc_size(CXLType3Dev *ct3d, MemoryRegion **dc_mr)
{
    MemoryRegion *mr;

    if (ct3d->dc.host_dc) {
        mr = host_memory_backend_get_memory(ct3d->dc.host_dc);
        if (dc_mr) {
            *dc_mr = mr;
        }
        return memory_region_size(mr);
    }
    return 0;
}

static int validate_dpa_addr(CXLType3Dev *ct3d, uint64_t dpa_addr,
                             size_t length)
{
    uint64_t vmr_size, pmr_size, dc_size;

    if ((dpa_addr % CXL_CACHE_LINE_SIZE) ||
        (length % CXL_CACHE_LINE_SIZE) ||
        (length <= 0)) {
        return -EINVAL;
    }

    vmr_size = get_vmr_size(ct3d, NULL);
    pmr_size = get_pmr_size(ct3d, NULL);
    dc_size = get_dc_size(ct3d, NULL);

    if (dpa_addr + length > vmr_size + pmr_size + dc_size) {
        return -EINVAL;
    }

    if (dpa_addr > vmr_size + pmr_size) {
        if (!ct3_test_region_block_backed(ct3d, dpa_addr, length)) {
            return -ENODEV;
        }
    }

    return 0;
}

static int sanitize_range(CXLType3Dev *ct3d, uint64_t dpa_addr, size_t length,
                          uint8_t fill_value)
{
    uint64_t vmr_size, pmr_size;
    AddressSpace *as = NULL;
    MemTxAttrs mem_attrs = {};

    vmr_size = get_vmr_size(ct3d, NULL);
    pmr_size = get_pmr_size(ct3d, NULL);

    if (dpa_addr < vmr_size) {
        as = &ct3d->hostvmem_as;
    } else if (dpa_addr < vmr_size + pmr_size) {
        as = &ct3d->hostpmem_as;
    } else {
        if (!ct3_test_region_block_backed(ct3d, dpa_addr, length)) {
            return -ENODEV;
        }
        as = &ct3d->dc.host_dc_as;
    }

    return address_space_set(as, dpa_addr, fill_value, length, mem_attrs);
}
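/*
 * A sketch of the DPA routing used by sanitize_range() above. The flat
 * device physical address space is volatile, then persistent, then
 * dynamic capacity (sizes illustrative):
 *
 *    vmr_size = 1 GiB, pmr_size = 1 GiB
 *    dpa 0x00000000 -> hostvmem_as
 *    dpa 0x50000000 -> hostpmem_as  (inside [1 GiB, 2 GiB))
 *    dpa 0x90000000 -> host_dc_as   (beyond 2 GiB; must also be backed
 *                                    by an accepted DC extent)
 */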
AddressSpace *as = NULL; 1904 MemTxAttrs mem_attrs = {}; 1905 1906 vmr_size = get_vmr_size(ct3d, NULL); 1907 pmr_size = get_pmr_size(ct3d, NULL); 1908 1909 if (dpa_addr < vmr_size) { 1910 as = &ct3d->hostvmem_as; 1911 } else if (dpa_addr < vmr_size + pmr_size) { 1912 as = &ct3d->hostpmem_as; 1913 } else { 1914 if (!ct3_test_region_block_backed(ct3d, dpa_addr, length)) { 1915 return -ENODEV; 1916 } 1917 as = &ct3d->dc.host_dc_as; 1918 } 1919 1920 return address_space_set(as, dpa_addr, fill_value, length, mem_attrs); 1921 } 1922 1923 /* Perform the actual device zeroing */ 1924 static void __do_sanitize(CXLType3Dev *ct3d) 1925 { 1926 struct CXLSanitizeInfo *san_info = ct3d->media_op_sanitize; 1927 int dpa_range_count = san_info->dpa_range_count; 1928 int rc = 0; 1929 int i; 1930 1931 for (i = 0; i < dpa_range_count; i++) { 1932 rc = sanitize_range(ct3d, san_info->dpa_range_list[i].starting_dpa, 1933 san_info->dpa_range_list[i].length, 1934 san_info->fill_value); 1935 if (rc) { 1936 goto exit; 1937 } 1938 } 1939 exit: 1940 g_free(ct3d->media_op_sanitize); 1941 ct3d->media_op_sanitize = NULL; 1942 return; 1943 } 1944 1945 enum { 1946 MEDIA_OP_CLASS_GENERAL = 0x0, 1947 #define MEDIA_OP_GEN_SUBC_DISCOVERY 0x0 1948 MEDIA_OP_CLASS_SANITIZE = 0x1, 1949 #define MEDIA_OP_SAN_SUBC_SANITIZE 0x0 1950 #define MEDIA_OP_SAN_SUBC_ZERO 0x1 1951 }; 1952 1953 struct media_op_supported_list_entry { 1954 uint8_t media_op_class; 1955 uint8_t media_op_subclass; 1956 }; 1957 1958 struct media_op_discovery_out_pl { 1959 uint64_t dpa_range_granularity; 1960 uint16_t total_supported_operations; 1961 uint16_t num_of_supported_operations; 1962 struct media_op_supported_list_entry entry[]; 1963 } QEMU_PACKED; 1964 1965 static const struct media_op_supported_list_entry media_op_matrix[] = { 1966 { MEDIA_OP_CLASS_GENERAL, MEDIA_OP_GEN_SUBC_DISCOVERY }, 1967 { MEDIA_OP_CLASS_SANITIZE, MEDIA_OP_SAN_SUBC_SANITIZE }, 1968 { MEDIA_OP_CLASS_SANITIZE, MEDIA_OP_SAN_SUBC_ZERO }, 1969 }; 1970 1971 static CXLRetCode media_operations_discovery(uint8_t *payload_in, 1972 size_t len_in, 1973 uint8_t *payload_out, 1974 size_t *len_out) 1975 { 1976 struct { 1977 uint8_t media_operation_class; 1978 uint8_t media_operation_subclass; 1979 uint8_t rsvd[2]; 1980 uint32_t dpa_range_count; 1981 struct { 1982 uint16_t start_index; 1983 uint16_t num_ops; 1984 } discovery_osa; 1985 } QEMU_PACKED *media_op_in_disc_pl = (void *)payload_in; 1986 struct media_op_discovery_out_pl *media_out_pl = 1987 (struct media_op_discovery_out_pl *)payload_out; 1988 int num_ops, start_index, i; 1989 int count = 0; 1990 1991 if (len_in < sizeof(*media_op_in_disc_pl)) { 1992 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 1993 } 1994 1995 num_ops = media_op_in_disc_pl->discovery_osa.num_ops; 1996 start_index = media_op_in_disc_pl->discovery_osa.start_index; 1997 1998 /* 1999 * As per spec CXL r3.2 8.2.10.9.5.3 dpa_range_count should be zero and 2000 * start index should not exceed the total number of entries for discovery 2001 * sub class command. 
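 * For example, with the three entries in media_op_matrix below, a request
 * with start_index 1 and num_ops 2 returns the two sanitize entries.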
2002 */ 2003 if (media_op_in_disc_pl->dpa_range_count || 2004 start_index > ARRAY_SIZE(media_op_matrix)) { 2005 return CXL_MBOX_INVALID_INPUT; 2006 } 2007 2008 media_out_pl->dpa_range_granularity = CXL_CACHE_LINE_SIZE; 2009 media_out_pl->total_supported_operations = 2010 ARRAY_SIZE(media_op_matrix); 2011 if (num_ops > 0) { 2012 for (i = start_index; i < ARRAY_SIZE(media_op_matrix); i++) { 2013 media_out_pl->entry[count].media_op_class = 2014 media_op_matrix[i].media_op_class; 2015 media_out_pl->entry[count].media_op_subclass = 2016 media_op_matrix[i].media_op_subclass; 2017 count++; 2018 if (count == num_ops) { 2019 break; 2020 } 2021 } 2022 } 2023 2024 media_out_pl->num_of_supported_operations = count; 2025 *len_out = sizeof(*media_out_pl) + count * sizeof(*media_out_pl->entry); 2026 return CXL_MBOX_SUCCESS; 2027 } 2028 2029 static CXLRetCode media_operations_sanitize(CXLType3Dev *ct3d, 2030 uint8_t *payload_in, 2031 size_t len_in, 2032 uint8_t *payload_out, 2033 size_t *len_out, 2034 uint8_t fill_value, 2035 CXLCCI *cci) 2036 { 2037 struct media_operations_sanitize { 2038 uint8_t media_operation_class; 2039 uint8_t media_operation_subclass; 2040 uint8_t rsvd[2]; 2041 uint32_t dpa_range_count; 2042 struct dpa_range_list_entry dpa_range_list[]; 2043 } QEMU_PACKED *media_op_in_sanitize_pl = (void *)payload_in; 2044 uint32_t dpa_range_count = media_op_in_sanitize_pl->dpa_range_count; 2045 uint64_t total_mem = 0; 2046 size_t dpa_range_list_size; 2047 int secs = 0, i; 2048 2049 if (dpa_range_count == 0) { 2050 return CXL_MBOX_SUCCESS; 2051 } 2052 2053 dpa_range_list_size = dpa_range_count * sizeof(struct dpa_range_list_entry); 2054 if (len_in < (sizeof(*media_op_in_sanitize_pl) + dpa_range_list_size)) { 2055 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 2056 } 2057 2058 for (i = 0; i < dpa_range_count; i++) { 2059 uint64_t start_dpa = 2060 media_op_in_sanitize_pl->dpa_range_list[i].starting_dpa; 2061 uint64_t length = media_op_in_sanitize_pl->dpa_range_list[i].length; 2062 2063 if (validate_dpa_addr(ct3d, start_dpa, length)) { 2064 return CXL_MBOX_INVALID_INPUT; 2065 } 2066 total_mem += length; 2067 } 2068 ct3d->media_op_sanitize = g_malloc0(sizeof(struct CXLSanitizeInfo) + 2069 dpa_range_list_size); 2070 2071 ct3d->media_op_sanitize->dpa_range_count = dpa_range_count; 2072 ct3d->media_op_sanitize->fill_value = fill_value; 2073 memcpy(ct3d->media_op_sanitize->dpa_range_list, 2074 media_op_in_sanitize_pl->dpa_range_list, 2075 dpa_range_list_size); 2076 secs = get_sanitize_duration(total_mem >> 20); 2077 2078 /* EBUSY other bg cmds as of now */ 2079 cci->bg.runtime = secs * 1000UL; 2080 *len_out = 0; 2081 /* 2082 * media op sanitize is targeted so no need to disable media or 2083 * clear event logs 2084 */ 2085 return CXL_MBOX_BG_STARTED; 2086 } 2087 2088 static CXLRetCode cmd_media_operations(const struct cxl_cmd *cmd, 2089 uint8_t *payload_in, 2090 size_t len_in, 2091 uint8_t *payload_out, 2092 size_t *len_out, 2093 CXLCCI *cci) 2094 { 2095 struct { 2096 uint8_t media_operation_class; 2097 uint8_t media_operation_subclass; 2098 uint8_t rsvd[2]; 2099 uint32_t dpa_range_count; 2100 } QEMU_PACKED *media_op_in_common_pl = (void *)payload_in; 2101 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2102 uint8_t media_op_cl = 0; 2103 uint8_t media_op_subclass = 0; 2104 2105 if (len_in < sizeof(*media_op_in_common_pl)) { 2106 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 2107 } 2108 2109 media_op_cl = media_op_in_common_pl->media_operation_class; 2110 media_op_subclass = media_op_in_common_pl->media_operation_subclass; 2111 2112
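    /*
     * Dispatch summary (see the switch below): class GENERAL only
     * implements DISCOVERY; class SANITIZE implements SANITIZE (fill
     * byte 0xF) and ZERO (fill byte 0x0). Anything else is unsupported.
     */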
switch (media_op_cl) { 2113 case MEDIA_OP_CLASS_GENERAL: 2114 if (media_op_subclass != MEDIA_OP_GEN_SUBC_DISCOVERY) { 2115 return CXL_MBOX_UNSUPPORTED; 2116 } 2117 2118 return media_operations_discovery(payload_in, len_in, payload_out, 2119 len_out); 2120 case MEDIA_OP_CLASS_SANITIZE: 2121 switch (media_op_subclass) { 2122 case MEDIA_OP_SAN_SUBC_SANITIZE: 2123 return media_operations_sanitize(ct3d, payload_in, len_in, 2124 payload_out, len_out, 0xF, 2125 cci); 2126 case MEDIA_OP_SAN_SUBC_ZERO: 2127 return media_operations_sanitize(ct3d, payload_in, len_in, 2128 payload_out, len_out, 0, 2129 cci); 2130 default: 2131 return CXL_MBOX_UNSUPPORTED; 2132 } 2133 default: 2134 return CXL_MBOX_UNSUPPORTED; 2135 } 2136 } 2137 2138 static CXLRetCode cmd_get_security_state(const struct cxl_cmd *cmd, 2139 uint8_t *payload_in, 2140 size_t len_in, 2141 uint8_t *payload_out, 2142 size_t *len_out, 2143 CXLCCI *cci) 2144 { 2145 uint32_t *state = (uint32_t *)payload_out; 2146 2147 *state = 0; 2148 *len_out = 4; 2149 return CXL_MBOX_SUCCESS; 2150 } 2151 2152 /* 2153 * CXL r3.1 Section 8.2.9.9.4.1: Get Poison List (Opcode 4300h) 2154 * 2155 * This is very inefficient, but good enough for now! 2156 * Also the payload will always fit, so no need to handle the MORE flag and 2157 * make this stateful. We may want to allow longer poison lists to aid 2158 * testing that kernel functionality. 2159 */ 2160 static CXLRetCode cmd_media_get_poison_list(const struct cxl_cmd *cmd, 2161 uint8_t *payload_in, 2162 size_t len_in, 2163 uint8_t *payload_out, 2164 size_t *len_out, 2165 CXLCCI *cci) 2166 { 2167 struct get_poison_list_pl { 2168 uint64_t pa; 2169 uint64_t length; 2170 } QEMU_PACKED; 2171 2172 struct get_poison_list_out_pl { 2173 uint8_t flags; 2174 uint8_t rsvd1; 2175 uint64_t overflow_timestamp; 2176 uint16_t count; 2177 uint8_t rsvd2[0x14]; 2178 struct { 2179 uint64_t addr; 2180 uint32_t length; 2181 uint32_t resv; 2182 } QEMU_PACKED records[]; 2183 } QEMU_PACKED; 2184 2185 struct get_poison_list_pl *in = (void *)payload_in; 2186 struct get_poison_list_out_pl *out = (void *)payload_out; 2187 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2188 uint16_t record_count = 0, i = 0; 2189 uint64_t query_start, query_length; 2190 CXLPoisonList *poison_list = &ct3d->poison_list; 2191 CXLPoison *ent; 2192 uint16_t out_pl_len; 2193 2194 query_start = ldq_le_p(&in->pa); 2195 /* 64 byte alignment required */ 2196 if (query_start & 0x3f) { 2197 return CXL_MBOX_INVALID_INPUT; 2198 } 2199 query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE; 2200 2201 QLIST_FOREACH(ent, poison_list, node) { 2202 /* Check for no overlap */ 2203 if (!ranges_overlap(ent->start, ent->length, 2204 query_start, query_length)) { 2205 continue; 2206 } 2207 record_count++; 2208 } 2209 out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]); 2210 assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE); 2211 2212 QLIST_FOREACH(ent, poison_list, node) { 2213 uint64_t start, stop; 2214 2215 /* Check for no overlap */ 2216 if (!ranges_overlap(ent->start, ent->length, 2217 query_start, query_length)) { 2218 continue; 2219 } 2220 2221 /* Deal with overlap */ 2222 start = MAX(ROUND_DOWN(ent->start, 64ull), query_start); 2223 stop = MIN(ROUND_DOWN(ent->start, 64ull) + ent->length, 2224 query_start + query_length); 2225 stq_le_p(&out->records[i].addr, start | (ent->type & 0x7)); 2226 stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE); 2227 i++; 2228 } 2229 if (ct3d->poison_list_overflowed) { 2230 out->flags = (1 << 1); 2231 
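        /* Flags bit 1: Poison List Overflow; the timestamp stored below records when the overflow occurred. */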
stq_le_p(&out->overflow_timestamp, ct3d->poison_list_overflow_ts); 2232 } 2233 if (scan_media_running(cci)) { 2234 out->flags |= (1 << 2); 2235 } 2236 2237 stw_le_p(&out->count, record_count); 2238 *len_out = out_pl_len; 2239 return CXL_MBOX_SUCCESS; 2240 } 2241 2242 /* CXL r3.1 Section 8.2.9.9.4.2: Inject Poison (Opcode 4301h) */ 2243 static CXLRetCode cmd_media_inject_poison(const struct cxl_cmd *cmd, 2244 uint8_t *payload_in, 2245 size_t len_in, 2246 uint8_t *payload_out, 2247 size_t *len_out, 2248 CXLCCI *cci) 2249 { 2250 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2251 CXLPoisonList *poison_list = &ct3d->poison_list; 2252 CXLPoison *ent; 2253 struct inject_poison_pl { 2254 uint64_t dpa; 2255 }; 2256 struct inject_poison_pl *in = (void *)payload_in; 2257 uint64_t dpa = ldq_le_p(&in->dpa); 2258 CXLPoison *p; 2259 2260 QLIST_FOREACH(ent, poison_list, node) { 2261 if (dpa >= ent->start && 2262 dpa + CXL_CACHE_LINE_SIZE <= ent->start + ent->length) { 2263 return CXL_MBOX_SUCCESS; 2264 } 2265 } 2266 /* 2267 * Freeze the list if there is an on-going scan media operation. 2268 */ 2269 if (scan_media_running(cci)) { 2270 /* 2271 * XXX: Spec is ambiguous - is this case considered 2272 * a successful return despite not adding to the list? 2273 */ 2274 goto success; 2275 } 2276 2277 if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) { 2278 return CXL_MBOX_INJECT_POISON_LIMIT; 2279 } 2280 p = g_new0(CXLPoison, 1); 2281 2282 p->length = CXL_CACHE_LINE_SIZE; 2283 p->start = dpa; 2284 p->type = CXL_POISON_TYPE_INJECTED; 2285 2286 /* 2287 * Possible todo: Merge with existing entry if next to it and if same type 2288 */ 2289 QLIST_INSERT_HEAD(poison_list, p, node); 2290 ct3d->poison_list_cnt++; 2291 success: 2292 *len_out = 0; 2293 2294 return CXL_MBOX_SUCCESS; 2295 } 2296 2297 /* CXL r3.1 Section 8.2.9.9.4.3: Clear Poison (Opcode 4302h) */ 2298 static CXLRetCode cmd_media_clear_poison(const struct cxl_cmd *cmd, 2299 uint8_t *payload_in, 2300 size_t len_in, 2301 uint8_t *payload_out, 2302 size_t *len_out, 2303 CXLCCI *cci) 2304 { 2305 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2306 CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; 2307 CXLPoisonList *poison_list = &ct3d->poison_list; 2308 CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); 2309 struct clear_poison_pl { 2310 uint64_t dpa; 2311 uint8_t data[64]; 2312 }; 2313 CXLPoison *ent; 2314 uint64_t dpa; 2315 2316 struct clear_poison_pl *in = (void *)payload_in; 2317 2318 dpa = ldq_le_p(&in->dpa); 2319 if (dpa + CXL_CACHE_LINE_SIZE > cxl_dstate->static_mem_size + 2320 ct3d->dc.total_capacity) { 2321 return CXL_MBOX_INVALID_PA; 2322 } 2323 2324 /* Clearing a region with no poison is not an error so always do so */ 2325 if (cvc->set_cacheline) { 2326 if (!cvc->set_cacheline(ct3d, dpa, in->data)) { 2327 return CXL_MBOX_INTERNAL_ERROR; 2328 } 2329 } 2330 2331 /* 2332 * Freeze the list if there is an on-going scan media operation. 2333 */ 2334 if (scan_media_running(cci)) { 2335 /* 2336 * XXX: Spec is ambiguous - is this case considered 2337 * a successful return despite not removing from the list? 2338 */ 2339 goto success; 2340 } 2341 2342 QLIST_FOREACH(ent, poison_list, node) { 2343 /* 2344 * Test for contained in entry.
Simpler than general case 2345 * as clearing 64 bytes and entries 64 byte aligned 2346 */ 2347 if ((dpa >= ent->start) && (dpa < ent->start + ent->length)) { 2348 break; 2349 } 2350 } 2351 if (!ent) { 2352 goto success; 2353 } 2354 2355 QLIST_REMOVE(ent, node); 2356 ct3d->poison_list_cnt--; 2357 2358 if (dpa > ent->start) { 2359 CXLPoison *frag; 2360 /* Cannot overflow as replacing existing entry */ 2361 2362 frag = g_new0(CXLPoison, 1); 2363 2364 frag->start = ent->start; 2365 frag->length = dpa - ent->start; 2366 frag->type = ent->type; 2367 2368 QLIST_INSERT_HEAD(poison_list, frag, node); 2369 ct3d->poison_list_cnt++; 2370 } 2371 2372 if (dpa + CXL_CACHE_LINE_SIZE < ent->start + ent->length) { 2373 CXLPoison *frag; 2374 2375 if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) { 2376 cxl_set_poison_list_overflowed(ct3d); 2377 } else { 2378 frag = g_new0(CXLPoison, 1); 2379 2380 frag->start = dpa + CXL_CACHE_LINE_SIZE; 2381 frag->length = ent->start + ent->length - frag->start; 2382 frag->type = ent->type; 2383 QLIST_INSERT_HEAD(poison_list, frag, node); 2384 ct3d->poison_list_cnt++; 2385 } 2386 } 2387 /* Any fragments have been added, free original entry */ 2388 g_free(ent); 2389 success: 2390 *len_out = 0; 2391 2392 return CXL_MBOX_SUCCESS; 2393 } 2394 2395 /* 2396 * CXL r3.1 section 8.2.9.9.4.4: Get Scan Media Capabilities 2397 */ 2398 static CXLRetCode 2399 cmd_media_get_scan_media_capabilities(const struct cxl_cmd *cmd, 2400 uint8_t *payload_in, 2401 size_t len_in, 2402 uint8_t *payload_out, 2403 size_t *len_out, 2404 CXLCCI *cci) 2405 { 2406 struct get_scan_media_capabilities_pl { 2407 uint64_t pa; 2408 uint64_t length; 2409 } QEMU_PACKED; 2410 2411 struct get_scan_media_capabilities_out_pl { 2412 uint32_t estimated_runtime_ms; 2413 }; 2414 2415 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2416 CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; 2417 struct get_scan_media_capabilities_pl *in = (void *)payload_in; 2418 struct get_scan_media_capabilities_out_pl *out = (void *)payload_out; 2419 uint64_t query_start; 2420 uint64_t query_length; 2421 2422 query_start = ldq_le_p(&in->pa); 2423 /* 64 byte alignment required */ 2424 if (query_start & 0x3f) { 2425 return CXL_MBOX_INVALID_INPUT; 2426 } 2427 query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE; 2428 2429 if (query_start + query_length > cxl_dstate->static_mem_size) { 2430 return CXL_MBOX_INVALID_PA; 2431 } 2432 2433 /* 2434 * Just use 400 nanosecond access/read latency + 100 ns for 2435 * the cost of updating the poison list. For small enough 2436 * chunks return at least 1 ms. 
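     * (0.0005 ms per 64-byte cacheline is 500 ns; a 256 MiB query, for
     * example, covers 4194304 cachelines and so reports ~2097 ms.)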
2437 */ 2438 stl_le_p(&out->estimated_runtime_ms, 2439 MAX(1, query_length * (0.0005L / 64))); 2440 2441 *len_out = sizeof(*out); 2442 return CXL_MBOX_SUCCESS; 2443 } 2444 2445 static void __do_scan_media(CXLType3Dev *ct3d) 2446 { 2447 CXLPoison *ent; 2448 unsigned int results_cnt = 0; 2449 2450 QLIST_FOREACH(ent, &ct3d->scan_media_results, node) { 2451 results_cnt++; 2452 } 2453 2454 /* only scan media may clear the overflow */ 2455 if (ct3d->poison_list_overflowed && 2456 ct3d->poison_list_cnt == results_cnt) { 2457 cxl_clear_poison_list_overflowed(ct3d); 2458 } 2459 /* scan media has run since last conventional reset */ 2460 ct3d->scan_media_hasrun = true; 2461 } 2462 2463 /* 2464 * CXL r3.1 section 8.2.9.9.4.5: Scan Media 2465 */ 2466 static CXLRetCode cmd_media_scan_media(const struct cxl_cmd *cmd, 2467 uint8_t *payload_in, 2468 size_t len_in, 2469 uint8_t *payload_out, 2470 size_t *len_out, 2471 CXLCCI *cci) 2472 { 2473 struct scan_media_pl { 2474 uint64_t pa; 2475 uint64_t length; 2476 uint8_t flags; 2477 } QEMU_PACKED; 2478 2479 struct scan_media_pl *in = (void *)payload_in; 2480 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2481 CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; 2482 uint64_t query_start; 2483 uint64_t query_length; 2484 CXLPoison *ent, *next; 2485 2486 query_start = ldq_le_p(&in->pa); 2487 /* 64 byte alignment required */ 2488 if (query_start & 0x3f) { 2489 return CXL_MBOX_INVALID_INPUT; 2490 } 2491 query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE; 2492 2493 if (query_start + query_length > cxl_dstate->static_mem_size) { 2494 return CXL_MBOX_INVALID_PA; 2495 } 2496 if (ct3d->dc.num_regions && query_start + query_length >= 2497 cxl_dstate->static_mem_size + ct3d->dc.total_capacity) { 2498 return CXL_MBOX_INVALID_PA; 2499 } 2500 2501 if (in->flags == 0) { /* TODO */ 2502 qemu_log_mask(LOG_UNIMP, 2503 "Scan Media Event Log is unsupported\n"); 2504 } 2505 2506 /* any previous results are discarded upon a new Scan Media */ 2507 QLIST_FOREACH_SAFE(ent, &ct3d->scan_media_results, node, next) { 2508 QLIST_REMOVE(ent, node); 2509 g_free(ent); 2510 } 2511 2512 /* kill the poison list - it will be recreated */ 2513 if (ct3d->poison_list_overflowed) { 2514 QLIST_FOREACH_SAFE(ent, &ct3d->poison_list, node, next) { 2515 QLIST_REMOVE(ent, node); 2516 g_free(ent); 2517 ct3d->poison_list_cnt--; 2518 } 2519 } 2520 2521 /* 2522 * Scan the backup list and move corresponding entries 2523 * into the results list, updating the poison list 2524 * when possible. 2525 */ 2526 QLIST_FOREACH_SAFE(ent, &ct3d->poison_list_bkp, node, next) { 2527 CXLPoison *res; 2528 2529 if (ent->start >= query_start + query_length || 2530 ent->start + ent->length <= query_start) { 2531 continue; 2532 } 2533 2534 /* 2535 * If a Get Poison List cmd comes in while this 2536 * scan is being done, it will see the new complete 2537 * list, while setting the respective flag. 
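     * (That flag is bit 2, Scan Media in Progress, which
     * cmd_media_get_poison_list() sets while scan_media_running() is true.)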
2538 */ 2539 if (ct3d->poison_list_cnt < CXL_POISON_LIST_LIMIT) { 2540 CXLPoison *p = g_new0(CXLPoison, 1); 2541 2542 p->start = ent->start; 2543 p->length = ent->length; 2544 p->type = ent->type; 2545 QLIST_INSERT_HEAD(&ct3d->poison_list, p, node); 2546 ct3d->poison_list_cnt++; 2547 } 2548 2549 res = g_new0(CXLPoison, 1); 2550 res->start = ent->start; 2551 res->length = ent->length; 2552 res->type = ent->type; 2553 QLIST_INSERT_HEAD(&ct3d->scan_media_results, res, node); 2554 2555 QLIST_REMOVE(ent, node); 2556 g_free(ent); 2557 } 2558 2559 cci->bg.runtime = MAX(1, query_length * (0.0005L / 64)); 2560 *len_out = 0; 2561 2562 return CXL_MBOX_BG_STARTED; 2563 } 2564 2565 /* 2566 * CXL r3.1 section 8.2.9.9.4.6: Get Scan Media Results 2567 */ 2568 static CXLRetCode cmd_media_get_scan_media_results(const struct cxl_cmd *cmd, 2569 uint8_t *payload_in, 2570 size_t len_in, 2571 uint8_t *payload_out, 2572 size_t *len_out, 2573 CXLCCI *cci) 2574 { 2575 struct get_scan_media_results_out_pl { 2576 uint64_t dpa_restart; 2577 uint64_t length; 2578 uint8_t flags; 2579 uint8_t rsvd1; 2580 uint16_t count; 2581 uint8_t rsvd2[0xc]; 2582 struct { 2583 uint64_t addr; 2584 uint32_t length; 2585 uint32_t resv; 2586 } QEMU_PACKED records[]; 2587 } QEMU_PACKED; 2588 2589 struct get_scan_media_results_out_pl *out = (void *)payload_out; 2590 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2591 CXLPoisonList *scan_media_results = &ct3d->scan_media_results; 2592 CXLPoison *ent, *next; 2593 uint16_t total_count = 0, record_count = 0, i = 0; 2594 uint16_t out_pl_len; 2595 2596 if (!ct3d->scan_media_hasrun) { 2597 return CXL_MBOX_UNSUPPORTED; 2598 } 2599 2600 /* 2601 * Calculate limits, all entries are within the same address range of the 2602 * last scan media call. 2603 */ 2604 QLIST_FOREACH(ent, scan_media_results, node) { 2605 size_t rec_size = record_count * sizeof(out->records[0]); 2606 2607 if (sizeof(*out) + rec_size < CXL_MAILBOX_MAX_PAYLOAD_SIZE) { 2608 record_count++; 2609 } 2610 total_count++; 2611 } 2612 2613 out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]); 2614 assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE); 2615 2616 memset(out, 0, out_pl_len); 2617 QLIST_FOREACH_SAFE(ent, scan_media_results, node, next) { 2618 uint64_t start, stop; 2619 2620 if (i == record_count) { 2621 break; 2622 } 2623 2624 start = ROUND_DOWN(ent->start, 64ull); 2625 stop = ROUND_DOWN(ent->start, 64ull) + ent->length; 2626 stq_le_p(&out->records[i].addr, start); 2627 stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE); 2628 i++; 2629 2630 /* consume the returning entry */ 2631 QLIST_REMOVE(ent, node); 2632 g_free(ent); 2633 } 2634 2635 stw_le_p(&out->count, record_count); 2636 if (total_count > record_count) { 2637 out->flags = (1 << 0); /* More Media Error Records */ 2638 } 2639 2640 *len_out = out_pl_len; 2641 return CXL_MBOX_SUCCESS; 2642 } 2643 2644 /* 2645 * CXL r3.1 section 8.2.9.9.9.1: Get Dynamic Capacity Configuration 2646 * (Opcode: 4800h) 2647 */ 2648 static CXLRetCode cmd_dcd_get_dyn_cap_config(const struct cxl_cmd *cmd, 2649 uint8_t *payload_in, 2650 size_t len_in, 2651 uint8_t *payload_out, 2652 size_t *len_out, 2653 CXLCCI *cci) 2654 { 2655 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2656 struct { 2657 uint8_t region_cnt; 2658 uint8_t start_rid; 2659 } QEMU_PACKED *in = (void *)payload_in; 2660 struct { 2661 uint8_t num_regions; 2662 uint8_t regions_returned; 2663 uint8_t rsvd1[6]; 2664 struct { 2665 uint64_t base; 2666 uint64_t decode_len; 2667 uint64_t region_len; 2668 uint64_t block_size; 
2669 uint32_t dsmadhandle; 2670 uint8_t flags; 2671 uint8_t rsvd2[3]; 2672 } QEMU_PACKED records[]; 2673 } QEMU_PACKED *out = (void *)payload_out; 2674 struct { 2675 uint32_t num_extents_supported; 2676 uint32_t num_extents_available; 2677 uint32_t num_tags_supported; 2678 uint32_t num_tags_available; 2679 } QEMU_PACKED *extra_out; 2680 uint16_t record_count; 2681 uint16_t i; 2682 uint16_t out_pl_len; 2683 uint8_t start_rid; 2684 2685 start_rid = in->start_rid; 2686 if (start_rid >= ct3d->dc.num_regions) { 2687 return CXL_MBOX_INVALID_INPUT; 2688 } 2689 2690 record_count = MIN(ct3d->dc.num_regions - in->start_rid, in->region_cnt); 2691 2692 out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]); 2693 extra_out = (void *)(payload_out + out_pl_len); 2694 out_pl_len += sizeof(*extra_out); 2695 assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE); 2696 2697 out->num_regions = ct3d->dc.num_regions; 2698 out->regions_returned = record_count; 2699 for (i = 0; i < record_count; i++) { 2700 stq_le_p(&out->records[i].base, 2701 ct3d->dc.regions[start_rid + i].base); 2702 stq_le_p(&out->records[i].decode_len, 2703 ct3d->dc.regions[start_rid + i].decode_len / 2704 CXL_CAPACITY_MULTIPLIER); 2705 stq_le_p(&out->records[i].region_len, 2706 ct3d->dc.regions[start_rid + i].len); 2707 stq_le_p(&out->records[i].block_size, 2708 ct3d->dc.regions[start_rid + i].block_size); 2709 stl_le_p(&out->records[i].dsmadhandle, 2710 ct3d->dc.regions[start_rid + i].dsmadhandle); 2711 out->records[i].flags = ct3d->dc.regions[start_rid + i].flags; 2712 } 2713 /* 2714 * TODO: Assign values once extents and tags are introduced 2715 * to use. 2716 */ 2717 stl_le_p(&extra_out->num_extents_supported, CXL_NUM_EXTENTS_SUPPORTED); 2718 stl_le_p(&extra_out->num_extents_available, CXL_NUM_EXTENTS_SUPPORTED - 2719 ct3d->dc.total_extent_count); 2720 stl_le_p(&extra_out->num_tags_supported, CXL_NUM_TAGS_SUPPORTED); 2721 stl_le_p(&extra_out->num_tags_available, CXL_NUM_TAGS_SUPPORTED); 2722 2723 *len_out = out_pl_len; 2724 return CXL_MBOX_SUCCESS; 2725 } 2726 2727 /* 2728 * CXL r3.1 section 8.2.9.9.9.2: 2729 * Get Dynamic Capacity Extent List (Opcode 4801h) 2730 */ 2731 static CXLRetCode cmd_dcd_get_dyn_cap_ext_list(const struct cxl_cmd *cmd, 2732 uint8_t *payload_in, 2733 size_t len_in, 2734 uint8_t *payload_out, 2735 size_t *len_out, 2736 CXLCCI *cci) 2737 { 2738 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 2739 struct { 2740 uint32_t extent_cnt; 2741 uint32_t start_extent_id; 2742 } QEMU_PACKED *in = (void *)payload_in; 2743 struct { 2744 uint32_t count; 2745 uint32_t total_extents; 2746 uint32_t generation_num; 2747 uint8_t rsvd[4]; 2748 CXLDCExtentRaw records[]; 2749 } QEMU_PACKED *out = (void *)payload_out; 2750 uint32_t start_extent_id = in->start_extent_id; 2751 CXLDCExtentList *extent_list = &ct3d->dc.extents; 2752 uint16_t record_count = 0, i = 0, record_done = 0; 2753 uint16_t out_pl_len, size; 2754 CXLDCExtent *ent; 2755 2756 if (start_extent_id > ct3d->dc.nr_extents_accepted) { 2757 return CXL_MBOX_INVALID_INPUT; 2758 } 2759 2760 record_count = MIN(in->extent_cnt, 2761 ct3d->dc.total_extent_count - start_extent_id); 2762 size = CXL_MAILBOX_MAX_PAYLOAD_SIZE - sizeof(*out); 2763 record_count = MIN(record_count, size / sizeof(out->records[0])); 2764 out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]); 2765 2766 stl_le_p(&out->count, record_count); 2767 stl_le_p(&out->total_extents, ct3d->dc.nr_extents_accepted); 2768 stl_le_p(&out->generation_num, ct3d->dc.ext_list_gen_seq); 2769 2770 if (record_count > 0) { 
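        /* Walk the accepted extents, skip the first start_extent_id entries, then emit up to record_count raw records. */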
2771 CXLDCExtentRaw *out_rec = &out->records[record_done]; 2772 2773 QTAILQ_FOREACH(ent, extent_list, node) { 2774 if (i++ < start_extent_id) { 2775 continue; 2776 } 2777 stq_le_p(&out_rec->start_dpa, ent->start_dpa); 2778 stq_le_p(&out_rec->len, ent->len); 2779 memcpy(&out_rec->tag, ent->tag, 0x10); 2780 stw_le_p(&out_rec->shared_seq, ent->shared_seq); 2781 2782 record_done++; 2783 out_rec++; 2784 if (record_done == record_count) { 2785 break; 2786 } 2787 } 2788 } 2789 2790 *len_out = out_pl_len; 2791 return CXL_MBOX_SUCCESS; 2792 } 2793 2794 /* 2795 * Check whether any bit between addr[nr, nr+size) is set, 2796 * return true if any bit is set, otherwise return false 2797 */ 2798 bool test_any_bits_set(const unsigned long *addr, unsigned long nr, 2799 unsigned long size) 2800 { 2801 unsigned long res = find_next_bit(addr, size + nr, nr); 2802 2803 return res < nr + size; 2804 } 2805 2806 CXLDCRegion *cxl_find_dc_region(CXLType3Dev *ct3d, uint64_t dpa, uint64_t len) 2807 { 2808 int i; 2809 CXLDCRegion *region = &ct3d->dc.regions[0]; 2810 2811 if (dpa < region->base || 2812 dpa >= region->base + ct3d->dc.total_capacity) { 2813 return NULL; 2814 } 2815 2816 /* 2817 * CXL r3.1 section 9.13.3: Dynamic Capacity Device (DCD) 2818 * 2819 * Regions are used in increasing-DPA order, with Region 0 being used for 2820 * the lowest DPA of Dynamic Capacity and Region 7 for the highest DPA. 2821 * So check from the last region to find where the dpa belongs. Extents that 2822 * cross multiple regions are not allowed. 2823 */ 2824 for (i = ct3d->dc.num_regions - 1; i >= 0; i--) { 2825 region = &ct3d->dc.regions[i]; 2826 if (dpa >= region->base) { 2827 if (dpa + len > region->base + region->len) { 2828 return NULL; 2829 } 2830 return region; 2831 } 2832 } 2833 2834 return NULL; 2835 } 2836 2837 void cxl_insert_extent_to_extent_list(CXLDCExtentList *list, 2838 uint64_t dpa, 2839 uint64_t len, 2840 uint8_t *tag, 2841 uint16_t shared_seq) 2842 { 2843 CXLDCExtent *extent; 2844 2845 extent = g_new0(CXLDCExtent, 1); 2846 extent->start_dpa = dpa; 2847 extent->len = len; 2848 if (tag) { 2849 memcpy(extent->tag, tag, 0x10); 2850 } 2851 extent->shared_seq = shared_seq; 2852 2853 QTAILQ_INSERT_TAIL(list, extent, node); 2854 } 2855 2856 void cxl_remove_extent_from_extent_list(CXLDCExtentList *list, 2857 CXLDCExtent *extent) 2858 { 2859 QTAILQ_REMOVE(list, extent, node); 2860 g_free(extent); 2861 } 2862 2863 /* 2864 * Add a new extent to the extent "group" if group exists; 2865 * otherwise, create a new group 2866 * Return value: the extent group where the extent is inserted. 
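 * (Extents offered together stay grouped while pending, so a later Add
 * Dynamic Capacity Response can accept or drop them as a unit; see
 * cxl_extent_group_list_delete_front() and cmd_dcd_add_dyn_cap_rsp().)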
2867 */ 2868 CXLDCExtentGroup *cxl_insert_extent_to_extent_group(CXLDCExtentGroup *group, 2869 uint64_t dpa, 2870 uint64_t len, 2871 uint8_t *tag, 2872 uint16_t shared_seq) 2873 { 2874 if (!group) { 2875 group = g_new0(CXLDCExtentGroup, 1); 2876 QTAILQ_INIT(&group->list); 2877 } 2878 cxl_insert_extent_to_extent_list(&group->list, dpa, len, 2879 tag, shared_seq); 2880 return group; 2881 } 2882 2883 void cxl_extent_group_list_insert_tail(CXLDCExtentGroupList *list, 2884 CXLDCExtentGroup *group) 2885 { 2886 QTAILQ_INSERT_TAIL(list, group, node); 2887 } 2888 2889 uint32_t cxl_extent_group_list_delete_front(CXLDCExtentGroupList *list) 2890 { 2891 CXLDCExtent *ent, *ent_next; 2892 CXLDCExtentGroup *group = QTAILQ_FIRST(list); 2893 uint32_t extents_deleted = 0; 2894 2895 QTAILQ_REMOVE(list, group, node); 2896 QTAILQ_FOREACH_SAFE(ent, &group->list, node, ent_next) { 2897 cxl_remove_extent_from_extent_list(&group->list, ent); 2898 extents_deleted++; 2899 } 2900 g_free(group); 2901 2902 return extents_deleted; 2903 } 2904 2905 /* 2906 * CXL r3.1 Table 8-168: Add Dynamic Capacity Response Input Payload 2907 * CXL r3.1 Table 8-170: Release Dynamic Capacity Input Payload 2908 */ 2909 typedef struct CXLUpdateDCExtentListInPl { 2910 uint32_t num_entries_updated; 2911 uint8_t flags; 2912 uint8_t rsvd[3]; 2913 /* CXL r3.1 Table 8-169: Updated Extent */ 2914 struct { 2915 uint64_t start_dpa; 2916 uint64_t len; 2917 uint8_t rsvd[8]; 2918 } QEMU_PACKED updated_entries[]; 2919 } QEMU_PACKED CXLUpdateDCExtentListInPl; 2920 2921 /* 2922 * Check that the extents in the extent list to be operated on are valid: 2923 * 1. The extent should be in the range of a valid DC region; 2924 * 2. The extent should not cross multiple regions; 2925 * 3. The start DPA and the length of the extent should align with the block 2926 * size of the region; 2927 * 4. The address range of multiple extents in the list should not overlap.
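 * For example, with a 2 MiB region block size, an extent starting at
 * region base + 3 MiB violates rule 3, and two list entries covering
 * the same block violate rule 4.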
2928 */ 2929 static CXLRetCode cxl_detect_malformed_extent_list(CXLType3Dev *ct3d, 2930 const CXLUpdateDCExtentListInPl *in) 2931 { 2932 uint64_t min_block_size = UINT64_MAX; 2933 CXLDCRegion *region; 2934 CXLDCRegion *lastregion = &ct3d->dc.regions[ct3d->dc.num_regions - 1]; 2935 g_autofree unsigned long *blk_bitmap = NULL; 2936 uint64_t dpa, len; 2937 uint32_t i; 2938 2939 for (i = 0; i < ct3d->dc.num_regions; i++) { 2940 region = &ct3d->dc.regions[i]; 2941 min_block_size = MIN(min_block_size, region->block_size); 2942 } 2943 2944 blk_bitmap = bitmap_new((lastregion->base + lastregion->len - 2945 ct3d->dc.regions[0].base) / min_block_size); 2946 2947 for (i = 0; i < in->num_entries_updated; i++) { 2948 dpa = in->updated_entries[i].start_dpa; 2949 len = in->updated_entries[i].len; 2950 2951 region = cxl_find_dc_region(ct3d, dpa, len); 2952 if (!region) { 2953 return CXL_MBOX_INVALID_PA; 2954 } 2955 2956 dpa -= ct3d->dc.regions[0].base; 2957 if (dpa % region->block_size || len % region->block_size) { 2958 return CXL_MBOX_INVALID_EXTENT_LIST; 2959 } 2960 /* The DPA range is already covered by other extents in the list */ 2961 if (test_any_bits_set(blk_bitmap, dpa / min_block_size, 2962 len / min_block_size)) { 2963 return CXL_MBOX_INVALID_EXTENT_LIST; 2964 } 2965 bitmap_set(blk_bitmap, dpa / min_block_size, len / min_block_size); 2966 } 2967 2968 return CXL_MBOX_SUCCESS; 2969 } 2970 2971 static CXLRetCode cxl_dcd_add_dyn_cap_rsp_dry_run(CXLType3Dev *ct3d, 2972 const CXLUpdateDCExtentListInPl *in) 2973 { 2974 uint32_t i; 2975 CXLDCExtent *ent; 2976 CXLDCExtentGroup *ext_group; 2977 uint64_t dpa, len; 2978 Range range1, range2; 2979 2980 for (i = 0; i < in->num_entries_updated; i++) { 2981 dpa = in->updated_entries[i].start_dpa; 2982 len = in->updated_entries[i].len; 2983 2984 range_init_nofail(&range1, dpa, len); 2985 2986 /* 2987 * The host-accepted DPA range must be contained by the first extent 2988 * group in the pending list 2989 */ 2990 ext_group = QTAILQ_FIRST(&ct3d->dc.extents_pending); 2991 if (!cxl_extents_contains_dpa_range(&ext_group->list, dpa, len)) { 2992 return CXL_MBOX_INVALID_PA; 2993 } 2994 2995 /* to-be-added range should not overlap with range already accepted */ 2996 QTAILQ_FOREACH(ent, &ct3d->dc.extents, node) { 2997 range_init_nofail(&range2, ent->start_dpa, ent->len); 2998 if (range_overlaps_range(&range1, &range2)) { 2999 return CXL_MBOX_INVALID_PA; 3000 } 3001 } 3002 } 3003 return CXL_MBOX_SUCCESS; 3004 } 3005 3006 /* 3007 * CXL r3.1 section 8.2.9.9.9.3: Add Dynamic Capacity Response (Opcode 4802h) 3008 * An extent is added to the extent list and becomes usable only after the 3009 * response is processed successfully.
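 * (A response with num_entries_updated == 0 rejects the offered
 * capacity: the handler below simply drops the first pending extent group.)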
3010 */ 3011 static CXLRetCode cmd_dcd_add_dyn_cap_rsp(const struct cxl_cmd *cmd, 3012 uint8_t *payload_in, 3013 size_t len_in, 3014 uint8_t *payload_out, 3015 size_t *len_out, 3016 CXLCCI *cci) 3017 { 3018 CXLUpdateDCExtentListInPl *in = (void *)payload_in; 3019 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 3020 CXLDCExtentList *extent_list = &ct3d->dc.extents; 3021 uint32_t i, num; 3022 uint64_t dpa, len; 3023 CXLRetCode ret; 3024 3025 if (len_in < sizeof(*in)) { 3026 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 3027 } 3028 3029 if (in->num_entries_updated == 0) { 3030 num = cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending); 3031 ct3d->dc.total_extent_count -= num; 3032 return CXL_MBOX_SUCCESS; 3033 } 3034 3035 if (len_in < 3036 sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) { 3037 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 3038 } 3039 3040 /* Adding these extents would exceed the device's extent tracking ability. */ 3041 if (in->num_entries_updated + ct3d->dc.total_extent_count > 3042 CXL_NUM_EXTENTS_SUPPORTED) { 3043 return CXL_MBOX_RESOURCES_EXHAUSTED; 3044 } 3045 3046 ret = cxl_detect_malformed_extent_list(ct3d, in); 3047 if (ret != CXL_MBOX_SUCCESS) { 3048 return ret; 3049 } 3050 3051 ret = cxl_dcd_add_dyn_cap_rsp_dry_run(ct3d, in); 3052 if (ret != CXL_MBOX_SUCCESS) { 3053 return ret; 3054 } 3055 3056 for (i = 0; i < in->num_entries_updated; i++) { 3057 dpa = in->updated_entries[i].start_dpa; 3058 len = in->updated_entries[i].len; 3059 3060 cxl_insert_extent_to_extent_list(extent_list, dpa, len, NULL, 0); 3061 ct3d->dc.total_extent_count += 1; 3062 ct3d->dc.nr_extents_accepted += 1; 3063 ct3_set_region_block_backed(ct3d, dpa, len); 3064 } 3065 /* Remove the first extent group in the pending list */ 3066 num = cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending); 3067 ct3d->dc.total_extent_count -= num; 3068 3069 return CXL_MBOX_SUCCESS; 3070 } 3071 3072 /* 3073 * Copy extent list from src to dst 3074 * Return value: number of extents copied 3075 */ 3076 static uint32_t copy_extent_list(CXLDCExtentList *dst, 3077 const CXLDCExtentList *src) 3078 { 3079 uint32_t cnt = 0; 3080 CXLDCExtent *ent; 3081 3082 if (!dst || !src) { 3083 return 0; 3084 } 3085 3086 QTAILQ_FOREACH(ent, src, node) { 3087 cxl_insert_extent_to_extent_list(dst, ent->start_dpa, ent->len, 3088 ent->tag, ent->shared_seq); 3089 cnt++; 3090 } 3091 return cnt; 3092 } 3093 3094 static CXLRetCode cxl_dc_extent_release_dry_run(CXLType3Dev *ct3d, 3095 const CXLUpdateDCExtentListInPl *in, CXLDCExtentList *updated_list, 3096 uint32_t *updated_list_size) 3097 { 3098 CXLDCExtent *ent, *ent_next; 3099 uint64_t dpa, len; 3100 uint32_t i; 3101 int cnt_delta = 0; 3102 CXLRetCode ret = CXL_MBOX_SUCCESS; 3103 3104 QTAILQ_INIT(updated_list); 3105 copy_extent_list(updated_list, &ct3d->dc.extents); 3106 3107 for (i = 0; i < in->num_entries_updated; i++) { 3108 Range range; 3109 3110 dpa = in->updated_entries[i].start_dpa; 3111 len = in->updated_entries[i].len; 3112 3113 /* Check if the DPA range is not fully backed with valid extents */ 3114 if (!ct3_test_region_block_backed(ct3d, dpa, len)) { 3115 ret = CXL_MBOX_INVALID_PA; 3116 goto free_and_exit; 3117 } 3118 3119 /* After this point, extent overflow is the only error that can happen */ 3120 while (len > 0) { 3121 QTAILQ_FOREACH(ent, updated_list, node) { 3122 range_init_nofail(&range, ent->start_dpa, ent->len); 3123 3124 if (range_contains(&range, dpa)) { 3125 uint64_t len1, len2 = 0, len_done = 0; 3126 uint64_t ent_start_dpa = ent->start_dpa; 3127 uint64_t ent_len =
ent->len; 3128 3129 len1 = dpa - ent->start_dpa; 3130 /* Found the extent or the subset of an existing extent */ 3131 if (range_contains(&range, dpa + len - 1)) { 3132 len2 = ent_start_dpa + ent_len - dpa - len; 3133 } else { 3134 dpa = ent_start_dpa + ent_len; 3135 } 3136 len_done = ent_len - len1 - len2; 3137 3138 cxl_remove_extent_from_extent_list(updated_list, ent); 3139 cnt_delta--; 3140 3141 if (len1) { 3142 cxl_insert_extent_to_extent_list(updated_list, 3143 ent_start_dpa, 3144 len1, NULL, 0); 3145 cnt_delta++; 3146 } 3147 if (len2) { 3148 cxl_insert_extent_to_extent_list(updated_list, 3149 dpa + len, 3150 len2, NULL, 0); 3151 cnt_delta++; 3152 } 3153 3154 if (cnt_delta + ct3d->dc.total_extent_count > 3155 CXL_NUM_EXTENTS_SUPPORTED) { 3156 ret = CXL_MBOX_RESOURCES_EXHAUSTED; 3157 goto free_and_exit; 3158 } 3159 3160 len -= len_done; 3161 break; 3162 } 3163 } 3164 } 3165 } 3166 free_and_exit: 3167 if (ret != CXL_MBOX_SUCCESS) { 3168 QTAILQ_FOREACH_SAFE(ent, updated_list, node, ent_next) { 3169 cxl_remove_extent_from_extent_list(updated_list, ent); 3170 } 3171 *updated_list_size = 0; 3172 } else { 3173 *updated_list_size = ct3d->dc.nr_extents_accepted + cnt_delta; 3174 } 3175 3176 return ret; 3177 } 3178 3179 /* 3180 * CXL r3.1 section 8.2.9.9.9.4: Release Dynamic Capacity (Opcode 4803h) 3181 */ 3182 static CXLRetCode cmd_dcd_release_dyn_cap(const struct cxl_cmd *cmd, 3183 uint8_t *payload_in, 3184 size_t len_in, 3185 uint8_t *payload_out, 3186 size_t *len_out, 3187 CXLCCI *cci) 3188 { 3189 CXLUpdateDCExtentListInPl *in = (void *)payload_in; 3190 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 3191 CXLDCExtentList updated_list; 3192 CXLDCExtent *ent, *ent_next; 3193 uint32_t updated_list_size; 3194 CXLRetCode ret; 3195 3196 if (len_in < sizeof(*in)) { 3197 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 3198 } 3199 3200 if (in->num_entries_updated == 0) { 3201 return CXL_MBOX_INVALID_INPUT; 3202 } 3203 3204 if (len_in < 3205 sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) { 3206 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 3207 } 3208 3209 ret = cxl_detect_malformed_extent_list(ct3d, in); 3210 if (ret != CXL_MBOX_SUCCESS) { 3211 return ret; 3212 } 3213 3214 ret = cxl_dc_extent_release_dry_run(ct3d, in, &updated_list, 3215 &updated_list_size); 3216 if (ret != CXL_MBOX_SUCCESS) { 3217 return ret; 3218 } 3219 3220 /* 3221 * If the dry run passes, updated_list holds the new extent list: 3222 * clear the extents in the accepted list, copy the extents from 3223 * updated_list into the accepted list, and update the extent 3224 * counts accordingly. 3225 */ 3226 QTAILQ_FOREACH_SAFE(ent, &ct3d->dc.extents, node, ent_next) { 3227 ct3_clear_region_block_backed(ct3d, ent->start_dpa, ent->len); 3228 cxl_remove_extent_from_extent_list(&ct3d->dc.extents, ent); 3229 } 3230 copy_extent_list(&ct3d->dc.extents, &updated_list); 3231 QTAILQ_FOREACH_SAFE(ent, &updated_list, node, ent_next) { 3232 ct3_set_region_block_backed(ct3d, ent->start_dpa, ent->len); 3233 cxl_remove_extent_from_extent_list(&updated_list, ent); 3234 } 3235 ct3d->dc.total_extent_count += (updated_list_size - 3236 ct3d->dc.nr_extents_accepted); 3237 3238 ct3d->dc.nr_extents_accepted = updated_list_size; 3239 3240 return CXL_MBOX_SUCCESS; 3241 } 3242 3243 /* CXL r3.2 section 7.6.7.6.1: Get DCD Info (Opcode 5600h) */ 3244 static CXLRetCode cmd_fm_get_dcd_info(const struct cxl_cmd *cmd, 3245 uint8_t *payload_in, 3246 size_t len_in, 3247 uint8_t *payload_out, 3248 size_t *len_out, 3249 CXLCCI *cci) 3250 { 3251
struct { 3252 uint8_t num_hosts; 3253 uint8_t num_regions_supported; 3254 uint8_t rsvd1[2]; 3255 uint16_t supported_add_sel_policy_bitmask; 3256 uint8_t rsvd2[2]; 3257 uint16_t supported_removal_policy_bitmask; 3258 uint8_t sanitize_on_release_bitmask; 3259 uint8_t rsvd3; 3260 uint64_t total_dynamic_capacity; 3261 uint64_t region_blk_size_bitmasks[8]; 3262 } QEMU_PACKED *out = (void *)payload_out; 3263 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 3264 CXLDCRegion *region; 3265 int i; 3266 3267 out->num_hosts = 1; 3268 out->num_regions_supported = ct3d->dc.num_regions; 3269 stw_le_p(&out->supported_add_sel_policy_bitmask, 3270 BIT(CXL_EXTENT_SELECTION_POLICY_PRESCRIPTIVE)); 3271 stw_le_p(&out->supported_removal_policy_bitmask, 3272 BIT(CXL_EXTENT_REMOVAL_POLICY_PRESCRIPTIVE)); 3273 out->sanitize_on_release_bitmask = 0; 3274 3275 stq_le_p(&out->total_dynamic_capacity, 3276 ct3d->dc.total_capacity / CXL_CAPACITY_MULTIPLIER); 3277 3278 for (i = 0; i < ct3d->dc.num_regions; i++) { 3279 region = &ct3d->dc.regions[i]; 3280 memcpy(&out->region_blk_size_bitmasks[i], 3281 &region->supported_blk_size_bitmask, 3282 sizeof(out->region_blk_size_bitmasks[i])); 3283 } 3284 3285 *len_out = sizeof(*out); 3286 return CXL_MBOX_SUCCESS; 3287 } 3288 3289 static const struct cxl_cmd cxl_cmd_set[256][256] = { 3290 [INFOSTAT][BACKGROUND_OPERATION_ABORT] = { "BACKGROUND_OPERATION_ABORT", 3291 cmd_infostat_bg_op_abort, 0, 0 }, 3292 [EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS", 3293 cmd_events_get_records, 1, 0 }, 3294 [EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS", 3295 cmd_events_clear_records, ~0, CXL_MBOX_IMMEDIATE_LOG_CHANGE }, 3296 [EVENTS][GET_INTERRUPT_POLICY] = { "EVENTS_GET_INTERRUPT_POLICY", 3297 cmd_events_get_interrupt_policy, 0, 0 }, 3298 [EVENTS][SET_INTERRUPT_POLICY] = { "EVENTS_SET_INTERRUPT_POLICY", 3299 cmd_events_set_interrupt_policy, 3300 ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE }, 3301 [FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO", 3302 cmd_firmware_update_get_info, 0, 0 }, 3303 [FIRMWARE_UPDATE][TRANSFER] = { "FIRMWARE_UPDATE_TRANSFER", 3304 cmd_firmware_update_transfer, ~0, 3305 CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT }, 3306 [FIRMWARE_UPDATE][ACTIVATE] = { "FIRMWARE_UPDATE_ACTIVATE", 3307 cmd_firmware_update_activate, 2, 3308 CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT }, 3309 [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 }, 3310 [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 3311 8, CXL_MBOX_IMMEDIATE_POLICY_CHANGE }, 3312 [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 3313 0, 0 }, 3314 [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 }, 3315 [FEATURES][GET_SUPPORTED] = { "FEATURES_GET_SUPPORTED", 3316 cmd_features_get_supported, 0x8, 0 }, 3317 [FEATURES][GET_FEATURE] = { "FEATURES_GET_FEATURE", 3318 cmd_features_get_feature, 0x15, 0 }, 3319 [FEATURES][SET_FEATURE] = { "FEATURES_SET_FEATURE", 3320 cmd_features_set_feature, 3321 ~0, 3322 (CXL_MBOX_IMMEDIATE_CONFIG_CHANGE | 3323 CXL_MBOX_IMMEDIATE_DATA_CHANGE | 3324 CXL_MBOX_IMMEDIATE_POLICY_CHANGE | 3325 CXL_MBOX_IMMEDIATE_LOG_CHANGE | 3326 CXL_MBOX_SECURITY_STATE_CHANGE)}, 3327 [IDENTIFY][MEMORY_DEVICE] = { "IDENTIFY_MEMORY_DEVICE", 3328 cmd_identify_memory_device, 0, 0 }, 3329 [CCLS][GET_PARTITION_INFO] = { "CCLS_GET_PARTITION_INFO", 3330 cmd_ccls_get_partition_info, 0, 0 }, 3331 [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 }, 3332 [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa, 3333 ~0,
CXL_MBOX_IMMEDIATE_CONFIG_CHANGE | CXL_MBOX_IMMEDIATE_DATA_CHANGE }, 3334 [HEALTH_INFO_ALERTS][GET_ALERT_CONFIG] = { 3335 "HEALTH_INFO_ALERTS_GET_ALERT_CONFIG", 3336 cmd_get_alert_config, 0, 0 }, 3337 [HEALTH_INFO_ALERTS][SET_ALERT_CONFIG] = { 3338 "HEALTH_INFO_ALERTS_SET_ALERT_CONFIG", 3339 cmd_set_alert_config, 12, CXL_MBOX_IMMEDIATE_POLICY_CHANGE }, 3340 [SANITIZE][OVERWRITE] = { "SANITIZE_OVERWRITE", cmd_sanitize_overwrite, 0, 3341 (CXL_MBOX_IMMEDIATE_DATA_CHANGE | 3342 CXL_MBOX_SECURITY_STATE_CHANGE | 3343 CXL_MBOX_BACKGROUND_OPERATION | 3344 CXL_MBOX_BACKGROUND_OPERATION_ABORT)}, 3345 [SANITIZE][MEDIA_OPERATIONS] = { "MEDIA_OPERATIONS", cmd_media_operations, 3346 ~0, 3347 (CXL_MBOX_IMMEDIATE_DATA_CHANGE | 3348 CXL_MBOX_BACKGROUND_OPERATION)}, 3349 [PERSISTENT_MEM][GET_SECURITY_STATE] = { "GET_SECURITY_STATE", 3350 cmd_get_security_state, 0, 0 }, 3351 [MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST", 3352 cmd_media_get_poison_list, 16, 0 }, 3353 [MEDIA_AND_POISON][INJECT_POISON] = { "MEDIA_AND_POISON_INJECT_POISON", 3354 cmd_media_inject_poison, 8, 0 }, 3355 [MEDIA_AND_POISON][CLEAR_POISON] = { "MEDIA_AND_POISON_CLEAR_POISON", 3356 cmd_media_clear_poison, 72, 0 }, 3357 [MEDIA_AND_POISON][GET_SCAN_MEDIA_CAPABILITIES] = { 3358 "MEDIA_AND_POISON_GET_SCAN_MEDIA_CAPABILITIES", 3359 cmd_media_get_scan_media_capabilities, 16, 0 }, 3360 [MEDIA_AND_POISON][SCAN_MEDIA] = { "MEDIA_AND_POISON_SCAN_MEDIA", 3361 cmd_media_scan_media, 17, 3362 (CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT)}, 3363 [MEDIA_AND_POISON][GET_SCAN_MEDIA_RESULTS] = { 3364 "MEDIA_AND_POISON_GET_SCAN_MEDIA_RESULTS", 3365 cmd_media_get_scan_media_results, 0, 0 }, 3366 }; 3367 3368 static const struct cxl_cmd cxl_cmd_set_dcd[256][256] = { 3369 [DCD_CONFIG][GET_DC_CONFIG] = { "DCD_GET_DC_CONFIG", 3370 cmd_dcd_get_dyn_cap_config, 2, 0 }, 3371 [DCD_CONFIG][GET_DYN_CAP_EXT_LIST] = { 3372 "DCD_GET_DYNAMIC_CAPACITY_EXTENT_LIST", cmd_dcd_get_dyn_cap_ext_list, 3373 8, 0 }, 3374 [DCD_CONFIG][ADD_DYN_CAP_RSP] = { 3375 "DCD_ADD_DYNAMIC_CAPACITY_RESPONSE", cmd_dcd_add_dyn_cap_rsp, 3376 ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE }, 3377 [DCD_CONFIG][RELEASE_DYN_CAP] = { 3378 "DCD_RELEASE_DYNAMIC_CAPACITY", cmd_dcd_release_dyn_cap, 3379 ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE }, 3380 }; 3381 3382 static const struct cxl_cmd cxl_cmd_set_sw[256][256] = { 3383 [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 }, 3384 [INFOSTAT][BACKGROUND_OPERATION_STATUS] = { "BACKGROUND_OPERATION_STATUS", 3385 cmd_infostat_bg_op_sts, 0, 0 }, 3386 [INFOSTAT][BACKGROUND_OPERATION_ABORT] = { "BACKGROUND_OPERATION_ABORT", 3387 cmd_infostat_bg_op_abort, 0, 0 }, 3388 [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 }, 3389 [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 8, 3390 CXL_MBOX_IMMEDIATE_POLICY_CHANGE }, 3391 [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0, 3392 0 }, 3393 [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 }, 3394 [PHYSICAL_SWITCH][IDENTIFY_SWITCH_DEVICE] = { "IDENTIFY_SWITCH_DEVICE", 3395 cmd_identify_switch_device, 0, 0 }, 3396 [PHYSICAL_SWITCH][GET_PHYSICAL_PORT_STATE] = { "SWITCH_PHYSICAL_PORT_STATS", 3397 cmd_get_physical_port_state, ~0, 0 }, 3398 [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND", 3399 cmd_tunnel_management_cmd, ~0, 0 }, 3400 }; 3401 3402 static const struct cxl_cmd cxl_cmd_set_fm_dcd[256][256] = { 3403 [FMAPI_DCD_MGMT][GET_DCD_INFO] = { "GET_DCD_INFO", 3404 cmd_fm_get_dcd_info, 0, 0 }, 3405 }; 
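/*
 * Each cxl_cmd entry in the tables above is { name, handler, expected
 * input payload length, effects }. An input length of ~0 marks a
 * variable-length payload that the handler validates itself; for any
 * other value, cxl_process_cci_message() rejects a mismatched payload
 * with CXL_MBOX_INVALID_PAYLOAD_LENGTH before the handler runs.
 */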
3406 3407 /* 3408 * While the command is executing in the background, the device should 3409 * update the percentage complete in the Background Command Status Register 3410 * at least once per second. 3411 */ 3412 3413 #define CXL_MBOX_BG_UPDATE_FREQ 1000UL 3414 3415 int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd, 3416 size_t len_in, uint8_t *pl_in, size_t *len_out, 3417 uint8_t *pl_out, bool *bg_started) 3418 { 3419 int ret; 3420 const struct cxl_cmd *cxl_cmd; 3421 opcode_handler h; 3422 CXLDeviceState *cxl_dstate; 3423 3424 *len_out = 0; 3425 cxl_cmd = &cci->cxl_cmd_set[set][cmd]; 3426 h = cxl_cmd->handler; 3427 if (!h) { 3428 qemu_log_mask(LOG_UNIMP, "Command %04xh not implemented\n", 3429 set << 8 | cmd); 3430 return CXL_MBOX_UNSUPPORTED; 3431 } 3432 3433 if (len_in != cxl_cmd->in && cxl_cmd->in != ~0) { 3434 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 3435 } 3436 3437 /* Only one bg command at a time */ 3438 if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) && 3439 cci->bg.runtime > 0) { 3440 return CXL_MBOX_BUSY; 3441 } 3442 3443 /* forbid any selected commands while the media is disabled */ 3444 if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) { 3445 cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate; 3446 3447 if (cxl_dev_media_disabled(cxl_dstate)) { 3448 if (h == cmd_events_get_records || 3449 h == cmd_ccls_get_partition_info || 3450 h == cmd_ccls_set_lsa || 3451 h == cmd_ccls_get_lsa || 3452 h == cmd_logs_get_log || 3453 h == cmd_media_get_poison_list || 3454 h == cmd_media_inject_poison || 3455 h == cmd_media_clear_poison || 3456 h == cmd_sanitize_overwrite || 3457 h == cmd_firmware_update_transfer || 3458 h == cmd_firmware_update_activate) { 3459 return CXL_MBOX_MEDIA_DISABLED; 3460 } 3461 } 3462 } 3463 3464 ret = (*h)(cxl_cmd, pl_in, len_in, pl_out, len_out, cci); 3465 if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) && 3466 ret == CXL_MBOX_BG_STARTED) { 3467 *bg_started = true; 3468 } else { 3469 *bg_started = false; 3470 } 3471 3472 /* Set bg and the return code */ 3473 if (*bg_started) { 3474 uint64_t now; 3475 3476 cci->bg.opcode = (set << 8) | cmd; 3477 3478 cci->bg.complete_pct = 0; 3479 cci->bg.aborted = false; 3480 cci->bg.ret_code = 0; 3481 3482 now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); 3483 cci->bg.starttime = now; 3484 timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ); 3485 } 3486 3487 return ret; 3488 } 3489 3490 static void bg_timercb(void *opaque) 3491 { 3492 CXLCCI *cci = opaque; 3493 uint64_t now, total_time; 3494 3495 qemu_mutex_lock(&cci->bg.lock); 3496 3497 now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); 3498 total_time = cci->bg.starttime + cci->bg.runtime; 3499 3500 if (now >= total_time) { /* we are done */ 3501 uint16_t ret = CXL_MBOX_SUCCESS; 3502 3503 cci->bg.complete_pct = 100; 3504 cci->bg.ret_code = ret; 3505 switch (cci->bg.opcode) { 3506 case 0x0201: /* fw transfer */ 3507 __do_firmware_xfer(cci); 3508 break; 3509 case 0x4400: /* sanitize */ 3510 { 3511 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 3512 3513 __do_sanitization(ct3d); 3514 cxl_dev_enable_media(&ct3d->cxl_dstate); 3515 } 3516 break; 3517 case 0x4402: /* Media Operations sanitize */ 3518 { 3519 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 3520 __do_sanitize(ct3d); 3521 } 3522 break; 3523 case 0x4304: /* scan media */ 3524 { 3525 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 3526 3527 __do_scan_media(ct3d); 3528 break; 3529 } 3530 default: 3531 __builtin_unreachable(); 3532 break; 3533 } 3534 } else { 3535 /* estimate only */ 3536 cci->bg.complete_pct = 3537 100 * (now - 
cci->bg.starttime) / cci->bg.runtime; 3538 timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ); 3539 } 3540 3541 if (cci->bg.complete_pct == 100) { 3542 /* TODO: generalize to switch CCI */ 3543 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 3544 CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; 3545 PCIDevice *pdev = PCI_DEVICE(cci->d); 3546 3547 cci->bg.starttime = 0; 3548 /* registers are updated, allow new bg-capable cmds */ 3549 cci->bg.runtime = 0; 3550 3551 if (msix_enabled(pdev)) { 3552 msix_notify(pdev, cxl_dstate->mbox_msi_n); 3553 } else if (msi_enabled(pdev)) { 3554 msi_notify(pdev, cxl_dstate->mbox_msi_n); 3555 } 3556 } 3557 3558 qemu_mutex_unlock(&cci->bg.lock); 3559 } 3560 3561 static void cxl_rebuild_cel(CXLCCI *cci) 3562 { 3563 cci->cel_size = 0; /* Reset for a fresh build */ 3564 for (int set = 0; set < 256; set++) { 3565 for (int cmd = 0; cmd < 256; cmd++) { 3566 if (cci->cxl_cmd_set[set][cmd].handler) { 3567 const struct cxl_cmd *c = &cci->cxl_cmd_set[set][cmd]; 3568 struct cel_log *log = 3569 &cci->cel_log[cci->cel_size]; 3570 3571 log->opcode = (set << 8) | cmd; 3572 log->effect = c->effect; 3573 cci->cel_size++; 3574 } 3575 } 3576 } 3577 } 3578 3579 void cxl_init_cci(CXLCCI *cci, size_t payload_max) 3580 { 3581 cci->payload_max = payload_max; 3582 cxl_rebuild_cel(cci); 3583 3584 cci->bg.complete_pct = 0; 3585 cci->bg.starttime = 0; 3586 cci->bg.runtime = 0; 3587 cci->bg.aborted = false; 3588 cci->bg.timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, 3589 bg_timercb, cci); 3590 qemu_mutex_init(&cci->bg.lock); 3591 3592 memset(&cci->fw, 0, sizeof(cci->fw)); 3593 cci->fw.active_slot = 1; 3594 cci->fw.slot[cci->fw.active_slot - 1] = true; 3595 cci->initialized = true; 3596 } 3597 3598 void cxl_destroy_cci(CXLCCI *cci) 3599 { 3600 qemu_mutex_destroy(&cci->bg.lock); 3601 cci->initialized = false; 3602 } 3603 3604 static void cxl_copy_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmds)[256]) 3605 { 3606 for (int set = 0; set < 256; set++) { 3607 for (int cmd = 0; cmd < 256; cmd++) { 3608 if (cxl_cmds[set][cmd].handler) { 3609 cci->cxl_cmd_set[set][cmd] = cxl_cmds[set][cmd]; 3610 } 3611 } 3612 } 3613 } 3614 3615 void cxl_add_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmd_set)[256], 3616 size_t payload_max) 3617 { 3618 cci->payload_max = MAX(payload_max, cci->payload_max); 3619 cxl_copy_cci_commands(cci, cxl_cmd_set); 3620 cxl_rebuild_cel(cci); 3621 } 3622 3623 void cxl_initialize_mailbox_swcci(CXLCCI *cci, DeviceState *intf, 3624 DeviceState *d, size_t payload_max) 3625 { 3626 cxl_copy_cci_commands(cci, cxl_cmd_set_sw); 3627 cci->d = d; 3628 cci->intf = intf; 3629 cxl_init_cci(cci, payload_max); 3630 } 3631 3632 void cxl_initialize_mailbox_t3(CXLCCI *cci, DeviceState *d, size_t payload_max) 3633 { 3634 CXLType3Dev *ct3d = CXL_TYPE3(d); 3635 3636 cxl_copy_cci_commands(cci, cxl_cmd_set); 3637 if (ct3d->dc.num_regions) { 3638 cxl_copy_cci_commands(cci, cxl_cmd_set_dcd); 3639 } 3640 cci->d = d; 3641 3642 /* No separation for PCI MB as protocol handled in PCI device */ 3643 cci->intf = d; 3644 cxl_init_cci(cci, payload_max); 3645 } 3646 3647 static const struct cxl_cmd cxl_cmd_set_t3_ld[256][256] = { 3648 [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 }, 3649 [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0, 3650 0 }, 3651 [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 }, 3652 }; 3653 3654 void cxl_initialize_t3_ld_cci(CXLCCI *cci, DeviceState *d, DeviceState *intf, 3655 size_t payload_max) 3656 { 3657 
cxl_copy_cci_commands(cci, cxl_cmd_set_t3_ld); 3658 cci->d = d; 3659 cci->intf = intf; 3660 cxl_init_cci(cci, payload_max); 3661 } 3662 3663 static const struct cxl_cmd cxl_cmd_set_t3_fm_owned_ld_mctp[256][256] = { 3664 [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0}, 3665 [INFOSTAT][GET_RESPONSE_MSG_LIMIT] = { "GET_RESPONSE_MSG_LIMIT", 3666 cmd_get_response_msg_limit, 0, 0 }, 3667 [INFOSTAT][SET_RESPONSE_MSG_LIMIT] = { "SET_RESPONSE_MSG_LIMIT", 3668 cmd_set_response_msg_limit, 1, 0 }, 3669 [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0, 3670 0 }, 3671 [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 }, 3672 [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 }, 3673 [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND", 3674 cmd_tunnel_management_cmd, ~0, 0 }, 3675 }; 3676 3677 void cxl_initialize_t3_fm_owned_ld_mctpcci(CXLCCI *cci, DeviceState *d, 3678 DeviceState *intf, 3679 size_t payload_max) 3680 { 3681 CXLType3Dev *ct3d = CXL_TYPE3(d); 3682 3683 cxl_copy_cci_commands(cci, cxl_cmd_set_t3_fm_owned_ld_mctp); 3684 if (ct3d->dc.num_regions) { 3685 cxl_copy_cci_commands(cci, cxl_cmd_set_fm_dcd); 3686 } 3687 cci->d = d; 3688 cci->intf = intf; 3689 cxl_init_cci(cci, payload_max); 3690 } 3691
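/*
 * Usage sketch (illustrative, not a fixed API contract): a Type 3 device
 * typically wires its primary mailbox with
 *
 *     cxl_initialize_mailbox_t3(&ct3d->cci, DEVICE(ct3d), payload_max);
 *
 * and tears it down again with cxl_destroy_cci(&ct3d->cci). Incoming
 * (set, cmd) pairs are then dispatched through cxl_process_cci_message()
 * against whichever command table was installed above.
 */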