/*
 * CXL Utility library for mailbox interface
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/cxl/cxl.h"
#include "hw/cxl/cxl_events.h"
#include "hw/pci/pci.h"
#include "hw/pci-bridge/cxl_upstream_port.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "qemu/units.h"
#include "qemu/uuid.h"
#include "sysemu/hostmem.h"

#define CXL_CAPACITY_MULTIPLIER (256 * MiB)

/*
 * How to add a new command: consider a command set FOO, with cmd BAR.
 *  1. Add the command set and cmd to the enum.
 *     FOO    = 0x7f,
 *        #define BAR 0
 *  2. Implement the handler
 *     static CXLRetCode cmd_foo_bar(const struct cxl_cmd *cmd,
 *                                   uint8_t *payload_in, size_t len_in,
 *                                   uint8_t *payload_out, size_t *len_out,
 *                                   CXLCCI *cci)
 *  3. Add the command to the appropriate cxl_cmd_set[][]
 *     [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y },
 *
 * Writing the handler:
 *    The handler is given the &struct cxl_cmd, the input and output payload
 *    buffers with their lengths, and the CCI the command arrived on. It is
 *    responsible for consuming the payload from payload_in and operating
 *    upon it as necessary. It must then fill the output data into
 *    payload_out, set *len_out, and return a valid return code.
 *
 * XXX: The handler need not worry about endianness. The payload is read out of
 * a register interface that already deals with it.
 */
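
/*
 * A minimal sketch of such a handler (the FOO/BAR command above is purely
 * hypothetical and not wired up anywhere):
 *
 * static CXLRetCode cmd_foo_bar(const struct cxl_cmd *cmd,
 *                               uint8_t *payload_in, size_t len_in,
 *                               uint8_t *payload_out, size_t *len_out,
 *                               CXLCCI *cci)
 * {
 *     payload_out[0] = 0;   // single byte of output
 *     *len_out = 1;
 *     return CXL_MBOX_SUCCESS;
 * }
 */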

enum {
    INFOSTAT    = 0x00,
        #define IS_IDENTIFY   0x1
        #define BACKGROUND_OPERATION_STATUS   0x2
    EVENTS      = 0x01,
        #define GET_RECORDS   0x0
        #define CLEAR_RECORDS 0x1
        #define GET_INTERRUPT_POLICY   0x2
        #define SET_INTERRUPT_POLICY   0x3
    FIRMWARE_UPDATE = 0x02,
        #define GET_INFO      0x0
    TIMESTAMP   = 0x03,
        #define GET           0x0
        #define SET           0x1
    LOGS        = 0x04,
        #define GET_SUPPORTED 0x0
        #define GET_LOG       0x1
    IDENTIFY    = 0x40,
        #define MEMORY_DEVICE 0x0
    CCLS        = 0x41,
        #define GET_PARTITION_INFO     0x0
        #define GET_LSA       0x2
        #define SET_LSA       0x3
    SANITIZE    = 0x44,
        #define OVERWRITE     0x0
        #define SECURE_ERASE  0x1
    PERSISTENT_MEM = 0x45,
        #define GET_SECURITY_STATE     0x0
    MEDIA_AND_POISON = 0x43,
        #define GET_POISON_LIST        0x0
        #define INJECT_POISON          0x1
        #define CLEAR_POISON           0x2
    PHYSICAL_SWITCH = 0x51,
        #define IDENTIFY_SWITCH_DEVICE      0x0
        #define GET_PHYSICAL_PORT_STATE     0x1
    TUNNEL = 0x53,
        #define MANAGEMENT_COMMAND     0x0
};

/* CCI Message Format CXL r3.0 Figure 7-19 */
typedef struct CXLCCIMessage {
    uint8_t category;
#define CXL_CCI_CAT_REQ 0
#define CXL_CCI_CAT_RSP 1
    uint8_t tag;
    uint8_t resv1;
    uint8_t command;
    uint8_t command_set;
    uint8_t pl_length[3];
    uint16_t rc;
    uint16_t vendor_specific;
    uint8_t payload[];
} QEMU_PACKED CXLCCIMessage;
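
/*
 * Note: pl_length is a 24-bit little-endian payload byte count; for
 * example, a 0x010203-byte payload is stored as
 * pl_length[] = { 0x03, 0x02, 0x01 }.
 */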

/* This command is only defined to an MLD FM Owned LD or an MHD */
static CXLRetCode cmd_tunnel_management_cmd(const struct cxl_cmd *cmd,
                                            uint8_t *payload_in,
                                            size_t len_in,
                                            uint8_t *payload_out,
                                            size_t *len_out,
                                            CXLCCI *cci)
{
    PCIDevice *tunnel_target;
    CXLCCI *target_cci;
    struct {
        uint8_t port_or_ld_id;
        uint8_t target_type;
        uint16_t size;
        CXLCCIMessage ccimessage;
    } QEMU_PACKED *in;
    struct {
        uint16_t resp_len;
        uint8_t resv[2];
        CXLCCIMessage ccimessage;
    } QEMU_PACKED *out;
    size_t pl_length, length_out;
    bool bg_started;
    int rc;

    if (cmd->in < sizeof(*in)) {
        return CXL_MBOX_INVALID_INPUT;
    }
    in = (void *)payload_in;
    out = (void *)payload_out;

    /* Enough room for minimum sized message - no payload */
    if (in->size < sizeof(in->ccimessage)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /* Length of input payload should be in->size + a wrapping tunnel header */
    if (in->size != len_in - offsetof(typeof(*out), ccimessage)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    if (in->ccimessage.category != CXL_CCI_CAT_REQ) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (in->target_type != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "Tunneled Command sent to non-existent FM-LD");
        return CXL_MBOX_INVALID_INPUT;
    }

    /*
     * The target of a tunnel unfortunately depends on the type of CCI
     * reading the message.
     * If in a switch, then it's the port number.
     * If in an MLD, it is the LD number.
     * If in an MHD, the target type indicates where we are going.
     */
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
        if (in->port_or_ld_id != 0) {
            /* Only pretending to have one for now! */
            return CXL_MBOX_INVALID_INPUT;
        }
        target_cci = &ct3d->ld0_cci;
    } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
        CXLUpstreamPort *usp = CXL_USP(cci->d);

        tunnel_target = pcie_find_port_by_pn(&PCI_BRIDGE(usp)->sec_bus,
                                             in->port_or_ld_id);
        if (!tunnel_target) {
            return CXL_MBOX_INVALID_INPUT;
        }
        tunnel_target =
            pci_bridge_get_sec_bus(PCI_BRIDGE(tunnel_target))->devices[0];
        if (!tunnel_target) {
            return CXL_MBOX_INVALID_INPUT;
        }
        if (object_dynamic_cast(OBJECT(tunnel_target), TYPE_CXL_TYPE3)) {
            CXLType3Dev *ct3d = CXL_TYPE3(tunnel_target);
            /* Tunneled VDMs always land on FM Owned LD */
            target_cci = &ct3d->vdm_fm_owned_ld_mctp_cci;
        } else {
            return CXL_MBOX_INVALID_INPUT;
        }
    } else {
        return CXL_MBOX_INVALID_INPUT;
    }

    pl_length = in->ccimessage.pl_length[2] << 16 |
        in->ccimessage.pl_length[1] << 8 | in->ccimessage.pl_length[0];
    rc = cxl_process_cci_message(target_cci,
                                 in->ccimessage.command_set,
                                 in->ccimessage.command,
                                 pl_length, in->ccimessage.payload,
                                 &length_out, out->ccimessage.payload,
                                 &bg_started);
    /* Payload should be in place. The rest of the CCI header still needs filling */
    out->resp_len = length_out + sizeof(CXLCCIMessage);
    st24_le_p(out->ccimessage.pl_length, length_out);
    out->ccimessage.rc = rc;
    out->ccimessage.category = CXL_CCI_CAT_RSP;
    out->ccimessage.command = in->ccimessage.command;
    out->ccimessage.command_set = in->ccimessage.command_set;
    out->ccimessage.tag = in->ccimessage.tag;
    *len_out = length_out + sizeof(*out);

    return CXL_MBOX_SUCCESS;
}
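
/*
 * Illustrative example of the tunnel wrapping handled above (values are
 * illustrative, not from the spec text): tunneling an Identify request
 * (command set 0x00, command 0x1) to LD0 of an MLD wraps an empty-payload
 * CCI message as:
 *
 *   in->port_or_ld_id = 0;
 *   in->target_type   = 0;
 *   in->size          = sizeof(CXLCCIMessage);
 *   in->ccimessage    = (CXLCCIMessage) {
 *       .category = CXL_CCI_CAT_REQ,
 *       .command = 0x1, .command_set = 0x0,
 *       .pl_length = { 0, 0, 0 },
 *   };
 */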

static CXLRetCode cmd_events_get_records(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in, size_t len_in,
                                         uint8_t *payload_out, size_t *len_out,
                                         CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLGetEventPayload *pl;
    uint8_t log_type;
    int max_recs;

    if (cmd->in < sizeof(log_type)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    log_type = payload_in[0];

    pl = (CXLGetEventPayload *)payload_out;
    memset(pl, 0, sizeof(*pl));

    max_recs = (cxlds->payload_size - CXL_EVENT_PAYLOAD_HDR_SIZE) /
                CXL_EVENT_RECORD_SIZE;
    if (max_recs > 0xFFFF) {
        max_recs = 0xFFFF;
    }

    return cxl_event_get_records(cxlds, pl, log_type, max_recs, len_out);
}

static CXLRetCode cmd_events_clear_records(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLClearEventPayload *pl;

    pl = (CXLClearEventPayload *)payload_in;
    *len_out = 0;
    return cxl_event_clear_records(cxlds, pl);
}

static CXLRetCode cmd_events_get_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    policy = (CXLEventInterruptPolicy *)payload_out;
    memset(policy, 0, sizeof(*policy));

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    if (log->irq_enabled) {
        policy->info_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    if (log->irq_enabled) {
        policy->warn_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    if (log->irq_enabled) {
        policy->failure_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    if (log->irq_enabled) {
        policy->fatal_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    if (log->irq_enabled) {
        /* Dynamic Capacity borrows the same vector as info */
        policy->dyn_cap_settings = CXL_INT_MSI_MSIX;
    }

    *len_out = sizeof(*policy);
    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cmd_events_set_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    if (len_in < CXL_EVENT_INT_SETTING_MIN_LEN) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    policy = (CXLEventInterruptPolicy *)payload_in;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    log->irq_enabled = (policy->info_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    log->irq_enabled = (policy->warn_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    log->irq_enabled = (policy->failure_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    log->irq_enabled = (policy->fatal_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    /* DCD is optional */
    if (len_in < sizeof(*policy)) {
        return CXL_MBOX_SUCCESS;
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    log->irq_enabled = (policy->dyn_cap_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}
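
/*
 * Note: only the interrupt mode bits of each settings byte are inspected
 * above; this emulation supports MSI/MSI-X delivery only, so any other
 * requested mode leaves that log's interrupt disabled.
 */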

/* CXL r3.0 section 8.2.9.1.1: Identify (Opcode 0001h) */
static CXLRetCode cmd_infostat_identify(const struct cxl_cmd *cmd,
                                        uint8_t *payload_in,
                                        size_t len_in,
                                        uint8_t *payload_out,
                                        size_t *len_out,
                                        CXLCCI *cci)
{
    PCIDeviceClass *class = PCI_DEVICE_GET_CLASS(cci->d);
    struct {
        uint16_t pcie_vid;
        uint16_t pcie_did;
        uint16_t pcie_subsys_vid;
        uint16_t pcie_subsys_id;
        uint64_t sn;
        uint8_t max_message_size;
        uint8_t component_type;
    } QEMU_PACKED *is_identify;
    QEMU_BUILD_BUG_ON(sizeof(*is_identify) != 18);

    is_identify = (void *)payload_out;
    memset(is_identify, 0, sizeof(*is_identify));
    is_identify->pcie_vid = class->vendor_id;
    is_identify->pcie_did = class->device_id;
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
        is_identify->sn = CXL_USP(cci->d)->sn;
        /* Subsystem info not defined for a USP */
        is_identify->pcie_subsys_vid = 0;
        is_identify->pcie_subsys_id = 0;
        is_identify->component_type = 0x0; /* Switch */
    } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        PCIDevice *pci_dev = PCI_DEVICE(cci->d);

        is_identify->sn = CXL_TYPE3(cci->d)->sn;
        /*
         * We can't always use class->subsystem_vendor_id as
         * it is not set if the defaults are used.
         */
        is_identify->pcie_subsys_vid =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID);
        is_identify->pcie_subsys_id =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_ID);
        is_identify->component_type = 0x3; /* Type 3 */
    }

    /* TODO: Allow this to vary across different CCIs */
    is_identify->max_message_size = 9; /* 2^9 = 512 bytes, MCTP_CXL_MAILBOX_BYTES */
    *len_out = sizeof(*is_identify);
    return CXL_MBOX_SUCCESS;
}
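
/*
 * pci_for_each_device_under_bus() callback: for each downstream switch
 * port (DSP) found, set the bit corresponding to its port number in the
 * caller-provided bitmap.
 */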

static void cxl_set_dsp_active_bm(PCIBus *b, PCIDevice *d,
                                  void *private)
{
    uint8_t *bm = private;
    if (object_dynamic_cast(OBJECT(d), TYPE_CXL_DSP)) {
        uint8_t port = PCIE_PORT(d)->port;
        bm[port / 8] |= 1 << (port % 8);
    }
}

/* CXL r3.0 section 7.6.7.1.1: Identify Switch Device (Opcode 5100h) */
static CXLRetCode cmd_identify_switch_device(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    PCIEPort *usp = PCIE_PORT(cci->d);
    PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
    int num_phys_ports = pcie_count_ds_ports(bus);

    struct cxl_fmapi_ident_switch_dev_resp_pl {
        uint8_t ingress_port_id;
        uint8_t rsvd;
        uint8_t num_physical_ports;
        uint8_t num_vcss;
        uint8_t active_port_bitmask[0x20];
        uint8_t active_vcs_bitmask[0x20];
        uint16_t total_vppbs;
        uint16_t bound_vppbs;
        uint8_t num_hdm_decoders_per_usp;
    } QEMU_PACKED *out;
    QEMU_BUILD_BUG_ON(sizeof(*out) != 0x49);

    out = (struct cxl_fmapi_ident_switch_dev_resp_pl *)payload_out;
    *out = (struct cxl_fmapi_ident_switch_dev_resp_pl) {
        .num_physical_ports = num_phys_ports + 1, /* 1 USP */
        .num_vcss = 1, /* No support yet for multiple VCSs - potentially tricky */
        .active_vcs_bitmask[0] = 0x1,
        .total_vppbs = num_phys_ports + 1,
        .bound_vppbs = num_phys_ports + 1,
        .num_hdm_decoders_per_usp = 4,
    };

    /* Depends on the CCI type */
    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_PCIE_PORT)) {
        out->ingress_port_id = PCIE_PORT(cci->intf)->port;
    } else {
        /* MCTP? */
        out->ingress_port_id = 0;
    }

    pci_for_each_device_under_bus(bus, cxl_set_dsp_active_bm,
                                  out->active_port_bitmask);
    out->active_port_bitmask[usp->port / 8] |= (1 << usp->port % 8);

    *len_out = sizeof(*out);

    return CXL_MBOX_SUCCESS;
}
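
/*
 * Worked example (hypothetical topology): a switch with its USP on port 0
 * and DSPs on ports 1 and 2 reports num_physical_ports = 3 and
 * active_port_bitmask[0] = 0x07 (bits 0-2 set).
 */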

/* CXL r3.0 Section 7.6.7.1.2: Get Physical Port State (Opcode 5101h) */
static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    /* CXL r3.0 Table 7-18: Get Physical Port State Request Payload */
    struct cxl_fmapi_get_phys_port_state_req_pl {
        uint8_t num_ports;
        uint8_t ports[];
    } QEMU_PACKED *in;

    /*
     * CXL r3.0 Table 7-20: Get Physical Port State Port Information Block
     * Format
     */
    struct cxl_fmapi_port_state_info_block {
        uint8_t port_id;
        uint8_t config_state;
        uint8_t connected_device_cxl_version;
        uint8_t rsv1;
        uint8_t connected_device_type;
        uint8_t port_cxl_version_bitmask;
        uint8_t max_link_width;
        uint8_t negotiated_link_width;
        uint8_t supported_link_speeds_vector;
        uint8_t max_link_speed;
        uint8_t current_link_speed;
        uint8_t ltssm_state;
        uint8_t first_lane_num;
        uint16_t link_state;
        uint8_t supported_ld_count;
    } QEMU_PACKED;

    /* CXL r3.0 Table 7-19: Get Physical Port State Response Payload */
    struct cxl_fmapi_get_phys_port_state_resp_pl {
        uint8_t num_ports;
        uint8_t rsv1[3];
        struct cxl_fmapi_port_state_info_block ports[];
    } QEMU_PACKED *out;
    PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
    PCIEPort *usp = PCIE_PORT(cci->d);
    size_t pl_size;
    int i;

    in = (struct cxl_fmapi_get_phys_port_state_req_pl *)payload_in;
    out = (struct cxl_fmapi_get_phys_port_state_resp_pl *)payload_out;

    /* Check if what was requested can fit */
    if (sizeof(*out) + sizeof(*out->ports) * in->num_ports > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* For success there should be a match for each requested */
    out->num_ports = in->num_ports;

    for (i = 0; i < in->num_ports; i++) {
        struct cxl_fmapi_port_state_info_block *port;
        /* First try to match on downstream port */
        PCIDevice *port_dev;
        uint16_t lnkcap, lnkcap2, lnksta;

        port = &out->ports[i];

        port_dev = pcie_find_port_by_pn(bus, in->ports[i]);
        if (port_dev) { /* DSP */
            PCIDevice *ds_dev = pci_bridge_get_sec_bus(PCI_BRIDGE(port_dev))
                ->devices[0];
            port->config_state = 3;
            if (ds_dev) {
                if (object_dynamic_cast(OBJECT(ds_dev), TYPE_CXL_TYPE3)) {
                    port->connected_device_type = 5; /* Assume MLD for now */
                } else {
                    port->connected_device_type = 1;
                }
            } else {
                port->connected_device_type = 0;
            }
            port->supported_ld_count = 3;
        } else if (usp->port == in->ports[i]) { /* USP */
            port_dev = PCI_DEVICE(usp);
            port->config_state = 4;
            port->connected_device_type = 0;
        } else {
            return CXL_MBOX_INVALID_INPUT;
        }

        port->port_id = in->ports[i];
        /* Information on status of this port in lnksta, lnkcap */
        if (!port_dev->exp.exp_cap) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
        lnksta = port_dev->config_read(port_dev,
                                       port_dev->exp.exp_cap + PCI_EXP_LNKSTA,
                                       sizeof(lnksta));
        lnkcap = port_dev->config_read(port_dev,
                                       port_dev->exp.exp_cap + PCI_EXP_LNKCAP,
                                       sizeof(lnkcap));
        lnkcap2 = port_dev->config_read(port_dev,
                                        port_dev->exp.exp_cap + PCI_EXP_LNKCAP2,
                                        sizeof(lnkcap2));

        port->max_link_width = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
        port->negotiated_link_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> 4;
        /* No definition for SLS field in linux/pci_regs.h */
        port->supported_link_speeds_vector = (lnkcap2 & 0xFE) >> 1;
        port->max_link_speed = lnkcap & PCI_EXP_LNKCAP_SLS;
        port->current_link_speed = lnksta & PCI_EXP_LNKSTA_CLS;
        /* TODO: Track down if we can get the rest of the info */
        port->ltssm_state = 0x7;
        port->first_lane_num = 0;
        port->link_state = 0;
        port->port_cxl_version_bitmask = 0x2;
        port->connected_device_cxl_version = 0x2;
    }

    pl_size = sizeof(*out) + sizeof(*out->ports) * in->num_ports;
    *len_out = pl_size;

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.0 section 8.2.9.1.2: Background Operation Status (Opcode 0002h) */
static CXLRetCode cmd_infostat_bg_op_sts(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint8_t status;
        uint8_t rsvd;
        uint16_t opcode;
        uint16_t returncode;
        uint16_t vendor_ext_status;
    } QEMU_PACKED *bg_op_status;
    QEMU_BUILD_BUG_ON(sizeof(*bg_op_status) != 8);

    bg_op_status = (void *)payload_out;
    memset(bg_op_status, 0, sizeof(*bg_op_status));
    bg_op_status->status = cci->bg.complete_pct << 1;
    if (cci->bg.runtime > 0) {
        bg_op_status->status |= 1U << 0;
    }
    bg_op_status->opcode = cci->bg.opcode;
    bg_op_status->returncode = cci->bg.ret_code;
    *len_out = sizeof(*bg_op_status);

    return CXL_MBOX_SUCCESS;
}
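
/*
 * Note the status byte layout used above: bit 0 indicates an operation is
 * in progress and bits [7:1] hold the percentage complete.
 */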

/* CXL r3.0 section 8.2.9.2.1: Get FW Info (Opcode 0200h) */
static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    struct {
        uint8_t slots_supported;
        uint8_t slot_info;
        uint8_t caps;
        uint8_t rsvd[0xd];
        char fw_rev1[0x10];
        char fw_rev2[0x10];
        char fw_rev3[0x10];
        char fw_rev4[0x10];
    } QEMU_PACKED *fw_info;
    QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50);

    if ((cxl_dstate->vmem_size < CXL_CAPACITY_MULTIPLIER) ||
        (cxl_dstate->pmem_size < CXL_CAPACITY_MULTIPLIER)) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    fw_info = (void *)payload_out;
    memset(fw_info, 0, sizeof(*fw_info));

    fw_info->slots_supported = 2;
    fw_info->slot_info = BIT(0) | BIT(3);
    fw_info->caps = 0;
    pstrcpy(fw_info->fw_rev1, sizeof(fw_info->fw_rev1), "BWFW VERSION 0");

    *len_out = sizeof(*fw_info);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.0 section 8.2.9.3.1: Get Timestamp (Opcode 0300h) */
static CXLRetCode cmd_timestamp_get(const struct cxl_cmd *cmd,
                                    uint8_t *payload_in,
                                    size_t len_in,
                                    uint8_t *payload_out,
                                    size_t *len_out,
                                    CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    uint64_t final_time = cxl_device_get_timestamp(cxl_dstate);

    stq_le_p(payload_out, final_time);
    *len_out = 8;

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.0 section 8.2.9.3.2: Set Timestamp (Opcode 0301h) */
static CXLRetCode cmd_timestamp_set(const struct cxl_cmd *cmd,
                                    uint8_t *payload_in,
                                    size_t len_in,
                                    uint8_t *payload_out,
                                    size_t *len_out,
                                    CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;

    cxl_dstate->timestamp.set = true;
    cxl_dstate->timestamp.last_set = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    cxl_dstate->timestamp.host_set = le64_to_cpu(*(uint64_t *)payload_in);

    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.0 section 8.2.9.5.2.1: Command Effects Log (CEL) */
static const QemuUUID cel_uuid = {
    .data = UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79,
                 0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17)
};

/* CXL r3.0 section 8.2.9.4.1: Get Supported Logs (Opcode 0400h) */
static CXLRetCode cmd_logs_get_supported(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint16_t entries;
        uint8_t rsvd[6];
        struct {
            QemuUUID uuid;
            uint32_t size;
        } log_entries[1];
    } QEMU_PACKED *supported_logs = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*supported_logs) != 0x1c);

    supported_logs->entries = 1;
    supported_logs->log_entries[0].uuid = cel_uuid;
    supported_logs->log_entries[0].size = 4 * cci->cel_size;

    *len_out = sizeof(*supported_logs);
    return CXL_MBOX_SUCCESS;
}
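
/*
 * Each CEL entry reported above is 4 bytes: a 16-bit opcode plus its
 * 16-bit command effects, hence the log size of 4 * cci->cel_size (see
 * cxl_init_cci() below, where the log is populated).
 */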

/* CXL r3.0 section 8.2.9.4.2: Get Log (Opcode 0401h) */
static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct {
        QemuUUID uuid;
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_log;

    get_log = (void *)payload_in;

    /*
     * CXL r3.0 section 8.2.9.4.2:
     *   The device shall return Invalid Parameter if the Offset or Length
     *   fields attempt to access beyond the size of the log as reported by
     *   Get Supported Logs.
     *
     * XXX: Spec is wrong, "Invalid Parameter" isn't a thing.
     * XXX: Spec doesn't address the case of an incorrect UUID.
     *
     * The CEL buffer is large enough to fit all commands in the emulation, so
     * the only possible failure would be if the mailbox itself isn't big
     * enough.
     */
    if (get_log->offset + get_log->length > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) {
        return CXL_MBOX_UNSUPPORTED;
    }

    /* Store off everything to local variables so we can wipe out the payload */
    *len_out = get_log->length;

    memmove(payload_out, cci->cel_log + get_log->offset, get_log->length);

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.0 section 8.2.9.5.1.1: Identify Memory Device (Opcode 4000h) */
static CXLRetCode cmd_identify_memory_device(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        char fw_revision[0x10];
        uint64_t total_capacity;
        uint64_t volatile_capacity;
        uint64_t persistent_capacity;
        uint64_t partition_align;
        uint16_t info_event_log_size;
        uint16_t warning_event_log_size;
        uint16_t failure_event_log_size;
        uint16_t fatal_event_log_size;
        uint32_t lsa_size;
        uint8_t poison_list_max_mer[3];
        uint16_t inject_poison_limit;
        uint8_t poison_caps;
        uint8_t qos_telemetry_caps;
    } QEMU_PACKED *id;
    QEMU_BUILD_BUG_ON(sizeof(*id) != 0x43);
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    id = (void *)payload_out;
    memset(id, 0, sizeof(*id));

    snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0);

    stq_le_p(&id->total_capacity,
             cxl_dstate->mem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->persistent_capacity,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->volatile_capacity,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d));
    /* 256 poison records */
    st24_le_p(id->poison_list_max_mer, 256);
    /* No limit - so limited by main poison record limit */
    stw_le_p(&id->inject_poison_limit, 0);

    *len_out = sizeof(*id);
    return CXL_MBOX_SUCCESS;
}
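
/*
 * Note: the capacity fields above are reported in units of
 * CXL_CAPACITY_MULTIPLIER (256 MiB), hence the divisions; the alignment
 * check guards against a configuration the multiplier cannot represent.
 */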

static CXLRetCode cmd_ccls_get_partition_info(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    struct {
        uint64_t active_vmem;
        uint64_t active_pmem;
        uint64_t next_vmem;
        uint64_t next_pmem;
    } QEMU_PACKED *part_info = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20);

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    stq_le_p(&part_info->active_vmem,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    /*
     * When both next_vmem and next_pmem are 0, there is no pending change to
     * partitioning.
     */
    stq_le_p(&part_info->next_vmem, 0);
    stq_le_p(&part_info->active_pmem,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&part_info->next_pmem, 0);

    *len_out = sizeof(*part_info);
    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cmd_ccls_get_lsa(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct {
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED *get_lsa;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    uint32_t offset, length;

    get_lsa = (void *)payload_in;
    offset = get_lsa->offset;
    length = get_lsa->length;

    if (offset + length > cvc->get_lsa_size(ct3d)) {
        *len_out = 0;
        return CXL_MBOX_INVALID_INPUT;
    }

    *len_out = cvc->get_lsa(ct3d, payload_out, length, offset);
    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cmd_ccls_set_lsa(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct set_lsa_pl {
        uint32_t offset;
        uint32_t rsvd;
        uint8_t data[];
    } QEMU_PACKED;
    struct set_lsa_pl *set_lsa_payload = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    const size_t hdr_len = offsetof(struct set_lsa_pl, data);

    *len_out = 0;
    if (!len_in) {
        return CXL_MBOX_SUCCESS;
    }

    if (set_lsa_payload->offset + len_in > cvc->get_lsa_size(ct3d) + hdr_len) {
        return CXL_MBOX_INVALID_INPUT;
    }
    len_in -= hdr_len;

    cvc->set_lsa(ct3d, set_lsa_payload->data, len_in, set_lsa_payload->offset);
    return CXL_MBOX_SUCCESS;
}
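
/*
 * Illustrative example: a Set LSA writing 16 bytes at offset 0 arrives with
 * len_in = 8 (header) + 16 = 24, so the bounds check above compares
 * offset + 24 against get_lsa_size() + 8 before the header is stripped off.
 */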

/* Perform the actual device zeroing */
static void __do_sanitization(CXLType3Dev *ct3d)
{
    MemoryRegion *mr;

    if (ct3d->hostvmem) {
        mr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (mr) {
            void *hostmem = memory_region_get_ram_ptr(mr);
            memset(hostmem, 0, memory_region_size(mr));
        }
    }

    if (ct3d->hostpmem) {
        mr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (mr) {
            void *hostmem = memory_region_get_ram_ptr(mr);
            memset(hostmem, 0, memory_region_size(mr));
        }
    }
    if (ct3d->lsa) {
        mr = host_memory_backend_get_memory(ct3d->lsa);
        if (mr) {
            void *lsa = memory_region_get_ram_ptr(mr);
            memset(lsa, 0, memory_region_size(mr));
        }
    }
}

/*
 * CXL r3.0 section 8.2.9.8.5.1: Sanitize (Opcode 4400h)
 *
 * Once the Sanitize command has started successfully, the device shall be
 * placed in the media disabled state. If the command fails or is interrupted
 * by a reset or power failure, it shall remain in the media disabled state
 * until a successful Sanitize command has been completed. During this state:
 *
 * 1. Memory writes to the device will have no effect, and all memory reads
 * will return random values (no user data returned, even for locations that
 * the failed Sanitize operation didn't sanitize yet).
 *
 * 2. Mailbox commands shall still be processed in the disabled state, except
 * that commands that access Sanitized areas shall fail with the Media Disabled
 * error code.
 */
static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint64_t total_mem; /* in MiB */
    int secs;

    total_mem = (ct3d->cxl_dstate.vmem_size + ct3d->cxl_dstate.pmem_size) >> 20;
    if (total_mem <= 512) {
        secs = 4;
    } else if (total_mem <= 1024) {
        secs = 8;
    } else if (total_mem <= 2 * 1024) {
        secs = 15;
    } else if (total_mem <= 4 * 1024) {
        secs = 30;
    } else if (total_mem <= 8 * 1024) {
        secs = 60;
    } else if (total_mem <= 16 * 1024) {
        secs = 2 * 60;
    } else if (total_mem <= 32 * 1024) {
        secs = 4 * 60;
    } else if (total_mem <= 64 * 1024) {
        secs = 8 * 60;
    } else if (total_mem <= 128 * 1024) {
        secs = 15 * 60;
    } else if (total_mem <= 256 * 1024) {
        secs = 30 * 60;
    } else if (total_mem <= 512 * 1024) {
        secs = 60 * 60;
    } else if (total_mem <= 1024 * 1024) {
        secs = 120 * 60;
    } else {
        secs = 240 * 60; /* max 4 hrs */
    }

    /* Other background commands will return BUSY until this one completes */
    cci->bg.runtime = secs * 1000UL;
    *len_out = 0;

    cxl_dev_disable_media(&ct3d->cxl_dstate);

    /* The actual zeroing happens on completion, in bg_timercb() */
    return CXL_MBOX_BG_STARTED;
}

/* Get Security State (Opcode 4500h) */
static CXLRetCode cmd_get_security_state(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    uint32_t *state = (uint32_t *)payload_out;

    *state = 0;
    *len_out = 4;
    return CXL_MBOX_SUCCESS;
}
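
/*
 * A zeroed security state indicates no security features are active (no
 * passphrase set, device neither locked nor frozen), which is all this
 * emulation reports.
 */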

/*
 * This is very inefficient, but good enough for now!
 * Also the payload will always fit, so no need to handle the MORE flag and
 * make this stateful. We may want to allow longer poison lists to aid
 * testing of that kernel functionality.
 */
static CXLRetCode cmd_media_get_poison_list(const struct cxl_cmd *cmd,
                                            uint8_t *payload_in,
                                            size_t len_in,
                                            uint8_t *payload_out,
                                            size_t *len_out,
                                            CXLCCI *cci)
{
    struct get_poison_list_pl {
        uint64_t pa;
        uint64_t length;
    } QEMU_PACKED;

    struct get_poison_list_out_pl {
        uint8_t flags;
        uint8_t rsvd1;
        uint64_t overflow_timestamp;
        uint16_t count;
        uint8_t rsvd2[0x14];
        struct {
            uint64_t addr;
            uint32_t length;
            uint32_t resv;
        } QEMU_PACKED records[];
    } QEMU_PACKED;

    struct get_poison_list_pl *in = (void *)payload_in;
    struct get_poison_list_out_pl *out = (void *)payload_out;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint16_t record_count = 0, i = 0;
    uint64_t query_start, query_length;
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLPoison *ent;
    uint16_t out_pl_len;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    QLIST_FOREACH(ent, poison_list, node) {
        /* Check for no overlap */
        if (ent->start >= query_start + query_length ||
            ent->start + ent->length <= query_start) {
            continue;
        }
        record_count++;
    }
    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    memset(out, 0, out_pl_len);
    QLIST_FOREACH(ent, poison_list, node) {
        uint64_t start, stop;

        /* Check for no overlap */
        if (ent->start >= query_start + query_length ||
            ent->start + ent->length <= query_start) {
            continue;
        }

        /* Deal with overlap */
        start = MAX(ROUND_DOWN(ent->start, 64ull), query_start);
        stop = MIN(ROUND_DOWN(ent->start, 64ull) + ent->length,
                   query_start + query_length);
        stq_le_p(&out->records[i].addr, start | (ent->type & 0x7));
        stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE);
        i++;
    }
    if (ct3d->poison_list_overflowed) {
        out->flags = (1 << 1);
        stq_le_p(&out->overflow_timestamp, ct3d->poison_list_overflow_ts);
    }
    stw_le_p(&out->count, record_count);
    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}
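
/*
 * Note the record encoding used above: each returned address is 64-byte
 * aligned, with the low 3 bits of the address field carrying the poison
 * type (e.g. CXL_POISON_TYPE_INJECTED), and lengths are in units of
 * CXL_CACHE_LINE_SIZE.
 */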

static CXLRetCode cmd_media_inject_poison(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLPoison *ent;
    struct inject_poison_pl {
        uint64_t dpa;
    };
    struct inject_poison_pl *in = (void *)payload_in;
    uint64_t dpa = ldq_le_p(&in->dpa);
    CXLPoison *p;

    QLIST_FOREACH(ent, poison_list, node) {
        if (dpa >= ent->start &&
            dpa + CXL_CACHE_LINE_SIZE <= ent->start + ent->length) {
            /* Already covered by an existing entry - nothing to do */
            return CXL_MBOX_SUCCESS;
        }
    }

    if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
        return CXL_MBOX_INJECT_POISON_LIMIT;
    }
    p = g_new0(CXLPoison, 1);

    p->length = CXL_CACHE_LINE_SIZE;
    p->start = dpa;
    p->type = CXL_POISON_TYPE_INJECTED;

    /*
     * Possible todo: Merge with existing entry if next to it and if same type
     */
    QLIST_INSERT_HEAD(poison_list, p, node);
    ct3d->poison_list_cnt++;
    *len_out = 0;

    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cmd_media_clear_poison(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    struct clear_poison_pl {
        uint64_t dpa;
        uint8_t data[64];
    };
    CXLPoison *ent;
    uint64_t dpa;

    struct clear_poison_pl *in = (void *)payload_in;

    dpa = ldq_le_p(&in->dpa);
    if (dpa + CXL_CACHE_LINE_SIZE > cxl_dstate->mem_size) {
        return CXL_MBOX_INVALID_PA;
    }

    /* Clearing a region with no poison is not an error so always do so */
    if (cvc->set_cacheline) {
        if (!cvc->set_cacheline(ct3d, dpa, in->data)) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
    }

    QLIST_FOREACH(ent, poison_list, node) {
        /*
         * Test for the clear being contained in the entry. Simpler than the
         * general case as we are clearing 64 bytes and entries are 64 byte
         * aligned.
         */
        if ((dpa >= ent->start) && (dpa < ent->start + ent->length)) {
            break;
        }
    }
    if (!ent) {
        return CXL_MBOX_SUCCESS;
    }

    QLIST_REMOVE(ent, node);
    ct3d->poison_list_cnt--;

    /* Clearing may split the entry into up to two remaining fragments */
    if (dpa > ent->start) {
        CXLPoison *frag;
        /* Cannot overflow as replacing existing entry */

        frag = g_new0(CXLPoison, 1);

        frag->start = ent->start;
        frag->length = dpa - ent->start;
        frag->type = ent->type;

        QLIST_INSERT_HEAD(poison_list, frag, node);
        ct3d->poison_list_cnt++;
    }

    if (dpa + CXL_CACHE_LINE_SIZE < ent->start + ent->length) {
        CXLPoison *frag;

        if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
            cxl_set_poison_list_overflowed(ct3d);
        } else {
            frag = g_new0(CXLPoison, 1);

            frag->start = dpa + CXL_CACHE_LINE_SIZE;
            frag->length = ent->start + ent->length - frag->start;
            frag->type = ent->type;
            QLIST_INSERT_HEAD(poison_list, frag, node);
            ct3d->poison_list_cnt++;
        }
    }
    /* Any fragments have been added; free the original entry */
    g_free(ent);
    *len_out = 0;

    return CXL_MBOX_SUCCESS;
}

#define IMMEDIATE_CONFIG_CHANGE (1 << 1)
#define IMMEDIATE_DATA_CHANGE (1 << 2)
#define IMMEDIATE_POLICY_CHANGE (1 << 3)
#define IMMEDIATE_LOG_CHANGE (1 << 4)
#define SECURITY_STATE_CHANGE (1 << 5)
#define BACKGROUND_OPERATION (1 << 6)
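
/*
 * Each entry in the command sets below is { name, handler, expected input
 * payload length, command effects }; an input length of ~0 accepts a
 * variable-length payload (see the length check in
 * cxl_process_cci_message()).
 */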

static const struct cxl_cmd cxl_cmd_set[256][256] = {
    [EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS",
        cmd_events_get_records, 1, 0 },
    [EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS",
        cmd_events_clear_records, ~0, IMMEDIATE_LOG_CHANGE },
    [EVENTS][GET_INTERRUPT_POLICY] = { "EVENTS_GET_INTERRUPT_POLICY",
        cmd_events_get_interrupt_policy, 0, 0 },
    [EVENTS][SET_INTERRUPT_POLICY] = { "EVENTS_SET_INTERRUPT_POLICY",
        cmd_events_set_interrupt_policy,
        ~0, IMMEDIATE_CONFIG_CHANGE },
    [FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO",
        cmd_firmware_update_get_info, 0, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set,
        8, IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported,
        0, 0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [IDENTIFY][MEMORY_DEVICE] = { "IDENTIFY_MEMORY_DEVICE",
        cmd_identify_memory_device, 0, 0 },
    [CCLS][GET_PARTITION_INFO] = { "CCLS_GET_PARTITION_INFO",
        cmd_ccls_get_partition_info, 0, 0 },
    [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 },
    [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa,
        ~0, IMMEDIATE_CONFIG_CHANGE | IMMEDIATE_DATA_CHANGE },
    [SANITIZE][OVERWRITE] = { "SANITIZE_OVERWRITE", cmd_sanitize_overwrite, 0,
        IMMEDIATE_DATA_CHANGE | SECURITY_STATE_CHANGE | BACKGROUND_OPERATION },
    [PERSISTENT_MEM][GET_SECURITY_STATE] = { "GET_SECURITY_STATE",
        cmd_get_security_state, 0, 0 },
    [MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST",
        cmd_media_get_poison_list, 16, 0 },
    [MEDIA_AND_POISON][INJECT_POISON] = { "MEDIA_AND_POISON_INJECT_POISON",
        cmd_media_inject_poison, 8, 0 },
    [MEDIA_AND_POISON][CLEAR_POISON] = { "MEDIA_AND_POISON_CLEAR_POISON",
        cmd_media_clear_poison, 72, 0 },
};
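
/* Command set exposed by the CCI of a CXL switch (upstream port). */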
static const struct cxl_cmd cxl_cmd_set_sw[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [INFOSTAT][BACKGROUND_OPERATION_STATUS] = { "BACKGROUND_OPERATION_STATUS",
        cmd_infostat_bg_op_sts, 0, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 0,
        IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
        0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [PHYSICAL_SWITCH][IDENTIFY_SWITCH_DEVICE] = { "IDENTIFY_SWITCH_DEVICE",
        cmd_identify_switch_device, 0, 0 },
    [PHYSICAL_SWITCH][GET_PHYSICAL_PORT_STATE] = { "SWITCH_PHYSICAL_PORT_STATS",
        cmd_get_physical_port_state, ~0, 0 },
    [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
        cmd_tunnel_management_cmd, ~0, 0 },
};

/*
 * While the command is executing in the background, the device should
 * update the percentage complete in the Background Command Status Register
 * at least once per second.
 */

#define CXL_MBOX_BG_UPDATE_FREQ 1000UL

int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd,
                            size_t len_in, uint8_t *pl_in, size_t *len_out,
                            uint8_t *pl_out, bool *bg_started)
{
    int ret;
    const struct cxl_cmd *cxl_cmd;
    opcode_handler h;

    *len_out = 0;
    cxl_cmd = &cci->cxl_cmd_set[set][cmd];
    h = cxl_cmd->handler;
    if (!h) {
        qemu_log_mask(LOG_UNIMP, "Command %04xh not implemented\n",
                      set << 8 | cmd);
        return CXL_MBOX_UNSUPPORTED;
    }

    if (len_in != cxl_cmd->in && cxl_cmd->in != ~0) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    /* Only one bg command at a time */
    if ((cxl_cmd->effect & BACKGROUND_OPERATION) &&
        cci->bg.runtime > 0) {
        return CXL_MBOX_BUSY;
    }

    /* Forbid any selected commands while overwriting */
    if (sanitize_running(cci)) {
        if (h == cmd_events_get_records ||
            h == cmd_ccls_get_partition_info ||
            h == cmd_ccls_set_lsa ||
            h == cmd_ccls_get_lsa ||
            h == cmd_logs_get_log ||
            h == cmd_media_get_poison_list ||
            h == cmd_media_inject_poison ||
            h == cmd_media_clear_poison ||
            h == cmd_sanitize_overwrite) {
            return CXL_MBOX_MEDIA_DISABLED;
        }
    }

    ret = (*h)(cxl_cmd, pl_in, len_in, pl_out, len_out, cci);
    if ((cxl_cmd->effect & BACKGROUND_OPERATION) &&
        ret == CXL_MBOX_BG_STARTED) {
        *bg_started = true;
    } else {
        *bg_started = false;
    }

    /* Set bg and the return code */
    if (*bg_started) {
        uint64_t now;

        cci->bg.opcode = (set << 8) | cmd;

        cci->bg.complete_pct = 0;
        cci->bg.ret_code = 0;

        now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
        cci->bg.starttime = now;
        timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
    }

    return ret;
}
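
/*
 * Example (a sketch; the real callers are the mailbox register handling
 * and the tunnel handler above): issuing Get Timestamp directly to a CCI:
 *
 *     uint8_t out[8];
 *     size_t out_len;
 *     bool bg;
 *     int rc = cxl_process_cci_message(cci, TIMESTAMP, GET, 0, NULL,
 *                                      &out_len, out, &bg);
 *
 * On success, rc == CXL_MBOX_SUCCESS, out_len == 8 and out holds the
 * little-endian timestamp; bg is false since Get Timestamp is not a
 * background command.
 */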
static void bg_timercb(void *opaque)
{
    CXLCCI *cci = opaque;
    uint64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    uint64_t total_time = cci->bg.starttime + cci->bg.runtime;

    assert(cci->bg.runtime > 0);

    if (now >= total_time) { /* we are done */
        uint16_t ret = CXL_MBOX_SUCCESS;

        cci->bg.complete_pct = 100;
        cci->bg.ret_code = ret;
        switch (cci->bg.opcode) {
        case 0x4400: /* sanitize */
        {
            CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

            __do_sanitization(ct3d);
            cxl_dev_enable_media(&ct3d->cxl_dstate);
        }
            break;
        case 0x4304: /* TODO: scan media */
            break;
        default:
            __builtin_unreachable();
            break;
        }
    } else {
        /* estimate only */
        cci->bg.complete_pct = 100 * now / total_time;
        timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
    }

    if (cci->bg.complete_pct == 100) {
        /* TODO: generalize to switch CCI */
        CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
        CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
        PCIDevice *pdev = PCI_DEVICE(cci->d);

        cci->bg.starttime = 0;
        /* Registers are updated, allow new bg-capable cmds */
        cci->bg.runtime = 0;

        if (msix_enabled(pdev)) {
            msix_notify(pdev, cxl_dstate->mbox_msi_n);
        } else if (msi_enabled(pdev)) {
            msi_notify(pdev, cxl_dstate->mbox_msi_n);
        }
    }
}

void cxl_init_cci(CXLCCI *cci, size_t payload_max)
{
    cci->payload_max = payload_max;
    /* Populate the Command Effects Log from the installed command set */
    for (int set = 0; set < 256; set++) {
        for (int cmd = 0; cmd < 256; cmd++) {
            if (cci->cxl_cmd_set[set][cmd].handler) {
                const struct cxl_cmd *c = &cci->cxl_cmd_set[set][cmd];
                struct cel_log *log =
                    &cci->cel_log[cci->cel_size];

                log->opcode = (set << 8) | cmd;
                log->effect = c->effect;
                cci->cel_size++;
            }
        }
    }
    cci->bg.complete_pct = 0;
    cci->bg.starttime = 0;
    cci->bg.runtime = 0;
    cci->bg.timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                 bg_timercb, cci);
}

void cxl_initialize_mailbox_swcci(CXLCCI *cci, DeviceState *intf,
                                  DeviceState *d, size_t payload_max)
{
    cci->cxl_cmd_set = cxl_cmd_set_sw;
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}

void cxl_initialize_mailbox_t3(CXLCCI *cci, DeviceState *d, size_t payload_max)
{
    cci->cxl_cmd_set = cxl_cmd_set;
    cci->d = d;

    /* No separation for PCI MB as protocol handled in PCI device */
    cci->intf = d;
    cxl_init_cci(cci, payload_max);
}

/* Command set exposed by the CCI of an individual LD in a Type 3 device */
static const struct cxl_cmd cxl_cmd_set_t3_ld[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
        0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
};

void cxl_initialize_t3_ld_cci(CXLCCI *cci, DeviceState *d, DeviceState *intf,
                              size_t payload_max)
{
    cci->cxl_cmd_set = cxl_cmd_set_t3_ld;
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}

/* Command set exposed by a Type 3 device's FM Owned LD over MCTP */
static const struct cxl_cmd cxl_cmd_set_t3_fm_owned_ld_mctp[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
        0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
        cmd_tunnel_management_cmd, ~0, 0 },
};

void cxl_initialize_t3_fm_owned_ld_mctpcci(CXLCCI *cci, DeviceState *d,
                                           DeviceState *intf,
                                           size_t payload_max)
{
    cci->cxl_cmd_set = cxl_cmd_set_t3_fm_owned_ld_mctp;
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}