/*
 * CXL Utility library for mailbox interface
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/cxl/cxl.h"
#include "hw/cxl/cxl_events.h"
#include "hw/pci/pci.h"
#include "hw/pci-bridge/cxl_upstream_port.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "qemu/units.h"
#include "qemu/uuid.h"
#include "sysemu/hostmem.h"

#define CXL_CAPACITY_MULTIPLIER   (256 * MiB)

/*
 * How to add a new command, example. The command set FOO, with cmd BAR.
 *  1. Add the command set and cmd to the enum.
 *     FOO    = 0x7f,
 *     #define BAR 0
 *  2. Implement the handler, matching the opcode_handler signature used
 *     throughout this file:
 *     static CXLRetCode cmd_foo_bar(const struct cxl_cmd *cmd,
 *                                   uint8_t *payload_in, size_t len_in,
 *                                   uint8_t *payload_out, size_t *len_out,
 *                                   CXLCCI *cci)
 *  3. Add the command to the cxl_cmd_set[][]
 *     [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y },
 *     where x is the expected input payload length (~0 for variable length)
 *     and y is the set of command effects flags.
 *
 * Writing the handler:
 *    The handler is given the &struct cxl_cmd, the input and output payload
 *    buffers, and the in/out payload lengths. It is responsible for consuming
 *    the payload from payload_in and operating upon it as necessary. It must
 *    then fill the output data into payload_out, set *len_out, and return a
 *    valid return code.
 *
 * XXX: The handler need not worry about endianness. The payload is read out of
 * a register interface that already deals with it.
 */
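/*
 * Example (hypothetical - the FOO command set and its BAR command do not
 * exist in this file): a minimal handler following the steps above, which
 * echoes a single byte of payload back to the host.
 *
 *   static CXLRetCode cmd_foo_bar(const struct cxl_cmd *cmd,
 *                                 uint8_t *payload_in, size_t len_in,
 *                                 uint8_t *payload_out, size_t *len_out,
 *                                 CXLCCI *cci)
 *   {
 *       if (len_in < 1) {
 *           return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
 *       }
 *       payload_out[0] = payload_in[0];
 *       *len_out = 1;
 *       return CXL_MBOX_SUCCESS;
 *   }
 *
 *   [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, 1, 0 },
 */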
enum {
    INFOSTAT    = 0x00,
        #define IS_IDENTIFY   0x1
        #define BACKGROUND_OPERATION_STATUS    0x2
    EVENTS      = 0x01,
        #define GET_RECORDS   0x0
        #define CLEAR_RECORDS     0x1
        #define GET_INTERRUPT_POLICY   0x2
        #define SET_INTERRUPT_POLICY   0x3
    FIRMWARE_UPDATE = 0x02,
        #define GET_INFO      0x0
    TIMESTAMP   = 0x03,
        #define GET           0x0
        #define SET           0x1
    LOGS        = 0x04,
        #define GET_SUPPORTED 0x0
        #define GET_LOG       0x1
    IDENTIFY    = 0x40,
        #define MEMORY_DEVICE 0x0
    CCLS        = 0x41,
        #define GET_PARTITION_INFO     0x0
        #define GET_LSA       0x2
        #define SET_LSA       0x3
    MEDIA_AND_POISON = 0x43,
        #define GET_POISON_LIST        0x0
        #define INJECT_POISON          0x1
        #define CLEAR_POISON           0x2
    SANITIZE    = 0x44,
        #define OVERWRITE     0x0
        #define SECURE_ERASE  0x1
    PERSISTENT_MEM = 0x45,
        #define GET_SECURITY_STATE     0x0
    PHYSICAL_SWITCH = 0x51,
        #define IDENTIFY_SWITCH_DEVICE      0x0
        #define GET_PHYSICAL_PORT_STATE     0x1
    TUNNEL = 0x53,
        #define MANAGEMENT_COMMAND     0x0
};

/* CCI Message Format CXL r3.1 Figure 7-19 */
typedef struct CXLCCIMessage {
    uint8_t category;
#define CXL_CCI_CAT_REQ 0
#define CXL_CCI_CAT_RSP 1
    uint8_t tag;
    uint8_t resv1;
    uint8_t command;
    uint8_t command_set;
    uint8_t pl_length[3];
    uint16_t rc;
    uint16_t vendor_specific;
    uint8_t payload[];
} QEMU_PACKED CXLCCIMessage;

/* This command is only defined for an FM-owned LD in an MLD, or for an MHD */
static CXLRetCode cmd_tunnel_management_cmd(const struct cxl_cmd *cmd,
                                            uint8_t *payload_in,
                                            size_t len_in,
                                            uint8_t *payload_out,
                                            size_t *len_out,
                                            CXLCCI *cci)
{
    PCIDevice *tunnel_target;
    CXLCCI *target_cci;
    struct {
        uint8_t port_or_ld_id;
        uint8_t target_type;
        uint16_t size;
        CXLCCIMessage ccimessage;
    } QEMU_PACKED *in;
    struct {
        uint16_t resp_len;
        uint8_t resv[2];
        CXLCCIMessage ccimessage;
    } QEMU_PACKED *out;
    size_t pl_length, length_out;
    bool bg_started;
    int rc;

    if (cmd->in < sizeof(*in)) {
        return CXL_MBOX_INVALID_INPUT;
    }
    in = (void *)payload_in;
    out = (void *)payload_out;

    /* Enough room for minimum sized message - no payload */
    if (in->size < sizeof(in->ccimessage)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /*
     * Length of the input payload should be in->size + a wrapping tunnel
     * header. The input and output tunnel headers are both 4 bytes, so the
     * offset of ccimessage is the same in either struct.
     */
    if (in->size != len_in - offsetof(typeof(*out), ccimessage)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    if (in->ccimessage.category != CXL_CCI_CAT_REQ) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (in->target_type != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "Tunneled Command sent to non-existent FM-LD\n");
        return CXL_MBOX_INVALID_INPUT;
    }

    /*
     * The target of a tunnel unfortunately depends on the type of CCI
     * reading the message.
     * In a switch, it is the port number.
     * In an MLD, it is the LD number.
     * In an MHD, the target type indicates where the message is going.
     */
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

        if (in->port_or_ld_id != 0) {
            /* Only pretending to have one for now! */
            return CXL_MBOX_INVALID_INPUT;
        }
        target_cci = &ct3d->ld0_cci;
    } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
        CXLUpstreamPort *usp = CXL_USP(cci->d);

        tunnel_target = pcie_find_port_by_pn(&PCI_BRIDGE(usp)->sec_bus,
                                             in->port_or_ld_id);
        if (!tunnel_target) {
            return CXL_MBOX_INVALID_INPUT;
        }
        tunnel_target =
            pci_bridge_get_sec_bus(PCI_BRIDGE(tunnel_target))->devices[0];
        if (!tunnel_target) {
            return CXL_MBOX_INVALID_INPUT;
        }
        if (object_dynamic_cast(OBJECT(tunnel_target), TYPE_CXL_TYPE3)) {
            CXLType3Dev *ct3d = CXL_TYPE3(tunnel_target);

            /* Tunneled VDMs always land on the FM-owned LD */
            target_cci = &ct3d->vdm_fm_owned_ld_mctp_cci;
        } else {
            return CXL_MBOX_INVALID_INPUT;
        }
    } else {
        return CXL_MBOX_INVALID_INPUT;
    }

    pl_length = in->ccimessage.pl_length[2] << 16 |
        in->ccimessage.pl_length[1] << 8 | in->ccimessage.pl_length[0];
    rc = cxl_process_cci_message(target_cci,
                                 in->ccimessage.command_set,
                                 in->ccimessage.command,
                                 pl_length, in->ccimessage.payload,
                                 &length_out, out->ccimessage.payload,
                                 &bg_started);
    /* The tunneled payload is in place; fill in the rest of the CCI header */
    out->resp_len = length_out + sizeof(CXLCCIMessage);
    st24_le_p(out->ccimessage.pl_length, length_out);
    out->ccimessage.rc = rc;
    out->ccimessage.category = CXL_CCI_CAT_RSP;
    out->ccimessage.command = in->ccimessage.command;
    out->ccimessage.command_set = in->ccimessage.command_set;
    out->ccimessage.tag = in->ccimessage.tag;
    *len_out = length_out + sizeof(*out);

    return CXL_MBOX_SUCCESS;
}
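/*
 * Illustration of the framing the handler above expects (field values are
 * examples only): an FM tunneling an Information and Status / Identify
 * request (command set 0x00, command 0x1) with no payload to LD 0 of an MLD
 * sends a 4-byte tunnel header followed by the CCI message:
 *
 *   .port_or_ld_id = 0, .target_type = 0, .size = sizeof(CXLCCIMessage),
 *   .ccimessage = { .category = CXL_CCI_CAT_REQ, .tag = <FM chosen>,
 *                   .command = 0x1, .command_set = 0x0,
 *                   .pl_length = { 0, 0, 0 } },
 *
 * with the 24-bit payload length stored little-endian, as st24_le_p() does
 * for the response.
 */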
static CXLRetCode cmd_events_get_records(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in, size_t len_in,
                                         uint8_t *payload_out, size_t *len_out,
                                         CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLGetEventPayload *pl;
    uint8_t log_type;
    int max_recs;

    if (cmd->in < sizeof(log_type)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    log_type = payload_in[0];

    pl = (CXLGetEventPayload *)payload_out;
    memset(pl, 0, sizeof(*pl));

    max_recs = (cxlds->payload_size - CXL_EVENT_PAYLOAD_HDR_SIZE) /
                CXL_EVENT_RECORD_SIZE;
    if (max_recs > 0xFFFF) {
        max_recs = 0xFFFF;
    }

    return cxl_event_get_records(cxlds, pl, log_type, max_recs, len_out);
}

static CXLRetCode cmd_events_clear_records(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLClearEventPayload *pl;

    pl = (CXLClearEventPayload *)payload_in;
    *len_out = 0;
    return cxl_event_clear_records(cxlds, pl);
}
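/*
 * Typical host flow for the two handlers above (illustration): the host
 * issues Get Event Records for a log, then hands the returned event record
 * handles back via Clear Event Records; records are cleared by handle
 * rather than wholesale.
 */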
static CXLRetCode cmd_events_get_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    policy = (CXLEventInterruptPolicy *)payload_out;
    memset(policy, 0, sizeof(*policy));

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    if (log->irq_enabled) {
        policy->info_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    if (log->irq_enabled) {
        policy->warn_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    if (log->irq_enabled) {
        policy->failure_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    if (log->irq_enabled) {
        policy->fatal_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    if (log->irq_enabled) {
        /* Dynamic Capacity borrows the same vector as info */
        policy->dyn_cap_settings = CXL_INT_MSI_MSIX;
    }

    *len_out = sizeof(*policy);
    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cmd_events_set_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    if (len_in < CXL_EVENT_INT_SETTING_MIN_LEN) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    policy = (CXLEventInterruptPolicy *)payload_in;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    log->irq_enabled = (policy->info_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    log->irq_enabled = (policy->warn_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    log->irq_enabled = (policy->failure_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    log->irq_enabled = (policy->fatal_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    /* DCD is optional */
    if (len_in < sizeof(*policy)) {
        return CXL_MBOX_SUCCESS;
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    log->irq_enabled = (policy->dyn_cap_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.1.1: Identify (Opcode 0001h) */
static CXLRetCode cmd_infostat_identify(const struct cxl_cmd *cmd,
                                        uint8_t *payload_in,
                                        size_t len_in,
                                        uint8_t *payload_out,
                                        size_t *len_out,
                                        CXLCCI *cci)
{
    PCIDeviceClass *class = PCI_DEVICE_GET_CLASS(cci->d);
    struct {
        uint16_t pcie_vid;
        uint16_t pcie_did;
        uint16_t pcie_subsys_vid;
        uint16_t pcie_subsys_id;
        uint64_t sn;
        uint8_t max_message_size;
        uint8_t component_type;
    } QEMU_PACKED *is_identify;
    QEMU_BUILD_BUG_ON(sizeof(*is_identify) != 18);

    is_identify = (void *)payload_out;
    memset(is_identify, 0, sizeof(*is_identify));
    is_identify->pcie_vid = class->vendor_id;
    is_identify->pcie_did = class->device_id;
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
        is_identify->sn = CXL_USP(cci->d)->sn;
        /* Subsystem info not defined for a USP */
        is_identify->pcie_subsys_vid = 0;
        is_identify->pcie_subsys_id = 0;
        is_identify->component_type = 0x0; /* Switch */
    } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        PCIDevice *pci_dev = PCI_DEVICE(cci->d);

        is_identify->sn = CXL_TYPE3(cci->d)->sn;
        /*
         * We can't always use class->subsystem_vendor_id as
         * it is not set if the defaults are used.
         */
        is_identify->pcie_subsys_vid =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID);
        is_identify->pcie_subsys_id =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_ID);
        is_identify->component_type = 0x3; /* Type 3 */
    }

    /* TODO: Allow this to vary across different CCIs */
    is_identify->max_message_size = 9; /* 2^9 = 512 bytes, MCTP_CXL_MAILBOX_BYTES */
    *len_out = sizeof(*is_identify);
    return CXL_MBOX_SUCCESS;
}
static void cxl_set_dsp_active_bm(PCIBus *b, PCIDevice *d,
                                  void *private)
{
    uint8_t *bm = private;

    if (object_dynamic_cast(OBJECT(d), TYPE_CXL_DSP)) {
        uint8_t port = PCIE_PORT(d)->port;

        bm[port / 8] |= 1 << (port % 8);
    }
}

/* CXL r3.1 Section 7.6.7.1.1: Identify Switch Device (Opcode 5100h) */
static CXLRetCode cmd_identify_switch_device(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    PCIEPort *usp = PCIE_PORT(cci->d);
    PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
    int num_phys_ports = pcie_count_ds_ports(bus);

    struct cxl_fmapi_ident_switch_dev_resp_pl {
        uint8_t ingress_port_id;
        uint8_t rsvd;
        uint8_t num_physical_ports;
        uint8_t num_vcss;
        uint8_t active_port_bitmask[0x20];
        uint8_t active_vcs_bitmask[0x20];
        uint16_t total_vppbs;
        uint16_t bound_vppbs;
        uint8_t num_hdm_decoders_per_usp;
    } QEMU_PACKED *out;
    QEMU_BUILD_BUG_ON(sizeof(*out) != 0x49);

    out = (struct cxl_fmapi_ident_switch_dev_resp_pl *)payload_out;
    *out = (struct cxl_fmapi_ident_switch_dev_resp_pl) {
        .num_physical_ports = num_phys_ports + 1, /* 1 USP */
        .num_vcss = 1, /* Multiple VCSs not yet supported - potentially tricky */
        .active_vcs_bitmask[0] = 0x1,
        .total_vppbs = num_phys_ports + 1,
        .bound_vppbs = num_phys_ports + 1,
        .num_hdm_decoders_per_usp = 4,
    };

    /* Depends on the CCI type */
    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_PCIE_PORT)) {
        out->ingress_port_id = PCIE_PORT(cci->intf)->port;
    } else {
        /* Not a PCIe port, e.g. an MCTP CCI - no meaningful ingress port id */
        out->ingress_port_id = 0;
    }

    pci_for_each_device_under_bus(bus, cxl_set_dsp_active_bm,
                                  out->active_port_bitmask);
    out->active_port_bitmask[usp->port / 8] |= (1 << usp->port % 8);

    *len_out = sizeof(*out);

    return CXL_MBOX_SUCCESS;
}
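/*
 * Worked example of the bitmask encoding used above (one bit per port id,
 * so port N maps to bit (N % 8) of byte (N / 8)): DSPs on ports 1 and 10
 * plus the USP on port 0 give active_port_bitmask[0] = 0x03 and
 * active_port_bitmask[1] = 0x04.
 */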
/* CXL r3.1 Section 7.6.7.1.2: Get Physical Port State (Opcode 5101h) */
static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    /* CXL r3.1 Table 7-17: Get Physical Port State Request Payload */
    struct cxl_fmapi_get_phys_port_state_req_pl {
        uint8_t num_ports;
        uint8_t ports[];
    } QEMU_PACKED *in;

    /*
     * CXL r3.1 Table 7-19: Get Physical Port State Port Information Block
     * Format
     */
    struct cxl_fmapi_port_state_info_block {
        uint8_t port_id;
        uint8_t config_state;
        uint8_t connected_device_cxl_version;
        uint8_t rsv1;
        uint8_t connected_device_type;
        uint8_t port_cxl_version_bitmask;
        uint8_t max_link_width;
        uint8_t negotiated_link_width;
        uint8_t supported_link_speeds_vector;
        uint8_t max_link_speed;
        uint8_t current_link_speed;
        uint8_t ltssm_state;
        uint8_t first_lane_num;
        uint16_t link_state;
        uint8_t supported_ld_count;
    } QEMU_PACKED;

    /* CXL r3.1 Table 7-18: Get Physical Port State Response Payload */
    struct cxl_fmapi_get_phys_port_state_resp_pl {
        uint8_t num_ports;
        uint8_t rsv1[3];
        struct cxl_fmapi_port_state_info_block ports[];
    } QEMU_PACKED *out;
    PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
    PCIEPort *usp = PCIE_PORT(cci->d);
    size_t pl_size;
    int i;

    in = (struct cxl_fmapi_get_phys_port_state_req_pl *)payload_in;
    out = (struct cxl_fmapi_get_phys_port_state_resp_pl *)payload_out;

    /* Check if what was requested can fit */
    if (sizeof(*out) + sizeof(*out->ports) * in->num_ports > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* For success there should be a match for each requested port */
    out->num_ports = in->num_ports;

    for (i = 0; i < in->num_ports; i++) {
        struct cxl_fmapi_port_state_info_block *port;
        /* First try to match on downstream port */
        PCIDevice *port_dev;
        uint16_t lnkcap, lnkcap2, lnksta;

        port = &out->ports[i];

        port_dev = pcie_find_port_by_pn(bus, in->ports[i]);
        if (port_dev) { /* DSP */
            PCIDevice *ds_dev = pci_bridge_get_sec_bus(PCI_BRIDGE(port_dev))
                ->devices[0];

            port->config_state = 3;
            if (ds_dev) {
                if (object_dynamic_cast(OBJECT(ds_dev), TYPE_CXL_TYPE3)) {
                    port->connected_device_type = 5; /* Assume MLD for now */
                } else {
                    port->connected_device_type = 1; /* PCIe device */
                }
            } else {
                port->connected_device_type = 0; /* No device connected */
            }
            port->supported_ld_count = 3;
        } else if (usp->port == in->ports[i]) { /* USP */
            port_dev = PCI_DEVICE(usp);
            port->config_state = 4;
            port->connected_device_type = 0;
        } else {
            return CXL_MBOX_INVALID_INPUT;
        }

        port->port_id = in->ports[i];
        /* Information on status of this port in lnksta, lnkcap */
        if (!port_dev->exp.exp_cap) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
        lnksta = port_dev->config_read(port_dev,
                                       port_dev->exp.exp_cap + PCI_EXP_LNKSTA,
                                       sizeof(lnksta));
        lnkcap = port_dev->config_read(port_dev,
                                       port_dev->exp.exp_cap + PCI_EXP_LNKCAP,
                                       sizeof(lnkcap));
        lnkcap2 = port_dev->config_read(port_dev,
                                        port_dev->exp.exp_cap + PCI_EXP_LNKCAP2,
                                        sizeof(lnkcap2));

        port->max_link_width = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
        port->negotiated_link_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> 4;
        /* No definition for the SLS vector field in linux/pci_regs.h */
        port->supported_link_speeds_vector = (lnkcap2 & 0xFE) >> 1;
        port->max_link_speed = lnkcap & PCI_EXP_LNKCAP_SLS;
        port->current_link_speed = lnksta & PCI_EXP_LNKSTA_CLS;
        /* TODO: Track down if we can get the rest of the info */
        port->ltssm_state = 0x7;
        port->first_lane_num = 0;
        port->link_state = 0;
        port->port_cxl_version_bitmask = 0x2;
        port->connected_device_cxl_version = 0x2;
    }

    pl_size = sizeof(*out) + sizeof(*out->ports) * in->num_ports;
    *len_out = pl_size;

    return CXL_MBOX_SUCCESS;
}
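/*
 * Example decode for the link fields above: a x8-capable port trained to x4
 * at 8 GT/s reports LNKCAP[9:4] = 8 and LNKSTA[9:4] = 4, so max/negotiated
 * link widths of 8 and 4, and LNKCAP[3:0] = LNKSTA[3:0] = 3 (the PCIe code
 * for 8 GT/s) for the max and current link speeds.
 */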
/* CXL r3.1 Section 8.2.9.1.2: Background Operation Status (Opcode 0002h) */
static CXLRetCode cmd_infostat_bg_op_sts(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint8_t status;
        uint8_t rsvd;
        uint16_t opcode;
        uint16_t returncode;
        uint16_t vendor_ext_status;
    } QEMU_PACKED *bg_op_status;
    QEMU_BUILD_BUG_ON(sizeof(*bg_op_status) != 8);

    bg_op_status = (void *)payload_out;
    memset(bg_op_status, 0, sizeof(*bg_op_status));
    /* Percentage complete in bits [7:1], 'operation in progress' in bit 0 */
    bg_op_status->status = cci->bg.complete_pct << 1;
    if (cci->bg.runtime > 0) {
        bg_op_status->status |= 1U << 0;
    }
    bg_op_status->opcode = cci->bg.opcode;
    bg_op_status->returncode = cci->bg.ret_code;
    *len_out = sizeof(*bg_op_status);

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.3.1: Get FW Info (Opcode 0200h) */
static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    struct {
        uint8_t slots_supported;
        uint8_t slot_info;
        uint8_t caps;
        uint8_t rsvd[0xd];
        char fw_rev1[0x10];
        char fw_rev2[0x10];
        char fw_rev3[0x10];
        char fw_rev4[0x10];
    } QEMU_PACKED *fw_info;
    QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50);

    if ((cxl_dstate->vmem_size < CXL_CAPACITY_MULTIPLIER) ||
        (cxl_dstate->pmem_size < CXL_CAPACITY_MULTIPLIER)) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    fw_info = (void *)payload_out;
    memset(fw_info, 0, sizeof(*fw_info));

    fw_info->slots_supported = 2;
    /* Slot 1 active (bits [2:0]), slot 1 staged (bits [5:3]) */
    fw_info->slot_info = BIT(0) | BIT(3);
    fw_info->caps = 0;
    pstrcpy(fw_info->fw_rev1, sizeof(fw_info->fw_rev1), "BWFW VERSION 0");

    *len_out = sizeof(*fw_info);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.4.1: Get Timestamp (Opcode 0300h) */
static CXLRetCode cmd_timestamp_get(const struct cxl_cmd *cmd,
                                    uint8_t *payload_in,
                                    size_t len_in,
                                    uint8_t *payload_out,
                                    size_t *len_out,
                                    CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    uint64_t final_time = cxl_device_get_timestamp(cxl_dstate);

    stq_le_p(payload_out, final_time);
    *len_out = 8;

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.4.2: Set Timestamp (Opcode 0301h) */
static CXLRetCode cmd_timestamp_set(const struct cxl_cmd *cmd,
                                    uint8_t *payload_in,
                                    size_t len_in,
                                    uint8_t *payload_out,
                                    size_t *len_out,
                                    CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;

    cxl_dstate->timestamp.set = true;
    cxl_dstate->timestamp.last_set = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    cxl_dstate->timestamp.host_set = le64_to_cpu(*(uint64_t *)payload_in);

    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}
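/*
 * Timestamp model implied by the pair of handlers above (see
 * cxl_device_get_timestamp()): reads return approximately
 * host_set + (now - last_set), i.e. the host-provided base plus the
 * virtual-clock nanoseconds elapsed since it was set. For example, a host
 * that sets 1000000 and reads back 2ms later sees roughly 3000000.
 */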
/* CXL r3.1 Section 8.2.9.5.2.1: Command Effects Log (CEL) */
static const QemuUUID cel_uuid = {
    .data = UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79,
                 0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17)
};

/* CXL r3.1 Section 8.2.9.5.1: Get Supported Logs (Opcode 0400h) */
static CXLRetCode cmd_logs_get_supported(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint16_t entries;
        uint8_t rsvd[6];
        struct {
            QemuUUID uuid;
            uint32_t size;
        } log_entries[1];
    } QEMU_PACKED *supported_logs = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*supported_logs) != 0x1c);

    supported_logs->entries = 1;
    supported_logs->log_entries[0].uuid = cel_uuid;
    supported_logs->log_entries[0].size = 4 * cci->cel_size;

    *len_out = sizeof(*supported_logs);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h) */
static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct {
        QemuUUID uuid;
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_log;

    get_log = (void *)payload_in;

    /*
     * CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h)
     * The device shall return Invalid Input if the Offset or Length
     * fields attempt to access beyond the size of the log as reported by Get
     * Supported Logs.
     *
     * The CEL buffer is large enough to fit all commands in the emulation, so
     * the only possible failure would be if the mailbox itself isn't big
     * enough. The cast avoids 32-bit overflow of the sum.
     */
    if ((uint64_t)get_log->offset + get_log->length > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) {
        return CXL_MBOX_INVALID_LOG;
    }

    /*
     * Read the request fields before writing the output: payload_out may
     * overlap payload_in, hence also the memmove rather than memcpy.
     */
    *len_out = get_log->length;

    memmove(payload_out, cci->cel_log + get_log->offset, get_log->length);

    return CXL_MBOX_SUCCESS;
}
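/*
 * The CEL returned above is built in cxl_init_cci() below: one 4-byte entry
 * per implemented command, a 16-bit opcode (set << 8 | cmd) followed by the
 * 16-bit command effects - hence the log size of 4 * cci->cel_size bytes
 * reported by Get Supported Logs.
 */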
743 */ 744 if (get_log->offset + get_log->length > cci->payload_max) { 745 return CXL_MBOX_INVALID_INPUT; 746 } 747 748 if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) { 749 return CXL_MBOX_INVALID_LOG; 750 } 751 752 /* Store off everything to local variables so we can wipe out the payload */ 753 *len_out = get_log->length; 754 755 memmove(payload_out, cci->cel_log + get_log->offset, get_log->length); 756 757 return CXL_MBOX_SUCCESS; 758 } 759 760 /* CXL r3.1 Section 8.2.9.9.1.1: Identify Memory Device (Opcode 4000h) */ 761 static CXLRetCode cmd_identify_memory_device(const struct cxl_cmd *cmd, 762 uint8_t *payload_in, 763 size_t len_in, 764 uint8_t *payload_out, 765 size_t *len_out, 766 CXLCCI *cci) 767 { 768 struct { 769 char fw_revision[0x10]; 770 uint64_t total_capacity; 771 uint64_t volatile_capacity; 772 uint64_t persistent_capacity; 773 uint64_t partition_align; 774 uint16_t info_event_log_size; 775 uint16_t warning_event_log_size; 776 uint16_t failure_event_log_size; 777 uint16_t fatal_event_log_size; 778 uint32_t lsa_size; 779 uint8_t poison_list_max_mer[3]; 780 uint16_t inject_poison_limit; 781 uint8_t poison_caps; 782 uint8_t qos_telemetry_caps; 783 } QEMU_PACKED *id; 784 QEMU_BUILD_BUG_ON(sizeof(*id) != 0x43); 785 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 786 CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); 787 CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; 788 789 if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) || 790 (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER))) { 791 return CXL_MBOX_INTERNAL_ERROR; 792 } 793 794 id = (void *)payload_out; 795 memset(id, 0, sizeof(*id)); 796 797 snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0); 798 799 stq_le_p(&id->total_capacity, 800 cxl_dstate->mem_size / CXL_CAPACITY_MULTIPLIER); 801 stq_le_p(&id->persistent_capacity, 802 cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER); 803 stq_le_p(&id->volatile_capacity, 804 cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER); 805 stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d)); 806 /* 256 poison records */ 807 st24_le_p(id->poison_list_max_mer, 256); 808 /* No limit - so limited by main poison record limit */ 809 stw_le_p(&id->inject_poison_limit, 0); 810 811 *len_out = sizeof(*id); 812 return CXL_MBOX_SUCCESS; 813 } 814 815 /* CXL r3.1 Section 8.2.9.9.2.1: Get Partition Info (Opcode 4100h) */ 816 static CXLRetCode cmd_ccls_get_partition_info(const struct cxl_cmd *cmd, 817 uint8_t *payload_in, 818 size_t len_in, 819 uint8_t *payload_out, 820 size_t *len_out, 821 CXLCCI *cci) 822 { 823 CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate; 824 struct { 825 uint64_t active_vmem; 826 uint64_t active_pmem; 827 uint64_t next_vmem; 828 uint64_t next_pmem; 829 } QEMU_PACKED *part_info = (void *)payload_out; 830 QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20); 831 832 if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) || 833 (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER))) { 834 return CXL_MBOX_INTERNAL_ERROR; 835 } 836 837 stq_le_p(&part_info->active_vmem, 838 cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER); 839 /* 840 * When both next_vmem and next_pmem are 0, there is no pending change to 841 * partitioning. 
/* CXL r3.1 Section 8.2.9.9.2.3: Get LSA (Opcode 4102h) */
static CXLRetCode cmd_ccls_get_lsa(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct {
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED *get_lsa;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    uint32_t offset, length;

    get_lsa = (void *)payload_in;
    offset = get_lsa->offset;
    length = get_lsa->length;

    /* The cast avoids 32-bit overflow of the sum */
    if ((uint64_t)offset + length > cvc->get_lsa_size(ct3d)) {
        *len_out = 0;
        return CXL_MBOX_INVALID_INPUT;
    }

    *len_out = cvc->get_lsa(ct3d, payload_out, length, offset);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.2.4: Set LSA (Opcode 4103h) */
static CXLRetCode cmd_ccls_set_lsa(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct set_lsa_pl {
        uint32_t offset;
        uint32_t rsvd;
        uint8_t data[];
    } QEMU_PACKED;
    struct set_lsa_pl *set_lsa_payload = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    const size_t hdr_len = offsetof(struct set_lsa_pl, data);

    *len_out = 0;
    if (!len_in) {
        return CXL_MBOX_SUCCESS;
    }

    if (set_lsa_payload->offset + len_in > cvc->get_lsa_size(ct3d) + hdr_len) {
        return CXL_MBOX_INVALID_INPUT;
    }
    len_in -= hdr_len;

    cvc->set_lsa(ct3d, set_lsa_payload->data, len_in, set_lsa_payload->offset);
    return CXL_MBOX_SUCCESS;
}

/* Perform the actual device zeroing */
static void do_sanitization(CXLType3Dev *ct3d)
{
    MemoryRegion *mr;

    if (ct3d->hostvmem) {
        mr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (mr) {
            void *hostmem = memory_region_get_ram_ptr(mr);

            memset(hostmem, 0, memory_region_size(mr));
        }
    }

    if (ct3d->hostpmem) {
        mr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (mr) {
            void *hostmem = memory_region_get_ram_ptr(mr);

            memset(hostmem, 0, memory_region_size(mr));
        }
    }
    if (ct3d->lsa) {
        mr = host_memory_backend_get_memory(ct3d->lsa);
        if (mr) {
            void *lsa = memory_region_get_ram_ptr(mr);

            memset(lsa, 0, memory_region_size(mr));
        }
    }
}
957 */ 958 static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd, 959 uint8_t *payload_in, 960 size_t len_in, 961 uint8_t *payload_out, 962 size_t *len_out, 963 CXLCCI *cci) 964 { 965 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 966 uint64_t total_mem; /* in Mb */ 967 int secs; 968 969 total_mem = (ct3d->cxl_dstate.vmem_size + ct3d->cxl_dstate.pmem_size) >> 20; 970 if (total_mem <= 512) { 971 secs = 4; 972 } else if (total_mem <= 1024) { 973 secs = 8; 974 } else if (total_mem <= 2 * 1024) { 975 secs = 15; 976 } else if (total_mem <= 4 * 1024) { 977 secs = 30; 978 } else if (total_mem <= 8 * 1024) { 979 secs = 60; 980 } else if (total_mem <= 16 * 1024) { 981 secs = 2 * 60; 982 } else if (total_mem <= 32 * 1024) { 983 secs = 4 * 60; 984 } else if (total_mem <= 64 * 1024) { 985 secs = 8 * 60; 986 } else if (total_mem <= 128 * 1024) { 987 secs = 15 * 60; 988 } else if (total_mem <= 256 * 1024) { 989 secs = 30 * 60; 990 } else if (total_mem <= 512 * 1024) { 991 secs = 60 * 60; 992 } else if (total_mem <= 1024 * 1024) { 993 secs = 120 * 60; 994 } else { 995 secs = 240 * 60; /* max 4 hrs */ 996 } 997 998 /* EBUSY other bg cmds as of now */ 999 cci->bg.runtime = secs * 1000UL; 1000 *len_out = 0; 1001 1002 cxl_dev_disable_media(&ct3d->cxl_dstate); 1003 1004 /* sanitize when done */ 1005 return CXL_MBOX_BG_STARTED; 1006 } 1007 1008 static CXLRetCode cmd_get_security_state(const struct cxl_cmd *cmd, 1009 uint8_t *payload_in, 1010 size_t len_in, 1011 uint8_t *payload_out, 1012 size_t *len_out, 1013 CXLCCI *cci) 1014 { 1015 uint32_t *state = (uint32_t *)payload_out; 1016 1017 *state = 0; 1018 *len_out = 4; 1019 return CXL_MBOX_SUCCESS; 1020 } 1021 1022 /* 1023 * CXL r3.1 Section 8.2.9.9.4.1: Get Poison List (Opcode 4300h) 1024 * 1025 * This is very inefficient, but good enough for now! 1026 * Also the payload will always fit, so no need to handle the MORE flag and 1027 * make this stateful. We may want to allow longer poison lists to aid 1028 * testing that kernel functionality. 
1029 */ 1030 static CXLRetCode cmd_media_get_poison_list(const struct cxl_cmd *cmd, 1031 uint8_t *payload_in, 1032 size_t len_in, 1033 uint8_t *payload_out, 1034 size_t *len_out, 1035 CXLCCI *cci) 1036 { 1037 struct get_poison_list_pl { 1038 uint64_t pa; 1039 uint64_t length; 1040 } QEMU_PACKED; 1041 1042 struct get_poison_list_out_pl { 1043 uint8_t flags; 1044 uint8_t rsvd1; 1045 uint64_t overflow_timestamp; 1046 uint16_t count; 1047 uint8_t rsvd2[0x14]; 1048 struct { 1049 uint64_t addr; 1050 uint32_t length; 1051 uint32_t resv; 1052 } QEMU_PACKED records[]; 1053 } QEMU_PACKED; 1054 1055 struct get_poison_list_pl *in = (void *)payload_in; 1056 struct get_poison_list_out_pl *out = (void *)payload_out; 1057 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1058 uint16_t record_count = 0, i = 0; 1059 uint64_t query_start, query_length; 1060 CXLPoisonList *poison_list = &ct3d->poison_list; 1061 CXLPoison *ent; 1062 uint16_t out_pl_len; 1063 1064 query_start = ldq_le_p(&in->pa); 1065 /* 64 byte alignment required */ 1066 if (query_start & 0x3f) { 1067 return CXL_MBOX_INVALID_INPUT; 1068 } 1069 query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE; 1070 1071 QLIST_FOREACH(ent, poison_list, node) { 1072 /* Check for no overlap */ 1073 if (ent->start >= query_start + query_length || 1074 ent->start + ent->length <= query_start) { 1075 continue; 1076 } 1077 record_count++; 1078 } 1079 out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]); 1080 assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE); 1081 1082 memset(out, 0, out_pl_len); 1083 QLIST_FOREACH(ent, poison_list, node) { 1084 uint64_t start, stop; 1085 1086 /* Check for no overlap */ 1087 if (ent->start >= query_start + query_length || 1088 ent->start + ent->length <= query_start) { 1089 continue; 1090 } 1091 1092 /* Deal with overlap */ 1093 start = MAX(ROUND_DOWN(ent->start, 64ull), query_start); 1094 stop = MIN(ROUND_DOWN(ent->start, 64ull) + ent->length, 1095 query_start + query_length); 1096 stq_le_p(&out->records[i].addr, start | (ent->type & 0x7)); 1097 stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE); 1098 i++; 1099 } 1100 if (ct3d->poison_list_overflowed) { 1101 out->flags = (1 << 1); 1102 stq_le_p(&out->overflow_timestamp, ct3d->poison_list_overflow_ts); 1103 } 1104 stw_le_p(&out->count, record_count); 1105 *len_out = out_pl_len; 1106 return CXL_MBOX_SUCCESS; 1107 } 1108 1109 /* CXL r3.1 Section 8.2.9.9.4.2: Inject Poison (Opcode 4301h) */ 1110 static CXLRetCode cmd_media_inject_poison(const struct cxl_cmd *cmd, 1111 uint8_t *payload_in, 1112 size_t len_in, 1113 uint8_t *payload_out, 1114 size_t *len_out, 1115 CXLCCI *cci) 1116 { 1117 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1118 CXLPoisonList *poison_list = &ct3d->poison_list; 1119 CXLPoison *ent; 1120 struct inject_poison_pl { 1121 uint64_t dpa; 1122 }; 1123 struct inject_poison_pl *in = (void *)payload_in; 1124 uint64_t dpa = ldq_le_p(&in->dpa); 1125 CXLPoison *p; 1126 1127 QLIST_FOREACH(ent, poison_list, node) { 1128 if (dpa >= ent->start && 1129 dpa + CXL_CACHE_LINE_SIZE <= ent->start + ent->length) { 1130 return CXL_MBOX_SUCCESS; 1131 } 1132 } 1133 1134 if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) { 1135 return CXL_MBOX_INJECT_POISON_LIMIT; 1136 } 1137 p = g_new0(CXLPoison, 1); 1138 1139 p->length = CXL_CACHE_LINE_SIZE; 1140 p->start = dpa; 1141 p->type = CXL_POISON_TYPE_INJECTED; 1142 1143 /* 1144 * Possible todo: Merge with existing entry if next to it and if same type 1145 */ 1146 QLIST_INSERT_HEAD(poison_list, p, node); 1147 
/* CXL r3.1 Section 8.2.9.9.4.2: Inject Poison (Opcode 4301h) */
static CXLRetCode cmd_media_inject_poison(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLPoison *ent;
    struct inject_poison_pl {
        uint64_t dpa;
    };
    struct inject_poison_pl *in = (void *)payload_in;
    uint64_t dpa = ldq_le_p(&in->dpa);
    CXLPoison *p;

    QLIST_FOREACH(ent, poison_list, node) {
        if (dpa >= ent->start &&
            dpa + CXL_CACHE_LINE_SIZE <= ent->start + ent->length) {
            return CXL_MBOX_SUCCESS;
        }
    }

    if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
        return CXL_MBOX_INJECT_POISON_LIMIT;
    }
    p = g_new0(CXLPoison, 1);

    p->length = CXL_CACHE_LINE_SIZE;
    p->start = dpa;
    p->type = CXL_POISON_TYPE_INJECTED;

    /*
     * Possible todo: merge with an existing entry if adjacent to it and of
     * the same type.
     */
    QLIST_INSERT_HEAD(poison_list, p, node);
    ct3d->poison_list_cnt++;
    *len_out = 0;

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.4.3: Clear Poison (Opcode 4302h) */
static CXLRetCode cmd_media_clear_poison(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    struct clear_poison_pl {
        uint64_t dpa;
        uint8_t data[64];
    };
    CXLPoison *ent;
    uint64_t dpa;

    struct clear_poison_pl *in = (void *)payload_in;

    dpa = ldq_le_p(&in->dpa);
    if (dpa + CXL_CACHE_LINE_SIZE > cxl_dstate->mem_size) {
        return CXL_MBOX_INVALID_PA;
    }

    /* Clearing a region with no poison is not an error so always do so */
    if (cvc->set_cacheline) {
        if (!cvc->set_cacheline(ct3d, dpa, in->data)) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
    }

    QLIST_FOREACH(ent, poison_list, node) {
        /*
         * Test for containment within an entry. Simpler than the general
         * case as we are clearing 64 bytes and the entries are 64 byte
         * aligned.
         */
        if ((dpa >= ent->start) && (dpa < ent->start + ent->length)) {
            break;
        }
    }
    if (!ent) {
        return CXL_MBOX_SUCCESS;
    }

    QLIST_REMOVE(ent, node);
    ct3d->poison_list_cnt--;

    /*
     * Clearing one cache line may split the containing entry in two, e.g.
     * clearing DPA 0x40 from an entry covering [0x0, 0x100) leaves the
     * fragments [0x0, 0x40) and [0x80, 0x100).
     */
    if (dpa > ent->start) {
        CXLPoison *frag;
        /* Cannot overflow as we are replacing an existing entry */

        frag = g_new0(CXLPoison, 1);

        frag->start = ent->start;
        frag->length = dpa - ent->start;
        frag->type = ent->type;

        QLIST_INSERT_HEAD(poison_list, frag, node);
        ct3d->poison_list_cnt++;
    }

    if (dpa + CXL_CACHE_LINE_SIZE < ent->start + ent->length) {
        CXLPoison *frag;

        if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
            cxl_set_poison_list_overflowed(ct3d);
        } else {
            frag = g_new0(CXLPoison, 1);

            frag->start = dpa + CXL_CACHE_LINE_SIZE;
            frag->length = ent->start + ent->length - frag->start;
            frag->type = ent->type;
            QLIST_INSERT_HEAD(poison_list, frag, node);
            ct3d->poison_list_cnt++;
        }
    }
    /* Any fragments have been added; free the original entry */
    g_free(ent);
    *len_out = 0;

    return CXL_MBOX_SUCCESS;
}
#define IMMEDIATE_CONFIG_CHANGE (1 << 1)
#define IMMEDIATE_DATA_CHANGE (1 << 2)
#define IMMEDIATE_POLICY_CHANGE (1 << 3)
#define IMMEDIATE_LOG_CHANGE (1 << 4)
#define SECURITY_STATE_CHANGE (1 << 5)
#define BACKGROUND_OPERATION (1 << 6)

static const struct cxl_cmd cxl_cmd_set[256][256] = {
    [EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS",
        cmd_events_get_records, 1, 0 },
    [EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS",
        cmd_events_clear_records, ~0, IMMEDIATE_LOG_CHANGE },
    [EVENTS][GET_INTERRUPT_POLICY] = { "EVENTS_GET_INTERRUPT_POLICY",
        cmd_events_get_interrupt_policy, 0, 0 },
    [EVENTS][SET_INTERRUPT_POLICY] = { "EVENTS_SET_INTERRUPT_POLICY",
        cmd_events_set_interrupt_policy,
        ~0, IMMEDIATE_CONFIG_CHANGE },
    [FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO",
        cmd_firmware_update_get_info, 0, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set,
                         8, IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported,
                              0, 0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [IDENTIFY][MEMORY_DEVICE] = { "IDENTIFY_MEMORY_DEVICE",
        cmd_identify_memory_device, 0, 0 },
    [CCLS][GET_PARTITION_INFO] = { "CCLS_GET_PARTITION_INFO",
        cmd_ccls_get_partition_info, 0, 0 },
    [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 },
    [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa,
        ~0, IMMEDIATE_CONFIG_CHANGE | IMMEDIATE_DATA_CHANGE },
    [SANITIZE][OVERWRITE] = { "SANITIZE_OVERWRITE", cmd_sanitize_overwrite, 0,
        IMMEDIATE_DATA_CHANGE | SECURITY_STATE_CHANGE | BACKGROUND_OPERATION },
    [PERSISTENT_MEM][GET_SECURITY_STATE] = { "GET_SECURITY_STATE",
        cmd_get_security_state, 0, 0 },
    [MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST",
        cmd_media_get_poison_list, 16, 0 },
    [MEDIA_AND_POISON][INJECT_POISON] = { "MEDIA_AND_POISON_INJECT_POISON",
        cmd_media_inject_poison, 8, 0 },
    [MEDIA_AND_POISON][CLEAR_POISON] = { "MEDIA_AND_POISON_CLEAR_POISON",
        cmd_media_clear_poison, 72, 0 },
};

static const struct cxl_cmd cxl_cmd_set_sw[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [INFOSTAT][BACKGROUND_OPERATION_STATUS] = { "BACKGROUND_OPERATION_STATUS",
        cmd_infostat_bg_op_sts, 0, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set,
                         8, IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported,
                              0, 0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [PHYSICAL_SWITCH][IDENTIFY_SWITCH_DEVICE] = { "IDENTIFY_SWITCH_DEVICE",
        cmd_identify_switch_device, 0, 0 },
    [PHYSICAL_SWITCH][GET_PHYSICAL_PORT_STATE] = { "SWITCH_PHYSICAL_PORT_STATE",
        cmd_get_physical_port_state, ~0, 0 },
    [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
        cmd_tunnel_management_cmd, ~0, 0 },
};
1304 */ 1305 1306 #define CXL_MBOX_BG_UPDATE_FREQ 1000UL 1307 1308 int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd, 1309 size_t len_in, uint8_t *pl_in, size_t *len_out, 1310 uint8_t *pl_out, bool *bg_started) 1311 { 1312 int ret; 1313 const struct cxl_cmd *cxl_cmd; 1314 opcode_handler h; 1315 1316 *len_out = 0; 1317 cxl_cmd = &cci->cxl_cmd_set[set][cmd]; 1318 h = cxl_cmd->handler; 1319 if (!h) { 1320 qemu_log_mask(LOG_UNIMP, "Command %04xh not implemented\n", 1321 set << 8 | cmd); 1322 return CXL_MBOX_UNSUPPORTED; 1323 } 1324 1325 if (len_in != cxl_cmd->in && cxl_cmd->in != ~0) { 1326 return CXL_MBOX_INVALID_PAYLOAD_LENGTH; 1327 } 1328 1329 /* Only one bg command at a time */ 1330 if ((cxl_cmd->effect & BACKGROUND_OPERATION) && 1331 cci->bg.runtime > 0) { 1332 return CXL_MBOX_BUSY; 1333 } 1334 1335 /* forbid any selected commands while overwriting */ 1336 if (sanitize_running(cci)) { 1337 if (h == cmd_events_get_records || 1338 h == cmd_ccls_get_partition_info || 1339 h == cmd_ccls_set_lsa || 1340 h == cmd_ccls_get_lsa || 1341 h == cmd_logs_get_log || 1342 h == cmd_media_get_poison_list || 1343 h == cmd_media_inject_poison || 1344 h == cmd_media_clear_poison || 1345 h == cmd_sanitize_overwrite) { 1346 return CXL_MBOX_MEDIA_DISABLED; 1347 } 1348 } 1349 1350 ret = (*h)(cxl_cmd, pl_in, len_in, pl_out, len_out, cci); 1351 if ((cxl_cmd->effect & BACKGROUND_OPERATION) && 1352 ret == CXL_MBOX_BG_STARTED) { 1353 *bg_started = true; 1354 } else { 1355 *bg_started = false; 1356 } 1357 1358 /* Set bg and the return code */ 1359 if (*bg_started) { 1360 uint64_t now; 1361 1362 cci->bg.opcode = (set << 8) | cmd; 1363 1364 cci->bg.complete_pct = 0; 1365 cci->bg.ret_code = 0; 1366 1367 now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); 1368 cci->bg.starttime = now; 1369 timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ); 1370 } 1371 1372 return ret; 1373 } 1374 1375 static void bg_timercb(void *opaque) 1376 { 1377 CXLCCI *cci = opaque; 1378 uint64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); 1379 uint64_t total_time = cci->bg.starttime + cci->bg.runtime; 1380 1381 assert(cci->bg.runtime > 0); 1382 1383 if (now >= total_time) { /* we are done */ 1384 uint16_t ret = CXL_MBOX_SUCCESS; 1385 1386 cci->bg.complete_pct = 100; 1387 cci->bg.ret_code = ret; 1388 switch (cci->bg.opcode) { 1389 case 0x4400: /* sanitize */ 1390 { 1391 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1392 1393 __do_sanitization(ct3d); 1394 cxl_dev_enable_media(&ct3d->cxl_dstate); 1395 } 1396 break; 1397 case 0x4304: /* TODO: scan media */ 1398 break; 1399 default: 1400 __builtin_unreachable(); 1401 break; 1402 } 1403 } else { 1404 /* estimate only */ 1405 cci->bg.complete_pct = 100 * now / total_time; 1406 timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ); 1407 } 1408 1409 if (cci->bg.complete_pct == 100) { 1410 /* TODO: generalize to switch CCI */ 1411 CXLType3Dev *ct3d = CXL_TYPE3(cci->d); 1412 CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; 1413 PCIDevice *pdev = PCI_DEVICE(cci->d); 1414 1415 cci->bg.starttime = 0; 1416 /* registers are updated, allow new bg-capable cmds */ 1417 cci->bg.runtime = 0; 1418 1419 if (msix_enabled(pdev)) { 1420 msix_notify(pdev, cxl_dstate->mbox_msi_n); 1421 } else if (msi_enabled(pdev)) { 1422 msi_notify(pdev, cxl_dstate->mbox_msi_n); 1423 } 1424 } 1425 } 1426 1427 void cxl_init_cci(CXLCCI *cci, size_t payload_max) 1428 { 1429 cci->payload_max = payload_max; 1430 for (int set = 0; set < 256; set++) { 1431 for (int cmd = 0; cmd < 256; cmd++) { 1432 if (cci->cxl_cmd_set[set][cmd].handler) { 
static void bg_timercb(void *opaque)
{
    CXLCCI *cci = opaque;
    uint64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    uint64_t total_time = cci->bg.starttime + cci->bg.runtime;

    assert(cci->bg.runtime > 0);

    if (now >= total_time) { /* we are done */
        uint16_t ret = CXL_MBOX_SUCCESS;

        cci->bg.complete_pct = 100;
        cci->bg.ret_code = ret;
        switch (cci->bg.opcode) {
        case 0x4400: /* sanitize */
        {
            CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

            do_sanitization(ct3d);
            cxl_dev_enable_media(&ct3d->cxl_dstate);
            break;
        }
        case 0x4304: /* TODO: scan media */
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        /* Estimate only: percentage of runtime elapsed so far */
        cci->bg.complete_pct =
            100 * (now - cci->bg.starttime) / cci->bg.runtime;
        timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
    }

    if (cci->bg.complete_pct == 100) {
        /* TODO: generalize to switch CCI */
        CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
        CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
        PCIDevice *pdev = PCI_DEVICE(cci->d);

        cci->bg.starttime = 0;
        /* registers are updated, allow new bg-capable cmds */
        cci->bg.runtime = 0;

        if (msix_enabled(pdev)) {
            msix_notify(pdev, cxl_dstate->mbox_msi_n);
        } else if (msi_enabled(pdev)) {
            msi_notify(pdev, cxl_dstate->mbox_msi_n);
        }
    }
}

void cxl_init_cci(CXLCCI *cci, size_t payload_max)
{
    cci->payload_max = payload_max;
    for (int set = 0; set < 256; set++) {
        for (int cmd = 0; cmd < 256; cmd++) {
            if (cci->cxl_cmd_set[set][cmd].handler) {
                const struct cxl_cmd *c = &cci->cxl_cmd_set[set][cmd];
                struct cel_log *log =
                    &cci->cel_log[cci->cel_size];

                log->opcode = (set << 8) | cmd;
                log->effect = c->effect;
                cci->cel_size++;
            }
        }
    }
    cci->bg.complete_pct = 0;
    cci->bg.starttime = 0;
    cci->bg.runtime = 0;
    cci->bg.timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                 bg_timercb, cci);
}

void cxl_initialize_mailbox_swcci(CXLCCI *cci, DeviceState *intf,
                                  DeviceState *d, size_t payload_max)
{
    cci->cxl_cmd_set = cxl_cmd_set_sw;
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}

void cxl_initialize_mailbox_t3(CXLCCI *cci, DeviceState *d, size_t payload_max)
{
    cci->cxl_cmd_set = cxl_cmd_set;
    cci->d = d;

    /* No separation for the PCI MB as the protocol is handled by the PCI device */
    cci->intf = d;
    cxl_init_cci(cci, payload_max);
}

static const struct cxl_cmd cxl_cmd_set_t3_ld[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported,
                              0, 0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
};

void cxl_initialize_t3_ld_cci(CXLCCI *cci, DeviceState *d, DeviceState *intf,
                              size_t payload_max)
{
    cci->cxl_cmd_set = cxl_cmd_set_t3_ld;
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}

static const struct cxl_cmd cxl_cmd_set_t3_fm_owned_ld_mctp[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported,
                              0, 0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
        cmd_tunnel_management_cmd, ~0, 0 },
};

void cxl_initialize_t3_fm_owned_ld_mctpcci(CXLCCI *cci, DeviceState *d,
                                           DeviceState *intf,
                                           size_t payload_max)
{
    cci->cxl_cmd_set = cxl_cmd_set_t3_fm_owned_ld_mctp;
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}