/*
 * CXL Utility library for mailbox interface
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/cxl/cxl.h"
#include "hw/cxl/cxl_events.h"
#include "hw/pci/pci.h"
#include "hw/pci-bridge/cxl_upstream_port.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "qemu/units.h"
#include "qemu/uuid.h"
#include "sysemu/hostmem.h"
#include "qemu/range.h"

#define CXL_CAPACITY_MULTIPLIER   (256 * MiB)
#define CXL_DC_EVENT_LOG_SIZE 8
#define CXL_NUM_EXTENTS_SUPPORTED 512
#define CXL_NUM_TAGS_SUPPORTED 0

/*
 * How to add a new command, example. The command set FOO, with cmd BAR.
 *  1. Add the command set and cmd to the enum.
 *     FOO    = 0x7f,
 *        #define BAR 0
 *  2. Implement the handler
 *     static CXLRetCode cmd_foo_bar(struct cxl_cmd *cmd,
 *                                   CXLDeviceState *cxl_dstate, uint16_t *len)
 *  3. Add the command to the cxl_cmd_set[][]
 *     [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y },
 *  4. Implement your handler
 *     define_mailbox_handler(FOO_BAR) { ... return CXL_MBOX_SUCCESS; }
 *
 *
 * Writing the handler:
 *    The handler will provide the &struct cxl_cmd, the &CXLDeviceState, and
 *    the in/out length of the payload. The handler is responsible for
 *    consuming the payload from cmd->payload and operating upon it as
 *    necessary. It must then fill the output data into cmd->payload
 *    (overwriting what was there), setting the length, and returning a valid
 *    return code.
 *
 * XXX: The handler need not worry about endianness. The payload is read out
 * of a register interface that already deals with it.
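 *
 * A minimal handler skeleton following the conventions above (FOO/BAR and
 * cmd_foo_bar are the placeholder names from the example steps, not a real
 * command; sketch only, using the opcode_handler signature the handlers in
 * this file actually take):
 *
 *     static CXLRetCode cmd_foo_bar(const struct cxl_cmd *cmd,
 *                                   uint8_t *payload_in, size_t len_in,
 *                                   uint8_t *payload_out, size_t *len_out,
 *                                   CXLCCI *cci)
 *     {
 *         uint32_t *out = (uint32_t *)payload_out;
 *
 *         *out = 0;
 *         *len_out = sizeof(*out);
 *         return CXL_MBOX_SUCCESS;
 *     }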
 */

enum {
    INFOSTAT    = 0x00,
        #define IS_IDENTIFY                 0x1
        #define BACKGROUND_OPERATION_STATUS 0x2
    EVENTS      = 0x01,
        #define GET_RECORDS          0x0
        #define CLEAR_RECORDS        0x1
        #define GET_INTERRUPT_POLICY 0x2
        #define SET_INTERRUPT_POLICY 0x3
    FIRMWARE_UPDATE = 0x02,
        #define GET_INFO      0x0
    TIMESTAMP   = 0x03,
        #define GET           0x0
        #define SET           0x1
    LOGS        = 0x04,
        #define GET_SUPPORTED 0x0
        #define GET_LOG       0x1
    IDENTIFY    = 0x40,
        #define MEMORY_DEVICE 0x0
    CCLS        = 0x41,
        #define GET_PARTITION_INFO 0x0
        #define GET_LSA            0x2
        #define SET_LSA            0x3
    SANITIZE    = 0x44,
        #define OVERWRITE     0x0
        #define SECURE_ERASE  0x1
    PERSISTENT_MEM = 0x45,
        #define GET_SECURITY_STATE 0x0
    MEDIA_AND_POISON = 0x43,
        #define GET_POISON_LIST 0x0
        #define INJECT_POISON   0x1
        #define CLEAR_POISON    0x2
    DCD_CONFIG  = 0x48,
        #define GET_DC_CONFIG        0x0
        #define GET_DYN_CAP_EXT_LIST 0x1
        #define ADD_DYN_CAP_RSP      0x2
        #define RELEASE_DYN_CAP      0x3
    PHYSICAL_SWITCH = 0x51,
        #define IDENTIFY_SWITCH_DEVICE  0x0
        #define GET_PHYSICAL_PORT_STATE 0x1
    TUNNEL = 0x53,
        #define MANAGEMENT_COMMAND 0x0
};

/* CCI Message Format CXL r3.1 Figure 7-19 */
typedef struct CXLCCIMessage {
    uint8_t category;
#define CXL_CCI_CAT_REQ 0
#define CXL_CCI_CAT_RSP 1
    uint8_t tag;
    uint8_t resv1;
    uint8_t command;
    uint8_t command_set;
    uint8_t pl_length[3];
    uint16_t rc;
    uint16_t vendor_specific;
    uint8_t payload[];
} QEMU_PACKED CXLCCIMessage;

/* This command is only defined to an MLD FM Owned LD or an MHD */
static CXLRetCode cmd_tunnel_management_cmd(const struct cxl_cmd *cmd,
                                            uint8_t *payload_in,
                                            size_t len_in,
                                            uint8_t *payload_out,
                                            size_t *len_out,
                                            CXLCCI *cci)
{
    PCIDevice *tunnel_target;
    CXLCCI *target_cci;
    struct {
        uint8_t port_or_ld_id;
        uint8_t target_type;
        uint16_t size;
        CXLCCIMessage ccimessage;
    } QEMU_PACKED *in;
    struct {
        uint16_t resp_len;
        uint8_t resv[2];
        CXLCCIMessage ccimessage;
    } QEMU_PACKED *out;
    size_t pl_length, length_out;
    bool bg_started;
    int rc;

    if (cmd->in < sizeof(*in)) {
        return CXL_MBOX_INVALID_INPUT;
    }
    in = (void *)payload_in;
    out = (void *)payload_out;

    /* Enough room for minimum sized message - no payload */
    if (in->size < sizeof(in->ccimessage)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /* Length of input payload should be in->size + a wrapping tunnel header */
    if (in->size != len_in - offsetof(typeof(*out), ccimessage)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    if (in->ccimessage.category != CXL_CCI_CAT_REQ) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (in->target_type != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "Tunneled Command sent to non-existent FM-LD");
        return CXL_MBOX_INVALID_INPUT;
    }

    /*
     * The target of a tunnel unfortunately depends on the type of CCI
     * reading the message.
     * If in a switch, then it's the port number.
     * If in an MLD it is the LD number.
     * If in an MHD, the target type indicates where we are going.
     */
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

        if (in->port_or_ld_id != 0) {
            /* Only pretending to have one for now! */
            return CXL_MBOX_INVALID_INPUT;
        }
        target_cci = &ct3d->ld0_cci;
    } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
        CXLUpstreamPort *usp = CXL_USP(cci->d);

        tunnel_target = pcie_find_port_by_pn(&PCI_BRIDGE(usp)->sec_bus,
                                             in->port_or_ld_id);
        if (!tunnel_target) {
            return CXL_MBOX_INVALID_INPUT;
        }
        tunnel_target =
            pci_bridge_get_sec_bus(PCI_BRIDGE(tunnel_target))->devices[0];
        if (!tunnel_target) {
            return CXL_MBOX_INVALID_INPUT;
        }
        if (object_dynamic_cast(OBJECT(tunnel_target), TYPE_CXL_TYPE3)) {
            CXLType3Dev *ct3d = CXL_TYPE3(tunnel_target);

            /* Tunneled VDMs always land on FM Owned LD */
            target_cci = &ct3d->vdm_fm_owned_ld_mctp_cci;
        } else {
            return CXL_MBOX_INVALID_INPUT;
        }
    } else {
        return CXL_MBOX_INVALID_INPUT;
    }

    pl_length = in->ccimessage.pl_length[2] << 16 |
        in->ccimessage.pl_length[1] << 8 | in->ccimessage.pl_length[0];
    rc = cxl_process_cci_message(target_cci,
                                 in->ccimessage.command_set,
                                 in->ccimessage.command,
                                 pl_length, in->ccimessage.payload,
                                 &length_out, out->ccimessage.payload,
                                 &bg_started);
    /* Payload should be in place. The rest of the CCI header needs filling. */
    out->resp_len = length_out + sizeof(CXLCCIMessage);
    st24_le_p(out->ccimessage.pl_length, length_out);
    out->ccimessage.rc = rc;
    out->ccimessage.category = CXL_CCI_CAT_RSP;
    out->ccimessage.command = in->ccimessage.command;
    out->ccimessage.command_set = in->ccimessage.command_set;
    out->ccimessage.tag = in->ccimessage.tag;
    *len_out = length_out + sizeof(*out);

    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cmd_events_get_records(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in, size_t len_in,
                                         uint8_t *payload_out, size_t *len_out,
                                         CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLGetEventPayload *pl;
    uint8_t log_type;
    int max_recs;

    if (cmd->in < sizeof(log_type)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    log_type = payload_in[0];

    pl = (CXLGetEventPayload *)payload_out;
    memset(pl, 0, sizeof(*pl));

    max_recs = (cxlds->payload_size - CXL_EVENT_PAYLOAD_HDR_SIZE) /
        CXL_EVENT_RECORD_SIZE;
    if (max_recs > 0xFFFF) {
        max_recs = 0xFFFF;
    }

    return cxl_event_get_records(cxlds, pl, log_type, max_recs, len_out);
}

static CXLRetCode cmd_events_clear_records(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLClearEventPayload *pl;

    pl = (CXLClearEventPayload *)payload_in;
    *len_out = 0;
    return cxl_event_clear_records(cxlds, pl);
}

static CXLRetCode cmd_events_get_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    policy = (CXLEventInterruptPolicy *)payload_out;
    memset(policy, 0, sizeof(*policy));

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    if (log->irq_enabled) {
        policy->info_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    if (log->irq_enabled) {
        policy->warn_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    if (log->irq_enabled) {
        policy->failure_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    if (log->irq_enabled) {
        policy->fatal_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    if (log->irq_enabled) {
        /* Dynamic Capacity borrows the same vector as info */
        policy->dyn_cap_settings = CXL_INT_MSI_MSIX;
    }

    *len_out = sizeof(*policy);
    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cmd_events_set_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    if (len_in < CXL_EVENT_INT_SETTING_MIN_LEN) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    policy = (CXLEventInterruptPolicy *)payload_in;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    log->irq_enabled = (policy->info_settings & CXL_EVENT_INT_MODE_MASK) ==
        CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    log->irq_enabled = (policy->warn_settings & CXL_EVENT_INT_MODE_MASK) ==
        CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    log->irq_enabled = (policy->failure_settings & CXL_EVENT_INT_MODE_MASK) ==
        CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    log->irq_enabled = (policy->fatal_settings & CXL_EVENT_INT_MODE_MASK) ==
        CXL_INT_MSI_MSIX;

    /* DCD is optional */
    if (len_in < sizeof(*policy)) {
        return CXL_MBOX_SUCCESS;
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    log->irq_enabled = (policy->dyn_cap_settings & CXL_EVENT_INT_MODE_MASK) ==
        CXL_INT_MSI_MSIX;

    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.1.1: Identify (Opcode 0001h) */
static CXLRetCode cmd_infostat_identify(const struct cxl_cmd *cmd,
                                        uint8_t *payload_in,
                                        size_t len_in,
                                        uint8_t *payload_out,
                                        size_t *len_out,
                                        CXLCCI *cci)
{
    PCIDeviceClass *class = PCI_DEVICE_GET_CLASS(cci->d);
    struct {
        uint16_t pcie_vid;
        uint16_t pcie_did;
        uint16_t pcie_subsys_vid;
        uint16_t pcie_subsys_id;
        uint64_t sn;
        uint8_t max_message_size;
        uint8_t component_type;
    } QEMU_PACKED *is_identify;
    QEMU_BUILD_BUG_ON(sizeof(*is_identify) != 18);

    is_identify = (void *)payload_out;
    memset(is_identify, 0, sizeof(*is_identify));
    is_identify->pcie_vid = class->vendor_id;
    is_identify->pcie_did = class->device_id;
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
        is_identify->sn = CXL_USP(cci->d)->sn;
        /* Subsystem info not defined for a USP */
        is_identify->pcie_subsys_vid = 0;
        is_identify->pcie_subsys_id = 0;
        is_identify->component_type = 0x0; /* Switch */
    } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        PCIDevice *pci_dev = PCI_DEVICE(cci->d);

        is_identify->sn = CXL_TYPE3(cci->d)->sn;
        /*
         * We can't always use class->subsystem_vendor_id as
         * it is not set if the defaults are used.
         */
        is_identify->pcie_subsys_vid =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID);
        is_identify->pcie_subsys_id =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_ID);
        is_identify->component_type = 0x3; /* Type 3 */
    }

    /* TODO: Allow this to vary across different CCIs */
    is_identify->max_message_size = 9; /* 512 bytes - MCTP_CXL_MAILBOX_BYTES */
    *len_out = sizeof(*is_identify);
    return CXL_MBOX_SUCCESS;
}

static void cxl_set_dsp_active_bm(PCIBus *b, PCIDevice *d,
                                  void *private)
{
    uint8_t *bm = private;

    if (object_dynamic_cast(OBJECT(d), TYPE_CXL_DSP)) {
        uint8_t port = PCIE_PORT(d)->port;

        bm[port / 8] |= 1 << (port % 8);
    }
}

/* CXL r3.1 Section 7.6.7.1.1: Identify Switch Device (Opcode 5100h) */
static CXLRetCode cmd_identify_switch_device(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    PCIEPort *usp = PCIE_PORT(cci->d);
    PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
    int num_phys_ports = pcie_count_ds_ports(bus);

    struct cxl_fmapi_ident_switch_dev_resp_pl {
        uint8_t ingress_port_id;
        uint8_t rsvd;
        uint8_t num_physical_ports;
        uint8_t num_vcss;
        uint8_t active_port_bitmask[0x20];
        uint8_t active_vcs_bitmask[0x20];
        uint16_t total_vppbs;
        uint16_t bound_vppbs;
        uint8_t num_hdm_decoders_per_usp;
    } QEMU_PACKED *out;
    QEMU_BUILD_BUG_ON(sizeof(*out) != 0x49);

    out = (struct cxl_fmapi_ident_switch_dev_resp_pl *)payload_out;
    *out = (struct cxl_fmapi_ident_switch_dev_resp_pl) {
        .num_physical_ports = num_phys_ports + 1, /* 1 USP */
        .num_vcss = 1, /* Multiple VCSs not yet supported - potentially tricky */
        .active_vcs_bitmask[0] = 0x1,
        .total_vppbs = num_phys_ports + 1,
        .bound_vppbs = num_phys_ports + 1,
        .num_hdm_decoders_per_usp = 4,
    };

    /* Depends on the CCI type */
    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_PCIE_PORT)) {
        out->ingress_port_id = PCIE_PORT(cci->intf)->port;
    } else {
        /* MCTP? */
        out->ingress_port_id = 0;
    }

    pci_for_each_device_under_bus(bus, cxl_set_dsp_active_bm,
                                  out->active_port_bitmask);
    out->active_port_bitmask[usp->port / 8] |= (1 << usp->port % 8);

    *len_out = sizeof(*out);

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 7.6.7.1.2: Get Physical Port State (Opcode 5101h) */
static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    /* CXL r3.1 Table 7-17: Get Physical Port State Request Payload */
    struct cxl_fmapi_get_phys_port_state_req_pl {
        uint8_t num_ports;
        uint8_t ports[];
    } QEMU_PACKED *in;

    /*
     * CXL r3.1 Table 7-19: Get Physical Port State Port Information Block
     * Format
     */
    struct cxl_fmapi_port_state_info_block {
        uint8_t port_id;
        uint8_t config_state;
        uint8_t connected_device_cxl_version;
        uint8_t rsv1;
        uint8_t connected_device_type;
        uint8_t port_cxl_version_bitmask;
        uint8_t max_link_width;
        uint8_t negotiated_link_width;
        uint8_t supported_link_speeds_vector;
        uint8_t max_link_speed;
        uint8_t current_link_speed;
        uint8_t ltssm_state;
        uint8_t first_lane_num;
        uint16_t link_state;
        uint8_t supported_ld_count;
    } QEMU_PACKED;

    /* CXL r3.1 Table 7-18: Get Physical Port State Response Payload */
    struct cxl_fmapi_get_phys_port_state_resp_pl {
        uint8_t num_ports;
        uint8_t rsv1[3];
        struct cxl_fmapi_port_state_info_block ports[];
    } QEMU_PACKED *out;
    PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
    PCIEPort *usp = PCIE_PORT(cci->d);
    size_t pl_size;
    int i;

    in = (struct cxl_fmapi_get_phys_port_state_req_pl *)payload_in;
    out = (struct cxl_fmapi_get_phys_port_state_resp_pl *)payload_out;

    /* Check if what was requested can fit */
    if (sizeof(*out) + sizeof(*out->ports) * in->num_ports > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* For success there should be a match for each requested */
    out->num_ports = in->num_ports;

    for (i = 0; i < in->num_ports; i++) {
        struct cxl_fmapi_port_state_info_block *port;
        /* First try to match on downstream port */
        PCIDevice *port_dev;
        uint16_t lnkcap, lnkcap2, lnksta;

        port = &out->ports[i];

        port_dev = pcie_find_port_by_pn(bus, in->ports[i]);
        if (port_dev) { /* DSP */
            PCIDevice *ds_dev = pci_bridge_get_sec_bus(PCI_BRIDGE(port_dev))
                ->devices[0];

            port->config_state = 3;
            if (ds_dev) {
                if (object_dynamic_cast(OBJECT(ds_dev), TYPE_CXL_TYPE3)) {
                    port->connected_device_type = 5; /* Assume MLD for now */
                } else {
                    port->connected_device_type = 1;
                }
            } else {
                port->connected_device_type = 0;
            }
            port->supported_ld_count = 3;
        } else if (usp->port == in->ports[i]) { /* USP */
            port_dev = PCI_DEVICE(usp);
            port->config_state = 4;
            port->connected_device_type = 0;
        } else {
            return CXL_MBOX_INVALID_INPUT;
        }

        port->port_id = in->ports[i];
        /* Information on status of this port in lnksta, lnkcap */
        if (!port_dev->exp.exp_cap) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
        lnksta = port_dev->config_read(port_dev,
                                       port_dev->exp.exp_cap + PCI_EXP_LNKSTA,
                                       sizeof(lnksta));
        lnkcap = port_dev->config_read(port_dev,
                                       port_dev->exp.exp_cap + PCI_EXP_LNKCAP,
                                       sizeof(lnkcap));
        lnkcap2 = port_dev->config_read(port_dev,
                                        port_dev->exp.exp_cap + PCI_EXP_LNKCAP2,
                                        sizeof(lnkcap2));

        port->max_link_width = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
        port->negotiated_link_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> 4;
        /* No definition for SLS field in linux/pci_regs.h */
        port->supported_link_speeds_vector = (lnkcap2 & 0xFE) >> 1;
        port->max_link_speed = lnkcap & PCI_EXP_LNKCAP_SLS;
        port->current_link_speed = lnksta & PCI_EXP_LNKSTA_CLS;
        /* TODO: Track down if we can get the rest of the info */
        port->ltssm_state = 0x7;
        port->first_lane_num = 0;
        port->link_state = 0;
        port->port_cxl_version_bitmask = 0x2;
        port->connected_device_cxl_version = 0x2;
    }

    pl_size = sizeof(*out) + sizeof(*out->ports) * in->num_ports;
    *len_out = pl_size;

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.1.2: Background Operation Status (Opcode 0002h) */
static CXLRetCode cmd_infostat_bg_op_sts(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint8_t status;
        uint8_t rsvd;
        uint16_t opcode;
        uint16_t returncode;
        uint16_t vendor_ext_status;
    } QEMU_PACKED *bg_op_status;
    QEMU_BUILD_BUG_ON(sizeof(*bg_op_status) != 8);

    bg_op_status = (void *)payload_out;
    memset(bg_op_status, 0, sizeof(*bg_op_status));
    bg_op_status->status = cci->bg.complete_pct << 1;
    if (cci->bg.runtime > 0) {
        bg_op_status->status |= 1U << 0;
    }
    bg_op_status->opcode = cci->bg.opcode;
    bg_op_status->returncode = cci->bg.ret_code;
    *len_out = sizeof(*bg_op_status);

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.3.1: Get FW Info (Opcode 0200h) */
static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    struct {
        uint8_t slots_supported;
        uint8_t slot_info;
        uint8_t caps;
        uint8_t rsvd[0xd];
        char fw_rev1[0x10];
        char fw_rev2[0x10];
        char fw_rev3[0x10];
        char fw_rev4[0x10];
    } QEMU_PACKED *fw_info;
    QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50);

    if ((cxl_dstate->vmem_size < CXL_CAPACITY_MULTIPLIER) ||
        (cxl_dstate->pmem_size < CXL_CAPACITY_MULTIPLIER) ||
        (ct3d->dc.total_capacity < CXL_CAPACITY_MULTIPLIER)) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    fw_info = (void *)payload_out;
    memset(fw_info, 0, sizeof(*fw_info));

    fw_info->slots_supported = 2;
    fw_info->slot_info = BIT(0) | BIT(3);
    fw_info->caps = 0;
    pstrcpy(fw_info->fw_rev1, sizeof(fw_info->fw_rev1), "BWFW VERSION 0");

    *len_out = sizeof(*fw_info);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.4.1: Get Timestamp (Opcode 0300h) */
static CXLRetCode cmd_timestamp_get(const struct cxl_cmd *cmd,
                                    uint8_t *payload_in,
                                    size_t len_in,
                                    uint8_t *payload_out,
                                    size_t *len_out,
                                    CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    uint64_t final_time = cxl_device_get_timestamp(cxl_dstate);

    stq_le_p(payload_out, final_time);
    *len_out = 8;

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.4.2: Set Timestamp (Opcode 0301h) */
static CXLRetCode cmd_timestamp_set(const struct cxl_cmd *cmd,
                                    uint8_t *payload_in,
                                    size_t len_in,
                                    uint8_t *payload_out,
                                    size_t *len_out,
                                    CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;

    cxl_dstate->timestamp.set = true;
    cxl_dstate->timestamp.last_set = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    cxl_dstate->timestamp.host_set = le64_to_cpu(*(uint64_t *)payload_in);

    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.5.2.1: Command Effects Log (CEL) */
static const QemuUUID cel_uuid = {
    .data = UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79,
                 0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17)
};

/* CXL r3.1 Section 8.2.9.5.1: Get Supported Logs (Opcode 0400h) */
static CXLRetCode cmd_logs_get_supported(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint16_t entries;
        uint8_t rsvd[6];
        struct {
            QemuUUID uuid;
            uint32_t size;
        } log_entries[1];
    } QEMU_PACKED *supported_logs = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*supported_logs) != 0x1c);

    supported_logs->entries = 1;
    supported_logs->log_entries[0].uuid = cel_uuid;
    supported_logs->log_entries[0].size = 4 * cci->cel_size;

    *len_out = sizeof(*supported_logs);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h) */
static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct {
        QemuUUID uuid;
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_log;

    get_log = (void *)payload_in;

    /*
     * CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h)
     *   The device shall return Invalid Input if the Offset or Length
     *   fields attempt to access beyond the size of the log as reported by
     *   Get Supported Logs.
     *
     * The CEL buffer is large enough to fit all commands in the emulation,
     * so the only possible failure would be if the mailbox itself isn't big
     * enough.
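     *
     * A typical host flow (illustrative only, not enforced here): first
     * issue Get Supported Logs (opcode 0400h) to learn the CEL UUID and its
     * size, then issue Get Log with offset 0 and that size to read
     * cci->cel_size entries of 4 bytes each (opcode plus effects).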
     */
    if (get_log->offset + get_log->length > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) {
        return CXL_MBOX_INVALID_LOG;
    }

    /* Store off everything to local variables so we can wipe out the payload */
    *len_out = get_log->length;

    memmove(payload_out, cci->cel_log + get_log->offset, get_log->length);

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.1.1: Identify Memory Device (Opcode 4000h) */
static CXLRetCode cmd_identify_memory_device(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        char fw_revision[0x10];
        uint64_t total_capacity;
        uint64_t volatile_capacity;
        uint64_t persistent_capacity;
        uint64_t partition_align;
        uint16_t info_event_log_size;
        uint16_t warning_event_log_size;
        uint16_t failure_event_log_size;
        uint16_t fatal_event_log_size;
        uint32_t lsa_size;
        uint8_t poison_list_max_mer[3];
        uint16_t inject_poison_limit;
        uint8_t poison_caps;
        uint8_t qos_telemetry_caps;
        uint16_t dc_event_log_size;
    } QEMU_PACKED *id;
    QEMU_BUILD_BUG_ON(sizeof(*id) != 0x45);
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    id = (void *)payload_out;
    memset(id, 0, sizeof(*id));

    snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0);

    stq_le_p(&id->total_capacity,
             cxl_dstate->static_mem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->persistent_capacity,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->volatile_capacity,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d));
    /* 256 poison records */
    st24_le_p(id->poison_list_max_mer, 256);
    /* No limit - so limited by main poison record limit */
    stw_le_p(&id->inject_poison_limit, 0);
    stw_le_p(&id->dc_event_log_size, CXL_DC_EVENT_LOG_SIZE);

    *len_out = sizeof(*id);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.2.1: Get Partition Info (Opcode 4100h) */
static CXLRetCode cmd_ccls_get_partition_info(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    struct {
        uint64_t active_vmem;
        uint64_t active_pmem;
        uint64_t next_vmem;
        uint64_t next_pmem;
    } QEMU_PACKED *part_info = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20);
    CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    stq_le_p(&part_info->active_vmem,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    /*
     * When both next_vmem and next_pmem are 0, there is no pending change to
     * partitioning.
     */
    stq_le_p(&part_info->next_vmem, 0);
    stq_le_p(&part_info->active_pmem,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&part_info->next_pmem, 0);

    *len_out = sizeof(*part_info);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.2.3: Get LSA (Opcode 4102h) */
static CXLRetCode cmd_ccls_get_lsa(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct {
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED *get_lsa;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    uint32_t offset, length;

    get_lsa = (void *)payload_in;
    offset = get_lsa->offset;
    length = get_lsa->length;

    if (offset + length > cvc->get_lsa_size(ct3d)) {
        *len_out = 0;
        return CXL_MBOX_INVALID_INPUT;
    }

    *len_out = cvc->get_lsa(ct3d, payload_out, length, offset);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.2.4: Set LSA (Opcode 4103h) */
static CXLRetCode cmd_ccls_set_lsa(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct set_lsa_pl {
        uint32_t offset;
        uint32_t rsvd;
        uint8_t data[];
    } QEMU_PACKED;
    struct set_lsa_pl *set_lsa_payload = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    const size_t hdr_len = offsetof(struct set_lsa_pl, data);

    *len_out = 0;
    if (!len_in) {
        return CXL_MBOX_SUCCESS;
    }

    if (set_lsa_payload->offset + len_in > cvc->get_lsa_size(ct3d) + hdr_len) {
        return CXL_MBOX_INVALID_INPUT;
    }
    len_in -= hdr_len;

    cvc->set_lsa(ct3d, set_lsa_payload->data, len_in, set_lsa_payload->offset);
    return CXL_MBOX_SUCCESS;
}

/* Perform the actual device zeroing */
static void __do_sanitization(CXLType3Dev *ct3d)
{
    MemoryRegion *mr;

    if (ct3d->hostvmem) {
        mr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (mr) {
            void *hostmem = memory_region_get_ram_ptr(mr);
            memset(hostmem, 0, memory_region_size(mr));
        }
    }

    if (ct3d->hostpmem) {
        mr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (mr) {
            void *hostmem = memory_region_get_ram_ptr(mr);
            memset(hostmem, 0, memory_region_size(mr));
        }
    }
    if (ct3d->lsa) {
        mr = host_memory_backend_get_memory(ct3d->lsa);
        if (mr) {
            void *lsa = memory_region_get_ram_ptr(mr);
            memset(lsa, 0, memory_region_size(mr));
        }
    }
}

/*
 * CXL r3.1 Section 8.2.9.9.5.1: Sanitize (Opcode 4400h)
 *
 * Once the Sanitize command has started successfully, the device shall be
 * placed in the media disabled state. If the command fails or is interrupted
 * by a reset or power failure, it shall remain in the media disabled state
 * until a successful Sanitize command has been completed. During this state:
 *
 * 1. Memory writes to the device will have no effect, and all memory reads
 * will return random values (no user data returned, even for locations that
 * the failed Sanitize operation didn't sanitize yet).
 *
 * 2. Mailbox commands shall still be processed in the disabled state, except
 * that commands that access Sanitized areas shall fail with the Media
 * Disabled error code.
 */
static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint64_t total_mem; /* in MiB */
    int secs;

    total_mem = (ct3d->cxl_dstate.vmem_size + ct3d->cxl_dstate.pmem_size) >> 20;
    if (total_mem <= 512) {
        secs = 4;
    } else if (total_mem <= 1024) {
        secs = 8;
    } else if (total_mem <= 2 * 1024) {
        secs = 15;
    } else if (total_mem <= 4 * 1024) {
        secs = 30;
    } else if (total_mem <= 8 * 1024) {
        secs = 60;
    } else if (total_mem <= 16 * 1024) {
        secs = 2 * 60;
    } else if (total_mem <= 32 * 1024) {
        secs = 4 * 60;
    } else if (total_mem <= 64 * 1024) {
        secs = 8 * 60;
    } else if (total_mem <= 128 * 1024) {
        secs = 15 * 60;
    } else if (total_mem <= 256 * 1024) {
        secs = 30 * 60;
    } else if (total_mem <= 512 * 1024) {
        secs = 60 * 60;
    } else if (total_mem <= 1024 * 1024) {
        secs = 120 * 60;
    } else {
        secs = 240 * 60; /* max 4 hrs */
    }

    /* Other background commands are rejected (BUSY) while this runs */
    cci->bg.runtime = secs * 1000UL;
    *len_out = 0;

    cxl_dev_disable_media(&ct3d->cxl_dstate);

    /* The actual zeroing happens in bg_timercb() on completion */
    return CXL_MBOX_BG_STARTED;
}

static CXLRetCode cmd_get_security_state(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    uint32_t *state = (uint32_t *)payload_out;

    *state = 0;
    *len_out = 4;
    return CXL_MBOX_SUCCESS;
}

/*
 * CXL r3.1 Section 8.2.9.9.4.1: Get Poison List (Opcode 4300h)
 *
 * This is very inefficient, but good enough for now!
 * Also the payload will always fit, so no need to handle the MORE flag and
 * make this stateful. We may want to allow longer poison lists to aid
 * testing of that kernel functionality.
 */
static CXLRetCode cmd_media_get_poison_list(const struct cxl_cmd *cmd,
                                            uint8_t *payload_in,
                                            size_t len_in,
                                            uint8_t *payload_out,
                                            size_t *len_out,
                                            CXLCCI *cci)
{
    struct get_poison_list_pl {
        uint64_t pa;
        uint64_t length;
    } QEMU_PACKED;

    struct get_poison_list_out_pl {
        uint8_t flags;
        uint8_t rsvd1;
        uint64_t overflow_timestamp;
        uint16_t count;
        uint8_t rsvd2[0x14];
        struct {
            uint64_t addr;
            uint32_t length;
            uint32_t resv;
        } QEMU_PACKED records[];
    } QEMU_PACKED;

    struct get_poison_list_pl *in = (void *)payload_in;
    struct get_poison_list_out_pl *out = (void *)payload_out;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint16_t record_count = 0, i = 0;
    uint64_t query_start, query_length;
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLPoison *ent;
    uint16_t out_pl_len;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    QLIST_FOREACH(ent, poison_list, node) {
        /* Check for no overlap */
        if (ent->start >= query_start + query_length ||
            ent->start + ent->length <= query_start) {
            continue;
        }
        record_count++;
    }
    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    memset(out, 0, out_pl_len);
    QLIST_FOREACH(ent, poison_list, node) {
        uint64_t start, stop;

        /* Check for no overlap */
        if (ent->start >= query_start + query_length ||
            ent->start + ent->length <= query_start) {
            continue;
        }

        /* Deal with overlap */
        start = MAX(ROUND_DOWN(ent->start, 64ull), query_start);
        stop = MIN(ROUND_DOWN(ent->start, 64ull) + ent->length,
                   query_start + query_length);
        stq_le_p(&out->records[i].addr, start | (ent->type & 0x7));
        stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE);
        i++;
    }
    if (ct3d->poison_list_overflowed) {
        out->flags = (1 << 1);
        stq_le_p(&out->overflow_timestamp, ct3d->poison_list_overflow_ts);
    }
    stw_le_p(&out->count, record_count);
    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.4.2: Inject Poison (Opcode 4301h) */
static CXLRetCode cmd_media_inject_poison(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLPoison *ent;
    struct inject_poison_pl {
        uint64_t dpa;
    };
    struct inject_poison_pl *in = (void *)payload_in;
    uint64_t dpa = ldq_le_p(&in->dpa);
    CXLPoison *p;

    QLIST_FOREACH(ent, poison_list, node) {
        if (dpa >= ent->start &&
            dpa + CXL_CACHE_LINE_SIZE <= ent->start + ent->length) {
            return CXL_MBOX_SUCCESS;
        }
    }

    if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
        return CXL_MBOX_INJECT_POISON_LIMIT;
    }
    p = g_new0(CXLPoison, 1);

    p->length = CXL_CACHE_LINE_SIZE;
    p->start = dpa;
    p->type = CXL_POISON_TYPE_INJECTED;

    /*
     * Possible todo: Merge with existing entry if next to it and if same type
     */
    QLIST_INSERT_HEAD(poison_list, p, node);
    ct3d->poison_list_cnt++;
    *len_out = 0;

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.4.3: Clear Poison (Opcode 4302h) */
static CXLRetCode cmd_media_clear_poison(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    struct clear_poison_pl {
        uint64_t dpa;
        uint8_t data[64];
    };
    CXLPoison *ent;
    uint64_t dpa;

    struct clear_poison_pl *in = (void *)payload_in;

    dpa = ldq_le_p(&in->dpa);
    if (dpa + CXL_CACHE_LINE_SIZE > cxl_dstate->static_mem_size +
        ct3d->dc.total_capacity) {
        return CXL_MBOX_INVALID_PA;
    }

    /* Clearing a region with no poison is not an error so always do so */
    if (cvc->set_cacheline) {
        if (!cvc->set_cacheline(ct3d, dpa, in->data)) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
    }

    QLIST_FOREACH(ent, poison_list, node) {
        /*
         * Test for contained in entry. Simpler than the general case
         * as we are clearing 64 bytes and entries are 64 byte aligned.
         */
        if ((dpa >= ent->start) && (dpa < ent->start + ent->length)) {
            break;
        }
    }
    if (!ent) {
        return CXL_MBOX_SUCCESS;
    }
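
    /*
     * Worked example (illustrative addresses): clearing the 64 byte line at
     * dpa 0x140 from an entry covering [0x100, 0x200) removes that entry and
     * re-inserts two fragments, [0x100, 0x140) and [0x180, 0x200), below.
     */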

    QLIST_REMOVE(ent, node);
    ct3d->poison_list_cnt--;

    if (dpa > ent->start) {
        CXLPoison *frag;
        /* Cannot overflow as replacing existing entry */

        frag = g_new0(CXLPoison, 1);

        frag->start = ent->start;
        frag->length = dpa - ent->start;
        frag->type = ent->type;

        QLIST_INSERT_HEAD(poison_list, frag, node);
        ct3d->poison_list_cnt++;
    }

    if (dpa + CXL_CACHE_LINE_SIZE < ent->start + ent->length) {
        CXLPoison *frag;

        if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
            cxl_set_poison_list_overflowed(ct3d);
        } else {
            frag = g_new0(CXLPoison, 1);

            frag->start = dpa + CXL_CACHE_LINE_SIZE;
            frag->length = ent->start + ent->length - frag->start;
            frag->type = ent->type;
            QLIST_INSERT_HEAD(poison_list, frag, node);
            ct3d->poison_list_cnt++;
        }
    }
    /* Any fragments have been added; free the original entry */
    g_free(ent);
    *len_out = 0;

    return CXL_MBOX_SUCCESS;
}

/*
 * CXL r3.1 section 8.2.9.9.9.1: Get Dynamic Capacity Configuration
 * (Opcode: 4800h)
 */
static CXLRetCode cmd_dcd_get_dyn_cap_config(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    struct {
        uint8_t region_cnt;
        uint8_t start_rid;
    } QEMU_PACKED *in = (void *)payload_in;
    struct {
        uint8_t num_regions;
        uint8_t regions_returned;
        uint8_t rsvd1[6];
        struct {
            uint64_t base;
            uint64_t decode_len;
            uint64_t region_len;
            uint64_t block_size;
            uint32_t dsmadhandle;
            uint8_t flags;
            uint8_t rsvd2[3];
        } QEMU_PACKED records[];
    } QEMU_PACKED *out = (void *)payload_out;
    struct {
        uint32_t num_extents_supported;
        uint32_t num_extents_available;
        uint32_t num_tags_supported;
        uint32_t num_tags_available;
    } QEMU_PACKED *extra_out;
    uint16_t record_count;
    uint16_t i;
    uint16_t out_pl_len;
    uint8_t start_rid;

    start_rid = in->start_rid;
    if (start_rid >= ct3d->dc.num_regions) {
        return CXL_MBOX_INVALID_INPUT;
    }

    record_count = MIN(ct3d->dc.num_regions - in->start_rid, in->region_cnt);

    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    extra_out = (void *)(payload_out + out_pl_len);
    out_pl_len += sizeof(*extra_out);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    out->num_regions = ct3d->dc.num_regions;
    out->regions_returned = record_count;
    for (i = 0; i < record_count; i++) {
        stq_le_p(&out->records[i].base,
                 ct3d->dc.regions[start_rid + i].base);
        stq_le_p(&out->records[i].decode_len,
                 ct3d->dc.regions[start_rid + i].decode_len /
                 CXL_CAPACITY_MULTIPLIER);
        stq_le_p(&out->records[i].region_len,
                 ct3d->dc.regions[start_rid + i].len);
        stq_le_p(&out->records[i].block_size,
                 ct3d->dc.regions[start_rid + i].block_size);
        stl_le_p(&out->records[i].dsmadhandle,
                 ct3d->dc.regions[start_rid + i].dsmadhandle);
        out->records[i].flags = ct3d->dc.regions[start_rid + i].flags;
    }
    /*
     * TODO: Assign values once extents and tags are introduced
     * to use.
     */
    stl_le_p(&extra_out->num_extents_supported, CXL_NUM_EXTENTS_SUPPORTED);
    stl_le_p(&extra_out->num_extents_available, CXL_NUM_EXTENTS_SUPPORTED -
             ct3d->dc.total_extent_count);
    stl_le_p(&extra_out->num_tags_supported, CXL_NUM_TAGS_SUPPORTED);
    stl_le_p(&extra_out->num_tags_available, CXL_NUM_TAGS_SUPPORTED);

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}

/*
 * CXL r3.1 section 8.2.9.9.9.2:
 * Get Dynamic Capacity Extent List (Opcode 4801h)
 */
static CXLRetCode cmd_dcd_get_dyn_cap_ext_list(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len_in,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    struct {
        uint32_t extent_cnt;
        uint32_t start_extent_id;
    } QEMU_PACKED *in = (void *)payload_in;
    struct {
        uint32_t count;
        uint32_t total_extents;
        uint32_t generation_num;
        uint8_t rsvd[4];
        CXLDCExtentRaw records[];
    } QEMU_PACKED *out = (void *)payload_out;
    uint32_t start_extent_id = in->start_extent_id;
    CXLDCExtentList *extent_list = &ct3d->dc.extents;
    uint16_t record_count = 0, i = 0, record_done = 0;
    uint16_t out_pl_len, size;
    CXLDCExtent *ent;

    if (start_extent_id > ct3d->dc.total_extent_count) {
        return CXL_MBOX_INVALID_INPUT;
    }

    record_count = MIN(in->extent_cnt,
                       ct3d->dc.total_extent_count - start_extent_id);
    size = CXL_MAILBOX_MAX_PAYLOAD_SIZE - sizeof(*out);
    record_count = MIN(record_count, size / sizeof(out->records[0]));
    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);

    stl_le_p(&out->count, record_count);
    stl_le_p(&out->total_extents, ct3d->dc.total_extent_count);
    stl_le_p(&out->generation_num, ct3d->dc.ext_list_gen_seq);

    if (record_count > 0) {
        CXLDCExtentRaw *out_rec = &out->records[record_done];

        QTAILQ_FOREACH(ent, extent_list, node) {
            if (i++ < start_extent_id) {
                continue;
            }
            stq_le_p(&out_rec->start_dpa, ent->start_dpa);
            stq_le_p(&out_rec->len, ent->len);
            memcpy(&out_rec->tag, ent->tag, 0x10);
            stw_le_p(&out_rec->shared_seq, ent->shared_seq);

            record_done++;
            /* Advance to the next output record slot */
            out_rec++;
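/*
 * Typical usage sketch for the extent group helpers above (illustrative;
 * the pending list built this way is consumed by the Add Dynamic Capacity
 * Response handler below):
 *
 *     group = cxl_insert_extent_to_extent_group(group, dpa, len, tag, seq);
 *     cxl_extent_group_list_insert_tail(&ct3d->dc.extents_pending, group);
 */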
            if (record_done == record_count) {
                break;
            }
        }
    }

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}

/*
 * Check whether any bit between addr[nr, nr + size) is set,
 * return true if any bit is set, otherwise return false.
 */
bool test_any_bits_set(const unsigned long *addr, unsigned long nr,
                       unsigned long size)
{
    unsigned long res = find_next_bit(addr, size + nr, nr);

    return res < nr + size;
}

CXLDCRegion *cxl_find_dc_region(CXLType3Dev *ct3d, uint64_t dpa, uint64_t len)
{
    int i;
    CXLDCRegion *region = &ct3d->dc.regions[0];

    if (dpa < region->base ||
        dpa >= region->base + ct3d->dc.total_capacity) {
        return NULL;
    }

    /*
     * CXL r3.1 section 9.13.3: Dynamic Capacity Device (DCD)
     *
     * Regions are used in increasing-DPA order, with Region 0 being used for
     * the lowest DPA of Dynamic Capacity and Region 7 for the highest DPA.
     * So check from the last region to find where the dpa belongs. Extents
     * that cross multiple regions are not allowed.
     */
    for (i = ct3d->dc.num_regions - 1; i >= 0; i--) {
        region = &ct3d->dc.regions[i];
        if (dpa >= region->base) {
            if (dpa + len > region->base + region->len) {
                return NULL;
            }
            return region;
        }
    }

    return NULL;
}

void cxl_insert_extent_to_extent_list(CXLDCExtentList *list,
                                      uint64_t dpa,
                                      uint64_t len,
                                      uint8_t *tag,
                                      uint16_t shared_seq)
{
    CXLDCExtent *extent;

    extent = g_new0(CXLDCExtent, 1);
    extent->start_dpa = dpa;
    extent->len = len;
    if (tag) {
        memcpy(extent->tag, tag, 0x10);
    }
    extent->shared_seq = shared_seq;

    QTAILQ_INSERT_TAIL(list, extent, node);
}

void cxl_remove_extent_from_extent_list(CXLDCExtentList *list,
                                        CXLDCExtent *extent)
{
    QTAILQ_REMOVE(list, extent, node);
    g_free(extent);
}

/*
 * Add a new extent to the extent "group" if the group exists;
 * otherwise, create a new group.
 * Return value: the extent group where the extent is inserted.
 */
CXLDCExtentGroup *cxl_insert_extent_to_extent_group(CXLDCExtentGroup *group,
                                                    uint64_t dpa,
                                                    uint64_t len,
                                                    uint8_t *tag,
                                                    uint16_t shared_seq)
{
    if (!group) {
        group = g_new0(CXLDCExtentGroup, 1);
        QTAILQ_INIT(&group->list);
    }
    cxl_insert_extent_to_extent_list(&group->list, dpa, len,
                                     tag, shared_seq);
    return group;
}

void cxl_extent_group_list_insert_tail(CXLDCExtentGroupList *list,
                                       CXLDCExtentGroup *group)
{
    QTAILQ_INSERT_TAIL(list, group, node);
}

void cxl_extent_group_list_delete_front(CXLDCExtentGroupList *list)
{
    CXLDCExtent *ent, *ent_next;
    CXLDCExtentGroup *group = QTAILQ_FIRST(list);

    QTAILQ_REMOVE(list, group, node);
    QTAILQ_FOREACH_SAFE(ent, &group->list, node, ent_next) {
        cxl_remove_extent_from_extent_list(&group->list, ent);
    }
    g_free(group);
}
/*
 * CXL r3.1 Table 8-168: Add Dynamic Capacity Response Input Payload
 * CXL r3.1 Table 8-170: Release Dynamic Capacity Input Payload
 */
typedef struct CXLUpdateDCExtentListInPl {
    uint32_t num_entries_updated;
    uint8_t flags;
    uint8_t rsvd[3];
    /* CXL r3.1 Table 8-169: Updated Extent */
    struct {
        uint64_t start_dpa;
        uint64_t len;
        uint8_t rsvd[8];
    } QEMU_PACKED updated_entries[];
} QEMU_PACKED CXLUpdateDCExtentListInPl;

/*
 * Check whether the extents in the extent list to operate on are valid:
 * 1. The extent should be in the range of a valid DC region;
 * 2. The extent should not cross multiple regions;
 * 3. The start DPA and the length of the extent should align with the block
 *    size of the region;
 * 4. The address range of multiple extents in the list should not overlap.
 */
static CXLRetCode cxl_detect_malformed_extent_list(CXLType3Dev *ct3d,
        const CXLUpdateDCExtentListInPl *in)
{
    uint64_t min_block_size = UINT64_MAX;
    CXLDCRegion *region;
    CXLDCRegion *lastregion = &ct3d->dc.regions[ct3d->dc.num_regions - 1];
    g_autofree unsigned long *blk_bitmap = NULL;
    uint64_t dpa, len;
    uint32_t i;

    for (i = 0; i < ct3d->dc.num_regions; i++) {
        region = &ct3d->dc.regions[i];
        min_block_size = MIN(min_block_size, region->block_size);
    }

    blk_bitmap = bitmap_new((lastregion->base + lastregion->len -
                             ct3d->dc.regions[0].base) / min_block_size);

    for (i = 0; i < in->num_entries_updated; i++) {
        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        region = cxl_find_dc_region(ct3d, dpa, len);
        if (!region) {
            return CXL_MBOX_INVALID_PA;
        }

        dpa -= ct3d->dc.regions[0].base;
        if (dpa % region->block_size || len % region->block_size) {
            return CXL_MBOX_INVALID_EXTENT_LIST;
        }
        /* The DPA range is already covered by another extent in the list */
        if (test_any_bits_set(blk_bitmap, dpa / min_block_size,
                              len / min_block_size)) {
            return CXL_MBOX_INVALID_EXTENT_LIST;
        }
        bitmap_set(blk_bitmap, dpa / min_block_size, len / min_block_size);
    }

    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cxl_dcd_add_dyn_cap_rsp_dry_run(CXLType3Dev *ct3d,
        const CXLUpdateDCExtentListInPl *in)
{
    uint32_t i;
    CXLDCExtent *ent;
    CXLDCExtentGroup *ext_group;
    uint64_t dpa, len;
    Range range1, range2;

    for (i = 0; i < in->num_entries_updated; i++) {
        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        range_init_nofail(&range1, dpa, len);

        /*
         * The host-accepted DPA range must be contained by the first extent
         * group in the pending list.
         */
        ext_group = QTAILQ_FIRST(&ct3d->dc.extents_pending);
        if (!cxl_extents_contains_dpa_range(&ext_group->list, dpa, len)) {
            return CXL_MBOX_INVALID_PA;
        }

        /* The to-be-added range should not overlap a range already accepted */
        QTAILQ_FOREACH(ent, &ct3d->dc.extents, node) {
            range_init_nofail(&range2, ent->start_dpa, ent->len);
            if (range_overlaps_range(&range1, &range2)) {
                return CXL_MBOX_INVALID_PA;
            }
        }
    }
    return CXL_MBOX_SUCCESS;
}

/*
 * CXL r3.1 section 8.2.9.9.9.3: Add Dynamic Capacity Response (Opcode 4802h)
 * An extent is added to the extent list and becomes usable only after the
 * response is processed successfully.
 */
static CXLRetCode cmd_dcd_add_dyn_cap_rsp(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLUpdateDCExtentListInPl *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDCExtentList *extent_list = &ct3d->dc.extents;
    uint32_t i;
    uint64_t dpa, len;
    CXLRetCode ret;

    if (in->num_entries_updated == 0) {
        cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);
        return CXL_MBOX_SUCCESS;
    }

    /* Adding these extents would exceed the device's extent tracking ability */
    if (in->num_entries_updated + ct3d->dc.total_extent_count >
        CXL_NUM_EXTENTS_SUPPORTED) {
        return CXL_MBOX_RESOURCES_EXHAUSTED;
    }

    ret = cxl_detect_malformed_extent_list(ct3d, in);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    ret = cxl_dcd_add_dyn_cap_rsp_dry_run(ct3d, in);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    for (i = 0; i < in->num_entries_updated; i++) {
        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        cxl_insert_extent_to_extent_list(extent_list, dpa, len, NULL, 0);
        ct3d->dc.total_extent_count += 1;
        ct3_set_region_block_backed(ct3d, dpa, len);
    }
    /* Remove the first extent group in the pending list */
    cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);

    return CXL_MBOX_SUCCESS;
}

/*
 * Copy extent list from src to dst.
 * Return value: number of extents copied.
 */
static uint32_t copy_extent_list(CXLDCExtentList *dst,
                                 const CXLDCExtentList *src)
{
    uint32_t cnt = 0;
    CXLDCExtent *ent;

    if (!dst || !src) {
        return 0;
    }

    QTAILQ_FOREACH(ent, src, node) {
        cxl_insert_extent_to_extent_list(dst, ent->start_dpa, ent->len,
                                         ent->tag, ent->shared_seq);
        cnt++;
    }
    return cnt;
}

static CXLRetCode cxl_dc_extent_release_dry_run(CXLType3Dev *ct3d,
        const CXLUpdateDCExtentListInPl *in, CXLDCExtentList *updated_list,
        uint32_t *updated_list_size)
{
    CXLDCExtent *ent, *ent_next;
    uint64_t dpa, len;
    uint32_t i;
    int cnt_delta = 0;
    CXLRetCode ret = CXL_MBOX_SUCCESS;

    QTAILQ_INIT(updated_list);
    copy_extent_list(updated_list, &ct3d->dc.extents);

    for (i = 0; i < in->num_entries_updated; i++) {
        Range range;

        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        /* Check if the DPA range is not fully backed with valid extents */
        if (!ct3_test_region_block_backed(ct3d, dpa, len)) {
            ret = CXL_MBOX_INVALID_PA;
            goto free_and_exit;
        }

        /* After this point, extent overflow is the only error that can happen */
        while (len > 0) {
            QTAILQ_FOREACH(ent, updated_list, node) {
                range_init_nofail(&range, ent->start_dpa, ent->len);

                if (range_contains(&range, dpa)) {
                    uint64_t len1, len2 = 0, len_done = 0;
                    uint64_t ent_start_dpa = ent->start_dpa;
                    uint64_t ent_len = ent->len;

                    len1 = dpa - ent->start_dpa;
                    /* Found the extent or a subset of an existing extent */
                    if (range_contains(&range, dpa + len - 1)) {
                        len2 = ent_start_dpa + ent_len - dpa - len;
                    } else {
                        dpa = ent_start_dpa + ent_len;
                    }
                    len_done = ent_len - len1 - len2;

                    cxl_remove_extent_from_extent_list(updated_list, ent);
                    cnt_delta--;

                    if (len1) {
                        cxl_insert_extent_to_extent_list(updated_list,
                                                         ent_start_dpa,
                                                         len1, NULL, 0);
                        cnt_delta++;
                    }
                    if (len2) {
                        cxl_insert_extent_to_extent_list(updated_list,
                                                         dpa + len,
                                                         len2, NULL, 0);
                        cnt_delta++;
                    }

                    if (cnt_delta + ct3d->dc.total_extent_count >
                        CXL_NUM_EXTENTS_SUPPORTED) {
                        ret = CXL_MBOX_RESOURCES_EXHAUSTED;
                        goto free_and_exit;
                    }

                    len -= len_done;
                    break;
                }
            }
        }
    }
free_and_exit:
    if (ret != CXL_MBOX_SUCCESS) {
        QTAILQ_FOREACH_SAFE(ent, updated_list, node, ent_next) {
            cxl_remove_extent_from_extent_list(updated_list, ent);
        }
        *updated_list_size = 0;
    } else {
        *updated_list_size = ct3d->dc.total_extent_count + cnt_delta;
    }

    return ret;
}

/*
 * CXL r3.1 section 8.2.9.9.9.4: Release Dynamic Capacity (Opcode 4803h)
 */
static CXLRetCode cmd_dcd_release_dyn_cap(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLUpdateDCExtentListInPl *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDCExtentList updated_list;
    CXLDCExtent *ent, *ent_next;
    uint32_t updated_list_size;
    CXLRetCode ret;

    if (in->num_entries_updated == 0) {
        return CXL_MBOX_INVALID_INPUT;
    }

    ret = cxl_detect_malformed_extent_list(ct3d, in);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    ret = cxl_dc_extent_release_dry_run(ct3d, in, &updated_list,
                                        &updated_list_size);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    /*
     * If the dry run release passes, the returned updated_list is the
     * updated extent list. We then just need to clear the extents in the
     * accepted list, copy the extents in updated_list into the accepted
     * list, and update the extent count.
     */
    QTAILQ_FOREACH_SAFE(ent, &ct3d->dc.extents, node, ent_next) {
        ct3_clear_region_block_backed(ct3d, ent->start_dpa, ent->len);
        cxl_remove_extent_from_extent_list(&ct3d->dc.extents, ent);
    }
    copy_extent_list(&ct3d->dc.extents, &updated_list);
    QTAILQ_FOREACH_SAFE(ent, &updated_list, node, ent_next) {
        ct3_set_region_block_backed(ct3d, ent->start_dpa, ent->len);
        cxl_remove_extent_from_extent_list(&updated_list, ent);
    }
    ct3d->dc.total_extent_count = updated_list_size;

    return CXL_MBOX_SUCCESS;
}

#define IMMEDIATE_CONFIG_CHANGE (1 << 1)
#define IMMEDIATE_DATA_CHANGE (1 << 2)
#define IMMEDIATE_POLICY_CHANGE (1 << 3)
#define IMMEDIATE_LOG_CHANGE (1 << 4)
#define SECURITY_STATE_CHANGE (1 << 5)
#define BACKGROUND_OPERATION (1 << 6)

static const struct cxl_cmd cxl_cmd_set[256][256] = {
    [EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS",
        cmd_events_get_records, 1, 0 },
    [EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS",
        cmd_events_clear_records, ~0, IMMEDIATE_LOG_CHANGE },
    [EVENTS][GET_INTERRUPT_POLICY] = { "EVENTS_GET_INTERRUPT_POLICY",
        cmd_events_get_interrupt_policy, 0, 0 },
    [EVENTS][SET_INTERRUPT_POLICY] = { "EVENTS_SET_INTERRUPT_POLICY",
        cmd_events_set_interrupt_policy,
        ~0, IMMEDIATE_CONFIG_CHANGE },
    [FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO",
        cmd_firmware_update_get_info, 0, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set,
        8, IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported,
        0, 0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [IDENTIFY][MEMORY_DEVICE] = { "IDENTIFY_MEMORY_DEVICE",
        cmd_identify_memory_device, 0, 0 },
    [CCLS][GET_PARTITION_INFO] = { "CCLS_GET_PARTITION_INFO",
        cmd_ccls_get_partition_info, 0, 0 },
    [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 },
    [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa,
        ~0, IMMEDIATE_CONFIG_CHANGE | IMMEDIATE_DATA_CHANGE },
    [SANITIZE][OVERWRITE] = { "SANITIZE_OVERWRITE", cmd_sanitize_overwrite, 0,
        IMMEDIATE_DATA_CHANGE | SECURITY_STATE_CHANGE | BACKGROUND_OPERATION },
    [PERSISTENT_MEM][GET_SECURITY_STATE] = { "GET_SECURITY_STATE",
        cmd_get_security_state, 0, 0 },
    [MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST",
        cmd_media_get_poison_list, 16, 0 },
    [MEDIA_AND_POISON][INJECT_POISON] = { "MEDIA_AND_POISON_INJECT_POISON",
        cmd_media_inject_poison, 8, 0 },
    [MEDIA_AND_POISON][CLEAR_POISON] = { "MEDIA_AND_POISON_CLEAR_POISON",
        cmd_media_clear_poison, 72, 0 },
};

static const struct cxl_cmd cxl_cmd_set_dcd[256][256] = {
    [DCD_CONFIG][GET_DC_CONFIG] = { "DCD_GET_DC_CONFIG",
        cmd_dcd_get_dyn_cap_config, 2, 0 },
    [DCD_CONFIG][GET_DYN_CAP_EXT_LIST] = {
        "DCD_GET_DYNAMIC_CAPACITY_EXTENT_LIST", cmd_dcd_get_dyn_cap_ext_list,
        8, 0 },
    [DCD_CONFIG][ADD_DYN_CAP_RSP] = {
        "DCD_ADD_DYNAMIC_CAPACITY_RESPONSE", cmd_dcd_add_dyn_cap_rsp,
        ~0, IMMEDIATE_DATA_CHANGE },
    [DCD_CONFIG][RELEASE_DYN_CAP] = {
        "DCD_RELEASE_DYNAMIC_CAPACITY", cmd_dcd_release_dyn_cap,
        ~0, IMMEDIATE_DATA_CHANGE },
};

static const struct cxl_cmd cxl_cmd_set_sw[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [INFOSTAT][BACKGROUND_OPERATION_STATUS] = { "BACKGROUND_OPERATION_STATUS",
        cmd_infostat_bg_op_sts, 0, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 0,
        IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
        0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [PHYSICAL_SWITCH][IDENTIFY_SWITCH_DEVICE] = { "IDENTIFY_SWITCH_DEVICE",
        cmd_identify_switch_device, 0, 0 },
    [PHYSICAL_SWITCH][GET_PHYSICAL_PORT_STATE] = { "SWITCH_PHYSICAL_PORT_STATS",
        cmd_get_physical_port_state, ~0, 0 },
    [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
        cmd_tunnel_management_cmd, ~0, 0 },
};

/*
 * While the command is executing in the background, the device should
 * update the percentage complete in the Background Command Status Register
 * at least once per second.
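 *
 * For example (illustrative): cmd_sanitize_overwrite above models a 1 GiB
 * device as an 8 second background operation, so with a 1000 ms update
 * period the percentage complete field is refreshed roughly eight times
 * before the operation finishes.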

#define IMMEDIATE_CONFIG_CHANGE (1 << 1)
#define IMMEDIATE_DATA_CHANGE (1 << 2)
#define IMMEDIATE_POLICY_CHANGE (1 << 3)
#define IMMEDIATE_LOG_CHANGE (1 << 4)
#define SECURITY_STATE_CHANGE (1 << 5)
#define BACKGROUND_OPERATION (1 << 6)

static const struct cxl_cmd cxl_cmd_set[256][256] = {
    [EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS",
        cmd_events_get_records, 1, 0 },
    [EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS",
        cmd_events_clear_records, ~0, IMMEDIATE_LOG_CHANGE },
    [EVENTS][GET_INTERRUPT_POLICY] = { "EVENTS_GET_INTERRUPT_POLICY",
        cmd_events_get_interrupt_policy, 0, 0 },
    [EVENTS][SET_INTERRUPT_POLICY] = { "EVENTS_SET_INTERRUPT_POLICY",
        cmd_events_set_interrupt_policy,
        ~0, IMMEDIATE_CONFIG_CHANGE },
    [FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO",
        cmd_firmware_update_get_info, 0, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set,
        8, IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported,
        0, 0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [IDENTIFY][MEMORY_DEVICE] = { "IDENTIFY_MEMORY_DEVICE",
        cmd_identify_memory_device, 0, 0 },
    [CCLS][GET_PARTITION_INFO] = { "CCLS_GET_PARTITION_INFO",
        cmd_ccls_get_partition_info, 0, 0 },
    [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 },
    [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa,
        ~0, IMMEDIATE_CONFIG_CHANGE | IMMEDIATE_DATA_CHANGE },
    [SANITIZE][OVERWRITE] = { "SANITIZE_OVERWRITE", cmd_sanitize_overwrite, 0,
        IMMEDIATE_DATA_CHANGE | SECURITY_STATE_CHANGE | BACKGROUND_OPERATION },
    [PERSISTENT_MEM][GET_SECURITY_STATE] = { "GET_SECURITY_STATE",
        cmd_get_security_state, 0, 0 },
    [MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST",
        cmd_media_get_poison_list, 16, 0 },
    [MEDIA_AND_POISON][INJECT_POISON] = { "MEDIA_AND_POISON_INJECT_POISON",
        cmd_media_inject_poison, 8, 0 },
    [MEDIA_AND_POISON][CLEAR_POISON] = { "MEDIA_AND_POISON_CLEAR_POISON",
        cmd_media_clear_poison, 72, 0 },
};

static const struct cxl_cmd cxl_cmd_set_dcd[256][256] = {
    [DCD_CONFIG][GET_DC_CONFIG] = { "DCD_GET_DC_CONFIG",
        cmd_dcd_get_dyn_cap_config, 2, 0 },
    [DCD_CONFIG][GET_DYN_CAP_EXT_LIST] = {
        "DCD_GET_DYNAMIC_CAPACITY_EXTENT_LIST", cmd_dcd_get_dyn_cap_ext_list,
        8, 0 },
    [DCD_CONFIG][ADD_DYN_CAP_RSP] = {
        "DCD_ADD_DYNAMIC_CAPACITY_RESPONSE", cmd_dcd_add_dyn_cap_rsp,
        ~0, IMMEDIATE_DATA_CHANGE },
    [DCD_CONFIG][RELEASE_DYN_CAP] = {
        "DCD_RELEASE_DYNAMIC_CAPACITY", cmd_dcd_release_dyn_cap,
        ~0, IMMEDIATE_DATA_CHANGE },
};

static const struct cxl_cmd cxl_cmd_set_sw[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [INFOSTAT][BACKGROUND_OPERATION_STATUS] = { "BACKGROUND_OPERATION_STATUS",
        cmd_infostat_bg_op_sts, 0, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 0,
        IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
        0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [PHYSICAL_SWITCH][IDENTIFY_SWITCH_DEVICE] = { "IDENTIFY_SWITCH_DEVICE",
        cmd_identify_switch_device, 0, 0 },
    [PHYSICAL_SWITCH][GET_PHYSICAL_PORT_STATE] = { "SWITCH_PHYSICAL_PORT_STATS",
        cmd_get_physical_port_state, ~0, 0 },
    [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
        cmd_tunnel_management_cmd, ~0, 0 },
};
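
/*
 * Dispatch sketch (illustrative, mirroring cxl_process_cci_message()
 * below): a 16-bit opcode splits into the two table indices, and the third
 * field of a populated cell is the exact input payload length to enforce,
 * with ~0 delegating length validation to the handler itself:
 *
 *     uint8_t set = opcode >> 8;
 *     uint8_t cmd = opcode & 0xff;
 *     const struct cxl_cmd *c = &cci->cxl_cmd_set[set][cmd];
 *
 *     if (len_in != c->in && c->in != ~0) {
 *         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
 *     }
 */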

/*
 * While the command is executing in the background, the device should
 * update the percentage complete in the Background Command Status Register
 * at least once per second.
 */

#define CXL_MBOX_BG_UPDATE_FREQ 1000UL

int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd,
                            size_t len_in, uint8_t *pl_in, size_t *len_out,
                            uint8_t *pl_out, bool *bg_started)
{
    int ret;
    const struct cxl_cmd *cxl_cmd;
    opcode_handler h;

    *len_out = 0;
    cxl_cmd = &cci->cxl_cmd_set[set][cmd];
    h = cxl_cmd->handler;
    if (!h) {
        qemu_log_mask(LOG_UNIMP, "Command %04xh not implemented\n",
                      set << 8 | cmd);
        return CXL_MBOX_UNSUPPORTED;
    }

    if (len_in != cxl_cmd->in && cxl_cmd->in != ~0) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    /* Only one bg command at a time */
    if ((cxl_cmd->effect & BACKGROUND_OPERATION) &&
        cci->bg.runtime > 0) {
        return CXL_MBOX_BUSY;
    }

    /* forbid any selected commands while overwriting */
    if (sanitize_running(cci)) {
        if (h == cmd_events_get_records ||
            h == cmd_ccls_get_partition_info ||
            h == cmd_ccls_set_lsa ||
            h == cmd_ccls_get_lsa ||
            h == cmd_logs_get_log ||
            h == cmd_media_get_poison_list ||
            h == cmd_media_inject_poison ||
            h == cmd_media_clear_poison ||
            h == cmd_sanitize_overwrite) {
            return CXL_MBOX_MEDIA_DISABLED;
        }
    }

    ret = (*h)(cxl_cmd, pl_in, len_in, pl_out, len_out, cci);
    if ((cxl_cmd->effect & BACKGROUND_OPERATION) &&
        ret == CXL_MBOX_BG_STARTED) {
        *bg_started = true;
    } else {
        *bg_started = false;
    }

    /* Background command accepted: record bg state and arm the update timer */
    if (*bg_started) {
        uint64_t now;

        cci->bg.opcode = (set << 8) | cmd;

        cci->bg.complete_pct = 0;
        cci->bg.ret_code = 0;

        now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
        cci->bg.starttime = now;
        timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
    }

    return ret;
}

static void bg_timercb(void *opaque)
{
    CXLCCI *cci = opaque;
    uint64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    uint64_t total_time = cci->bg.starttime + cci->bg.runtime;

    assert(cci->bg.runtime > 0);

    if (now >= total_time) { /* we are done */
        uint16_t ret = CXL_MBOX_SUCCESS;

        cci->bg.complete_pct = 100;
        cci->bg.ret_code = ret;
        switch (cci->bg.opcode) {
        case 0x4400: /* sanitize */
        {
            CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

            __do_sanitization(ct3d);
            cxl_dev_enable_media(&ct3d->cxl_dstate);
        }
            break;
        case 0x4304: /* TODO: scan media */
            break;
        default:
            __builtin_unreachable();
            break;
        }
    } else {
        /*
         * Estimate only: the fraction of the expected runtime that has
         * elapsed since the command started.
         */
        cci->bg.complete_pct =
            100 * (now - cci->bg.starttime) / cci->bg.runtime;
        timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
    }

    if (cci->bg.complete_pct == 100) {
        /* TODO: generalize to switch CCI */
        CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
        CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
        PCIDevice *pdev = PCI_DEVICE(cci->d);

        cci->bg.starttime = 0;
        /* registers are updated, allow new bg-capable cmds */
        cci->bg.runtime = 0;

        if (msix_enabled(pdev)) {
            msix_notify(pdev, cxl_dstate->mbox_msi_n);
        } else if (msi_enabled(pdev)) {
            msi_notify(pdev, cxl_dstate->mbox_msi_n);
        }
    }
}
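
/*
 * Illustrative only: the Command Effects Log that cxl_rebuild_cel() below
 * assembles is a dense array of { opcode, effect } pairs, which
 * cmd_logs_get_log() hands back to the host when the CEL UUID is
 * requested. A walk over it looks like:
 *
 *     for (size_t i = 0; i < cci->cel_size; i++) {
 *         uint16_t opcode = cci->cel_log[i].opcode; // (set << 8) | cmd
 *         uint16_t effect = cci->cel_log[i].effect; // IMMEDIATE_* bits etc.
 *     }
 */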

static void cxl_rebuild_cel(CXLCCI *cci)
{
    cci->cel_size = 0; /* Reset for a fresh build */
    for (int set = 0; set < 256; set++) {
        for (int cmd = 0; cmd < 256; cmd++) {
            if (cci->cxl_cmd_set[set][cmd].handler) {
                const struct cxl_cmd *c = &cci->cxl_cmd_set[set][cmd];
                struct cel_log *log =
                    &cci->cel_log[cci->cel_size];

                log->opcode = (set << 8) | cmd;
                log->effect = c->effect;
                cci->cel_size++;
            }
        }
    }
}

void cxl_init_cci(CXLCCI *cci, size_t payload_max)
{
    cci->payload_max = payload_max;
    cxl_rebuild_cel(cci);

    cci->bg.complete_pct = 0;
    cci->bg.starttime = 0;
    cci->bg.runtime = 0;
    cci->bg.timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                 bg_timercb, cci);
}

static void cxl_copy_cci_commands(CXLCCI *cci,
                                  const struct cxl_cmd (*cxl_cmds)[256])
{
    for (int set = 0; set < 256; set++) {
        for (int cmd = 0; cmd < 256; cmd++) {
            if (cxl_cmds[set][cmd].handler) {
                cci->cxl_cmd_set[set][cmd] = cxl_cmds[set][cmd];
            }
        }
    }
}

void cxl_add_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmd_set)[256],
                          size_t payload_max)
{
    cci->payload_max = MAX(payload_max, cci->payload_max);
    cxl_copy_cci_commands(cci, cxl_cmd_set);
    cxl_rebuild_cel(cci);
}

void cxl_initialize_mailbox_swcci(CXLCCI *cci, DeviceState *intf,
                                  DeviceState *d, size_t payload_max)
{
    cxl_copy_cci_commands(cci, cxl_cmd_set_sw);
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}

void cxl_initialize_mailbox_t3(CXLCCI *cci, DeviceState *d, size_t payload_max)
{
    CXLType3Dev *ct3d = CXL_TYPE3(d);

    cxl_copy_cci_commands(cci, cxl_cmd_set);
    if (ct3d->dc.num_regions) {
        cxl_copy_cci_commands(cci, cxl_cmd_set_dcd);
    }
    cci->d = d;

    /* No separation for PCI MB as protocol handled in PCI device */
    cci->intf = d;
    cxl_init_cci(cci, payload_max);
}

static const struct cxl_cmd cxl_cmd_set_t3_ld[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
        0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
};

void cxl_initialize_t3_ld_cci(CXLCCI *cci, DeviceState *d, DeviceState *intf,
                              size_t payload_max)
{
    cxl_copy_cci_commands(cci, cxl_cmd_set_t3_ld);
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}

static const struct cxl_cmd cxl_cmd_set_t3_fm_owned_ld_mctp[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
        0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
        cmd_tunnel_management_cmd, ~0, 0 },
};

void cxl_initialize_t3_fm_owned_ld_mctpcci(CXLCCI *cci, DeviceState *d,
                                           DeviceState *intf,
                                           size_t payload_max)
{
    cxl_copy_cci_commands(cci, cxl_cmd_set_t3_fm_owned_ld_mctp);
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}
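
/*
 * Example wiring (illustrative; the device realize code elsewhere owns
 * these calls, and the ct3d field names plus the 512-byte MCTP payload
 * limit are assumptions of this sketch):
 *
 *     cxl_initialize_mailbox_t3(&ct3d->cci, DEVICE(ct3d),
 *                               CXL_MAILBOX_MAX_PAYLOAD_SIZE);
 *
 *     cxl_initialize_t3_fm_owned_ld_mctpcci(&ct3d->vdm_fm_owned_ld_mctp_cci,
 *                                           DEVICE(ct3d), DEVICE(ct3d), 512);
 *
 * The second call builds a separate CCI carrying the tunnel-capable
 * command set, so FM-owned LD commands arrive over MCTP rather than
 * through the PCI mailbox.
 */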