1 /* 2 * CXL Type 3 (memory expander) device 3 * 4 * Copyright(C) 2020 Intel Corporation. 5 * 6 * This work is licensed under the terms of the GNU GPL, version 2. See the 7 * COPYING file in the top-level directory. 8 * 9 * SPDX-License-Identifier: GPL-v2-only 10 */ 11 12 #include "qemu/osdep.h" 13 #include "qemu/units.h" 14 #include "qemu/error-report.h" 15 #include "qapi/qapi-commands-cxl.h" 16 #include "hw/mem/memory-device.h" 17 #include "hw/mem/pc-dimm.h" 18 #include "hw/pci/pci.h" 19 #include "hw/qdev-properties.h" 20 #include "qapi/error.h" 21 #include "qemu/log.h" 22 #include "qemu/module.h" 23 #include "qemu/pmem.h" 24 #include "qemu/range.h" 25 #include "qemu/rcu.h" 26 #include "qemu/guest-random.h" 27 #include "sysemu/hostmem.h" 28 #include "sysemu/numa.h" 29 #include "hw/cxl/cxl.h" 30 #include "hw/pci/msix.h" 31 32 #define DWORD_BYTE 4 33 34 /* Default CDAT entries for a memory region */ 35 enum { 36 CT3_CDAT_DSMAS, 37 CT3_CDAT_DSLBIS0, 38 CT3_CDAT_DSLBIS1, 39 CT3_CDAT_DSLBIS2, 40 CT3_CDAT_DSLBIS3, 41 CT3_CDAT_DSEMTS, 42 CT3_CDAT_NUM_ENTRIES 43 }; 44 45 static void ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table, 46 int dsmad_handle, MemoryRegion *mr, 47 bool is_pmem, uint64_t dpa_base) 48 { 49 CDATDsmas *dsmas; 50 CDATDslbis *dslbis0; 51 CDATDslbis *dslbis1; 52 CDATDslbis *dslbis2; 53 CDATDslbis *dslbis3; 54 CDATDsemts *dsemts; 55 56 dsmas = g_malloc(sizeof(*dsmas)); 57 *dsmas = (CDATDsmas) { 58 .header = { 59 .type = CDAT_TYPE_DSMAS, 60 .length = sizeof(*dsmas), 61 }, 62 .DSMADhandle = dsmad_handle, 63 .flags = is_pmem ? 
CDAT_DSMAS_FLAG_NV : 0, 64 .DPA_base = dpa_base, 65 .DPA_length = memory_region_size(mr), 66 }; 67 68 /* For now, no memory side cache, plausiblish numbers */ 69 dslbis0 = g_malloc(sizeof(*dslbis0)); 70 *dslbis0 = (CDATDslbis) { 71 .header = { 72 .type = CDAT_TYPE_DSLBIS, 73 .length = sizeof(*dslbis0), 74 }, 75 .handle = dsmad_handle, 76 .flags = HMAT_LB_MEM_MEMORY, 77 .data_type = HMAT_LB_DATA_READ_LATENCY, 78 .entry_base_unit = 10000, /* 10ns base */ 79 .entry[0] = 15, /* 150ns */ 80 }; 81 82 dslbis1 = g_malloc(sizeof(*dslbis1)); 83 *dslbis1 = (CDATDslbis) { 84 .header = { 85 .type = CDAT_TYPE_DSLBIS, 86 .length = sizeof(*dslbis1), 87 }, 88 .handle = dsmad_handle, 89 .flags = HMAT_LB_MEM_MEMORY, 90 .data_type = HMAT_LB_DATA_WRITE_LATENCY, 91 .entry_base_unit = 10000, 92 .entry[0] = 25, /* 250ns */ 93 }; 94 95 dslbis2 = g_malloc(sizeof(*dslbis2)); 96 *dslbis2 = (CDATDslbis) { 97 .header = { 98 .type = CDAT_TYPE_DSLBIS, 99 .length = sizeof(*dslbis2), 100 }, 101 .handle = dsmad_handle, 102 .flags = HMAT_LB_MEM_MEMORY, 103 .data_type = HMAT_LB_DATA_READ_BANDWIDTH, 104 .entry_base_unit = 1000, /* GB/s */ 105 .entry[0] = 16, 106 }; 107 108 dslbis3 = g_malloc(sizeof(*dslbis3)); 109 *dslbis3 = (CDATDslbis) { 110 .header = { 111 .type = CDAT_TYPE_DSLBIS, 112 .length = sizeof(*dslbis3), 113 }, 114 .handle = dsmad_handle, 115 .flags = HMAT_LB_MEM_MEMORY, 116 .data_type = HMAT_LB_DATA_WRITE_BANDWIDTH, 117 .entry_base_unit = 1000, /* GB/s */ 118 .entry[0] = 16, 119 }; 120 121 dsemts = g_malloc(sizeof(*dsemts)); 122 *dsemts = (CDATDsemts) { 123 .header = { 124 .type = CDAT_TYPE_DSEMTS, 125 .length = sizeof(*dsemts), 126 }, 127 .DSMAS_handle = dsmad_handle, 128 /* 129 * NV: Reserved - the non volatile from DSMAS matters 130 * V: EFI_MEMORY_SP 131 */ 132 .EFI_memory_type_attr = is_pmem ? 
2 : 1, 133 .DPA_offset = 0, 134 .DPA_length = memory_region_size(mr), 135 }; 136 137 /* Header always at start of structure */ 138 cdat_table[CT3_CDAT_DSMAS] = (CDATSubHeader *)dsmas; 139 cdat_table[CT3_CDAT_DSLBIS0] = (CDATSubHeader *)dslbis0; 140 cdat_table[CT3_CDAT_DSLBIS1] = (CDATSubHeader *)dslbis1; 141 cdat_table[CT3_CDAT_DSLBIS2] = (CDATSubHeader *)dslbis2; 142 cdat_table[CT3_CDAT_DSLBIS3] = (CDATSubHeader *)dslbis3; 143 cdat_table[CT3_CDAT_DSEMTS] = (CDATSubHeader *)dsemts; 144 } 145 146 static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv) 147 { 148 g_autofree CDATSubHeader **table = NULL; 149 CXLType3Dev *ct3d = priv; 150 MemoryRegion *volatile_mr = NULL, *nonvolatile_mr = NULL; 151 int dsmad_handle = 0; 152 int cur_ent = 0; 153 int len = 0; 154 155 if (!ct3d->hostpmem && !ct3d->hostvmem) { 156 return 0; 157 } 158 159 if (ct3d->hostvmem) { 160 volatile_mr = host_memory_backend_get_memory(ct3d->hostvmem); 161 if (!volatile_mr) { 162 return -EINVAL; 163 } 164 len += CT3_CDAT_NUM_ENTRIES; 165 } 166 167 if (ct3d->hostpmem) { 168 nonvolatile_mr = host_memory_backend_get_memory(ct3d->hostpmem); 169 if (!nonvolatile_mr) { 170 return -EINVAL; 171 } 172 len += CT3_CDAT_NUM_ENTRIES; 173 } 174 175 table = g_malloc0(len * sizeof(*table)); 176 177 /* Now fill them in */ 178 if (volatile_mr) { 179 ct3_build_cdat_entries_for_mr(table, dsmad_handle++, volatile_mr, 180 false, 0); 181 cur_ent = CT3_CDAT_NUM_ENTRIES; 182 } 183 184 if (nonvolatile_mr) { 185 uint64_t base = volatile_mr ? 
memory_region_size(volatile_mr) : 0; 186 ct3_build_cdat_entries_for_mr(&(table[cur_ent]), dsmad_handle++, 187 nonvolatile_mr, true, base); 188 cur_ent += CT3_CDAT_NUM_ENTRIES; 189 } 190 assert(len == cur_ent); 191 192 *cdat_table = g_steal_pointer(&table); 193 194 return len; 195 } 196 197 static void ct3_free_cdat_table(CDATSubHeader **cdat_table, int num, void *priv) 198 { 199 int i; 200 201 for (i = 0; i < num; i++) { 202 g_free(cdat_table[i]); 203 } 204 g_free(cdat_table); 205 } 206 207 static bool cxl_doe_cdat_rsp(DOECap *doe_cap) 208 { 209 CDATObject *cdat = &CXL_TYPE3(doe_cap->pdev)->cxl_cstate.cdat; 210 uint16_t ent; 211 void *base; 212 uint32_t len; 213 CDATReq *req = pcie_doe_get_write_mbox_ptr(doe_cap); 214 CDATRsp rsp; 215 216 assert(cdat->entry_len); 217 218 /* Discard if request length mismatched */ 219 if (pcie_doe_get_obj_len(req) < 220 DIV_ROUND_UP(sizeof(CDATReq), DWORD_BYTE)) { 221 return false; 222 } 223 224 ent = req->entry_handle; 225 base = cdat->entry[ent].base; 226 len = cdat->entry[ent].length; 227 228 rsp = (CDATRsp) { 229 .header = { 230 .vendor_id = CXL_VENDOR_ID, 231 .data_obj_type = CXL_DOE_TABLE_ACCESS, 232 .reserved = 0x0, 233 .length = DIV_ROUND_UP((sizeof(rsp) + len), DWORD_BYTE), 234 }, 235 .rsp_code = CXL_DOE_TAB_RSP, 236 .table_type = CXL_DOE_TAB_TYPE_CDAT, 237 .entry_handle = (ent < cdat->entry_len - 1) ? 
238 ent + 1 : CXL_DOE_TAB_ENT_MAX, 239 }; 240 241 memcpy(doe_cap->read_mbox, &rsp, sizeof(rsp)); 242 memcpy(doe_cap->read_mbox + DIV_ROUND_UP(sizeof(rsp), DWORD_BYTE), 243 base, len); 244 245 doe_cap->read_mbox_len += rsp.header.length; 246 247 return true; 248 } 249 250 static uint32_t ct3d_config_read(PCIDevice *pci_dev, uint32_t addr, int size) 251 { 252 CXLType3Dev *ct3d = CXL_TYPE3(pci_dev); 253 uint32_t val; 254 255 if (pcie_doe_read_config(&ct3d->doe_cdat, addr, size, &val)) { 256 return val; 257 } 258 259 return pci_default_read_config(pci_dev, addr, size); 260 } 261 262 static void ct3d_config_write(PCIDevice *pci_dev, uint32_t addr, uint32_t val, 263 int size) 264 { 265 CXLType3Dev *ct3d = CXL_TYPE3(pci_dev); 266 267 pcie_doe_write_config(&ct3d->doe_cdat, addr, val, size); 268 pci_default_write_config(pci_dev, addr, val, size); 269 pcie_aer_write_config(pci_dev, addr, val, size); 270 } 271 272 /* 273 * Null value of all Fs suggested by IEEE RA guidelines for use of 274 * EU, OUI and CID 275 */ 276 #define UI64_NULL ~(0ULL) 277 278 static void build_dvsecs(CXLType3Dev *ct3d) 279 { 280 CXLComponentState *cxl_cstate = &ct3d->cxl_cstate; 281 uint8_t *dvsec; 282 uint32_t range1_size_hi, range1_size_lo, 283 range1_base_hi = 0, range1_base_lo = 0, 284 range2_size_hi = 0, range2_size_lo = 0, 285 range2_base_hi = 0, range2_base_lo = 0; 286 287 /* 288 * Volatile memory is mapped as (0x0) 289 * Persistent memory is mapped at (volatile->size) 290 */ 291 if (ct3d->hostvmem) { 292 range1_size_hi = ct3d->hostvmem->size >> 32; 293 range1_size_lo = (2 << 5) | (2 << 2) | 0x3 | 294 (ct3d->hostvmem->size & 0xF0000000); 295 if (ct3d->hostpmem) { 296 range2_size_hi = ct3d->hostpmem->size >> 32; 297 range2_size_lo = (2 << 5) | (2 << 2) | 0x3 | 298 (ct3d->hostpmem->size & 0xF0000000); 299 } 300 } else { 301 range1_size_hi = ct3d->hostpmem->size >> 32; 302 range1_size_lo = (2 << 5) | (2 << 2) | 0x3 | 303 (ct3d->hostpmem->size & 0xF0000000); 304 } 305 306 dvsec = (uint8_t 
*)&(CXLDVSECDevice){ 307 .cap = 0x1e, 308 .ctrl = 0x2, 309 .status2 = 0x2, 310 .range1_size_hi = range1_size_hi, 311 .range1_size_lo = range1_size_lo, 312 .range1_base_hi = range1_base_hi, 313 .range1_base_lo = range1_base_lo, 314 .range2_size_hi = range2_size_hi, 315 .range2_size_lo = range2_size_lo, 316 .range2_base_hi = range2_base_hi, 317 .range2_base_lo = range2_base_lo, 318 }; 319 cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE, 320 PCIE_CXL_DEVICE_DVSEC_LENGTH, 321 PCIE_CXL_DEVICE_DVSEC, 322 PCIE_CXL31_DEVICE_DVSEC_REVID, dvsec); 323 324 dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){ 325 .rsvd = 0, 326 .reg0_base_lo = RBI_COMPONENT_REG | CXL_COMPONENT_REG_BAR_IDX, 327 .reg0_base_hi = 0, 328 .reg1_base_lo = RBI_CXL_DEVICE_REG | CXL_DEVICE_REG_BAR_IDX, 329 .reg1_base_hi = 0, 330 }; 331 cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE, 332 REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC, 333 REG_LOC_DVSEC_REVID, dvsec); 334 dvsec = (uint8_t *)&(CXLDVSECDeviceGPF){ 335 .phase2_duration = 0x603, /* 3 seconds */ 336 .phase2_power = 0x33, /* 0x33 miliwatts */ 337 }; 338 cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE, 339 GPF_DEVICE_DVSEC_LENGTH, GPF_DEVICE_DVSEC, 340 GPF_DEVICE_DVSEC_REVID, dvsec); 341 342 dvsec = (uint8_t *)&(CXLDVSECPortFlexBus){ 343 .cap = 0x26, /* 68B, IO, Mem, non-MLD */ 344 .ctrl = 0x02, /* IO always enabled */ 345 .status = 0x26, /* same as capabilities */ 346 .rcvd_mod_ts_data_phase1 = 0xef, /* WTF? 
*/ 347 }; 348 cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE, 349 PCIE_CXL3_FLEXBUS_PORT_DVSEC_LENGTH, 350 PCIE_FLEXBUS_PORT_DVSEC, 351 PCIE_CXL3_FLEXBUS_PORT_DVSEC_REVID, dvsec); 352 } 353 354 static void hdm_decoder_commit(CXLType3Dev *ct3d, int which) 355 { 356 int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO; 357 ComponentRegisters *cregs = &ct3d->cxl_cstate.crb; 358 uint32_t *cache_mem = cregs->cache_mem_registers; 359 uint32_t ctrl; 360 361 ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc); 362 /* TODO: Sanity checks that the decoder is possible */ 363 ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0); 364 ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 1); 365 366 stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc, ctrl); 367 } 368 369 static void hdm_decoder_uncommit(CXLType3Dev *ct3d, int which) 370 { 371 int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO; 372 ComponentRegisters *cregs = &ct3d->cxl_cstate.crb; 373 uint32_t *cache_mem = cregs->cache_mem_registers; 374 uint32_t ctrl; 375 376 ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc); 377 378 ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0); 379 ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 0); 380 381 stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc, ctrl); 382 } 383 384 static int ct3d_qmp_uncor_err_to_cxl(CxlUncorErrorType qmp_err) 385 { 386 switch (qmp_err) { 387 case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_PARITY: 388 return CXL_RAS_UNC_ERR_CACHE_DATA_PARITY; 389 case CXL_UNCOR_ERROR_TYPE_CACHE_ADDRESS_PARITY: 390 return CXL_RAS_UNC_ERR_CACHE_ADDRESS_PARITY; 391 case CXL_UNCOR_ERROR_TYPE_CACHE_BE_PARITY: 392 return CXL_RAS_UNC_ERR_CACHE_BE_PARITY; 393 case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_ECC: 394 return CXL_RAS_UNC_ERR_CACHE_DATA_ECC; 395 case CXL_UNCOR_ERROR_TYPE_MEM_DATA_PARITY: 396 return CXL_RAS_UNC_ERR_MEM_DATA_PARITY; 397 case 
CXL_UNCOR_ERROR_TYPE_MEM_ADDRESS_PARITY: 398 return CXL_RAS_UNC_ERR_MEM_ADDRESS_PARITY; 399 case CXL_UNCOR_ERROR_TYPE_MEM_BE_PARITY: 400 return CXL_RAS_UNC_ERR_MEM_BE_PARITY; 401 case CXL_UNCOR_ERROR_TYPE_MEM_DATA_ECC: 402 return CXL_RAS_UNC_ERR_MEM_DATA_ECC; 403 case CXL_UNCOR_ERROR_TYPE_REINIT_THRESHOLD: 404 return CXL_RAS_UNC_ERR_REINIT_THRESHOLD; 405 case CXL_UNCOR_ERROR_TYPE_RSVD_ENCODING: 406 return CXL_RAS_UNC_ERR_RSVD_ENCODING; 407 case CXL_UNCOR_ERROR_TYPE_POISON_RECEIVED: 408 return CXL_RAS_UNC_ERR_POISON_RECEIVED; 409 case CXL_UNCOR_ERROR_TYPE_RECEIVER_OVERFLOW: 410 return CXL_RAS_UNC_ERR_RECEIVER_OVERFLOW; 411 case CXL_UNCOR_ERROR_TYPE_INTERNAL: 412 return CXL_RAS_UNC_ERR_INTERNAL; 413 case CXL_UNCOR_ERROR_TYPE_CXL_IDE_TX: 414 return CXL_RAS_UNC_ERR_CXL_IDE_TX; 415 case CXL_UNCOR_ERROR_TYPE_CXL_IDE_RX: 416 return CXL_RAS_UNC_ERR_CXL_IDE_RX; 417 default: 418 return -EINVAL; 419 } 420 } 421 422 static int ct3d_qmp_cor_err_to_cxl(CxlCorErrorType qmp_err) 423 { 424 switch (qmp_err) { 425 case CXL_COR_ERROR_TYPE_CACHE_DATA_ECC: 426 return CXL_RAS_COR_ERR_CACHE_DATA_ECC; 427 case CXL_COR_ERROR_TYPE_MEM_DATA_ECC: 428 return CXL_RAS_COR_ERR_MEM_DATA_ECC; 429 case CXL_COR_ERROR_TYPE_CRC_THRESHOLD: 430 return CXL_RAS_COR_ERR_CRC_THRESHOLD; 431 case CXL_COR_ERROR_TYPE_RETRY_THRESHOLD: 432 return CXL_RAS_COR_ERR_RETRY_THRESHOLD; 433 case CXL_COR_ERROR_TYPE_CACHE_POISON_RECEIVED: 434 return CXL_RAS_COR_ERR_CACHE_POISON_RECEIVED; 435 case CXL_COR_ERROR_TYPE_MEM_POISON_RECEIVED: 436 return CXL_RAS_COR_ERR_MEM_POISON_RECEIVED; 437 case CXL_COR_ERROR_TYPE_PHYSICAL: 438 return CXL_RAS_COR_ERR_PHYSICAL; 439 default: 440 return -EINVAL; 441 } 442 } 443 444 static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value, 445 unsigned size) 446 { 447 CXLComponentState *cxl_cstate = opaque; 448 ComponentRegisters *cregs = &cxl_cstate->crb; 449 CXLType3Dev *ct3d = container_of(cxl_cstate, CXLType3Dev, cxl_cstate); 450 uint32_t *cache_mem = cregs->cache_mem_registers; 
451 bool should_commit = false; 452 bool should_uncommit = false; 453 int which_hdm = -1; 454 455 assert(size == 4); 456 g_assert(offset < CXL2_COMPONENT_CM_REGION_SIZE); 457 458 switch (offset) { 459 case A_CXL_HDM_DECODER0_CTRL: 460 should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT); 461 should_uncommit = !should_commit; 462 which_hdm = 0; 463 break; 464 case A_CXL_HDM_DECODER1_CTRL: 465 should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT); 466 should_uncommit = !should_commit; 467 which_hdm = 1; 468 break; 469 case A_CXL_HDM_DECODER2_CTRL: 470 should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT); 471 should_uncommit = !should_commit; 472 which_hdm = 2; 473 break; 474 case A_CXL_HDM_DECODER3_CTRL: 475 should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT); 476 should_uncommit = !should_commit; 477 which_hdm = 3; 478 break; 479 case A_CXL_RAS_UNC_ERR_STATUS: 480 { 481 uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL); 482 uint32_t fe = FIELD_EX32(capctrl, CXL_RAS_ERR_CAP_CTRL, 483 FIRST_ERROR_POINTER); 484 CXLError *cxl_err; 485 uint32_t unc_err; 486 487 /* 488 * If single bit written that corresponds to the first error 489 * pointer being cleared, update the status and header log. 490 */ 491 if (!QTAILQ_EMPTY(&ct3d->error_list)) { 492 if ((1 << fe) ^ value) { 493 CXLError *cxl_next; 494 /* 495 * Software is using wrong flow for multiple header recording 496 * Following behavior in PCIe r6.0 and assuming multiple 497 * header support. Implementation defined choice to clear all 498 * matching records if more than one bit set - which corresponds 499 * closest to behavior of hardware not capable of multiple 500 * header recording. 
501 */ 502 QTAILQ_FOREACH_SAFE(cxl_err, &ct3d->error_list, node, 503 cxl_next) { 504 if ((1 << cxl_err->type) & value) { 505 QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node); 506 g_free(cxl_err); 507 } 508 } 509 } else { 510 /* Done with previous FE, so drop from list */ 511 cxl_err = QTAILQ_FIRST(&ct3d->error_list); 512 QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node); 513 g_free(cxl_err); 514 } 515 516 /* 517 * If there is another FE, then put that in place and update 518 * the header log 519 */ 520 if (!QTAILQ_EMPTY(&ct3d->error_list)) { 521 uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0]; 522 int i; 523 524 cxl_err = QTAILQ_FIRST(&ct3d->error_list); 525 for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) { 526 stl_le_p(header_log + i, cxl_err->header[i]); 527 } 528 capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL, 529 FIRST_ERROR_POINTER, cxl_err->type); 530 } else { 531 /* 532 * If no more errors, then follow recommendation of PCI spec 533 * r6.0 6.2.4.2 to set the first error pointer to a status 534 * bit that will never be used. 
535 */ 536 capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL, 537 FIRST_ERROR_POINTER, 538 CXL_RAS_UNC_ERR_CXL_UNUSED); 539 } 540 stl_le_p((uint8_t *)cache_mem + A_CXL_RAS_ERR_CAP_CTRL, capctrl); 541 } 542 unc_err = 0; 543 QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) { 544 unc_err |= 1 << cxl_err->type; 545 } 546 stl_le_p((uint8_t *)cache_mem + offset, unc_err); 547 548 return; 549 } 550 case A_CXL_RAS_COR_ERR_STATUS: 551 { 552 uint32_t rw1c = value; 553 uint32_t temp = ldl_le_p((uint8_t *)cache_mem + offset); 554 temp &= ~rw1c; 555 stl_le_p((uint8_t *)cache_mem + offset, temp); 556 return; 557 } 558 default: 559 break; 560 } 561 562 stl_le_p((uint8_t *)cache_mem + offset, value); 563 if (should_commit) { 564 hdm_decoder_commit(ct3d, which_hdm); 565 } else if (should_uncommit) { 566 hdm_decoder_uncommit(ct3d, which_hdm); 567 } 568 } 569 570 static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp) 571 { 572 DeviceState *ds = DEVICE(ct3d); 573 574 if (!ct3d->hostmem && !ct3d->hostvmem && !ct3d->hostpmem) { 575 error_setg(errp, "at least one memdev property must be set"); 576 return false; 577 } else if (ct3d->hostmem && ct3d->hostpmem) { 578 error_setg(errp, "[memdev] cannot be used with new " 579 "[persistent-memdev] property"); 580 return false; 581 } else if (ct3d->hostmem) { 582 /* Use of hostmem property implies pmem */ 583 ct3d->hostpmem = ct3d->hostmem; 584 ct3d->hostmem = NULL; 585 } 586 587 if (ct3d->hostpmem && !ct3d->lsa) { 588 error_setg(errp, "lsa property must be set for persistent devices"); 589 return false; 590 } 591 592 if (ct3d->hostvmem) { 593 MemoryRegion *vmr; 594 char *v_name; 595 596 vmr = host_memory_backend_get_memory(ct3d->hostvmem); 597 if (!vmr) { 598 error_setg(errp, "volatile memdev must have backing device"); 599 return false; 600 } 601 memory_region_set_nonvolatile(vmr, false); 602 memory_region_set_enabled(vmr, true); 603 host_memory_backend_set_mapped(ct3d->hostvmem, true); 604 if (ds->id) { 605 v_name = 
g_strdup_printf("cxl-type3-dpa-vmem-space:%s", ds->id); 606 } else { 607 v_name = g_strdup("cxl-type3-dpa-vmem-space"); 608 } 609 address_space_init(&ct3d->hostvmem_as, vmr, v_name); 610 ct3d->cxl_dstate.vmem_size = memory_region_size(vmr); 611 ct3d->cxl_dstate.mem_size += memory_region_size(vmr); 612 g_free(v_name); 613 } 614 615 if (ct3d->hostpmem) { 616 MemoryRegion *pmr; 617 char *p_name; 618 619 pmr = host_memory_backend_get_memory(ct3d->hostpmem); 620 if (!pmr) { 621 error_setg(errp, "persistent memdev must have backing device"); 622 return false; 623 } 624 memory_region_set_nonvolatile(pmr, true); 625 memory_region_set_enabled(pmr, true); 626 host_memory_backend_set_mapped(ct3d->hostpmem, true); 627 if (ds->id) { 628 p_name = g_strdup_printf("cxl-type3-dpa-pmem-space:%s", ds->id); 629 } else { 630 p_name = g_strdup("cxl-type3-dpa-pmem-space"); 631 } 632 address_space_init(&ct3d->hostpmem_as, pmr, p_name); 633 ct3d->cxl_dstate.pmem_size = memory_region_size(pmr); 634 ct3d->cxl_dstate.mem_size += memory_region_size(pmr); 635 g_free(p_name); 636 } 637 638 return true; 639 } 640 641 static DOEProtocol doe_cdat_prot[] = { 642 { CXL_VENDOR_ID, CXL_DOE_TABLE_ACCESS, cxl_doe_cdat_rsp }, 643 { } 644 }; 645 646 static void ct3_realize(PCIDevice *pci_dev, Error **errp) 647 { 648 ERRP_GUARD(); 649 CXLType3Dev *ct3d = CXL_TYPE3(pci_dev); 650 CXLComponentState *cxl_cstate = &ct3d->cxl_cstate; 651 ComponentRegisters *regs = &cxl_cstate->crb; 652 MemoryRegion *mr = ®s->component_registers; 653 uint8_t *pci_conf = pci_dev->config; 654 unsigned short msix_num = 6; 655 int i, rc; 656 657 QTAILQ_INIT(&ct3d->error_list); 658 659 if (!cxl_setup_memory(ct3d, errp)) { 660 return; 661 } 662 663 pci_config_set_prog_interface(pci_conf, 0x10); 664 665 pcie_endpoint_cap_init(pci_dev, 0x80); 666 if (ct3d->sn != UI64_NULL) { 667 pcie_dev_ser_num_init(pci_dev, 0x100, ct3d->sn); 668 cxl_cstate->dvsec_offset = 0x100 + 0x0c; 669 } else { 670 cxl_cstate->dvsec_offset = 0x100; 671 } 672 673 
ct3d->cxl_cstate.pdev = pci_dev; 674 build_dvsecs(ct3d); 675 676 regs->special_ops = g_new0(MemoryRegionOps, 1); 677 regs->special_ops->write = ct3d_reg_write; 678 679 cxl_component_register_block_init(OBJECT(pci_dev), cxl_cstate, 680 TYPE_CXL_TYPE3); 681 682 pci_register_bar( 683 pci_dev, CXL_COMPONENT_REG_BAR_IDX, 684 PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64, mr); 685 686 cxl_device_register_block_init(OBJECT(pci_dev), &ct3d->cxl_dstate, 687 &ct3d->cci); 688 pci_register_bar(pci_dev, CXL_DEVICE_REG_BAR_IDX, 689 PCI_BASE_ADDRESS_SPACE_MEMORY | 690 PCI_BASE_ADDRESS_MEM_TYPE_64, 691 &ct3d->cxl_dstate.device_registers); 692 693 /* MSI(-X) Initialization */ 694 rc = msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL); 695 if (rc) { 696 goto err_address_space_free; 697 } 698 for (i = 0; i < msix_num; i++) { 699 msix_vector_use(pci_dev, i); 700 } 701 702 /* DOE Initialization */ 703 pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true, 0); 704 705 cxl_cstate->cdat.build_cdat_table = ct3_build_cdat_table; 706 cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table; 707 cxl_cstate->cdat.private = ct3d; 708 if (!cxl_doe_cdat_init(cxl_cstate, errp)) { 709 goto err_free_special_ops; 710 } 711 712 pcie_cap_deverr_init(pci_dev); 713 /* Leave a bit of room for expansion */ 714 rc = pcie_aer_init(pci_dev, PCI_ERR_VER, 0x200, PCI_ERR_SIZEOF, NULL); 715 if (rc) { 716 goto err_release_cdat; 717 } 718 cxl_event_init(&ct3d->cxl_dstate, 2); 719 720 return; 721 722 err_release_cdat: 723 cxl_doe_cdat_release(cxl_cstate); 724 err_free_special_ops: 725 g_free(regs->special_ops); 726 err_address_space_free: 727 if (ct3d->hostpmem) { 728 address_space_destroy(&ct3d->hostpmem_as); 729 } 730 if (ct3d->hostvmem) { 731 address_space_destroy(&ct3d->hostvmem_as); 732 } 733 return; 734 } 735 736 static void ct3_exit(PCIDevice *pci_dev) 737 { 738 CXLType3Dev *ct3d = CXL_TYPE3(pci_dev); 739 CXLComponentState *cxl_cstate = &ct3d->cxl_cstate; 740 ComponentRegisters 
*regs = &cxl_cstate->crb; 741 742 pcie_aer_exit(pci_dev); 743 cxl_doe_cdat_release(cxl_cstate); 744 g_free(regs->special_ops); 745 if (ct3d->hostpmem) { 746 address_space_destroy(&ct3d->hostpmem_as); 747 } 748 if (ct3d->hostvmem) { 749 address_space_destroy(&ct3d->hostvmem_as); 750 } 751 } 752 753 static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa) 754 { 755 int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO; 756 uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers; 757 unsigned int hdm_count; 758 uint32_t cap; 759 uint64_t dpa_base = 0; 760 int i; 761 762 cap = ldl_le_p(cache_mem + R_CXL_HDM_DECODER_CAPABILITY); 763 hdm_count = cxl_decoder_count_dec(FIELD_EX32(cap, 764 CXL_HDM_DECODER_CAPABILITY, 765 DECODER_COUNT)); 766 767 for (i = 0; i < hdm_count; i++) { 768 uint64_t decoder_base, decoder_size, hpa_offset, skip; 769 uint32_t hdm_ctrl, low, high; 770 int ig, iw; 771 772 low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_BASE_LO + i * hdm_inc); 773 high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_BASE_HI + i * hdm_inc); 774 decoder_base = ((uint64_t)high << 32) | (low & 0xf0000000); 775 776 low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_SIZE_LO + i * hdm_inc); 777 high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_SIZE_HI + i * hdm_inc); 778 decoder_size = ((uint64_t)high << 32) | (low & 0xf0000000); 779 780 low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_DPA_SKIP_LO + 781 i * hdm_inc); 782 high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_DPA_SKIP_HI + 783 i * hdm_inc); 784 skip = ((uint64_t)high << 32) | (low & 0xf0000000); 785 dpa_base += skip; 786 787 hpa_offset = (uint64_t)host_addr - decoder_base; 788 789 hdm_ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + i * hdm_inc); 790 iw = FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, IW); 791 ig = FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, IG); 792 if (!FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED)) { 793 return false; 794 } 795 if (((uint64_t)host_addr < 
decoder_base) || 796 (hpa_offset >= decoder_size)) { 797 int decoded_iw = cxl_interleave_ways_dec(iw, &error_fatal); 798 799 if (decoded_iw == 0) { 800 return false; 801 } 802 803 dpa_base += decoder_size / decoded_iw; 804 continue; 805 } 806 807 *dpa = dpa_base + 808 ((MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) | 809 ((MAKE_64BIT_MASK(8 + ig + iw, 64 - 8 - ig - iw) & hpa_offset) 810 >> iw)); 811 812 return true; 813 } 814 return false; 815 } 816 817 static int cxl_type3_hpa_to_as_and_dpa(CXLType3Dev *ct3d, 818 hwaddr host_addr, 819 unsigned int size, 820 AddressSpace **as, 821 uint64_t *dpa_offset) 822 { 823 MemoryRegion *vmr = NULL, *pmr = NULL; 824 825 if (ct3d->hostvmem) { 826 vmr = host_memory_backend_get_memory(ct3d->hostvmem); 827 } 828 if (ct3d->hostpmem) { 829 pmr = host_memory_backend_get_memory(ct3d->hostpmem); 830 } 831 832 if (!vmr && !pmr) { 833 return -ENODEV; 834 } 835 836 if (!cxl_type3_dpa(ct3d, host_addr, dpa_offset)) { 837 return -EINVAL; 838 } 839 840 if (*dpa_offset > ct3d->cxl_dstate.mem_size) { 841 return -EINVAL; 842 } 843 844 if (vmr) { 845 if (*dpa_offset < memory_region_size(vmr)) { 846 *as = &ct3d->hostvmem_as; 847 } else { 848 *as = &ct3d->hostpmem_as; 849 *dpa_offset -= memory_region_size(vmr); 850 } 851 } else { 852 *as = &ct3d->hostpmem_as; 853 } 854 855 return 0; 856 } 857 858 MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data, 859 unsigned size, MemTxAttrs attrs) 860 { 861 CXLType3Dev *ct3d = CXL_TYPE3(d); 862 uint64_t dpa_offset = 0; 863 AddressSpace *as = NULL; 864 int res; 865 866 res = cxl_type3_hpa_to_as_and_dpa(ct3d, host_addr, size, 867 &as, &dpa_offset); 868 if (res) { 869 return MEMTX_ERROR; 870 } 871 872 if (sanitize_running(&ct3d->cci)) { 873 qemu_guest_getrandom_nofail(data, size); 874 return MEMTX_OK; 875 } 876 877 return address_space_read(as, dpa_offset, attrs, data, size); 878 } 879 880 MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data, 881 unsigned size, MemTxAttrs 
attrs) 882 { 883 CXLType3Dev *ct3d = CXL_TYPE3(d); 884 uint64_t dpa_offset = 0; 885 AddressSpace *as = NULL; 886 int res; 887 888 res = cxl_type3_hpa_to_as_and_dpa(ct3d, host_addr, size, 889 &as, &dpa_offset); 890 if (res) { 891 return MEMTX_ERROR; 892 } 893 894 if (sanitize_running(&ct3d->cci)) { 895 return MEMTX_OK; 896 } 897 898 return address_space_write(as, dpa_offset, attrs, &data, size); 899 } 900 901 static void ct3d_reset(DeviceState *dev) 902 { 903 CXLType3Dev *ct3d = CXL_TYPE3(dev); 904 uint32_t *reg_state = ct3d->cxl_cstate.crb.cache_mem_registers; 905 uint32_t *write_msk = ct3d->cxl_cstate.crb.cache_mem_regs_write_mask; 906 907 cxl_component_register_init_common(reg_state, write_msk, CXL2_TYPE3_DEVICE); 908 cxl_device_register_init_t3(ct3d); 909 910 /* 911 * Bring up an endpoint to target with MCTP over VDM. 912 * This device is emulating an MLD with single LD for now. 913 */ 914 cxl_initialize_t3_fm_owned_ld_mctpcci(&ct3d->vdm_fm_owned_ld_mctp_cci, 915 DEVICE(ct3d), DEVICE(ct3d), 916 512); /* Max payload made up */ 917 cxl_initialize_t3_ld_cci(&ct3d->ld0_cci, DEVICE(ct3d), DEVICE(ct3d), 918 512); /* Max payload made up */ 919 920 } 921 922 static Property ct3_props[] = { 923 DEFINE_PROP_LINK("memdev", CXLType3Dev, hostmem, TYPE_MEMORY_BACKEND, 924 HostMemoryBackend *), /* for backward compatibility */ 925 DEFINE_PROP_LINK("persistent-memdev", CXLType3Dev, hostpmem, 926 TYPE_MEMORY_BACKEND, HostMemoryBackend *), 927 DEFINE_PROP_LINK("volatile-memdev", CXLType3Dev, hostvmem, 928 TYPE_MEMORY_BACKEND, HostMemoryBackend *), 929 DEFINE_PROP_LINK("lsa", CXLType3Dev, lsa, TYPE_MEMORY_BACKEND, 930 HostMemoryBackend *), 931 DEFINE_PROP_UINT64("sn", CXLType3Dev, sn, UI64_NULL), 932 DEFINE_PROP_STRING("cdat", CXLType3Dev, cxl_cstate.cdat.filename), 933 DEFINE_PROP_END_OF_LIST(), 934 }; 935 936 static uint64_t get_lsa_size(CXLType3Dev *ct3d) 937 { 938 MemoryRegion *mr; 939 940 if (!ct3d->lsa) { 941 return 0; 942 } 943 944 mr = 
host_memory_backend_get_memory(ct3d->lsa); 945 return memory_region_size(mr); 946 } 947 948 static void validate_lsa_access(MemoryRegion *mr, uint64_t size, 949 uint64_t offset) 950 { 951 assert(offset + size <= memory_region_size(mr)); 952 assert(offset + size > offset); 953 } 954 955 static uint64_t get_lsa(CXLType3Dev *ct3d, void *buf, uint64_t size, 956 uint64_t offset) 957 { 958 MemoryRegion *mr; 959 void *lsa; 960 961 if (!ct3d->lsa) { 962 return 0; 963 } 964 965 mr = host_memory_backend_get_memory(ct3d->lsa); 966 validate_lsa_access(mr, size, offset); 967 968 lsa = memory_region_get_ram_ptr(mr) + offset; 969 memcpy(buf, lsa, size); 970 971 return size; 972 } 973 974 static void set_lsa(CXLType3Dev *ct3d, const void *buf, uint64_t size, 975 uint64_t offset) 976 { 977 MemoryRegion *mr; 978 void *lsa; 979 980 if (!ct3d->lsa) { 981 return; 982 } 983 984 mr = host_memory_backend_get_memory(ct3d->lsa); 985 validate_lsa_access(mr, size, offset); 986 987 lsa = memory_region_get_ram_ptr(mr) + offset; 988 memcpy(lsa, buf, size); 989 memory_region_set_dirty(mr, offset, size); 990 991 /* 992 * Just like the PMEM, if the guest is not allowed to exit gracefully, label 993 * updates will get lost. 
994 */ 995 } 996 997 static bool set_cacheline(CXLType3Dev *ct3d, uint64_t dpa_offset, uint8_t *data) 998 { 999 MemoryRegion *vmr = NULL, *pmr = NULL; 1000 AddressSpace *as; 1001 1002 if (ct3d->hostvmem) { 1003 vmr = host_memory_backend_get_memory(ct3d->hostvmem); 1004 } 1005 if (ct3d->hostpmem) { 1006 pmr = host_memory_backend_get_memory(ct3d->hostpmem); 1007 } 1008 1009 if (!vmr && !pmr) { 1010 return false; 1011 } 1012 1013 if (dpa_offset + CXL_CACHE_LINE_SIZE > ct3d->cxl_dstate.mem_size) { 1014 return false; 1015 } 1016 1017 if (vmr) { 1018 if (dpa_offset < memory_region_size(vmr)) { 1019 as = &ct3d->hostvmem_as; 1020 } else { 1021 as = &ct3d->hostpmem_as; 1022 dpa_offset -= memory_region_size(vmr); 1023 } 1024 } else { 1025 as = &ct3d->hostpmem_as; 1026 } 1027 1028 address_space_write(as, dpa_offset, MEMTXATTRS_UNSPECIFIED, &data, 1029 CXL_CACHE_LINE_SIZE); 1030 return true; 1031 } 1032 1033 void cxl_set_poison_list_overflowed(CXLType3Dev *ct3d) 1034 { 1035 ct3d->poison_list_overflowed = true; 1036 ct3d->poison_list_overflow_ts = 1037 cxl_device_get_timestamp(&ct3d->cxl_dstate); 1038 } 1039 1040 void qmp_cxl_inject_poison(const char *path, uint64_t start, uint64_t length, 1041 Error **errp) 1042 { 1043 Object *obj = object_resolve_path(path, NULL); 1044 CXLType3Dev *ct3d; 1045 CXLPoison *p; 1046 1047 if (length % 64) { 1048 error_setg(errp, "Poison injection must be in multiples of 64 bytes"); 1049 return; 1050 } 1051 if (start % 64) { 1052 error_setg(errp, "Poison start address must be 64 byte aligned"); 1053 return; 1054 } 1055 if (!obj) { 1056 error_setg(errp, "Unable to resolve path"); 1057 return; 1058 } 1059 if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) { 1060 error_setg(errp, "Path does not point to a CXL type 3 device"); 1061 return; 1062 } 1063 1064 ct3d = CXL_TYPE3(obj); 1065 1066 QLIST_FOREACH(p, &ct3d->poison_list, node) { 1067 if (((start >= p->start) && (start < p->start + p->length)) || 1068 ((start + length > p->start) && 1069 (start + length 
<= p->start + p->length))) { 1070 error_setg(errp, 1071 "Overlap with existing poisoned region not supported"); 1072 return; 1073 } 1074 } 1075 1076 if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) { 1077 cxl_set_poison_list_overflowed(ct3d); 1078 return; 1079 } 1080 1081 p = g_new0(CXLPoison, 1); 1082 p->length = length; 1083 p->start = start; 1084 /* Different from injected via the mbox */ 1085 p->type = CXL_POISON_TYPE_INTERNAL; 1086 1087 QLIST_INSERT_HEAD(&ct3d->poison_list, p, node); 1088 ct3d->poison_list_cnt++; 1089 } 1090 1091 /* For uncorrectable errors include support for multiple header recording */ 1092 void qmp_cxl_inject_uncorrectable_errors(const char *path, 1093 CXLUncorErrorRecordList *errors, 1094 Error **errp) 1095 { 1096 Object *obj = object_resolve_path(path, NULL); 1097 static PCIEAERErr err = {}; 1098 CXLType3Dev *ct3d; 1099 CXLError *cxl_err; 1100 uint32_t *reg_state; 1101 uint32_t unc_err; 1102 bool first; 1103 1104 if (!obj) { 1105 error_setg(errp, "Unable to resolve path"); 1106 return; 1107 } 1108 1109 if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) { 1110 error_setg(errp, "Path does not point to a CXL type 3 device"); 1111 return; 1112 } 1113 1114 err.status = PCI_ERR_UNC_INTN; 1115 err.source_id = pci_requester_id(PCI_DEVICE(obj)); 1116 err.flags = 0; 1117 1118 ct3d = CXL_TYPE3(obj); 1119 1120 first = QTAILQ_EMPTY(&ct3d->error_list); 1121 reg_state = ct3d->cxl_cstate.crb.cache_mem_registers; 1122 while (errors) { 1123 uint32List *header = errors->value->header; 1124 uint8_t header_count = 0; 1125 int cxl_err_code; 1126 1127 cxl_err_code = ct3d_qmp_uncor_err_to_cxl(errors->value->type); 1128 if (cxl_err_code < 0) { 1129 error_setg(errp, "Unknown error code"); 1130 return; 1131 } 1132 1133 /* If the error is masked, nothing to do here */ 1134 if (!((1 << cxl_err_code) & 1135 ~ldl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK))) { 1136 errors = errors->next; 1137 continue; 1138 } 1139 1140 cxl_err = g_malloc0(sizeof(*cxl_err)); 1141 1142 
cxl_err->type = cxl_err_code; 1143 while (header && header_count < 32) { 1144 cxl_err->header[header_count++] = header->value; 1145 header = header->next; 1146 } 1147 if (header_count > 32) { 1148 error_setg(errp, "Header must be 32 DWORD or less"); 1149 return; 1150 } 1151 QTAILQ_INSERT_TAIL(&ct3d->error_list, cxl_err, node); 1152 1153 errors = errors->next; 1154 } 1155 1156 if (first && !QTAILQ_EMPTY(&ct3d->error_list)) { 1157 uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers; 1158 uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL); 1159 uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0]; 1160 int i; 1161 1162 cxl_err = QTAILQ_FIRST(&ct3d->error_list); 1163 for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) { 1164 stl_le_p(header_log + i, cxl_err->header[i]); 1165 } 1166 1167 capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL, 1168 FIRST_ERROR_POINTER, cxl_err->type); 1169 stl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL, capctrl); 1170 } 1171 1172 unc_err = 0; 1173 QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) { 1174 unc_err |= (1 << cxl_err->type); 1175 } 1176 if (!unc_err) { 1177 return; 1178 } 1179 1180 stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, unc_err); 1181 pcie_aer_inject_error(PCI_DEVICE(obj), &err); 1182 1183 return; 1184 } 1185 1186 void qmp_cxl_inject_correctable_error(const char *path, CxlCorErrorType type, 1187 Error **errp) 1188 { 1189 static PCIEAERErr err = {}; 1190 Object *obj = object_resolve_path(path, NULL); 1191 CXLType3Dev *ct3d; 1192 uint32_t *reg_state; 1193 uint32_t cor_err; 1194 int cxl_err_type; 1195 1196 if (!obj) { 1197 error_setg(errp, "Unable to resolve path"); 1198 return; 1199 } 1200 if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) { 1201 error_setg(errp, "Path does not point to a CXL type 3 device"); 1202 return; 1203 } 1204 1205 err.status = PCI_ERR_COR_INTERNAL; 1206 err.source_id = pci_requester_id(PCI_DEVICE(obj)); 1207 err.flags = PCIE_AER_ERR_IS_CORRECTABLE; 1208 1209 ct3d = CXL_TYPE3(obj); 1210 
    reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    cor_err = ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS);

    cxl_err_type = ct3d_qmp_cor_err_to_cxl(type);
    if (cxl_err_type < 0) {
        error_setg(errp, "Invalid COR error");
        return;
    }
    /* If the error is masked, nothing to do here */
    if (!((1 << cxl_err_type) &
          ~ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK))) {
        return;
    }

    cor_err |= (1 << cxl_err_type);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, cor_err);

    pcie_aer_inject_error(PCI_DEVICE(obj), &err);
}

/*
 * Fill in the common CXL event record header: 24-bit flags, record
 * length, UUID identifying the record type, and the device timestamp.
 */
static void cxl_assign_event_header(CXLEventRecordHdr *hdr,
                                    const QemuUUID *uuid, uint32_t flags,
                                    uint8_t length, uint64_t timestamp)
{
    st24_le_p(&hdr->flags, flags);
    hdr->length = length;
    memcpy(&hdr->id, uuid, sizeof(hdr->id));
    stq_le_p(&hdr->timestamp, timestamp);
}

/* UUID identifying a General Media event record */
static const QemuUUID gen_media_uuid = {
    .data = UUID(0xfbcd0a77, 0xc260, 0x417f,
                 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6),
};

/* UUID identifying a DRAM event record */
static const QemuUUID dram_uuid = {
    .data = UUID(0x601dcbb3, 0x9c06, 0x4eab, 0xb8, 0xaf,
                 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24),
};

/* UUID identifying a Memory Module event record */
static const QemuUUID memory_module_uuid = {
    .data = UUID(0xfe927475, 0xdd59, 0x4339, 0xa5, 0x86,
                 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74),
};

/* Validity flag bits for the General Media event record */
#define CXL_GMER_VALID_CHANNEL BIT(0)
#define CXL_GMER_VALID_RANK BIT(1)
#define CXL_GMER_VALID_DEVICE BIT(2)
#define CXL_GMER_VALID_COMPONENT BIT(3)

/* Map the QMP event log enum onto the internal log type, or -EINVAL */
static int ct3d_qmp_cxl_event_log_enc(CxlEventLog log)
{
    switch (log) {
    case CXL_EVENT_LOG_INFORMATIONAL:
        return CXL_EVENT_TYPE_INFO;
    case CXL_EVENT_LOG_WARNING:
        return CXL_EVENT_TYPE_WARN;
    case CXL_EVENT_LOG_FAILURE:
        return CXL_EVENT_TYPE_FAIL;
    case CXL_EVENT_LOG_FATAL:
        return CXL_EVENT_TYPE_FATAL;
    /* DCD not yet supported */
    default:
        return -EINVAL;
    }
}

/* Component ID is device specific. Define this as a string. */
void qmp_cxl_inject_general_media_event(const char *path, CxlEventLog log,
                                        uint8_t flags, uint64_t dpa,
                                        uint8_t descriptor, uint8_t type,
                                        uint8_t transaction_type,
                                        bool has_channel, uint8_t channel,
                                        bool has_rank, uint8_t rank,
                                        bool has_device, uint32_t device,
                                        const char *component_id,
                                        Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLEventGenMedia gem;
    CXLEventRecordHdr *hdr = &gem.hdr;
    CXLDeviceState *cxlds;
    CXLType3Dev *ct3d;
    uint16_t valid_flags = 0;
    uint8_t enc_log;
    int rc;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }
    ct3d = CXL_TYPE3(obj);
    cxlds = &ct3d->cxl_dstate;

    rc = ct3d_qmp_cxl_event_log_enc(log);
    if (rc < 0) {
        error_setg(errp, "Unhandled error log type");
        return;
    }
    enc_log = rc;

    memset(&gem, 0, sizeof(gem));
    cxl_assign_event_header(hdr, &gen_media_uuid, flags, sizeof(gem),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));

    stq_le_p(&gem.phys_addr, dpa);
    gem.descriptor = descriptor;
    gem.type = type;
    gem.transaction_type = transaction_type;

    /* Only mark fields valid when the caller actually supplied them */
    if (has_channel) {
        gem.channel = channel;
        valid_flags |= CXL_GMER_VALID_CHANNEL;
    }

    if (has_rank) {
        gem.rank = rank;
        valid_flags |= CXL_GMER_VALID_RANK;
    }

    if (has_device) {
        /* Device is a 24-bit field */
        st24_le_p(gem.device, device);
        valid_flags |= CXL_GMER_VALID_DEVICE;
    }

    if (component_id) {
        /* gem was zeroed above, so the copy is always NUL terminated */
        strncpy((char *)gem.component_id, component_id,
                sizeof(gem.component_id) - 1);
        valid_flags |= CXL_GMER_VALID_COMPONENT;
    }

    stw_le_p(&gem.validity_flags, valid_flags);

    if
       (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&gem)) {
        /* Interrupt the guest only if the insert requests notification */
        cxl_event_irq_assert(ct3d);
    }
}

/* Validity flag bits for the DRAM event record */
#define CXL_DRAM_VALID_CHANNEL BIT(0)
#define CXL_DRAM_VALID_RANK BIT(1)
#define CXL_DRAM_VALID_NIBBLE_MASK BIT(2)
#define CXL_DRAM_VALID_BANK_GROUP BIT(3)
#define CXL_DRAM_VALID_BANK BIT(4)
#define CXL_DRAM_VALID_ROW BIT(5)
#define CXL_DRAM_VALID_COLUMN BIT(6)
#define CXL_DRAM_VALID_CORRECTION_MASK BIT(7)

/* QMP command: inject a DRAM event record into the chosen event log */
void qmp_cxl_inject_dram_event(const char *path, CxlEventLog log, uint8_t flags,
                               uint64_t dpa, uint8_t descriptor,
                               uint8_t type, uint8_t transaction_type,
                               bool has_channel, uint8_t channel,
                               bool has_rank, uint8_t rank,
                               bool has_nibble_mask, uint32_t nibble_mask,
                               bool has_bank_group, uint8_t bank_group,
                               bool has_bank, uint8_t bank,
                               bool has_row, uint32_t row,
                               bool has_column, uint16_t column,
                               bool has_correction_mask,
                               uint64List *correction_mask,
                               Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLEventDram dram;
    CXLEventRecordHdr *hdr = &dram.hdr;
    CXLDeviceState *cxlds;
    CXLType3Dev *ct3d;
    uint16_t valid_flags = 0;
    uint8_t enc_log;
    int rc;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }
    ct3d = CXL_TYPE3(obj);
    cxlds = &ct3d->cxl_dstate;

    rc = ct3d_qmp_cxl_event_log_enc(log);
    if (rc < 0) {
        error_setg(errp, "Unhandled error log type");
        return;
    }
    enc_log = rc;

    memset(&dram, 0, sizeof(dram));
    cxl_assign_event_header(hdr, &dram_uuid, flags, sizeof(dram),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));
    stq_le_p(&dram.phys_addr, dpa);
    dram.descriptor = descriptor;
    dram.type = type;
    dram.transaction_type = transaction_type;
    /* Only mark fields valid when the caller actually supplied them */
    if (has_channel) {
        dram.channel = channel;
        valid_flags |= CXL_DRAM_VALID_CHANNEL;
    }

    if (has_rank) {
        dram.rank = rank;
        valid_flags |= CXL_DRAM_VALID_RANK;
    }

    if (has_nibble_mask) {
        /* Nibble mask is a 24-bit field */
        st24_le_p(dram.nibble_mask, nibble_mask);
        valid_flags |= CXL_DRAM_VALID_NIBBLE_MASK;
    }

    if (has_bank_group) {
        dram.bank_group = bank_group;
        valid_flags |= CXL_DRAM_VALID_BANK_GROUP;
    }

    if (has_bank) {
        dram.bank = bank;
        valid_flags |= CXL_DRAM_VALID_BANK;
    }

    if (has_row) {
        /* Row is a 24-bit field */
        st24_le_p(dram.row, row);
        valid_flags |= CXL_DRAM_VALID_ROW;
    }

    if (has_column) {
        stw_le_p(&dram.column, column);
        valid_flags |= CXL_DRAM_VALID_COLUMN;
    }

    if (has_correction_mask) {
        int count = 0;
        /* At most four 64-bit correction mask words; extras are ignored */
        while (correction_mask && count < 4) {
            stq_le_p(&dram.correction_mask[count],
                     correction_mask->value);
            count++;
            correction_mask = correction_mask->next;
        }
        valid_flags |= CXL_DRAM_VALID_CORRECTION_MASK;
    }

    stw_le_p(&dram.validity_flags, valid_flags);

    if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&dram)) {
        cxl_event_irq_assert(ct3d);
    }
    return;
}

/* QMP command: inject a Memory Module event record (health/status info) */
void qmp_cxl_inject_memory_module_event(const char *path, CxlEventLog log,
                                        uint8_t flags, uint8_t type,
                                        uint8_t health_status,
                                        uint8_t media_status,
                                        uint8_t additional_status,
                                        uint8_t life_used,
                                        int16_t temperature,
                                        uint32_t dirty_shutdown_count,
                                        uint32_t corrected_volatile_error_count,
                                        uint32_t corrected_persist_error_count,
                                        Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLEventMemoryModule module;
    CXLEventRecordHdr *hdr = &module.hdr;
    CXLDeviceState *cxlds;
    CXLType3Dev *ct3d;
    uint8_t enc_log;
    int rc;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if
       (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }
    ct3d = CXL_TYPE3(obj);
    cxlds = &ct3d->cxl_dstate;

    rc = ct3d_qmp_cxl_event_log_enc(log);
    if (rc < 0) {
        error_setg(errp, "Unhandled error log type");
        return;
    }
    enc_log = rc;

    memset(&module, 0, sizeof(module));
    cxl_assign_event_header(hdr, &memory_module_uuid, flags, sizeof(module),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));

    module.type = type;
    module.health_status = health_status;
    module.media_status = media_status;
    module.additional_status = additional_status;
    module.life_used = life_used;
    stw_le_p(&module.temperature, temperature);
    stl_le_p(&module.dirty_shutdown_count, dirty_shutdown_count);
    stl_le_p(&module.corrected_volatile_error_count,
             corrected_volatile_error_count);
    stl_le_p(&module.corrected_persistent_error_count,
             corrected_persist_error_count);

    if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&module)) {
        cxl_event_irq_assert(ct3d);
    }
}

/* QOM class initializer: wire up PCI identity, reset and CXL callbacks */
static void ct3_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
    CXLType3Class *cvc = CXL_TYPE3_CLASS(oc);

    /* PCI identity and config-space handlers */
    pc->realize = ct3_realize;
    pc->exit = ct3_exit;
    pc->class_id = PCI_CLASS_MEMORY_CXL;
    pc->vendor_id = PCI_VENDOR_ID_INTEL;
    pc->device_id = 0xd93; /* LVF for now */
    pc->revision = 1;

    pc->config_write = ct3d_config_write;
    pc->config_read = ct3d_config_read;

    /* Generic device properties */
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "CXL Memory Device (Type 3)";
    dc->reset = ct3d_reset;
    device_class_set_props(dc, ct3_props);

    /* CXL type 3 specific class callbacks */
    cvc->get_lsa_size = get_lsa_size;
    cvc->get_lsa = get_lsa;
    cvc->set_lsa = set_lsa;
    cvc->set_cacheline = set_cacheline;
}
1548 1549 static const TypeInfo ct3d_info = { 1550 .name = TYPE_CXL_TYPE3, 1551 .parent = TYPE_PCI_DEVICE, 1552 .class_size = sizeof(struct CXLType3Class), 1553 .class_init = ct3_class_init, 1554 .instance_size = sizeof(CXLType3Dev), 1555 .interfaces = (InterfaceInfo[]) { 1556 { INTERFACE_CXL_DEVICE }, 1557 { INTERFACE_PCIE_DEVICE }, 1558 {} 1559 }, 1560 }; 1561 1562 static void ct3d_registers(void) 1563 { 1564 type_register_static(&ct3d_info); 1565 } 1566 1567 type_init(ct3d_registers); 1568