/*
 * QEMU CXL Devices
 *
 * Copyright (c) 2020 Intel
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#ifndef CXL_DEVICE_H
#define CXL_DEVICE_H

#include "hw/cxl/cxl_component.h"
#include "hw/pci/pci_device.h"
#include "hw/register.h"
#include "hw/cxl/cxl_events.h"

/*
 * The following is how a CXL device's Memory Device registers are laid out.
 * The only requirement from the spec is that the capabilities array and the
 * capability headers start at offset 0 and are contiguously packed. The headers
 * themselves provide offsets to the register fields. For this emulation, the
 * actual registers will start at offset 0x80 (m == 0x80). No secondary
 * mailbox is implemented, which means that the offset of the start of the
 * mailbox payload (n) is given by
 * n = m + sizeof(mailbox registers) + sizeof(device registers).
 *
 *                       +---------------------------------+
 *                       |                                 |
 *                       |     Memory Device Registers     |
 *                       |                                 |
 * n + PAYLOAD_SIZE_MAX  -----------------------------------
 *                  ^    |                                 |
 *                  |    |                                 |
 *                  |    |                                 |
 *                  |    |                                 |
 *                  |    |                                 |
 *                  |    |         Mailbox Payload         |
 *                  |    |                                 |
 *                  |    |                                 |
 *                  |    |                                 |
 *                  n    -----------------------------------
 *                  ^    |        Mailbox Registers        |
 *                  |    |                                 |
 *                  |    -----------------------------------
 *                  |    |                                 |
 *                  |    |        Device Registers         |
 *                  |    |                                 |
 *                  m    -----------------------------------
 *                  ^    | Memory Device Capability Header |
 *                  |    -----------------------------------
 *                  |    |    Mailbox Capability Header    |
 *                  |    -----------------------------------
 *                  |    |    Device Capability Header     |
 *                  |    -----------------------------------
 *                  |    |    Device Cap Array Register    |
 *                  0    +---------------------------------+
 *
 */

#define CXL_DEVICE_CAP_HDR1_OFFSET 0x10 /* Figure 138 */
#define CXL_DEVICE_CAP_REG_SIZE 0x10 /* 8.2.8.2 */
#define CXL_DEVICE_CAPS_MAX 4 /* 8.2.8.2.1 + 8.2.8.5 */
#define CXL_CAPS_SIZE \
    (CXL_DEVICE_CAP_REG_SIZE * (CXL_DEVICE_CAPS_MAX + 1)) /* +1 for header */

#define CXL_DEVICE_STATUS_REGISTERS_OFFSET 0x80 /* Read comment above */
#define CXL_DEVICE_STATUS_REGISTERS_LENGTH 0x8 /* 8.2.8.3.1 */

#define CXL_MAILBOX_REGISTERS_OFFSET \
    (CXL_DEVICE_STATUS_REGISTERS_OFFSET + CXL_DEVICE_STATUS_REGISTERS_LENGTH)
#define CXL_MAILBOX_REGISTERS_SIZE 0x20 /* 8.2.8.4, Figure 139 */
#define CXL_MAILBOX_PAYLOAD_SHIFT 11
#define CXL_MAILBOX_MAX_PAYLOAD_SIZE (1 << CXL_MAILBOX_PAYLOAD_SHIFT)
#define CXL_MAILBOX_REGISTERS_LENGTH \
    (CXL_MAILBOX_REGISTERS_SIZE + CXL_MAILBOX_MAX_PAYLOAD_SIZE)

#define CXL_MEMORY_DEVICE_REGISTERS_OFFSET \
    (CXL_MAILBOX_REGISTERS_OFFSET + CXL_MAILBOX_REGISTERS_LENGTH)
#define CXL_MEMORY_DEVICE_REGISTERS_LENGTH 0x8

#define CXL_MMIO_SIZE \
    (CXL_DEVICE_CAP_REG_SIZE + CXL_DEVICE_STATUS_REGISTERS_LENGTH + \
     CXL_MAILBOX_REGISTERS_LENGTH + CXL_MEMORY_DEVICE_REGISTERS_LENGTH)
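
/*
 * A minimal compile-time sanity check of the layout described above (an
 * illustrative sketch, not part of the register definitions): with
 * CXL_MAILBOX_PAYLOAD_SHIFT == 11 the macros resolve to m == 0x80 and
 * n == 0xa8, and the memory device registers start at 0x8a8.  Plain C11
 * _Static_assert is used to keep the sketch self-contained; QEMU code would
 * more typically use QEMU_BUILD_BUG_ON().
 */
_Static_assert(CXL_MAILBOX_REGISTERS_OFFSET == 0x88,
               "mailbox registers must follow the device status registers");
_Static_assert(CXL_MAILBOX_REGISTERS_OFFSET + CXL_MAILBOX_REGISTERS_SIZE ==
               0xa8, "mailbox payload (n) must follow the mailbox registers");
_Static_assert(CXL_MEMORY_DEVICE_REGISTERS_OFFSET == 0x8a8,
               "memory device registers must follow the mailbox payload");
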
/* 8.2.8.4.5.1 Command Return Codes */
typedef enum {
    CXL_MBOX_SUCCESS = 0x0,
    CXL_MBOX_BG_STARTED = 0x1,
    CXL_MBOX_INVALID_INPUT = 0x2,
    CXL_MBOX_UNSUPPORTED = 0x3,
    CXL_MBOX_INTERNAL_ERROR = 0x4,
    CXL_MBOX_RETRY_REQUIRED = 0x5,
    CXL_MBOX_BUSY = 0x6,
    CXL_MBOX_MEDIA_DISABLED = 0x7,
    CXL_MBOX_FW_XFER_IN_PROGRESS = 0x8,
    CXL_MBOX_FW_XFER_OUT_OF_ORDER = 0x9,
    CXL_MBOX_FW_AUTH_FAILED = 0xa,
    CXL_MBOX_FW_INVALID_SLOT = 0xb,
    CXL_MBOX_FW_ROLLEDBACK = 0xc,
    CXL_MBOX_FW_REST_REQD = 0xd,
    CXL_MBOX_INVALID_HANDLE = 0xe,
    CXL_MBOX_INVALID_PA = 0xf,
    CXL_MBOX_INJECT_POISON_LIMIT = 0x10,
    CXL_MBOX_PERMANENT_MEDIA_FAILURE = 0x11,
    CXL_MBOX_ABORTED = 0x12,
    CXL_MBOX_INVALID_SECURITY_STATE = 0x13,
    CXL_MBOX_INCORRECT_PASSPHRASE = 0x14,
    CXL_MBOX_UNSUPPORTED_MAILBOX = 0x15,
    CXL_MBOX_INVALID_PAYLOAD_LENGTH = 0x16,
    CXL_MBOX_MAX = 0x17
} CXLRetCode;

typedef struct CXLCCI CXLCCI;
typedef struct cxl_device_state CXLDeviceState;
struct cxl_cmd;
typedef CXLRetCode (*opcode_handler)(const struct cxl_cmd *cmd,
                                     uint8_t *payload_in, size_t len_in,
                                     uint8_t *payload_out, size_t *len_out,
                                     CXLCCI *cci);
struct cxl_cmd {
    const char *name;
    opcode_handler handler;
    ssize_t in;
    uint16_t effect; /* Reported in CEL */
};

typedef struct CXLEvent {
    CXLEventRecordRaw data;
    QSIMPLEQ_ENTRY(CXLEvent) node;
} CXLEvent;

typedef struct CXLEventLog {
    uint16_t next_handle;
    uint16_t overflow_err_count;
    uint64_t first_overflow_timestamp;
    uint64_t last_overflow_timestamp;
    bool irq_enabled;
    int irq_vec;
    QemuMutex lock;
    QSIMPLEQ_HEAD(, CXLEvent) events;
} CXLEventLog;

typedef struct CXLCCI {
    const struct cxl_cmd (*cxl_cmd_set)[256];
    struct cel_log {
        uint16_t opcode;
        uint16_t effect;
    } cel_log[1 << 16];
    size_t cel_size;

    /* background command handling (times in ms) */
    struct {
        uint16_t opcode;
        uint16_t complete_pct;
        uint16_t ret_code; /* Current value of retcode */
        uint64_t starttime;
        /* set by each bg cmd, cleared by the bg_timer when complete */
        uint64_t runtime;
        QEMUTimer *timer;
    } bg;
    size_t payload_max;
    /* Pointer to device hosting the CCI */
    DeviceState *d;
    /* Pointer to the device hosting the protocol conversion */
    DeviceState *intf;
} CXLCCI;
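
/*
 * An illustrative sketch of a command handler matching the opcode_handler
 * signature above; the handler name and behaviour are hypothetical and not
 * part of QEMU's command sets.  Real handlers are looked up through the
 * CXLCCI cxl_cmd_set table, indexed by command set and then command opcode.
 */
static inline CXLRetCode example_cmd_handler(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out, CXLCCI *cci)
{
    /* This hypothetical command takes no input payload... */
    if (len_in != 0) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /* ...and produces no output payload. */
    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}
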
typedef struct cxl_device_state {
    MemoryRegion device_registers;

    /* mmio for device capabilities array - 8.2.8.2 */
    struct {
        MemoryRegion device;
        union {
            uint8_t dev_reg_state[CXL_DEVICE_STATUS_REGISTERS_LENGTH];
            uint16_t dev_reg_state16[CXL_DEVICE_STATUS_REGISTERS_LENGTH / 2];
            uint32_t dev_reg_state32[CXL_DEVICE_STATUS_REGISTERS_LENGTH / 4];
            uint64_t dev_reg_state64[CXL_DEVICE_STATUS_REGISTERS_LENGTH / 8];
        };
        uint64_t event_status;
    };
    MemoryRegion memory_device;
    struct {
        MemoryRegion caps;
        union {
            uint32_t caps_reg_state32[CXL_CAPS_SIZE / 4];
            uint64_t caps_reg_state64[CXL_CAPS_SIZE / 8];
        };
    };

    /* mmio for the mailbox registers 8.2.8.4 */
    struct {
        MemoryRegion mailbox;
        uint16_t payload_size;
        uint8_t mbox_msi_n;
        union {
            uint8_t mbox_reg_state[CXL_MAILBOX_REGISTERS_LENGTH];
            uint16_t mbox_reg_state16[CXL_MAILBOX_REGISTERS_LENGTH / 2];
            uint32_t mbox_reg_state32[CXL_MAILBOX_REGISTERS_LENGTH / 4];
            uint64_t mbox_reg_state64[CXL_MAILBOX_REGISTERS_LENGTH / 8];
        };
    };

    struct {
        bool set;
        uint64_t last_set;
        uint64_t host_set;
    } timestamp;

    /* memory region size, HDM */
    uint64_t mem_size;
    uint64_t pmem_size;
    uint64_t vmem_size;

    const struct cxl_cmd (*cxl_cmd_set)[256];
    CXLEventLog event_logs[CXL_EVENT_TYPE_MAX];
} CXLDeviceState;

/* Initialize the register block for a device */
void cxl_device_register_block_init(Object *obj, CXLDeviceState *dev,
                                    CXLCCI *cci);

typedef struct CXLType3Dev CXLType3Dev;
typedef struct CSWMBCCIDev CSWMBCCIDev;
/* Set up default values for the register block */
void cxl_device_register_init_t3(CXLType3Dev *ct3d);
void cxl_device_register_init_swcci(CSWMBCCIDev *sw);

/*
 * CXL 2.0 - 8.2.8.1 including errata F4
 * Documented as a 128-bit register, but only 64-bit accesses are defined and
 * the second 64 bits are currently reserved.
 */
REG64(CXL_DEV_CAP_ARRAY, 0)
    FIELD(CXL_DEV_CAP_ARRAY, CAP_ID, 0, 16)
    FIELD(CXL_DEV_CAP_ARRAY, CAP_VERSION, 16, 8)
    FIELD(CXL_DEV_CAP_ARRAY, CAP_COUNT, 32, 16)

void cxl_event_set_status(CXLDeviceState *cxl_dstate, CXLEventLogType log_type,
                          bool available);

/*
 * Helper macro to initialize capability headers for CXL devices.
 *
 * In Section 8.2.8.2, this is listed as a 128b register, but in 8.2.8, it says:
 * > No registers defined in Section 8.2.8 are larger than 64-bits wide so that
 * > is the maximum access size allowed for these registers. If this rule is not
 * > followed, the behavior is undefined
 *
 * CXL 2.0 Errata F4 states further that the layouts in the specification are
 * shown as greater than 128 bits, but implementations are expected to
 * use any size of access up to 64 bits.
 *
 * Here we've chosen to make it 4 dwords. The spec allows any power-of-2 access
 * size up to 64 bits to be used for these registers.
 */
#define CXL_DEVICE_CAPABILITY_HEADER_REGISTER(n, offset)  \
    REG32(CXL_DEV_##n##_CAP_HDR0, offset)                 \
        FIELD(CXL_DEV_##n##_CAP_HDR0, CAP_ID, 0, 16)      \
        FIELD(CXL_DEV_##n##_CAP_HDR0, CAP_VERSION, 16, 8) \
    REG32(CXL_DEV_##n##_CAP_HDR1, offset + 4)             \
        FIELD(CXL_DEV_##n##_CAP_HDR1, CAP_OFFSET, 0, 32)  \
    REG32(CXL_DEV_##n##_CAP_HDR2, offset + 8)             \
        FIELD(CXL_DEV_##n##_CAP_HDR2, CAP_LENGTH, 0, 32)

CXL_DEVICE_CAPABILITY_HEADER_REGISTER(DEVICE_STATUS, CXL_DEVICE_CAP_HDR1_OFFSET)
CXL_DEVICE_CAPABILITY_HEADER_REGISTER(MAILBOX, CXL_DEVICE_CAP_HDR1_OFFSET + \
                                               CXL_DEVICE_CAP_REG_SIZE)
CXL_DEVICE_CAPABILITY_HEADER_REGISTER(MEMORY_DEVICE,
                                      CXL_DEVICE_CAP_HDR1_OFFSET +
                                      CXL_DEVICE_CAP_REG_SIZE * 2)

void cxl_initialize_mailbox_t3(CXLCCI *cci, DeviceState *d, size_t payload_max);
void cxl_initialize_mailbox_swcci(CXLCCI *cci, DeviceState *intf,
                                  DeviceState *d, size_t payload_max);
void cxl_init_cci(CXLCCI *cci, size_t payload_max);
int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd,
                            size_t len_in, uint8_t *pl_in,
                            size_t *len_out, uint8_t *pl_out,
                            bool *bg_started);
void cxl_initialize_t3_fm_owned_ld_mctpcci(CXLCCI *cci, DeviceState *d,
                                           DeviceState *intf,
                                           size_t payload_max);

void cxl_initialize_t3_ld_cci(CXLCCI *cci, DeviceState *d,
                              DeviceState *intf, size_t payload_max);

#define cxl_device_cap_init(dstate, reg, cap_id, ver)                      \
    do {                                                                   \
        uint32_t *cap_hdrs = dstate->caps_reg_state32;                     \
        int which = R_CXL_DEV_##reg##_CAP_HDR0;                            \
        cap_hdrs[which] =                                                  \
            FIELD_DP32(cap_hdrs[which], CXL_DEV_##reg##_CAP_HDR0,          \
                       CAP_ID, cap_id);                                    \
        cap_hdrs[which] = FIELD_DP32(                                      \
            cap_hdrs[which], CXL_DEV_##reg##_CAP_HDR0, CAP_VERSION, ver);  \
        cap_hdrs[which + 1] =                                              \
            FIELD_DP32(cap_hdrs[which + 1], CXL_DEV_##reg##_CAP_HDR1,      \
                       CAP_OFFSET, CXL_##reg##_REGISTERS_OFFSET);          \
        cap_hdrs[which + 2] =                                              \
            FIELD_DP32(cap_hdrs[which + 2], CXL_DEV_##reg##_CAP_HDR2,      \
                       CAP_LENGTH, CXL_##reg##_REGISTERS_LENGTH);          \
    } while (0)
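
/*
 * An illustrative sketch of how the capability headers declared above might
 * be populated with cxl_device_cap_init(); this helper is hypothetical and
 * not part of QEMU's register init path.  The capability IDs (Device Status
 * 0x1, Primary Mailbox 0x2, Memory Device Status 0x4000) follow the spec's
 * ID assignments; the version numbers are assumed values for the example.
 */
static inline void example_device_caps_init(CXLDeviceState *cxl_dstate)
{
    cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 0x1, 1);
    cxl_device_cap_init(cxl_dstate, MAILBOX, 0x2, 1);
    cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000, 1);
}
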
/* CXL 3.0 8.2.8.3.1 Event Status Register */
REG64(CXL_DEV_EVENT_STATUS, 0)
    FIELD(CXL_DEV_EVENT_STATUS, EVENT_STATUS, 0, 32)

/* CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register */
REG32(CXL_DEV_MAILBOX_CAP, 0)
    FIELD(CXL_DEV_MAILBOX_CAP, PAYLOAD_SIZE, 0, 5)
    FIELD(CXL_DEV_MAILBOX_CAP, INT_CAP, 5, 1)
    FIELD(CXL_DEV_MAILBOX_CAP, BG_INT_CAP, 6, 1)
    FIELD(CXL_DEV_MAILBOX_CAP, MSI_N, 7, 4)

/* CXL 2.0 8.2.8.4.4 Mailbox Control Register */
REG32(CXL_DEV_MAILBOX_CTRL, 4)
    FIELD(CXL_DEV_MAILBOX_CTRL, DOORBELL, 0, 1)
    FIELD(CXL_DEV_MAILBOX_CTRL, INT_EN, 1, 1)
    FIELD(CXL_DEV_MAILBOX_CTRL, BG_INT_EN, 2, 1)

/* CXL 2.0 8.2.8.4.5 Command Register */
REG64(CXL_DEV_MAILBOX_CMD, 8)
    FIELD(CXL_DEV_MAILBOX_CMD, COMMAND, 0, 8)
    FIELD(CXL_DEV_MAILBOX_CMD, COMMAND_SET, 8, 8)
    FIELD(CXL_DEV_MAILBOX_CMD, LENGTH, 16, 20)

/* CXL 2.0 8.2.8.4.6 Mailbox Status Register */
REG64(CXL_DEV_MAILBOX_STS, 0x10)
    FIELD(CXL_DEV_MAILBOX_STS, BG_OP, 0, 1)
    FIELD(CXL_DEV_MAILBOX_STS, ERRNO, 32, 16)
    FIELD(CXL_DEV_MAILBOX_STS, VENDOR_ERRNO, 48, 16)

/* CXL 2.0 8.2.8.4.7 Background Command Status Register */
REG64(CXL_DEV_BG_CMD_STS, 0x18)
    FIELD(CXL_DEV_BG_CMD_STS, OP, 0, 16)
    FIELD(CXL_DEV_BG_CMD_STS, PERCENTAGE_COMP, 16, 7)
    FIELD(CXL_DEV_BG_CMD_STS, RET_CODE, 32, 16)
    FIELD(CXL_DEV_BG_CMD_STS, VENDOR_RET_CODE, 48, 16)

/* CXL 2.0 8.2.8.4.8 Command Payload Registers */
REG32(CXL_DEV_CMD_PAYLOAD, 0x20)

REG64(CXL_MEM_DEV_STS, 0)
    FIELD(CXL_MEM_DEV_STS, FATAL, 0, 1)
    FIELD(CXL_MEM_DEV_STS, FW_HALT, 1, 1)
    FIELD(CXL_MEM_DEV_STS, MEDIA_STATUS, 2, 2)
    FIELD(CXL_MEM_DEV_STS, MBOX_READY, 4, 1)
    FIELD(CXL_MEM_DEV_STS, RESET_NEEDED, 5, 3)

static inline void __toggle_media(CXLDeviceState *cxl_dstate, int val)
{
    uint64_t dev_status_reg;

    dev_status_reg = FIELD_DP64(0, CXL_MEM_DEV_STS, MEDIA_STATUS, val);
    cxl_dstate->mbox_reg_state64[R_CXL_MEM_DEV_STS] = dev_status_reg;
}
#define cxl_dev_disable_media(cxlds)                    \
    do { __toggle_media((cxlds), 0x3); } while (0)
#define cxl_dev_enable_media(cxlds)                     \
    do { __toggle_media((cxlds), 0x1); } while (0)

/*
 * A sanitize is considered running while a background command with the
 * Sanitize opcode (0x4400) still has runtime left.
 */
static inline bool sanitize_running(CXLCCI *cci)
{
    return !!cci->bg.runtime && cci->bg.opcode == 0x4400;
}

typedef struct CXLError {
    QTAILQ_ENTRY(CXLError) node;
    int type; /* Error code as per FE definition */
    uint32_t header[CXL_RAS_ERR_HEADER_NUM];
} CXLError;

typedef QTAILQ_HEAD(, CXLError) CXLErrorList;

typedef struct CXLPoison {
    uint64_t start, length;
    uint8_t type;
#define CXL_POISON_TYPE_EXTERNAL 0x1
#define CXL_POISON_TYPE_INTERNAL 0x2
#define CXL_POISON_TYPE_INJECTED 0x3
    QLIST_ENTRY(CXLPoison) node;
} CXLPoison;

typedef QLIST_HEAD(, CXLPoison) CXLPoisonList;
#define CXL_POISON_LIST_LIMIT 256
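
/*
 * An illustrative sketch of how a poison list might be consulted; the helper
 * is hypothetical and not part of QEMU's poison API.  It walks the list and
 * reports whether a device physical address falls inside a tracked extent.
 */
static inline bool example_dpa_is_poisoned(const CXLPoisonList *poison_list,
                                           uint64_t dpa)
{
    const CXLPoison *ent;

    QLIST_FOREACH(ent, poison_list, node) {
        if (dpa >= ent->start && dpa < ent->start + ent->length) {
            return true;
        }
    }
    return false;
}
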
struct CXLType3Dev {
    /* Private */
    PCIDevice parent_obj;

    /* Properties */
    HostMemoryBackend *hostmem; /* deprecated */
    HostMemoryBackend *hostvmem;
    HostMemoryBackend *hostpmem;
    HostMemoryBackend *lsa;
    uint64_t sn;

    /* State */
    AddressSpace hostvmem_as;
    AddressSpace hostpmem_as;
    CXLComponentState cxl_cstate;
    CXLDeviceState cxl_dstate;
    CXLCCI cci; /* Primary PCI mailbox CCI */
    /* Always initialized as there is no way to know if a VDM might show up */
    CXLCCI vdm_fm_owned_ld_mctp_cci;
    CXLCCI ld0_cci;

    /* DOE */
    DOECap doe_cdat;

    /* Error injection */
    CXLErrorList error_list;

    /* Poison Injection - cache */
    CXLPoisonList poison_list;
    unsigned int poison_list_cnt;
    bool poison_list_overflowed;
    uint64_t poison_list_overflow_ts;
};

#define TYPE_CXL_TYPE3 "cxl-type3"
OBJECT_DECLARE_TYPE(CXLType3Dev, CXLType3Class, CXL_TYPE3)

struct CXLType3Class {
    /* Private */
    PCIDeviceClass parent_class;

    /* public */
    uint64_t (*get_lsa_size)(CXLType3Dev *ct3d);

    uint64_t (*get_lsa)(CXLType3Dev *ct3d, void *buf, uint64_t size,
                        uint64_t offset);
    void (*set_lsa)(CXLType3Dev *ct3d, const void *buf, uint64_t size,
                    uint64_t offset);
    bool (*set_cacheline)(CXLType3Dev *ct3d, uint64_t dpa_offset,
                          uint8_t *data);
};

struct CSWMBCCIDev {
    PCIDevice parent_obj;
    PCIDevice *target;
    CXLComponentState cxl_cstate;
    CXLDeviceState cxl_dstate;
    CXLCCI *cci;
};

#define TYPE_CXL_SWITCH_MAILBOX_CCI "cxl-switch-mailbox-cci"
OBJECT_DECLARE_TYPE(CSWMBCCIDev, CSWMBCCIClass, CXL_SWITCH_MAILBOX_CCI)

MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data,
                           unsigned size, MemTxAttrs attrs);
MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data,
                            unsigned size, MemTxAttrs attrs);

uint64_t cxl_device_get_timestamp(CXLDeviceState *cxlds);

void cxl_event_init(CXLDeviceState *cxlds, int start_msg_num);
bool cxl_event_insert(CXLDeviceState *cxlds, CXLEventLogType log_type,
                      CXLEventRecordRaw *event);
CXLRetCode cxl_event_get_records(CXLDeviceState *cxlds, CXLGetEventPayload *pl,
                                 uint8_t log_type, int max_recs,
                                 size_t *len);
CXLRetCode cxl_event_clear_records(CXLDeviceState *cxlds,
                                   CXLClearEventPayload *pl);

void cxl_event_irq_assert(CXLType3Dev *ct3d);

void cxl_set_poison_list_overflowed(CXLType3Dev *ct3d);

#endif