1 /*
2 * QEMU CXL Devices
3 *
4 * Copyright (c) 2020 Intel
5 *
6 * This work is licensed under the terms of the GNU GPL, version 2. See the
7 * COPYING file in the top-level directory.
8 */
9
10 #ifndef CXL_DEVICE_H
11 #define CXL_DEVICE_H
12
13 #include "hw/cxl/cxl_component.h"
14 #include "hw/pci/pci_device.h"
15 #include "hw/register.h"
16 #include "hw/cxl/cxl_events.h"
17
18 /*
19 * The following is how a CXL device's Memory Device registers are laid out.
20 * The only requirement from the spec is that the capabilities array and the
21 * capability headers start at offset 0 and are contiguously packed. The headers
22 * themselves provide offsets to the register fields. For this emulation, the
23 * actual registers * will start at offset 0x80 (m == 0x80). No secondary
24 * mailbox is implemented which means that the offset of the start of the
25 * mailbox payload (n) is given by
26 * n = m + sizeof(mailbox registers) + sizeof(device registers).
27 *
28 * +---------------------------------+
29 * | |
30 * | Memory Device Registers |
31 * | |
32 * n + PAYLOAD_SIZE_MAX -----------------------------------
33 * ^ | |
34 * | | |
35 * | | |
36 * | | |
37 * | | |
38 * | | Mailbox Payload |
39 * | | |
40 * | | |
41 * | | |
42 * n -----------------------------------
43 * ^ | Mailbox Registers |
44 * | | |
45 * | -----------------------------------
46 * | | |
47 * | | Device Registers |
48 * | | |
 * m -----------------------------------
50 * ^ | Memory Device Capability Header|
51 * | -----------------------------------
52 * | | Mailbox Capability Header |
53 * | -----------------------------------
54 * | | Device Capability Header |
55 * | -----------------------------------
56 * | | Device Cap Array Register |
57 * 0 +---------------------------------+
58 *
59 */
60
/* CXL r3.1 Figure 8-12: CXL Device Registers */
#define CXL_DEVICE_CAP_HDR1_OFFSET 0x10
/* CXL r3.1 Section 8.2.8.2: CXL Device Capability Header Register */
#define CXL_DEVICE_CAP_REG_SIZE 0x10

/*
 * CXL r3.1 Section 8.2.8.2.1: CXL Device Capabilities +
 * CXL r3.1 Section 8.2.8.5: Memory Device Capabilities
 */
#define CXL_DEVICE_CAPS_MAX 4
#define CXL_CAPS_SIZE \
    (CXL_DEVICE_CAP_REG_SIZE * (CXL_DEVICE_CAPS_MAX + 1)) /* +1 for header */

#define CXL_DEVICE_STATUS_REGISTERS_OFFSET 0x80 /* Read comment above */
/*
 * CXL r3.1 Section 8.2.8.3: Device Status Registers
 * As it is the only Device Status Register in CXL r3.1
 */
#define CXL_DEVICE_STATUS_REGISTERS_LENGTH 0x8

#define CXL_MAILBOX_REGISTERS_OFFSET \
    (CXL_DEVICE_STATUS_REGISTERS_OFFSET + CXL_DEVICE_STATUS_REGISTERS_LENGTH)
/* CXL r3.1 Figure 8-13: Mailbox Registers */
#define CXL_MAILBOX_REGISTERS_SIZE 0x20
/* Mailbox payload buffer is 1 << 11 = 2 KiB */
#define CXL_MAILBOX_PAYLOAD_SHIFT 11
#define CXL_MAILBOX_MAX_PAYLOAD_SIZE (1 << CXL_MAILBOX_PAYLOAD_SHIFT)
#define CXL_MAILBOX_REGISTERS_LENGTH \
    (CXL_MAILBOX_REGISTERS_SIZE + CXL_MAILBOX_MAX_PAYLOAD_SIZE)

#define CXL_MEMORY_DEVICE_REGISTERS_OFFSET \
    (CXL_MAILBOX_REGISTERS_OFFSET + CXL_MAILBOX_REGISTERS_LENGTH)
#define CXL_MEMORY_DEVICE_REGISTERS_LENGTH 0x8

/* Total size of the register pieces laid out in the diagram above */
#define CXL_MMIO_SIZE \
    (CXL_DEVICE_CAP_REG_SIZE + CXL_DEVICE_STATUS_REGISTERS_LENGTH + \
     CXL_MAILBOX_REGISTERS_LENGTH + CXL_MEMORY_DEVICE_REGISTERS_LENGTH)
97
98 /* CXL r3.1 Table 8-34: Command Return Codes */
/*
 * CXL r3.1 Table 8-34: Command Return Codes
 * Status codes returned by mailbox/CCI command handlers.
 */
typedef enum {
    CXL_MBOX_SUCCESS = 0x0,
    CXL_MBOX_BG_STARTED = 0x1,
    CXL_MBOX_INVALID_INPUT = 0x2,
    CXL_MBOX_UNSUPPORTED = 0x3,
    CXL_MBOX_INTERNAL_ERROR = 0x4,
    CXL_MBOX_RETRY_REQUIRED = 0x5,
    CXL_MBOX_BUSY = 0x6,
    CXL_MBOX_MEDIA_DISABLED = 0x7,
    CXL_MBOX_FW_XFER_IN_PROGRESS = 0x8,
    CXL_MBOX_FW_XFER_OUT_OF_ORDER = 0x9,
    CXL_MBOX_FW_AUTH_FAILED = 0xa,
    CXL_MBOX_FW_INVALID_SLOT = 0xb,
    CXL_MBOX_FW_ROLLEDBACK = 0xc,
    CXL_MBOX_FW_REST_REQD = 0xd,
    CXL_MBOX_INVALID_HANDLE = 0xe,
    CXL_MBOX_INVALID_PA = 0xf,
    CXL_MBOX_INJECT_POISON_LIMIT = 0x10,
    CXL_MBOX_PERMANENT_MEDIA_FAILURE = 0x11,
    CXL_MBOX_ABORTED = 0x12,
    CXL_MBOX_INVALID_SECURITY_STATE = 0x13,
    CXL_MBOX_INCORRECT_PASSPHRASE = 0x14,
    CXL_MBOX_UNSUPPORTED_MAILBOX = 0x15,
    CXL_MBOX_INVALID_PAYLOAD_LENGTH = 0x16,
    CXL_MBOX_INVALID_LOG = 0x17,
    CXL_MBOX_INTERRUPTED = 0x18,
    CXL_MBOX_UNSUPPORTED_FEATURE_VERSION = 0x19,
    CXL_MBOX_UNSUPPORTED_FEATURE_SELECTION_VALUE = 0x1a,
    CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS = 0x1b,
    CXL_MBOX_FEATURE_TRANSFER_OUT_OF_ORDER = 0x1c,
    CXL_MBOX_RESOURCES_EXHAUSTED = 0x1d,
    CXL_MBOX_INVALID_EXTENT_LIST = 0x1e,
    CXL_MBOX_TRANSFER_OUT_OF_ORDER = 0x1f,
    CXL_MBOX_REQUEST_ABORT_NOTSUP = 0x20,
    CXL_MBOX_MAX = 0x20 /* alias for the highest defined return code */
} CXLRetCode;
135
/*
 * r3.2 Section 7.6.7.6.2: Table 7-66: DSMAS Flags Bits
 * Bit positions within the CDAT DSMAS flags field; mirrored by the bool
 * members at the end of CXLDCRegion.
 */
typedef enum {
    CXL_DSMAS_FLAGS_NONVOLATILE = 2,
    CXL_DSMAS_FLAGS_SHARABLE = 3,
    CXL_DSMAS_FLAGS_HW_MANAGED_COHERENCY = 4,
    CXL_DSMAS_FLAGS_IC_SPECIFIC_DC_MANAGEMENT = 5,
    CXL_DSMAS_FLAGS_RDONLY = 6,
} CXLDSMASFlags;
144
typedef struct CXLCCI CXLCCI;
typedef struct cxl_device_state CXLDeviceState;
struct cxl_cmd;
/*
 * Handler for one CCI opcode: consumes len_in bytes from payload_in,
 * writes the response into payload_out, reports its length via *len_out,
 * and returns a CXLRetCode for the command status.
 */
typedef CXLRetCode (*opcode_handler)(const struct cxl_cmd *cmd,
                                     uint8_t *payload_in, size_t len_in,
                                     uint8_t *payload_out, size_t *len_out,
                                     CXLCCI *cci);
/* One entry in a CCI command dispatch table */
struct cxl_cmd {
    const char *name;
    opcode_handler handler;
    /*
     * Expected input payload length.  Signed (ssize_t) — negative values
     * presumably flag variable-length input; confirm in the dispatch code.
     */
    ssize_t in;
    uint16_t effect; /* Reported in CEL */
};
158
/* One event record queued on a CXLEventLog */
typedef struct CXLEvent {
    CXLEventRecordRaw data;
    QSIMPLEQ_ENTRY(CXLEvent) node;
} CXLEvent;

/* State for a single event log; one per CXLEventLogType (see event_logs[]) */
typedef struct CXLEventLog {
    uint16_t next_handle;
    uint16_t overflow_err_count;
    uint64_t first_overflow_timestamp;
    uint64_t last_overflow_timestamp;
    bool irq_enabled;
    int irq_vec;
    QemuMutex lock; /* guards the events queue */
    QSIMPLEQ_HEAD(, CXLEvent) events;
} CXLEventLog;
174
/*
 * A Component Command Interface (CCI): the opcode dispatch table plus
 * state for background-command and firmware-update handling.
 */
typedef struct CXLCCI {
    /* Dispatch table, indexed by [command set][command] */
    struct cxl_cmd cxl_cmd_set[256][256];
    /* Command Effects Log (CEL) entries */
    struct cel_log {
        uint16_t opcode;
        uint16_t effect;
    } cel_log[1 << 16];
    size_t cel_size; /* number of valid entries in cel_log */

    /* background command handling (times in ms) */
    struct {
        uint16_t opcode;
        uint16_t complete_pct;
        uint16_t ret_code; /* Current value of retcode */
        bool aborted;
        uint64_t starttime;
        /* set by each bg cmd, cleared by the bg_timer when complete */
        uint64_t runtime;
        QEMUTimer *timer;
        QemuMutex lock; /* serializes mbox abort vs timer cb */
    } bg;

    /* firmware update */
    struct {
        uint8_t active_slot;
        uint8_t staged_slot;
        bool slot[4];
        uint8_t curr_action;
        uint8_t curr_slot;
        /* handle partial transfers */
        bool transferring;
        size_t prev_offset;
        size_t prev_len;
        time_t last_partxfer;
    } fw;

    size_t payload_max; /* payload limit enforced for this CCI */
    /* Pointer to device hosting the CCI */
    DeviceState *d;
    /* Pointer to the device hosting the protocol conversion */
    DeviceState *intf;
    bool initialized;
} CXLCCI;
217
/* Register-block, mailbox and event state shared by CXL device models */
typedef struct cxl_device_state {
    MemoryRegion device_registers;

    /* CXL r3.1 Section 8.2.8.3: Device Status Registers */
    struct {
        MemoryRegion device;
        /* Raw register backing store, aliased at each access width */
        union {
            uint8_t dev_reg_state[CXL_DEVICE_STATUS_REGISTERS_LENGTH];
            uint16_t dev_reg_state16[CXL_DEVICE_STATUS_REGISTERS_LENGTH / 2];
            uint32_t dev_reg_state32[CXL_DEVICE_STATUS_REGISTERS_LENGTH / 4];
            uint64_t dev_reg_state64[CXL_DEVICE_STATUS_REGISTERS_LENGTH / 8];
        };
        uint64_t event_status;
    };
    MemoryRegion memory_device;
    /* Capability header array backing store (see diagram above) */
    struct {
        MemoryRegion caps;
        union {
            uint32_t caps_reg_state32[CXL_CAPS_SIZE / 4];
            uint64_t caps_reg_state64[CXL_CAPS_SIZE / 8];
        };
    };

    /* CXL r3.1 Section 8.2.8.4: Mailbox Registers */
    struct {
        MemoryRegion mailbox;
        uint16_t payload_size;
        uint8_t mbox_msi_n;
        union {
            uint8_t mbox_reg_state[CXL_MAILBOX_REGISTERS_LENGTH];
            uint16_t mbox_reg_state16[CXL_MAILBOX_REGISTERS_LENGTH / 2];
            uint32_t mbox_reg_state32[CXL_MAILBOX_REGISTERS_LENGTH / 4];
            uint64_t mbox_reg_state64[CXL_MAILBOX_REGISTERS_LENGTH / 8];
        };
    };

    /* Stash the memory device status value */
    uint64_t memdev_status;

    struct {
        bool set;
        uint64_t last_set;
        uint64_t host_set;
    } timestamp;

    /* memory region size, HDM */
    uint64_t static_mem_size;
    uint64_t pmem_size;
    uint64_t vmem_size;

    const struct cxl_cmd (*cxl_cmd_set)[256];
    CXLEventLog event_logs[CXL_EVENT_TYPE_MAX];
} CXLDeviceState;
271
/* Initialize the register block for a device */
void cxl_device_register_block_init(Object *obj, CXLDeviceState *dev,
                                    CXLCCI *cci);

typedef struct CXLType3Dev CXLType3Dev;
typedef struct CSWMBCCIDev CSWMBCCIDev;
/* Set up default values for the register block */
void cxl_device_register_init_t3(CXLType3Dev *ct3d, int msi_n);
void cxl_device_register_init_swcci(CSWMBCCIDev *sw, int msi_n);

/*
 * CXL r3.1 Section 8.2.8.1: CXL Device Capabilities Array Register
 * Documented as a 128 bit register, but 64 bit accesses and the second
 * 64 bits are currently reserved.
 */
REG64(CXL_DEV_CAP_ARRAY, 0)
    FIELD(CXL_DEV_CAP_ARRAY, CAP_ID, 0, 16)
    FIELD(CXL_DEV_CAP_ARRAY, CAP_VERSION, 16, 8)
    FIELD(CXL_DEV_CAP_ARRAY, CAP_COUNT, 32, 16)

/* Update the Event Status register state for @log_type (see 8.2.8.3.1) */
void cxl_event_set_status(CXLDeviceState *cxl_dstate, CXLEventLogType log_type,
                          bool available);
294
/*
 * Helper macro to initialize capability headers for CXL devices.
 *
 * In CXL r3.1 Section 8.2.8.2: CXL Device Capability Header Register, this is
 * listed as a 128b register, but in CXL r3.1 Section 8.2.8: CXL Device Register
 * Interface, it says:
 * > No registers defined in Section 8.2.8 are larger than 64-bits wide so that
 * > is the maximum access size allowed for these registers. If this rule is not
 * > followed, the behavior is undefined.
 *
 * > To illustrate how the fields fit together, the layouts ... are shown as
 * > wider than a 64 bit register. Implementations are expected to use any size
 * > accesses for this information up to 64 bits without lost of functionality
 *
 * Here we've chosen to make it 4 dwords.
 */
#define CXL_DEVICE_CAPABILITY_HEADER_REGISTER(n, offset)          \
    REG32(CXL_DEV_##n##_CAP_HDR0, offset)                         \
        FIELD(CXL_DEV_##n##_CAP_HDR0, CAP_ID, 0, 16)              \
        FIELD(CXL_DEV_##n##_CAP_HDR0, CAP_VERSION, 16, 8)         \
    REG32(CXL_DEV_##n##_CAP_HDR1, offset + 4)                     \
        FIELD(CXL_DEV_##n##_CAP_HDR1, CAP_OFFSET, 0, 32)          \
    REG32(CXL_DEV_##n##_CAP_HDR2, offset + 8)                     \
        FIELD(CXL_DEV_##n##_CAP_HDR2, CAP_LENGTH, 0, 32)

/* Instantiate header registers for the three capabilities emulated here */
CXL_DEVICE_CAPABILITY_HEADER_REGISTER(DEVICE_STATUS, CXL_DEVICE_CAP_HDR1_OFFSET)
CXL_DEVICE_CAPABILITY_HEADER_REGISTER(MAILBOX, CXL_DEVICE_CAP_HDR1_OFFSET + \
                                               CXL_DEVICE_CAP_REG_SIZE)
CXL_DEVICE_CAPABILITY_HEADER_REGISTER(MEMORY_DEVICE,
                                      CXL_DEVICE_CAP_HDR1_OFFSET +
                                      CXL_DEVICE_CAP_REG_SIZE * 2)
326
/* CCI construction/teardown: install command sets and payload limits */
void cxl_initialize_mailbox_t3(CXLCCI *cci, DeviceState *d, size_t payload_max);
void cxl_initialize_mailbox_swcci(CXLCCI *cci, DeviceState *intf,
                                  DeviceState *d, size_t payload_max);
void cxl_init_cci(CXLCCI *cci, size_t payload_max);
void cxl_destroy_cci(CXLCCI *cci);
void cxl_add_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmd_set)[256],
                          size_t payload_max);
/*
 * Dispatch a single command (set/cmd) to @cci.  *bg_started presumably
 * reports whether a background operation was kicked off — confirm against
 * the implementation.
 */
int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd,
                            size_t len_in, uint8_t *pl_in,
                            size_t *len_out, uint8_t *pl_out,
                            bool *bg_started);
void cxl_initialize_t3_fm_owned_ld_mctpcci(CXLCCI *cci, DeviceState *d,
                                           DeviceState *intf,
                                           size_t payload_max);

void cxl_initialize_t3_ld_cci(CXLCCI *cci, DeviceState *d,
                              DeviceState *intf, size_t payload_max);
344
/*
 * Fill in one capability header in @dstate's caps array: capability ID,
 * version, and the offset/length of that capability's register block
 * (taken from the CXL_<reg>_REGISTERS_{OFFSET,LENGTH} macros above).
 */
#define cxl_device_cap_init(dstate, reg, cap_id, ver)                      \
    do {                                                                   \
        uint32_t *cap_hdrs = dstate->caps_reg_state32;                     \
        int which = R_CXL_DEV_##reg##_CAP_HDR0;                            \
        cap_hdrs[which] =                                                  \
            FIELD_DP32(cap_hdrs[which], CXL_DEV_##reg##_CAP_HDR0,          \
                       CAP_ID, cap_id);                                    \
        cap_hdrs[which] = FIELD_DP32(                                      \
            cap_hdrs[which], CXL_DEV_##reg##_CAP_HDR0, CAP_VERSION, ver);  \
        cap_hdrs[which + 1] =                                              \
            FIELD_DP32(cap_hdrs[which + 1], CXL_DEV_##reg##_CAP_HDR1,      \
                       CAP_OFFSET, CXL_##reg##_REGISTERS_OFFSET);          \
        cap_hdrs[which + 2] =                                              \
            FIELD_DP32(cap_hdrs[which + 2], CXL_DEV_##reg##_CAP_HDR2,      \
                       CAP_LENGTH, CXL_##reg##_REGISTERS_LENGTH);          \
    } while (0)
361
/*
 * Register offsets below are relative to the start of each capability's own
 * register block (see the layout diagram at the top of this file), which is
 * why several registers share offset 0.
 */

/* CXL r3.2 Section 8.2.8.3.1: Event Status Register */
#define CXL_DEVICE_STATUS_VERSION 2
REG64(CXL_DEV_EVENT_STATUS, 0)
    FIELD(CXL_DEV_EVENT_STATUS, EVENT_STATUS, 0, 32)

#define CXL_DEV_MAILBOX_VERSION 1
/* CXL r3.1 Section 8.2.8.4.3: Mailbox Capabilities Register */
REG32(CXL_DEV_MAILBOX_CAP, 0)
    FIELD(CXL_DEV_MAILBOX_CAP, PAYLOAD_SIZE, 0, 5)
    FIELD(CXL_DEV_MAILBOX_CAP, INT_CAP, 5, 1)
    FIELD(CXL_DEV_MAILBOX_CAP, BG_INT_CAP, 6, 1)
    FIELD(CXL_DEV_MAILBOX_CAP, MSI_N, 7, 4)
    FIELD(CXL_DEV_MAILBOX_CAP, MBOX_READY_TIME, 11, 8)
    FIELD(CXL_DEV_MAILBOX_CAP, TYPE, 19, 4)

/* CXL r3.1 Section 8.2.8.4.4: Mailbox Control Register */
REG32(CXL_DEV_MAILBOX_CTRL, 4)
    FIELD(CXL_DEV_MAILBOX_CTRL, DOORBELL, 0, 1)
    FIELD(CXL_DEV_MAILBOX_CTRL, INT_EN, 1, 1)
    FIELD(CXL_DEV_MAILBOX_CTRL, BG_INT_EN, 2, 1)

/* CXL r3.1 Section 8.2.8.4.5: Command Register */
REG64(CXL_DEV_MAILBOX_CMD, 8)
    FIELD(CXL_DEV_MAILBOX_CMD, COMMAND, 0, 8)
    FIELD(CXL_DEV_MAILBOX_CMD, COMMAND_SET, 8, 8)
    FIELD(CXL_DEV_MAILBOX_CMD, LENGTH, 16, 20)

/* CXL r3.1 Section 8.2.8.4.6: Mailbox Status Register */
REG64(CXL_DEV_MAILBOX_STS, 0x10)
    FIELD(CXL_DEV_MAILBOX_STS, BG_OP, 0, 1)
    FIELD(CXL_DEV_MAILBOX_STS, ERRNO, 32, 16)
    FIELD(CXL_DEV_MAILBOX_STS, VENDOR_ERRNO, 48, 16)

/* CXL r3.1 Section 8.2.8.4.7: Background Command Status Register */
REG64(CXL_DEV_BG_CMD_STS, 0x18)
    FIELD(CXL_DEV_BG_CMD_STS, OP, 0, 16)
    FIELD(CXL_DEV_BG_CMD_STS, PERCENTAGE_COMP, 16, 7)
    FIELD(CXL_DEV_BG_CMD_STS, RET_CODE, 32, 16)
    FIELD(CXL_DEV_BG_CMD_STS, VENDOR_RET_CODE, 48, 16)

/* CXL r3.1 Section 8.2.8.4.8: Command Payload Registers */
REG32(CXL_DEV_CMD_PAYLOAD, 0x20)

/*
 * CXL r3.1 Section 8.2.8.5.1.1: Memory Device Status Register
 * NOTE(review): was previously cited as 8.2.8.4.1, but 8.2.8.4 is the
 * Mailbox Registers section; Memory Device Capabilities are in 8.2.8.5
 * (consistent with the citation near CXL_DEVICE_CAPS_MAX above).
 */
#define CXL_MEM_DEV_STATUS_VERSION 1
REG64(CXL_MEM_DEV_STS, 0)
    FIELD(CXL_MEM_DEV_STS, FATAL, 0, 1)
    FIELD(CXL_MEM_DEV_STS, FW_HALT, 1, 1)
    FIELD(CXL_MEM_DEV_STS, MEDIA_STATUS, 2, 2)
    FIELD(CXL_MEM_DEV_STS, MBOX_READY, 4, 1)
    FIELD(CXL_MEM_DEV_STS, RESET_NEEDED, 5, 3)
413
/*
 * Update the MEDIA_STATUS field in the stashed Memory Device Status
 * value (cxl_dstate->memdev_status).  Use via the
 * cxl_dev_{enable,disable}_media() wrappers below.
 */
static inline void __toggle_media(CXLDeviceState *cxl_dstate, int val)
{
    cxl_dstate->memdev_status =
        FIELD_DP64(cxl_dstate->memdev_status, CXL_MEM_DEV_STS,
                   MEDIA_STATUS, val);
}
#define cxl_dev_disable_media(cxlds) \
        do { __toggle_media((cxlds), 0x3); } while (0)
#define cxl_dev_enable_media(cxlds) \
        do { __toggle_media((cxlds), 0x1); } while (0)
427
cxl_dev_media_disabled(CXLDeviceState * cxl_dstate)428 static inline bool cxl_dev_media_disabled(CXLDeviceState *cxl_dstate)
429 {
430 uint64_t dev_status_reg = cxl_dstate->mbox_reg_state64[R_CXL_MEM_DEV_STS];
431 return FIELD_EX64(dev_status_reg, CXL_MEM_DEV_STS, MEDIA_STATUS) == 0x3;
432 }
scan_media_running(CXLCCI * cci)433 static inline bool scan_media_running(CXLCCI *cci)
434 {
435 return !!cci->bg.runtime && cci->bg.opcode == 0x4304;
436 }
437
/* One injected RAS error (see error_list in CXLType3Dev) */
typedef struct CXLError {
    QTAILQ_ENTRY(CXLError) node;
    int type; /* Error code as per FE definition */
    uint32_t header[CXL_RAS_ERR_HEADER_NUM];
} CXLError;

typedef QTAILQ_HEAD(, CXLError) CXLErrorList;

/* One poison record: a DPA range described by start/length plus a source */
typedef struct CXLPoison {
    uint64_t start, length;
    uint8_t type;
#define CXL_POISON_TYPE_EXTERNAL 0x1
#define CXL_POISON_TYPE_INTERNAL 0x2
#define CXL_POISON_TYPE_INJECTED 0x3
    QLIST_ENTRY(CXLPoison) node;
} CXLPoison;

typedef QLIST_HEAD(, CXLPoison) CXLPoisonList;
/* Cap on the number of entries tracked in a poison list */
#define CXL_POISON_LIST_LIMIT 256
457
/* CXL memory device patrol scrub control attributes */
/* Read (Get Feature) view of the patrol scrub attributes; packed payload */
typedef struct CXLMemPatrolScrubReadAttrs {
    uint8_t scrub_cycle_cap;
    uint16_t scrub_cycle;
    uint8_t scrub_flags;
} QEMU_PACKED CXLMemPatrolScrubReadAttrs;

/* Write (Set Feature) view of the patrol scrub attributes; packed payload */
typedef struct CXLMemPatrolScrubWriteAttrs {
    uint8_t scrub_cycle_hr;
    uint8_t scrub_flags;
} QEMU_PACKED CXLMemPatrolScrubWriteAttrs;

#define CXL_MEMDEV_PS_GET_FEATURE_VERSION 0x01
#define CXL_MEMDEV_PS_SET_FEATURE_VERSION 0x01
#define CXL_MEMDEV_PS_SCRUB_CYCLE_CHANGE_CAP_DEFAULT BIT(0)
#define CXL_MEMDEV_PS_SCRUB_REALTIME_REPORT_CAP_DEFAULT BIT(1)
#define CXL_MEMDEV_PS_CUR_SCRUB_CYCLE_DEFAULT 12
#define CXL_MEMDEV_PS_MIN_SCRUB_CYCLE_DEFAULT 1
#define CXL_MEMDEV_PS_ENABLE_DEFAULT 0

/* CXL memory device DDR5 ECS control attributes */
#define CXL_ECS_GET_FEATURE_VERSION 0x01
#define CXL_ECS_SET_FEATURE_VERSION 0x01
#define CXL_ECS_LOG_ENTRY_TYPE_DEFAULT 0x01
#define CXL_ECS_REALTIME_REPORT_CAP_DEFAULT 1
#define CXL_ECS_THRESHOLD_COUNT_DEFAULT 3 /* 3: 256, 4: 1024, 5: 4096 */
#define CXL_ECS_MODE_DEFAULT 0
#define CXL_ECS_NUM_MEDIA_FRUS 3 /* Default */

/* Per-FRU portion of the ECS read attributes */
typedef struct CXLMemECSFRUReadAttrs {
    uint8_t ecs_cap;
    uint16_t ecs_config;
    uint8_t ecs_flags;
} QEMU_PACKED CXLMemECSFRUReadAttrs;

typedef struct CXLMemECSReadAttrs {
    uint8_t ecs_log_cap;
    CXLMemECSFRUReadAttrs fru_attrs[CXL_ECS_NUM_MEDIA_FRUS];
} QEMU_PACKED CXLMemECSReadAttrs;

/* Per-FRU portion of the ECS write attributes */
typedef struct CXLMemECSFRUWriteAttrs {
    uint16_t ecs_config;
} QEMU_PACKED CXLMemECSFRUWriteAttrs;

typedef struct CXLMemECSWriteAttrs {
    uint8_t ecs_log_cap;
    CXLMemECSFRUWriteAttrs fru_attrs[CXL_ECS_NUM_MEDIA_FRUS];
} QEMU_PACKED CXLMemECSWriteAttrs;
506
#define DCD_MAX_NUM_REGION 8

/* Wire format of one Dynamic Capacity extent record (hence QEMU_PACKED) */
typedef struct CXLDCExtentRaw {
    uint64_t start_dpa;
    uint64_t len;
    uint8_t tag[0x10];
    uint16_t shared_seq;
    uint8_t rsvd[0x6];
} QEMU_PACKED CXLDCExtentRaw;

/*
 * In-memory tracking of one extent.  The leading fields mirror
 * CXLDCExtentRaw field-for-field; keep the two structs in sync.
 */
typedef struct CXLDCExtent {
    uint64_t start_dpa;
    uint64_t len;
    uint8_t tag[0x10];
    uint16_t shared_seq;
    uint8_t rsvd[0x6];

    QTAILQ_ENTRY(CXLDCExtent) node;
} CXLDCExtent;
typedef QTAILQ_HEAD(, CXLDCExtent) CXLDCExtentList;

/* A group of extents offered/processed together */
typedef struct CXLDCExtentGroup {
    CXLDCExtentList list;
    QTAILQ_ENTRY(CXLDCExtentGroup) node;
} CXLDCExtentGroup;
typedef QTAILQ_HEAD(, CXLDCExtentGroup) CXLDCExtentGroupList;
533
/* One Dynamic Capacity region and its block-tracking state */
typedef struct CXLDCRegion {
    uint64_t base;       /* aligned to 256*MiB */
    uint64_t decode_len; /* aligned to 256*MiB */
    uint64_t len;
    uint64_t block_size;
    uint32_t dsmadhandle;
    uint8_t flags;
    /* backed-block bitmap; see ct3_{set,clear,test}_region_block_backed() */
    unsigned long *blk_bitmap;
    uint64_t supported_blk_size_bitmask;
    QemuMutex bitmap_lock; /* protects blk_bitmap */
    /* Following bools make up dsmas flags, as defined in the CDAT */
    bool nonvolatile;
    bool sharable;
    bool hw_managed_coherency;
    bool ic_specific_dc_management;
    bool rdonly;
} CXLDCRegion;

/* State carried across the parts of a (possibly multi-part) Set Feature */
typedef struct CXLSetFeatureInfo {
    QemuUUID uuid;
    uint8_t data_transfer_flag;
    bool data_saved_across_reset;
    uint16_t data_offset;
    size_t data_size;
} CXLSetFeatureInfo;
559
struct CXLSanitizeInfo;

/*
 * Alert configuration thresholds (life-used, temperature, corrected error
 * counts).  Packed — appears to mirror the Get/Set Alert Config mailbox
 * payload layout; confirm against the command handlers.
 */
typedef struct CXLAlertConfig {
    uint8_t valid_alerts;
    uint8_t enable_alerts;
    uint8_t life_used_crit_alert_thresh;
    uint8_t life_used_warn_thresh;
    uint16_t over_temp_crit_alert_thresh;
    uint16_t under_temp_crit_alert_thresh;
    uint16_t over_temp_warn_thresh;
    uint16_t under_temp_warn_thresh;
    uint16_t cor_vmem_err_warn_thresh;
    uint16_t cor_pmem_err_warn_thresh;
} QEMU_PACKED CXLAlertConfig;
574
/* A CXL Type 3 (memory) device model, exposed as a PCI device */
struct CXLType3Dev {
    /* Private */
    PCIDevice parent_obj;

    /* Properties */
    HostMemoryBackend *hostmem; /* deprecated */
    HostMemoryBackend *hostvmem;
    HostMemoryBackend *hostpmem;
    HostMemoryBackend *lsa; /* label storage area backend */
    uint64_t sn; /* serial number */

    /* State */
    AddressSpace hostvmem_as;
    AddressSpace hostpmem_as;
    CXLComponentState cxl_cstate;
    CXLDeviceState cxl_dstate;
    CXLCCI cci; /* Primary PCI mailbox CCI */
    /* Always initialized as no way to know if a VDM might show up */
    CXLCCI vdm_fm_owned_ld_mctp_cci;
    CXLCCI ld0_cci;

    CXLAlertConfig alert_config;

    /* PCIe link characteristics */
    PCIExpLinkSpeed speed;
    PCIExpLinkWidth width;

    /* DOE */
    DOECap doe_cdat;

    /* Error injection */
    CXLErrorList error_list;

    /* Poison Injection - cache */
    CXLPoisonList poison_list;
    unsigned int poison_list_cnt;
    bool poison_list_overflowed;
    uint64_t poison_list_overflow_ts;
    /* Poison Injection - backup */
    CXLPoisonList poison_list_bkp;
    CXLPoisonList scan_media_results;
    bool scan_media_hasrun;

    CXLSetFeatureInfo set_feat_info;

    /* Patrol scrub control attributes */
    CXLMemPatrolScrubReadAttrs patrol_scrub_attrs;
    CXLMemPatrolScrubWriteAttrs patrol_scrub_wr_attrs;
    /* ECS control attributes */
    CXLMemECSReadAttrs ecs_attrs;
    CXLMemECSWriteAttrs ecs_wr_attrs;

    /* Dynamic Capacity (DC) state */
    struct dynamic_capacity {
        HostMemoryBackend *host_dc;
        AddressSpace host_dc_as;
        /*
         * total_capacity is equivalent to the dynamic capability
         * memory region size.
         */
        uint64_t total_capacity; /* 256M aligned */
        CXLDCExtentList extents;
        CXLDCExtentGroupList extents_pending;
        uint32_t total_extent_count;
        uint32_t nr_extents_accepted;
        uint32_t ext_list_gen_seq;

        uint8_t num_regions; /* 0-8 regions */
        CXLDCRegion regions[DCD_MAX_NUM_REGION];
    } dc;

    struct CXLSanitizeInfo *media_op_sanitize;
};
647
#define TYPE_CXL_TYPE3 "cxl-type3"
OBJECT_DECLARE_TYPE(CXLType3Dev, CXLType3Class, CXL_TYPE3)

/* Class hooks: LSA (label storage area) access and cacheline update */
struct CXLType3Class {
    /* Private */
    PCIDeviceClass parent_class;

    /* public */
    uint64_t (*get_lsa_size)(CXLType3Dev *ct3d);

    uint64_t (*get_lsa)(CXLType3Dev *ct3d, void *buf, uint64_t size,
                        uint64_t offset);
    void (*set_lsa)(CXLType3Dev *ct3d, const void *buf, uint64_t size,
                    uint64_t offset);
    bool (*set_cacheline)(CXLType3Dev *ct3d, uint64_t dpa_offset,
                          uint8_t *data);
};

/* Switch mailbox CCI device: owns a CCI associated with a target PCI device */
struct CSWMBCCIDev {
    PCIDevice parent_obj;
    PCIDevice *target;
    CXLComponentState cxl_cstate;
    CXLDeviceState cxl_dstate;
    CXLCCI *cci;
};

#define TYPE_CXL_SWITCH_MAILBOX_CCI "cxl-switch-mailbox-cci"
OBJECT_DECLARE_TYPE(CSWMBCCIDev, CSWMBCCIClass, CXL_SWITCH_MAILBOX_CCI)
676
/* Read/write handlers for host accesses routed to the type 3 device */
MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data,
                           unsigned size, MemTxAttrs attrs);
MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data,
                            unsigned size, MemTxAttrs attrs);

uint64_t cxl_device_get_timestamp(CXLDeviceState *cxlds);

/* Event log management (see CXLEventLog above) */
void cxl_event_init(CXLDeviceState *cxlds, int start_msg_num);
bool cxl_event_insert(CXLDeviceState *cxlds, CXLEventLogType log_type,
                      CXLEventRecordRaw *event);
CXLRetCode cxl_event_get_records(CXLDeviceState *cxlds, CXLGetEventPayload *pl,
                                 uint8_t log_type, int max_recs,
                                 size_t *len);
CXLRetCode cxl_event_clear_records(CXLDeviceState *cxlds,
                                   CXLClearEventPayload *pl);
void cxl_discard_all_event_records(CXLDeviceState *cxlds);

void cxl_event_irq_assert(CXLType3Dev *ct3d);

/* Poison list overflow tracking */
void cxl_set_poison_list_overflowed(CXLType3Dev *ct3d);
void cxl_clear_poison_list_overflowed(CXLType3Dev *ct3d);

/* Dynamic Capacity region/extent helpers */
CXLDCRegion *cxl_find_dc_region(CXLType3Dev *ct3d, uint64_t dpa, uint64_t len);

void cxl_remove_extent_from_extent_list(CXLDCExtentList *list,
                                        CXLDCExtent *extent);
void cxl_insert_extent_to_extent_list(CXLDCExtentList *list, uint64_t dpa,
                                      uint64_t len, uint8_t *tag,
                                      uint16_t shared_seq);
bool test_any_bits_set(const unsigned long *addr, unsigned long nr,
                       unsigned long size);
bool cxl_extents_contains_dpa_range(CXLDCExtentList *list,
                                    uint64_t dpa, uint64_t len);
CXLDCExtentGroup *cxl_insert_extent_to_extent_group(CXLDCExtentGroup *group,
                                                    uint64_t dpa,
                                                    uint64_t len,
                                                    uint8_t *tag,
                                                    uint16_t shared_seq);
void cxl_extent_group_list_insert_tail(CXLDCExtentGroupList *list,
                                       CXLDCExtentGroup *group);
uint32_t cxl_extent_group_list_delete_front(CXLDCExtentGroupList *list);
/* Mark/clear/test blocks of a DC region as backed (see blk_bitmap) */
void ct3_set_region_block_backed(CXLType3Dev *ct3d, uint64_t dpa,
                                 uint64_t len);
void ct3_clear_region_block_backed(CXLType3Dev *ct3d, uint64_t dpa,
                                   uint64_t len);
bool ct3_test_region_block_backed(CXLType3Dev *ct3d, uint64_t dpa,
                                  uint64_t len);
void cxl_assign_event_header(CXLEventRecordHdr *hdr,
                             const QemuUUID *uuid, uint32_t flags,
                             uint8_t length, uint64_t timestamp);
void cxl_create_dc_event_records_for_extents(CXLType3Dev *ct3d,
                                             CXLDCEventType type,
                                             CXLDCExtentRaw extents[],
                                             uint32_t ext_count);
bool cxl_extents_overlaps_dpa_range(CXLDCExtentList *list,
                                    uint64_t dpa, uint64_t len);
bool cxl_extent_groups_overlaps_dpa_range(CXLDCExtentGroupList *list,
                                          uint64_t dpa, uint64_t len);
735 #endif
736