/*
 * CXL Utility library for mailbox interface
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/cxl/cxl.h"
#include "hw/cxl/cxl_events.h"
#include "hw/cxl/cxl_mailbox.h"
#include "hw/pci/pci.h"
#include "hw/pci-bridge/cxl_upstream_port.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "qemu/units.h"
#include "qemu/uuid.h"
#include "sysemu/hostmem.h"
#include "qemu/range.h"

#define CXL_CAPACITY_MULTIPLIER (256 * MiB)
#define CXL_DC_EVENT_LOG_SIZE 8
#define CXL_NUM_EXTENTS_SUPPORTED 512
#define CXL_NUM_TAGS_SUPPORTED 0

/*
 * How to add a new command: an example using command set FOO, with cmd BAR.
 *  1. Add the command set and cmd to the enum.
 *     FOO = 0x7f,
 *     #define BAR 0
 *  2. Implement the handler, matching the signature used throughout this file
 *     static CXLRetCode cmd_foo_bar(const struct cxl_cmd *cmd,
 *                                   uint8_t *payload_in, size_t len_in,
 *                                   uint8_t *payload_out, size_t *len_out,
 *                                   CXLCCI *cci)
 *  3. Add the command to the cxl_cmd_set[][]
 *     [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y },
 *
 * Writing the handler:
 *    The handler is provided with the &struct cxl_cmd, the &CXLDeviceState,
 *    and the in/out length of the payload. The handler is responsible for
 *    consuming the payload from cmd->payload and operating upon it as
 *    necessary. It must then fill the output data into cmd->payload
 *    (overwriting what was there), set the length, and return a valid
 *    return code.
 *
 * XXX: The handler need not worry about endianness. The payload is read out of
 * a register interface that already deals with it.
 */
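
/*
 * Illustrative only - not wired into cxl_cmd_set[][]: a minimal handler
 * following the recipe above for the hypothetical FOO/BAR command, echoing
 * one little-endian 32-bit word from the input payload to the output.
 *
 *   static CXLRetCode cmd_foo_bar(const struct cxl_cmd *cmd,
 *                                 uint8_t *payload_in, size_t len_in,
 *                                 uint8_t *payload_out, size_t *len_out,
 *                                 CXLCCI *cci)
 *   {
 *       if (len_in < sizeof(uint32_t)) {
 *           return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
 *       }
 *       stl_le_p(payload_out, ldl_le_p(payload_in));
 *       *len_out = sizeof(uint32_t);
 *       return CXL_MBOX_SUCCESS;
 *   }
 */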

enum {
    INFOSTAT    = 0x00,
        #define IS_IDENTIFY                 0x1
        #define BACKGROUND_OPERATION_STATUS 0x2
    EVENTS      = 0x01,
        #define GET_RECORDS          0x0
        #define CLEAR_RECORDS        0x1
        #define GET_INTERRUPT_POLICY 0x2
        #define SET_INTERRUPT_POLICY 0x3
    FIRMWARE_UPDATE = 0x02,
        #define GET_INFO 0x0
        #define TRANSFER 0x1
        #define ACTIVATE 0x2
    TIMESTAMP   = 0x03,
        #define GET 0x0
        #define SET 0x1
    LOGS        = 0x04,
        #define GET_SUPPORTED 0x0
        #define GET_LOG       0x1
    FEATURES    = 0x05,
        #define GET_SUPPORTED 0x0
        #define GET_FEATURE   0x1
        #define SET_FEATURE   0x2
    IDENTIFY    = 0x40,
        #define MEMORY_DEVICE 0x0
    CCLS        = 0x41,
        #define GET_PARTITION_INFO 0x0
        #define GET_LSA            0x2
        #define SET_LSA            0x3
    SANITIZE    = 0x44,
        #define OVERWRITE    0x0
        #define SECURE_ERASE 0x1
    PERSISTENT_MEM = 0x45,
        #define GET_SECURITY_STATE 0x0
    MEDIA_AND_POISON = 0x43,
        #define GET_POISON_LIST             0x0
        #define INJECT_POISON               0x1
        #define CLEAR_POISON                0x2
        #define GET_SCAN_MEDIA_CAPABILITIES 0x3
        #define SCAN_MEDIA                  0x4
        #define GET_SCAN_MEDIA_RESULTS      0x5
    DCD_CONFIG  = 0x48,
        #define GET_DC_CONFIG        0x0
        #define GET_DYN_CAP_EXT_LIST 0x1
        #define ADD_DYN_CAP_RSP      0x2
        #define RELEASE_DYN_CAP      0x3
    PHYSICAL_SWITCH = 0x51,
        #define IDENTIFY_SWITCH_DEVICE  0x0
        #define GET_PHYSICAL_PORT_STATE 0x1
    TUNNEL      = 0x53,
        #define MANAGEMENT_COMMAND 0x0
};
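
/*
 * Note: the full 16-bit mailbox opcode is the command set in the upper byte
 * and the command in the lower byte, i.e. (set << 8) | cmd, so for example
 * IDENTIFY (0x40) with MEMORY_DEVICE (0x0) is the spec's opcode 4000h, as
 * referenced in the section comments below.
 */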

/* CCI Message Format CXL r3.1 Figure 7-19 */
typedef struct CXLCCIMessage {
    uint8_t category;
#define CXL_CCI_CAT_REQ 0
#define CXL_CCI_CAT_RSP 1
    uint8_t tag;
    uint8_t resv1;
    uint8_t command;
    uint8_t command_set;
    uint8_t pl_length[3];
    uint16_t rc;
    uint16_t vendor_specific;
    uint8_t payload[];
} QEMU_PACKED CXLCCIMessage;
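
/*
 * pl_length above is a 24-bit little-endian byte count. It is written with
 * st24_le_p() and can be read back with the explicit shift-and-or form used
 * in cmd_tunnel_management_cmd() below:
 *
 *   st24_le_p(msg->pl_length, len);
 *   len = msg->pl_length[2] << 16 | msg->pl_length[1] << 8 |
 *         msg->pl_length[0];
 */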

/* This command is only defined for an MLD FM-owned LD or an MHD */
static CXLRetCode cmd_tunnel_management_cmd(const struct cxl_cmd *cmd,
                                            uint8_t *payload_in,
                                            size_t len_in,
                                            uint8_t *payload_out,
                                            size_t *len_out,
                                            CXLCCI *cci)
{
    PCIDevice *tunnel_target;
    CXLCCI *target_cci;
    struct {
        uint8_t port_or_ld_id;
        uint8_t target_type;
        uint16_t size;
        CXLCCIMessage ccimessage;
    } QEMU_PACKED *in;
    struct {
        uint16_t resp_len;
        uint8_t resv[2];
        CXLCCIMessage ccimessage;
    } QEMU_PACKED *out;
    size_t pl_length, length_out;
    bool bg_started;
    int rc;

    if (cmd->in < sizeof(*in)) {
        return CXL_MBOX_INVALID_INPUT;
    }
    in = (void *)payload_in;
    out = (void *)payload_out;

    /* Enough room for minimum sized message - no payload */
    if (in->size < sizeof(in->ccimessage)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /* Length of input payload should be in->size + a wrapping tunnel header */
    if (in->size != len_in - offsetof(typeof(*out), ccimessage)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    if (in->ccimessage.category != CXL_CCI_CAT_REQ) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (in->target_type != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "Tunneled Command sent to non-existent FM-LD");
        return CXL_MBOX_INVALID_INPUT;
    }

    /*
     * The target of a tunnel unfortunately depends on the type of CCI
     * reading the message.
     * If in a switch, it is the port number.
     * If in an MLD, it is the LD number.
     * If in an MHD, the target type indicates where we are going.
     */
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

        if (in->port_or_ld_id != 0) {
            /* Only pretending to have one for now! */
            return CXL_MBOX_INVALID_INPUT;
        }
        target_cci = &ct3d->ld0_cci;
    } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
        CXLUpstreamPort *usp = CXL_USP(cci->d);

        tunnel_target = pcie_find_port_by_pn(&PCI_BRIDGE(usp)->sec_bus,
                                             in->port_or_ld_id);
        if (!tunnel_target) {
            return CXL_MBOX_INVALID_INPUT;
        }
        tunnel_target =
            pci_bridge_get_sec_bus(PCI_BRIDGE(tunnel_target))->devices[0];
        if (!tunnel_target) {
            return CXL_MBOX_INVALID_INPUT;
        }
        if (object_dynamic_cast(OBJECT(tunnel_target), TYPE_CXL_TYPE3)) {
            CXLType3Dev *ct3d = CXL_TYPE3(tunnel_target);

            /* Tunneled VDMs always land on FM Owned LD */
            target_cci = &ct3d->vdm_fm_owned_ld_mctp_cci;
        } else {
            return CXL_MBOX_INVALID_INPUT;
        }
    } else {
        return CXL_MBOX_INVALID_INPUT;
    }

    pl_length = in->ccimessage.pl_length[2] << 16 |
        in->ccimessage.pl_length[1] << 8 | in->ccimessage.pl_length[0];
    rc = cxl_process_cci_message(target_cci,
                                 in->ccimessage.command_set,
                                 in->ccimessage.command,
                                 pl_length, in->ccimessage.payload,
                                 &length_out, out->ccimessage.payload,
                                 &bg_started);
    /* Payload should be in place. The rest of the CCI header needs filling. */
    out->resp_len = length_out + sizeof(CXLCCIMessage);
    st24_le_p(out->ccimessage.pl_length, length_out);
    out->ccimessage.rc = rc;
    out->ccimessage.category = CXL_CCI_CAT_RSP;
    out->ccimessage.command = in->ccimessage.command;
    out->ccimessage.command_set = in->ccimessage.command_set;
    out->ccimessage.tag = in->ccimessage.tag;
    *len_out = length_out + sizeof(*out);

    return CXL_MBOX_SUCCESS;
}
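
/*
 * Illustrative request layout only (MCTP framing not shown): a tunneled
 * Identify (command set 0x40, command 0x0) to LD 0 of an MLD would wrap an
 * inner CCI message roughly as
 *
 *   in->port_or_ld_id = 0;                // LD 0
 *   in->target_type = 0;                  // only target type handled above
 *   in->size = sizeof(CXLCCIMessage);     // no inner request payload
 *   in->ccimessage.category = CXL_CCI_CAT_REQ;
 *   in->ccimessage.command_set = 0x40;
 *   in->ccimessage.command = 0x0;
 *   st24_le_p(in->ccimessage.pl_length, 0);
 */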

static CXLRetCode cmd_events_get_records(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in, size_t len_in,
                                         uint8_t *payload_out, size_t *len_out,
                                         CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLGetEventPayload *pl;
    uint8_t log_type;
    int max_recs;

    if (cmd->in < sizeof(log_type)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    log_type = payload_in[0];

    pl = (CXLGetEventPayload *)payload_out;

    max_recs = (cxlds->payload_size - CXL_EVENT_PAYLOAD_HDR_SIZE) /
               CXL_EVENT_RECORD_SIZE;
    if (max_recs > 0xFFFF) {
        max_recs = 0xFFFF;
    }

    return cxl_event_get_records(cxlds, pl, log_type, max_recs, len_out);
}

static CXLRetCode cmd_events_clear_records(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLClearEventPayload *pl;

    pl = (CXLClearEventPayload *)payload_in;
    *len_out = 0;
    return cxl_event_clear_records(cxlds, pl);
}

static CXLRetCode cmd_events_get_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    policy = (CXLEventInterruptPolicy *)payload_out;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    if (log->irq_enabled) {
        policy->info_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    if (log->irq_enabled) {
        policy->warn_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    if (log->irq_enabled) {
        policy->failure_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    if (log->irq_enabled) {
        policy->fatal_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    if (log->irq_enabled) {
        /* Dynamic Capacity borrows the same vector as info */
        policy->dyn_cap_settings = CXL_INT_MSI_MSIX;
    }

    *len_out = sizeof(*policy);
    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cmd_events_set_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    if (len_in < CXL_EVENT_INT_SETTING_MIN_LEN) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    policy = (CXLEventInterruptPolicy *)payload_in;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    log->irq_enabled = (policy->info_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    log->irq_enabled = (policy->warn_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    log->irq_enabled = (policy->failure_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    log->irq_enabled = (policy->fatal_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    /* DCD is optional */
    if (len_in < sizeof(*policy)) {
        return CXL_MBOX_SUCCESS;
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    log->irq_enabled = (policy->dyn_cap_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.1.1: Identify (Opcode 0001h) */
static CXLRetCode cmd_infostat_identify(const struct cxl_cmd *cmd,
                                        uint8_t *payload_in,
                                        size_t len_in,
                                        uint8_t *payload_out,
                                        size_t *len_out,
                                        CXLCCI *cci)
{
    PCIDeviceClass *class = PCI_DEVICE_GET_CLASS(cci->d);
    struct {
        uint16_t pcie_vid;
        uint16_t pcie_did;
        uint16_t pcie_subsys_vid;
        uint16_t pcie_subsys_id;
        uint64_t sn;
        uint8_t max_message_size;
        uint8_t component_type;
    } QEMU_PACKED *is_identify;
    QEMU_BUILD_BUG_ON(sizeof(*is_identify) != 18);

    is_identify = (void *)payload_out;
    is_identify->pcie_vid = class->vendor_id;
    is_identify->pcie_did = class->device_id;
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
        is_identify->sn = CXL_USP(cci->d)->sn;
        /* Subsystem info not defined for a USP */
        is_identify->pcie_subsys_vid = 0;
        is_identify->pcie_subsys_id = 0;
        is_identify->component_type = 0x0; /* Switch */
    } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        PCIDevice *pci_dev = PCI_DEVICE(cci->d);

        is_identify->sn = CXL_TYPE3(cci->d)->sn;
        /*
         * We can't always use class->subsystem_vendor_id as
         * it is not set if the defaults are used.
         */
        is_identify->pcie_subsys_vid =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID);
        is_identify->pcie_subsys_id =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_ID);
        is_identify->component_type = 0x3; /* Type 3 */
    }

    /* TODO: Allow this to vary across different CCIs */
    is_identify->max_message_size = 9; /* 512 bytes - MCTP_CXL_MAILBOX_BYTES */
    *len_out = sizeof(*is_identify);
    return CXL_MBOX_SUCCESS;
}

static void cxl_set_dsp_active_bm(PCIBus *b, PCIDevice *d,
                                  void *private)
{
    uint8_t *bm = private;

    if (object_dynamic_cast(OBJECT(d), TYPE_CXL_DSP)) {
        uint8_t port = PCIE_PORT(d)->port;

        bm[port / 8] |= 1 << (port % 8);
    }
}

/* CXL r3.1 Section 7.6.7.1.1: Identify Switch Device (Opcode 5100h) */
static CXLRetCode cmd_identify_switch_device(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    PCIEPort *usp = PCIE_PORT(cci->d);
    PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
    int num_phys_ports = pcie_count_ds_ports(bus);

    struct cxl_fmapi_ident_switch_dev_resp_pl {
        uint8_t ingress_port_id;
        uint8_t rsvd;
        uint8_t num_physical_ports;
        uint8_t num_vcss;
        uint8_t active_port_bitmask[0x20];
        uint8_t active_vcs_bitmask[0x20];
        uint16_t total_vppbs;
        uint16_t bound_vppbs;
        uint8_t num_hdm_decoders_per_usp;
    } QEMU_PACKED *out;
    QEMU_BUILD_BUG_ON(sizeof(*out) != 0x49);

    out = (struct cxl_fmapi_ident_switch_dev_resp_pl *)payload_out;
    *out = (struct cxl_fmapi_ident_switch_dev_resp_pl) {
        .num_physical_ports = num_phys_ports + 1, /* 1 USP */
        .num_vcss = 1, /* Multiple VCSs not yet supported - potentially tricky */
        .active_vcs_bitmask[0] = 0x1,
        .total_vppbs = num_phys_ports + 1,
        .bound_vppbs = num_phys_ports + 1,
        .num_hdm_decoders_per_usp = 4,
    };

    /* Depends on the CCI type */
    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_PCIE_PORT)) {
        out->ingress_port_id = PCIE_PORT(cci->intf)->port;
    } else {
        /* MCTP? */
        out->ingress_port_id = 0;
    }

    pci_for_each_device_under_bus(bus, cxl_set_dsp_active_bm,
                                  out->active_port_bitmask);
    out->active_port_bitmask[usp->port / 8] |= (1 << usp->port % 8);

    *len_out = sizeof(*out);

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 7.6.7.1.2: Get Physical Port State (Opcode 5101h) */
static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    /* CXL r3.1 Table 7-17: Get Physical Port State Request Payload */
    struct cxl_fmapi_get_phys_port_state_req_pl {
        uint8_t num_ports;
        uint8_t ports[];
    } QEMU_PACKED *in;

    /*
     * CXL r3.1 Table 7-19: Get Physical Port State Port Information Block
     * Format
     */
    struct cxl_fmapi_port_state_info_block {
        uint8_t port_id;
        uint8_t config_state;
        uint8_t connected_device_cxl_version;
        uint8_t rsv1;
        uint8_t connected_device_type;
        uint8_t port_cxl_version_bitmask;
        uint8_t max_link_width;
        uint8_t negotiated_link_width;
        uint8_t supported_link_speeds_vector;
        uint8_t max_link_speed;
        uint8_t current_link_speed;
        uint8_t ltssm_state;
        uint8_t first_lane_num;
        uint16_t link_state;
        uint8_t supported_ld_count;
    } QEMU_PACKED;

    /* CXL r3.1 Table 7-18: Get Physical Port State Response Payload */
    struct cxl_fmapi_get_phys_port_state_resp_pl {
        uint8_t num_ports;
        uint8_t rsv1[3];
        struct cxl_fmapi_port_state_info_block ports[];
    } QEMU_PACKED *out;
    PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
    PCIEPort *usp = PCIE_PORT(cci->d);
    size_t pl_size;
    int i;

    in = (struct cxl_fmapi_get_phys_port_state_req_pl *)payload_in;
    out = (struct cxl_fmapi_get_phys_port_state_resp_pl *)payload_out;

    /* Check if what was requested can fit */
    if (sizeof(*out) + sizeof(*out->ports) * in->num_ports > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* For success there should be a match for each requested port */
    out->num_ports = in->num_ports;

    for (i = 0; i < in->num_ports; i++) {
        struct cxl_fmapi_port_state_info_block *port;
        /* First try to match on downstream port */
        PCIDevice *port_dev;
        uint16_t lnkcap, lnkcap2, lnksta;

        port = &out->ports[i];

        port_dev = pcie_find_port_by_pn(bus, in->ports[i]);
        if (port_dev) { /* DSP */
            PCIDevice *ds_dev = pci_bridge_get_sec_bus(PCI_BRIDGE(port_dev))
                ->devices[0];

            port->config_state = 3;
            if (ds_dev) {
                if (object_dynamic_cast(OBJECT(ds_dev), TYPE_CXL_TYPE3)) {
                    port->connected_device_type = 5; /* Assume MLD for now */
                } else {
                    port->connected_device_type = 1;
                }
            } else {
                port->connected_device_type = 0;
            }
            port->supported_ld_count = 3;
        } else if (usp->port == in->ports[i]) { /* USP */
            port_dev = PCI_DEVICE(usp);
            port->config_state = 4;
            port->connected_device_type = 0;
        } else {
            return CXL_MBOX_INVALID_INPUT;
        }

        port->port_id = in->ports[i];
        /* Information on status of this port in lnksta, lnkcap */
        if (!port_dev->exp.exp_cap) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
        lnksta = port_dev->config_read(port_dev,
                                       port_dev->exp.exp_cap + PCI_EXP_LNKSTA,
                                       sizeof(lnksta));
        lnkcap = port_dev->config_read(port_dev,
                                       port_dev->exp.exp_cap + PCI_EXP_LNKCAP,
                                       sizeof(lnkcap));
        lnkcap2 = port_dev->config_read(port_dev,
                                        port_dev->exp.exp_cap + PCI_EXP_LNKCAP2,
                                        sizeof(lnkcap2));

        port->max_link_width = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
        port->negotiated_link_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> 4;
        /* No definition for SLS field in linux/pci_regs.h */
        port->supported_link_speeds_vector = (lnkcap2 & 0xFE) >> 1;
        port->max_link_speed = lnkcap & PCI_EXP_LNKCAP_SLS;
        port->current_link_speed = lnksta & PCI_EXP_LNKSTA_CLS;
        /* TODO: Track down if we can get the rest of the info */
        port->ltssm_state = 0x7;
        port->first_lane_num = 0;
        port->link_state = 0;
        port->port_cxl_version_bitmask = 0x2;
        port->connected_device_cxl_version = 0x2;
    }

    pl_size = sizeof(*out) + sizeof(*out->ports) * in->num_ports;
    *len_out = pl_size;

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.1.2: Background Operation Status (Opcode 0002h) */
static CXLRetCode cmd_infostat_bg_op_sts(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint8_t status;
        uint8_t rsvd;
        uint16_t opcode;
        uint16_t returncode;
        uint16_t vendor_ext_status;
    } QEMU_PACKED *bg_op_status;
    QEMU_BUILD_BUG_ON(sizeof(*bg_op_status) != 8);

    bg_op_status = (void *)payload_out;
    bg_op_status->status = cci->bg.complete_pct << 1;
    if (cci->bg.runtime > 0) {
        bg_op_status->status |= 1U << 0;
    }
    bg_op_status->opcode = cci->bg.opcode;
    bg_op_status->returncode = cci->bg.ret_code;
    *len_out = sizeof(*bg_op_status);

    return CXL_MBOX_SUCCESS;
}

#define CXL_FW_SLOTS 2
#define CXL_FW_SIZE  0x02000000 /* 32 MiB */

/* CXL r3.1 Section 8.2.9.3.1: Get FW Info (Opcode 0200h) */
static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    struct {
        uint8_t slots_supported;
        uint8_t slot_info;
        uint8_t caps;
        uint8_t rsvd[0xd];
        char fw_rev1[0x10];
        char fw_rev2[0x10];
        char fw_rev3[0x10];
        char fw_rev4[0x10];
    } QEMU_PACKED *fw_info;
    QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50);

    if ((cxl_dstate->vmem_size < CXL_CAPACITY_MULTIPLIER) ||
        (cxl_dstate->pmem_size < CXL_CAPACITY_MULTIPLIER) ||
        (ct3d->dc.total_capacity < CXL_CAPACITY_MULTIPLIER)) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    fw_info = (void *)payload_out;

    fw_info->slots_supported = CXL_FW_SLOTS;
    fw_info->slot_info = (cci->fw.active_slot & 0x7) |
                         ((cci->fw.staged_slot & 0x7) << 3);
    fw_info->caps = BIT(0); /* online update supported */

    if (cci->fw.slot[0]) {
        pstrcpy(fw_info->fw_rev1, sizeof(fw_info->fw_rev1), "BWFW VERSION 0");
    }
    if (cci->fw.slot[1]) {
        pstrcpy(fw_info->fw_rev2, sizeof(fw_info->fw_rev2), "BWFW VERSION 1");
    }

    *len_out = sizeof(*fw_info);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.3.2: Transfer FW (Opcode 0201h) */
#define CXL_FW_XFER_ALIGNMENT 128

#define CXL_FW_XFER_ACTION_FULL     0x0
#define CXL_FW_XFER_ACTION_INIT     0x1
#define CXL_FW_XFER_ACTION_CONTINUE 0x2
#define CXL_FW_XFER_ACTION_END      0x3
#define CXL_FW_XFER_ACTION_ABORT    0x4
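
/*
 * Illustrative sequence only (not from the spec): a partitioned transfer
 * into slot 2 would issue
 *   INIT     (offset 0), then
 *   CONTINUE (offset in units of CXL_FW_XFER_ALIGNMENT, i.e. 128 bytes), ...
 *   END      (slot = 2, final chunk),
 * while ACTION_FULL sends the whole package in one command (ignoring the
 * offset) and ACTION_ABORT cancels an in-flight partitioned transfer.
 */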
static CXLRetCode cmd_firmware_update_transfer(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    struct {
        uint8_t action;
        uint8_t slot;
        uint8_t rsvd1[2];
        uint32_t offset;
        uint8_t rsvd2[0x78];
        uint8_t data[];
    } QEMU_PACKED *fw_transfer = (void *)payload_in;
    size_t offset, length;

    if (fw_transfer->action == CXL_FW_XFER_ACTION_ABORT) {
        /*
         * At this point there aren't any on-going transfers
         * running in the bg - this is serialized before this
         * call altogether. Just mark the state machine and
         * disregard any other input.
         */
        cci->fw.transferring = false;
        return CXL_MBOX_SUCCESS;
    }

    offset = fw_transfer->offset * CXL_FW_XFER_ALIGNMENT;
    length = len - sizeof(*fw_transfer);
    if (offset + length > CXL_FW_SIZE) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (cci->fw.transferring) {
        if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL ||
            fw_transfer->action == CXL_FW_XFER_ACTION_INIT) {
            return CXL_MBOX_FW_XFER_IN_PROGRESS;
        }
        /*
         * Abort partitioned package transfer if over 30 secs
         * between parts. As opposed to the explicit ABORT action,
         * semantically treat this condition as an error - as
         * if a part action were passed without a previous INIT.
         */
        if (difftime(time(NULL), cci->fw.last_partxfer) > 30.0) {
            cci->fw.transferring = false;
            return CXL_MBOX_INVALID_INPUT;
        }
    } else if (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE ||
               fw_transfer->action == CXL_FW_XFER_ACTION_END) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* allow back-to-back retransmission */
    if ((offset != cci->fw.prev_offset || length != cci->fw.prev_len) &&
        (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE ||
         fw_transfer->action == CXL_FW_XFER_ACTION_END)) {
        /* verify no overlaps */
        if (offset < cci->fw.prev_offset + cci->fw.prev_len) {
            return CXL_MBOX_FW_XFER_OUT_OF_ORDER;
        }
    }

    switch (fw_transfer->action) {
    case CXL_FW_XFER_ACTION_FULL: /* ignores offset */
    case CXL_FW_XFER_ACTION_END:
        if (fw_transfer->slot == 0 ||
            fw_transfer->slot == cci->fw.active_slot ||
            fw_transfer->slot > CXL_FW_SLOTS) {
            return CXL_MBOX_FW_INVALID_SLOT;
        }

        /* mark the slot used upon bg completion */
        break;
    case CXL_FW_XFER_ACTION_INIT:
        if (offset != 0) {
            return CXL_MBOX_INVALID_INPUT;
        }

        cci->fw.transferring = true;
        cci->fw.prev_offset = offset;
        cci->fw.prev_len = length;
        break;
    case CXL_FW_XFER_ACTION_CONTINUE:
        cci->fw.prev_offset = offset;
        cci->fw.prev_len = length;
        break;
    default:
        return CXL_MBOX_INVALID_INPUT;
    }

    if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL) {
        cci->bg.runtime = 10 * 1000UL;
    } else {
        cci->bg.runtime = 2 * 1000UL;
    }
    /* keep relevant context for bg completion */
    cci->fw.curr_action = fw_transfer->action;
    cci->fw.curr_slot = fw_transfer->slot;
    *len_out = 0;

    return CXL_MBOX_BG_STARTED;
}

static void __do_firmware_xfer(CXLCCI *cci)
{
    switch (cci->fw.curr_action) {
    case CXL_FW_XFER_ACTION_FULL:
    case CXL_FW_XFER_ACTION_END:
        cci->fw.slot[cci->fw.curr_slot - 1] = true;
        cci->fw.transferring = false;
        break;
    case CXL_FW_XFER_ACTION_INIT:
    case CXL_FW_XFER_ACTION_CONTINUE:
        time(&cci->fw.last_partxfer);
        break;
    default:
        break;
    }
}

/* CXL r3.1 section 8.2.9.3.3: Activate FW (Opcode 0202h) */
static CXLRetCode cmd_firmware_update_activate(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    struct {
        uint8_t action;
        uint8_t slot;
    } QEMU_PACKED *fw_activate = (void *)payload_in;
    QEMU_BUILD_BUG_ON(sizeof(*fw_activate) != 0x2);

    if (fw_activate->slot == 0 ||
        fw_activate->slot == cci->fw.active_slot ||
        fw_activate->slot > CXL_FW_SLOTS) {
        return CXL_MBOX_FW_INVALID_SLOT;
    }

    /* ensure that an actual fw package is there */
    if (!cci->fw.slot[fw_activate->slot - 1]) {
        return CXL_MBOX_FW_INVALID_SLOT;
    }

    switch (fw_activate->action) {
    case 0: /* online */
        cci->fw.active_slot = fw_activate->slot;
        break;
    case 1: /* reset */
        cci->fw.staged_slot = fw_activate->slot;
        break;
    default:
        return CXL_MBOX_INVALID_INPUT;
    }

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.4.1: Get Timestamp (Opcode 0300h) */
static CXLRetCode cmd_timestamp_get(const struct cxl_cmd *cmd,
                                    uint8_t *payload_in,
                                    size_t len_in,
                                    uint8_t *payload_out,
                                    size_t *len_out,
                                    CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    uint64_t final_time = cxl_device_get_timestamp(cxl_dstate);

    stq_le_p(payload_out, final_time);
    *len_out = 8;

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.4.2: Set Timestamp (Opcode 0301h) */
static CXLRetCode cmd_timestamp_set(const struct cxl_cmd *cmd,
                                    uint8_t *payload_in,
                                    size_t len_in,
                                    uint8_t *payload_out,
                                    size_t *len_out,
                                    CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;

    cxl_dstate->timestamp.set = true;
    cxl_dstate->timestamp.last_set = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    cxl_dstate->timestamp.host_set = le64_to_cpu(*(uint64_t *)payload_in);

    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.5.2.1: Command Effects Log (CEL) */
static const QemuUUID cel_uuid = {
    .data = UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79,
                 0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17)
};

/* CXL r3.1 Section 8.2.9.5.1: Get Supported Logs (Opcode 0400h) */
static CXLRetCode cmd_logs_get_supported(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint16_t entries;
        uint8_t rsvd[6];
        struct {
            QemuUUID uuid;
            uint32_t size;
        } log_entries[1];
    } QEMU_PACKED *supported_logs = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*supported_logs) != 0x1c);

    supported_logs->entries = 1;
    supported_logs->log_entries[0].uuid = cel_uuid;
    supported_logs->log_entries[0].size = 4 * cci->cel_size;

    *len_out = sizeof(*supported_logs);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h) */
static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct {
        QemuUUID uuid;
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_log;

    get_log = (void *)payload_in;

    /*
     * CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h)
     *   The device shall return Invalid Input if the Offset or Length
     *   fields attempt to access beyond the size of the log as reported by
     *   Get Supported Logs.
     *
     * The CEL buffer is large enough to fit all commands in the emulation, so
     * the only possible failure would be if the mailbox itself isn't big
     * enough.
     */
    if (get_log->offset + get_log->length > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) {
        return CXL_MBOX_INVALID_LOG;
    }

    /* Store off everything to local variables so we can wipe the payload */
    *len_out = get_log->length;

    memmove(payload_out, cci->cel_log + get_log->offset, get_log->length);

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.6: Features */
/*
 * Get Supported Features output payload
 * CXL r3.1 section 8.2.9.6.1 Table 8-96
 */
typedef struct CXLSupportedFeatureHeader {
    uint16_t entries;
    uint16_t nsuppfeats_dev;
    uint32_t reserved;
} QEMU_PACKED CXLSupportedFeatureHeader;

/*
 * Get Supported Features Supported Feature Entry
 * CXL r3.1 section 8.2.9.6.1 Table 8-97
 */
typedef struct CXLSupportedFeatureEntry {
    QemuUUID uuid;
    uint16_t feat_index;
    uint16_t get_feat_size;
    uint16_t set_feat_size;
    uint32_t attr_flags;
    uint8_t get_feat_version;
    uint8_t set_feat_version;
    uint16_t set_feat_effects;
    uint8_t rsvd[18];
} QEMU_PACKED CXLSupportedFeatureEntry;

/* Supported Feature Entry : attribute flags */
#define CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE BIT(0)
#define CXL_FEAT_ENTRY_ATTR_FLAG_DEEPEST_RESET_PERSISTENCE_MASK GENMASK(3, 1)
#define CXL_FEAT_ENTRY_ATTR_FLAG_PERSIST_ACROSS_FIRMWARE_UPDATE BIT(4)
#define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_DEFAULT_SELECTION BIT(5)
#define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_SAVED_SELECTION BIT(6)

/* Supported Feature Entry : set feature effects */
#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_COLD_RESET BIT(0)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE BIT(1)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_DATA_CHANGE BIT(2)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_POLICY_CHANGE BIT(3)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_LOG_CHANGE BIT(4)
#define CXL_FEAT_ENTRY_SFE_SECURITY_STATE_CHANGE BIT(5)
#define CXL_FEAT_ENTRY_SFE_BACKGROUND_OPERATION BIT(6)
#define CXL_FEAT_ENTRY_SFE_SUPPORT_SECONDARY_MAILBOX BIT(7)
#define CXL_FEAT_ENTRY_SFE_SUPPORT_ABORT_BACKGROUND_OPERATION BIT(8)
#define CXL_FEAT_ENTRY_SFE_CEL_VALID BIT(9)
#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CONV_RESET BIT(10)
#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CXL_RESET BIT(11)

enum CXL_SUPPORTED_FEATURES_LIST {
    CXL_FEATURE_PATROL_SCRUB = 0,
    CXL_FEATURE_ECS,
    CXL_FEATURE_MAX
};

/* Get Feature CXL r3.1 section 8.2.9.6.2 */
/*
 * Get Feature input payload
 * CXL r3.1 section 8.2.9.6.2 Table 8-99
 */
/* Get Feature : Payload in selection */
enum CXL_GET_FEATURE_SELECTION {
    CXL_GET_FEATURE_SEL_CURRENT_VALUE,
    CXL_GET_FEATURE_SEL_DEFAULT_VALUE,
    CXL_GET_FEATURE_SEL_SAVED_VALUE,
    CXL_GET_FEATURE_SEL_MAX
};

/* Set Feature CXL r3.1 section 8.2.9.6.3 */
/*
 * Set Feature input payload
 * CXL r3.1 section 8.2.9.6.3 Table 8-101
 */
typedef struct CXLSetFeatureInHeader {
    QemuUUID uuid;
    uint32_t flags;
    uint16_t offset;
    uint8_t version;
    uint8_t rsvd[9];
} QEMU_PACKED QEMU_ALIGNED(16) CXLSetFeatureInHeader;

/* Set Feature : Payload in flags */
#define CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK 0x7
enum CXL_SET_FEATURE_FLAG_DATA_TRANSFER {
    CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_CONTINUE_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MAX
};
#define CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET BIT(3)
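
/*
 * Illustrative only: a single-shot Set Feature request composes its flags as
 * a transfer type plus the saved-across-reset bit, e.g.
 *
 *   hdr.flags = CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER |
 *               CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET;
 *
 * while a partitioned transfer uses INITIATE, then CONTINUE, then FINISH
 * (or ABORT), mirroring the handling in cmd_features_set_feature() below.
 */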

/* CXL r3.1 section 8.2.9.9.11.1: Device Patrol Scrub Control Feature */
static const QemuUUID patrol_scrub_uuid = {
    .data = UUID(0x96dad7d6, 0xfde8, 0x482b, 0xa7, 0x33,
                 0x75, 0x77, 0x4e, 0x06, 0xdb, 0x8a)
};

typedef struct CXLMemPatrolScrubSetFeature {
    CXLSetFeatureInHeader hdr;
    CXLMemPatrolScrubWriteAttrs feat_data;
} QEMU_PACKED QEMU_ALIGNED(16) CXLMemPatrolScrubSetFeature;

/*
 * CXL r3.1 section 8.2.9.9.11.2:
 * DDR5 Error Check Scrub (ECS) Control Feature
 */
static const QemuUUID ecs_uuid = {
    .data = UUID(0xe5b13f22, 0x2328, 0x4a14, 0xb8, 0xba,
                 0xb9, 0x69, 0x1e, 0x89, 0x33, 0x86)
};

typedef struct CXLMemECSSetFeature {
    CXLSetFeatureInHeader hdr;
    CXLMemECSWriteAttrs feat_data[];
} QEMU_PACKED QEMU_ALIGNED(16) CXLMemECSSetFeature;

/* CXL r3.1 section 8.2.9.6.1: Get Supported Features (Opcode 0500h) */
static CXLRetCode cmd_features_get_supported(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        uint32_t count;
        uint16_t start_index;
        uint16_t reserved;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_feats_in = (void *)payload_in;

    struct {
        CXLSupportedFeatureHeader hdr;
        CXLSupportedFeatureEntry feat_entries[];
    } QEMU_PACKED QEMU_ALIGNED(16) *get_feats_out = (void *)payload_out;
    uint16_t index, req_entries;
    uint16_t entry;

    if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        return CXL_MBOX_UNSUPPORTED;
    }
    if (get_feats_in->count < sizeof(CXLSupportedFeatureHeader) ||
        get_feats_in->start_index >= CXL_FEATURE_MAX) {
        return CXL_MBOX_INVALID_INPUT;
    }

    req_entries = (get_feats_in->count -
                   sizeof(CXLSupportedFeatureHeader)) /
                  sizeof(CXLSupportedFeatureEntry);
    req_entries = MIN(req_entries,
                      (CXL_FEATURE_MAX - get_feats_in->start_index));

    for (entry = 0, index = get_feats_in->start_index;
         entry < req_entries; index++) {
        switch (index) {
        case CXL_FEATURE_PATROL_SCRUB:
            /* Fill supported feature entry for device patrol scrub control */
            get_feats_out->feat_entries[entry++] =
                (struct CXLSupportedFeatureEntry) {
                .uuid = patrol_scrub_uuid,
                .feat_index = index,
                .get_feat_size = sizeof(CXLMemPatrolScrubReadAttrs),
                .set_feat_size = sizeof(CXLMemPatrolScrubWriteAttrs),
                .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE,
                .get_feat_version = CXL_MEMDEV_PS_GET_FEATURE_VERSION,
                .set_feat_version = CXL_MEMDEV_PS_SET_FEATURE_VERSION,
                .set_feat_effects = CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE |
                                    CXL_FEAT_ENTRY_SFE_CEL_VALID,
            };
            break;
        case CXL_FEATURE_ECS:
            /* Fill supported feature entry for device DDR5 ECS control */
            get_feats_out->feat_entries[entry++] =
                (struct CXLSupportedFeatureEntry) {
                .uuid = ecs_uuid,
                .feat_index = index,
                .get_feat_size = CXL_ECS_NUM_MEDIA_FRUS *
                                 sizeof(CXLMemECSReadAttrs),
                .set_feat_size = CXL_ECS_NUM_MEDIA_FRUS *
                                 sizeof(CXLMemECSWriteAttrs),
                .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE,
                .get_feat_version = CXL_ECS_GET_FEATURE_VERSION,
                .set_feat_version = CXL_ECS_SET_FEATURE_VERSION,
                .set_feat_effects = CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE |
                                    CXL_FEAT_ENTRY_SFE_CEL_VALID,
            };
            break;
        default:
            __builtin_unreachable();
        }
    }
    get_feats_out->hdr.nsuppfeats_dev = CXL_FEATURE_MAX;
    get_feats_out->hdr.entries = req_entries;
    *len_out = sizeof(CXLSupportedFeatureHeader) +
               req_entries * sizeof(CXLSupportedFeatureEntry);

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.6.2: Get Feature (Opcode 0501h) */
static CXLRetCode cmd_features_get_feature(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    struct {
        QemuUUID uuid;
        uint16_t offset;
        uint16_t count;
        uint8_t selection;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_feature;
    uint16_t bytes_to_copy = 0;
    CXLType3Dev *ct3d;
    CXLSetFeatureInfo *set_feat_info;

    if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        return CXL_MBOX_UNSUPPORTED;
    }

    ct3d = CXL_TYPE3(cci->d);
    get_feature = (void *)payload_in;

    set_feat_info = &ct3d->set_feat_info;
    if (qemu_uuid_is_equal(&get_feature->uuid, &set_feat_info->uuid)) {
        return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS;
    }

    if (get_feature->selection != CXL_GET_FEATURE_SEL_CURRENT_VALUE) {
        return CXL_MBOX_UNSUPPORTED;
    }
    if (get_feature->offset + get_feature->count > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (qemu_uuid_is_equal(&get_feature->uuid, &patrol_scrub_uuid)) {
        if (get_feature->offset >= sizeof(CXLMemPatrolScrubReadAttrs)) {
            return CXL_MBOX_INVALID_INPUT;
        }
        bytes_to_copy = sizeof(CXLMemPatrolScrubReadAttrs) -
                        get_feature->offset;
        bytes_to_copy = MIN(bytes_to_copy, get_feature->count);
        memcpy(payload_out,
               (uint8_t *)&ct3d->patrol_scrub_attrs + get_feature->offset,
               bytes_to_copy);
    } else if (qemu_uuid_is_equal(&get_feature->uuid, &ecs_uuid)) {
        if (get_feature->offset >= CXL_ECS_NUM_MEDIA_FRUS *
            sizeof(CXLMemECSReadAttrs)) {
            return CXL_MBOX_INVALID_INPUT;
        }
        bytes_to_copy = CXL_ECS_NUM_MEDIA_FRUS *
                        sizeof(CXLMemECSReadAttrs) -
                        get_feature->offset;
        bytes_to_copy = MIN(bytes_to_copy, get_feature->count);
        memcpy(payload_out,
               (uint8_t *)&ct3d->ecs_attrs + get_feature->offset,
               bytes_to_copy);
    } else {
        return CXL_MBOX_UNSUPPORTED;
    }

    *len_out = bytes_to_copy;

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.6.3: Set Feature (Opcode 0502h) */
static CXLRetCode cmd_features_set_feature(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    CXLSetFeatureInHeader *hdr = (void *)payload_in;
    CXLMemPatrolScrubWriteAttrs *ps_write_attrs;
    CXLMemPatrolScrubSetFeature *ps_set_feature;
    CXLMemECSWriteAttrs *ecs_write_attrs;
    CXLMemECSSetFeature *ecs_set_feature;
    CXLSetFeatureInfo *set_feat_info;
    uint16_t bytes_to_copy = 0;
    uint8_t data_transfer_flag;
    CXLType3Dev *ct3d;
    uint16_t count;

    if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        return CXL_MBOX_UNSUPPORTED;
    }
    ct3d = CXL_TYPE3(cci->d);
    set_feat_info = &ct3d->set_feat_info;

    if (!qemu_uuid_is_null(&set_feat_info->uuid) &&
        !qemu_uuid_is_equal(&hdr->uuid, &set_feat_info->uuid)) {
        return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS;
    }
    if (hdr->flags & CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET) {
        set_feat_info->data_saved_across_reset = true;
    } else {
        set_feat_info->data_saved_across_reset = false;
    }

    data_transfer_flag =
        hdr->flags & CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK;
    if (data_transfer_flag == CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER) {
        set_feat_info->uuid = hdr->uuid;
        set_feat_info->data_size = 0;
    }
    set_feat_info->data_transfer_flag = data_transfer_flag;
    set_feat_info->data_offset = hdr->offset;
    bytes_to_copy = len_in - sizeof(CXLSetFeatureInHeader);

    if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) {
        if (hdr->version != CXL_MEMDEV_PS_SET_FEATURE_VERSION) {
            return CXL_MBOX_UNSUPPORTED;
        }

        ps_set_feature = (void *)payload_in;
        ps_write_attrs = &ps_set_feature->feat_data;
        memcpy((uint8_t *)&ct3d->patrol_scrub_wr_attrs + hdr->offset,
               ps_write_attrs,
               bytes_to_copy);
        set_feat_info->data_size += bytes_to_copy;

        if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
            data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) {
            ct3d->patrol_scrub_attrs.scrub_cycle &= ~0xFF;
            ct3d->patrol_scrub_attrs.scrub_cycle |=
                ct3d->patrol_scrub_wr_attrs.scrub_cycle_hr & 0xFF;
            ct3d->patrol_scrub_attrs.scrub_flags &= ~0x1;
            ct3d->patrol_scrub_attrs.scrub_flags |=
                ct3d->patrol_scrub_wr_attrs.scrub_flags & 0x1;
        }
    } else if (qemu_uuid_is_equal(&hdr->uuid,
                                  &ecs_uuid)) {
        if (hdr->version != CXL_ECS_SET_FEATURE_VERSION) {
            return CXL_MBOX_UNSUPPORTED;
        }

        ecs_set_feature = (void *)payload_in;
        ecs_write_attrs = ecs_set_feature->feat_data;
        memcpy((uint8_t *)ct3d->ecs_wr_attrs + hdr->offset,
               ecs_write_attrs,
               bytes_to_copy);
        set_feat_info->data_size += bytes_to_copy;

        if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
            data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) {
            for (count = 0; count < CXL_ECS_NUM_MEDIA_FRUS; count++) {
                ct3d->ecs_attrs[count].ecs_log_cap =
                    ct3d->ecs_wr_attrs[count].ecs_log_cap;
                ct3d->ecs_attrs[count].ecs_config =
                    ct3d->ecs_wr_attrs[count].ecs_config & 0x1F;
            }
        }
    } else {
        return CXL_MBOX_UNSUPPORTED;
    }

    if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
        data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER ||
        data_transfer_flag == CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER) {
        memset(&set_feat_info->uuid, 0, sizeof(QemuUUID));
        if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) {
            memset(&ct3d->patrol_scrub_wr_attrs, 0, set_feat_info->data_size);
        } else if (qemu_uuid_is_equal(&hdr->uuid, &ecs_uuid)) {
            memset(ct3d->ecs_wr_attrs, 0, set_feat_info->data_size);
        }
        set_feat_info->data_transfer_flag = 0;
        set_feat_info->data_saved_across_reset = false;
        set_feat_info->data_offset = 0;
        set_feat_info->data_size = 0;
    }

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.1.1: Identify Memory Device (Opcode 4000h) */
static CXLRetCode cmd_identify_memory_device(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        char fw_revision[0x10];
        uint64_t total_capacity;
        uint64_t volatile_capacity;
        uint64_t persistent_capacity;
        uint64_t partition_align;
        uint16_t info_event_log_size;
        uint16_t warning_event_log_size;
        uint16_t failure_event_log_size;
        uint16_t fatal_event_log_size;
        uint32_t lsa_size;
        uint8_t poison_list_max_mer[3];
        uint16_t inject_poison_limit;
        uint8_t poison_caps;
        uint8_t qos_telemetry_caps;
        uint16_t dc_event_log_size;
    } QEMU_PACKED *id;
    QEMU_BUILD_BUG_ON(sizeof(*id) != 0x45);
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    id = (void *)payload_out;

    snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0);

    stq_le_p(&id->total_capacity,
             cxl_dstate->static_mem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->persistent_capacity,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->volatile_capacity,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d));
    /* 256 poison records */
    st24_le_p(id->poison_list_max_mer, 256);
    /* No limit - so limited by main poison record limit */
    stw_le_p(&id->inject_poison_limit, 0);
    stw_le_p(&id->dc_event_log_size, CXL_DC_EVENT_LOG_SIZE);

    *len_out = sizeof(*id);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.2.1: Get Partition Info (Opcode 4100h) */
static CXLRetCode cmd_ccls_get_partition_info(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    struct {
        uint64_t active_vmem;
        uint64_t active_pmem;
        uint64_t next_vmem;
        uint64_t next_pmem;
    } QEMU_PACKED *part_info = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20);
    CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    stq_le_p(&part_info->active_vmem,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    /*
     * When both next_vmem and next_pmem are 0, there is no pending change to
     * partitioning.
     */
    stq_le_p(&part_info->next_vmem, 0);
    stq_le_p(&part_info->active_pmem,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&part_info->next_pmem, 0);

    *len_out = sizeof(*part_info);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.2.3: Get LSA (Opcode 4102h) */
static CXLRetCode cmd_ccls_get_lsa(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct {
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED *get_lsa;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    uint32_t offset, length;

    get_lsa = (void *)payload_in;
    offset = get_lsa->offset;
    length = get_lsa->length;

    if (offset + length > cvc->get_lsa_size(ct3d)) {
        *len_out = 0;
        return CXL_MBOX_INVALID_INPUT;
    }

    *len_out = cvc->get_lsa(ct3d, payload_out, length, offset);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.2.4: Set LSA (Opcode 4103h) */
static CXLRetCode cmd_ccls_set_lsa(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct set_lsa_pl {
        uint32_t offset;
        uint32_t rsvd;
        uint8_t data[];
    } QEMU_PACKED;
    struct set_lsa_pl *set_lsa_payload = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    const size_t hdr_len = offsetof(struct set_lsa_pl, data);

    *len_out = 0;
    if (!len_in) {
        return CXL_MBOX_SUCCESS;
    }

    if (set_lsa_payload->offset + len_in > cvc->get_lsa_size(ct3d) + hdr_len) {
        return CXL_MBOX_INVALID_INPUT;
    }
    len_in -= hdr_len;

    cvc->set_lsa(ct3d, set_lsa_payload->data, len_in, set_lsa_payload->offset);
    return CXL_MBOX_SUCCESS;
}

/* Perform the actual device zeroing */
static void __do_sanitization(CXLType3Dev *ct3d)
{
    MemoryRegion *mr;

    if (ct3d->hostvmem) {
        mr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (mr) {
            void *hostmem = memory_region_get_ram_ptr(mr);
            memset(hostmem, 0, memory_region_size(mr));
        }
    }

    if (ct3d->hostpmem) {
        mr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (mr) {
            void *hostmem = memory_region_get_ram_ptr(mr);
            memset(hostmem, 0, memory_region_size(mr));
        }
    }
    if (ct3d->lsa) {
        mr = host_memory_backend_get_memory(ct3d->lsa);
        if (mr) {
            void *lsa = memory_region_get_ram_ptr(mr);
            memset(lsa, 0, memory_region_size(mr));
        }
    }
    cxl_discard_all_event_records(&ct3d->cxl_dstate);
}

/*
 * CXL r3.1 Section 8.2.9.9.5.1: Sanitize (Opcode 4400h)
 *
 * Once the Sanitize command has started successfully, the device shall be
 * placed in the media disabled state. If the command fails or is interrupted
 * by a reset or power failure, it shall remain in the media disabled state
 * until a successful Sanitize command has been completed. During this state:
 *
 * 1. Memory writes to the device will have no effect, and all memory reads
 * will return random values (no user data returned, even for locations that
 * the failed Sanitize operation didn't sanitize yet).
 *
 * 2. Mailbox commands shall still be processed in the disabled state, except
 * that commands that access Sanitized areas shall fail with the Media Disabled
 * error code.
 */
static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint64_t total_mem; /* in MiB */
    int secs;

    total_mem = (ct3d->cxl_dstate.vmem_size + ct3d->cxl_dstate.pmem_size) >> 20;
    if (total_mem <= 512) {
        secs = 4;
    } else if (total_mem <= 1024) {
        secs = 8;
    } else if (total_mem <= 2 * 1024) {
        secs = 15;
    } else if (total_mem <= 4 * 1024) {
        secs = 30;
    } else if (total_mem <= 8 * 1024) {
        secs = 60;
    } else if (total_mem <= 16 * 1024) {
        secs = 2 * 60;
    } else if (total_mem <= 32 * 1024) {
        secs = 4 * 60;
    } else if (total_mem <= 64 * 1024) {
        secs = 8 * 60;
    } else if (total_mem <= 128 * 1024) {
        secs = 15 * 60;
    } else if (total_mem <= 256 * 1024) {
        secs = 30 * 60;
    } else if (total_mem <= 512 * 1024) {
        secs = 60 * 60;
    } else if (total_mem <= 1024 * 1024) {
        secs = 120 * 60;
    } else {
        secs = 240 * 60; /* max 4 hrs */
    }

    /* EBUSY other bg cmds as of now */
    cci->bg.runtime = secs * 1000UL;
    *len_out = 0;

    cxl_dev_disable_media(&ct3d->cxl_dstate);

    /* sanitize when done */
    return CXL_MBOX_BG_STARTED;
}

static CXLRetCode cmd_get_security_state(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    uint32_t *state = (uint32_t *)payload_out;

    *state = 0;
    *len_out = 4;
    return CXL_MBOX_SUCCESS;
}

/*
 * CXL r3.1 Section 8.2.9.9.4.1: Get Poison List (Opcode 4300h)
 *
 * This is very inefficient, but good enough for now!
 * Also the payload will always fit, so no need to handle the MORE flag and
 * make this stateful. We may want to allow longer poison lists to aid
 * testing of that kernel functionality.
 */
static CXLRetCode cmd_media_get_poison_list(const struct cxl_cmd *cmd,
                                            uint8_t *payload_in,
                                            size_t len_in,
                                            uint8_t *payload_out,
                                            size_t *len_out,
                                            CXLCCI *cci)
{
    struct get_poison_list_pl {
        uint64_t pa;
        uint64_t length;
    } QEMU_PACKED;

    struct get_poison_list_out_pl {
        uint8_t flags;
        uint8_t rsvd1;
        uint64_t overflow_timestamp;
        uint16_t count;
        uint8_t rsvd2[0x14];
        struct {
            uint64_t addr;
            uint32_t length;
            uint32_t resv;
        } QEMU_PACKED records[];
    } QEMU_PACKED;

    struct get_poison_list_pl *in = (void *)payload_in;
    struct get_poison_list_out_pl *out = (void *)payload_out;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint16_t record_count = 0, i = 0;
    uint64_t query_start, query_length;
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLPoison *ent;
    uint16_t out_pl_len;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    QLIST_FOREACH(ent, poison_list, node) {
        /* Check for no overlap */
        if (!ranges_overlap(ent->start, ent->length,
                            query_start, query_length)) {
            continue;
        }
        record_count++;
    }
    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    QLIST_FOREACH(ent, poison_list, node) {
        uint64_t start, stop;

        /* Check for no overlap */
        if (!ranges_overlap(ent->start, ent->length,
                            query_start, query_length)) {
            continue;
        }

        /* Deal with overlap */
        start = MAX(ROUND_DOWN(ent->start, 64ull), query_start);
        stop = MIN(ROUND_DOWN(ent->start, 64ull) + ent->length,
                   query_start + query_length);
        stq_le_p(&out->records[i].addr, start | (ent->type & 0x7));
        stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE);
        i++;
    }
    if (ct3d->poison_list_overflowed) {
        out->flags = (1 << 1);
        stq_le_p(&out->overflow_timestamp, ct3d->poison_list_overflow_ts);
    }
    if (scan_media_running(cci)) {
        out->flags |= (1 << 2);
    }

    stw_le_p(&out->count, record_count);
    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}
1694
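/*
 * Record encoding example (illustrative): a poison entry of type
 * CXL_POISON_TYPE_INJECTED at DPA 0x1000 spanning two cache lines comes
 * back with the type in the low three address bits and length = 2, since
 * lengths are reported in 64-byte units.
 */
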
/* CXL r3.1 Section 8.2.9.9.4.2: Inject Poison (Opcode 4301h) */
static CXLRetCode cmd_media_inject_poison(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLPoison *ent;
    struct inject_poison_pl {
        uint64_t dpa;
    };
    struct inject_poison_pl *in = (void *)payload_in;
    uint64_t dpa = ldq_le_p(&in->dpa);
    CXLPoison *p;

    /* Injecting into an already poisoned line is a no-op */
    QLIST_FOREACH(ent, poison_list, node) {
        if (dpa >= ent->start &&
            dpa + CXL_CACHE_LINE_SIZE <= ent->start + ent->length) {
            return CXL_MBOX_SUCCESS;
        }
    }
    /*
     * Freeze the list if there is an on-going scan media operation.
     */
    if (scan_media_running(cci)) {
        /*
         * XXX: Spec is ambiguous - is this case considered
         * a successful return despite not adding to the list?
         */
        goto success;
    }

    if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
        return CXL_MBOX_INJECT_POISON_LIMIT;
    }
    p = g_new0(CXLPoison, 1);

    p->length = CXL_CACHE_LINE_SIZE;
    p->start = dpa;
    p->type = CXL_POISON_TYPE_INJECTED;

    /*
     * Possible todo: Merge with existing entry if next to it and if same type
     */
    QLIST_INSERT_HEAD(poison_list, p, node);
    ct3d->poison_list_cnt++;
success:
    *len_out = 0;

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.4.3: Clear Poison (Opcode 4302h) */
static CXLRetCode cmd_media_clear_poison(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    struct clear_poison_pl {
        uint64_t dpa;
        uint8_t data[64];
    };
    CXLPoison *ent;
    uint64_t dpa;

    struct clear_poison_pl *in = (void *)payload_in;

    dpa = ldq_le_p(&in->dpa);
    if (dpa + CXL_CACHE_LINE_SIZE > cxl_dstate->static_mem_size +
        ct3d->dc.total_capacity) {
        return CXL_MBOX_INVALID_PA;
    }

    /* Clearing a region with no poison is not an error so always do so */
    if (cvc->set_cacheline) {
        if (!cvc->set_cacheline(ct3d, dpa, in->data)) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
    }

    /*
     * Freeze the list if there is an on-going scan media operation.
     */
    if (scan_media_running(cci)) {
        /*
         * XXX: Spec is ambiguous - is this case considered
         * a successful return despite not removing from the list?
         */
        goto success;
    }

    QLIST_FOREACH(ent, poison_list, node) {
        /*
         * Test for containment within an entry. Simpler than the general
         * case as we are clearing 64 bytes and entries are 64 byte aligned.
         */
        if ((dpa >= ent->start) && (dpa < ent->start + ent->length)) {
            break;
        }
    }
    if (!ent) {
        goto success;
    }

    QLIST_REMOVE(ent, node);
    ct3d->poison_list_cnt--;

    if (dpa > ent->start) {
        CXLPoison *frag;
        /* Cannot overflow as replacing existing entry */

        frag = g_new0(CXLPoison, 1);

        frag->start = ent->start;
        frag->length = dpa - ent->start;
        frag->type = ent->type;

        QLIST_INSERT_HEAD(poison_list, frag, node);
        ct3d->poison_list_cnt++;
    }

    if (dpa + CXL_CACHE_LINE_SIZE < ent->start + ent->length) {
        CXLPoison *frag;

        if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
            cxl_set_poison_list_overflowed(ct3d);
        } else {
            frag = g_new0(CXLPoison, 1);

            frag->start = dpa + CXL_CACHE_LINE_SIZE;
            frag->length = ent->start + ent->length - frag->start;
            frag->type = ent->type;
            QLIST_INSERT_HEAD(poison_list, frag, node);
            ct3d->poison_list_cnt++;
        }
    }
    /* Any needed fragments have been added; free the original entry */
    g_free(ent);
success:
    *len_out = 0;

    return CXL_MBOX_SUCCESS;
}

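/*
 * Worked split example (illustrative): clearing the 64-byte line at
 * dpa = 0x80 inside a poison entry covering [0x0, 0x200) removes that
 * entry and inserts two fragments, [0x0, 0x80) and [0xc0, 0x200), for a
 * net poison_list_cnt change of +1.
 */
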
/*
 * CXL r3.1 section 8.2.9.9.4.4: Get Scan Media Capabilities
 */
static CXLRetCode
cmd_media_get_scan_media_capabilities(const struct cxl_cmd *cmd,
                                      uint8_t *payload_in,
                                      size_t len_in,
                                      uint8_t *payload_out,
                                      size_t *len_out,
                                      CXLCCI *cci)
{
    struct get_scan_media_capabilities_pl {
        uint64_t pa;
        uint64_t length;
    } QEMU_PACKED;

    struct get_scan_media_capabilities_out_pl {
        uint32_t estimated_runtime_ms;
    };

    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    struct get_scan_media_capabilities_pl *in = (void *)payload_in;
    struct get_scan_media_capabilities_out_pl *out = (void *)payload_out;
    uint64_t query_start;
    uint64_t query_length;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    if (query_start + query_length > cxl_dstate->static_mem_size) {
        return CXL_MBOX_INVALID_PA;
    }

    /*
     * Just use 400 nanosecond access/read latency + 100 ns for
     * the cost of updating the poison list. For small enough
     * chunks return at least 1 ms.
     */
    stl_le_p(&out->estimated_runtime_ms,
             MAX(1, query_length * (0.0005L / 64)));

    *len_out = sizeof(*out);
    return CXL_MBOX_SUCCESS;
}

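/*
 * Estimate arithmetic (illustrative): 400 ns + 100 ns per 64-byte line is
 * 0.0005 ms per 64 bytes, so a 1 GiB query yields
 * (1 << 30) / 64 lines * 0.0005 ms ~= 8389 ms.
 */
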
static void __do_scan_media(CXLType3Dev *ct3d)
{
    CXLPoison *ent;
    unsigned int results_cnt = 0;

    QLIST_FOREACH(ent, &ct3d->scan_media_results, node) {
        results_cnt++;
    }

    /* only scan media may clear the overflow */
    if (ct3d->poison_list_overflowed &&
        ct3d->poison_list_cnt == results_cnt) {
        cxl_clear_poison_list_overflowed(ct3d);
    }
    /* scan media has run since last conventional reset */
    ct3d->scan_media_hasrun = true;
}

/*
 * CXL r3.1 section 8.2.9.9.4.5: Scan Media
 */
static CXLRetCode cmd_media_scan_media(const struct cxl_cmd *cmd,
                                       uint8_t *payload_in,
                                       size_t len_in,
                                       uint8_t *payload_out,
                                       size_t *len_out,
                                       CXLCCI *cci)
{
    struct scan_media_pl {
        uint64_t pa;
        uint64_t length;
        uint8_t flags;
    } QEMU_PACKED;

    struct scan_media_pl *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    uint64_t query_start;
    uint64_t query_length;
    CXLPoison *ent, *next;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    if (query_start + query_length > cxl_dstate->static_mem_size) {
        return CXL_MBOX_INVALID_PA;
    }
    if (ct3d->dc.num_regions && query_start + query_length >=
        cxl_dstate->static_mem_size + ct3d->dc.total_capacity) {
        return CXL_MBOX_INVALID_PA;
    }

    if (in->flags == 0) { /* TODO */
        qemu_log_mask(LOG_UNIMP,
                      "Scan Media Event Log is unsupported\n");
    }

    /* any previous results are discarded upon a new Scan Media */
    QLIST_FOREACH_SAFE(ent, &ct3d->scan_media_results, node, next) {
        QLIST_REMOVE(ent, node);
        g_free(ent);
    }

    /* kill the poison list - it will be recreated */
    if (ct3d->poison_list_overflowed) {
        QLIST_FOREACH_SAFE(ent, &ct3d->poison_list, node, next) {
            QLIST_REMOVE(ent, node);
            g_free(ent);
            ct3d->poison_list_cnt--;
        }
    }

    /*
     * Scan the backup list and move corresponding entries
     * into the results list, updating the poison list
     * when possible.
     */
    QLIST_FOREACH_SAFE(ent, &ct3d->poison_list_bkp, node, next) {
        CXLPoison *res;

        if (ent->start >= query_start + query_length ||
            ent->start + ent->length <= query_start) {
            continue;
        }

        /*
         * If a Get Poison List cmd comes in while this
         * scan is being done, it will see the new complete
         * list, while setting the respective flag.
         */
        if (ct3d->poison_list_cnt < CXL_POISON_LIST_LIMIT) {
            CXLPoison *p = g_new0(CXLPoison, 1);

            p->start = ent->start;
            p->length = ent->length;
            p->type = ent->type;
            QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
            ct3d->poison_list_cnt++;
        }

        res = g_new0(CXLPoison, 1);
        res->start = ent->start;
        res->length = ent->length;
        res->type = ent->type;
        QLIST_INSERT_HEAD(&ct3d->scan_media_results, res, node);

        QLIST_REMOVE(ent, node);
        g_free(ent);
    }

    cci->bg.runtime = MAX(1, query_length * (0.0005L / 64));
    *len_out = 0;

    return CXL_MBOX_BG_STARTED;
}

/*
 * CXL r3.1 section 8.2.9.9.4.6: Get Scan Media Results
 */
static CXLRetCode cmd_media_get_scan_media_results(const struct cxl_cmd *cmd,
                                                   uint8_t *payload_in,
                                                   size_t len_in,
                                                   uint8_t *payload_out,
                                                   size_t *len_out,
                                                   CXLCCI *cci)
{
    struct get_scan_media_results_out_pl {
        uint64_t dpa_restart;
        uint64_t length;
        uint8_t flags;
        uint8_t rsvd1;
        uint16_t count;
        uint8_t rsvd2[0xc];
        struct {
            uint64_t addr;
            uint32_t length;
            uint32_t resv;
        } QEMU_PACKED records[];
    } QEMU_PACKED;

    struct get_scan_media_results_out_pl *out = (void *)payload_out;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLPoisonList *scan_media_results = &ct3d->scan_media_results;
    CXLPoison *ent, *next;
    uint16_t total_count = 0, record_count = 0, i = 0;
    uint16_t out_pl_len;

    if (!ct3d->scan_media_hasrun) {
        return CXL_MBOX_UNSUPPORTED;
    }

    /*
     * Calculate limits; all entries are within the address range of the
     * last Scan Media call.
     */
    QLIST_FOREACH(ent, scan_media_results, node) {
        size_t rec_size = record_count * sizeof(out->records[0]);

        if (sizeof(*out) + rec_size < CXL_MAILBOX_MAX_PAYLOAD_SIZE) {
            record_count++;
        }
        total_count++;
    }

    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    memset(out, 0, out_pl_len);
    QLIST_FOREACH_SAFE(ent, scan_media_results, node, next) {
        uint64_t start, stop;

        if (i == record_count) {
            break;
        }

        start = ROUND_DOWN(ent->start, 64ull);
        stop = ROUND_DOWN(ent->start, 64ull) + ent->length;
        stq_le_p(&out->records[i].addr, start | (ent->type & 0x7));
        stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE);
        i++;

        /* consume the returned entry */
        QLIST_REMOVE(ent, node);
        g_free(ent);
    }

    stw_le_p(&out->count, record_count);
    if (total_count > record_count) {
        out->flags = (1 << 0); /* More Media Error Records */
    }

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}

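/*
 * Pagination note (illustrative): if a scan produced more records than fit
 * in one payload, the first call returns record_count records with flags
 * bit 0 ("More Media Error Records") set; repeating the command drains the
 * remainder, since returned entries are consumed above.
 */
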
/*
 * CXL r3.1 section 8.2.9.9.9.1: Get Dynamic Capacity Configuration
 * (Opcode: 4800h)
 */
static CXLRetCode cmd_dcd_get_dyn_cap_config(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    struct {
        uint8_t region_cnt;
        uint8_t start_rid;
    } QEMU_PACKED *in = (void *)payload_in;
    struct {
        uint8_t num_regions;
        uint8_t regions_returned;
        uint8_t rsvd1[6];
        struct {
            uint64_t base;
            uint64_t decode_len;
            uint64_t region_len;
            uint64_t block_size;
            uint32_t dsmadhandle;
            uint8_t flags;
            uint8_t rsvd2[3];
        } QEMU_PACKED records[];
    } QEMU_PACKED *out = (void *)payload_out;
    struct {
        uint32_t num_extents_supported;
        uint32_t num_extents_available;
        uint32_t num_tags_supported;
        uint32_t num_tags_available;
    } QEMU_PACKED *extra_out;
    uint16_t record_count;
    uint16_t i;
    uint16_t out_pl_len;
    uint8_t start_rid;

    start_rid = in->start_rid;
    if (start_rid >= ct3d->dc.num_regions) {
        return CXL_MBOX_INVALID_INPUT;
    }

    record_count = MIN(ct3d->dc.num_regions - start_rid, in->region_cnt);

    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    /* The extra output fields follow the variable-length record array */
    extra_out = (void *)(payload_out + out_pl_len);
    out_pl_len += sizeof(*extra_out);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    out->num_regions = ct3d->dc.num_regions;
    out->regions_returned = record_count;
    for (i = 0; i < record_count; i++) {
        stq_le_p(&out->records[i].base,
                 ct3d->dc.regions[start_rid + i].base);
        stq_le_p(&out->records[i].decode_len,
                 ct3d->dc.regions[start_rid + i].decode_len /
                 CXL_CAPACITY_MULTIPLIER);
        stq_le_p(&out->records[i].region_len,
                 ct3d->dc.regions[start_rid + i].len);
        stq_le_p(&out->records[i].block_size,
                 ct3d->dc.regions[start_rid + i].block_size);
        stl_le_p(&out->records[i].dsmadhandle,
                 ct3d->dc.regions[start_rid + i].dsmadhandle);
        out->records[i].flags = ct3d->dc.regions[start_rid + i].flags;
    }
    /*
     * TODO: Assign values once extents and tags are introduced
     * to use.
     */
    stl_le_p(&extra_out->num_extents_supported, CXL_NUM_EXTENTS_SUPPORTED);
    stl_le_p(&extra_out->num_extents_available, CXL_NUM_EXTENTS_SUPPORTED -
             ct3d->dc.total_extent_count);
    stl_le_p(&extra_out->num_tags_supported, CXL_NUM_TAGS_SUPPORTED);
    stl_le_p(&extra_out->num_tags_available, CXL_NUM_TAGS_SUPPORTED);

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}

/*
 * CXL r3.1 section 8.2.9.9.9.2:
 * Get Dynamic Capacity Extent List (Opcode 4801h)
 */
static CXLRetCode cmd_dcd_get_dyn_cap_ext_list(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len_in,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    struct {
        uint32_t extent_cnt;
        uint32_t start_extent_id;
    } QEMU_PACKED *in = (void *)payload_in;
    struct {
        uint32_t count;
        uint32_t total_extents;
        uint32_t generation_num;
        uint8_t rsvd[4];
        CXLDCExtentRaw records[];
    } QEMU_PACKED *out = (void *)payload_out;
    uint32_t start_extent_id = in->start_extent_id;
    CXLDCExtentList *extent_list = &ct3d->dc.extents;
    uint16_t record_count = 0, i = 0, record_done = 0;
    uint16_t out_pl_len, size;
    CXLDCExtent *ent;

    if (start_extent_id > ct3d->dc.total_extent_count) {
        return CXL_MBOX_INVALID_INPUT;
    }

    record_count = MIN(in->extent_cnt,
                       ct3d->dc.total_extent_count - start_extent_id);
    size = CXL_MAILBOX_MAX_PAYLOAD_SIZE - sizeof(*out);
    record_count = MIN(record_count, size / sizeof(out->records[0]));
    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);

    stl_le_p(&out->count, record_count);
    stl_le_p(&out->total_extents, ct3d->dc.total_extent_count);
    stl_le_p(&out->generation_num, ct3d->dc.ext_list_gen_seq);

    if (record_count > 0) {
        QTAILQ_FOREACH(ent, extent_list, node) {
            /* Point at the next output record each iteration */
            CXLDCExtentRaw *out_rec = &out->records[record_done];

            if (i++ < start_extent_id) {
                continue;
            }
            stq_le_p(&out_rec->start_dpa, ent->start_dpa);
            stq_le_p(&out_rec->len, ent->len);
            memcpy(&out_rec->tag, ent->tag, 0x10);
            stw_le_p(&out_rec->shared_seq, ent->shared_seq);

            record_done++;
            if (record_done == record_count) {
                break;
            }
        }
    }

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}

/*
 * Check whether any bit between addr[nr, nr+size) is set;
 * return true if any bit is set, otherwise return false.
 */
bool test_any_bits_set(const unsigned long *addr, unsigned long nr,
                       unsigned long size)
{
    unsigned long res = find_next_bit(addr, size + nr, nr);

    return res < nr + size;
}

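/*
 * Usage sketch (hypothetical values): with a bitmap in which only bits
 * 4..7 are set, test_any_bits_set(map, 0, 4) returns false while
 * test_any_bits_set(map, 2, 4) returns true, since find_next_bit() scans
 * the half-open range [nr, nr + size).
 */
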
CXLDCRegion *cxl_find_dc_region(CXLType3Dev *ct3d, uint64_t dpa, uint64_t len)
{
    int i;
    CXLDCRegion *region = &ct3d->dc.regions[0];

    if (dpa < region->base ||
        dpa >= region->base + ct3d->dc.total_capacity) {
        return NULL;
    }

    /*
     * CXL r3.1 section 9.13.3: Dynamic Capacity Device (DCD)
     *
     * Regions are used in increasing-DPA order, with Region 0 being used for
     * the lowest DPA of Dynamic Capacity and Region 7 for the highest DPA.
     * So check from the last region to find where the dpa belongs. Extents
     * that cross multiple regions are not allowed.
     */
    for (i = ct3d->dc.num_regions - 1; i >= 0; i--) {
        region = &ct3d->dc.regions[i];
        if (dpa >= region->base) {
            if (dpa + len > region->base + region->len) {
                return NULL;
            }
            return region;
        }
    }

    return NULL;
}

void cxl_insert_extent_to_extent_list(CXLDCExtentList *list,
                                      uint64_t dpa,
                                      uint64_t len,
                                      uint8_t *tag,
                                      uint16_t shared_seq)
{
    CXLDCExtent *extent;

    extent = g_new0(CXLDCExtent, 1);
    extent->start_dpa = dpa;
    extent->len = len;
    if (tag) {
        memcpy(extent->tag, tag, 0x10);
    }
    extent->shared_seq = shared_seq;

    QTAILQ_INSERT_TAIL(list, extent, node);
}

void cxl_remove_extent_from_extent_list(CXLDCExtentList *list,
                                        CXLDCExtent *extent)
{
    QTAILQ_REMOVE(list, extent, node);
    g_free(extent);
}

/*
 * Add a new extent to the extent "group" if the group exists;
 * otherwise, create a new group.
 * Return value: the extent group where the extent is inserted.
 */
CXLDCExtentGroup *cxl_insert_extent_to_extent_group(CXLDCExtentGroup *group,
                                                    uint64_t dpa,
                                                    uint64_t len,
                                                    uint8_t *tag,
                                                    uint16_t shared_seq)
{
    if (!group) {
        group = g_new0(CXLDCExtentGroup, 1);
        QTAILQ_INIT(&group->list);
    }
    cxl_insert_extent_to_extent_list(&group->list, dpa, len,
                                     tag, shared_seq);
    return group;
}

void cxl_extent_group_list_insert_tail(CXLDCExtentGroupList *list,
                                       CXLDCExtentGroup *group)
{
    QTAILQ_INSERT_TAIL(list, group, node);
}

void cxl_extent_group_list_delete_front(CXLDCExtentGroupList *list)
{
    CXLDCExtent *ent, *ent_next;
    CXLDCExtentGroup *group = QTAILQ_FIRST(list);

    QTAILQ_REMOVE(list, group, node);
    QTAILQ_FOREACH_SAFE(ent, &group->list, node, ent_next) {
        cxl_remove_extent_from_extent_list(&group->list, ent);
    }
    g_free(group);
}

/*
 * CXL r3.1 Table 8-168: Add Dynamic Capacity Response Input Payload
 * CXL r3.1 Table 8-170: Release Dynamic Capacity Input Payload
 */
typedef struct CXLUpdateDCExtentListInPl {
    uint32_t num_entries_updated;
    uint8_t flags;
    uint8_t rsvd[3];
    /* CXL r3.1 Table 8-169: Updated Extent */
    struct {
        uint64_t start_dpa;
        uint64_t len;
        uint8_t rsvd[8];
    } QEMU_PACKED updated_entries[];
} QEMU_PACKED CXLUpdateDCExtentListInPl;

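/*
 * Layout example (derived from the packed struct above): the fixed header
 * is 8 bytes (4 + 1 + 3) and each updated entry is 24 bytes, so a
 * single-entry payload is 32 bytes with updated_entries[0].start_dpa at
 * offset 8 and .len at offset 16.
 */
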
/*
 * For the extents in the extent list to operate on, check whether they are
 * valid:
 * 1. The extent should be in the range of a valid DC region;
 * 2. The extent should not cross multiple regions;
 * 3. The start DPA and the length of the extent should align with the block
 *    size of the region;
 * 4. The address ranges of multiple extents in the list should not overlap.
 */
static CXLRetCode cxl_detect_malformed_extent_list(CXLType3Dev *ct3d,
        const CXLUpdateDCExtentListInPl *in)
{
    uint64_t min_block_size = UINT64_MAX;
    CXLDCRegion *region;
    CXLDCRegion *lastregion = &ct3d->dc.regions[ct3d->dc.num_regions - 1];
    g_autofree unsigned long *blk_bitmap = NULL;
    uint64_t dpa, len;
    uint32_t i;

    for (i = 0; i < ct3d->dc.num_regions; i++) {
        region = &ct3d->dc.regions[i];
        min_block_size = MIN(min_block_size, region->block_size);
    }

    blk_bitmap = bitmap_new((lastregion->base + lastregion->len -
                             ct3d->dc.regions[0].base) / min_block_size);

    for (i = 0; i < in->num_entries_updated; i++) {
        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        region = cxl_find_dc_region(ct3d, dpa, len);
        if (!region) {
            return CXL_MBOX_INVALID_PA;
        }

        dpa -= ct3d->dc.regions[0].base;
        if (dpa % region->block_size || len % region->block_size) {
            return CXL_MBOX_INVALID_EXTENT_LIST;
        }
        /* the dpa range is already covered by other extents in the list */
        if (test_any_bits_set(blk_bitmap, dpa / min_block_size,
                              len / min_block_size)) {
            return CXL_MBOX_INVALID_EXTENT_LIST;
        }
        bitmap_set(blk_bitmap, dpa / min_block_size, len / min_block_size);
    }

    return CXL_MBOX_SUCCESS;
}

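/*
 * Example (illustrative): with one region at base 0 and a 2 MiB block
 * size, an input list of { (0, 4 MiB), (2 MiB, 2 MiB) } fails the bitmap
 * check above because the second entry hits blocks already set by the
 * first, so the command returns CXL_MBOX_INVALID_EXTENT_LIST.
 */
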
static CXLRetCode cxl_dcd_add_dyn_cap_rsp_dry_run(CXLType3Dev *ct3d,
        const CXLUpdateDCExtentListInPl *in)
{
    uint32_t i;
    CXLDCExtent *ent;
    CXLDCExtentGroup *ext_group;
    uint64_t dpa, len;
    Range range1, range2;

    for (i = 0; i < in->num_entries_updated; i++) {
        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        range_init_nofail(&range1, dpa, len);

        /*
         * The host-accepted DPA range must be contained by the first extent
         * group in the pending list.
         */
        ext_group = QTAILQ_FIRST(&ct3d->dc.extents_pending);
        if (!cxl_extents_contains_dpa_range(&ext_group->list, dpa, len)) {
            return CXL_MBOX_INVALID_PA;
        }

        /* to-be-added range should not overlap with range already accepted */
        QTAILQ_FOREACH(ent, &ct3d->dc.extents, node) {
            range_init_nofail(&range2, ent->start_dpa, ent->len);
            if (range_overlaps_range(&range1, &range2)) {
                return CXL_MBOX_INVALID_PA;
            }
        }
    }
    return CXL_MBOX_SUCCESS;
}

/*
 * CXL r3.1 section 8.2.9.9.9.3: Add Dynamic Capacity Response (Opcode 4802h)
 * An extent is added to the extent list and becomes usable only after the
 * response is processed successfully.
 */
static CXLRetCode cmd_dcd_add_dyn_cap_rsp(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLUpdateDCExtentListInPl *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDCExtentList *extent_list = &ct3d->dc.extents;
    uint32_t i;
    uint64_t dpa, len;
    CXLRetCode ret;

    /* An empty response means the host accepted none of the offered extents */
    if (in->num_entries_updated == 0) {
        cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);
        return CXL_MBOX_SUCCESS;
    }

    /* Adding these extents would exceed the device's extent tracking limit */
    if (in->num_entries_updated + ct3d->dc.total_extent_count >
        CXL_NUM_EXTENTS_SUPPORTED) {
        return CXL_MBOX_RESOURCES_EXHAUSTED;
    }

    ret = cxl_detect_malformed_extent_list(ct3d, in);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    ret = cxl_dcd_add_dyn_cap_rsp_dry_run(ct3d, in);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    for (i = 0; i < in->num_entries_updated; i++) {
        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        cxl_insert_extent_to_extent_list(extent_list, dpa, len, NULL, 0);
        ct3d->dc.total_extent_count += 1;
        ct3_set_region_block_backed(ct3d, dpa, len);
    }
    /* Remove the first extent group in the pending list */
    cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);

    return CXL_MBOX_SUCCESS;
}

/*
 * Copy extent list from src to dst.
 * Return value: number of extents copied.
 */
static uint32_t copy_extent_list(CXLDCExtentList *dst,
                                 const CXLDCExtentList *src)
{
    uint32_t cnt = 0;
    CXLDCExtent *ent;

    if (!dst || !src) {
        return 0;
    }

    QTAILQ_FOREACH(ent, src, node) {
        cxl_insert_extent_to_extent_list(dst, ent->start_dpa, ent->len,
                                         ent->tag, ent->shared_seq);
        cnt++;
    }
    return cnt;
}

static CXLRetCode cxl_dc_extent_release_dry_run(CXLType3Dev *ct3d,
        const CXLUpdateDCExtentListInPl *in, CXLDCExtentList *updated_list,
        uint32_t *updated_list_size)
{
    CXLDCExtent *ent, *ent_next;
    uint64_t dpa, len;
    uint32_t i;
    int cnt_delta = 0;
    CXLRetCode ret = CXL_MBOX_SUCCESS;

    QTAILQ_INIT(updated_list);
    copy_extent_list(updated_list, &ct3d->dc.extents);

    for (i = 0; i < in->num_entries_updated; i++) {
        Range range;

        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        /* Reject if the DPA range is not fully backed by valid extents */
        if (!ct3_test_region_block_backed(ct3d, dpa, len)) {
            ret = CXL_MBOX_INVALID_PA;
            goto free_and_exit;
        }

        /* After this point, extent overflow is the only error that can occur */
        while (len > 0) {
            QTAILQ_FOREACH(ent, updated_list, node) {
                range_init_nofail(&range, ent->start_dpa, ent->len);

                if (range_contains(&range, dpa)) {
                    uint64_t len1, len2 = 0, len_done = 0;
                    uint64_t ent_start_dpa = ent->start_dpa;
                    uint64_t ent_len = ent->len;

                    len1 = dpa - ent->start_dpa;
                    /* Found the extent or the subset of an existing extent */
                    if (range_contains(&range, dpa + len - 1)) {
                        len2 = ent_start_dpa + ent_len - dpa - len;
                    } else {
                        dpa = ent_start_dpa + ent_len;
                    }
                    len_done = ent_len - len1 - len2;

                    cxl_remove_extent_from_extent_list(updated_list, ent);
                    cnt_delta--;

                    if (len1) {
                        cxl_insert_extent_to_extent_list(updated_list,
                                                         ent_start_dpa,
                                                         len1, NULL, 0);
                        cnt_delta++;
                    }
                    if (len2) {
                        cxl_insert_extent_to_extent_list(updated_list,
                                                         dpa + len,
                                                         len2, NULL, 0);
                        cnt_delta++;
                    }

                    if (cnt_delta + ct3d->dc.total_extent_count >
                        CXL_NUM_EXTENTS_SUPPORTED) {
                        ret = CXL_MBOX_RESOURCES_EXHAUSTED;
                        goto free_and_exit;
                    }

                    len -= len_done;
                    break;
                }
            }
        }
    }
free_and_exit:
    if (ret != CXL_MBOX_SUCCESS) {
        QTAILQ_FOREACH_SAFE(ent, updated_list, node, ent_next) {
            cxl_remove_extent_from_extent_list(updated_list, ent);
        }
        *updated_list_size = 0;
    } else {
        *updated_list_size = ct3d->dc.total_extent_count + cnt_delta;
    }

    return ret;
}

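/*
 * Worked split example (illustrative): releasing (dpa = 64 MiB,
 * len = 32 MiB) against an accepted extent covering [0, 128 MiB) gives
 * len1 = 64 MiB and len2 = 32 MiB, so the dry run replaces the original
 * entry with [0, 64 MiB) and [96 MiB, 128 MiB), for a cnt_delta of +1.
 */
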
/*
 * CXL r3.1 section 8.2.9.9.9.4: Release Dynamic Capacity (Opcode 4803h)
 */
static CXLRetCode cmd_dcd_release_dyn_cap(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLUpdateDCExtentListInPl *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDCExtentList updated_list;
    CXLDCExtent *ent, *ent_next;
    uint32_t updated_list_size;
    CXLRetCode ret;

    if (in->num_entries_updated == 0) {
        return CXL_MBOX_INVALID_INPUT;
    }

    ret = cxl_detect_malformed_extent_list(ct3d, in);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    ret = cxl_dc_extent_release_dry_run(ct3d, in, &updated_list,
                                        &updated_list_size);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    /*
     * If the dry run succeeds, updated_list is the new extent list: clear
     * the extents in the accepted list, copy the extents from updated_list
     * into it, and update the extent count.
     */
    QTAILQ_FOREACH_SAFE(ent, &ct3d->dc.extents, node, ent_next) {
        ct3_clear_region_block_backed(ct3d, ent->start_dpa, ent->len);
        cxl_remove_extent_from_extent_list(&ct3d->dc.extents, ent);
    }
    copy_extent_list(&ct3d->dc.extents, &updated_list);
    QTAILQ_FOREACH_SAFE(ent, &updated_list, node, ent_next) {
        ct3_set_region_block_backed(ct3d, ent->start_dpa, ent->len);
        cxl_remove_extent_from_extent_list(&updated_list, ent);
    }
    ct3d->dc.total_extent_count = updated_list_size;

    return CXL_MBOX_SUCCESS;
}

static const struct cxl_cmd cxl_cmd_set[256][256] = {
    [EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS",
        cmd_events_get_records, 1, 0 },
    [EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS",
        cmd_events_clear_records, ~0, CXL_MBOX_IMMEDIATE_LOG_CHANGE },
    [EVENTS][GET_INTERRUPT_POLICY] = { "EVENTS_GET_INTERRUPT_POLICY",
        cmd_events_get_interrupt_policy, 0, 0 },
    [EVENTS][SET_INTERRUPT_POLICY] = { "EVENTS_SET_INTERRUPT_POLICY",
        cmd_events_set_interrupt_policy,
        ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE },
    [FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO",
        cmd_firmware_update_get_info, 0, 0 },
    [FIRMWARE_UPDATE][TRANSFER] = { "FIRMWARE_UPDATE_TRANSFER",
        cmd_firmware_update_transfer, ~0, CXL_MBOX_BACKGROUND_OPERATION },
    [FIRMWARE_UPDATE][ACTIVATE] = { "FIRMWARE_UPDATE_ACTIVATE",
        cmd_firmware_update_activate, 2, CXL_MBOX_BACKGROUND_OPERATION },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set,
        8, CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported,
        0, 0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [FEATURES][GET_SUPPORTED] = { "FEATURES_GET_SUPPORTED",
        cmd_features_get_supported, 0x8, 0 },
    [FEATURES][GET_FEATURE] = { "FEATURES_GET_FEATURE",
        cmd_features_get_feature, 0x15, 0 },
    [FEATURES][SET_FEATURE] = { "FEATURES_SET_FEATURE",
        cmd_features_set_feature,
        ~0,
        (CXL_MBOX_IMMEDIATE_CONFIG_CHANGE |
         CXL_MBOX_IMMEDIATE_DATA_CHANGE |
         CXL_MBOX_IMMEDIATE_POLICY_CHANGE |
         CXL_MBOX_IMMEDIATE_LOG_CHANGE |
         CXL_MBOX_SECURITY_STATE_CHANGE) },
    [IDENTIFY][MEMORY_DEVICE] = { "IDENTIFY_MEMORY_DEVICE",
        cmd_identify_memory_device, 0, 0 },
    [CCLS][GET_PARTITION_INFO] = { "CCLS_GET_PARTITION_INFO",
        cmd_ccls_get_partition_info, 0, 0 },
    [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 },
    [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa,
        ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE | CXL_MBOX_IMMEDIATE_DATA_CHANGE },
    [SANITIZE][OVERWRITE] = { "SANITIZE_OVERWRITE", cmd_sanitize_overwrite, 0,
        (CXL_MBOX_IMMEDIATE_DATA_CHANGE |
         CXL_MBOX_SECURITY_STATE_CHANGE |
         CXL_MBOX_BACKGROUND_OPERATION) },
    [PERSISTENT_MEM][GET_SECURITY_STATE] = { "GET_SECURITY_STATE",
        cmd_get_security_state, 0, 0 },
    [MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST",
        cmd_media_get_poison_list, 16, 0 },
    [MEDIA_AND_POISON][INJECT_POISON] = { "MEDIA_AND_POISON_INJECT_POISON",
        cmd_media_inject_poison, 8, 0 },
    [MEDIA_AND_POISON][CLEAR_POISON] = { "MEDIA_AND_POISON_CLEAR_POISON",
        cmd_media_clear_poison, 72, 0 },
    [MEDIA_AND_POISON][GET_SCAN_MEDIA_CAPABILITIES] = {
        "MEDIA_AND_POISON_GET_SCAN_MEDIA_CAPABILITIES",
        cmd_media_get_scan_media_capabilities, 16, 0 },
    [MEDIA_AND_POISON][SCAN_MEDIA] = { "MEDIA_AND_POISON_SCAN_MEDIA",
        cmd_media_scan_media, 17, CXL_MBOX_BACKGROUND_OPERATION },
    [MEDIA_AND_POISON][GET_SCAN_MEDIA_RESULTS] = {
        "MEDIA_AND_POISON_GET_SCAN_MEDIA_RESULTS",
        cmd_media_get_scan_media_results, 0, 0 },
};

static const struct cxl_cmd cxl_cmd_set_dcd[256][256] = {
    [DCD_CONFIG][GET_DC_CONFIG] = { "DCD_GET_DC_CONFIG",
        cmd_dcd_get_dyn_cap_config, 2, 0 },
    [DCD_CONFIG][GET_DYN_CAP_EXT_LIST] = {
        "DCD_GET_DYNAMIC_CAPACITY_EXTENT_LIST", cmd_dcd_get_dyn_cap_ext_list,
        8, 0 },
    [DCD_CONFIG][ADD_DYN_CAP_RSP] = {
        "DCD_ADD_DYNAMIC_CAPACITY_RESPONSE", cmd_dcd_add_dyn_cap_rsp,
        ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE },
    [DCD_CONFIG][RELEASE_DYN_CAP] = {
        "DCD_RELEASE_DYNAMIC_CAPACITY", cmd_dcd_release_dyn_cap,
        ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE },
};

static const struct cxl_cmd cxl_cmd_set_sw[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [INFOSTAT][BACKGROUND_OPERATION_STATUS] = { "BACKGROUND_OPERATION_STATUS",
        cmd_infostat_bg_op_sts, 0, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 8,
        CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
        0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [PHYSICAL_SWITCH][IDENTIFY_SWITCH_DEVICE] = { "IDENTIFY_SWITCH_DEVICE",
        cmd_identify_switch_device, 0, 0 },
    [PHYSICAL_SWITCH][GET_PHYSICAL_PORT_STATE] = { "SWITCH_PHYSICAL_PORT_STATS",
        cmd_get_physical_port_state, ~0, 0 },
    [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
        cmd_tunnel_management_cmd, ~0, 0 },
};

/*
 * While the command is executing in the background, the device should
 * update the percentage complete in the Background Command Status Register
 * at least once per second.
 */

#define CXL_MBOX_BG_UPDATE_FREQ 1000UL

int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd,
                            size_t len_in, uint8_t *pl_in, size_t *len_out,
                            uint8_t *pl_out, bool *bg_started)
{
    int ret;
    const struct cxl_cmd *cxl_cmd;
    opcode_handler h;
    CXLDeviceState *cxl_dstate;

    *len_out = 0;
    cxl_cmd = &cci->cxl_cmd_set[set][cmd];
    h = cxl_cmd->handler;
    if (!h) {
        qemu_log_mask(LOG_UNIMP, "Command %04xh not implemented\n",
                      set << 8 | cmd);
        return CXL_MBOX_UNSUPPORTED;
    }

    if (len_in != cxl_cmd->in && cxl_cmd->in != ~0) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    /* Only one bg command at a time */
    if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) &&
        cci->bg.runtime > 0) {
        return CXL_MBOX_BUSY;
    }

    /* forbid any selected commands while the media is disabled */
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;

        if (cxl_dev_media_disabled(cxl_dstate)) {
            if (h == cmd_events_get_records ||
                h == cmd_ccls_get_partition_info ||
                h == cmd_ccls_set_lsa ||
                h == cmd_ccls_get_lsa ||
                h == cmd_logs_get_log ||
                h == cmd_media_get_poison_list ||
                h == cmd_media_inject_poison ||
                h == cmd_media_clear_poison ||
                h == cmd_sanitize_overwrite ||
                h == cmd_firmware_update_transfer ||
                h == cmd_firmware_update_activate) {
                return CXL_MBOX_MEDIA_DISABLED;
            }
        }
    }

    ret = (*h)(cxl_cmd, pl_in, len_in, pl_out, len_out, cci);
    if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) &&
        ret == CXL_MBOX_BG_STARTED) {
        *bg_started = true;
    } else {
        *bg_started = false;
    }

    /* Set bg and the return code */
    if (*bg_started) {
        uint64_t now;

        cci->bg.opcode = (set << 8) | cmd;

        cci->bg.complete_pct = 0;
        cci->bg.ret_code = 0;

        now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
        cci->bg.starttime = now;
        timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
    }

    return ret;
}

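/*
 * Caller sketch (illustrative; buffers are caller-owned and the command
 * set/opcode values come from the enum in this file):
 *
 *   uint8_t pl[CXL_MAILBOX_MAX_PAYLOAD_SIZE] = {};
 *   size_t len_out;
 *   bool bg;
 *   int rc = cxl_process_cci_message(cci, TIMESTAMP, GET, 0, pl,
 *                                    &len_out, pl, &bg);
 *
 * On CXL_MBOX_SUCCESS, len_out bytes of pl hold the response payload; if
 * bg is true, completion is signalled later by the background timer below.
 */
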
static void bg_timercb(void *opaque)
{
    CXLCCI *cci = opaque;
    uint64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    uint64_t total_time = cci->bg.starttime + cci->bg.runtime;

    assert(cci->bg.runtime > 0);

    if (now >= total_time) { /* we are done */
        uint16_t ret = CXL_MBOX_SUCCESS;

        cci->bg.complete_pct = 100;
        cci->bg.ret_code = ret;
        switch (cci->bg.opcode) {
        case 0x0201: /* fw transfer */
            __do_firmware_xfer(cci);
            break;
        case 0x4400: /* sanitize */
        {
            CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

            __do_sanitization(ct3d);
            cxl_dev_enable_media(&ct3d->cxl_dstate);
        }
            break;
        case 0x4304: /* scan media */
        {
            CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

            __do_scan_media(ct3d);
            break;
        }
        default:
            __builtin_unreachable();
            break;
        }
    } else {
        /* estimate only */
        cci->bg.complete_pct = 100 * now / total_time;
        timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
    }

    if (cci->bg.complete_pct == 100) {
        /* TODO: generalize to switch CCI */
        CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
        CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
        PCIDevice *pdev = PCI_DEVICE(cci->d);

        cci->bg.starttime = 0;
        /* registers are updated, allow new bg-capable cmds */
        cci->bg.runtime = 0;

        if (msix_enabled(pdev)) {
            msix_notify(pdev, cxl_dstate->mbox_msi_n);
        } else if (msi_enabled(pdev)) {
            msi_notify(pdev, cxl_dstate->mbox_msi_n);
        }
    }
}

static void cxl_rebuild_cel(CXLCCI *cci)
{
    cci->cel_size = 0; /* Reset for a fresh build */
    for (int set = 0; set < 256; set++) {
        for (int cmd = 0; cmd < 256; cmd++) {
            if (cci->cxl_cmd_set[set][cmd].handler) {
                const struct cxl_cmd *c = &cci->cxl_cmd_set[set][cmd];
                struct cel_log *log =
                    &cci->cel_log[cci->cel_size];

                log->opcode = (set << 8) | cmd;
                log->effect = c->effect;
                cci->cel_size++;
            }
        }
    }
}

void cxl_init_cci(CXLCCI *cci, size_t payload_max)
{
    cci->payload_max = payload_max;
    cxl_rebuild_cel(cci);

    cci->bg.complete_pct = 0;
    cci->bg.starttime = 0;
    cci->bg.runtime = 0;
    cci->bg.timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                 bg_timercb, cci);

    memset(&cci->fw, 0, sizeof(cci->fw));
    cci->fw.active_slot = 1;
    cci->fw.slot[cci->fw.active_slot - 1] = true;
}

static void cxl_copy_cci_commands(CXLCCI *cci,
                                  const struct cxl_cmd (*cxl_cmds)[256])
{
    for (int set = 0; set < 256; set++) {
        for (int cmd = 0; cmd < 256; cmd++) {
            if (cxl_cmds[set][cmd].handler) {
                cci->cxl_cmd_set[set][cmd] = cxl_cmds[set][cmd];
            }
        }
    }
}

void cxl_add_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmd_set)[256],
                          size_t payload_max)
{
    cci->payload_max = MAX(payload_max, cci->payload_max);
    cxl_copy_cci_commands(cci, cxl_cmd_set);
    cxl_rebuild_cel(cci);
}

void cxl_initialize_mailbox_swcci(CXLCCI *cci, DeviceState *intf,
                                  DeviceState *d, size_t payload_max)
{
    cxl_copy_cci_commands(cci, cxl_cmd_set_sw);
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}

void cxl_initialize_mailbox_t3(CXLCCI *cci, DeviceState *d, size_t payload_max)
{
    CXLType3Dev *ct3d = CXL_TYPE3(d);

    cxl_copy_cci_commands(cci, cxl_cmd_set);
    if (ct3d->dc.num_regions) {
        cxl_copy_cci_commands(cci, cxl_cmd_set_dcd);
    }
    cci->d = d;

    /* No separation for PCI MB as protocol handled in PCI device */
    cci->intf = d;
    cxl_init_cci(cci, payload_max);
}

static const struct cxl_cmd cxl_cmd_set_t3_ld[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
        0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
};

void cxl_initialize_t3_ld_cci(CXLCCI *cci, DeviceState *d, DeviceState *intf,
                              size_t payload_max)
{
    cxl_copy_cci_commands(cci, cxl_cmd_set_t3_ld);
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}

static const struct cxl_cmd cxl_cmd_set_t3_fm_owned_ld_mctp[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
        0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
        cmd_tunnel_management_cmd, ~0, 0 },
};

void cxl_initialize_t3_fm_owned_ld_mctpcci(CXLCCI *cci, DeviceState *d,
                                           DeviceState *intf,
                                           size_t payload_max)
{
    cxl_copy_cci_commands(cci, cxl_cmd_set_t3_fm_owned_ld_mctp);
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}