/*
 * CXL Utility library for mailbox interface
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/cxl/cxl.h"
#include "hw/cxl/cxl_events.h"
#include "hw/cxl/cxl_mailbox.h"
#include "hw/pci/pci.h"
#include "hw/pci-bridge/cxl_upstream_port.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "qemu/units.h"
#include "qemu/uuid.h"
#include "sysemu/hostmem.h"
#include "qemu/range.h"

#define CXL_CAPACITY_MULTIPLIER   (256 * MiB)
#define CXL_DC_EVENT_LOG_SIZE 8
#define CXL_NUM_EXTENTS_SUPPORTED 512
#define CXL_NUM_TAGS_SUPPORTED 0

/*
 * How to add a new command: an example with command set FOO and command BAR.
 *  1. Add the command set and command to the enum.
 *     FOO    = 0x7f,
 *          #define BAR 0
 *  2. Implement the handler, matching the prototype used throughout this file:
 *     static CXLRetCode cmd_foo_bar(const struct cxl_cmd *cmd,
 *                                   uint8_t *payload_in, size_t len_in,
 *                                   uint8_t *payload_out, size_t *len_out,
 *                                   CXLCCI *cci)
 *     { ... return CXL_MBOX_SUCCESS; }
 *  3. Add the command to the cxl_cmd_set[][]
 *     [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y },
 *
 * Writing the handler:
 * The handler is given the &struct cxl_cmd, the &CXLDeviceState, and the
 * in/out length of the payload. The handler is responsible for consuming the
 * payload from cmd->payload and operating upon it as necessary. It must then
 * fill the output data into cmd->payload (overwriting what was there),
 * set the length, and return a valid return code.
 *
 * XXX: The handler need not worry about endianness. The payload is read out of
 * a register interface that already deals with it.
 */
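/*
 * A minimal sketch of such a handler. FOO/BAR and the "add one and echo"
 * semantics here are hypothetical, purely for illustration; this handler is
 * not registered anywhere:
 *
 *   static CXLRetCode cmd_foo_bar(const struct cxl_cmd *cmd,
 *                                 uint8_t *payload_in, size_t len_in,
 *                                 uint8_t *payload_out, size_t *len_out,
 *                                 CXLCCI *cci)
 *   {
 *       uint32_t val;
 *
 *       if (len_in < sizeof(val)) {
 *           return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
 *       }
 *       val = ldl_le_p(payload_in);
 *       stl_le_p(payload_out, val + 1);
 *       *len_out = sizeof(val);
 *       return CXL_MBOX_SUCCESS;
 *   }
 */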

enum {
    INFOSTAT    = 0x00,
        #define IS_IDENTIFY   0x1
        #define BACKGROUND_OPERATION_STATUS    0x2
    EVENTS      = 0x01,
        #define GET_RECORDS   0x0
        #define CLEAR_RECORDS     0x1
        #define GET_INTERRUPT_POLICY   0x2
        #define SET_INTERRUPT_POLICY   0x3
    FIRMWARE_UPDATE = 0x02,
        #define GET_INFO      0x0
        #define TRANSFER      0x1
        #define ACTIVATE      0x2
    TIMESTAMP   = 0x03,
        #define GET           0x0
        #define SET           0x1
    LOGS        = 0x04,
        #define GET_SUPPORTED 0x0
        #define GET_LOG       0x1
    FEATURES    = 0x05,
        #define GET_SUPPORTED 0x0
        #define GET_FEATURE   0x1
        #define SET_FEATURE   0x2
    IDENTIFY    = 0x40,
        #define MEMORY_DEVICE 0x0
    CCLS        = 0x41,
        #define GET_PARTITION_INFO     0x0
        #define GET_LSA       0x2
        #define SET_LSA       0x3
    SANITIZE    = 0x44,
        #define OVERWRITE     0x0
        #define SECURE_ERASE  0x1
    PERSISTENT_MEM = 0x45,
        #define GET_SECURITY_STATE     0x0
    MEDIA_AND_POISON = 0x43,
        #define GET_POISON_LIST        0x0
        #define INJECT_POISON          0x1
        #define CLEAR_POISON           0x2
        #define GET_SCAN_MEDIA_CAPABILITIES 0x3
        #define SCAN_MEDIA             0x4
        #define GET_SCAN_MEDIA_RESULTS 0x5
    DCD_CONFIG  = 0x48,
        #define GET_DC_CONFIG          0x0
        #define GET_DYN_CAP_EXT_LIST   0x1
        #define ADD_DYN_CAP_RSP        0x2
        #define RELEASE_DYN_CAP        0x3
    PHYSICAL_SWITCH = 0x51,
        #define IDENTIFY_SWITCH_DEVICE      0x0
        #define GET_PHYSICAL_PORT_STATE     0x1
    TUNNEL = 0x53,
        #define MANAGEMENT_COMMAND     0x0
};
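
/*
 * The 16-bit mailbox opcode is (command_set << 8) | command, so e.g.
 * EVENTS/GET_RECORDS above corresponds to opcode 0100h.
 */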

/* CCI Message Format CXL r3.1 Figure 7-19 */
typedef struct CXLCCIMessage {
    uint8_t category;
#define CXL_CCI_CAT_REQ 0
#define CXL_CCI_CAT_RSP 1
    uint8_t tag;
    uint8_t resv1;
    uint8_t command;
    uint8_t command_set;
    uint8_t pl_length[3];
    uint16_t rc;
    uint16_t vendor_specific;
    uint8_t payload[];
} QEMU_PACKED CXLCCIMessage;

/* This command is only defined for an MLD FM-owned LD or an MHD */
static CXLRetCode cmd_tunnel_management_cmd(const struct cxl_cmd *cmd,
                                            uint8_t *payload_in,
                                            size_t len_in,
                                            uint8_t *payload_out,
                                            size_t *len_out,
                                            CXLCCI *cci)
{
    PCIDevice *tunnel_target;
    CXLCCI *target_cci;
    struct {
        uint8_t port_or_ld_id;
        uint8_t target_type;
        uint16_t size;
        CXLCCIMessage ccimessage;
    } QEMU_PACKED *in;
    struct {
        uint16_t resp_len;
        uint8_t resv[2];
        CXLCCIMessage ccimessage;
    } QEMU_PACKED *out;
    size_t pl_length, length_out;
    bool bg_started;
    int rc;

    if (cmd->in < sizeof(*in)) {
        return CXL_MBOX_INVALID_INPUT;
    }
    in = (void *)payload_in;
    out = (void *)payload_out;

    if (len_in < sizeof(*in)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /* Enough room for minimum sized message - no payload */
    if (in->size < sizeof(in->ccimessage)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /*
     * Length of input payload should be in->size + a wrapping tunnel header
     * (the in and out structures place ccimessage at the same offset).
     */
    if (in->size != len_in - offsetof(typeof(*out), ccimessage)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    if (in->ccimessage.category != CXL_CCI_CAT_REQ) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (in->target_type != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "Tunneled Command sent to non-existent FM-LD");
        return CXL_MBOX_INVALID_INPUT;
    }

    /*
     * The target of a tunnel unfortunately depends on the type of CCI
     * reading the message.
     * If in a switch, it is the port number.
     * If in an MLD, it is the LD number.
     * If in an MHD, the target type indicates where we are going.
     */
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

        if (in->port_or_ld_id != 0) {
            /* Only pretending to have one for now! */
            return CXL_MBOX_INVALID_INPUT;
        }
        target_cci = &ct3d->ld0_cci;
    } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
        CXLUpstreamPort *usp = CXL_USP(cci->d);

        tunnel_target = pcie_find_port_by_pn(&PCI_BRIDGE(usp)->sec_bus,
                                             in->port_or_ld_id);
        if (!tunnel_target) {
            return CXL_MBOX_INVALID_INPUT;
        }
        tunnel_target =
            pci_bridge_get_sec_bus(PCI_BRIDGE(tunnel_target))->devices[0];
        if (!tunnel_target) {
            return CXL_MBOX_INVALID_INPUT;
        }
        if (object_dynamic_cast(OBJECT(tunnel_target), TYPE_CXL_TYPE3)) {
            CXLType3Dev *ct3d = CXL_TYPE3(tunnel_target);

            /* Tunneled VDMs always land on FM Owned LD */
            target_cci = &ct3d->vdm_fm_owned_ld_mctp_cci;
        } else {
            return CXL_MBOX_INVALID_INPUT;
        }
    } else {
        return CXL_MBOX_INVALID_INPUT;
    }

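    /* pl_length is a 24-bit little-endian byte count (CXL r3.1 Figure 7-19) */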
    pl_length = in->ccimessage.pl_length[2] << 16 |
                in->ccimessage.pl_length[1] << 8 | in->ccimessage.pl_length[0];
    rc = cxl_process_cci_message(target_cci,
                                 in->ccimessage.command_set,
                                 in->ccimessage.command,
                                 pl_length, in->ccimessage.payload,
                                 &length_out, out->ccimessage.payload,
                                 &bg_started);
    /* Payload should be in place. The rest of the CCI header needs filling. */
    out->resp_len = length_out + sizeof(CXLCCIMessage);
    st24_le_p(out->ccimessage.pl_length, length_out);
    out->ccimessage.rc = rc;
    out->ccimessage.category = CXL_CCI_CAT_RSP;
    out->ccimessage.command = in->ccimessage.command;
    out->ccimessage.command_set = in->ccimessage.command_set;
    out->ccimessage.tag = in->ccimessage.tag;
    *len_out = length_out + sizeof(*out);

    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cmd_events_get_records(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in, size_t len_in,
                                         uint8_t *payload_out, size_t *len_out,
                                         CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLGetEventPayload *pl;
    uint8_t log_type;
    int max_recs;

    if (cmd->in < sizeof(log_type)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    log_type = payload_in[0];

    pl = (CXLGetEventPayload *)payload_out;

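    /* Cap the record count to what fits in the payload after the header */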
    max_recs = (cxlds->payload_size - CXL_EVENT_PAYLOAD_HDR_SIZE) /
                CXL_EVENT_RECORD_SIZE;
    if (max_recs > 0xFFFF) {
        max_recs = 0xFFFF;
    }

    return cxl_event_get_records(cxlds, pl, log_type, max_recs, len_out);
}

static CXLRetCode cmd_events_clear_records(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLClearEventPayload *pl;

    pl = (CXLClearEventPayload *)payload_in;

    if (len_in < sizeof(*pl) ||
        len_in < sizeof(*pl) + sizeof(*pl->handle) * pl->nr_recs) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    *len_out = 0;
    return cxl_event_clear_records(cxlds, pl);
}

static CXLRetCode cmd_events_get_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    policy = (CXLEventInterruptPolicy *)payload_out;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    if (log->irq_enabled) {
        policy->info_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    if (log->irq_enabled) {
        policy->warn_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    if (log->irq_enabled) {
        policy->failure_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    if (log->irq_enabled) {
        policy->fatal_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    if (log->irq_enabled) {
        /* Dynamic Capacity borrows the same vector as info */
        policy->dyn_cap_settings = CXL_INT_MSI_MSIX;
    }

    *len_out = sizeof(*policy);
    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cmd_events_set_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    if (len_in < CXL_EVENT_INT_SETTING_MIN_LEN) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    policy = (CXLEventInterruptPolicy *)payload_in;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    log->irq_enabled = (policy->info_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    log->irq_enabled = (policy->warn_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    log->irq_enabled = (policy->failure_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    log->irq_enabled = (policy->fatal_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    /* DCD is optional */
    if (len_in < sizeof(*policy)) {
        return CXL_MBOX_SUCCESS;
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    log->irq_enabled = (policy->dyn_cap_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.1.1: Identify (Opcode 0001h) */
static CXLRetCode cmd_infostat_identify(const struct cxl_cmd *cmd,
                                        uint8_t *payload_in,
                                        size_t len_in,
                                        uint8_t *payload_out,
                                        size_t *len_out,
                                        CXLCCI *cci)
{
    PCIDeviceClass *class = PCI_DEVICE_GET_CLASS(cci->d);
    struct {
        uint16_t pcie_vid;
        uint16_t pcie_did;
        uint16_t pcie_subsys_vid;
        uint16_t pcie_subsys_id;
        uint64_t sn;
        uint8_t max_message_size;
        uint8_t component_type;
    } QEMU_PACKED *is_identify;
    QEMU_BUILD_BUG_ON(sizeof(*is_identify) != 18);

    is_identify = (void *)payload_out;
    is_identify->pcie_vid = class->vendor_id;
    is_identify->pcie_did = class->device_id;
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
        is_identify->sn = CXL_USP(cci->d)->sn;
        /* Subsystem info not defined for a USP */
        is_identify->pcie_subsys_vid = 0;
        is_identify->pcie_subsys_id = 0;
        is_identify->component_type = 0x0; /* Switch */
    } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        PCIDevice *pci_dev = PCI_DEVICE(cci->d);

        is_identify->sn = CXL_TYPE3(cci->d)->sn;
        /*
         * We can't always use class->subsystem_vendor_id as
         * it is not set if the defaults are used.
         */
        is_identify->pcie_subsys_vid =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID);
        is_identify->pcie_subsys_id =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_ID);
        is_identify->component_type = 0x3; /* Type 3 */
    }

    /* TODO: Allow this to vary across different CCIs */
    is_identify->max_message_size = 9; /* 512 bytes - MCTP_CXL_MAILBOX_BYTES */
    *len_out = sizeof(*is_identify);
    return CXL_MBOX_SUCCESS;
}

static void cxl_set_dsp_active_bm(PCIBus *b, PCIDevice *d,
                                  void *private)
{
    uint8_t *bm = private;

    if (object_dynamic_cast(OBJECT(d), TYPE_CXL_DSP)) {
        uint8_t port = PCIE_PORT(d)->port;

        bm[port / 8] |= 1 << (port % 8);
    }
}

/* CXL r3.1 Section 7.6.7.1.1: Identify Switch Device (Opcode 5100h) */
static CXLRetCode cmd_identify_switch_device(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    PCIEPort *usp = PCIE_PORT(cci->d);
    PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
    int num_phys_ports = pcie_count_ds_ports(bus);

    struct cxl_fmapi_ident_switch_dev_resp_pl {
        uint8_t ingress_port_id;
        uint8_t rsvd;
        uint8_t num_physical_ports;
        uint8_t num_vcss;
        uint8_t active_port_bitmask[0x20];
        uint8_t active_vcs_bitmask[0x20];
        uint16_t total_vppbs;
        uint16_t bound_vppbs;
        uint8_t num_hdm_decoders_per_usp;
    } QEMU_PACKED *out;
    QEMU_BUILD_BUG_ON(sizeof(*out) != 0x49);

    out = (struct cxl_fmapi_ident_switch_dev_resp_pl *)payload_out;
    *out = (struct cxl_fmapi_ident_switch_dev_resp_pl) {
        .num_physical_ports = num_phys_ports + 1, /* 1 USP */
        .num_vcss = 1, /* No support for multiple VCS yet - potentially tricky */
        .active_vcs_bitmask[0] = 0x1,
        .total_vppbs = num_phys_ports + 1,
        .bound_vppbs = num_phys_ports + 1,
        .num_hdm_decoders_per_usp = 4,
    };

    /* Depends on the CCI type */
    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_PCIE_PORT)) {
        out->ingress_port_id = PCIE_PORT(cci->intf)->port;
    } else {
        /* MCTP? */
        out->ingress_port_id = 0;
    }

    pci_for_each_device_under_bus(bus, cxl_set_dsp_active_bm,
                                  out->active_port_bitmask);
    out->active_port_bitmask[usp->port / 8] |= (1 << usp->port % 8);

    *len_out = sizeof(*out);

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 7.6.7.1.2: Get Physical Port State (Opcode 5101h) */
static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    /* CXL r3.1 Table 7-17: Get Physical Port State Request Payload */
    struct cxl_fmapi_get_phys_port_state_req_pl {
        uint8_t num_ports;
        uint8_t ports[];
    } QEMU_PACKED *in;

    /*
     * CXL r3.1 Table 7-19: Get Physical Port State Port Information Block
     * Format
     */
    struct cxl_fmapi_port_state_info_block {
        uint8_t port_id;
        uint8_t config_state;
        uint8_t connected_device_cxl_version;
        uint8_t rsv1;
        uint8_t connected_device_type;
        uint8_t port_cxl_version_bitmask;
        uint8_t max_link_width;
        uint8_t negotiated_link_width;
        uint8_t supported_link_speeds_vector;
        uint8_t max_link_speed;
        uint8_t current_link_speed;
        uint8_t ltssm_state;
        uint8_t first_lane_num;
        uint16_t link_state;
        uint8_t supported_ld_count;
    } QEMU_PACKED;

    /* CXL r3.1 Table 7-18: Get Physical Port State Response Payload */
    struct cxl_fmapi_get_phys_port_state_resp_pl {
        uint8_t num_ports;
        uint8_t rsv1[3];
        struct cxl_fmapi_port_state_info_block ports[];
    } QEMU_PACKED *out;
    PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
    PCIEPort *usp = PCIE_PORT(cci->d);
    size_t pl_size;
    int i;

    in = (struct cxl_fmapi_get_phys_port_state_req_pl *)payload_in;
    out = (struct cxl_fmapi_get_phys_port_state_resp_pl *)payload_out;

    if (len_in < sizeof(*in)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /* Check if what was requested can fit */
    if (sizeof(*out) + sizeof(*out->ports) * in->num_ports > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* For success there should be a match for each requested port */
    out->num_ports = in->num_ports;

    for (i = 0; i < in->num_ports; i++) {
        struct cxl_fmapi_port_state_info_block *port;
        /* First try to match on downstream port */
        PCIDevice *port_dev;
        uint16_t lnkcap, lnkcap2, lnksta;

        port = &out->ports[i];

        port_dev = pcie_find_port_by_pn(bus, in->ports[i]);
        if (port_dev) { /* DSP */
            PCIDevice *ds_dev = pci_bridge_get_sec_bus(PCI_BRIDGE(port_dev))
                ->devices[0];

            port->config_state = 3;
            if (ds_dev) {
                if (object_dynamic_cast(OBJECT(ds_dev), TYPE_CXL_TYPE3)) {
                    port->connected_device_type = 5; /* Assume MLD for now */
                } else {
                    port->connected_device_type = 1;
                }
            } else {
                port->connected_device_type = 0;
            }
            port->supported_ld_count = 3;
        } else if (usp->port == in->ports[i]) { /* USP */
            port_dev = PCI_DEVICE(usp);
            port->config_state = 4;
            port->connected_device_type = 0;
        } else {
            return CXL_MBOX_INVALID_INPUT;
        }

        port->port_id = in->ports[i];
        /* Information on status of this port in lnksta, lnkcap */
        if (!port_dev->exp.exp_cap) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
        lnksta = port_dev->config_read(port_dev,
                                       port_dev->exp.exp_cap + PCI_EXP_LNKSTA,
                                       sizeof(lnksta));
        lnkcap = port_dev->config_read(port_dev,
                                       port_dev->exp.exp_cap + PCI_EXP_LNKCAP,
                                       sizeof(lnkcap));
        lnkcap2 = port_dev->config_read(port_dev,
                                        port_dev->exp.exp_cap + PCI_EXP_LNKCAP2,
                                        sizeof(lnkcap2));

        port->max_link_width = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
        port->negotiated_link_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> 4;
        /* No definition for SLS field in linux/pci_regs.h */
        port->supported_link_speeds_vector = (lnkcap2 & 0xFE) >> 1;
        port->max_link_speed = lnkcap & PCI_EXP_LNKCAP_SLS;
        port->current_link_speed = lnksta & PCI_EXP_LNKSTA_CLS;
        /* TODO: Track down if we can get the rest of the info */
        port->ltssm_state = 0x7;
        port->first_lane_num = 0;
        port->link_state = 0;
        port->port_cxl_version_bitmask = 0x2;
        port->connected_device_cxl_version = 0x2;
    }

    pl_size = sizeof(*out) + sizeof(*out->ports) * in->num_ports;
    *len_out = pl_size;

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.1.2: Background Operation Status (Opcode 0002h) */
static CXLRetCode cmd_infostat_bg_op_sts(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint8_t status;
        uint8_t rsvd;
        uint16_t opcode;
        uint16_t returncode;
        uint16_t vendor_ext_status;
    } QEMU_PACKED *bg_op_status;
    QEMU_BUILD_BUG_ON(sizeof(*bg_op_status) != 8);

    bg_op_status = (void *)payload_out;
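    /* Status byte: bits [7:1] are percent complete, bit 0 is "op running" */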
    bg_op_status->status = cci->bg.complete_pct << 1;
    if (cci->bg.runtime > 0) {
        bg_op_status->status |= 1U << 0;
    }
    bg_op_status->opcode = cci->bg.opcode;
    bg_op_status->returncode = cci->bg.ret_code;
    *len_out = sizeof(*bg_op_status);

    return CXL_MBOX_SUCCESS;
}

#define CXL_FW_SLOTS 2
#define CXL_FW_SIZE  0x02000000 /* 32 MiB */

/* CXL r3.1 Section 8.2.9.3.1: Get FW Info (Opcode 0200h) */
static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    struct {
        uint8_t slots_supported;
        uint8_t slot_info;
        uint8_t caps;
        uint8_t rsvd[0xd];
        char fw_rev1[0x10];
        char fw_rev2[0x10];
        char fw_rev3[0x10];
        char fw_rev4[0x10];
    } QEMU_PACKED *fw_info;
    QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50);

    if (!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER) ||
        !QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER) ||
        !QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER)) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    fw_info = (void *)payload_out;

    fw_info->slots_supported = CXL_FW_SLOTS;
    fw_info->slot_info = (cci->fw.active_slot & 0x7) |
                         ((cci->fw.staged_slot & 0x7) << 3);
    fw_info->caps = BIT(0); /* online update supported */

    if (cci->fw.slot[0]) {
        pstrcpy(fw_info->fw_rev1, sizeof(fw_info->fw_rev1), "BWFW VERSION 0");
    }
    if (cci->fw.slot[1]) {
        pstrcpy(fw_info->fw_rev2, sizeof(fw_info->fw_rev2), "BWFW VERSION 1");
    }

    *len_out = sizeof(*fw_info);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.3.2: Transfer FW (Opcode 0201h) */
#define CXL_FW_XFER_ALIGNMENT   128

#define CXL_FW_XFER_ACTION_FULL     0x0
#define CXL_FW_XFER_ACTION_INIT     0x1
#define CXL_FW_XFER_ACTION_CONTINUE 0x2
#define CXL_FW_XFER_ACTION_END      0x3
#define CXL_FW_XFER_ACTION_ABORT    0x4

static CXLRetCode cmd_firmware_update_transfer(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    struct {
        uint8_t action;
        uint8_t slot;
        uint8_t rsvd1[2];
        uint32_t offset;
        uint8_t rsvd2[0x78];
        uint8_t data[];
    } QEMU_PACKED *fw_transfer = (void *)payload_in;
    size_t offset, length;

    if (len < sizeof(*fw_transfer)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (fw_transfer->action == CXL_FW_XFER_ACTION_ABORT) {
        /*
         * At this point there aren't any on-going transfers
         * running in the bg - this is serialized before this
         * call altogether. Just mark the state machine and
         * disregard any other input.
         */
        cci->fw.transferring = false;
        return CXL_MBOX_SUCCESS;
    }

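    /* The wire offset field is in units of CXL_FW_XFER_ALIGNMENT (128 bytes) */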
    offset = fw_transfer->offset * CXL_FW_XFER_ALIGNMENT;
    length = len - sizeof(*fw_transfer);
    if (offset + length > CXL_FW_SIZE) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (cci->fw.transferring) {
        if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL ||
            fw_transfer->action == CXL_FW_XFER_ACTION_INIT) {
            return CXL_MBOX_FW_XFER_IN_PROGRESS;
        }
        /*
         * Abort partitioned package transfer if over 30 secs
         * between parts. As opposed to the explicit ABORT action,
         * semantically treat this condition as an error - as
         * if a part action were passed without a previous INIT.
         */
        if (difftime(time(NULL), cci->fw.last_partxfer) > 30.0) {
            cci->fw.transferring = false;
            return CXL_MBOX_INVALID_INPUT;
        }
    } else if (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE ||
               fw_transfer->action == CXL_FW_XFER_ACTION_END) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* allow back-to-back retransmission */
    if ((offset != cci->fw.prev_offset || length != cci->fw.prev_len) &&
        (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE ||
         fw_transfer->action == CXL_FW_XFER_ACTION_END)) {
        /* verify no overlaps */
        if (offset < cci->fw.prev_offset + cci->fw.prev_len) {
            return CXL_MBOX_FW_XFER_OUT_OF_ORDER;
        }
    }

    switch (fw_transfer->action) {
    case CXL_FW_XFER_ACTION_FULL: /* ignores offset */
    case CXL_FW_XFER_ACTION_END:
        if (fw_transfer->slot == 0 ||
            fw_transfer->slot == cci->fw.active_slot ||
            fw_transfer->slot > CXL_FW_SLOTS) {
            return CXL_MBOX_FW_INVALID_SLOT;
        }

        /* mark the slot used upon bg completion */
        break;
    case CXL_FW_XFER_ACTION_INIT:
        if (offset != 0) {
            return CXL_MBOX_INVALID_INPUT;
        }

        cci->fw.transferring = true;
        cci->fw.prev_offset = offset;
        cci->fw.prev_len = length;
        break;
    case CXL_FW_XFER_ACTION_CONTINUE:
        cci->fw.prev_offset = offset;
        cci->fw.prev_len = length;
        break;
    default:
        return CXL_MBOX_INVALID_INPUT;
    }

    if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL) {
        cci->bg.runtime = 10 * 1000UL;
    } else {
        cci->bg.runtime = 2 * 1000UL;
    }
    /* keep relevant context for bg completion */
    cci->fw.curr_action = fw_transfer->action;
    cci->fw.curr_slot = fw_transfer->slot;
    *len_out = 0;

    return CXL_MBOX_BG_STARTED;
}

static void __do_firmware_xfer(CXLCCI *cci)
{
    switch (cci->fw.curr_action) {
    case CXL_FW_XFER_ACTION_FULL:
    case CXL_FW_XFER_ACTION_END:
        cci->fw.slot[cci->fw.curr_slot - 1] = true;
        cci->fw.transferring = false;
        break;
    case CXL_FW_XFER_ACTION_INIT:
    case CXL_FW_XFER_ACTION_CONTINUE:
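        /* Record the time of this partial transfer for the 30 s timeout */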
        time(&cci->fw.last_partxfer);
        break;
    default:
        break;
    }
}

/* CXL r3.1 section 8.2.9.3.3: Activate FW (Opcode 0202h) */
static CXLRetCode cmd_firmware_update_activate(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    struct {
        uint8_t action;
        uint8_t slot;
    } QEMU_PACKED *fw_activate = (void *)payload_in;
    QEMU_BUILD_BUG_ON(sizeof(*fw_activate) != 0x2);

    if (fw_activate->slot == 0 ||
        fw_activate->slot == cci->fw.active_slot ||
        fw_activate->slot > CXL_FW_SLOTS) {
        return CXL_MBOX_FW_INVALID_SLOT;
    }

    /* ensure that an actual fw package is there */
    if (!cci->fw.slot[fw_activate->slot - 1]) {
        return CXL_MBOX_FW_INVALID_SLOT;
    }

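    /* Action 0 activates immediately; action 1 stages the slot for next reset */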
    switch (fw_activate->action) {
    case 0: /* online */
        cci->fw.active_slot = fw_activate->slot;
        break;
    case 1: /* reset */
        cci->fw.staged_slot = fw_activate->slot;
        break;
    default:
        return CXL_MBOX_INVALID_INPUT;
    }

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.4.1: Get Timestamp (Opcode 0300h) */
static CXLRetCode cmd_timestamp_get(const struct cxl_cmd *cmd,
                                    uint8_t *payload_in,
                                    size_t len_in,
                                    uint8_t *payload_out,
                                    size_t *len_out,
                                    CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    uint64_t final_time = cxl_device_get_timestamp(cxl_dstate);

    stq_le_p(payload_out, final_time);
    *len_out = 8;

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.4.2: Set Timestamp (Opcode 0301h) */
static CXLRetCode cmd_timestamp_set(const struct cxl_cmd *cmd,
                                    uint8_t *payload_in,
                                    size_t len_in,
                                    uint8_t *payload_out,
                                    size_t *len_out,
                                    CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;

    cxl_dstate->timestamp.set = true;
    cxl_dstate->timestamp.last_set = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    cxl_dstate->timestamp.host_set = le64_to_cpu(*(uint64_t *)payload_in);

    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.5.2.1: Command Effects Log (CEL) */
static const QemuUUID cel_uuid = {
    .data = UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79,
                 0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17)
};

/* CXL r3.1 Section 8.2.9.5.1: Get Supported Logs (Opcode 0400h) */
static CXLRetCode cmd_logs_get_supported(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint16_t entries;
        uint8_t rsvd[6];
        struct {
            QemuUUID uuid;
            uint32_t size;
        } log_entries[1];
    } QEMU_PACKED *supported_logs = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*supported_logs) != 0x1c);

    supported_logs->entries = 1;
    supported_logs->log_entries[0].uuid = cel_uuid;
    supported_logs->log_entries[0].size = 4 * cci->cel_size;

    *len_out = sizeof(*supported_logs);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h) */
static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct {
        QemuUUID uuid;
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_log;

    get_log = (void *)payload_in;

    if (get_log->length > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) {
        return CXL_MBOX_INVALID_LOG;
    }

    /*
     * CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h)
     * The device shall return Invalid Input if the Offset or Length
     * fields attempt to access beyond the size of the log as reported by Get
     * Supported Logs.
     *
     * Only valid for there to be one entry per opcode, but the length + offset
     * may still be greater than that if the inputs are not valid and so access
     * beyond the end of cci->cel_log.
     */
    if ((uint64_t)get_log->offset + get_log->length >= sizeof(cci->cel_log)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* Store off everything to local variables so we can wipe out the payload */
    *len_out = get_log->length;

    memmove(payload_out, cci->cel_log + get_log->offset, get_log->length);

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.6: Features */
/*
 * Get Supported Features output payload
 * CXL r3.1 section 8.2.9.6.1 Table 8-96
 */
typedef struct CXLSupportedFeatureHeader {
    uint16_t entries;
    uint16_t nsuppfeats_dev;
    uint32_t reserved;
} QEMU_PACKED CXLSupportedFeatureHeader;

/*
 * Get Supported Features Supported Feature Entry
 * CXL r3.1 section 8.2.9.6.1 Table 8-97
 */
typedef struct CXLSupportedFeatureEntry {
    QemuUUID uuid;
    uint16_t feat_index;
    uint16_t get_feat_size;
    uint16_t set_feat_size;
    uint32_t attr_flags;
    uint8_t get_feat_version;
    uint8_t set_feat_version;
    uint16_t set_feat_effects;
    uint8_t rsvd[18];
} QEMU_PACKED CXLSupportedFeatureEntry;

/* Supported Feature Entry : attribute flags */
#define CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE BIT(0)
#define CXL_FEAT_ENTRY_ATTR_FLAG_DEEPEST_RESET_PERSISTENCE_MASK GENMASK(3, 1)
#define CXL_FEAT_ENTRY_ATTR_FLAG_PERSIST_ACROSS_FIRMWARE_UPDATE BIT(4)
#define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_DEFAULT_SELECTION BIT(5)
#define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_SAVED_SELECTION BIT(6)

/* Supported Feature Entry : set feature effects */
#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_COLD_RESET BIT(0)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE BIT(1)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_DATA_CHANGE BIT(2)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_POLICY_CHANGE BIT(3)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_LOG_CHANGE BIT(4)
#define CXL_FEAT_ENTRY_SFE_SECURITY_STATE_CHANGE BIT(5)
#define CXL_FEAT_ENTRY_SFE_BACKGROUND_OPERATION BIT(6)
#define CXL_FEAT_ENTRY_SFE_SUPPORT_SECONDARY_MAILBOX BIT(7)
#define CXL_FEAT_ENTRY_SFE_SUPPORT_ABORT_BACKGROUND_OPERATION BIT(8)
#define CXL_FEAT_ENTRY_SFE_CEL_VALID BIT(9)
#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CONV_RESET BIT(10)
#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CXL_RESET BIT(11)

enum CXL_SUPPORTED_FEATURES_LIST {
    CXL_FEATURE_PATROL_SCRUB = 0,
    CXL_FEATURE_ECS,
    CXL_FEATURE_MAX
};

/* CXL r3.1 section 8.2.9.6.2: Get Feature */
/*
 * Get Feature input payload
 * CXL r3.1 section 8.2.9.6.2 Table 8-99
 */
/* Get Feature : Payload in selection */
enum CXL_GET_FEATURE_SELECTION {
    CXL_GET_FEATURE_SEL_CURRENT_VALUE,
    CXL_GET_FEATURE_SEL_DEFAULT_VALUE,
    CXL_GET_FEATURE_SEL_SAVED_VALUE,
    CXL_GET_FEATURE_SEL_MAX
};

/* CXL r3.1 section 8.2.9.6.3: Set Feature */
/*
 * Set Feature input payload
 * CXL r3.1 section 8.2.9.6.3 Table 8-101
 */
typedef struct CXLSetFeatureInHeader {
    QemuUUID uuid;
    uint32_t flags;
    uint16_t offset;
    uint8_t version;
    uint8_t rsvd[9];
} QEMU_PACKED QEMU_ALIGNED(16) CXLSetFeatureInHeader;

/* Set Feature : Payload in flags */
#define CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK 0x7
enum CXL_SET_FEATURE_FLAG_DATA_TRANSFER {
    CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_CONTINUE_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MAX
};
#define CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET BIT(3)

/* CXL r3.1 section 8.2.9.9.11.1: Device Patrol Scrub Control Feature */
static const QemuUUID patrol_scrub_uuid = {
    .data = UUID(0x96dad7d6, 0xfde8, 0x482b, 0xa7, 0x33,
                 0x75, 0x77, 0x4e, 0x06, 0xdb, 0x8a)
};

typedef struct CXLMemPatrolScrubSetFeature {
    CXLSetFeatureInHeader hdr;
    CXLMemPatrolScrubWriteAttrs feat_data;
} QEMU_PACKED QEMU_ALIGNED(16) CXLMemPatrolScrubSetFeature;

/*
 * CXL r3.1 section 8.2.9.9.11.2:
 * DDR5 Error Check Scrub (ECS) Control Feature
 */
static const QemuUUID ecs_uuid = {
    .data = UUID(0xe5b13f22, 0x2328, 0x4a14, 0xb8, 0xba,
                 0xb9, 0x69, 0x1e, 0x89, 0x33, 0x86)
};

typedef struct CXLMemECSSetFeature {
    CXLSetFeatureInHeader hdr;
    CXLMemECSWriteAttrs feat_data[];
} QEMU_PACKED QEMU_ALIGNED(16) CXLMemECSSetFeature;

/* CXL r3.1 section 8.2.9.6.1: Get Supported Features (Opcode 0500h) */
static CXLRetCode cmd_features_get_supported(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        uint32_t count;
        uint16_t start_index;
        uint16_t reserved;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_feats_in = (void *)payload_in;

    struct {
        CXLSupportedFeatureHeader hdr;
        CXLSupportedFeatureEntry feat_entries[];
    } QEMU_PACKED QEMU_ALIGNED(16) *get_feats_out = (void *)payload_out;
    uint16_t index, req_entries;
    uint16_t entry;

    if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        return CXL_MBOX_UNSUPPORTED;
    }
    if (get_feats_in->count < sizeof(CXLSupportedFeatureHeader) ||
        get_feats_in->start_index >= CXL_FEATURE_MAX) {
        return CXL_MBOX_INVALID_INPUT;
    }

    req_entries = (get_feats_in->count -
                   sizeof(CXLSupportedFeatureHeader)) /
                   sizeof(CXLSupportedFeatureEntry);
    req_entries = MIN(req_entries,
                      (CXL_FEATURE_MAX - get_feats_in->start_index));

    for (entry = 0, index = get_feats_in->start_index;
         entry < req_entries; index++) {
        switch (index) {
        case CXL_FEATURE_PATROL_SCRUB:
            /* Fill supported feature entry for device patrol scrub control */
            get_feats_out->feat_entries[entry++] =
                (struct CXLSupportedFeatureEntry) {
                    .uuid = patrol_scrub_uuid,
                    .feat_index = index,
                    .get_feat_size = sizeof(CXLMemPatrolScrubReadAttrs),
                    .set_feat_size = sizeof(CXLMemPatrolScrubWriteAttrs),
                    .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE,
                    .get_feat_version = CXL_MEMDEV_PS_GET_FEATURE_VERSION,
                    .set_feat_version = CXL_MEMDEV_PS_SET_FEATURE_VERSION,
                    .set_feat_effects = CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE |
                                        CXL_FEAT_ENTRY_SFE_CEL_VALID,
                };
            break;
        case CXL_FEATURE_ECS:
            /* Fill supported feature entry for device DDR5 ECS control */
            get_feats_out->feat_entries[entry++] =
                (struct CXLSupportedFeatureEntry) {
                    .uuid = ecs_uuid,
                    .feat_index = index,
                    .get_feat_size = sizeof(CXLMemECSReadAttrs),
                    .set_feat_size = sizeof(CXLMemECSWriteAttrs),
                    .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE,
                    .get_feat_version = CXL_ECS_GET_FEATURE_VERSION,
                    .set_feat_version = CXL_ECS_SET_FEATURE_VERSION,
                    .set_feat_effects = CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE |
                                        CXL_FEAT_ENTRY_SFE_CEL_VALID,
                };
            break;
        default:
            __builtin_unreachable();
        }
    }
    get_feats_out->hdr.nsuppfeats_dev = CXL_FEATURE_MAX;
    get_feats_out->hdr.entries = req_entries;
    *len_out = sizeof(CXLSupportedFeatureHeader) +
               req_entries * sizeof(CXLSupportedFeatureEntry);

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.6.2: Get Feature (Opcode 0501h) */
static CXLRetCode cmd_features_get_feature(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    struct {
        QemuUUID uuid;
        uint16_t offset;
        uint16_t count;
        uint8_t selection;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_feature;
    uint16_t bytes_to_copy = 0;
    CXLType3Dev *ct3d;
    CXLSetFeatureInfo *set_feat_info;

    if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        return CXL_MBOX_UNSUPPORTED;
    }

    ct3d = CXL_TYPE3(cci->d);
    get_feature = (void *)payload_in;

    set_feat_info = &ct3d->set_feat_info;
    if (qemu_uuid_is_equal(&get_feature->uuid, &set_feat_info->uuid)) {
        return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS;
    }

    if (get_feature->selection != CXL_GET_FEATURE_SEL_CURRENT_VALUE) {
        return CXL_MBOX_UNSUPPORTED;
    }
    if (get_feature->offset + get_feature->count > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (qemu_uuid_is_equal(&get_feature->uuid, &patrol_scrub_uuid)) {
        if (get_feature->offset >= sizeof(CXLMemPatrolScrubReadAttrs)) {
            return CXL_MBOX_INVALID_INPUT;
        }
        bytes_to_copy = sizeof(CXLMemPatrolScrubReadAttrs) -
                        get_feature->offset;
        bytes_to_copy = MIN(bytes_to_copy, get_feature->count);
        memcpy(payload_out,
               (uint8_t *)&ct3d->patrol_scrub_attrs + get_feature->offset,
               bytes_to_copy);
    } else if (qemu_uuid_is_equal(&get_feature->uuid, &ecs_uuid)) {
        if (get_feature->offset >= sizeof(CXLMemECSReadAttrs)) {
            return CXL_MBOX_INVALID_INPUT;
        }
        bytes_to_copy = sizeof(CXLMemECSReadAttrs) - get_feature->offset;
        bytes_to_copy = MIN(bytes_to_copy, get_feature->count);
        memcpy(payload_out,
               (uint8_t *)&ct3d->ecs_attrs + get_feature->offset,
               bytes_to_copy);
    } else {
        return CXL_MBOX_UNSUPPORTED;
    }

    *len_out = bytes_to_copy;

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 section 8.2.9.6.3: Set Feature (Opcode 0502h) */
static CXLRetCode cmd_features_set_feature(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    CXLSetFeatureInHeader *hdr = (void *)payload_in;
    CXLMemPatrolScrubWriteAttrs *ps_write_attrs;
    CXLMemPatrolScrubSetFeature *ps_set_feature;
    CXLMemECSWriteAttrs *ecs_write_attrs;
    CXLMemECSSetFeature *ecs_set_feature;
    CXLSetFeatureInfo *set_feat_info;
    uint16_t bytes_to_copy = 0;
    uint8_t data_transfer_flag;
    CXLType3Dev *ct3d;
    uint16_t count;

    if (len_in < sizeof(*hdr)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        return CXL_MBOX_UNSUPPORTED;
    }
    ct3d = CXL_TYPE3(cci->d);
    set_feat_info = &ct3d->set_feat_info;

    if (!qemu_uuid_is_null(&set_feat_info->uuid) &&
        !qemu_uuid_is_equal(&hdr->uuid, &set_feat_info->uuid)) {
        return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS;
    }
    if (hdr->flags & CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET) {
        set_feat_info->data_saved_across_reset = true;
    } else {
        set_feat_info->data_saved_across_reset = false;
    }

    data_transfer_flag =
        hdr->flags & CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK;
    if (data_transfer_flag == CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER) {
        set_feat_info->uuid = hdr->uuid;
        set_feat_info->data_size = 0;
    }
    set_feat_info->data_transfer_flag = data_transfer_flag;
    set_feat_info->data_offset = hdr->offset;
    bytes_to_copy = len_in - sizeof(CXLSetFeatureInHeader);

    if (bytes_to_copy == 0) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) {
        if (hdr->version != CXL_MEMDEV_PS_SET_FEATURE_VERSION) {
            return CXL_MBOX_UNSUPPORTED;
        }

        ps_set_feature = (void *)payload_in;
        ps_write_attrs = &ps_set_feature->feat_data;

        if ((uint32_t)hdr->offset + bytes_to_copy >
            sizeof(ct3d->patrol_scrub_wr_attrs)) {
            return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
        }
        memcpy((uint8_t *)&ct3d->patrol_scrub_wr_attrs + hdr->offset,
               ps_write_attrs,
               bytes_to_copy);
        set_feat_info->data_size += bytes_to_copy;

        if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
            data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) {
            ct3d->patrol_scrub_attrs.scrub_cycle &= ~0xFF;
            ct3d->patrol_scrub_attrs.scrub_cycle |=
                ct3d->patrol_scrub_wr_attrs.scrub_cycle_hr & 0xFF;
            ct3d->patrol_scrub_attrs.scrub_flags &= ~0x1;
            ct3d->patrol_scrub_attrs.scrub_flags |=
                ct3d->patrol_scrub_wr_attrs.scrub_flags & 0x1;
        }
    } else if (qemu_uuid_is_equal(&hdr->uuid,
                                  &ecs_uuid)) {
        if (hdr->version != CXL_ECS_SET_FEATURE_VERSION) {
            return CXL_MBOX_UNSUPPORTED;
        }

        ecs_set_feature = (void *)payload_in;
        ecs_write_attrs = ecs_set_feature->feat_data;

        if ((uint32_t)hdr->offset + bytes_to_copy >
            sizeof(ct3d->ecs_wr_attrs)) {
            return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
        }
        memcpy((uint8_t *)&ct3d->ecs_wr_attrs + hdr->offset,
               ecs_write_attrs,
               bytes_to_copy);
        set_feat_info->data_size += bytes_to_copy;

        if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
            data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) {
            ct3d->ecs_attrs.ecs_log_cap = ct3d->ecs_wr_attrs.ecs_log_cap;
            for (count = 0; count < CXL_ECS_NUM_MEDIA_FRUS; count++) {
                ct3d->ecs_attrs.fru_attrs[count].ecs_config =
                    ct3d->ecs_wr_attrs.fru_attrs[count].ecs_config & 0x1F;
            }
        }
    } else {
        return CXL_MBOX_UNSUPPORTED;
    }

    if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
        data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER ||
        data_transfer_flag == CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER) {
        memset(&set_feat_info->uuid, 0, sizeof(QemuUUID));
        if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) {
            memset(&ct3d->patrol_scrub_wr_attrs, 0, set_feat_info->data_size);
        } else if (qemu_uuid_is_equal(&hdr->uuid, &ecs_uuid)) {
            memset(&ct3d->ecs_wr_attrs, 0, set_feat_info->data_size);
        }
        set_feat_info->data_transfer_flag = 0;
        set_feat_info->data_saved_across_reset = false;
        set_feat_info->data_offset = 0;
        set_feat_info->data_size = 0;
    }

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.1.1: Identify Memory Device (Opcode 4000h) */
static CXLRetCode cmd_identify_memory_device(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        char fw_revision[0x10];
        uint64_t total_capacity;
        uint64_t volatile_capacity;
        uint64_t persistent_capacity;
        uint64_t partition_align;
        uint16_t info_event_log_size;
        uint16_t warning_event_log_size;
        uint16_t failure_event_log_size;
        uint16_t fatal_event_log_size;
        uint32_t lsa_size;
        uint8_t poison_list_max_mer[3];
        uint16_t inject_poison_limit;
        uint8_t poison_caps;
        uint8_t qos_telemetry_caps;
        uint16_t dc_event_log_size;
    } QEMU_PACKED *id;
    QEMU_BUILD_BUG_ON(sizeof(*id) != 0x45);
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    id = (void *)payload_out;

    snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0);

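    /* Capacities are reported in units of CXL_CAPACITY_MULTIPLIER (256 MiB) */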
    stq_le_p(&id->total_capacity,
             cxl_dstate->static_mem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->persistent_capacity,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->volatile_capacity,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d));
    /* 256 poison records */
    st24_le_p(id->poison_list_max_mer, 256);
    /* No limit - so limited by main poison record limit */
    stw_le_p(&id->inject_poison_limit, 0);
    stw_le_p(&id->dc_event_log_size, CXL_DC_EVENT_LOG_SIZE);

    *len_out = sizeof(*id);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.2.1: Get Partition Info (Opcode 4100h) */
static CXLRetCode cmd_ccls_get_partition_info(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    struct {
        uint64_t active_vmem;
        uint64_t active_pmem;
        uint64_t next_vmem;
        uint64_t next_pmem;
    } QEMU_PACKED *part_info = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20);
    CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    stq_le_p(&part_info->active_vmem,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    /*
     * When both next_vmem and next_pmem are 0, there is no pending change to
     * partitioning.
     */
    stq_le_p(&part_info->next_vmem, 0);
    stq_le_p(&part_info->active_pmem,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&part_info->next_pmem, 0);

    *len_out = sizeof(*part_info);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.2.3: Get LSA (Opcode 4102h) */
static CXLRetCode cmd_ccls_get_lsa(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct {
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED *get_lsa;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    uint64_t offset, length;

    get_lsa = (void *)payload_in;
    offset = get_lsa->offset;
    length = get_lsa->length;

    if (offset + length > cvc->get_lsa_size(ct3d)) {
        *len_out = 0;
        return CXL_MBOX_INVALID_INPUT;
    }

    *len_out = cvc->get_lsa(ct3d, payload_out, length, offset);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.2.4: Set LSA (Opcode 4103h) */
static CXLRetCode cmd_ccls_set_lsa(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct set_lsa_pl {
        uint32_t offset;
        uint32_t rsvd;
        uint8_t data[];
    } QEMU_PACKED;
    struct set_lsa_pl *set_lsa_payload = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    const size_t hdr_len = offsetof(struct set_lsa_pl, data);

    *len_out = 0;
    if (len_in < hdr_len) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (set_lsa_payload->offset + len_in > cvc->get_lsa_size(ct3d) + hdr_len) {
        return CXL_MBOX_INVALID_INPUT;
    }
    len_in -= hdr_len;

    cvc->set_lsa(ct3d, set_lsa_payload->data, len_in, set_lsa_payload->offset);
    return CXL_MBOX_SUCCESS;
}

/* Perform the actual device zeroing */
static void __do_sanitization(CXLType3Dev *ct3d)
{
    MemoryRegion *mr;

    if (ct3d->hostvmem) {
        mr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (mr) {
            void *hostmem = memory_region_get_ram_ptr(mr);
            memset(hostmem, 0, memory_region_size(mr));
        }
    }

    if (ct3d->hostpmem) {
        mr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (mr) {
            void *hostmem = memory_region_get_ram_ptr(mr);
            memset(hostmem, 0, memory_region_size(mr));
        }
    }
    if (ct3d->lsa) {
        mr = host_memory_backend_get_memory(ct3d->lsa);
        if (mr) {
            void *lsa = memory_region_get_ram_ptr(mr);
            memset(lsa, 0, memory_region_size(mr));
        }
    }
    cxl_discard_all_event_records(&ct3d->cxl_dstate);
}

/*
 * CXL r3.1 Section 8.2.9.9.5.1: Sanitize (Opcode 4400h)
 *
 * Once the Sanitize command has started successfully, the device shall be
 * placed in the media disabled state. If the command fails or is interrupted
 * by a reset or power failure, it shall remain in the media disabled state
 * until a successful Sanitize command has been completed. During this state:
 *
 * 1. Memory writes to the device will have no effect, and all memory reads
 * will return random values (no user data returned, even for locations that
 * the failed Sanitize operation didn't sanitize yet).
 *
 * 2. Mailbox commands shall still be processed in the disabled state, except
 * that commands that access Sanitized areas shall fail with the Media Disabled
 * error code.
 */
static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint64_t total_mem; /* in MiB */
    int secs;

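    /* Scale the emulated sanitize runtime with capacity; times are illustrative */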
    total_mem = (ct3d->cxl_dstate.vmem_size + ct3d->cxl_dstate.pmem_size) >> 20;
    if (total_mem <= 512) {
        secs = 4;
    } else if (total_mem <= 1024) {
        secs = 8;
    } else if (total_mem <= 2 * 1024) {
        secs = 15;
    } else if (total_mem <= 4 * 1024) {
        secs = 30;
    } else if (total_mem <= 8 * 1024) {
        secs = 60;
    } else if (total_mem <= 16 * 1024) {
        secs = 2 * 60;
    } else if (total_mem <= 32 * 1024) {
        secs = 4 * 60;
    } else if (total_mem <= 64 * 1024) {
        secs = 8 * 60;
    } else if (total_mem <= 128 * 1024) {
        secs = 15 * 60;
    } else if (total_mem <= 256 * 1024) {
        secs = 30 * 60;
    } else if (total_mem <= 512 * 1024) {
        secs = 60 * 60;
    } else if (total_mem <= 1024 * 1024) {
        secs = 120 * 60;
    } else {
        secs = 240 * 60; /* max 4 hrs */
    }

    /* Other background commands will return Busy while this runs */
    cci->bg.runtime = secs * 1000UL;
    *len_out = 0;

    cxl_dev_disable_media(&ct3d->cxl_dstate);

    /* sanitize when done */
    return CXL_MBOX_BG_STARTED;
}

static CXLRetCode cmd_get_security_state(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    uint32_t *state = (uint32_t *)payload_out;

    *state = 0;
    *len_out = 4;
    return CXL_MBOX_SUCCESS;
}

/*
 * CXL r3.1 Section 8.2.9.9.4.1: Get Poison List (Opcode 4300h)
 *
 * This is very inefficient, but good enough for now!
 * Also the payload will always fit, so no need to handle the MORE flag and
 * make this stateful. We may want to allow longer poison lists to aid
 * testing of that kernel functionality.
 */
static CXLRetCode cmd_media_get_poison_list(const struct cxl_cmd *cmd,
                                            uint8_t *payload_in,
                                            size_t len_in,
                                            uint8_t *payload_out,
                                            size_t *len_out,
                                            CXLCCI *cci)
{
    struct get_poison_list_pl {
        uint64_t pa;
        uint64_t length;
    } QEMU_PACKED;

    struct get_poison_list_out_pl {
        uint8_t flags;
        uint8_t rsvd1;
        uint64_t overflow_timestamp;
        uint16_t count;
        uint8_t rsvd2[0x14];
        struct {
            uint64_t addr;
            uint32_t length;
            uint32_t resv;
        } QEMU_PACKED records[];
    } QEMU_PACKED;

    struct get_poison_list_pl *in = (void *)payload_in;
    struct get_poison_list_out_pl *out = (void *)payload_out;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint16_t record_count = 0, i = 0;
    uint64_t query_start, query_length;
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLPoison *ent;
    uint16_t out_pl_len;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
1683 query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;
1684
1685 QLIST_FOREACH(ent, poison_list, node) {
1686 /* Check for no overlap */
1687 if (!ranges_overlap(ent->start, ent->length,
1688 query_start, query_length)) {
1689 continue;
1690 }
1691 record_count++;
1692 }
1693 out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
1694 assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);
1695
1696 QLIST_FOREACH(ent, poison_list, node) {
1697 uint64_t start, stop;
1698
1699 /* Check for no overlap */
1700 if (!ranges_overlap(ent->start, ent->length,
1701 query_start, query_length)) {
1702 continue;
1703 }
1704
1705 /* Deal with overlap */
1706 start = MAX(ROUND_DOWN(ent->start, 64ull), query_start);
1707 stop = MIN(ROUND_DOWN(ent->start, 64ull) + ent->length,
1708 query_start + query_length);
1709 stq_le_p(&out->records[i].addr, start | (ent->type & 0x7));
1710 stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE);
1711 i++;
1712 }
1713 if (ct3d->poison_list_overflowed) {
1714 out->flags = (1 << 1);
1715 stq_le_p(&out->overflow_timestamp, ct3d->poison_list_overflow_ts);
1716 }
1717 if (scan_media_running(cci)) {
1718 out->flags |= (1 << 2);
1719 }
1720
1721 stw_le_p(&out->count, record_count);
1722 *len_out = out_pl_len;
1723 return CXL_MBOX_SUCCESS;
1724 }
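
/*
 * Editor's sketch of how a consumer might decode the output payload built
 * above; the offsets follow get_poison_list_out_pl (count at byte 0xa,
 * records at byte 0x20) and the helper name is hypothetical.
 */
#if 0
static void decode_poison_records(const uint8_t *payload)
{
    const struct {
        uint64_t addr;
        uint32_t length;
        uint32_t resv;
    } QEMU_PACKED *rec = (const void *)(payload + 0x20);
    uint16_t count = lduw_le_p(payload + 0xa);

    for (uint16_t j = 0; j < count; j++) {
        /* Low 3 bits of the address field carry the error source type */
        uint64_t dpa = ldq_le_p(&rec[j].addr) & ~0x7ull;
        unsigned type = ldq_le_p(&rec[j].addr) & 0x7;
        uint32_t len = ldl_le_p(&rec[j].length); /* in 64-byte cachelines */

        printf("poison: dpa=0x%" PRIx64 " type=%u len=%u\n", dpa, type, len);
    }
}
#endif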

/* CXL r3.1 Section 8.2.9.9.4.2: Inject Poison (Opcode 4301h) */
static CXLRetCode cmd_media_inject_poison(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLPoison *ent;
    struct inject_poison_pl {
        uint64_t dpa;
    };
    struct inject_poison_pl *in = (void *)payload_in;
    uint64_t dpa = ldq_le_p(&in->dpa);
    CXLPoison *p;

    QLIST_FOREACH(ent, poison_list, node) {
        if (dpa >= ent->start &&
            dpa + CXL_CACHE_LINE_SIZE <= ent->start + ent->length) {
            return CXL_MBOX_SUCCESS;
        }
    }
    /*
     * Freeze the list if there is an on-going scan media operation.
     */
    if (scan_media_running(cci)) {
        /*
         * XXX: Spec is ambiguous - is this case considered
         * a successful return despite not adding to the list?
         */
        goto success;
    }

    if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
        return CXL_MBOX_INJECT_POISON_LIMIT;
    }
    p = g_new0(CXLPoison, 1);

    p->length = CXL_CACHE_LINE_SIZE;
    p->start = dpa;
    p->type = CXL_POISON_TYPE_INJECTED;

    /*
     * Possible todo: Merge with existing entry if next to it and if same type
     */
    QLIST_INSERT_HEAD(poison_list, p, node);
    ct3d->poison_list_cnt++;
success:
    *len_out = 0;

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.9.4.3: Clear Poison (Opcode 4302h) */
static CXLRetCode cmd_media_clear_poison(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    struct clear_poison_pl {
        uint64_t dpa;
        uint8_t data[64];
    };
    CXLPoison *ent;
    uint64_t dpa;

    struct clear_poison_pl *in = (void *)payload_in;

    dpa = ldq_le_p(&in->dpa);
    if (dpa + CXL_CACHE_LINE_SIZE > cxl_dstate->static_mem_size +
                                    ct3d->dc.total_capacity) {
        return CXL_MBOX_INVALID_PA;
    }

    /* Clearing a region with no poison is not an error so always do so */
    if (cvc->set_cacheline) {
        if (!cvc->set_cacheline(ct3d, dpa, in->data)) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
    }

    /*
     * Freeze the list if there is an on-going scan media operation.
     */
    if (scan_media_running(cci)) {
        /*
         * XXX: Spec is ambiguous - is this case considered
         * a successful return despite not removing from the list?
         */
        goto success;
    }

    QLIST_FOREACH(ent, poison_list, node) {
        /*
         * Test for containment within an entry. Simpler than the general
         * case as we are clearing 64 bytes and entries are 64 byte aligned.
         */
        if ((dpa >= ent->start) && (dpa < ent->start + ent->length)) {
            break;
        }
    }
    if (!ent) {
        goto success;
    }

    QLIST_REMOVE(ent, node);
    ct3d->poison_list_cnt--;

    if (dpa > ent->start) {
        CXLPoison *frag;
        /* Cannot overflow as replacing existing entry */

        frag = g_new0(CXLPoison, 1);

        frag->start = ent->start;
        frag->length = dpa - ent->start;
        frag->type = ent->type;

        QLIST_INSERT_HEAD(poison_list, frag, node);
        ct3d->poison_list_cnt++;
    }

    if (dpa + CXL_CACHE_LINE_SIZE < ent->start + ent->length) {
        CXLPoison *frag;

        if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
            cxl_set_poison_list_overflowed(ct3d);
        } else {
            frag = g_new0(CXLPoison, 1);

            frag->start = dpa + CXL_CACHE_LINE_SIZE;
            frag->length = ent->start + ent->length - frag->start;
            frag->type = ent->type;
            QLIST_INSERT_HEAD(poison_list, frag, node);
            ct3d->poison_list_cnt++;
        }
    }
    /* Once any fragments have been added, free the original entry */
    g_free(ent);
success:
    *len_out = 0;

    return CXL_MBOX_SUCCESS;
}
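
/*
 * Editor's worked example of the splitting above: with a single list entry
 * covering [0x1000, 0x1100) (four cachelines), clearing the line at 0x1040
 * removes that entry and inserts a leading fragment [0x1000, 0x1040) and a
 * trailing fragment [0x1080, 0x1100), a net gain of one list entry.
 */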

/*
 * CXL r3.1 section 8.2.9.9.4.4: Get Scan Media Capabilities
 */
static CXLRetCode
cmd_media_get_scan_media_capabilities(const struct cxl_cmd *cmd,
                                      uint8_t *payload_in,
                                      size_t len_in,
                                      uint8_t *payload_out,
                                      size_t *len_out,
                                      CXLCCI *cci)
{
    struct get_scan_media_capabilities_pl {
        uint64_t pa;
        uint64_t length;
    } QEMU_PACKED;

    struct get_scan_media_capabilities_out_pl {
        uint32_t estimated_runtime_ms;
    };

    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    struct get_scan_media_capabilities_pl *in = (void *)payload_in;
    struct get_scan_media_capabilities_out_pl *out = (void *)payload_out;
    uint64_t query_start;
    uint64_t query_length;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    if (query_start + query_length > cxl_dstate->static_mem_size) {
        return CXL_MBOX_INVALID_PA;
    }

    /*
     * Just use 400 nanosecond access/read latency + 100 ns for
     * the cost of updating the poison list. For small enough
     * chunks return at least 1 ms.
     */
    stl_le_p(&out->estimated_runtime_ms,
             MAX(1, query_length * (0.0005L / 64)));

    *len_out = sizeof(*out);
    return CXL_MBOX_SUCCESS;
}
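
/*
 * Editor's note: the constant works out to 500 ns per 64-byte cacheline,
 * i.e. 0.0005 ms per line. For example, a 1 GiB query is 16777216 lines,
 * giving an estimate of roughly 8389 ms, while a 64 KiB query (1024 lines)
 * would estimate 0.512 ms and is clamped to the 1 ms floor.
 */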

static void __do_scan_media(CXLType3Dev *ct3d)
{
    CXLPoison *ent;
    unsigned int results_cnt = 0;

    QLIST_FOREACH(ent, &ct3d->scan_media_results, node) {
        results_cnt++;
    }

    /* only scan media may clear the overflow */
    if (ct3d->poison_list_overflowed &&
        ct3d->poison_list_cnt == results_cnt) {
        cxl_clear_poison_list_overflowed(ct3d);
    }
    /* scan media has run since last conventional reset */
    ct3d->scan_media_hasrun = true;
}

/*
 * CXL r3.1 section 8.2.9.9.4.5: Scan Media
 */
static CXLRetCode cmd_media_scan_media(const struct cxl_cmd *cmd,
                                       uint8_t *payload_in,
                                       size_t len_in,
                                       uint8_t *payload_out,
                                       size_t *len_out,
                                       CXLCCI *cci)
{
    struct scan_media_pl {
        uint64_t pa;
        uint64_t length;
        uint8_t flags;
    } QEMU_PACKED;

    struct scan_media_pl *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    uint64_t query_start;
    uint64_t query_length;
    CXLPoison *ent, *next;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    if (query_start + query_length > cxl_dstate->static_mem_size) {
        return CXL_MBOX_INVALID_PA;
    }
    if (ct3d->dc.num_regions && query_start + query_length >=
            cxl_dstate->static_mem_size + ct3d->dc.total_capacity) {
        return CXL_MBOX_INVALID_PA;
    }

    if (in->flags == 0) { /* TODO */
        qemu_log_mask(LOG_UNIMP,
                      "Scan Media Event Log is unsupported\n");
    }

    /* any previous results are discarded upon a new Scan Media */
    QLIST_FOREACH_SAFE(ent, &ct3d->scan_media_results, node, next) {
        QLIST_REMOVE(ent, node);
        g_free(ent);
    }

    /* kill the poison list - it will be recreated */
    if (ct3d->poison_list_overflowed) {
        QLIST_FOREACH_SAFE(ent, &ct3d->poison_list, node, next) {
            QLIST_REMOVE(ent, node);
            g_free(ent);
            ct3d->poison_list_cnt--;
        }
    }

    /*
     * Scan the backup list and move corresponding entries
     * into the results list, updating the poison list
     * when possible.
     */
    QLIST_FOREACH_SAFE(ent, &ct3d->poison_list_bkp, node, next) {
        CXLPoison *res;

        if (ent->start >= query_start + query_length ||
            ent->start + ent->length <= query_start) {
            continue;
        }

        /*
         * If a Get Poison List cmd comes in while this
         * scan is being done, it will see the new complete
         * list, while setting the respective flag.
         */
        if (ct3d->poison_list_cnt < CXL_POISON_LIST_LIMIT) {
            CXLPoison *p = g_new0(CXLPoison, 1);

            p->start = ent->start;
            p->length = ent->length;
            p->type = ent->type;
            QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
            ct3d->poison_list_cnt++;
        }

        res = g_new0(CXLPoison, 1);
        res->start = ent->start;
        res->length = ent->length;
        res->type = ent->type;
        QLIST_INSERT_HEAD(&ct3d->scan_media_results, res, node);

        QLIST_REMOVE(ent, node);
        g_free(ent);
    }

    cci->bg.runtime = MAX(1, query_length * (0.0005L / 64));
    *len_out = 0;

    return CXL_MBOX_BG_STARTED;
}

/*
 * CXL r3.1 section 8.2.9.9.4.6: Get Scan Media Results
 */
static CXLRetCode cmd_media_get_scan_media_results(const struct cxl_cmd *cmd,
                                                   uint8_t *payload_in,
                                                   size_t len_in,
                                                   uint8_t *payload_out,
                                                   size_t *len_out,
                                                   CXLCCI *cci)
{
    struct get_scan_media_results_out_pl {
        uint64_t dpa_restart;
        uint64_t length;
        uint8_t flags;
        uint8_t rsvd1;
        uint16_t count;
        uint8_t rsvd2[0xc];
        struct {
            uint64_t addr;
            uint32_t length;
            uint32_t resv;
        } QEMU_PACKED records[];
    } QEMU_PACKED;

    struct get_scan_media_results_out_pl *out = (void *)payload_out;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLPoisonList *scan_media_results = &ct3d->scan_media_results;
    CXLPoison *ent, *next;
    uint16_t total_count = 0, record_count = 0, i = 0;
    uint16_t out_pl_len;

    if (!ct3d->scan_media_hasrun) {
        return CXL_MBOX_UNSUPPORTED;
    }

    /*
     * Calculate limits; all entries fall within the address range of the
     * last Scan Media call.
     */
    QLIST_FOREACH(ent, scan_media_results, node) {
        size_t rec_size = record_count * sizeof(out->records[0]);

        if (sizeof(*out) + rec_size < CXL_MAILBOX_MAX_PAYLOAD_SIZE) {
            record_count++;
        }
        total_count++;
    }

    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    memset(out, 0, out_pl_len);
    QLIST_FOREACH_SAFE(ent, scan_media_results, node, next) {
        uint64_t start, stop;

        if (i == record_count) {
            break;
        }

        start = ROUND_DOWN(ent->start, 64ull);
        stop = ROUND_DOWN(ent->start, 64ull) + ent->length;
        stq_le_p(&out->records[i].addr, start);
        stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE);
        i++;

        /* consume the returned entry */
        QLIST_REMOVE(ent, node);
        g_free(ent);
    }

    stw_le_p(&out->count, record_count);
    if (total_count > record_count) {
        out->flags = (1 << 0); /* More Media Error Records */
    }

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}
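
/*
 * Editor's sketch (host's view, not part of the device model): because each
 * call consumes the records it returns, a host keeps reissuing Get Scan
 * Media Results while the "More Media Error Records" flag (bit 0) is set.
 * issue_get_results() and process_records() are hypothetical helpers; the
 * flags byte sits at offset 16 of the output payload above.
 */
#if 0
static void drain_scan_media_results(void)
{
    uint8_t flags;

    do {
        const uint8_t *out = issue_get_results();   /* opcode 4305h */

        process_records(out);
        flags = out[16];
    } while (flags & (1 << 0));
}
#endif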

/*
 * CXL r3.1 section 8.2.9.9.9.1: Get Dynamic Capacity Configuration
 * (Opcode: 4800h)
 */
static CXLRetCode cmd_dcd_get_dyn_cap_config(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    struct {
        uint8_t region_cnt;
        uint8_t start_rid;
    } QEMU_PACKED *in = (void *)payload_in;
    struct {
        uint8_t num_regions;
        uint8_t regions_returned;
        uint8_t rsvd1[6];
        struct {
            uint64_t base;
            uint64_t decode_len;
            uint64_t region_len;
            uint64_t block_size;
            uint32_t dsmadhandle;
            uint8_t flags;
            uint8_t rsvd2[3];
        } QEMU_PACKED records[];
    } QEMU_PACKED *out = (void *)payload_out;
    struct {
        uint32_t num_extents_supported;
        uint32_t num_extents_available;
        uint32_t num_tags_supported;
        uint32_t num_tags_available;
    } QEMU_PACKED *extra_out;
    uint16_t record_count;
    uint16_t i;
    uint16_t out_pl_len;
    uint8_t start_rid;

    start_rid = in->start_rid;
    if (start_rid >= ct3d->dc.num_regions) {
        return CXL_MBOX_INVALID_INPUT;
    }

    record_count = MIN(ct3d->dc.num_regions - in->start_rid, in->region_cnt);

    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    extra_out = (void *)(payload_out + out_pl_len);
    out_pl_len += sizeof(*extra_out);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    out->num_regions = ct3d->dc.num_regions;
    out->regions_returned = record_count;
    for (i = 0; i < record_count; i++) {
        stq_le_p(&out->records[i].base,
                 ct3d->dc.regions[start_rid + i].base);
        stq_le_p(&out->records[i].decode_len,
                 ct3d->dc.regions[start_rid + i].decode_len /
                 CXL_CAPACITY_MULTIPLIER);
        stq_le_p(&out->records[i].region_len,
                 ct3d->dc.regions[start_rid + i].len);
        stq_le_p(&out->records[i].block_size,
                 ct3d->dc.regions[start_rid + i].block_size);
        stl_le_p(&out->records[i].dsmadhandle,
                 ct3d->dc.regions[start_rid + i].dsmadhandle);
        out->records[i].flags = ct3d->dc.regions[start_rid + i].flags;
    }
    /*
     * TODO: Assign values once extents and tags are introduced
     * to use.
     */
    stl_le_p(&extra_out->num_extents_supported, CXL_NUM_EXTENTS_SUPPORTED);
    stl_le_p(&extra_out->num_extents_available, CXL_NUM_EXTENTS_SUPPORTED -
             ct3d->dc.total_extent_count);
    stl_le_p(&extra_out->num_tags_supported, CXL_NUM_TAGS_SUPPORTED);
    stl_le_p(&extra_out->num_tags_available, CXL_NUM_TAGS_SUPPORTED);

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}
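
/*
 * Editor's worked example of the output sizing above: the fixed header is
 * 8 bytes, each region record is 40 bytes, and the trailing extent/tag
 * counts add 16 bytes. Reporting two regions therefore produces
 * 8 + 2 * 40 + 16 = 104 bytes, well under the mailbox payload limit.
 */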

/*
 * CXL r3.1 section 8.2.9.9.9.2:
 * Get Dynamic Capacity Extent List (Opcode 4801h)
 */
static CXLRetCode cmd_dcd_get_dyn_cap_ext_list(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len_in,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    struct {
        uint32_t extent_cnt;
        uint32_t start_extent_id;
    } QEMU_PACKED *in = (void *)payload_in;
    struct {
        uint32_t count;
        uint32_t total_extents;
        uint32_t generation_num;
        uint8_t rsvd[4];
        CXLDCExtentRaw records[];
    } QEMU_PACKED *out = (void *)payload_out;
    uint32_t start_extent_id = in->start_extent_id;
    CXLDCExtentList *extent_list = &ct3d->dc.extents;
    uint16_t record_count = 0, i = 0, record_done = 0;
    uint16_t out_pl_len, size;
    CXLDCExtent *ent;

    if (start_extent_id > ct3d->dc.total_extent_count) {
        return CXL_MBOX_INVALID_INPUT;
    }

    record_count = MIN(in->extent_cnt,
                       ct3d->dc.total_extent_count - start_extent_id);
    size = CXL_MAILBOX_MAX_PAYLOAD_SIZE - sizeof(*out);
    record_count = MIN(record_count, size / sizeof(out->records[0]));
    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);

    stl_le_p(&out->count, record_count);
    stl_le_p(&out->total_extents, ct3d->dc.total_extent_count);
    stl_le_p(&out->generation_num, ct3d->dc.ext_list_gen_seq);

    if (record_count > 0) {
        CXLDCExtentRaw *out_rec = &out->records[record_done];

        QTAILQ_FOREACH(ent, extent_list, node) {
            if (i++ < start_extent_id) {
                continue;
            }
            stq_le_p(&out_rec->start_dpa, ent->start_dpa);
            stq_le_p(&out_rec->len, ent->len);
            memcpy(&out_rec->tag, ent->tag, 0x10);
            stw_le_p(&out_rec->shared_seq, ent->shared_seq);

            record_done++;
            out_rec++;
            if (record_done == record_count) {
                break;
            }
        }
    }

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}

/*
 * Check whether any bit in addr[nr, nr + size) is set;
 * return true if any bit is set, otherwise false.
 */
bool test_any_bits_set(const unsigned long *addr, unsigned long nr,
                       unsigned long size)
{
    unsigned long res = find_next_bit(addr, size + nr, nr);

    return res < nr + size;
}
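
/*
 * Editor's sketch: minimal usage of test_any_bits_set() on a locally built
 * bitmap; the values are illustrative only.
 */
#if 0
static void test_any_bits_set_example(void)
{
    g_autofree unsigned long *map = bitmap_new(128);

    bitmap_set(map, 10, 4);                 /* set bits 10..13 */
    assert(test_any_bits_set(map, 8, 4));   /* [8, 12) overlaps bits 10..11 */
    assert(!test_any_bits_set(map, 14, 8)); /* [14, 22) is all clear */
}
#endif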

CXLDCRegion *cxl_find_dc_region(CXLType3Dev *ct3d, uint64_t dpa, uint64_t len)
{
    int i;
    CXLDCRegion *region = &ct3d->dc.regions[0];

    if (dpa < region->base ||
        dpa >= region->base + ct3d->dc.total_capacity) {
        return NULL;
    }

    /*
     * CXL r3.1 section 9.13.3: Dynamic Capacity Device (DCD)
     *
     * Regions are used in increasing-DPA order, with Region 0 being used for
     * the lowest DPA of Dynamic Capacity and Region 7 for the highest DPA.
     * So check from the last region to find where the dpa belongs. Extents
     * that cross multiple regions are not allowed.
     */
    for (i = ct3d->dc.num_regions - 1; i >= 0; i--) {
        region = &ct3d->dc.regions[i];
        if (dpa >= region->base) {
            if (dpa + len > region->base + region->len) {
                return NULL;
            }
            return region;
        }
    }

    return NULL;
}
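
/*
 * Editor's worked example: with two regions, region 0 at [0, 1 GiB) and
 * region 1 at [1 GiB, 3 GiB), a lookup of dpa = 1.5 GiB with len = 128 MiB
 * matches region 1 on the first (highest) iteration and fits within it, so
 * region 1 is returned; the same dpa with len = 2 GiB would spill past the
 * region end and return NULL.
 */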

void cxl_insert_extent_to_extent_list(CXLDCExtentList *list,
                                      uint64_t dpa,
                                      uint64_t len,
                                      uint8_t *tag,
                                      uint16_t shared_seq)
{
    CXLDCExtent *extent;

    extent = g_new0(CXLDCExtent, 1);
    extent->start_dpa = dpa;
    extent->len = len;
    if (tag) {
        memcpy(extent->tag, tag, 0x10);
    }
    extent->shared_seq = shared_seq;

    QTAILQ_INSERT_TAIL(list, extent, node);
}

void cxl_remove_extent_from_extent_list(CXLDCExtentList *list,
                                        CXLDCExtent *extent)
{
    QTAILQ_REMOVE(list, extent, node);
    g_free(extent);
}

/*
 * Add a new extent to the extent "group" if the group exists;
 * otherwise, create a new group.
 * Return value: the extent group into which the extent was inserted.
 */
CXLDCExtentGroup *cxl_insert_extent_to_extent_group(CXLDCExtentGroup *group,
                                                    uint64_t dpa,
                                                    uint64_t len,
                                                    uint8_t *tag,
                                                    uint16_t shared_seq)
{
    if (!group) {
        group = g_new0(CXLDCExtentGroup, 1);
        QTAILQ_INIT(&group->list);
    }
    cxl_insert_extent_to_extent_list(&group->list, dpa, len,
                                     tag, shared_seq);
    return group;
}

void cxl_extent_group_list_insert_tail(CXLDCExtentGroupList *list,
                                       CXLDCExtentGroup *group)
{
    QTAILQ_INSERT_TAIL(list, group, node);
}

void cxl_extent_group_list_delete_front(CXLDCExtentGroupList *list)
{
    CXLDCExtent *ent, *ent_next;
    CXLDCExtentGroup *group = QTAILQ_FIRST(list);

    QTAILQ_REMOVE(list, group, node);
    QTAILQ_FOREACH_SAFE(ent, &group->list, node, ent_next) {
        cxl_remove_extent_from_extent_list(&group->list, ent);
    }
    g_free(group);
}
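
/*
 * Editor's sketch of the pending-extent lifecycle these helpers implement:
 * extents offered to the host are batched into one group, the group is
 * queued on the pending list, and a later Add Dynamic Capacity Response
 * retires the group at the head of the queue. The DPAs and lengths are
 * illustrative only.
 */
#if 0
static void queue_one_pending_group(CXLType3Dev *ct3d)
{
    CXLDCExtentGroup *group = NULL;

    group = cxl_insert_extent_to_extent_group(group, 0, 128 * MiB, NULL, 0);
    group = cxl_insert_extent_to_extent_group(group, 256 * MiB, 128 * MiB,
                                              NULL, 0);
    cxl_extent_group_list_insert_tail(&ct3d->dc.extents_pending, group);
    /* ...host accepts -> cmd_dcd_add_dyn_cap_rsp() deletes the front group */
}
#endif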

/*
 * CXL r3.1 Table 8-168: Add Dynamic Capacity Response Input Payload
 * CXL r3.1 Table 8-170: Release Dynamic Capacity Input Payload
 */
typedef struct CXLUpdateDCExtentListInPl {
    uint32_t num_entries_updated;
    uint8_t flags;
    uint8_t rsvd[3];
    /* CXL r3.1 Table 8-169: Updated Extent */
    struct {
        uint64_t start_dpa;
        uint64_t len;
        uint8_t rsvd[8];
    } QEMU_PACKED updated_entries[];
} QEMU_PACKED CXLUpdateDCExtentListInPl;

/*
 * Check whether the extents in the list to be operated on are valid:
 * 1. The extent should be in the range of a valid DC region;
 * 2. The extent should not cross multiple regions;
 * 3. The start DPA and the length of the extent should align with the block
 * size of the region;
 * 4. The address ranges of the extents in the list should not overlap.
 */
static CXLRetCode cxl_detect_malformed_extent_list(CXLType3Dev *ct3d,
        const CXLUpdateDCExtentListInPl *in)
{
    uint64_t min_block_size = UINT64_MAX;
    CXLDCRegion *region;
    CXLDCRegion *lastregion = &ct3d->dc.regions[ct3d->dc.num_regions - 1];
    g_autofree unsigned long *blk_bitmap = NULL;
    uint64_t dpa, len;
    uint32_t i;

    for (i = 0; i < ct3d->dc.num_regions; i++) {
        region = &ct3d->dc.regions[i];
        min_block_size = MIN(min_block_size, region->block_size);
    }

    blk_bitmap = bitmap_new((lastregion->base + lastregion->len -
                             ct3d->dc.regions[0].base) / min_block_size);

    for (i = 0; i < in->num_entries_updated; i++) {
        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        region = cxl_find_dc_region(ct3d, dpa, len);
        if (!region) {
            return CXL_MBOX_INVALID_PA;
        }

        dpa -= ct3d->dc.regions[0].base;
        if (dpa % region->block_size || len % region->block_size) {
            return CXL_MBOX_INVALID_EXTENT_LIST;
        }
        /* The DPA range is already covered by another extent in the list */
        if (test_any_bits_set(blk_bitmap, dpa / min_block_size,
                              len / min_block_size)) {
            return CXL_MBOX_INVALID_EXTENT_LIST;
        }
        bitmap_set(blk_bitmap, dpa / min_block_size, len / min_block_size);
    }

    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cxl_dcd_add_dyn_cap_rsp_dry_run(CXLType3Dev *ct3d,
        const CXLUpdateDCExtentListInPl *in)
{
    uint32_t i;
    CXLDCExtent *ent;
    CXLDCExtentGroup *ext_group;
    uint64_t dpa, len;
    Range range1, range2;

    for (i = 0; i < in->num_entries_updated; i++) {
        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        range_init_nofail(&range1, dpa, len);

        /*
         * The host-accepted DPA range must be contained by the first extent
         * group in the pending list
         */
        ext_group = QTAILQ_FIRST(&ct3d->dc.extents_pending);
        if (!cxl_extents_contains_dpa_range(&ext_group->list, dpa, len)) {
            return CXL_MBOX_INVALID_PA;
        }

        /* to-be-added range should not overlap with range already accepted */
        QTAILQ_FOREACH(ent, &ct3d->dc.extents, node) {
            range_init_nofail(&range2, ent->start_dpa, ent->len);
            if (range_overlaps_range(&range1, &range2)) {
                return CXL_MBOX_INVALID_PA;
            }
        }
    }
    return CXL_MBOX_SUCCESS;
}

/*
 * CXL r3.1 section 8.2.9.9.9.3: Add Dynamic Capacity Response (Opcode 4802h)
 * An extent is added to the extent list and becomes usable only after the
 * response is processed successfully.
 */
static CXLRetCode cmd_dcd_add_dyn_cap_rsp(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLUpdateDCExtentListInPl *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDCExtentList *extent_list = &ct3d->dc.extents;
    uint32_t i;
    uint64_t dpa, len;
    CXLRetCode ret;

    if (len_in < sizeof(*in)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (in->num_entries_updated == 0) {
        cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);
        return CXL_MBOX_SUCCESS;
    }

    if (len_in <
        sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    /* Adding these extents would exceed the device's extent tracking limit */
    if (in->num_entries_updated + ct3d->dc.total_extent_count >
        CXL_NUM_EXTENTS_SUPPORTED) {
        return CXL_MBOX_RESOURCES_EXHAUSTED;
    }

    ret = cxl_detect_malformed_extent_list(ct3d, in);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    ret = cxl_dcd_add_dyn_cap_rsp_dry_run(ct3d, in);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    for (i = 0; i < in->num_entries_updated; i++) {
        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        cxl_insert_extent_to_extent_list(extent_list, dpa, len, NULL, 0);
        ct3d->dc.total_extent_count += 1;
        ct3_set_region_block_backed(ct3d, dpa, len);
    }
    /* Remove the first extent group in the pending list */
    cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);

    return CXL_MBOX_SUCCESS;
}
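
/*
 * Editor's sketch: what a well-formed Add Dynamic Capacity Response input
 * payload looks like when accepting one 128 MiB extent at DPA 0 (assuming a
 * region with base 0 whose block size divides both values). The layout
 * follows CXLUpdateDCExtentListInPl above; build_add_rsp_payload() is
 * hypothetical.
 */
#if 0
static void build_add_rsp_payload(uint8_t *payload)
{
    CXLUpdateDCExtentListInPl *pl = (void *)payload;

    pl->num_entries_updated = 1;
    pl->flags = 0;
    pl->updated_entries[0].start_dpa = 0;
    pl->updated_entries[0].len = 128 * MiB;
    /* len_in must be sizeof(*pl) + 1 * sizeof(pl->updated_entries[0]) */
}
#endif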

/*
 * Copy extent list from src to dst
 * Return value: number of extents copied
 */
static uint32_t copy_extent_list(CXLDCExtentList *dst,
                                 const CXLDCExtentList *src)
{
    uint32_t cnt = 0;
    CXLDCExtent *ent;

    if (!dst || !src) {
        return 0;
    }

    QTAILQ_FOREACH(ent, src, node) {
        cxl_insert_extent_to_extent_list(dst, ent->start_dpa, ent->len,
                                         ent->tag, ent->shared_seq);
        cnt++;
    }
    return cnt;
}

static CXLRetCode cxl_dc_extent_release_dry_run(CXLType3Dev *ct3d,
        const CXLUpdateDCExtentListInPl *in, CXLDCExtentList *updated_list,
        uint32_t *updated_list_size)
{
    CXLDCExtent *ent, *ent_next;
    uint64_t dpa, len;
    uint32_t i;
    int cnt_delta = 0;
    CXLRetCode ret = CXL_MBOX_SUCCESS;

    QTAILQ_INIT(updated_list);
    copy_extent_list(updated_list, &ct3d->dc.extents);

    for (i = 0; i < in->num_entries_updated; i++) {
        Range range;

        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        /* Check if the DPA range is not fully backed with valid extents */
        if (!ct3_test_region_block_backed(ct3d, dpa, len)) {
            ret = CXL_MBOX_INVALID_PA;
            goto free_and_exit;
        }

        /* After this point, extent overflow is the only error that can occur */
        while (len > 0) {
            QTAILQ_FOREACH(ent, updated_list, node) {
                range_init_nofail(&range, ent->start_dpa, ent->len);

                if (range_contains(&range, dpa)) {
                    uint64_t len1, len2 = 0, len_done = 0;
                    uint64_t ent_start_dpa = ent->start_dpa;
                    uint64_t ent_len = ent->len;

                    len1 = dpa - ent->start_dpa;
                    /* Found the extent or the subset of an existing extent */
                    if (range_contains(&range, dpa + len - 1)) {
                        len2 = ent_start_dpa + ent_len - dpa - len;
                    } else {
                        dpa = ent_start_dpa + ent_len;
                    }
                    len_done = ent_len - len1 - len2;

                    cxl_remove_extent_from_extent_list(updated_list, ent);
                    cnt_delta--;

                    if (len1) {
                        cxl_insert_extent_to_extent_list(updated_list,
                                                         ent_start_dpa,
                                                         len1, NULL, 0);
                        cnt_delta++;
                    }
                    if (len2) {
                        cxl_insert_extent_to_extent_list(updated_list,
                                                         dpa + len,
                                                         len2, NULL, 0);
                        cnt_delta++;
                    }

                    if (cnt_delta + ct3d->dc.total_extent_count >
                            CXL_NUM_EXTENTS_SUPPORTED) {
                        ret = CXL_MBOX_RESOURCES_EXHAUSTED;
                        goto free_and_exit;
                    }

                    len -= len_done;
                    break;
                }
            }
        }
    }
free_and_exit:
    if (ret != CXL_MBOX_SUCCESS) {
        QTAILQ_FOREACH_SAFE(ent, updated_list, node, ent_next) {
            cxl_remove_extent_from_extent_list(updated_list, ent);
        }
        *updated_list_size = 0;
    } else {
        *updated_list_size = ct3d->dc.total_extent_count + cnt_delta;
    }

    return ret;
}
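
/*
 * Editor's worked example of the splitting above: releasing [64 MiB, 128 MiB)
 * from an accepted extent covering [0, 256 MiB) removes that extent
 * (cnt_delta - 1) and re-inserts a leading piece [0, 64 MiB) and a trailing
 * piece [128 MiB, 256 MiB) (cnt_delta + 2), for a net gain of one extent in
 * updated_list.
 */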

/*
 * CXL r3.1 section 8.2.9.9.9.4: Release Dynamic Capacity (Opcode 4803h)
 */
static CXLRetCode cmd_dcd_release_dyn_cap(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLUpdateDCExtentListInPl *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDCExtentList updated_list;
    CXLDCExtent *ent, *ent_next;
    uint32_t updated_list_size;
    CXLRetCode ret;

    if (len_in < sizeof(*in)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (in->num_entries_updated == 0) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (len_in <
        sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    ret = cxl_detect_malformed_extent_list(ct3d, in);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    ret = cxl_dc_extent_release_dry_run(ct3d, in, &updated_list,
                                        &updated_list_size);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    /*
     * If the dry run release passes, the returned updated_list is the new
     * extent list: clear the extents in the accepted list, copy the extents
     * from updated_list into it, and update the extent count.
     */
    QTAILQ_FOREACH_SAFE(ent, &ct3d->dc.extents, node, ent_next) {
        ct3_clear_region_block_backed(ct3d, ent->start_dpa, ent->len);
        cxl_remove_extent_from_extent_list(&ct3d->dc.extents, ent);
    }
    copy_extent_list(&ct3d->dc.extents, &updated_list);
    QTAILQ_FOREACH_SAFE(ent, &updated_list, node, ent_next) {
        ct3_set_region_block_backed(ct3d, ent->start_dpa, ent->len);
        cxl_remove_extent_from_extent_list(&updated_list, ent);
    }
    ct3d->dc.total_extent_count = updated_list_size;

    return CXL_MBOX_SUCCESS;
}
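
/*
 * Editor's note on the tables below: each entry is
 * { name, handler, expected input payload length, effects }. An input
 * length of ~0 means the handler validates a variable-length payload itself
 * (see the length check in cxl_process_cci_message()), and the effects
 * flags are what cxl_rebuild_cel() publishes in the Command Effects Log.
 */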

static const struct cxl_cmd cxl_cmd_set[256][256] = {
    [EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS",
        cmd_events_get_records, 1, 0 },
    [EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS",
        cmd_events_clear_records, ~0, CXL_MBOX_IMMEDIATE_LOG_CHANGE },
    [EVENTS][GET_INTERRUPT_POLICY] = { "EVENTS_GET_INTERRUPT_POLICY",
        cmd_events_get_interrupt_policy, 0, 0 },
    [EVENTS][SET_INTERRUPT_POLICY] = { "EVENTS_SET_INTERRUPT_POLICY",
        cmd_events_set_interrupt_policy,
        ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE },
    [FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO",
        cmd_firmware_update_get_info, 0, 0 },
    [FIRMWARE_UPDATE][TRANSFER] = { "FIRMWARE_UPDATE_TRANSFER",
        cmd_firmware_update_transfer, ~0, CXL_MBOX_BACKGROUND_OPERATION },
    [FIRMWARE_UPDATE][ACTIVATE] = { "FIRMWARE_UPDATE_ACTIVATE",
        cmd_firmware_update_activate, 2, CXL_MBOX_BACKGROUND_OPERATION },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set,
        8, CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported,
        0, 0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [FEATURES][GET_SUPPORTED] = { "FEATURES_GET_SUPPORTED",
        cmd_features_get_supported, 0x8, 0 },
    [FEATURES][GET_FEATURE] = { "FEATURES_GET_FEATURE",
        cmd_features_get_feature, 0x15, 0 },
    [FEATURES][SET_FEATURE] = { "FEATURES_SET_FEATURE",
        cmd_features_set_feature,
        ~0,
        (CXL_MBOX_IMMEDIATE_CONFIG_CHANGE |
         CXL_MBOX_IMMEDIATE_DATA_CHANGE |
         CXL_MBOX_IMMEDIATE_POLICY_CHANGE |
         CXL_MBOX_IMMEDIATE_LOG_CHANGE |
         CXL_MBOX_SECURITY_STATE_CHANGE)},
    [IDENTIFY][MEMORY_DEVICE] = { "IDENTIFY_MEMORY_DEVICE",
        cmd_identify_memory_device, 0, 0 },
    [CCLS][GET_PARTITION_INFO] = { "CCLS_GET_PARTITION_INFO",
        cmd_ccls_get_partition_info, 0, 0 },
    [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 },
    [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa,
        ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE | CXL_MBOX_IMMEDIATE_DATA_CHANGE },
    [SANITIZE][OVERWRITE] = { "SANITIZE_OVERWRITE", cmd_sanitize_overwrite, 0,
        (CXL_MBOX_IMMEDIATE_DATA_CHANGE |
         CXL_MBOX_SECURITY_STATE_CHANGE |
         CXL_MBOX_BACKGROUND_OPERATION)},
    [PERSISTENT_MEM][GET_SECURITY_STATE] = { "GET_SECURITY_STATE",
        cmd_get_security_state, 0, 0 },
    [MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST",
        cmd_media_get_poison_list, 16, 0 },
    [MEDIA_AND_POISON][INJECT_POISON] = { "MEDIA_AND_POISON_INJECT_POISON",
        cmd_media_inject_poison, 8, 0 },
    [MEDIA_AND_POISON][CLEAR_POISON] = { "MEDIA_AND_POISON_CLEAR_POISON",
        cmd_media_clear_poison, 72, 0 },
    [MEDIA_AND_POISON][GET_SCAN_MEDIA_CAPABILITIES] = {
        "MEDIA_AND_POISON_GET_SCAN_MEDIA_CAPABILITIES",
        cmd_media_get_scan_media_capabilities, 16, 0 },
    [MEDIA_AND_POISON][SCAN_MEDIA] = { "MEDIA_AND_POISON_SCAN_MEDIA",
        cmd_media_scan_media, 17, CXL_MBOX_BACKGROUND_OPERATION },
    [MEDIA_AND_POISON][GET_SCAN_MEDIA_RESULTS] = {
        "MEDIA_AND_POISON_GET_SCAN_MEDIA_RESULTS",
        cmd_media_get_scan_media_results, 0, 0 },
};

static const struct cxl_cmd cxl_cmd_set_dcd[256][256] = {
    [DCD_CONFIG][GET_DC_CONFIG] = { "DCD_GET_DC_CONFIG",
        cmd_dcd_get_dyn_cap_config, 2, 0 },
    [DCD_CONFIG][GET_DYN_CAP_EXT_LIST] = {
        "DCD_GET_DYNAMIC_CAPACITY_EXTENT_LIST", cmd_dcd_get_dyn_cap_ext_list,
        8, 0 },
    [DCD_CONFIG][ADD_DYN_CAP_RSP] = {
        "DCD_ADD_DYNAMIC_CAPACITY_RESPONSE", cmd_dcd_add_dyn_cap_rsp,
        ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE },
    [DCD_CONFIG][RELEASE_DYN_CAP] = {
        "DCD_RELEASE_DYNAMIC_CAPACITY", cmd_dcd_release_dyn_cap,
        ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE },
};

static const struct cxl_cmd cxl_cmd_set_sw[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [INFOSTAT][BACKGROUND_OPERATION_STATUS] = { "BACKGROUND_OPERATION_STATUS",
        cmd_infostat_bg_op_sts, 0, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 8,
        CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
        0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [PHYSICAL_SWITCH][IDENTIFY_SWITCH_DEVICE] = { "IDENTIFY_SWITCH_DEVICE",
        cmd_identify_switch_device, 0, 0 },
    [PHYSICAL_SWITCH][GET_PHYSICAL_PORT_STATE] = { "SWITCH_PHYSICAL_PORT_STATS",
        cmd_get_physical_port_state, ~0, 0 },
    [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
        cmd_tunnel_management_cmd, ~0, 0 },
};

/*
 * While the command is executing in the background, the device should
 * update the percentage complete in the Background Command Status Register
 * at least once per second.
 */

#define CXL_MBOX_BG_UPDATE_FREQ 1000UL

int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd,
                            size_t len_in, uint8_t *pl_in, size_t *len_out,
                            uint8_t *pl_out, bool *bg_started)
{
    int ret;
    const struct cxl_cmd *cxl_cmd;
    opcode_handler h;
    CXLDeviceState *cxl_dstate;

    *len_out = 0;
    cxl_cmd = &cci->cxl_cmd_set[set][cmd];
    h = cxl_cmd->handler;
    if (!h) {
        qemu_log_mask(LOG_UNIMP, "Command %04xh not implemented\n",
                      set << 8 | cmd);
        return CXL_MBOX_UNSUPPORTED;
    }

    if (len_in != cxl_cmd->in && cxl_cmd->in != ~0) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    /* Only one bg command at a time */
    if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) &&
        cci->bg.runtime > 0) {
        return CXL_MBOX_BUSY;
    }

    /* forbid any selected commands while the media is disabled */
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;

        if (cxl_dev_media_disabled(cxl_dstate)) {
            if (h == cmd_events_get_records ||
                h == cmd_ccls_get_partition_info ||
                h == cmd_ccls_set_lsa ||
                h == cmd_ccls_get_lsa ||
                h == cmd_logs_get_log ||
                h == cmd_media_get_poison_list ||
                h == cmd_media_inject_poison ||
                h == cmd_media_clear_poison ||
                h == cmd_sanitize_overwrite ||
                h == cmd_firmware_update_transfer ||
                h == cmd_firmware_update_activate) {
                return CXL_MBOX_MEDIA_DISABLED;
            }
        }
    }

    ret = (*h)(cxl_cmd, pl_in, len_in, pl_out, len_out, cci);
    if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) &&
        ret == CXL_MBOX_BG_STARTED) {
        *bg_started = true;
    } else {
        *bg_started = false;
    }

    /* Set bg and the return code */
    if (*bg_started) {
        uint64_t now;

        cci->bg.opcode = (set << 8) | cmd;

        cci->bg.complete_pct = 0;
        cci->bg.ret_code = 0;

        now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
        cci->bg.starttime = now;
        timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
    }

    return ret;
}
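
/*
 * Editor's sketch of the caller's contract: dispatch a fixed-size command
 * and observe whether a background operation started. The buffer size and
 * opcode choice are illustrative; dispatch_timestamp_get() is hypothetical.
 */
#if 0
static void dispatch_timestamp_get(CXLCCI *cci)
{
    uint8_t pl_out[CXL_MAILBOX_MAX_PAYLOAD_SIZE];
    size_t len_out = 0;
    bool bg_started = false;
    int rc;

    rc = cxl_process_cci_message(cci, TIMESTAMP, GET, 0, NULL,
                                 &len_out, pl_out, &bg_started);
    assert(rc == CXL_MBOX_SUCCESS && !bg_started);
}
#endif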

static void bg_timercb(void *opaque)
{
    CXLCCI *cci = opaque;
    uint64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    uint64_t total_time = cci->bg.starttime + cci->bg.runtime;

    assert(cci->bg.runtime > 0);

    if (now >= total_time) { /* we are done */
        uint16_t ret = CXL_MBOX_SUCCESS;

        cci->bg.complete_pct = 100;
        cci->bg.ret_code = ret;
        switch (cci->bg.opcode) {
        case 0x0201: /* fw transfer */
            __do_firmware_xfer(cci);
            break;
        case 0x4400: /* sanitize */
        {
            CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

            __do_sanitization(ct3d);
            cxl_dev_enable_media(&ct3d->cxl_dstate);
        }
            break;
        case 0x4304: /* scan media */
        {
            CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

            __do_scan_media(ct3d);
            break;
        }
        default:
            __builtin_unreachable();
            break;
        }
    } else {
        /* estimate only */
        cci->bg.complete_pct =
            100 * (now - cci->bg.starttime) / cci->bg.runtime;
        timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
    }

    if (cci->bg.complete_pct == 100) {
        /* TODO: generalize to switch CCI */
        CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
        CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
        PCIDevice *pdev = PCI_DEVICE(cci->d);

        cci->bg.starttime = 0;
        /* registers are updated, allow new bg-capable cmds */
        cci->bg.runtime = 0;

        if (msix_enabled(pdev)) {
            msix_notify(pdev, cxl_dstate->mbox_msi_n);
        } else if (msi_enabled(pdev)) {
            msi_notify(pdev, cxl_dstate->mbox_msi_n);
        }
    }
}

static void cxl_rebuild_cel(CXLCCI *cci)
{
    cci->cel_size = 0; /* Reset for a fresh build */
    for (int set = 0; set < 256; set++) {
        for (int cmd = 0; cmd < 256; cmd++) {
            if (cci->cxl_cmd_set[set][cmd].handler) {
                const struct cxl_cmd *c = &cci->cxl_cmd_set[set][cmd];
                struct cel_log *log = &cci->cel_log[cci->cel_size];

                log->opcode = (set << 8) | cmd;
                log->effect = c->effect;
                cci->cel_size++;
            }
        }
    }
}

void cxl_init_cci(CXLCCI *cci, size_t payload_max)
{
    cci->payload_max = payload_max;
    cxl_rebuild_cel(cci);

    cci->bg.complete_pct = 0;
    cci->bg.starttime = 0;
    cci->bg.runtime = 0;
    cci->bg.timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                 bg_timercb, cci);

    memset(&cci->fw, 0, sizeof(cci->fw));
    cci->fw.active_slot = 1;
    cci->fw.slot[cci->fw.active_slot - 1] = true;
}

static void cxl_copy_cci_commands(CXLCCI *cci,
                                  const struct cxl_cmd (*cxl_cmds)[256])
{
    for (int set = 0; set < 256; set++) {
        for (int cmd = 0; cmd < 256; cmd++) {
            if (cxl_cmds[set][cmd].handler) {
                cci->cxl_cmd_set[set][cmd] = cxl_cmds[set][cmd];
            }
        }
    }
}

void cxl_add_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmd_set)[256],
                          size_t payload_max)
{
    cci->payload_max = MAX(payload_max, cci->payload_max);
    cxl_copy_cci_commands(cci, cxl_cmd_set);
    cxl_rebuild_cel(cci);
}

void cxl_initialize_mailbox_swcci(CXLCCI *cci, DeviceState *intf,
                                  DeviceState *d, size_t payload_max)
{
    cxl_copy_cci_commands(cci, cxl_cmd_set_sw);
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}

void cxl_initialize_mailbox_t3(CXLCCI *cci, DeviceState *d, size_t payload_max)
{
    CXLType3Dev *ct3d = CXL_TYPE3(d);

    cxl_copy_cci_commands(cci, cxl_cmd_set);
    if (ct3d->dc.num_regions) {
        cxl_copy_cci_commands(cci, cxl_cmd_set_dcd);
    }
    cci->d = d;

    /* No separation for PCI MB as protocol handled in PCI device */
    cci->intf = d;
    cxl_init_cci(cci, payload_max);
}

static const struct cxl_cmd cxl_cmd_set_t3_ld[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
                              0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
};

void cxl_initialize_t3_ld_cci(CXLCCI *cci, DeviceState *d, DeviceState *intf,
                              size_t payload_max)
{
    cxl_copy_cci_commands(cci, cxl_cmd_set_t3_ld);
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}

static const struct cxl_cmd cxl_cmd_set_t3_fm_owned_ld_mctp[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0},
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
                              0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
                                     cmd_tunnel_management_cmd, ~0, 0 },
};

void cxl_initialize_t3_fm_owned_ld_mctpcci(CXLCCI *cci, DeviceState *d,
                                           DeviceState *intf,
                                           size_t payload_max)
{
    cxl_copy_cci_commands(cci, cxl_cmd_set_t3_fm_owned_ld_mctp);
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}